repo_name stringlengths 6 67 | path stringlengths 5 185 | copies stringlengths 1 3 | size stringlengths 4 6 | content stringlengths 1.02k 962k | license stringclasses 15 values |
|---|---|---|---|---|---|
mmadsen/sklearn-mmadsen | sklearn_mmadsen/dnn/dnnestimators.py | 1 | 7032 | #!/usr/bin/env python
import numpy as np
import pprint as pp
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.metrics import accuracy_score
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import SGD, Adam
from keras.regularizers import WeightRegularizer
from keras.callbacks import EarlyStopping
class ParameterizedDNNClassifier(BaseEstimator, ClassifierMixin):
    """
    DNN classifier built on Keras (legacy 0.x/1.x API), wrapped as a
    scikit-learn estimator so it can be used in grid/random search
    cross-validation or a Pipeline.

    Expected data format:
      X: numpy array of np.float32, shape (n_samples, input_dimension)
      y: numpy array one-hot encoding the label, shape (n_samples, output_dimension)

    Because targets are one-hot encoded (most sklearn scorers expect a single
    label column instead), the class provides its own predict/score methods.
    """

    def __init__(self,
                 input_dimension=10,
                 output_dimension=2,
                 dropout_fraction=0.5,
                 dense_activation='relu',
                 output_activation='softmax',
                 num_dense_hidden=1,
                 hidden_sizes=None,
                 sgd_lr=0.01,
                 decay=1e-6,
                 momentum=0.9,
                 epochs=100,
                 batch_size=500,
                 verbose=1):
        # hidden_sizes holds one entry per hidden layer plus a final entry for
        # the input size of the output layer (so num_dense_hidden + 1 entries).
        # Bug fix: the default used to be a shared mutable list ([5, 10]);
        # None is used as the sentinel and expanded per instance instead.
        self.input_dim = input_dimension
        self.output_dim = output_dimension
        self.drop_frac = dropout_fraction
        self.dense_activation = dense_activation
        self.output_activation = output_activation
        self.num_dense = num_dense_hidden
        self.hidden_dim = [5, 10] if hidden_sizes is None else hidden_sizes
        self.sgd_lr = sgd_lr
        self.sgd_decay = decay
        self.sgd_momentum = momentum
        self.epochs = epochs
        self.batch_size = batch_size
        self.verbose = verbose

    def fit(self, X, y):
        """Build, compile and train the network; returns self (sklearn contract)."""
        model = Sequential()
        # Input layer -> first hidden layer.
        model.add(Dense(input_dim=self.input_dim, output_dim=self.hidden_dim[0],
                        init='glorot_uniform', activation=self.dense_activation))
        model.add(Dropout(self.drop_frac))
        # num_dense additional hidden layer / dropout pairs.
        for i in range(self.num_dense):
            model.add(Dense(input_dim=self.hidden_dim[i], output_dim=self.hidden_dim[i + 1],
                            init='glorot_uniform', activation=self.dense_activation))
            model.add(Dropout(self.drop_frac))
        model.add(Dense(input_dim=self.hidden_dim[self.num_dense], output_dim=self.output_dim,
                        activation=self.output_activation))
        # NOTE(review): binary_crossentropy with a softmax output is unusual
        # for multi-class targets (categorical_crossentropy is typical).
        # Kept as-is to preserve trained-model behavior — confirm intent.
        solver = SGD(lr=self.sgd_lr, decay=self.sgd_decay, momentum=self.sgd_momentum, nesterov=True)
        model.compile(loss='binary_crossentropy', optimizer=solver)
        self.compiled_model = model
        self.early_stopping = EarlyStopping(monitor='val_loss', patience=2)
        self.history = self.compiled_model.fit(X,
                                               y,
                                               nb_epoch=self.epochs,
                                               batch_size=self.batch_size,
                                               validation_split=0.1,
                                               verbose=self.verbose,
                                               show_accuracy=True,
                                               callbacks=[self.early_stopping])
        # Bug fix: fit() must return self so sklearn cross-validation and
        # chained calls (est.fit(X, y).predict(...)) work.
        return self

    def predict(self, X):
        """Predict integer class indices (argmax over the network output)."""
        return self.compiled_model.predict_classes(X, batch_size=self.batch_size)

    def score(self, X, y):
        """Accuracy score against one-hot encoded targets *y*."""
        preds = self.compiled_model.predict_classes(X, batch_size=self.batch_size)
        actuals = np.argmax(y, axis=1)
        return accuracy_score(actuals, preds)

    def get_params(self, deep=True):
        """Return constructor parameters, for sklearn cloning and search."""
        return {
            'input_dimension': self.input_dim,
            'output_dimension': self.output_dim,
            'dropout_fraction': self.drop_frac,
            'dense_activation': self.dense_activation,
            'output_activation': self.output_activation,
            'num_dense_hidden': self.num_dense,
            'hidden_sizes': self.hidden_dim,
            'sgd_lr': self.sgd_lr,
            'decay': self.sgd_decay,
            'momentum': self.sgd_momentum,
            'epochs': self.epochs,
            'batch_size': self.batch_size,
            'verbose': self.verbose
        }

    def set_params(self, **parameters):
        """Set constructor parameters by keyword (sklearn contract)."""
        for parameter, value in parameters.items():
            setattr(self, parameter, value)
        return self

    def get_history(self):
        """
        Returns the fitting metrics recorded at each epoch by the last fit().
        This is a Keras-specific function and is not part of the sklearn API;
        you need access to the actual estimator object (not a Pipeline).
        """
        return self.history.history
| apache-2.0 |
SANBI-SA/tools-iuc | tools/cwpair2/cwpair2_util.py | 3 | 13741 | import bisect
import csv
import os
import sys
import traceback
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot # noqa: I202,E402
# Data outputs
DETAILS = 'D'
MATCHED_PAIRS = 'MP'
ORPHANS = 'O'
# Data output formats
GFF_EXT = 'gff'
TABULAR_EXT = 'tabular'
# Statistics historgrams output directory.
HISTOGRAM = 'H'
# Statistics outputs
FINAL_PLOTS = 'F'
PREVIEW_PLOTS = 'P'
STATS_GRAPH = 'C'
# Graph settings.
COLORS = 'krg'
Y_LABEL = 'Peak-pair counts'
X_LABEL = 'Peak-pair distance (bp)'
TICK_WIDTH = 3
ADJUST = [0.140, 0.9, 0.9, 0.1]
PLOT_FORMAT = 'pdf'
pyplot.rc('xtick.major', size=10.00)
pyplot.rc('ytick.major', size=10.00)
pyplot.rc('lines', linewidth=4.00)
pyplot.rc('axes', linewidth=3.00)
pyplot.rc('font', family='Bitstream Vera Sans', size=32.0)
class FrequencyDistribution(object):
    """Histogram of peak-pair distances over [start, end) with fixed-width bins."""

    def __init__(self, start, end, binsize=10, d=None):
        self.start = start
        self.end = end
        self.dist = d or {}
        self.binsize = binsize

    def get_bin(self, x):
        """Return the center of the bin into which *x* falls."""
        offset = (x - self.start) // self.binsize * self.binsize
        return self.start + offset + self.binsize / 2.0

    def add(self, x):
        """Record one observation of *x* in its bin."""
        center = self.get_bin(x)
        self.dist[center] = self.dist.get(center, 0) + 1

    def graph_series(self):
        """Return parallel lists (bin centers, counts) covering the full range."""
        xs, ys = [], []
        for edge in range(self.start, self.end, self.binsize):
            center = self.get_bin(edge)
            xs.append(center)
            ys.append(self.dist.get(center, 0))
        return xs, ys

    def mode(self):
        """Return the bin center with the highest count."""
        best = max(self.dist.items(), key=lambda kv: kv[1])
        return best[0]

    def size(self):
        """Return the total number of recorded observations."""
        return sum(self.dist.values())
def stop_err(msg):
    """Write *msg* to stderr, then terminate the process with exit status 1."""
    print(msg, file=sys.stderr, end='')
    sys.exit(1)
def distance(peak1, peak2):
    """Signed distance between the midpoints of two peak tuples (strand, start, end, value)."""
    mid1 = (peak1[1] + peak1[2]) / 2
    mid2 = (peak2[1] + peak2[2]) / 2
    return mid2 - mid1
def gff_row(cname, start, end, score, source, type='.', strand='.', phase='.', attrs=None):
    """
    Assemble one 9-column GFF row tuple.

    Fix: the `attrs` default was a shared mutable dict ({}); None is used
    instead.  Output is unchanged — gff_attrs renders any falsy value as '.'.
    """
    return (cname, source, type, start, end, score, strand, phase, gff_attrs(attrs))
def gff_attrs(d):
    """Serialize a dict as a GFF attribute string; '.' placeholder when empty."""
    if not d:
        return '.'
    parts = ['%s=%s' % pair for pair in d.items()]
    return ';'.join(parts)
def parse_chromosomes(reader):
    """
    Group GFF rows by chromosome name.

    This version of cwpair2 accepts only gff format as input.  The first row
    is assumed to be a header and skipped.  Returns a dict mapping chromosome
    name to a list of (strand, start, end, value) peak tuples.
    """
    chromosomes = {}
    next(reader)  # discard the header row
    for line in reader:
        cname, junk, junk, start, end, value, strand, junk, junk = line
        peak = (strand, int(start), int(end), float(value))
        chromosomes.setdefault(cname, []).append(peak)
    return chromosomes
def perc95(chromosomes):
    """
    Return the 95th percentile peak value across all chromosomes
    (nearest-rank on the sorted list of values).
    """
    values = sorted(peak[3] for peaks in chromosomes.values() for peak in peaks)
    # Index at 95% of the sorted list.
    return values[int(len(values) * 0.95)]
def filter(chromosomes, threshold=0.05):
    """
    In-place filter keeping only peaks whose value exceeds the threshold.
    A threshold < 1.0 is interpreted as a proportion of the 95th-percentile
    value; >= 1.0 is an absolute cutoff.  (Name shadows the builtin `filter`,
    kept for compatibility with existing callers.)
    """
    if threshold < 1:
        # Convert the proportion into an absolute cutoff.
        threshold = perc95(chromosomes) * threshold
    for cname in chromosomes:
        chromosomes[cname] = [p for p in chromosomes[cname] if p[3] > threshold]
def split_strands(chromosome):
    """Partition peaks into (watson, crick) lists by strand symbol, preserving order."""
    watson, crick = [], []
    for peak in chromosome:
        if peak[0] == '+':
            watson.append(peak)
        elif peak[0] == '-':
            crick.append(peak)
    return watson, crick
def all_pair_distribution(chromosomes, up_distance, down_distance, binsize):
    """
    Build the distribution of watson->crick midpoint distances over every
    candidate pairing that falls within the search window.
    """
    freq = FrequencyDistribution(-up_distance, down_distance, binsize=binsize)
    for data in chromosomes.values():
        watson, crick = split_strands(data)
        crick.sort(key=lambda p: float(p[1]))
        keys = make_keys(crick)
        for wpeak in watson:
            window = get_window(crick, wpeak, up_distance, down_distance, keys)
            for cpeak in window:
                freq.add(distance(wpeak, cpeak))
    return freq
def make_keys(crick):
    """Return the integer (floor) midpoint of each crick peak, in list order."""
    return [(peak[1] + peak[2]) // 2 for peak in crick]
def get_window(crick, peak, up_distance, down_distance, keys=None):
    """
    Return the crick peaks whose midpoint lies within [mid - up_distance,
    mid + down_distance] of the watson peak's midpoint.  The crick list MUST
    already be sorted by position; *keys* are its precomputed midpoints.
    """
    midpoint = (peak[1] + peak[2]) // 2
    keys = keys or make_keys(crick)
    # Binary-search the sorted midpoints for the window bounds.
    lo = bisect.bisect_left(keys, midpoint - up_distance)
    hi = bisect.bisect_right(keys, midpoint + down_distance)
    return [cpeak for cpeak in crick[lo:hi]]
def match_largest(window, peak):
    """Return the crick peak with the greatest value, or None for an empty window."""
    best = None
    for cpeak in window:
        if best is None or cpeak[3] > best[3]:
            best = cpeak
    return best
def match_closest(window, peak):
    """
    Return the crick peak at the smallest non-negative pairing distance.
    Negative (upstream) distances rank after all non-negative ones, with
    less-negative distances preferred among them.
    """
    if not window:
        return None

    def rank(cpeak):
        d = distance(peak, cpeak)
        # Push negative distances past any plausible positive one.
        return d if d >= 0 else 10000 - d

    return min(window, key=rank)
def match_mode(window, peak, mode):
    """Return the crick peak whose pairing distance is nearest the supplied mode."""
    if not window:
        return None
    best = None
    best_dev = None
    for cpeak in window:
        dev = abs(distance(peak, cpeak) - mode)
        if best is None or dev < best_dev:
            best, best_dev = cpeak, dev
    return best
METHODS = {'mode': match_mode, 'closest': match_closest, 'largest': match_largest}
def frequency_plot(freqs, fname, labels=[], title=''):
    # Render one curve per FrequencyDistribution in `freqs` (colors from the
    # module-level COLORS string) and save the figure to `fname`.
    # NOTE(review): `title` is accepted but never drawn on the figure —
    # confirm whether pyplot.title(title) was intended.  `labels=[]` is a
    # mutable default; it is only read here so it is harmless, but a None
    # default would be more conventional.
    pyplot.clf()
    pyplot.figure(figsize=(10, 10))
    for i, freq in enumerate(freqs):
        x, y = freq.graph_series()
        pyplot.plot(x, y, '%s-' % COLORS[i])
    if len(freqs) > 1:
        pyplot.legend(labels)
    # `freq` here is the last distribution plotted; all share the same range.
    pyplot.xlim(freq.start, freq.end)
    pyplot.ylim(ymin=0)
    pyplot.ylabel(Y_LABEL)
    pyplot.xlabel(X_LABEL)
    pyplot.subplots_adjust(left=ADJUST[0], right=ADJUST[1], top=ADJUST[2], bottom=ADJUST[3])
    # Get the current axes
    ax = pyplot.gca()
    for l in ax.get_xticklines() + ax.get_yticklines():
        l.set_markeredgewidth(TICK_WIDTH)
    pyplot.savefig(fname)
def create_directories():
    # Create the fixed output directory layout (relative to the current
    # working directory).  Raises OSError if any directory already exists.
    # Output histograms in pdf.
    os.mkdir(HISTOGRAM)
    os.mkdir('data_%s' % DETAILS)
    os.mkdir('data_%s' % ORPHANS)
    os.mkdir('data_%s' % MATCHED_PAIRS)
def process_file(dataset_path, galaxy_hid, method, threshold, up_distance,
                 down_distance, binsize, output_files):
    """
    Run perform_process once per pairing method ('all' expands to every
    registered method) and collect the per-method statistics dicts.  When both
    the method and the output selection are 'all', also draw the combined
    comparison plot of the final distributions.
    """
    match_methods = METHODS.keys() if method == 'all' else [method]
    statistics = []
    for match_method in match_methods:
        result = perform_process(dataset_path,
                                 galaxy_hid,
                                 match_method,
                                 threshold,
                                 up_distance,
                                 down_distance,
                                 binsize,
                                 output_files)
        statistics.append(result)
    if output_files == 'all' and method == 'all':
        frequency_plot([s['dist'] for s in statistics],
                       statistics[0]['graph_path'],
                       labels=list(METHODS.keys()))
    return statistics
def perform_process(dataset_path, galaxy_hid, method, threshold, up_distance,
                    down_distance, binsize, output_files):
    """
    Pair watson/crick peaks in one GFF dataset with a single matching method
    and write the requested outputs (details, orphans, matched pairs, plots).
    Returns a statistics dict with counts, output paths, modes and the final
    distance distribution.
    """
    output_details = output_files in ["all", "matched_pair_orphan_detail"]
    output_plots = output_files in ["all"]
    output_orphans = output_files in ["all", "matched_pair_orphan", "matched_pair_orphan_detail"]
    # Keep track of statistics for the output file
    statistics = {}
    input = csv.reader(open(dataset_path, 'rt'), delimiter='\t')
    fpath, fname = os.path.split(dataset_path)
    statistics['fname'] = '%s: data %s' % (method, str(galaxy_hid))
    statistics['dir'] = fpath
    # Encode the filter setting into the output file names: 'fa<N>' for an
    # absolute cutoff, 'f<pct>' for a proportional one.
    if threshold >= 1:
        filter_string = 'fa%d' % threshold
    else:
        filter_string = 'f%d' % (threshold * 100)
    fname = '%s_%su%dd%d_on_data_%s' % (method, filter_string, up_distance, down_distance, galaxy_hid)

    def make_histogram_path(output_type, fname):
        # Full path for a histogram plot of the given type.
        return os.path.join(HISTOGRAM, 'histogram_%s_%s.%s' % (output_type, fname, PLOT_FORMAT))

    def make_path(output_type, extension, fname):
        # Returns the full path for an output.
        return os.path.join(output_type, '%s_%s.%s' % (output_type, fname, extension))

    def td_writer(output_type, extension, fname):
        # Returns a tab-delimited writer for a specified output.
        output_file_path = make_path(output_type, extension, fname)
        return csv.writer(open(output_file_path, 'wt'), delimiter='\t')

    try:
        chromosomes = parse_chromosomes(input)
    except Exception:
        stop_err('Unable to parse file "%s".\n%s' % (dataset_path, traceback.format_exc()))
    if output_details:
        # Details
        detailed_output = td_writer('data_%s' % DETAILS, TABULAR_EXT, fname)
        detailed_output.writerow(('chrom', 'start', 'end', 'value', 'strand') * 2 + ('midpoint', 'c-w reads sum', 'c-w distance (bp)'))
    if output_plots:
        # Final Plot
        final_plot_path = make_histogram_path(FINAL_PLOTS, fname)
    if output_orphans:
        # Orphans
        orphan_output = td_writer('data_%s' % ORPHANS, TABULAR_EXT, fname)
        orphan_output.writerow(('chrom', 'strand', 'start', 'end', 'value'))
    if output_plots:
        # Preview Plot
        preview_plot_path = make_histogram_path(PREVIEW_PLOTS, fname)
    # Matched Pairs.
    matched_pairs_output = td_writer('data_%s' % MATCHED_PAIRS, GFF_EXT, fname)
    statistics['stats_path'] = 'statistics.%s' % TABULAR_EXT
    if output_plots:
        statistics['graph_path'] = make_histogram_path(STATS_GRAPH, fname)
    statistics['perc95'] = perc95(chromosomes)
    if threshold > 0:
        # Apply filter
        filter(chromosomes, threshold)
    if method == 'mode':
        # The 'mode' method first needs the most frequent pairing distance
        # over all candidate pairs.
        freq = all_pair_distribution(chromosomes, up_distance, down_distance, binsize)
        mode = freq.mode()
        statistics['preview_mode'] = mode
        if output_plots:
            frequency_plot([freq], preview_plot_path, title='Preview frequency plot')
    else:
        statistics['preview_mode'] = 'NA'
    dist = FrequencyDistribution(-up_distance, down_distance, binsize=binsize)
    orphans = 0
    # x will be used to archive the summary dataset
    x = []
    for cname, chromosome in chromosomes.items():
        # Each peak is (strand, start, end, value)
        watson, crick = split_strands(chromosome)
        # Sort by value of each peak
        watson.sort(key=lambda data: -float(data[3]))
        # Sort by position to facilitate binary search
        crick.sort(key=lambda data: float(data[1]))
        keys = make_keys(crick)
        for peak in watson:
            window = get_window(crick, peak, up_distance, down_distance, keys)
            if method == 'mode':
                match = match_mode(window, peak, mode)
            else:
                match = METHODS[method](window, peak)
            if match:
                midpoint = (match[1] + match[2] + peak[1] + peak[2]) // 4
                d = distance(peak, match)
                dist.add(d)
                # Simple output in gff format.
                x.append(gff_row(cname,
                                 source='cwpair',
                                 start=midpoint,
                                 end=midpoint + 1,
                                 score=peak[3] + match[3],
                                 attrs={'cw_distance': d}))
                if output_details:
                    detailed_output.writerow((cname,
                                              peak[1],
                                              peak[2],
                                              peak[3],
                                              '+',
                                              cname,
                                              match[1],
                                              match[2],
                                              match[3], '-',
                                              midpoint,
                                              peak[3] + match[3],
                                              d))
                # Bug fix: `keys` holds integer midpoints computed with floor
                # division (make_keys uses //), so this lookup must also use
                # floor division.  True division produced a .5 float for odd
                # start+end sums, making bisect_left land one slot too far
                # and deleting the wrong crick peak.
                i = bisect.bisect_left(keys, (match[1] + match[2]) // 2)
                del crick[i]
                del keys[i]
            else:
                if output_orphans:
                    orphan_output.writerow((cname, peak[0], peak[1], peak[2], peak[3]))
                # Keep track of orphans for statistics.
                orphans += 1
        # Remaining crick peaks are orphans
        if output_orphans:
            for cpeak in crick:
                orphan_output.writerow((cname, cpeak[0], cpeak[1], cpeak[2], cpeak[3]))
        # Keep track of orphans for statistics.
        orphans += len(crick)
    # Sort output descending by score.
    x.sort(key=lambda data: float(data[5]), reverse=True)
    # Writing a summary to gff format file
    for row in x:
        row_tmp = list(row)
        # Dataset in tuple cannot be modified in Python, so row will
        # be converted to list format to add 'chr'.
        if row_tmp[0] == "999":
            row_tmp[0] = 'chrM'
        elif row_tmp[0] == "998":
            row_tmp[0] = 'chrY'
        elif row_tmp[0] == "997":
            row_tmp[0] = 'chrX'
        else:
            row_tmp[0] = row_tmp[0]
        # Print row_tmp.
        matched_pairs_output.writerow(row_tmp)
    statistics['paired'] = dist.size() * 2
    statistics['orphans'] = orphans
    statistics['final_mode'] = dist.mode()
    if output_plots:
        frequency_plot([dist], final_plot_path, title='Frequency distribution')
    statistics['dist'] = dist
    return statistics
| mit |
EricSB/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/mlab.py | 69 | 104273 | """
Numerical python functions written for compatability with matlab(TM)
commands with the same names.
Matlab(TM) compatible functions
-------------------------------
:func:`cohere`
Coherence (normalized cross spectral density)
:func:`csd`
Cross spectral density uing Welch's average periodogram
:func:`detrend`
Remove the mean or best fit line from an array
:func:`find`
Return the indices where some condition is true;
numpy.nonzero is similar but more general.
:func:`griddata`
interpolate irregularly distributed data to a
regular grid.
:func:`prctile`
find the percentiles of a sequence
:func:`prepca`
Principal Component Analysis
:func:`psd`
Power spectral density uing Welch's average periodogram
:func:`rk4`
A 4th order runge kutta integrator for 1D or ND systems
:func:`specgram`
Spectrogram (power spectral density over segments of time)
Miscellaneous functions
-------------------------
Functions that don't exist in matlab(TM), but are useful anyway:
:meth:`cohere_pairs`
Coherence over all pairs. This is not a matlab function, but we
compute coherence a lot in my lab, and we compute it for a lot of
pairs. This function is optimized to do this efficiently by
caching the direct FFTs.
:meth:`rk4`
A 4th order Runge-Kutta ODE integrator in case you ever find
yourself stranded without scipy (and the far superior
scipy.integrate tools)
record array helper functions
-------------------------------
A collection of helper methods for numpyrecord arrays
.. _htmlonly::
See :ref:`misc-examples-index`
:meth:`rec2txt`
pretty print a record array
:meth:`rec2csv`
store record array in CSV file
:meth:`csv2rec`
import record array from CSV file with type inspection
:meth:`rec_append_fields`
adds field(s)/array(s) to record array
:meth:`rec_drop_fields`
drop fields from record array
:meth:`rec_join`
join two record arrays on sequence of fields
:meth:`rec_groupby`
summarize data by groups (similar to SQL GROUP BY)
:meth:`rec_summarize`
helper code to filter rec array fields into new fields
For the rec viewer functions(e rec2csv), there are a bunch of Format
objects you can pass into the functions that will do things like color
negative values red, set percent formatting and scaling, etc.
Example usage::
r = csv2rec('somefile.csv', checkrows=0)
formatd = dict(
weight = FormatFloat(2),
change = FormatPercent(2),
cost = FormatThousands(2),
)
rec2excel(r, 'test.xls', formatd=formatd)
rec2csv(r, 'test.csv', formatd=formatd)
scroll = rec2gtk(r, formatd=formatd)
win = gtk.Window()
win.set_size_request(600,800)
win.add(scroll)
win.show_all()
gtk.main()
Deprecated functions
---------------------
The following are deprecated; please import directly from numpy (with
care--function signatures may differ):
:meth:`conv`
convolution (numpy.convolve)
:meth:`corrcoef`
The matrix of correlation coefficients
:meth:`hist`
Histogram (numpy.histogram)
:meth:`linspace`
Linear spaced array from min to max
:meth:`load`
load ASCII file - use numpy.loadtxt
:meth:`meshgrid`
Make a 2D grid from 2 1 arrays (numpy.meshgrid)
:meth:`polyfit`
least squares best polynomial fit of x to y (numpy.polyfit)
:meth:`polyval`
evaluate a vector for a vector of polynomial coeffs (numpy.polyval)
:meth:`save`
save ASCII file - use numpy.savetxt
:meth:`trapz`
trapeziodal integration (trapz(x,y) -> numpy.trapz(y,x))
:meth:`vander`
the Vandermonde matrix (numpy.vander)
"""
from __future__ import division
import csv, warnings, copy, os
import numpy as np
ma = np.ma
from matplotlib import verbose
import matplotlib.nxutils as nxutils
import matplotlib.cbook as cbook
# set is a new builtin function in 2.4; delete the following when
# support for 2.3 is dropped.
try:
set
except NameError:
from sets import Set as set
def linspace(*args, **kwargs):
    """Deprecated: forwards to :func:`numpy.linspace`."""
    warnings.warn("use numpy.linspace", DeprecationWarning)
    return np.linspace(*args, **kwargs)
def meshgrid(x, y):
    """Deprecated: forwards to :func:`numpy.meshgrid`."""
    warnings.warn("use numpy.meshgrid", DeprecationWarning)
    return np.meshgrid(x, y)
def mean(x, dim=None):
    """Deprecated numpy.mean replacement; returns None for empty input."""
    warnings.warn("Use numpy.mean(x) or x.mean()", DeprecationWarning)
    if len(x) == 0:
        return None
    return np.mean(x, axis=dim)
def logspace(xmin, xmax, N):
    """
    N points spaced evenly on a log scale between *xmin* and *xmax*.
    Unlike numpy.logspace, the endpoints are given as plain values, not
    as exponents.
    """
    log_points = np.linspace(np.log(xmin), np.log(xmax), N)
    return np.exp(log_points)
def _norm(x):
"return sqrt(x dot x)"
return np.sqrt(np.dot(x,x))
def window_hanning(x):
    """Return *x* multiplied elementwise by a Hanning window of the same length."""
    return x * np.hanning(len(x))
def window_none(x):
    """Identity window: return *x* untouched."""
    return x
#from numpy import convolve as conv
def conv(x, y, mode=2):
'convolve x with y'
warnings.warn("Use numpy.convolve(x, y, mode='full')", DeprecationWarning)
return np.convolve(x,y,mode)
def detrend(x, key=None):
    """
    Dispatch to a detrending function: 'linear' -> detrend_linear,
    None/'constant' -> detrend_mean.  Any other key falls through,
    implicitly returning None (original behavior preserved).
    """
    if key == 'linear':
        return detrend_linear(x)
    if key is None or key == 'constant':
        return detrend_mean(x)
def demean(x, axis=0):
    """
    Return *x* minus its mean along the specified axis, with the reduced
    mean re-broadcast over that axis.

    Bug fix: the fancy index used to re-expand the mean was built as a
    *list* of slices, which modern numpy rejects (older numpy deprecates) —
    it must be a tuple.
    """
    x = np.asarray(x)
    if axis:
        ind = [slice(None)] * axis
        ind.append(np.newaxis)
        return x - x.mean(axis)[tuple(ind)]
    return x - x.mean(axis)
def detrend_mean(x):
    """Return *x* with its mean removed ('constant' detrending)."""
    return x - x.mean()
def detrend_none(x):
    """No-op detrending: return *x* unchanged."""
    return x
def detrend_linear(y):
    """Return *y* minus its least-squares best-fit line ('linear' detrending)."""
    # Covariance-based slope/intercept: faster than a linalg.lstsq solve.
    # (dtype=float is float64, identical to the removed np.float_ alias.)
    t = np.arange(len(y), dtype=float)
    cov = np.cov(t, y, bias=1)
    slope = cov[0, 1] / cov[0, 0]
    intercept = y.mean() - slope * t.mean()
    return y - (slope * t + intercept)
#This is a helper function that implements the commonality between the
#psd, csd, and spectrogram. It is *NOT* meant to be used outside of mlab
def _spectral_helper(x, y, NFFT=256, Fs=2, detrend=detrend_none,
        window=window_hanning, noverlap=0, pad_to=None, sides='default',
        scale_by_freq=None):
    """
    Compute the windowed, segmented FFT cross-products underlying psd/csd/
    specgram.  Returns (Pxy, freqs, t): Pxy is the (numFreqs, n_segments)
    array of un-averaged spectral products, freqs the frequency axis, and t
    the segment midpoint times.
    """
    #The checks for if y is x are so that we can use the same function to
    #implement the core of psd(), csd(), and spectrogram() without doing
    #extra calculations.  We return the unaveraged Pxy, freqs, and t.
    same_data = y is x
    #Make sure we're dealing with a numpy array. If y and x were the same
    #object to start with, keep them that way
    x = np.asarray(x)
    if not same_data:
        y = np.asarray(y)
    # zero pad x and y up to NFFT if they are shorter than NFFT
    if len(x)<NFFT:
        n = len(x)
        x = np.resize(x, (NFFT,))
        x[n:] = 0
    if not same_data and len(y)<NFFT:
        n = len(y)
        y = np.resize(y, (NFFT,))
        y[n:] = 0
    if pad_to is None:
        pad_to = NFFT
    if scale_by_freq is None:
        warnings.warn("psd, csd, and specgram have changed to scale their "
            "densities by the sampling frequency for better MatLab "
            "compatibility. You can pass scale_by_freq=False to disable "
            "this behavior. Also, one-sided densities are scaled by a "
            "factor of 2.")
        scale_by_freq = True
    # For real x, ignore the negative frequencies unless told otherwise
    if (sides == 'default' and np.iscomplexobj(x)) or sides == 'twosided':
        numFreqs = pad_to
        scaling_factor = 1.
    elif sides in ('default', 'onesided'):
        numFreqs = pad_to//2 + 1
        scaling_factor = 2.
    else:
        raise ValueError("sides must be one of: 'default', 'onesided', or "
            "'twosided'")
    # Matlab divides by the sampling frequency so that density function
    # has units of dB/Hz and can be integrated by the plotted frequency
    # values. Perform the same scaling here.
    if scale_by_freq:
        scaling_factor /= Fs
    if cbook.iterable(window):
        assert(len(window) == NFFT)
        windowVals = window
    else:
        windowVals = window(np.ones((NFFT,), x.dtype))
    # Segments start every (NFFT - noverlap) samples.
    step = NFFT - noverlap
    ind = np.arange(0, len(x) - NFFT + 1, step)
    n = len(ind)
    Pxy = np.zeros((numFreqs,n), np.complex_)
    # do the ffts of the slices
    for i in range(n):
        thisX = x[ind[i]:ind[i]+NFFT]
        thisX = windowVals * detrend(thisX)
        fx = np.fft.fft(thisX, n=pad_to)
        if same_data:
            fy = fx
        else:
            thisY = y[ind[i]:ind[i]+NFFT]
            thisY = windowVals * detrend(thisY)
            fy = np.fft.fft(thisY, n=pad_to)
        Pxy[:,i] = np.conjugate(fx[:numFreqs]) * fy[:numFreqs]
    # Scale the spectrum by the norm of the window to compensate for
    # windowing loss; see Bendat & Piersol Sec 11.5.2.  Also include
    # scaling factors for one-sided densities and dividing by the sampling
    # frequency, if desired.
    Pxy *= scaling_factor / (np.abs(windowVals)**2).sum()
    # Segment midpoint times and the frequency axis.
    t = 1./Fs * (ind + NFFT / 2.)
    freqs = float(Fs) / pad_to * np.arange(numFreqs)
    return Pxy, freqs, t
#Split out these keyword docs so that they can be used elsewhere
kwdocd = dict()
kwdocd['PSD'] ="""
Keyword arguments:
*NFFT*: integer
The number of data points used in each block for the FFT.
Must be even; a power 2 is most efficient. The default value is 256.
*Fs*: scalar
The sampling frequency (samples per time unit). It is used
to calculate the Fourier frequencies, freqs, in cycles per time
unit. The default value is 2.
*detrend*: callable
The function applied to each segment before fft-ing,
designed to remove the mean or linear trend. Unlike in
matlab, where the *detrend* parameter is a vector, in
matplotlib is it a function. The :mod:`~matplotlib.pylab`
module defines :func:`~matplotlib.pylab.detrend_none`,
:func:`~matplotlib.pylab.detrend_mean`, and
:func:`~matplotlib.pylab.detrend_linear`, but you can use
a custom function as well.
*window*: callable or ndarray
A function or a vector of length *NFFT*. To create window
vectors see :func:`window_hanning`, :func:`window_none`,
:func:`numpy.blackman`, :func:`numpy.hamming`,
:func:`numpy.bartlett`, :func:`scipy.signal`,
:func:`scipy.signal.get_window`, etc. The default is
:func:`window_hanning`. If a function is passed as the
argument, it must take a data segment as an argument and
return the windowed version of the segment.
*noverlap*: integer
The number of points of overlap between blocks. The default value
is 0 (no overlap).
*pad_to*: integer
The number of points to which the data segment is padded when
performing the FFT. This can be different from *NFFT*, which
specifies the number of data points used. While not increasing
the actual resolution of the psd (the minimum distance between
resolvable peaks), this can give more points in the plot,
allowing for more detail. This corresponds to the *n* parameter
in the call to fft(). The default is None, which sets *pad_to*
equal to *NFFT*
*sides*: [ 'default' | 'onesided' | 'twosided' ]
Specifies which sides of the PSD to return. Default gives the
default behavior, which returns one-sided for real data and both
for complex data. 'onesided' forces the return of a one-sided PSD,
while 'twosided' forces two-sided.
*scale_by_freq*: boolean
Specifies whether the resulting density values should be scaled
by the scaling frequency, which gives density in units of Hz^-1.
This allows for integration over the returned frequency values.
The default is True for MatLab compatibility.
"""
def psd(x, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning,
        noverlap=0, pad_to=None, sides='default', scale_by_freq=None):
    """
    The power spectral density by Welch's average periodogram method.
    The vector *x* is divided into *NFFT* length blocks. Each block
    is detrended by the function *detrend* and windowed by the function
    *window*. *noverlap* gives the length of the overlap between blocks.
    The absolute(fft(block))**2 of each segment are averaged to compute
    *Pxx*, with a scaling to correct for power loss due to windowing.
    If len(*x*) < *NFFT*, it will be zero padded to *NFFT*.
    *x*
        Array or sequence containing the data
    %(PSD)s
    Returns the tuple (*Pxx*, *freqs*).
    Refs:
        Bendat & Piersol -- Random Data: Analysis and Measurement
        Procedures, John Wiley & Sons (1986)
    """
    # The auto-spectrum is the cross-spectrum of a signal with itself; the
    # imaginary part is numerical noise, hence the .real below.
    Pxx,freqs = csd(x, x, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
        scale_by_freq)
    return Pxx.real,freqs
# Splice the shared PSD keyword documentation into the docstring at import time.
psd.__doc__ = psd.__doc__ % kwdocd
def csd(x, y, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning,
        noverlap=0, pad_to=None, sides='default', scale_by_freq=None):
    """
    The cross power spectral density by Welch's average periodogram
    method. The vectors *x* and *y* are divided into *NFFT* length
    blocks. Each block is detrended by the function *detrend* and
    windowed by the function *window*. *noverlap* gives the length
    of the overlap between blocks. The product of the direct FFTs
    of *x* and *y* are averaged over each segment to compute *Pxy*,
    with a scaling to correct for power loss due to windowing.
    If len(*x*) < *NFFT* or len(*y*) < *NFFT*, they will be zero
    padded to *NFFT*.
    *x*, *y*
        Array or sequence containing the data
    %(PSD)s
    Returns the tuple (*Pxy*, *freqs*).
    Refs:
        Bendat & Piersol -- Random Data: Analysis and Measurement
        Procedures, John Wiley & Sons (1986)
    """
    Pxy, freqs, t = _spectral_helper(x, y, NFFT, Fs, detrend, window,
        noverlap, pad_to, sides, scale_by_freq)
    # Average the per-segment cross-spectra when more than one segment fit.
    if len(Pxy.shape) == 2 and Pxy.shape[1]>1:
        Pxy = Pxy.mean(axis=1)
    return Pxy, freqs
# Splice the shared PSD keyword documentation into the docstring at import time.
csd.__doc__ = csd.__doc__ % kwdocd
def specgram(x, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning,
             noverlap=128, pad_to=None, sides='default', scale_by_freq=None):
    """
    Compute a spectrogram of data in *x*. Data are split into *NFFT*
    length segements and the PSD of each section is computed. The
    windowing function *window* is applied to each segment, and the
    amount of overlap of each segment is specified with *noverlap*.
    If *x* is real (i.e. non-complex) only the spectrum of the positive
    frequencie is returned. If *x* is complex then the complete
    spectrum is returned.
    %(PSD)s
    Returns a tuple (*Pxx*, *freqs*, *t*):
         - *Pxx*: 2-D array, columns are the periodograms of
           successive segments
         - *freqs*: 1-D array of frequencies corresponding to the rows
           in Pxx
         - *t*: 1-D array of times corresponding to midpoints of
           segments.
    .. seealso::
        :func:`psd`:
            :func:`psd` differs in the default overlap; in returning
            the mean of the segment periodograms; and in not returning
            times.
    """
    assert(NFFT > noverlap)
    Pxx, freqs, t = _spectral_helper(x, x, NFFT, Fs, detrend, window,
        noverlap, pad_to, sides, scale_by_freq)
    Pxx = Pxx.real  # Needed since helper implements generically
    if (np.iscomplexobj(x) and sides == 'default') or sides == 'twosided':
        # center the frequency range at zero.
        # Bug fix: this module does `from __future__ import division`, so
        # NFFT / 2 was a float and cannot be used as an array index; use
        # floor division (same value for even NFFT).
        half = NFFT // 2
        freqs = np.concatenate((freqs[half:] - Fs, freqs[:half]))
        Pxx = np.concatenate((Pxx[half:, :], Pxx[:half, :]), 0)
    return Pxx, freqs, t
specgram.__doc__ = specgram.__doc__ % kwdocd
# Message raised by cohere() below when the input is shorter than 2*NFFT.
_coh_error = """Coherence is calculated by averaging over *NFFT*
length segments. Your signal is too short for your choice of *NFFT*.
"""
def cohere(x, y, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning,
        noverlap=0, pad_to=None, sides='default', scale_by_freq=None):
    """
    The coherence between *x* and *y*. Coherence is the normalized
    cross spectral density:
    .. math::
        C_{xy} = \\frac{|P_{xy}|^2}{P_{xx}P_{yy}}
    *x*, *y*
        Array or sequence containing the data
    %(PSD)s
    The return value is the tuple (*Cxy*, *f*), where *f* are the
    frequencies of the coherence vector. For cohere, scaling the
    individual densities by the sampling frequency has no effect, since
    the factors cancel out.
    .. seealso::
        :func:`psd` and :func:`csd`:
            For information about the methods used to compute
            :math:`P_{xy}`, :math:`P_{xx}` and :math:`P_{yy}`.
    """
    # Averaging needs at least two full segments to be meaningful.
    if len(x)<2*NFFT:
        raise ValueError(_coh_error)
    Pxx, f = psd(x, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
        scale_by_freq)
    Pyy, f = psd(y, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
        scale_by_freq)
    Pxy, f = csd(x, y, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
        scale_by_freq)
    Cxy = np.divide(np.absolute(Pxy)**2, Pxx*Pyy)
    Cxy.shape = (len(f),)
    return Cxy, f
# Splice the shared PSD keyword documentation into the docstring at import time.
cohere.__doc__ = cohere.__doc__ % kwdocd
def corrcoef(*args):
    """
    Deprecated wrapper around :func:`numpy.corrcoef`.

    corrcoef(*X*) where *X* is a matrix returns the matrix of correlation
    coefficients for the columns of *X*; corrcoef(*x*, *y*) returns the
    correlation matrix for vectors *x* and *y*.  Real or complex input is
    accepted.  Note that unlike numpy's default, variables are taken to be
    the *columns* (``rowvar=False``).
    """
    warnings.warn("Use numpy.corrcoef", DeprecationWarning)
    return np.corrcoef(*args, rowvar=False)
def polyfit(*args, **kwargs):
    """
    Deprecated wrapper around :func:`numpy.polyfit`.

    polyfit(*x*, *y*, *N*) does a least-squares fit of a polynomial of
    order *N* of *y* to *x* and returns the coefficient vector
    ``[pN ... p1 p0]``.  The normal-equation solution
    ``p = (X_t X)^-1 X_t y`` (X the Vandermonde matrix of *x*) is
    numerically poor, so numpy's :func:`numpy.linalg.lstsq` is used
    instead.

    .. seealso::

        :func:`polyval`
    """
    # Bug fix: the deprecation message misspelled the replacement
    # function as "numpy.poyfit".
    warnings.warn("use numpy.polyfit", DeprecationWarning)
    return np.polyfit(*args, **kwargs)
def polyval(*args, **kwargs):
    """
    Deprecated wrapper around :func:`numpy.polyval`.

    ``y = polyval(p, x)`` evaluates the polynomial with coefficient
    vector *p* at *x*.  Example, removing a quadratic trend::

      p = polyfit(x, y, 2)
      trend = polyval(p, x)
      resid = y - trend

    .. seealso::

        :func:`polyfit`
    """
    warnings.warn("use numpy.polyval", DeprecationWarning)
    result = np.polyval(*args, **kwargs)
    return result
def vander(*args, **kwargs):
    """
    Deprecated wrapper around :func:`numpy.vander`.

    ``X = vander(x, N=None)`` returns the Vandermonde matrix of vector
    *x*: column *i* of *X* is ``x`` raised to the *i*-th (descending)
    power.  *N*, the number of columns, defaults to ``len(x)``.
    """
    warnings.warn("Use numpy.vander()", DeprecationWarning)
    result = np.vander(*args, **kwargs)
    return result
def donothing_callback(*args):
    # Default no-op progress callback for cohere_pairs(); accepts and
    # ignores any arguments.
    pass
def cohere_pairs( X, ij, NFFT=256, Fs=2, detrend=detrend_none,
                  window=window_hanning, noverlap=0,
                  preferSpeedOverMemory=True,
                  progressCallback=donothing_callback,
                  returnPxx=False):
    """
    Compute the coherence for all pairs in *ij*.

    *X* is a (*numSamples*, *numCols*) numpy array; *ij* is a list of
    ``(i, j)`` tuples indexing columns of *X*.  Segmenting/windowing
    parameters match :func:`psd`.

    Returns the tuple (*Cxy*, *Phase*, *freqs*) where:

    - *Cxy*: dict mapping ``(i, j)`` -> coherence vector (one value per
      frequency) for that pair.
    - *Phase*: dict mapping ``(i, j)`` -> phase of the cross spectral
      density at each frequency.
    - *freqs*: vector of frequencies, equal in length to each coherence
      or phase vector.

    If *returnPxx* is True, the per-column power spectra dict *Pxx* is
    returned as a fourth element.

    Because the windowed FFTs of every referenced column are cached, this
    is O(N) in the number of pairs for the heavy lifting, versus O(N^2)
    for calling :func:`cohere` per pair — at the cost of roughly two extra
    complex arrays the size of *X*.  Setting *preferSpeedOverMemory* to
    False caches only one (the conjugates are then recomputed per pair),
    trading some speed for memory.
    """
    numRows, numCols = X.shape
    # zero pad if X is too short for a single segment
    if numRows < NFFT:
        tmp = X
        X = np.zeros((NFFT, numCols), X.dtype)
        X[:numRows, :] = tmp
        del tmp
    numRows, numCols = X.shape
    # collect the set of columns referenced by at least one (i, j) pair
    seen = {}
    for i, j in ij:
        seen[i] = 1
        seen[j] = 1
    allColumns = seen.keys()
    Ncols = len(allColumns)
    del seen
    # for real X, ignore the negative frequencies
    if np.iscomplexobj(X):
        numFreqs = NFFT
    else:
        numFreqs = NFFT//2 + 1
    # Cache the FFT of every windowed, detrended NFFT-length segment of
    # every referenced channel; if preferSpeedOverMemory, cache the
    # conjugates too.
    # (cbook.iterable was removed from matplotlib; np.iterable is the
    # equivalent test.)
    if np.iterable(window):
        assert(len(window) == NFFT)
        windowVals = window
    else:
        # Bug fix: the original called the undefined Numeric-era
        # ``typecode(X)``; ``X.dtype`` is the numpy equivalent.
        windowVals = window(np.ones((NFFT,), X.dtype))
    ind = range(0, numRows-NFFT+1, NFFT-noverlap)
    numSlices = len(ind)
    FFTSlices = {}
    FFTConjSlices = {}
    Pxx = {}
    slices = range(numSlices)
    # Window power normalization (||w||^2 == sum of squared window values).
    normVal = np.linalg.norm(windowVals)**2
    for iColNum, iCol in enumerate(allColumns):
        # Bug fix: the original reported ``i/Ncols`` where ``i`` was a
        # stale loop variable from the ij scan above.
        progressCallback(iColNum/Ncols, 'Cacheing FFTs')
        # np.complex_ was removed in NumPy 2.0; complex128 is the same type.
        Slices = np.zeros((numSlices, numFreqs), dtype=np.complex128)
        for iSlice in slices:
            thisSlice = X[ind[iSlice]:ind[iSlice]+NFFT, iCol]
            thisSlice = windowVals*detrend(thisSlice)
            Slices[iSlice, :] = np.fft.fft(thisSlice)[:numFreqs]
        FFTSlices[iCol] = Slices
        if preferSpeedOverMemory:
            FFTConjSlices[iCol] = np.conjugate(Slices)
        # Bug fix: average over *slices only* (axis=0) so Pxx stays a
        # per-frequency spectrum; a full np.mean() collapsed frequencies
        # too, contradicting the documented per-frequency coherence.
        Pxx[iCol] = np.divide(np.mean(np.absolute(Slices)**2, axis=0),
                              normVal)
    del Slices, ind, windowVals
    # compute the coherences and phases for all pairs using the cached FFTs
    Cxy = {}
    Phase = {}
    count = 0
    N = len(ij)
    for i, j in ij:
        count += 1
        if count % 10 == 0:
            progressCallback(count/N, 'Computing coherences')
        if preferSpeedOverMemory:
            Pxy = FFTSlices[i] * FFTConjSlices[j]
        else:
            Pxy = FFTSlices[i] * np.conjugate(FFTSlices[j])
        if numSlices > 1:
            # average the cross spectrum over slices, per frequency
            Pxy = np.mean(Pxy, axis=0)
        Pxy = np.divide(Pxy, normVal)
        Cxy[(i, j)] = np.divide(np.absolute(Pxy)**2, Pxx[i]*Pxx[j])
        Phase[(i, j)] = np.arctan2(Pxy.imag, Pxy.real)
    freqs = Fs/NFFT*np.arange(numFreqs)
    if returnPxx:
        return Cxy, Phase, freqs, Pxx
    else:
        return Cxy, Phase, freqs
def entropy(y, bins):
    r"""
    Return the entropy of the data in *y*, in nats:

    .. math::

      S = -\sum p_i \ln(p_i) + \ln(\Delta)

    where :math:`p_i` is the probability of observing *y* in the
    :math:`i^{th}` bin of *bins* and :math:`\Delta` is the bin width
    (the additive term is the continuous-distribution correction).
    *bins* can be a number of bins or a range of bins; see
    :func:`numpy.histogram`.

    Compare *S* with the analytic calculation for a Gaussian::

      x = mu + sigma * randn(200000)
      Sanalytic = 0.5 * (1.0 + log(2*pi*sigma**2.0))
    """
    n, bins = np.histogram(y, bins)
    # np.float_ was removed in NumPy 2.0; np.float64 is the same type.
    n = n.astype(np.float64)
    # drop empty bins so that p*log(p) is well defined
    n = np.take(n, np.nonzero(n)[0])
    p = np.divide(n, len(y))
    delta = bins[1] - bins[0]
    # Bug fix: the original used an unqualified ``log``; use np.log for
    # consistency with the rest of the module.
    S = -1.0 * np.sum(p * np.log(p)) + np.log(delta)
    return S
def hist(y, bins=10, normed=0):
    """
    Return the histogram of *y* with *bins* equally sized bins.  If
    *bins* is an array, use those bins.  Return value is (*n*, *x*)
    where *n* is the count for each bin in *x*.

    If *normed* is *False*, return the counts in the first element of
    the returned tuple.  If *normed* is *True*, return the probability
    density ``n / (len(y)*dbin)``.

    If *y* has rank > 1, it will be raveled.  If *y* is masked, only the
    unmasked values will be used.

    Credits: the Numeric 22 documentation
    """
    warnings.warn("Use numpy.histogram()", DeprecationWarning)
    # Compatibility fix: the ``normed`` keyword was removed from
    # numpy.histogram; ``density`` is its replacement.
    return np.histogram(y, bins=bins, range=None, density=bool(normed))
def normpdf(x, *args):
    "Return the normal pdf evaluated at *x*; args provides *mu*, *sigma*"
    mu, sigma = args
    z = 1. / sigma * (x - mu)
    coeff = 1. / (np.sqrt(2 * np.pi) * sigma)
    return coeff * np.exp(-0.5 * z ** 2)
def levypdf(x, gamma, alpha):
    """
    Return the Levy pdf evaluated at *x* for params *gamma*, *alpha*.

    *x* must be an even-length, equally spaced 1-D array; the density is
    computed via the FFT of the characteristic function
    ``exp(-gamma * |2*pi*f|**alpha)``.
    """
    N = len(x)
    if N % 2 != 0:
        # Bug fixes: Python-3 raise syntax, and "event" -> "even" typo.
        raise ValueError('x must be an even length array; try\n' +
                         'x = np.linspace(minx, maxx, N), where N is even')
    dx = x[1] - x[0]
    # Bug fix: the original passed the dtype positionally as arange's
    # *step* argument; pass dtype= explicitly.  N//2 keeps the indices
    # integral under Python 3.
    f = 1.0 / (N * dx) * np.arange(-N // 2, N // 2, dtype=np.float64)
    # index permutation that swaps the two halves (fftshift-style)
    ind = np.concatenate([np.arange(N // 2, N, dtype=int),
                          np.arange(0, N // 2, dtype=int)])
    df = f[1] - f[0]
    # characteristic function of the symmetric Levy-stable distribution
    cfl = np.exp(-gamma * np.absolute(2 * np.pi * f) ** alpha)
    # the transform is real up to numerical noise; keep the real part
    px = np.fft.fft(np.take(cfl, ind) * df).real
    return np.take(px, ind)
def find(condition):
    "Return the indices where ravel(condition) is true"
    return np.nonzero(np.ravel(condition))[0]
def trapz(x, y):
    """
    Trapezoidal integral of *y*(*x*).

    Deprecated: use ``numpy.trapz(y, x)`` (NumPy >= 2.0:
    ``numpy.trapezoid``).  Note the argument order here is (x, y), the
    reverse of numpy's.
    """
    warnings.warn("Use numpy.trapz(y,x) instead of trapz(x,y)", DeprecationWarning)
    # Compatibility fix: np.trapz was renamed to np.trapezoid and the old
    # name removed in NumPy 2.0; support both.
    integrate = getattr(np, 'trapezoid', None)
    if integrate is None:
        integrate = np.trapz
    return integrate(y, x)
def longest_contiguous_ones(x):
    """
    Return the indices of the longest stretch of contiguous ones in *x*,
    assuming *x* is a vector of zeros and ones.  If there are two
    equally long stretches, pick the first.
    """
    x = np.ravel(x)
    if len(x) == 0:
        return np.array([])
    zero_positions = (x == 0).nonzero()[0]
    if len(zero_positions) == 0:
        # all ones
        return np.arange(len(x))
    if len(zero_positions) == len(x):
        # all zeros
        return np.array([])
    # Pad with a zero on each side so every run of ones has both a
    # rising (+1) and a falling (-1) edge in the difference signal.
    padded = np.zeros((len(x) + 2,), x.dtype)
    padded[1:-1] = x
    edges = np.diff(padded)
    run_starts = (edges == 1).nonzero()[0]
    run_ends = (edges == -1).nonzero()[0]
    best = int(np.argmax(run_ends - run_starts))
    return np.arange(run_starts[best], run_ends[best])
def longest_ones(x):
    '''Alias for :func:`longest_contiguous_ones`.'''
    return longest_contiguous_ones(x)
def prepca(P, frac=0):
    """
    Compute the principal components of *P*, a (*numVars*, *numObs*)
    array.  *frac* is the minimum fraction of total variance a component
    must carry to be kept.

    Returns (*Pcomponents*, *Trans*, *fracVar*) where *Pcomponents* is a
    (numVars, numObs) array, *Trans* is the weights matrix such that
    ``Pcomponents = Trans * P``, and *fracVar* gives the fraction of the
    variance accounted for by each returned component.

    A similar function of the same name existed in the Matlab (TM) R13
    Neural Network Toolbox; its successor seems to be "processpcs".
    """
    U, s, v = np.linalg.svd(P)
    # per-component variance from the singular values
    varEach = s ** 2 / P.shape[1]
    fracVar = varEach / varEach.sum()
    # keep the leading components whose variance fraction reaches frac
    keep = slice(int((fracVar >= frac).sum()))
    Trans = U[:, keep].transpose()
    Pcomponents = np.dot(Trans, P)
    return Pcomponents, Trans, fracVar[keep]
def prctile(x, p = (0.0, 25.0, 50.0, 75.0, 100.0)):
    """
    Return the percentiles of *x*.  *p* can either be a sequence of
    percentile values or a scalar.  If *p* is a sequence, the ith
    element of the return sequence is the *p*(i)-th percentile of *x*.
    If *p* is a scalar, the largest value of *x* less than or equal to
    the *p* percentage point in the sequence is returned.
    """
    x = np.array(x).ravel()  # we need a copy: sort() below is in place
    x.sort()
    Nx = len(x)
    # cbook.iterable was removed from matplotlib; np.iterable is equivalent.
    if not np.iterable(p):
        # Bug fix: clamp the index so p=100 no longer raises IndexError
        # (the sequence path below already clamped).
        return x[min(int(p * Nx / 100.0), Nx - 1)]
    p = np.asarray(p) * Nx / 100.0
    ind = p.astype(int)
    ind = np.where(ind >= Nx, Nx - 1, ind)
    return x.take(ind)
def prctile_rank(x, p):
    """
    Return the rank for each element in *x*, in the range
    0..len(*p*).  Eg if *p* = (25, 50, 75), the return value will be a
    len(*x*) array with values in [0,1,2,3] where 0 indicates the
    value is less than the 25th percentile, 1 indicates the value is
    >= the 25th and < 50th percentile, ... and 3 indicates the value
    is above the 75th percentile cutoff.

    *p* is either an array of percentiles in [0..100] or a scalar which
    indicates how many quantiles of data you want ranked.
    """
    # cbook.iterable was removed from matplotlib; np.iterable is equivalent.
    if not np.iterable(p):
        # scalar p: build p-1 evenly spaced percentile cutoffs
        p = np.arange(100.0 / p, 100.0, 100.0 / p)
    else:
        p = np.asarray(p)
    if p.max() <= 1 or p.min() < 0 or p.max() > 100:
        raise ValueError('percentiles should be in range 0..100, not 0..1')
    ptiles = prctile(x, p)
    return np.searchsorted(ptiles, x)
def center_matrix(M, dim=0):
    """
    Return the matrix *M* with each row having zero mean and unit std.

    If *dim* = 1 operate on columns instead of rows.  (*dim* is
    opposite to the numpy axis kwarg.)
    """
    # np.float_ was removed in NumPy 2.0; np.float64 is the same type.
    M = np.asarray(M, np.float64)
    if dim:
        # normalize columns
        M = (M - M.mean(axis=0)) / M.std(axis=0)
    else:
        # normalize rows
        M = (M - M.mean(axis=1)[:, np.newaxis])
        M = M / M.std(axis=1)[:, np.newaxis]
    return M
def rk4(derivs, y0, t):
    """
    Integrate a 1D or ND system of ODEs using 4-th order Runge-Kutta.

    This is a toy implementation which may be useful if you find
    yourself stranded on a system w/o scipy.  Otherwise use
    :func:`scipy.integrate`.

    *y0*
        initial state vector (or scalar)

    *t*
        sample times

    *derivs*
        returns the derivative of the system and has the
        signature ``dy = derivs(yi, ti)``

    Example 1::

        ## 2D system
        def derivs6(x,t):
            d1 =  x[0] + 2*x[1]
            d2 = -3*x[0] + 4*x[1]
            return (d1, d2)
        dt = 0.0005
        t = arange(0.0, 2.0, dt)
        y0 = (1,2)
        yout = rk4(derivs6, y0, t)

    Example 2::

        ## 1D system
        alpha = 2
        def derivs(x,t):
            return -alpha*x + exp(-t)
        y0 = 1
        yout = rk4(derivs, y0, t)

    Returns an array of states, one row per sample time.
    """
    # np.float_ was removed in NumPy 2.0; np.float64 is the same type.
    try:
        Ny = len(y0)
    except TypeError:
        # scalar initial state: 1-D output
        yout = np.zeros((len(t),), np.float64)
    else:
        yout = np.zeros((len(t), Ny), np.float64)
    yout[0] = y0
    for i in np.arange(len(t) - 1):
        thist = t[i]
        dt = t[i + 1] - thist
        dt2 = dt / 2.0
        y0 = yout[i]
        # the four classical RK4 slope evaluations
        k1 = np.asarray(derivs(y0, thist))
        k2 = np.asarray(derivs(y0 + dt2 * k1, thist + dt2))
        k3 = np.asarray(derivs(y0 + dt2 * k2, thist + dt2))
        k4 = np.asarray(derivs(y0 + dt * k3, thist + dt))
        yout[i + 1] = y0 + dt / 6.0 * (k1 + 2 * k2 + 2 * k3 + k4)
    return yout
def bivariate_normal(X, Y, sigmax=1.0, sigmay=1.0,
                     mux=0.0, muy=0.0, sigmaxy=0.0):
    """
    Bivariate Gaussian distribution for equal shape *X*, *Y*.

    See `bivariate normal
    <http://mathworld.wolfram.com/BivariateNormalDistribution.html>`_
    at mathworld.
    """
    dx = X - mux
    dy = Y - muy
    rho = sigmaxy / (sigmax * sigmay)
    one_minus_rho2 = 1 - rho ** 2
    z = (dx ** 2 / sigmax ** 2 + dy ** 2 / sigmay ** 2
         - 2 * rho * dx * dy / (sigmax * sigmay))
    norm_const = 2 * np.pi * sigmax * sigmay * np.sqrt(one_minus_rho2)
    return np.exp(-z / (2 * one_minus_rho2)) / norm_const
def get_xyz_where(Z, Cond):
    """
    *Z* and *Cond* are *M* x *N* matrices.  *Z* are data and *Cond* is
    a boolean matrix where some condition is satisfied.  Return value
    is (*x*, *y*, *z*) where *x* and *y* are the indices into *Z* and
    *z* are the values of *Z* at those indices.  *x*, *y*, and *z* are
    1D arrays.
    """
    rows, cols = np.indices(Z.shape)
    return rows[Cond], cols[Cond], Z[Cond]
def get_sparse_matrix(M, N, frac=0.1):
    """
    Return an *M* x *N* dense array with approximately ``M*N*frac``
    elements randomly filled with uniform values in [0, 1).  Because
    positions are drawn with replacement, collisions may leave slightly
    fewer nonzero entries.
    """
    data = np.zeros((M, N))
    for _ in range(int(M * N * frac)):
        # Bug fix: np.random.randint's upper bound is exclusive, so the
        # original randint(0, M-1) could never fill the last row/column
        # (and raised for M == 1 or N == 1).
        x = np.random.randint(0, M)
        y = np.random.randint(0, N)
        data[x, y] = np.random.rand()
    return data
def dist(x, y):
    """
    Return the Euclidean distance between the two points *x* and *y*.
    """
    delta = x - y
    return np.sqrt(np.dot(delta, delta))
def dist_point_to_segment(p, s0, s1):
    """
    Get the distance of a point to a segment.

    *p*, *s0*, *s1* are *xy* sequences.

    This algorithm from
    http://softsurfer.com/Archive/algorithm_0102/algorithm_0102.htm#Distance%20to%20Ray%20or%20Segment
    """
    def _norm(d):
        # Euclidean length; same computation as the module-level dist(),
        # inlined so this function is self-contained.
        return np.sqrt(np.dot(d, d))
    # np.float_ was removed in NumPy 2.0; np.float64 is the same type.
    p = np.asarray(p, np.float64)
    s0 = np.asarray(s0, np.float64)
    s1 = np.asarray(s1, np.float64)
    v = s1 - s0
    w = p - s0
    c1 = np.dot(w, v)
    if c1 <= 0:
        # the projection of p falls before s0
        return _norm(p - s0)
    c2 = np.dot(v, v)
    if c2 <= c1:
        # the projection of p falls beyond s1
        return _norm(p - s1)
    # the projection falls within the segment
    b = c1 / c2
    pb = s0 + b * v
    return _norm(p - pb)
def segments_intersect(s1, s2):
    """
    Return *True* if *s1* and *s2* intersect.

    *s1* and *s2* are defined as::

      s1: (x1, y1), (x2, y2)
      s2: (x3, y3), (x4, y4)
    """
    (x1, y1), (x2, y2) = s1
    (x3, y3), (x4, y4) = s2
    denom = (y4 - y3) * (x2 - x1) - (x4 - x3) * (y2 - y1)
    if denom == 0:
        # parallel (or degenerate) lines never count as intersecting
        return False
    u1 = ((x4 - x3) * (y1 - y3) - (y4 - y3) * (x1 - x3)) / denom
    u2 = ((x2 - x1) * (y1 - y3) - (y2 - y1) * (x1 - x3)) / denom
    return 0.0 <= u1 <= 1.0 and 0.0 <= u2 <= 1.0
def fftsurr(x, detrend=detrend_none, window=window_none):
    """
    Compute an FFT phase randomized surrogate of *x*: the power spectrum
    is preserved while the phases are replaced by uniform random phases.
    """
    if cbook.iterable(window):
        windowed = window * detrend(x)
    else:
        windowed = window(detrend(x))
    spectrum = np.fft.fft(windowed)
    random_phase = 2. * np.pi * 1j * np.random.rand(len(x))
    return np.fft.ifft(spectrum * np.exp(random_phase)).real
def liaupunov(x, fprime):
    r"""
    Estimate the Lyapunov exponent of a map: *x* is a (long) trajectory
    and *fprime* returns the derivative of the map at *x*.

    Returns :math:`\lambda = \frac{1}{n}\sum \ln|f'(x_i)|`.

    .. seealso::

      Sec 10.5 Strogatz (1994) "Nonlinear Dynamics and Chaos".
      `Wikipedia article on Lyapunov Exponent
      <http://en.wikipedia.org/wiki/Lyapunov_exponent>`_.

    .. note::

      What this function calculates may not be what you really want;
      *caveat emptor*.  The function's name is also badly misspelled.
    """
    derivative_magnitudes = np.absolute(fprime(x))
    return np.log(derivative_magnitudes).mean()
class FIFOBuffer:
    """
    A FIFO queue to hold incoming *x*, *y* data in a rotating buffer
    using numpy arrays under the hood.  It is assumed that you will
    call asarrays much less frequently than you add data to the queue
    -- otherwise another data structure will be faster.

    This can be used to support plots where data is added from a real
    time feed and the plot object wants to grab data from the buffer
    and plot it to screen less frequently than the incoming.

    If you set the *dataLim* attr to
    :class:`~matplotlib.transforms.BBox` (eg
    :attr:`matplotlib.Axes.dataLim`), the *dataLim* will be updated as
    new data come in.

    TODO: add a grow method that will extend nmax

    .. note::

      mlab seems like the wrong place for this class.
    """
    def __init__(self, nmax):
        """
        Buffer up to *nmax* points.
        """
        # _xs/_ys are the ring buffers; _xa/_ya are scratch arrays used
        # by asarrays() to return the data in insertion order.
        # np.float_ was removed in NumPy 2.0; np.float64 is the same type.
        self._xa = np.zeros((nmax,), np.float64)
        self._ya = np.zeros((nmax,), np.float64)
        self._xs = np.zeros((nmax,), np.float64)
        self._ys = np.zeros((nmax,), np.float64)
        # _ind counts every point ever added; _ind % _nmax is the write slot
        self._ind = 0
        self._nmax = nmax
        self.dataLim = None
        # maps N -> list of callbacks fired every N-th add
        self.callbackd = {}

    def register(self, func, N):
        """
        Call *func* every time *N* events are passed; *func* signature
        is ``func(fifo)``.
        """
        self.callbackd.setdefault(N, []).append(func)

    def add(self, x, y):
        """
        Add scalar *x* and *y* to the queue.
        """
        if self.dataLim is not None:
            xys = ((x, y),)
            self.dataLim.update(xys, -1)  # -1 means use the default ignore setting
        ind = self._ind % self._nmax
        self._xs[ind] = x
        self._ys[ind] = y
        # fire callbacks registered for this add count (before increment,
        # so the very first add triggers N==1 callbacks)
        for N, funcs in self.callbackd.items():
            if (self._ind % N) == 0:
                for func in funcs:
                    func(self)
        self._ind += 1

    def last(self):
        """
        Get the last *x*, *y* or *None*, *None* if no data set.
        """
        if self._ind == 0:
            return None, None
        ind = (self._ind - 1) % self._nmax
        return self._xs[ind], self._ys[ind]

    def asarrays(self):
        """
        Return *x* and *y* as arrays; their length will be the len of
        data added or *nmax*.
        """
        if self._ind < self._nmax:
            # buffer not yet full: data is already in insertion order
            return self._xs[:self._ind], self._ys[:self._ind]
        # unroll the ring buffer into the scratch arrays
        ind = self._ind % self._nmax
        self._xa[:self._nmax - ind] = self._xs[ind:]
        self._xa[self._nmax - ind:] = self._xs[:ind]
        self._ya[:self._nmax - ind] = self._ys[ind:]
        self._ya[self._nmax - ind:] = self._ys[:ind]
        return self._xa, self._ya

    def update_datalim_to_current(self):
        """
        Update the *datalim* in the current data in the fifo.
        """
        if self.dataLim is None:
            raise ValueError('You must first set the dataLim attr')
        x, y = self.asarrays()
        self.dataLim.update_numerix(x, y, True)
def movavg(x, n):
    """
    Compute the *n*-point moving average of *x*; returns an array of
    length ``len(x) - n + 1`` ('valid' convolution).
    """
    # np.float_ was removed in NumPy 2.0; np.float64 is the same type.
    w = np.empty((n,), dtype=np.float64)
    w[:] = 1.0 / n
    return np.convolve(x, w, mode='valid')
def save(fname, X, fmt='%.18e', delimiter=' '):
    """
    Save the data in *X* to file *fname* using *fmt* string to convert the
    data to strings.

    *fname* can be a filename or a file handle.  If the filename ends
    in '.gz', the file is automatically saved in compressed gzip
    format.  The :func:`load` function understands gzipped files
    transparently.

    Example usage::

      save('test.out', X)         # X is an array
      save('test1.out', (x,y,z))  # x,y,z equal sized 1D arrays
      save('test2.out', x)        # x is 1D
      save('test3.out', x, fmt='%1.4e')  # use exponential notation

    *delimiter* is used to separate the fields, eg. *delimiter* ','
    for comma-separated values.
    """
    own_fh = False
    # Bug fixes: the Python-2-only ``file()`` builtin is replaced by
    # open(); gzip is opened in text mode since formatted strings are
    # written; and a handle opened here is now closed when done.
    if isinstance(fname, str):
        if fname.endswith('.gz'):
            import gzip
            fh = gzip.open(fname, 'wt')
        else:
            fh = open(fname, 'w')
        own_fh = True
    elif hasattr(fname, 'seek'):
        fh = fname
    else:
        raise ValueError('fname must be a string or file handle')
    try:
        X = np.asarray(X)
        origShape = None
        if X.ndim == 1:
            # promote 1-D input to a column so the row loop below works
            origShape = X.shape
            X.shape = len(X), 1
        for row in X:
            fh.write(delimiter.join([fmt % val for val in row]) + '\n')
        if origShape is not None:
            X.shape = origShape
    finally:
        if own_fh:
            fh.close()
def load(fname,comments='#',delimiter=None, converters=None,skiprows=0,
         usecols=None, unpack=False, dtype=np.float_):
    """
    Load ASCII data from *fname* into an array and return the array.
    The data must be regular, same number of values in every row
    *fname* can be a filename or a file handle. Support for gzipped
    files is automatic, if the filename ends in '.gz'.
    matfile data is not supported; for that, use :mod:`scipy.io.mio`
    module.
    Example usage::
    X = load('test.dat') # data in two columns
    t = X[:,0]
    y = X[:,1]
    Alternatively, you can do the same with "unpack"; see below::
    X = load('test.dat') # a matrix of data
    x = load('test.dat') # a single column of data
    - *comments*: the character used to indicate the start of a comment
    in the file
    - *delimiter* is a string-like character used to seperate values
    in the file. If *delimiter* is unspecified or *None*, any
    whitespace string is a separator.
    - *converters*, if not *None*, is a dictionary mapping column number to
    a function that will convert that column to a float (or the optional
    *dtype* if specified). Eg, if column 0 is a date string::
    converters = {0:datestr2num}
    - *skiprows* is the number of rows from the top to skip.
    - *usecols*, if not *None*, is a sequence of integer column indexes to
    extract where 0 is the first column, eg ``usecols=[1,4,5]`` to extract
    just the 2nd, 5th and 6th columns
    - *unpack*, if *True*, will transpose the matrix allowing you to unpack
    into named arguments on the left hand side::
    t,y = load('test.dat', unpack=True) # for two column data
    x,y,z = load('somefile.dat', usecols=[3,5,7], unpack=True)
    - *dtype*: the array will have this dtype. default: ``numpy.float_``
    .. seealso::
    See :file:`examples/pylab_examples/load_converter.py` in the source tree:
    Exercises many of these options.
    """
    # NOTE(review): the np.float_ default was removed in NumPy 2.0
    # (np.float64 is the equivalent) -- confirm the targeted numpy version.
    if converters is None: converters = {}
    fh = cbook.to_filehandle(fname)
    X = []
    if delimiter==' ':
        # space splitting is a special case since x.split() is what
        # you want, not x.split(' ')
        def splitfunc(x):
            return x.split()
    else:
        def splitfunc(x):
            return x.split(delimiter)
    # converterseq is built lazily from the first data line: one
    # converter per column, defaulting to float.
    converterseq = None
    for i,line in enumerate(fh):
        if i<skiprows: continue
        # strip any trailing comment, then surrounding whitespace;
        # skip lines that end up empty
        line = line.split(comments, 1)[0].strip()
        if not len(line): continue
        if converterseq is None:
            converterseq = [converters.get(j,float)
                for j,val in enumerate(splitfunc(line))]
        if usecols is not None:
            # pick out and convert only the requested columns
            vals = splitfunc(line)
            row = [converterseq[j](vals[j]) for j in usecols]
        else:
            row = [converterseq[j](val)
                for j,val in enumerate(splitfunc(line))]
        thisLen = len(row)
        X.append(row)
    X = np.array(X, dtype)
    r,c = X.shape
    # collapse single-row or single-column results to a 1-D array
    if r==1 or c==1:
        X.shape = max(r,c),
    if unpack: return X.transpose()
    else: return X
def slopes(x, y):
    """
    SLOPES calculate the slope y'(x)

    Given data vectors X and Y SLOPES calculates Y'(X), i.e the slope of
    a curve Y(X).  The slope is estimated using the slope obtained from
    that of a parabola through any three consecutive points.

    This method should be superior to that described in the appendix
    of A CONSISTENTLY WELL BEHAVED METHOD OF INTERPOLATION by Russel
    W. Stineman (Creative Computing July 1980) in at least one aspect:
    Circles for interpolation demand a known aspect ratio between x-
    and y-values.  For many functions, however, the abscissa are given
    in different dimensions, so an aspect ratio is completely
    arbitrary.

    The parabola method gives very similar results to the circle
    method for most regular cases but behaves much better in special
    cases.

    Norbert Nemec, Institute of Theoretical Physics, University or
    Regensburg, April 2006 Norbert.Nemec at physik.uni-regensburg.de
    (inspired by a original implementation by Halldor Bjornsson,
    Icelandic Meteorological Office, March 2006 halldor at vedur.is)
    """
    # Cast key variables as float.
    # np.float_ was removed in NumPy 2.0; np.float64 is the same type.
    x = np.asarray(x, np.float64)
    y = np.asarray(y, np.float64)
    yp = np.zeros(y.shape, np.float64)
    dx = x[1:] - x[:-1]
    dy = y[1:] - y[:-1]
    dydx = dy / dx
    # interior points: harmonic-style weighted average of adjacent slopes
    yp[1:-1] = (dydx[:-1] * dx[1:] + dydx[1:] * dx[:-1]) / (dx[1:] + dx[:-1])
    # endpoints: extrapolate from the parabola through the outer points
    yp[0] = 2.0 * dy[0] / dx[0] - yp[1]
    yp[-1] = 2.0 * dy[-1] / dx[-1] - yp[-2]
    return yp
def stineman_interp(xi, x, y, yp=None):
    """
    STINEMAN_INTERP Well behaved data interpolation.

    Given data vectors X and Y, the slope vector YP and a new abscissa
    vector XI, uses Stineman interpolation to calculate a vector YI
    corresponding to XI.

    Here's an example that generates a coarse sine curve, then
    interpolates over a finer abscissa::

      x = linspace(0,2*pi,20);  y = sin(x); yp = cos(x)
      xi = linspace(0,2*pi,40);
      yi = stineman_interp(xi,x,y,yp);
      plot(x,y,'o',xi,yi)

    The interpolation method is described in the article A
    CONSISTENTLY WELL BEHAVED METHOD OF INTERPOLATION by Russell
    W. Stineman (Creative Computing, July 1980).

    For yp=None, the routine automatically determines the slopes using
    the :func:`slopes` routine.  X is assumed to be sorted in
    increasing order.  For values xi[j] < x[0] or xi[j] > x[-1], the
    routine tries an extrapolation; the relevance of the data obtained
    from this is, of course, questionable.

    Original implementation by Halldor Bjornsson, Icelandic
    Meteorolocial Office, March 2006 halldor at vedur.is; completely
    reworked and optimized for Python by Norbert Nemec, Institute of
    Theoretical Physics, University or Regensburg, April 2006
    Norbert.Nemec at physik.uni-regensburg.de
    """
    # Cast key variables as float.
    # np.float_ was removed in NumPy 2.0; np.float64 is the same type.
    x = np.asarray(x, np.float64)
    y = np.asarray(y, np.float64)
    assert x.shape == y.shape
    N = len(y)
    if yp is None:
        yp = slopes(x, y)
    else:
        yp = np.asarray(yp, np.float64)
    xi = np.asarray(xi, np.float64)
    yi = np.zeros(xi.shape, np.float64)
    # calculate linear slopes
    dx = x[1:] - x[:-1]
    dy = y[1:] - y[:-1]
    s = dy/dx  # note length of s is N-1 so last element is #N-2
    # find the segment each xi is in; this line is the key to the
    # efficiency of this implementation
    idx = np.searchsorted(x[1:-1], xi)
    # now we have generally: x[idx[j]] <= xi[j] <= x[idx[j]+1],
    # except at the boundaries, where it may be that
    # xi[j] < x[0] or xi[j] > x[-1]
    # the y-values that would come out from a linear interpolation:
    sidx = s.take(idx)
    xidx = x.take(idx)
    yidx = y.take(idx)
    xidxp1 = x.take(idx+1)
    yo = yidx + sidx * (xi - xidx)
    # the difference that comes when using the slopes given in yp:
    dy1 = (yp.take(idx) - sidx) * (xi - xidx)      # using the yp slope of the left point
    dy2 = (yp.take(idx+1) - sidx) * (xi - xidxp1)  # using the yp slope of the right point
    dy1dy2 = dy1*dy2
    # The following is optimized for Python.  The solution actually does
    # more calculations than necessary (all three branch values are
    # evaluated and np.choose selects one by the sign of dy1*dy2), but
    # exploiting the power of numpy this is far more efficient than
    # coding a loop by hand in Python.
    yi = yo + dy1dy2 * np.choose(np.array(np.sign(dy1dy2), np.int32)+1,
                                 ((2*xi-xidx-xidxp1)/((dy1-dy2)*(xidxp1-xidx)),
                                  0.0,
                                  1/(dy1+dy2),))
    return yi
def inside_poly(points, verts):
    """
    *points* is a sequence of *x*, *y* points; *verts* is a sequence of
    *x*, *y* vertices of a polygon.

    Return value is a sequence of indices into *points* for the points
    that are inside the polygon.
    """
    mask = nxutils.points_inside_poly(points, verts)
    return np.nonzero(mask)[0]
def poly_below(ymin, xs, ys):
    """
    Given arrays *xs* and *ys*, return the vertices of a polygon
    that has a scalar lower bound *ymin* and an upper bound at the *ys*.

    Intended for use with Axes.fill, eg::

      xv, yv = poly_below(0, x, y)
      ax.fill(xv, yv)
    """
    # Bug fix: the original referenced the undefined name ``xmin``
    # (NameError at runtime); the scalar bound parameter is ``ymin``.
    return poly_between(xs, ys, ymin)
def poly_between(x, ylower, yupper):
    """
    Given a sequence of *x*, *ylower* and *yupper*, return the polygon
    that fills the regions between them.  *ylower* or *yupper* can be
    scalar or iterable; if they are iterable, they must be equal in
    length to *x*.

    Return value is *x*, *y* arrays for use with Axes.fill.
    """
    Nx = len(x)
    # cbook.iterable was removed from matplotlib; np.iterable is the
    # equivalent test.
    if not np.iterable(ylower):
        ylower = ylower * np.ones(Nx)
    if not np.iterable(yupper):
        yupper = yupper * np.ones(Nx)
    # walk along the upper curve, then back along the lower curve
    x = np.concatenate((x, x[::-1]))
    y = np.concatenate((yupper, ylower[::-1]))
    return x, y
### the following code was written and submitted by Fernando Perez
### from the ipython numutils package under a BSD license
# begin fperez functions
"""
A set of convenient utilities for numerical work.
Most of this module requires numpy or is meant to be used with it.
Copyright (c) 2001-2004, Fernando Perez. <Fernando.Perez@colorado.edu>
All rights reserved.
This license was generated from the BSD license template as found in:
http://www.opensource.org/licenses/bsd-license.php
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the IPython project nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import operator
import math
#*****************************************************************************
# Globals
#****************************************************************************
# Clipping bounds used by exp_safe():
#   exp_safe_MIN -- the log of the smallest positive normal IEEE-754 double,
#                   so exp(exp_safe_MIN) is the smallest normal positive value.
#   exp_safe_MAX -- NOTE(review): this is the largest finite double itself,
#                   not its log, so clipping at it cannot actually prevent
#                   overflow in exp(); presumably log(max) was intended --
#                   confirm before relying on overflow safety.
exp_safe_MIN = math.log(2.2250738585072014e-308)
exp_safe_MAX = 1.7976931348623157e+308
def exp_safe(x):
    """
    Compute exponentials which safely underflow to zero.

    Array arguments are clipped to [exp_safe_MIN, exp_safe_MAX] before
    exponentiation; scalars go through :func:`math.exp` unchanged.
    Slow, but convenient to use.  Note that numpy provides proper
    floating point exception handling with access to the underlying
    hardware.
    """
    # Bug fix: the original called an unqualified ``exp`` (NameError in
    # this module); use np.exp.  isinstance() also accepts ndarray
    # subclasses, unlike the original ``type(x) is np.ndarray`` check.
    if isinstance(x, np.ndarray):
        return np.exp(np.clip(x, exp_safe_MIN, exp_safe_MAX))
    else:
        return math.exp(x)
def amap(fn, *args):
    """
    amap(function, sequence[, sequence, ...]) -> array.

    Works like :func:`map`, but it returns an array.  This is just a
    convenient shorthand for ``numpy.array(list(map(...)))``.
    """
    # Bug fix: under Python 3, map() returns an iterator and
    # np.array(map(...)) produces a useless 0-d object array; the
    # iterator must be materialized first.
    return np.array(list(map(fn, *args)))
def zeros_like(a):
    """
    Return an array of zeros of the shape and typecode of *a*.

    Deprecated: use :func:`numpy.zeros_like` directly.
    """
    warnings.warn("Use numpy.zeros_like(a)", DeprecationWarning)
    result = np.zeros_like(a)
    return result
#from numpy import sum as sum_flat
def sum_flat(a):
    """
    Deprecated alias for :func:`numpy.sum`: sum of all elements of *a*,
    flattened out.
    """
    warnings.warn("Use numpy.sum(a) or a.sum()", DeprecationWarning)
    total = np.sum(a)
    return total
#from numpy import mean as mean_flat
def mean_flat(a):
    """
    Deprecated alias for :func:`numpy.mean`: mean of all elements of *a*,
    flattened out.
    """
    warnings.warn("Use numpy.mean(a) or a.mean()", DeprecationWarning)
    average = np.mean(a)
    return average
def rms_flat(a):
    """Return the root mean square of all the elements of *a*, flattened out."""
    squared_magnitudes = np.absolute(a) ** 2
    return np.sqrt(np.mean(squared_magnitudes))
def l1norm(a):
    """
    Return the *l1* norm of *a*, flattened out.

    Implemented separately (rather than through :func:`norm`) for speed.
    """
    return np.absolute(a).sum()
def l2norm(a):
    """
    Return the *l2* (Euclidean) norm of *a*, flattened out.

    Implemented separately (rather than through :func:`norm`) for speed.
    """
    return np.sqrt((np.absolute(a) ** 2).sum())
def norm_flat(a, p=2):
    """
    norm(a,p=2) -> l-p norm of a.flat

    Return the l-p norm of *a*, considered as a flat array. This is NOT a
    true matrix norm, since arrays of arbitrary rank are always flattened.

    *p* can be a number or the string 'Infinity' to get the L-infinity norm.
    """
    # This function was being masked by a more general norm later in
    # the file. We may want to simply delete it.
    magnitudes = np.absolute(a)
    if p == 'Infinity':
        return np.amax(magnitudes)
    return (np.sum(magnitudes ** p)) ** (1.0 / p)
def frange(xini, xfin=None, delta=None, **kw):
    """
    frange([start,] stop[, step, keywords]) -> array of floats

    Return a numpy ndarray containing a progression of floats. Similar to
    :func:`numpy.arange`, but defaults to a closed interval.

    ``frange(x0, x1)`` returns ``[x0, x0+1, x0+2, ..., x1]``; *start*
    defaults to 0, and the endpoint *is included*. This behavior is
    different from that of :func:`range` and :func:`numpy.arange`. This is
    deliberate, since :func:`frange` will probably be more useful for
    generating lists of points for function evaluation, and endpoints are
    often desired in this use. The usual behavior of :func:`range` can be
    obtained by setting the keyword *closed* = 0, in this case,
    :func:`frange` basically becomes :func:`numpy.arange`.

    When *step* is given, it specifies the increment (or decrement). All
    arguments can be floating point numbers.

    ``frange(x0,x1,d)`` returns ``[x0,x0+d,x0+2d,...,xfin]`` where
    *xfin* <= *x1*.

    :func:`frange` can also be called with the keyword *npts*. This sets
    the number of points the list should contain (and overrides the value
    *step* might have been given). :func:`numpy.arange` doesn't offer this
    option.

    Examples::

      >>> frange(3)
      array([ 0.,  1.,  2.,  3.])
      >>> frange(3,closed=0)
      array([ 0.,  1.,  2.])
      >>> frange(1,6,2)
      array([1, 3, 5])   or 1,3,5,7, depending on floating point vagueries
      >>> frange(1,6.5,npts=5)
      array([ 1.   ,  2.375,  3.75 ,  5.125,  6.5  ])
    """
    # defaults
    kw.setdefault('closed', 1)
    endpoint = kw['closed'] != 0
    # funny logic to allow the *first* argument to be optional (like range())
    # This was modified with a simpler version from a similar frange() found
    # at http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66472
    # Bug fix: the original compared with ``== None``; identity (``is None``)
    # is the correct test and avoids invoking custom __eq__ methods.
    if xfin is None:
        xfin = xini + 0.0
        xini = 0.0
    if delta is None:
        delta = 1.0
    # compute # of points, spacing and return final list
    try:
        npts = kw['npts']
        delta = (xfin - xini) / float(npts - endpoint)
    except KeyError:
        # round finds the nearest, so the endpoint can be up to
        # delta/2 larger than xfin.
        npts = int(round((xfin - xini) / delta)) + endpoint
    return np.arange(npts) * delta + xini
# end frange()
#import numpy.diag as diagonal_matrix
def diagonal_matrix(diag):
    """
    Deprecated alias for :func:`numpy.diag`: square diagonal matrix whose
    non-zero elements are given by the input array.
    """
    warnings.warn("Use numpy.diag(d)", DeprecationWarning)
    result = np.diag(diag)
    return result
def identity(n, rank=2, dtype='l', typecode=None):
    """
    Returns the identity matrix of shape (*n*, *n*, ..., *n*) (rank *r*).

    For ranks higher than 2, this object is simply a multi-index Kronecker
    delta::

                        /  1  if i0=i1=...=iR,
        id[i0,i1,...,iR] = -|
                        \  0  otherwise.

    Optionally a *dtype* (or typecode) may be given (it defaults to 'l').

    Since rank defaults to 2, this function behaves in the default case
    (when only *n* is given) like ``numpy.identity(n)`` -- but surprisingly,
    it is much faster.
    """
    if typecode is not None:
        warnings.warn("Use dtype kwarg instead of typecode",
                      DeprecationWarning)
        dtype = typecode
    result = np.zeros((n,) * rank, dtype)
    # Walk the main "diagonal": the multi-index (i, i, ..., i).
    for position in range(n):
        result[(position,) * rank] = 1
    return result
def base_repr(number, base=2, padding=0):
    """
    Return the representation of a *number* in any given *base*, as a
    string of digit characters, left-padded with '0' to at least
    *padding* characters.

    Bug fixes relative to the original recursive version:

    * padding was silently misplaced in the recursive branch, so e.g.
      ``base_repr(10)`` returned ``'1100'`` instead of ``'1010'`` (the
      remainder's leading zeros were emitted after its lead digit);
    * the digit count came from ``int(log(number)/log(base))``, which can
      be off by one for exact powers due to floating-point rounding;
    * the Python-2-only ``long`` builtin is no longer used.

    The iterative divmod loop below is exact for any non-negative integer.
    """
    chars = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
    number = int(number)
    digits = []
    # Peel off digits least-significant first, then reverse.
    while number >= base:
        number, remainder = divmod(number, base)
        digits.append(chars[remainder])
    digits.append(chars[number])
    digits.reverse()
    # Left-pad with zeros up to *padding* total characters (matches the
    # original's (padding - 1) * '0' + digit behavior for single digits).
    return chars[0] * (padding - len(digits)) + ''.join(digits)
def binary_repr(number, max_length=1025):
    """
    Return the binary representation of the input *number* as a string.

    This is more efficient than using :func:`base_repr` with base 2.

    Increase the value of max_length for very large numbers. Note that on
    32-bit machines, 2**1023 is the largest integer power of 2 which can be
    converted to a Python float.

    Quirk preserved from the original: when *number* has no set bits in
    the examined window (e.g. ``number == 0``) the integer ``0`` is
    returned, not the string ``'0'`` -- callers such as log2/ispower2
    rely on the resulting TypeError from slicing.

    Bug fixes: the original fed a lazy ``map`` object to ``.count`` /
    ``.index`` (fails on Python 3) and stripped Python 2 ``'L'`` suffixes
    from ``repr`` of longs; bit operations avoid both issues.
    """
    digits = [(number >> shift) & 1
              for shift in range(max_length - 1, -1, -1)]
    if 1 not in digits:
        return 0
    # Drop leading zeros: keep from the first set bit onward.
    digits = digits[digits.index(1):]
    return ''.join(str(d) for d in digits)
def log2(x, ln2=math.log(2.0)):
    """
    Return the log(*x*) in base 2.

    This is a _slow_ function but which is guaranteed to return the correct
    integer value if the input is an integer exact power of 2.
    """
    try:
        bits_after_lead = binary_repr(x)[1:]
    except (AssertionError, TypeError):
        return math.log(x) / ln2
    if '1' in bits_after_lead:
        # Not an exact power of two: fall back to floating point.
        return math.log(x) / ln2
    # Exact power of two: the exponent is the number of trailing zeros.
    return len(bits_after_lead)
def ispower2(n):
    """
    Returns the log base 2 of *n* if *n* is a power of 2, zero otherwise.

    Note the potential ambiguity if *n* == 1: 2**0 == 1, interpret
    accordingly.
    """
    bits_after_lead = binary_repr(n)[1:]
    return 0 if '1' in bits_after_lead else len(bits_after_lead)
def isvector(X):
    """
    Like the Matlab (TM) function with the same name, returns *True* if the
    supplied numpy array or matrix *X* looks like a vector, meaning it has
    one non-singleton axis (i.e., it can have multiple axes, but all must
    have length 1, except for one of them).

    If you just want to see if the array has 1 axis, use X.ndim == 1.
    """
    dims = X.shape
    # Total element count equals the longest axis exactly when every
    # other axis has length 1.
    return np.prod(dims) == np.max(dims)
#from numpy import fromfunction as fromfunction_kw
def fromfunction_kw(function, dimensions, **kwargs):
    """
    Drop-in replacement for :func:`numpy.fromfunction`.

    Allows passing keyword arguments to the desired function.

    Call it as (keywords are optional)::

      fromfunction_kw(MyFunction, dimensions, keywords)

    The function ``MyFunction`` is responsible for handling the dictionary
    of keywords it will receive.
    """
    warnings.warn("Use numpy.fromfunction()", DeprecationWarning)
    result = np.fromfunction(function, dimensions, **kwargs)
    return result
### end fperez numutils code
# ---------------------------------------------------------------------------
# Deprecated matlab-compatibility helpers.  Each stub below only raises,
# pointing callers at the numpy/scipy replacement named in the message.
# ---------------------------------------------------------------------------
def rem(x,y):
    """
    Deprecated - see :func:`numpy.remainder`
    """
    raise NotImplementedError('Deprecated - see numpy.remainder')
def norm(x,y=2):
    """
    Deprecated - see :func:`numpy.linalg.norm`
    """
    raise NotImplementedError('Deprecated - see numpy.linalg.norm')
def orth(A):
    """
    Deprecated - needs clean room implementation
    """
    raise NotImplementedError('Deprecated - needs clean room implementation')
def rank(x):
    """
    Deprecated - see :func:`numpy.rank`
    """
    raise NotImplementedError('Deprecated - see numpy.rank')
def sqrtm(x):
    """
    Deprecated - needs clean room implementation
    """
    # NOTE(review): the docstring says "needs clean room implementation"
    # while the exception points at scipy.linalg.sqrtm -- inconsistent text.
    raise NotImplementedError('Deprecated - see scipy.linalg.sqrtm')
def mfuncC(f, x):
    """
    Deprecated
    """
    raise NotImplementedError('Deprecated - needs clean room implementation')
def approx_real(x):
    """
    Deprecated - needs clean room implementation
    """
    raise NotImplementedError('Deprecated - needs clean room implementation')
#helpers for loading, saving, manipulating and viewing numpy record arrays
def safe_isnan(x):
    ':func:`numpy.isnan` for arbitrary types'
    if cbook.is_string_like(x):
        return False
    try:
        result = np.isnan(x)
    except (NotImplementedError, TypeError):
        # np.isnan is undefined for this type; treat as "not NaN".
        return False
    return result
def safe_isinf(x):
    ':func:`numpy.isinf` for arbitrary types'
    if cbook.is_string_like(x):
        return False
    try:
        result = np.isinf(x)
    except (NotImplementedError, TypeError):
        # np.isinf is undefined for this type; treat as "not inf".
        return False
    return result
def rec_view(rec):
    """
    Return a view of the ndarray *rec* as a recarray (no data copy).

    .. seealso::

       http://projects.scipy.org/pipermail/numpy-discussion/2008-August/036429.html
    """
    viewed = rec.view(np.recarray)
    return viewed
def rec_append_field(rec, name, arr, dtype=None):
    """
    Deprecated: use :func:`rec_append_fields` instead.

    Return a new record array with the single field *name* populated with
    data from array *arr*.
    """
    warnings.warn("use rec_append_fields", DeprecationWarning)
    appended = rec_append_fields(rec, name, arr, dtype)
    return appended
def rec_append_fields(rec, names, arrs, dtypes=None):
    """
    Return a new record array with field names populated with data
    from arrays in *arrs*. If appending a single field, then *names*,
    *arrs* and *dtypes* do not have to be lists. They can just be the
    values themselves.

    Raises ValueError if the number of names, arrays and dtypes do not
    line up.

    Bug fixes: the original used the Python-2-only ``raise E, msg``
    statement form (a SyntaxError on Python 3) and passed lazy ``map`` /
    ``zip`` objects to ``np.dtype``; both are now version-neutral with
    identical Python 2 behavior.
    """
    # Multiple-field form: *names* is a non-empty sequence of strings.
    if (not cbook.is_string_like(names) and cbook.iterable(names)
            and len(names) and cbook.is_string_like(names[0])):
        if len(names) != len(arrs):
            raise ValueError("number of arrays do not match number of names")
    else:  # we have only 1 name and 1 array
        names = [names]
        arrs = [arrs]
    arrs = [np.asarray(a) for a in arrs]
    if dtypes is None:
        dtypes = [a.dtype for a in arrs]
    elif not cbook.iterable(dtypes):
        dtypes = [dtypes]
    if len(arrs) != len(dtypes):
        if len(dtypes) == 1:
            # Broadcast a single dtype across all new fields.
            dtypes = dtypes * len(arrs)
        else:
            raise ValueError("dtypes must be None, a single dtype or a list")
    # Extend the dtype with the new (name, dtype) pairs, then copy
    # existing fields and fill in the new ones.
    newdtype = np.dtype(rec.dtype.descr + list(zip(names, dtypes)))
    newrec = np.empty(rec.shape, dtype=newdtype)
    for field in rec.dtype.fields:
        newrec[field] = rec[field]
    for name, arr in zip(names, arrs):
        newrec[name] = arr
    return rec_view(newrec)
def rec_drop_fields(rec, names):
    """
    Return a new numpy record array with fields in *names* dropped.
    """
    dropped = set(names)
    kept = [(field, rec.dtype[field]) for field in rec.dtype.names
            if field not in dropped]
    out = np.empty(len(rec), dtype=np.dtype(kept))
    for field, _ in kept:
        out[field] = rec[field]
    return rec_view(out)
def rec_groupby(r, groupby, stats):
    """
    *r* is a numpy record array

    *groupby* is a sequence of record array attribute names that
    together form the grouping key. eg ('date', 'productcode')

    *stats* is a sequence of (*attr*, *func*, *outname*) tuples which
    will call ``x = func(attr)`` and assign *x* to the record array
    output with attribute *outname*.  For example::

      stats = ( ('sales', len, 'numsales'), ('sales', np.mean, 'avgsale') )

    Return record array has *dtype* names for each attribute name in
    the *groupby* argument, with the associated group values, and
    for each outname name in the *stats* argument, with the associated
    stat summary output.

    Bug fix: the original did ``keys = rowd.keys(); keys.sort()``, which
    fails on Python 3 where ``dict.keys()`` is a view without ``.sort``;
    ``sorted()`` is behavior-identical on Python 2.
    """
    # build a dictionary from groupby keys-> list of indices into r with
    # those keys
    rowd = dict()
    for i, row in enumerate(r):
        key = tuple([row[attr] for attr in groupby])
        rowd.setdefault(key, []).append(i)
    # sort the output by groupby keys
    keys = sorted(rowd)
    rows = []
    for key in keys:
        row = list(key)
        # get the indices for this groupby key
        ind = rowd[key]
        thisr = r[ind]
        # call each stat function for this groupby slice
        row.extend([func(thisr[attr]) for attr, func, outname in stats])
        rows.append(row)
    # build the output record array with groupby and outname attributes
    attrs, funcs, outnames = zip(*stats)
    names = list(groupby)
    names.extend(outnames)
    return np.rec.fromrecords(rows, names=names)
def rec_summarize(r, summaryfuncs):
    """
    *r* is a numpy record array

    *summaryfuncs* is a list of (*attr*, *func*, *outname*) tuples which
    will apply *func* to the array *r*[attr] and assign the output to a new
    attribute name *outname*.  The returned record array is identical to
    *r*, with extra arrays for each element in *summaryfuncs*.
    """
    out_names = list(r.dtype.names)
    out_arrays = [r[name] for name in out_names]
    for attr, func, outname in summaryfuncs:
        out_names.append(outname)
        out_arrays.append(np.asarray(func(r[attr])))
    return np.rec.fromarrays(out_arrays, names=out_names)
def rec_join(key, r1, r2, jointype='inner', defaults=None, r1postfix='1', r2postfix='2'):
    """
    Join record arrays *r1* and *r2* on *key*; *key* is a tuple of
    field names -- if *key* is a string it is assumed to be a single
    attribute name. If *r1* and *r2* have equal values on all the keys
    in the *key* tuple, then their fields will be merged into a new
    record array containing the intersection of the fields of *r1* and
    *r2*.

    *r1* (also *r2*) must not have any duplicate keys.

    The *jointype* keyword can be 'inner', 'outer', 'leftouter'.  To
    do a rightouter join just reverse *r1* and *r2*.

    The *defaults* keyword is a dictionary filled with
    ``{column_name:default_value}`` pairs.

    The keywords *r1postfix* and *r2postfix* are postfixed to column names
    (other than keys) that are both in *r1* and *r2*.
    """
    # Normalize *key* to a tuple of field names and validate that every
    # key field exists in both inputs.
    if cbook.is_string_like(key):
        key = (key, )
    for name in key:
        if name not in r1.dtype.names:
            raise ValueError('r1 does not have key field %s'%name)
        if name not in r2.dtype.names:
            raise ValueError('r2 does not have key field %s'%name)
    def makekey(row):
        # Tuple of the row's values on the key fields; used as a dict key.
        return tuple([row[name] for name in key])
    # Map key-tuple -> row index for each input.  Duplicate keys would
    # silently keep only the last row (hence the "no duplicates" contract).
    r1d = dict([(makekey(row),i) for i,row in enumerate(r1)])
    r2d = dict([(makekey(row),i) for i,row in enumerate(r2)])
    r1keys = set(r1d.keys())
    r2keys = set(r2d.keys())
    common_keys = r1keys & r2keys
    r1ind = np.array([r1d[k] for k in common_keys])
    r2ind = np.array([r2d[k] for k in common_keys])
    common_len = len(common_keys)
    left_len = right_len = 0
    # Rows present only in r1 (resp. r2) are appended for outer/leftouter.
    if jointype == "outer" or jointype == "leftouter":
        left_keys = r1keys.difference(r2keys)
        left_ind = np.array([r1d[k] for k in left_keys])
        left_len = len(left_ind)
    if jointype == "outer":
        right_keys = r2keys.difference(r1keys)
        right_ind = np.array([r2d[k] for k in right_keys])
        right_len = len(right_ind)
    def key_desc(name):
        'if name is a string key, use the larger size of r1 or r2 before merging'
        dt1 = r1.dtype[name]
        if dt1.type != np.string_:
            return (name, dt1.descr[0][1])
        # NOTE(review): dt2 is read from r1, not r2 -- looks like a typo
        # (presumably should be r2.dtype[name]); as written dt2 == dt1
        # always, so the comparison below never actually picks r2's size.
        dt2 = r1.dtype[name]
        assert dt2==dt1
        if dt1.num>dt2.num:
            return (name, dt1.descr[0][1])
        else:
            return (name, dt2.descr[0][1])
    keydesc = [key_desc(name) for name in key]
    def mapped_r1field(name):
        """
        The column name in *newrec* that corresponds to the column in *r1*.
        """
        if name in key or name not in r2.dtype.names: return name
        else: return name + r1postfix
    def mapped_r2field(name):
        """
        The column name in *newrec* that corresponds to the column in *r2*.
        """
        if name in key or name not in r1.dtype.names: return name
        else: return name + r2postfix
    # Output dtype: key fields first, then r1's non-key fields, then r2's.
    r1desc = [(mapped_r1field(desc[0]), desc[1]) for desc in r1.dtype.descr if desc[0] not in key]
    r2desc = [(mapped_r2field(desc[0]), desc[1]) for desc in r2.dtype.descr if desc[0] not in key]
    newdtype = np.dtype(keydesc + r1desc + r2desc)
    newrec = np.empty(common_len + left_len + right_len, dtype=newdtype)
    if jointype != 'inner' and defaults is not None: # fill in the defaults enmasse
        newrec_fields = newrec.dtype.fields.keys()
        for k, v in defaults.items():
            if k in newrec_fields:
                newrec[k] = v
    # Copy r1's data: matched rows first, then (outer joins) r1-only rows.
    for field in r1.dtype.names:
        newfield = mapped_r1field(field)
        if common_len:
            newrec[newfield][:common_len] = r1[field][r1ind]
        if (jointype == "outer" or jointype == "leftouter") and left_len:
            newrec[newfield][common_len:(common_len+left_len)] = r1[field][left_ind]
    # Copy r2's non-key data for matched rows; r2-only rows go at the end.
    for field in r2.dtype.names:
        newfield = mapped_r2field(field)
        if field not in key and common_len:
            newrec[newfield][:common_len] = r2[field][r2ind]
        if jointype == "outer" and right_len:
            newrec[newfield][-right_len:] = r2[field][right_ind]
    newrec.sort(order=key)
    return rec_view(newrec)
def csv2rec(fname, comments='#', skiprows=0, checkrows=0, delimiter=',',
            converterd=None, names=None, missing='', missingd=None,
            use_mrecords=True):
    """
    Load data from comma/space/tab delimited file in *fname* into a
    numpy record array and return the record array.

    If *names* is *None*, a header row is required to automatically
    assign the recarray names.  The headers will be lower cased,
    spaces will be converted to underscores, and illegal attribute
    name characters removed.  If *names* is not *None*, it is a
    sequence of names to use for the column names.  In this case, it
    is assumed there is no header row.

    - *fname*: can be a filename or a file handle.  Support for gzipped
      files is automatic, if the filename ends in '.gz'

    - *comments*: the character used to indicate the start of a comment
      in the file

    - *skiprows*: is the number of rows from the top to skip

    - *checkrows*: is the number of rows to check to validate the column
      data type.  When set to zero all rows are validated.

    - *converted*: if not *None*, is a dictionary mapping column number or
      munged column name to a converter function.

    - *names*: if not None, is a list of header names.  In this case, no
      header will be read from the file

    - *missingd* is a dictionary mapping munged column names to field values
      which signify that the field does not contain actual data and should
      be masked, e.g. '0000-00-00' or 'unused'

    - *missing*: a string whose value signals a missing field regardless of
      the column it appears in

    - *use_mrecords*: if True, return an mrecords.fromrecords record array if any of the data are missing

    If no rows are found, *None* is returned -- see :file:`examples/loadrec.py`
    """
    if converterd is None:
        converterd = dict()
    if missingd is None:
        missingd = {}
    import dateutil.parser
    import datetime
    parsedate = dateutil.parser.parse
    fh = cbook.to_filehandle(fname)
    class FH:
        """
        For space-delimited files, we want different behavior than
        comma or tab.  Generally, we want multiple spaces to be
        treated as a single separator, whereas with comma and tab we
        want multiple commas to return multiple (empty) fields.  The
        join/strip trick below effects this.
        """
        def __init__(self, fh):
            self.fh = fh
        def close(self):
            self.fh.close()
        def seek(self, arg):
            self.fh.seek(arg)
        def fix(self, s):
            # collapse runs of whitespace into single separators
            return ' '.join(s.split())
        def next(self):
            # NOTE(review): Python-2-only iterator protocol (``next`` /
            # ``self.fh.next()``); Python 3 would need ``__next__``.
            return self.fix(self.fh.next())
        def __iter__(self):
            for line in self.fh:
                yield self.fix(line)
    if delimiter==' ':
        fh = FH(fh)
    reader = csv.reader(fh, delimiter=delimiter)
    def process_skiprows(reader):
        # advance the reader past the first *skiprows* rows
        if skiprows:
            for i, row in enumerate(reader):
                if i>=(skiprows-1): break
        return fh, reader
    process_skiprows(reader)
    def ismissing(name, val):
        "Should the value val in column name be masked?"
        if val == missing or val == missingd.get(name) or val == '':
            return True
        else:
            return False
    def with_default_value(func, default):
        # wrap a converter so missing cells yield *default* instead
        def newfunc(name, val):
            if ismissing(name, val):
                return default
            else:
                return func(val)
        return newfunc
    def mybool(x):
        if x=='True': return True
        elif x=='False': return False
        else: raise ValueError('invalid bool')
    dateparser = dateutil.parser.parse
    mydateparser = with_default_value(dateparser, datetime.date(1,1,1))
    myfloat = with_default_value(float, np.nan)
    myint = with_default_value(int, -1)
    mystr = with_default_value(str, '')
    mybool = with_default_value(mybool, None)
    def mydate(x):
        # try and return a date object
        d = dateparser(x)
        if d.hour>0 or d.minute>0 or d.second>0:
            raise ValueError('not a date')
        return d.date()
    mydate = with_default_value(mydate, datetime.date(1,1,1))
    def get_func(name, item, func):
        # promote functions in this order
        funcmap = {mybool:myint,myint:myfloat, myfloat:mydate, mydate:mydateparser, mydateparser:mystr}
        try: func(name, item)
        except:
            if func==mystr:
                raise ValueError('Could not find a working conversion function')
            else: return get_func(name, item, funcmap[func]) # recurse
        else: return func
    # map column names that clash with builtins -- TODO - extend this list
    itemd = {
        'return' : 'return_',
        'file' : 'file_',
        'print' : 'print_',
        }
    def get_converters(reader):
        # Infer a converter per column by trial conversion of the first
        # *checkrows* rows (all rows when checkrows == 0).
        converters = None
        for i, row in enumerate(reader):
            if i==0:
                converters = [mybool]*len(row)
            if checkrows and i>checkrows:
                break
            #print i, len(names), len(row)
            #print 'converters', zip(converters, row)
            for j, (name, item) in enumerate(zip(names, row)):
                func = converterd.get(j)
                if func is None:
                    func = converterd.get(name)
                if func is None:
                    #if not item.strip(): continue
                    func = converters[j]
                    if len(item.strip()):
                        func = get_func(name, item, func)
                else:
                    # how should we handle custom converters and defaults?
                    func = with_default_value(func, None)
                converters[j] = func
        return converters
    # Get header and remove invalid characters
    needheader = names is None
    if needheader:
        for row in reader:
            #print 'csv2rec', row
            if len(row) and row[0].startswith(comments):
                continue
            headers = row
            break
        # remove these chars
        delete = set("""~!@#$%^&*()-=+~\|]}[{';: /?.>,<""")
        delete.add('"')
        names = []
        seen = dict()
        for i, item in enumerate(headers):
            item = item.strip().lower().replace(' ', '_')
            item = ''.join([c for c in item if c not in delete])
            if not len(item):
                item = 'column%d'%i
            item = itemd.get(item, item)
            # de-duplicate repeated header names with a _N suffix
            cnt = seen.get(item, 0)
            if cnt>0:
                names.append(item + '_%d'%cnt)
            else:
                names.append(item)
            seen[item] = cnt+1
    else:
        if cbook.is_string_like(names):
            names = [n.strip() for n in names.split(',')]
    # get the converter functions by inspecting checkrows
    converters = get_converters(reader)
    if converters is None:
        raise ValueError('Could not find any valid data in CSV file')
    # reset the reader and start over
    fh.seek(0)
    reader = csv.reader(fh, delimiter=delimiter)
    process_skiprows(reader)
    if needheader:
        # NOTE(review): Python-2-only ``reader.next()``; Python 3 would
        # need ``next(reader)``.
        skipheader = reader.next()
    # iterate over the remaining rows and convert the data to date
    # objects, ints, or floats as approriate
    rows = []
    rowmasks = []
    for i, row in enumerate(reader):
        if not len(row): continue
        if row[0].startswith(comments): continue
        rows.append([func(name, val) for func, name, val in zip(converters, names, row)])
        rowmasks.append([ismissing(name, val) for name, val in zip(names, row)])
    fh.close()
    if not len(rows):
        return None
    if use_mrecords and np.any(rowmasks):
        try: from numpy.ma import mrecords
        except ImportError:
            raise RuntimeError('numpy 1.05 or later is required for masked array support')
        else:
            r = mrecords.fromrecords(rows, names=names, mask=rowmasks)
    else:
        r = np.rec.fromrecords(rows, names=names)
    return r
# a series of classes for describing the format intentions of various rec views
class FormatObj:
    """Base formatter: identity-style conversions between values and strings."""
    def tostr(self, x):
        """String to write to the output file for value *x*."""
        return self.toval(x)
    def toval(self, x):
        """Storage value for *x* (default: its str())."""
        return str(x)
    def fromstr(self, s):
        """Parse a stored string back into a value (default: unchanged)."""
        return s
class FormatString(FormatObj):
    """String formatter: writes repr() with the surrounding quotes stripped."""
    def tostr(self, x):
        quoted = repr(x)
        return quoted[1:-1]
class FormatFormatStr(FormatObj):
    """Formatter driven by a %-style format string *fmt*."""
    def __init__(self, fmt):
        self.fmt = fmt
    def tostr(self, x):
        if x is None:
            return 'None'
        return self.fmt % self.toval(x)
class FormatFloat(FormatFormatStr):
    """Float formatter with fixed *precision* and a multiplicative *scale*."""
    def __init__(self, precision=4, scale=1.):
        FormatFormatStr.__init__(self, '%%1.%df' % precision)
        self.precision = precision
        self.scale = scale
    def toval(self, x):
        # Scale on the way out; None passes through untouched.
        return x if x is None else x * self.scale
    def fromstr(self, s):
        return float(s) / self.scale
class FormatInt(FormatObj):
    """Integer formatter."""
    def tostr(self, x):
        return '%d' % int(x)
    def toval(self, x):
        return int(x)
    def fromstr(self, s):
        return int(s)
class FormatBool(FormatObj):
    """Boolean formatter (note: fromstr(bool(s)) is truthiness of the string)."""
    def toval(self, x):
        return str(x)
    def fromstr(self, s):
        return bool(s)
class FormatPercent(FormatFloat):
    # Renders fractions as percentages (value * 100).
    def __init__(self, precision=4):
        FormatFloat.__init__(self, precision, scale=100.)
class FormatThousands(FormatFloat):
    # Renders values in thousands (value / 1000).
    def __init__(self, precision=4):
        FormatFloat.__init__(self, precision, scale=1e-3)
class FormatMillions(FormatFloat):
    # Renders values in millions (value / 1e6).
    def __init__(self, precision=4):
        FormatFloat.__init__(self, precision, scale=1e-6)
class FormatDate(FormatObj):
    """Date formatter: strftime on output, dateutil parse on input."""
    def __init__(self, fmt):
        self.fmt = fmt
    def toval(self, x):
        return 'None' if x is None else x.strftime(self.fmt)
    def fromstr(self, x):
        import dateutil.parser
        parsed = dateutil.parser.parse(x)
        return parsed.date()
class FormatDatetime(FormatDate):
    """Like FormatDate, but parsing keeps the time-of-day component."""
    def __init__(self, fmt='%Y-%m-%d %H:%M:%S'):
        FormatDate.__init__(self, fmt)
    def fromstr(self, x):
        import dateutil.parser
        parsed = dateutil.parser.parse(x)
        return parsed
# Default Format* instance for each numpy scalar type; get_formatd()
# falls back to a plain FormatObj() for types not listed here.
defaultformatd = {
    np.bool_ : FormatBool(),
    np.int16 : FormatInt(),
    np.int32 : FormatInt(),
    np.int64 : FormatInt(),
    np.float32 : FormatFloat(),
    np.float64 : FormatFloat(),
    np.object_ : FormatObj(),
    np.string_ : FormatString(),
    }
def get_formatd(r, formatd=None):
    'build a formatd guaranteed to have a key for every dtype name'
    formatd = dict() if formatd is None else formatd
    for name in r.dtype.names:
        if formatd.get(name) is None:
            # Fall back to the per-type default, then to a plain FormatObj.
            formatd[name] = defaultformatd.get(r.dtype[name].type, FormatObj())
    return formatd
def csvformat_factory(format):
    """Return a deep copy of *format* adjusted for CSV storage: floats are
    written unscaled and at full repr precision."""
    adjusted = copy.deepcopy(format)
    if isinstance(adjusted, FormatFloat):
        adjusted.scale = 1.  # override scaling for storage
        adjusted.fmt = '%r'
    return adjusted
def rec2txt(r, header=None, padding=3, precision=3):
    """
    Returns a textual representation of a record array.

    *r*: numpy recarray

    *header*: list of column headers

    *padding*: space between each column

    *precision*: number of decimal places to use for floats.
        Set to an integer to apply to all floats.  Set to a
        list of integers to apply precision individually.
        Precision for non-floats is simply ignored.

    Example::

      precision=[0,2,3]

      Output::

        ID    Price   Return
        ABC   12.54    0.234
        XYZ    6.32   -0.076
    """
    # A scalar precision applies to every column.
    if cbook.is_numlike(precision):
        precision = [precision]*len(r.dtype)
    def get_type(item,atype=int):
        # Narrowest of int/float/str able to represent *item*, tried in
        # that order via the tdict promotion chain.
        tdict = {None:int, int:float, float:str}
        try: atype(str(item))
        except: return get_type(item,tdict[atype])
        return atype
    def get_justify(colname, column, precision):
        # Return (justify, width, fmt): justify 0=left/1=right, total field
        # width including padding, and the %-format for the column.
        # NOTE(review): relies on Python-2 list-returning map() inside
        # np.max(map(...)) and on the long-removed np.str/np.int/np.float
        # aliases -- this whole helper is Python-2/old-numpy only.
        ntype = type(column[0])
        if ntype==np.str or ntype==np.str_ or ntype==np.string0 or ntype==np.string_:
            length = max(len(colname),column.itemsize)
            return 0, length+padding, "%s" # left justify
        if ntype==np.int or ntype==np.int16 or ntype==np.int32 or ntype==np.int64 or ntype==np.int8 or ntype==np.int_:
            length = max(len(colname),np.max(map(len,map(str,column))))
            return 1, length+padding, "%d" # right justify
        # JDH: my powerbook does not have np.float96 using np 1.3.0
        """
        In [2]: np.__version__
        Out[2]: '1.3.0.dev5948'
        In [3]: !uname -a
        Darwin Macintosh-5.local 9.4.0 Darwin Kernel Version 9.4.0: Mon Jun  9 19:30:53 PDT 2008; root:xnu-1228.5.20~1/RELEASE_I386 i386 i386
        In [4]: np.float96
        ---------------------------------------------------------------------------
        AttributeError                            Traceback (most recent call la
        """
        if ntype==np.float or ntype==np.float32 or ntype==np.float64 or (hasattr(np, 'float96') and (ntype==np.float96)) or ntype==np.float_:
            fmt = "%." + str(precision) + "f"
            length = max(len(colname),np.max(map(len,map(lambda x:fmt%x,column))))
            return 1, length+padding, fmt # right justify
        return 0, max(len(colname),np.max(map(len,map(str,column))))+padding, "%s"
    if header is None:
        header = r.dtype.names
    justify_pad_prec = [get_justify(header[i],r.__getitem__(colname),precision[i]) for i, colname in enumerate(r.dtype.names)]
    # Compute an extra leading spacer where a left-justified column is
    # followed by a right-justified one (and drop padding in the reverse
    # transition) so columns stay visually separated.
    justify_pad_prec_spacer = []
    for i in range(len(justify_pad_prec)):
        just,pad,prec = justify_pad_prec[i]
        if i == 0:
            justify_pad_prec_spacer.append((just,pad,prec,0))
        else:
            pjust,ppad,pprec = justify_pad_prec[i-1]
            if pjust == 0 and just == 1:
                justify_pad_prec_spacer.append((just,pad-padding,prec,0))
            elif pjust == 1 and just == 0:
                justify_pad_prec_spacer.append((just,pad,prec,padding))
            else:
                justify_pad_prec_spacer.append((just,pad,prec,0))
    def format(item, just_pad_prec_spacer):
        # Render one cell with its justification, width and precision.
        just, pad, prec, spacer = just_pad_prec_spacer
        if just == 0:
            return spacer*' ' + str(item).ljust(pad)
        else:
            if get_type(item) == float:
                item = (prec%float(item))
            elif get_type(item) == int:
                item = (prec%int(item))
            return item.rjust(pad)
    textl = []
    textl.append(''.join([format(colitem,justify_pad_prec_spacer[j]) for j, colitem in enumerate(header)]))
    for i, row in enumerate(r):
        textl.append(''.join([format(colitem,justify_pad_prec_spacer[j]) for j, colitem in enumerate(row)]))
        if i==0:
            textl[0] = textl[0].rstrip()
    text = os.linesep.join(textl)
    return text
def rec2csv(r, fname, delimiter=',', formatd=None, missing='',
            missingd=None):
    """
    Save the data from numpy recarray *r* into a
    comma-/space-/tab-delimited file.  The record array dtype names
    will be used for column headers.

    *fname*: can be a filename or a file handle.  Support for gzipped
      files is automatic, if the filename ends in '.gz'

    .. seealso::

        :func:`csv2rec`:
            For information about *missing* and *missingd*, which can
            be used to fill in masked values into your CSV file.
    """
    if missingd is None:
        missingd = dict()

    def masked_tostr(tostr):
        # Wrap a formatter so masked cells emit their missing-value marker.
        def convert(val, mask, mval):
            return mval if mask else tostr(val)
        return convert

    formatd = get_formatd(r, formatd)
    converters = [masked_tostr(csvformat_factory(formatd[name]).tostr)
                  for name in r.dtype.names]
    fh, opened = cbook.to_filehandle(fname, 'w', return_opened=True)
    writer = csv.writer(fh, delimiter=delimiter)
    header = r.dtype.names
    writer.writerow(header)
    # Per-column missing-value markers.
    mvals = [missingd.get(name, missing) for name in header]
    ismasked = False
    if len(r):
        ismasked = hasattr(r[0], '_fieldmask')
    for row in r:
        if ismasked:
            row, rowmask = row.item(), row._fieldmask.item()
        else:
            rowmask = [False] * len(row)
        writer.writerow([conv(val, mask, mval) for conv, val, mask, mval
                         in zip(converters, row, rowmask, mvals)])
    if opened:
        fh.close()
def griddata(x,y,z,xi,yi):
    """
    ``zi = griddata(x,y,z,xi,yi)`` fits a surface of the form *z* =
    *f*(*x*, *y*) to the data in the (usually) nonuniformly spaced
    vectors (*x*, *y*, *z*).  :func:`griddata` interpolates this
    surface at the points specified by (*xi*, *yi*) to produce
    *zi*. *xi* and *yi* must describe a regular grid, can be either 1D
    or 2D, but must be monotonically increasing.

    A masked array is returned if any grid points are outside convex
    hull defined by input data (no extrapolation is done).

    Uses natural neighbor interpolation based on Delaunay
    triangulation.  By default, this algorithm is provided by the
    :mod:`matplotlib.delaunay` package, written by Robert Kern.  The
    triangulation algorithm in this package is known to fail on some
    nearly pathological cases. For this reason, a separate toolkit
    (:mod:`mpl_tookits.natgrid`) has been created that provides a more
    robust algorithm fof triangulation and interpolation.  This
    toolkit is based on the NCAR natgrid library, which contains code
    that is not redistributable under a BSD-compatible license.  When
    installed, this function will use the :mod:`mpl_toolkits.natgrid`
    algorithm, otherwise it will use the built-in
    :mod:`matplotlib.delaunay` package.

    The natgrid matplotlib toolkit can be downloaded from
    http://sourceforge.net/project/showfiles.php?group_id=80706&package_id=142792
    """
    try:
        from mpl_toolkits.natgrid import _natgrid, __version__
        _use_natgrid = True
    except ImportError:
        import matplotlib.delaunay as delaunay
        from matplotlib.delaunay import __version__
        _use_natgrid = False
    # Report which backend is in use, once per process.
    if not griddata._reported:
        if _use_natgrid:
            verbose.report('using natgrid version %s' % __version__)
        else:
            verbose.report('using delaunay version %s' % __version__)
        griddata._reported = True
    if xi.ndim != yi.ndim:
        raise TypeError("inputs xi and yi must have same number of dimensions (1 or 2)")
    if xi.ndim != 1 and xi.ndim != 2:
        raise TypeError("inputs xi and yi must be 1D or 2D.")
    if not len(x)==len(y)==len(z):
        raise TypeError("inputs x,y,z must all be 1D arrays of the same length")
    # remove masked points.
    if hasattr(z,'mask'):
        x = x.compress(z.mask == False)
        y = y.compress(z.mask == False)
        z = z.compressed()
    if _use_natgrid: # use natgrid toolkit if available.
        if xi.ndim == 2:
            xi = xi[0,:]
            yi = yi[:,0]
        # override default natgrid internal parameters.
        _natgrid.seti('ext',0)
        _natgrid.setr('nul',np.nan)
        # cast input arrays to doubles (this makes a copy).  Builtin
        # ``float`` replaces the removed ``np.float`` alias (they were
        # the same type).
        x = x.astype(float)
        y = y.astype(float)
        z = z.astype(float)
        xo = xi.astype(float)
        yo = yi.astype(float)
        if min(xo[1:]-xo[0:-1]) < 0 or min(yo[1:]-yo[0:-1]) < 0:
            # Bug fix: the original used the Python-2-only
            # ``raise ValueError, msg`` statement form (SyntaxError on Py3).
            raise ValueError('output grid defined by xi,yi must be monotone increasing')
        # allocate array for output (buffer will be overwritten by nagridd)
        zo = np.empty((yo.shape[0],xo.shape[0]), float)
        _natgrid.natgridd(x,y,z,xo,yo,zo)
    else: # use Robert Kern's delaunay package from scikits (default)
        if xi.ndim != yi.ndim:
            raise TypeError("inputs xi and yi must have same number of dimensions (1 or 2)")
        if xi.ndim != 1 and xi.ndim != 2:
            raise TypeError("inputs xi and yi must be 1D or 2D.")
        if xi.ndim == 1:
            xi,yi = np.meshgrid(xi,yi)
        # triangulate data
        tri = delaunay.Triangulation(x,y)
        # interpolate data
        interp = tri.nn_interpolator(z)
        zo = interp(xi,yi)
    # mask points on grid outside convex hull of input data.
    if np.any(np.isnan(zo)):
        zo = np.ma.masked_where(np.isnan(zo),zo)
    return zo
# one-shot flag for the backend report above
griddata._reported = False
##################################################
# Linear interpolation algorithms
##################################################
def less_simple_linear_interpolation( x, y, xi, extrap=False ):
    """
    Linear interpolation of *y* over *x*, evaluated at the arbitrary
    points *xi* (scalar or sequence).

    Unlike :func:`cbook.simple_linear_interpolation`, which only fills in
    points between a start and an end, this evaluates the piecewise-linear
    interpolant at any requested abscissae.  Points outside ``[x[0], x[-1]]``
    yield NaN unless *extrap* is True, in which case the nearest endpoint
    value is used.

    This is deliberately simple and inefficient; for serious work use scipy.
    """
    if cbook.is_scalar(xi): xi = [xi]

    x = np.asarray(x)
    y = np.asarray(y)
    xi = np.asarray(xi)

    # Output starts as all-NaN so out-of-range queries fall through untouched.
    out_shape = list(y.shape)
    out_shape[0] = len(xi)
    yi = np.tile(np.nan, out_shape)

    for k, xval in enumerate(xi):
        exact = np.nonzero(x == xval)[0]
        if len(exact) > 0:
            # Exact abscissa match: take the first matching sample.
            yi[k] = y[exact[0]]
        elif xval < x[0]:
            if extrap:
                yi[k] = y[0]
        elif xval > x[-1]:
            if extrap:
                yi[k] = y[-1]
        else:
            # Interior point: interpolate within the bracketing segment.
            j = max(np.nonzero(x < xval)[0])
            frac = (xval - x[j]) / (x[j + 1] - x[j])
            yi[k] = y[j] + frac * (y[j + 1] - y[j])
    return yi
def slopes(x,y):
    """
    :func:`slopes` calculates the slope *y*'(*x*)

    The slope is estimated using the slope obtained from that of a
    parabola through any three consecutive points.

    This method should be superior to that described in the appendix
    of A CONSISTENTLY WELL BEHAVED METHOD OF INTERPOLATION by Russel
    W. Stineman (Creative Computing July 1980) in at least one aspect:

    Circles for interpolation demand a known aspect ratio between
    *x*- and *y*-values.  For many functions, however, the abscissa
    are given in different dimensions, so an aspect ratio is
    completely arbitrary.

    The parabola method gives very similar results to the circle
    method for most regular cases but behaves much better in special
    cases.

    Norbert Nemec, Institute of Theoretical Physics, University of
    Regensburg, April 2006 Norbert.Nemec at physik.uni-regensburg.de

    (inspired by a original implementation by Halldor Bjornsson,
    Icelandic Meteorological Office, March 2006 halldor at vedur.is)
    """
    # Cast key variables as float.  (Fix: the ``np.float_`` alias was
    # removed in NumPy 2.0; plain ``float`` is the documented equivalent.)
    x = np.asarray(x, dtype=float)
    y = np.asarray(y, dtype=float)
    yp = np.zeros(y.shape, dtype=float)

    dx = x[1:] - x[:-1]
    dy = y[1:] - y[:-1]
    dydx = dy / dx

    # Interior slopes: weighted average of adjacent secant slopes,
    # equivalent to the slope of the parabola through three points.
    yp[1:-1] = (dydx[:-1] * dx[1:] + dydx[1:] * dx[:-1]) / (dx[1:] + dx[:-1])
    # Endpoint slopes: extrapolated from the first/last secant.
    yp[0] = 2.0 * dy[0] / dx[0] - yp[1]
    yp[-1] = 2.0 * dy[-1] / dx[-1] - yp[-2]
    return yp
def stineman_interp(xi,x,y,yp=None):
    """
    Given data vectors *x* and *y*, the slope vector *yp* and a new
    abscissa vector *xi*, the function :func:`stineman_interp` uses
    Stineman interpolation to calculate a vector *yi* corresponding to
    *xi*.

    Here's an example that generates a coarse sine curve, then
    interpolates over a finer abscissa::

      x = linspace(0,2*pi,20);  y = sin(x); yp = cos(x)
      xi = linspace(0,2*pi,40);
      yi = stineman_interp(xi,x,y,yp);
      plot(x,y,'o',xi,yi)

    The interpolation method is described in the article A
    CONSISTENTLY WELL BEHAVED METHOD OF INTERPOLATION by Russell
    W. Stineman. The article appeared in the July 1980 issue of
    Creative Computing with a note from the editor stating that while
    they were:

      not an academic journal but once in a while something serious
      and original comes in

    adding that this was
      "apparently a real solution" to a well known problem.

    For *yp* = *None*, the routine automatically determines the slopes
    using the :func:`slopes` routine.

    *x* is assumed to be sorted in increasing order.

    For values ``xi[j] < x[0]`` or ``xi[j] > x[-1]``, the routine
    tries an extrapolation.  The relevance of the data obtained from
    this, of course, is questionable...

    Original implementation by Halldor Bjornsson, Icelandic
    Meteorolocial Office, March 2006 halldor at vedur.is

    Completely reworked and optimized for Python by Norbert Nemec,
    Institute of Theoretical Physics, University of Regensburg, April
    2006 Norbert.Nemec at physik.uni-regensburg.de
    """
    # Cast key variables as float.  (Fix: the ``np.float_`` alias was
    # removed in NumPy 2.0; plain ``float`` is the documented equivalent.)
    x = np.asarray(x, dtype=float)
    y = np.asarray(y, dtype=float)
    assert x.shape == y.shape
    if yp is None:
        yp = slopes(x, y)
    else:
        yp = np.asarray(yp, dtype=float)
    xi = np.asarray(xi, dtype=float)
    yi = np.zeros(xi.shape, dtype=float)

    # calculate linear slopes
    dx = x[1:] - x[:-1]
    dy = y[1:] - y[:-1]
    s = dy/dx  #note length of s is N-1 so last element is #N-2

    # find the segment each xi is in
    # this line actually is the key to the efficiency of this implementation
    idx = np.searchsorted(x[1:-1], xi)

    # now we have generally: x[idx[j]] <= xi[j] <= x[idx[j]+1]
    # except at the boundaries, where it may be that xi[j] < x[0] or xi[j] > x[-1]

    # the y-values that would come out from a linear interpolation:
    sidx = s.take(idx)
    xidx = x.take(idx)
    yidx = y.take(idx)
    xidxp1 = x.take(idx+1)
    yo = yidx + sidx * (xi - xidx)

    # the difference that comes when using the slopes given in yp
    dy1 = (yp.take(idx)- sidx) * (xi - xidx)       # using the yp slope of the left point
    dy2 = (yp.take(idx+1)-sidx) * (xi - xidxp1)    # using the yp slope of the right point
    dy1dy2 = dy1*dy2

    # The following is optimized for Python. The solution actually
    # does more calculations than necessary but exploiting the power
    # of numpy, this is far more efficient than coding a loop by hand
    # in Python.  The three choices correspond to sign(dy1*dy2) being
    # -1, 0 or +1; note the unused branches may emit divide-by-zero
    # warnings since all choices are evaluated eagerly.
    yi = yo + dy1dy2 * np.choose(np.array(np.sign(dy1dy2), np.int32)+1,
                                 ((2*xi-xidx-xidxp1)/((dy1-dy2)*(xidxp1-xidx)),
                                  0.0,
                                  1/(dy1+dy2),))
    return yi
##################################################
# Code related to things in and around polygons
##################################################
def inside_poly(points, verts):
    """
    Return the indices into *points* (a sequence of *x*, *y* pairs) of the
    points lying inside the polygon whose *x*, *y* vertices are *verts*.
    """
    hits = nxutils.points_inside_poly(points, verts)
    idx, = np.nonzero(hits)
    return idx
def poly_below(xmin, xs, ys):
    """
    Return vertices of a polygon with a horizontal base at scalar *xmin*
    and an upper boundary following the curve *xs*, *ys*.

    Intended for use with :meth:`matplotlib.axes.Axes.fill`, eg::

      xv, yv = poly_below(0, x, y)
      ax.fill(xv, yv)
    """
    # Stay in the masked-array world if either input is masked.
    nx = ma if (ma.isMaskedArray(xs) or ma.isMaskedArray(ys)) else np
    xs = nx.asarray(xs)
    ys = nx.asarray(ys)
    n = len(xs)
    assert(n == len(ys))
    # First half traces the curve; second half returns along the base.
    x = xmin * nx.ones(2 * n)
    y = nx.ones(2 * n)
    x[:n] = xs
    y[:n] = ys
    y[n:] = ys[::-1]
    return x, y
def poly_between(x, ylower, yupper):
    """
    Given a sequence *x* and bounds *ylower*, *yupper*, return the polygon
    filling the region between the two bounds.  *ylower* and *yupper* may
    each be a scalar or an iterable of the same length as *x*.

    Return value is *x*, *y* arrays for use with
    :meth:`matplotlib.axes.Axes.fill`.
    """
    # Stay in the masked-array world if any input is masked.
    any_masked = (ma.isMaskedArray(ylower) or ma.isMaskedArray(yupper) or
                  ma.isMaskedArray(x))
    nx = ma if any_masked else np

    n = len(x)
    # Broadcast scalar bounds to full-length arrays.
    if not cbook.iterable(ylower):
        ylower = ylower * nx.ones(n)
    if not cbook.iterable(yupper):
        yupper = yupper * nx.ones(n)

    # Outbound leg along the upper curve, return leg along the lower one.
    xv = nx.concatenate((x, x[::-1]))
    yv = nx.concatenate((yupper, ylower[::-1]))
    return xv, yv
def is_closed_polygon(X):
    """
    Return whether the first and last items of the sequence *X* are equal.
    For coordinates along a polygonal curve this tests whether the curve
    is closed.
    """
    first, last = X[0], X[-1]
    return np.all(first == last)
def contiguous_regions(mask):
    """
    Return a list of ``(ind0, ind1)`` pairs such that ``mask[ind0:ind1].all()``
    is True, covering every maximal run of True values in *mask*.

    TODO: this is a pure python implementation which probably has a much
    faster numpy impl
    """
    regions = []
    start = None  # index where the current run of True values began
    idx = -1
    for idx, flag in enumerate(mask):
        if flag and start is None:
            start = idx
        elif start is not None and not flag:
            regions.append((start, idx))
            start = None
    # Close a run that extends to the end of the mask.
    if start is not None:
        regions.append((start, idx + 1))
    return regions
##################################################
# Vector and path length geometry calculations
##################################################
def vector_lengths( X, P=2., axis=None ):
    """
    Compute the *P*-norm ``(sum((x_i)^P))^(1/P)`` of the vectors in *X*
    along the given *axis* (over all elements if *axis* is None).

    Similar to :func:`numpy.norm` for vectors, but works over an arbitrary
    axis of the supplied array or matrix.
    """
    arr = np.asarray(X)
    powered_sum = np.sum(arr ** (P), axis=axis)
    return powered_sum ** (1. / P)
def distances_along_curve( X ):
    """
    Compute the Euclidean distance between successive rows of the
    *M* x *N* array or matrix *X*, returning *M*-1 distances.
    """
    deltas = np.diff(X, axis=0)
    # Length of each step vector (2-norm along the rows).
    return (np.sum(deltas ** 2., axis=1)) ** (1. / 2.)
def path_length(X):
    """
    Compute the cumulative distance travelled along the polygonal curve
    whose points are the rows of the *M* x *N* array or matrix *X*.
    Returns an array of length *M* giving the arc length at each row
    (the first entry is 0).
    """
    steps = np.diff(X, axis=0)
    step_lengths = (np.sum(steps ** 2., axis=1)) ** (1. / 2.)
    return np.concatenate((np.zeros(1), np.cumsum(step_lengths)))
def quad2cubic(q0x, q0y, q1x, q1y, q2x, q2y):
    """
    Convert a quadratic Bezier curve to its exact cubic representation.

    Inputs are the *x*, *y* coordinates of the quadratic curve's three
    control points; the return value is a tuple of *x*, *y* coordinates
    of the equivalent cubic's four control points.  The cubic's end
    points coincide with the quadratic's.
    """
    two_thirds = 2. / 3.
    # Inner control points sit 2/3 of the way from each end point toward
    # the quadratic's middle control point.
    c1x = q0x + two_thirds * (q1x - q0x)
    c1y = q0y + two_thirds * (q1y - q0y)
    c2x = c1x + 1. / 3. * (q2x - q0x)
    c2y = c1y + 1. / 3. * (q2y - q0y)
    return q0x, q0y, c1x, c1y, c2x, c2y, q2x, q2y
| agpl-3.0 |
crichardson17/starburst_atlas | Low_resolution_sims/Dusty_LowRes/Padova_inst/padova_inst_0/fullgrid/IR.py | 30 | 9364 | import csv
import matplotlib.pyplot as plt
from numpy import *
import scipy.interpolate
import math
from pylab import *
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.patches as patches
from matplotlib.path import Path
import os
# ------------------------------------------------------------------------------------------------------
#inputs
# Locate the three Cloudy grid files (*1.grd, *2.grd, *3.grd) and the three
# matching emission-line tables (*1.txt, *2.txt, *3.txt) in the working
# directory by filename suffix.  If several files share a suffix, the last
# one returned by os.listdir wins.
for file in os.listdir('.'):
    if file.endswith("1.grd"):
        gridfile1 = file
for file in os.listdir('.'):
    if file.endswith("2.grd"):
        gridfile2 = file
for file in os.listdir('.'):
    if file.endswith("3.grd"):
        gridfile3 = file
# ------------------------
for file in os.listdir('.'):
    if file.endswith("1.txt"):
        Elines1 = file
for file in os.listdir('.'):
    if file.endswith("2.txt"):
        Elines2 = file
for file in os.listdir('.'):
    if file.endswith("3.txt"):
        Elines3 = file
# ------------------------------------------------------------------------------------------------------
#Patches data
# Rectangular/trapezoidal regions (in the log density / log phi plane)
# marking where other published datasets fall; drawn later by add_patches.
#for the Kewley and Levesque data
verts = [
    (1., 7.97712125471966000000),  # left, bottom
    (1., 9.57712125471966000000),  # left, top
    (2., 10.57712125471970000000),  # right, top
    (2., 8.97712125471966000000),  # right, bottom
    (0., 0.),  # ignored
]
codes = [Path.MOVETO,
         Path.LINETO,
         Path.LINETO,
         Path.LINETO,
         Path.CLOSEPOLY,
         ]
path = Path(verts, codes)
# ------------------------
#for the Kewley 01 data
verts2 = [
    (2.4, 9.243038049),  # left, bottom
    (2.4, 11.0211893),  # left, top
    (2.6, 11.0211893),  # right, top
    (2.6, 9.243038049),  # right, bottom
    (0, 0.),  # ignored
]
# NOTE(review): this `path = Path(verts, codes)` recreates the identical
# object built above; it is redundant but harmless.
path = Path(verts, codes)
path2 = Path(verts2, codes)
# -------------------------
#for the Moy et al data
verts3 = [
    (1., 6.86712125471966000000),  # left, bottom
    (1., 10.18712125471970000000),  # left, top
    (3., 12.18712125471970000000),  # right, top
    (3., 8.86712125471966000000),  # right, bottom
    (0., 0.),  # ignored
]
# NOTE(review): redundant re-creation of `path` again, as above.
path = Path(verts, codes)
path3 = Path(verts3, codes)
# ------------------------------------------------------------------------------------------------------
#the routine to add patches for others peoples' data onto our plots.
def add_patches(ax):
    """Overlay the Kewley & Levesque, Kewley 01 and Moy et al. data regions
    (the module-level Paths built above) onto the given axes *ax*.

    Bug fix: the original body ignored the *ax* parameter and always drew on
    the global ``ax1``.  The only call site passes ``ax1``, so behaviour at
    that call is unchanged, but the function now honours its argument.
    """
    patch3 = patches.PathPatch(path3, facecolor='yellow', lw=0)
    patch2 = patches.PathPatch(path2, facecolor='green', lw=0)
    patch = patches.PathPatch(path, facecolor='red', lw=0)
    ax.add_patch(patch3)
    ax.add_patch(patch2)
    ax.add_patch(patch)
# ---------------------------------------------------
#the subplot routine; will be called later
numplots = 12
def add_sub_plot(sub_num):
    """Draw panel *sub_num* of the 3x4 figure: RBF-interpolate the line
    ratio column z[:, sub_num-1] onto the (xi, yi) mesh, draw dashed (cyan)
    and solid (black) contour sets, and mark/annotate the peak value.

    Relies on module-level globals prepared below: x, y, z, xi, yi,
    levels, levels2, max_values, headers, line, numplots.
    """
    plt.subplot(3,4,sub_num)
    rbf = scipy.interpolate.Rbf(x, y, z[:,sub_num-1], function='linear')
    zi = rbf(xi, yi)
    contour = plt.contour(xi,yi,zi, levels, colors='c', linestyles = 'dashed')
    contour2 = plt.contour(xi,yi,zi, levels2, colors='k', linewidths=1.5)
    # Star marker at the (hdens, phi) location of this line's maximum,
    # with the line's name and rounded peak value annotated.
    plt.scatter(max_values[line[sub_num-1],2], max_values[line[sub_num-1],3], c ='k',marker = '*')
    plt.annotate(headers[line[sub_num-1]], xy=(8,11),  xytext=(6,8.5), fontsize = 10)
    plt.annotate(max_values[line[sub_num-1],0], xy= (max_values[line[sub_num-1],2], max_values[line[sub_num-1],3]), xytext = (0, -10), textcoords = 'offset points', ha = 'right', va = 'bottom', fontsize=10)
    if sub_num == numplots / 2.:
        print "half the plots are complete"
    #axis limits
    yt_min = 8
    yt_max = 23
    xt_min = 0
    xt_max = 12
    plt.ylim(yt_min,yt_max)
    plt.xlim(xt_min,xt_max)
    plt.yticks(arange(yt_min+1,yt_max,1),fontsize=10)
    plt.xticks(arange(xt_min+1,xt_max,1), fontsize = 10)
    # Hide y tick labels for all but the left-hand column of panels.
    if sub_num in [2,3,4,6,7,8,10,11,12]:
        plt.tick_params(labelleft = 'off')
    else:
        plt.tick_params(labelleft = 'on')
        plt.ylabel('Log ($ \phi  _{\mathrm{H}}  $)')
    # Hide x tick labels for all but the bottom row of panels.
    if sub_num in [1,2,3,4,5,6,7,8]:
        plt.tick_params(labelbottom = 'off')
    else:
        plt.tick_params(labelbottom = 'on')
        plt.xlabel('Log($n _{\mathrm{H}} $)')
    # Corner panels get slightly different tick ranges so shared edges
    # do not print overlapping labels.
    if sub_num == 1:
        plt.yticks(arange(yt_min+1,yt_max+1,1),fontsize=10)
    if sub_num == 9:
        plt.yticks(arange(yt_min,yt_max,1),fontsize=10)
        plt.xticks(arange(xt_min,xt_max,1), fontsize = 10)
    if sub_num == 12:
        plt.xticks(arange(xt_min+1,xt_max+1,1), fontsize = 10)
# ---------------------------------------------------
#this is where the grid information (phi and hdens) is read in and saved to grid.
# Each .grd file is a tab-separated table; rows are accumulated as lists of
# strings and then converted to numpy arrays.  (Python 2 idiom: csv files
# are opened in binary 'rb' mode and csvReader.next() fetches the header.)
grid1 = [];
grid2 = [];
grid3 = [];
with open(gridfile1, 'rb') as f:
    csvReader = csv.reader(f,delimiter='\t')
    for row in csvReader:
        grid1.append(row);
grid1  = asarray(grid1)
with open(gridfile2, 'rb') as f:
    csvReader = csv.reader(f,delimiter='\t')
    for row in csvReader:
        grid2.append(row);
grid2  = asarray(grid2)
with open(gridfile3, 'rb') as f:
    csvReader = csv.reader(f,delimiter='\t')
    for row in csvReader:
        grid3.append(row);
grid3  = asarray(grid3)
#here is where the data for each line is read in and saved to dataEmissionlines
# The first row of each .txt table holds the emission-line names (headers).
dataEmissionlines1 = [];
dataEmissionlines2 = [];
dataEmissionlines3 = [];
with open(Elines1, 'rb') as f:
    csvReader = csv.reader(f,delimiter='\t')
    headers = csvReader.next()
    for row in csvReader:
        dataEmissionlines1.append(row);
dataEmissionlines1  = asarray(dataEmissionlines1)
with open(Elines2, 'rb') as f:
    csvReader = csv.reader(f,delimiter='\t')
    headers2 = csvReader.next()
    for row in csvReader:
        dataEmissionlines2.append(row);
dataEmissionlines2  = asarray(dataEmissionlines2)
with open(Elines3, 'rb') as f:
    csvReader = csv.reader(f,delimiter='\t')
    headers3 = csvReader.next()
    for row in csvReader:
        dataEmissionlines3.append(row);
dataEmissionlines3  = asarray(dataEmissionlines3)
print "import files complete"
# ---------------------------------------------------
#for concatenating grid
#pull the phi and hdens values from each of the runs. exclude header lines
grid1new = zeros((len(grid1[:,0])-1,2))
grid1new[:,0] = grid1[1:,6]
grid1new[:,1] = grid1[1:,7]
grid2new = zeros((len(grid2[:,0])-1,2))
# NOTE(review): run 2 gets a constant phi of 17.0 rather than a column read
# from its .grd file (columns 6/7 are used for runs 1 and 3); presumably
# that run's table lacks the phi column -- confirm against the .grd layout.
x = array(17.00000)
grid2new[:,0] = repeat(x,len(grid2[:,0])-1)
grid2new[:,1] = grid2[1:,6]
grid3new = zeros((len(grid3[:,0])-1,2))
grid3new[:,0] = grid3[1:,6]
grid3new[:,1] = grid3[1:,7]
# Stack the three runs into one (n, 2) array of [phi, hdens] rows.
grid = concatenate((grid1new,grid2new,grid3new))
hdens_values = grid[:,1]
phi_values = grid[:,0]
# ---------------------------------------------------
#for concatenating Emission lines data
# Drop the first column of each table before stacking; headers shrinks to match.
Emissionlines = concatenate((dataEmissionlines1[:,1:],dataEmissionlines2[:,1:],dataEmissionlines3[:,1:]))
#for lines
headers = headers[1:]
concatenated_data = zeros((len(Emissionlines),len(Emissionlines[0])))
max_values = zeros((len(concatenated_data[0]),4))
# ---------------------------------------------------
#constructing grid by scaling
#select the scaling factor
#for 1215
#incident = Emissionlines[1:,4]
#for 4860
incident = concatenated_data[:,57]
#take the ratio of incident and all the lines and put it all in an array concatenated_data
# Each intensity becomes log10(4860 * line / H-beta) (column 57 is H-beta);
# non-positive log values are clamped to 0.
for i in range(len(Emissionlines)):
    for j in range(len(Emissionlines[0])):
        if math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10) > 0:
            concatenated_data[i,j] = math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10)
        else:
            # Bug fix: the original wrote ``concatenated_data[i,j] == 0`` --
            # a no-op comparison, not an assignment.  The array is
            # zero-initialized above, so results are unchanged, but the
            # assignment states the intent.
            concatenated_data[i,j] = 0
# for 1215
#for i in range(len(Emissionlines)):
#    for j in range(len(Emissionlines[0])):
#         if math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10) > 0:
#             concatenated_data[i,j] = math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10)
#         else:
#             concatenated_data[i,j] == 0
# ---------------------------------------------------
#find the maxima to plot onto the contour plots
# For each emission line (column): peak ratio, row index of the peak, and
# the hdens/phi coordinates at that row.
for j in range(len(concatenated_data[0])):
    max_values[j,0] = max(concatenated_data[:,j])
    max_values[j,1] = argmax(concatenated_data[:,j], axis = 0)
    # NOTE(review): max_values is a float array, so max_values[j,1] is a
    # float index here -- accepted by old numpy, rejected by modern numpy;
    # confirm the numpy version this script targets.
    max_values[j,2] = hdens_values[max_values[j,1]]
    max_values[j,3] = phi_values[max_values[j,1]]
#to round off the maxima
max_values[:,0] = [ '%.1f' % elem for elem in max_values[:,0] ]
print "data arranged"
# ---------------------------------------------------
#Creating the grid to interpolate with for contours.
gridarray = zeros((len(concatenated_data),2))
gridarray[:,0] = hdens_values
gridarray[:,1] = phi_values
x = gridarray[:,0]
y = gridarray[:,1]
# ---------------------------------------------------
#change desired lines here!
# Column indices (into concatenated_data/headers) of the 12 IR lines to plot.
line = [75, #AR 3 7135
        76, #TOTL 7325
        78, #AR 3 7751
        79, #6LEV 8446
        80, #CA2X 8498
        81, #CA2Y 8542
        82, #CA2Z 8662
        83, #CA 2 8579A
        84, #S  3 9069
        85, #H  1 9229
        86, #S  3 9532
        87] #H  1 9546
#create z array for this plot with given lines
z = concatenated_data[:,line[:]]
# ---------------------------------------------------
# Interpolate
print "starting interpolation"
# 10x10 regular mesh spanning the data range, used by the Rbf evaluation.
xi, yi = linspace(x.min(), x.max(), 10), linspace(y.min(), y.max(), 10)
xi, yi = meshgrid(xi, yi)
# ---------------------------------------------------
print "interpolatation complete; now plotting"
#plot
plt.subplots_adjust(wspace=0, hspace=0) #remove space between plots
levels = arange(10**-1,10, .2)
levels2 = arange(10**-2,10**2, 1)
plt.suptitle("Dusty IR Lines", fontsize=14)
# ---------------------------------------------------
# NOTE(review): range(12) passes sub_num = 0..11, so the first call does
# plt.subplot(3,4,0); subplot indices are 1-based (sibling scripts in this
# series call add_sub_plot(i+1)) -- confirm whether panel 0 is intended.
for i in range(12):
    add_sub_plot(i)
ax1 = plt.subplot(3,4,1)
add_patches(ax1)
print "figure complete"
plt.savefig('Dusty_Near_IR.pdf')
plt.clf()
print "figure saved"
| gpl-2.0 |
niltonlk/nest-simulator | pynest/nest/tests/test_spatial/test_spatial_distributions.py | 7 | 30540 | # -*- coding: utf-8 -*-
#
# test_spatial_distributions.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Tests distribution of connections created with spatial distributions.
Original code by Daniel Hjertholm with Birgit Kriener.
Converted to automated test by Hans Ekkehard Plesser.
For theory, see
D. Hjertholm, Statistical tests for connection algorithms for
structured neural networks, MSc thesis, Norwegian University of
Life Science, 2013. http://hdl.handle.net/11250/189117.
"""
import math
import numpy as np
import numpy.random as rnd
import scipy.integrate
import scipy.stats
import scipy.special
import unittest
import nest
# Matplotlib is optional: plotting support is enabled only if it both
# imports cleanly and can actually open a figure window.
try:
    # for debugging
    from mpl_toolkits.mplot3d import Axes3D
    import matplotlib.pyplot as plt
    # make sure we can open a window; DISPLAY may not be set
    fig = plt.figure()
    plt.close(fig)
    PLOTTING_POSSIBLE = True
except Exception:
    PLOTTING_POSSIBLE = False
# If False, tests will be run; otherwise, a single case will be plotted.
DEBUG_MODE = False
# Constant defining sensitivity of test (minimal p-value to pass)
P_MIN = 0.1
# Seed for all simulations
SEED = 1234567
class SpatialTester(object):
    """Tests for spatially structured networks.

    Builds a NEST spatial network with a chosen connection-probability
    profile, then statistically compares the realized connections against
    the analytically expected distance distribution (KS test) and expected
    connection count (Z test).
    """
    def __init__(self, seed, dim, L, N, spatial_distribution, distribution_params=None,
                 open_bc=False, x0=0., y0=0.):
        """
        Construct a test object.

        Parameters
        ----------
        seed : Random seed for test
        dim : Dimensions (2 or 3)
        L : Side length of area / volume.
        N : Number of nodes.
        spatial_distribution : Name of spatial distribution to use.
        distribution_params : Dict with params to update.
        open_bc : Network with open boundary conditions
        x0, y0 : Location of source neuron; open_bc only

        Note
        ----
        For each new distribution to be added, the following needs to be
        defined:

        self._<distribution> : function implementing distribution function
        spatial_distributions entry : mapping distribution name to distribution function
        default_params : default set of parameters for distribution
        """
        if dim != 2 and open_bc:
            raise ValueError("open_bc only supported for 2D")
        self._seed = seed
        self._dimensions = dim
        self._L = float(L)
        self._N = N
        self._open_bc = open_bc
        self._x_d, self._y_d = x0, y0
        # Maximum possible source-target distance and the matching pdf
        # depend on dimensionality and boundary conditions (boundaries are
        # periodic unless open_bc).  NOTE(review): dim values other than
        # 2 or 3 leave _max_dist/_pdf unset.
        if (self._dimensions == 2):
            if (self._open_bc):
                self._max_dist = self._L * math.sqrt(2)
                self._pdf = self._pdf_2d_obc
            else:
                self._max_dist = self._L / math.sqrt(2)
                self._pdf = self._pdf_2d
        elif (self._dimensions == 3):
            self._max_dist = self._L * math.sqrt(3) / 2
            self._pdf = self._pdf_3d
        # Distance data is created lazily on first test invocation.
        self._target_dists = None
        self._all_dists = None
        spatial_distributions = {
            'constant': self._constant,
            'linear': self._linear,
            'exponential': self._exponential,
            'gaussian': self._gauss,
            'gaussian2d': self._gauss2d,
            'gamma': self._gamma}
        self._distribution = spatial_distributions[spatial_distribution]
        default_params = {
            'constant': 1.,
            'linear': {'a': -math.sqrt(2) / self._L, 'c': 1.0},
            'exponential': {'a': 1.0, 'c': 0.0, 'tau': -self._L /
                            (math.sqrt(2) * math.log((.1 - 0) / 1))},
            'gaussian': {'p_center': 1., 'sigma': self._L / 4.,
                         'mean': 0., 'c': 0.},
            'gaussian2d': {'p_center': 1., 'sigma_x': self._L / 4., 'sigma_y': self._L / 4.,
                           'mean_x': 0.5, 'mean_y': 0.7, 'rho': 0.5, 'c': 0.},
            'gamma': {'kappa': 3., 'theta': self._L / 4.}}
        self._params = default_params[spatial_distribution]
        if distribution_params is not None:
            # 'constant' stores a bare number, not a dict, so it is
            # replaced rather than updated.
            if spatial_distribution == 'constant':
                self._params = distribution_params
            else:
                self._params.update(distribution_params)
        # Pre-calculate constant variables for efficiency
        self._calculate_constants(spatial_distribution)
        # Connection mask covering the whole layer (double-sized for open
        # boundary conditions, where the source may sit off-center).
        if self._dimensions == 3:
            maskdict = {'box': {'lower_left': [-self._L / 2.] * 3,
                                'upper_right': [self._L / 2.] * 3}}
        elif self._dimensions == 2 and not self._open_bc:
            maskdict = {'rectangular': {'lower_left': [-self._L / 2.] * 2,
                                        'upper_right': [self._L / 2.] * 2}}
        elif self._dimensions == 2 and self._open_bc:
            maskdict = {'rectangular': {'lower_left': [-self._L] * 2,
                                        'upper_right': [self._L] * 2}}
        # Build the NEST parameter object mirroring the chosen python-side
        # distribution; the two must agree for the statistical tests to pass.
        if spatial_distribution == 'constant':
            distribution = nest.CreateParameter('constant', {'value': self._params})
        elif spatial_distribution == 'linear':
            distribution = self._params['c'] + self._params['a'] * nest.spatial.distance
        elif spatial_distribution == 'exponential':
            distribution = self._params['c'] + nest.spatial_distributions.exponential(
                nest.spatial.distance,
                beta=self._params['tau'])
        elif spatial_distribution == 'gaussian':
            distribution = self._params['c'] + nest.spatial_distributions.gaussian(
                nest.spatial.distance,
                mean=self._params['mean'],
                std=self._params['sigma'])
        elif spatial_distribution == 'gaussian2d':
            distribution = self._params['c'] + nest.spatial_distributions.gaussian2D(
                nest.spatial.distance,
                nest.spatial.distance,
                mean_x=self._params['mean_x'],
                mean_y=self._params['mean_y'],
                std_x=self._params['sigma_x'],
                std_y=self._params['sigma_y'],
                rho=self._params['rho'])
        elif spatial_distribution == 'gamma':
            distribution = nest.spatial_distributions.gamma(
                nest.spatial.distance,
                kappa=self._params['kappa'],
                theta=self._params['theta'])
        self._conndict = {'rule': 'pairwise_bernoulli',
                          'p': distribution,
                          'mask': maskdict}
    def _calculate_constants(self, spatial_distribution):
        """Calculate constant variables used when calculating distributions and probability density functions

        Variables that can be pre-calculated are calculated here to make the calculation of distributions
        and probability density functions more efficient.
        """
        # Constants for spatial distribution functions
        if spatial_distribution == 'gaussian':
            self.two_sigma2 = 2. * self._params['sigma']**2
        if spatial_distribution == 'gaussian2d':
            self.sigmax2 = self._params['sigma_x']**2
            self.sigmay2 = self._params['sigma_y']**2
        elif spatial_distribution == 'gamma':
            self.kappa_m_1 = self._params['kappa'] - 1
            self.gamma_kappa_mul_theta_pow_kappa = (scipy.special.gamma(self._params['kappa']) *
                                                    self._params['theta']**self._params['kappa'])
        # Constants for pdfs
        x0, y0 = self._roi_2d()  # move coordinates to the right reference area
        # Constants used when using open boundary conditions:
        # distances from the source to the four layer edges.
        self.a = self._L / 2. - x0  # used to calculate alpha
        self.b = self._L / 2. - y0  # used to calculate beta
        self.c = self._L / 2. + x0  # used to calculate gamma
        self.d = self._L / 2. + y0  # used to calculate delta
        self.sqrt_a2_b2 = math.sqrt(self.a ** 2 + self.b ** 2)
        self.sqrt_a2_d2 = math.sqrt(self.a ** 2 + self.d ** 2)
        self.sqrt_d2_c2 = math.sqrt(self.d ** 2 + self.c ** 2)
        self._L_half = self._L / 2.
        self.pi_half = math.pi / 2.
        self.two_pi = 2 * math.pi
        self.four_pi = 4 * math.pi
    def _constant(self, _):
        """Constant spatial distribution"""
        return self._params
    def _linear(self, D):
        """Linear spatial distribution"""
        return self._params['c'] + self._params['a'] * D
    def _exponential(self, D):
        """Exponential spatial distribution"""
        return self._params['c'] + self._params['a'] * math.exp(-D / self._params['tau'])
    def _gauss(self, D):
        """Gaussian spatial distribution"""
        return (self._params['c'] +
                self._params['p_center'] *
                math.exp(-(D - self._params['mean']) ** 2 / self.two_sigma2))
    def _gauss2d(self, D):
        """Gaussian2D spatial distribution"""
        # Bivariate Gaussian with correlation rho, evaluated with the same
        # scalar D for both coordinates (mirrors the NEST parameter built
        # in __init__, which passes nest.spatial.distance twice).
        x_term = (D - self._params['mean_x'])**2 / self.sigmax2
        y_term = (D - self._params['mean_y'])**2 / self.sigmay2
        xy_term = (D - self._params['mean_x']) * (D - self._params['mean_y']) / \
            (self._params['sigma_x']*self._params['sigma_y'])
        return (self._params['c'] +
                self._params['p_center'] *
                math.exp(- (x_term + y_term - 2*self._params['rho']*xy_term)/(2*(1-self._params['rho']**2))))
    def _gamma(self, D):
        """Gamma spatial distribution"""
        return (D**self.kappa_m_1 /
                self.gamma_kappa_mul_theta_pow_kappa *
                math.exp(-D / self._params['theta']))
    def _create_distance_data(self):
        # Build and connect the network once, then cache sorted distances to
        # connected targets and distances to all nodes for the tests below.
        self._reset(self._seed)
        self._build()
        self._connect()
        self._target_dists = sorted(self._target_distances())
        self._all_dists = self._all_distances()
    def _reset(self, seed):
        """
        Reset NEST and seed PRNGs.

        Parameters
        ----------
        seed: PRNG seed value.
        """
        nest.ResetKernel()
        if seed is None:
            seed = rnd.randint(10 ** 10)
        seed = 3 * seed  # Reduces probability of overlapping seed values.
        rnd.seed(seed)
        nest.SetKernelStatus({'rng_seed': seed})
    def _build(self):
        """Create populations."""
        if self._open_bc:
            # Free positions without edge wrapping; the single source sits
            # at the user-supplied (x0, y0).
            x = rnd.uniform(-self._L / 2., self._L / 2., self._N)
            y = rnd.uniform(-self._L / 2., self._L / 2., self._N)
            pos = list(zip(x, y))
            self._ls = nest.Create('iaf_psc_alpha',
                                   positions=nest.spatial.free(
                                       [[self._x_d, self._y_d]],
                                       edge_wrap=False))
            self._lt = nest.Create('iaf_psc_alpha',
                                   positions=nest.spatial.free(
                                       pos,
                                       edge_wrap=False))
            self._driver = self._ls
        else:
            # Periodic boundaries; the driving source is the node closest
            # to the layer center.
            x = rnd.uniform(-self._L / 2., self._L / 2., self._N)
            y = rnd.uniform(-self._L / 2., self._L / 2., self._N)
            if self._dimensions == 3:
                z = rnd.uniform(-self._L / 2., self._L / 2., self._N)
                pos = list(zip(x, y, z))
            else:
                pos = list(zip(x, y))
            self._ls = nest.Create('iaf_psc_alpha',
                                   positions=nest.spatial.free(
                                       [[0.] * self._dimensions],
                                       [self._L] * self._dimensions,
                                       edge_wrap=True))
            self._lt = nest.Create('iaf_psc_alpha',
                                   positions=nest.spatial.free(
                                       pos, [self._L] * self._dimensions,
                                       edge_wrap=True))
            self._driver = nest.FindCenterElement(self._ls)
    def _connect(self):
        """Connect populations."""
        nest.Connect(self._ls, self._lt, self._conndict)
    def _all_distances(self):
        """Return distances to all nodes in target population."""
        return nest.Distance(self._driver, self._lt)
    def _target_distances(self):
        """Return distances from source node to connected nodes."""
        # Distance from source node to all nodes in target population
        dist = np.array(nest.Distance(self._driver, self._lt))
        # Target nodes
        connections = nest.GetConnections(source=self._driver)
        target_array = np.array(connections.target)
        # Convert lt node IDs to a NumPy array
        lt_array = np.array(self._lt.tolist())
        # Pick distance values of connected targets only
        target_dist = dist[np.isin(lt_array, target_array)]
        return target_dist
    def _positions(self):
        """Return positions of all nodes."""
        return [tuple(pos) for pos in
                nest.GetPosition(self._lt)]
    def _target_positions(self):
        """Return positions of all connected target nodes."""
        return [tuple(pos) for pos in
                nest.GetTargetPositions(self._driver, self._lt)[0]]
    def _roi_2d(self):
        """
        Moves coordinates (x,y) to triangle area (x',y') in [0,L/2]X[0,x']
        without loss of generality
        """
        # Reflect negative coordinates into the positive quadrant, then
        # order them so the returned pair satisfies x' >= y'.
        self._x_d = -self._x_d if (self._x_d >= -self._L / 2.) and (self._x_d < 0) else self._x_d
        self._y_d = -self._y_d if (self._y_d >= -self._L / 2.) and (self._y_d < 0) else self._y_d
        return np.array([self._x_d, self._y_d]) if self._x_d > self._y_d else np.array([self._y_d, self._x_d])
    def _pdf_2d(self, D):
        """Calculate the probability density function in 2D, at the distance D"""
        # Probability profile (clamped to [0,1]) times the perimeter of the
        # circle of radius D that lies inside the periodic square layer.
        if D <= self._L_half:
            return max(0., min(1., self._distribution(D))) * math.pi * D
        elif self._L_half < D <= self._max_dist:
            return max(0., min(1., self._distribution(D))) * D * (math.pi - 4. * math.acos(self._L / (D * 2.)))
        else:
            return 0.
    def _pdf_2d_obc(self, D):
        """Calculate the probability density function in 2D with open boundary conditions, at the distance D"""
        # calculate alpha, beta, gamma, delta: angles at which the circle of
        # radius D leaves the layer across each of the four edges.
        alpha = math.acos(self.a / D) if self.a / D <= 1. else 0.
        beta = math.acos(self.b / D) if self.b / D <= 1. else 0.
        gamma = math.acos(self.c / D) if self.c / D <= 1. else 0.
        delta = math.acos(self.d / D) if self.d / D <= 1. else 0.
        kofD = max(0., min(1., self._distribution(D)))
        if (D >= 0) and (D < self.a):
            return self.two_pi * D * kofD
        if (self.sqrt_a2_b2 > self.c and self.sqrt_a2_d2 > self.c and
                D >= self.c and D < self.sqrt_a2_b2):
            return 2 * D * (math.pi - alpha - beta - delta - gamma) * kofD
        if D >= self.sqrt_d2_c2:
            return 0.
        # NOTE(review): distances falling in none of the branches above
        # return None implicitly, which would break scipy.integrate.quad in
        # _cdf.  For a centered source (x0=y0=0) the branches cover the whole
        # range; confirm intended behavior for off-center sources.
    def _pdf_3d(self, D):
        """Calculate the probability density function in 3D, at the distance D"""
        # Probability profile times the area of the sphere of radius D that
        # lies inside the periodic cubic layer (three regimes of overlap).
        if D <= self._L_half:
            return max(0., min(1., self._distribution(D))) * self.four_pi * D ** 2.
        elif self._L_half < D <= self._L / math.sqrt(2):
            return max(0., min(1., self._distribution(D))) * self.two_pi * D * (3. * self._L - 4. * D)
        elif self._L / math.sqrt(2) < D <= self._max_dist:
            A = self.four_pi * D ** 2.
            C = self.two_pi * D * (D - self._L_half)
            alpha = math.asin(1. / math.sqrt(2. - self._L ** 2. / (2. * D ** 2.)))
            beta = self.pi_half
            gamma = math.asin(math.sqrt((1. - .5 * (self._L / D) ** 2.) /
                                        (1. - .25 * (self._L / D) ** 2.)))
            T = D ** 2. * (alpha + beta + gamma - math.pi)
            return (max(0., min(1., self._distribution(D))) *
                    (A + 6. * C * (-1. + 4. * gamma / math.pi) - 48. * T))
        else:
            return 0.
    def _cdf(self, D):
        """
        Normalized cumulative distribution function (CDF).

        Parameters
        ----------
        D: Iterable of distances in interval [0, max_dist].

        Return values
        -------------
        List of CDF(d) for each distance d in D.
        """
        # Integrate the pdf segment-by-segment between consecutive query
        # points (D is assumed sorted), then accumulate and normalize by the
        # integral over the full distance range.
        cdf = np.zeros(len(D))
        last_d = 0.
        for i, d in enumerate(D):
            cdf[i] = scipy.integrate.quad(self._pdf, last_d, d)[0]
            last_d = d
        cdf = np.cumsum(cdf)
        top = scipy.integrate.quad(self._pdf, 0, self._max_dist)[0]
        normed_cdf = cdf / top
        return normed_cdf
    def ks_test(self):
        """
        Perform a Kolmogorov-Smirnov GOF test on the distribution
        of distances to connected nodes.

        Return values
        -------------
        KS statistic.
        p-value from KS test.
        """
        if self._target_dists is None:
            self._create_distance_data()
        ks, p = scipy.stats.kstest(self._target_dists, self._cdf,
                                   alternative='two_sided')
        return ks, p
    def z_test(self):
        """
        Perform a Z-test on the total number of connections.

        Return values
        -------------
        Standard score (z-score).
        Two-sided p-value.
        """
        if self._target_dists is None or self._all_dists is None:
            self._create_distance_data()
        num_targets = len(self._target_dists)
        # Connections are independent Bernoulli draws, so the expected count
        # is sum(p_i) and the variance is sum(p_i * (1 - p_i)).
        ps = ([max(0., min(1., self._distribution(D)))
               for D in self._all_dists])
        expected_num_targets = sum(ps)
        variance_num_targets = sum([p * (1. - p) for p in ps])
        if variance_num_targets == 0:
            # Degenerate case (all p in {0, 1}): no variability to test.
            return np.nan, 1.0
        else:
            sd = math.sqrt(variance_num_targets)
            z = abs((num_targets - expected_num_targets) / sd)
            p = 2. * (1. - scipy.stats.norm.cdf(z))
            return z, p
if PLOTTING_POSSIBLE:
    class PlottingSpatialTester(SpatialTester):
        """Add plotting capability to SpatialTester."""

        def __init__(self, seed, dim, L, N, spatial_distribution, distribution_params=None,
                     open_bc=False, x0=0., y0=0.):
            # Pure pass-through to the parent constructor.
            SpatialTester.__init__(self, seed, dim, L, N, spatial_distribution,
                                   distribution_params, open_bc, x0, y0)

        def show_network(self):
            """Plot nodes in the network."""
            # Adjust size of nodes in plot based on number of nodes.
            nodesize = max(0.01, round(111. / 11 - self._N / 1100.))
            figsize = (8, 6) if self._dimensions == 3 else (6, 6)
            fig = plt.figure(figsize=figsize)
            positions = self._positions()
            connected = self._target_positions()
            # everything not connected to the source node
            not_connected = set(positions) - set(connected)
            x1 = [pos[0] for pos in not_connected]
            y1 = [pos[1] for pos in not_connected]
            x2 = [pos[0] for pos in connected]
            y2 = [pos[1] for pos in connected]
            if self._dimensions == 2:
                plt.scatter(x1, y1, s=nodesize, marker='o', color='grey')
                plt.scatter(x2, y2, s=nodesize, marker='o', color='red')
            if self._dimensions == 3:
                ax = fig.add_subplot(111, projection='3d')
                z1 = [pos[2] for pos in not_connected]
                z2 = [pos[2] for pos in connected]
                ax.scatter(x1, y1, z1, s=nodesize, marker='o', color='grey')
                ax.scatter(x2, y2, z2, s=nodesize, marker='o', color='red')
            plt.xlabel(r'$x$', size=24)
            plt.ylabel(r'$y$', size=24)
            plt.xticks(size=16)
            plt.yticks(size=16)
            # slightly wider than the unit square so edge nodes stay visible
            plt.xlim(-0.505, 0.505)
            plt.ylim(-0.505, 0.505)
            plt.subplots_adjust(bottom=0.15, left=0.17)

        def show_CDF(self):
            """
            Plot the cumulative distribution function (CDF) of
            source-target distances.
            """
            plt.figure()
            x = np.linspace(0, self._max_dist, 1000)
            cdf = self._cdf(x)
            plt.plot(x, cdf, '-', color='black', linewidth=3,
                     label='Theory', zorder=1)
            # Empirical CDF steps: (i+1)/N at each target distance.
            # NOTE(review): assumes self._target_dists is sorted -- confirm
            # _create_distance_data guarantees this.
            y = [(i + 1.) / len(self._target_dists)
                 for i in range(len(self._target_dists))]
            plt.step([0.0] + self._target_dists, [0.0] + y, color='red',
                     linewidth=1, label='Empirical', zorder=2)
            plt.ylim(0, 1)
            plt.xlabel('Distance')
            plt.ylabel('CDF')
            plt.legend(loc='center right')

        def show_PDF(self, bins=100):
            """
            Plot the probability density function of source-target distances.

            Parameters
            ----------
            bins: Number of histogram bins for PDF plot.
            """
            plt.figure()
            x = np.linspace(0, self._max_dist, 1000)
            # normalize the theoretical pdf to unit area for comparison
            area = scipy.integrate.quad(self._pdf, 0, self._max_dist)[0]
            y = np.array([self._pdf(D) for D in x]) / area
            plt.plot(x, y, color='black', linewidth=3, label='Theory',
                     zorder=1)
            # NOTE(review): `normed=True` was removed from matplotlib 3.x's
            # hist() -- confirm the pinned matplotlib still accepts it.
            plt.hist(self._target_dists, bins=bins, histtype='step',
                     linewidth=1, normed=True, color='red',
                     label='Empirical', zorder=2)
            plt.ylim(ymin=0.)
            plt.xlabel('Distance')
            plt.ylabel('PDF')
            plt.legend(loc='center right')
class TestSpatial2D(unittest.TestCase):
    """
    Test for 2D distributions.

    Each test builds a 10000-node 2D network with a given spatial
    distribution and checks that both goodness-of-fit tests pass.
    """

    def _check_distribution(self, distribution):
        """Run KS- and Z-tests for *distribution*; both p-values must
        exceed the P_MIN significance threshold."""
        test = SpatialTester(seed=SEED, dim=2, L=1.0, N=10000,
                             spatial_distribution=distribution)
        _, p_ks = test.ks_test()
        _, p_Z = test.z_test()
        self.assertGreater(p_ks, P_MIN, '{} failed KS-test'.format(distribution))
        self.assertGreater(p_Z, P_MIN, '{} failed Z-test'.format(distribution))

    def test_constant(self):
        self._check_distribution('constant')

    def test_linear(self):
        self._check_distribution('linear')

    def test_exponential(self):
        self._check_distribution('exponential')

    def test_gaussian(self):
        self._check_distribution('gaussian')

    def test_gaussian2d(self):
        self._check_distribution('gaussian2d')

    def test_gamma(self):
        self._check_distribution('gamma')
class TestSpatial2DOBC(unittest.TestCase):
    """
    Test for 2D distributions with open boundary conditions.

    Same cases as TestSpatial2D, but the tester is built with
    open_bc=True.
    """

    def _check_distribution(self, distribution):
        """Run KS- and Z-tests for *distribution* under open boundary
        conditions; both p-values must exceed P_MIN."""
        test = SpatialTester(seed=SEED, dim=2, L=1.0, N=10000,
                             spatial_distribution=distribution, open_bc=True)
        _, p_ks = test.ks_test()
        _, p_Z = test.z_test()
        self.assertGreater(p_ks, P_MIN, '{} failed KS-test'.format(distribution))
        self.assertGreater(p_Z, P_MIN, '{} failed Z-test'.format(distribution))

    def test_constant(self):
        self._check_distribution('constant')

    def test_linear(self):
        self._check_distribution('linear')

    def test_exponential(self):
        self._check_distribution('exponential')

    def test_gaussian(self):
        self._check_distribution('gaussian')

    def test_gaussian2d(self):
        self._check_distribution('gaussian2d')

    def test_gamma(self):
        self._check_distribution('gamma')
class TestSpatial3D(unittest.TestCase):
    """
    Test for 3D distributions.

    Same cases as TestSpatial2D (minus gaussian2d), built with dim=3.
    """

    def _check_distribution(self, distribution):
        """Run KS- and Z-tests for *distribution* in 3D; both p-values
        must exceed P_MIN."""
        test = SpatialTester(seed=SEED, dim=3, L=1.0, N=10000,
                             spatial_distribution=distribution)
        _, p_ks = test.ks_test()
        _, p_Z = test.z_test()
        self.assertGreater(p_ks, P_MIN, '{} failed KS-test'.format(distribution))
        self.assertGreater(p_Z, P_MIN, '{} failed Z-test'.format(distribution))

    def test_constant(self):
        self._check_distribution('constant')

    def test_linear(self):
        self._check_distribution('linear')

    def test_exponential(self):
        self._check_distribution('exponential')

    def test_gaussian(self):
        self._check_distribution('gaussian')

    def test_gamma(self):
        self._check_distribution('gamma')
def suite():
    """Bundle all spatial test cases into a single unittest suite."""
    loader = unittest.TestLoader()
    cases = (TestSpatial2D, TestSpatial2DOBC, TestSpatial3D)
    return unittest.TestSuite([loader.loadTestsFromTestCase(case)
                               for case in cases])
if __name__ == '__main__':
    if not DEBUG_MODE:
        # Normal operation: run the full statistical test suite.
        runner = unittest.TextTestRunner(verbosity=2)
        runner.run(suite())
    elif PLOTTING_POSSIBLE:
        # Debug mode: run a single 2D gaussian case and visualize it.
        test = PlottingSpatialTester(seed=SEED, dim=2, L=1.0, N=10000,
                                     spatial_distribution='gaussian')
        ks, p = test.ks_test()
        print('p-value of KS-test:', p)
        z, p = test.z_test()
        print('p-value of Z-test:', p)
        test.show_network()
        test.show_PDF()
        test.show_CDF()
        plt.show()
    else:
        # Debug mode requires matplotlib for the visual inspection.
        assert False, "DEBUG_MODE makes sense only if PLOTTING_POSSIBLE"
| gpl-2.0 |
nickgentoo/scikit-learn-graph | scripts/Keras_deep_calculate_cv_allkernels.py | 1 | 11280 | # -*- coding: utf-8 -*-
"""
Created on Fri Mar 13 13:02:41 2015
Copyright 2015 Nicolo' Navarin
This file is part of scikit-learn-graph.
scikit-learn-graph is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
scikit-learn-graph is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with scikit-learn-graph. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
from skgraph.feature_extraction.graph.ODDSTVectorizer import ODDSTVectorizer
from skgraph.feature_extraction.graph.NSPDK.NSPDKVectorizer import NSPDKVectorizer
from skgraph.feature_extraction.graph.WLVectorizer import WLVectorizer
from skgraph.datasets import load_graph_datasets
import numpy as np
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.optimizers import SGD
from keras.layers import Input
from keras.models import Model
from keras import regularizers
#tutorial from https://blog.keras.io/building-autoencoders-in-keras.html
class DeepNNVariableLayers:
    """Autoencoder with a configurable stack of dense ReLU layers.

    `layerSize` lists the encoder layer widths in order; the decoder
    mirrors all but the innermost width in reverse. After construction,
    `self.model` is the full autoencoder and `self.encoder` maps inputs
    to the innermost code. Uses the Keras 0.x/1.x functional API
    (`init=`, `Model(input=..., output=...)`).

    NOTE(review): `layerSize=None` default crashes on `layerSize[0:]`
    if omitted -- confirm callers always pass a list.
    """
    def __init__(self,inputDim,layerSize=None):
        # this is our input placeholder
        print layerSize
        # NOTE(review): this Sequential() is overwritten below by the
        # functional-API encoder Model; it appears to be dead code.
        self.encoder = Sequential()
        #input layer
        input_img = Input(shape=(inputDim,))
        encoded=input_img
        #encoded=Dense(layerSize[0], init='uniform', activation='relu')(input_img)
        #middle layers
        # stack every requested encoder layer
        for size in layerSize[0:]:
            encoded=Dense(size, init='uniform', activation='relu')(encoded)
        #output layer
        #decoded=Dense(layerSize[-2], init='uniform', activation='relu')(encoded)
        # mirror all but the innermost width to form the decoder
        first=True
        for size in reversed(layerSize[:-1]):
            if (first==True):
                decoded=Dense(size, init='uniform', activation='relu')(encoded)
                first=False
            else:
                decoded=Dense(size, init='uniform', activation='relu')(decoded)
        # single-layer case: decoder starts directly from the code
        if (first==True):
            decoded=encoded
        decoded = Dense(inputDim, activation='sigmoid')(decoded)
        self.model = Model(input=input_img, output=decoded)
        print self.model.summary()
        # this model maps an input to its encoded representation
        self.encoder = Model(input=input_img, output=encoded)
        print self.encoder.summary()
        self.model.compile(optimizer='adagrad', loss='binary_crossentropy') # adagrad
        #self.model = Sequential()
        #self.model.add(Dense(layerSize[0], input_dim=inputDim, init='uniform', activation='tanh'))
        #for size in layerSize[1:]:
        #    self.model.add(Dense(size, init='uniform', activation='tanh'))
class DeepNN:
    """Fixed three-layer dense autoencoder (Keras 0.x/1.x functional API).

    `layerSize` must provide at least three widths: the encoder is
    layerSize[0] -> layerSize[1] -> layerSize[2], and the decoder mirrors
    the first two widths back out to `inputDim`.
    """
    def __init__(self,inputDim,layerSize=None):
        # this is our input placeholder
        print layerSize
        input_img = Input(shape=(inputDim,))
        # "encoded" is the encoded representation of the input
        encoded = Dense(layerSize[0], activation='relu')(input_img)
        encoded = Dense(layerSize[1], activation='relu')(encoded)
        encoded = Dense(layerSize[2], activation='relu')(encoded)
        # add a Dense layer with a L1 activity regularizer
        #encoded = Dense(layerSize[0], activation='relu',
        #                activity_regularizer=regularizers.activity_l1(10e-3))(input_img)
        # "decoded" is the lossy reconstruction of the input
        decoded = Dense(layerSize[1], activation='relu')(encoded)
        decoded = Dense(layerSize[0], activation='relu')(decoded)
        decoded = Dense(inputDim, activation='sigmoid')(decoded)
        # this model maps an input to its reconstruction
        self.model = Model(input=input_img, output=decoded)
        print self.model.summary()
        # this model maps an input to its encoded representation
        self.encoder = Model(input=input_img, output=encoded)
        # create a placeholder for an encoded (32-dimensional) input
        encoded_input = Input(shape=(layerSize[2],))
        # retrieve the last layer of the autoencoder model
        decoder_layer = self.model.layers[-3]
        # create the decoder model
        # NOTE(review): this applies only one of the three decoder layers
        # to the encoded input, so self.decoder is not the full decoder --
        # confirm whether this is intentional.
        self.decoder = Model(input=encoded_input, output=decoder_layer(encoded_input))
        #self.model.compile(optimizer='adadelta', loss='binary_crossentropy')
        self.model.compile(optimizer='adagrad', loss='binary_crossentropy')
        #self.model = Sequential()
        #self.model.add(Dense(layerSize[0], input_dim=inputDim, init='uniform', activation='tanh'))
        #for size in layerSize[1:]:
        #    self.model.add(Dense(size, init='uniform', activation='tanh'))
        #self.model.add(Dense(outputDim, init='uniform', activation='relu'))
class DNN:
    """Single-hidden-layer dense autoencoder (Keras 0.x/1.x functional API).

    The encoder is one Dense ReLU layer of width layerSize[0]; the
    decoder is one sigmoid layer back to `inputDim`.
    """
    def __init__(self,inputDim,layerSize=None):
        # this is our input placeholder
        print layerSize
        input_img = Input(shape=(inputDim,))
        # "encoded" is the encoded representation of the input
        encoded = Dense(layerSize[0], activation='relu')(input_img)
        # add a Dense layer with a L1 activity regularizer
        #encoded = Dense(layerSize[0], activation='relu',
        #                activity_regularizer=regularizers.activity_l1(10e-1))(input_img)
        # "decoded" is the lossy reconstruction of the input
        decoded = Dense(inputDim, activation='sigmoid')(encoded)
        # this model maps an input to its reconstruction
        self.model = Model(input=input_img, output=decoded)
        # this model maps an input to its encoded representation
        self.encoder = Model(input=input_img, output=encoded)
        # create a placeholder for an encoded (32-dimensional) input
        encoded_input = Input(shape=(layerSize[0],))
        # retrieve the last layer of the autoencoder model
        decoder_layer = self.model.layers[-1]
        # create the decoder model
        self.decoder = Model(input=encoded_input, output=decoder_layer(encoded_input))
        #self.model.compile(optimizer='adadelta', loss='binary_crossentropy')
        self.model.compile(optimizer='adagrad', loss='binary_crossentropy')
        #self.model = Sequential()
        #self.model.add(Dense(layerSize[0], input_dim=inputDim, init='uniform', activation='tanh'))
        #for size in layerSize[1:]:
        #    self.model.add(Dense(size, init='uniform', activation='tanh'))
        #self.model.add(Dense(outputDim, init='uniform', activation='relu'))
if __name__=='__main__':
    # Usage: dataset r l kernel "h1 h2 ..." (hidden layer sizes as one string)
    if len(sys.argv)<1:
        sys.exit("python ODDKernel_example.py dataset r l kernel list_n_hidden_as_string")
    dataset=sys.argv[1]
    max_radius=int(sys.argv[2])
    la=float(sys.argv[3])
    #hashs=int(sys.argv[3])
    njobs=1
    #name=str(sys.argv[4])
    kernel=sys.argv[4]
    n_hidden = map(int,sys.argv[5].split() )
    #n_hidden=int(sys.argv[6])
    #FIXED PARAMETERS
    normalization=True
    # select the graph dataset to load
    if dataset=="CAS":
        print "Loading bursi(CAS) dataset"
        g_it=load_graph_datasets.load_graphs_bursi()
    elif dataset=="GDD":
        print "Loading GDD dataset"
        g_it=load_graph_datasets.load_graphs_GDD()
    elif dataset=="CPDB":
        print "Loading CPDB dataset"
        g_it=load_graph_datasets.load_graphs_CPDB()
    elif dataset=="AIDS":
        print "Loading AIDS dataset"
        g_it=load_graph_datasets.load_graphs_AIDS()
    elif dataset=="NCI1":
        print "Loading NCI1 dataset"
        g_it=load_graph_datasets.load_graphs_NCI1()
    elif dataset=="NCI109":
        print "Loading NCI109 dataset"
        g_it=load_graph_datasets.load_graphs_NCI109()
    elif dataset=="NCI123":
        print "Loading NCI123 dataset"
        g_it=load_graph_datasets.load_graphs_NCI123()
    elif dataset=="NCI_AIDS":
        print "Loading NCI_AIDS dataset"
        g_it=load_graph_datasets.load_graphs_NCI_AIDS()
    else:
        print "Unknown dataset name"
    # select the graph-kernel feature extractor
    if kernel=="WL":
        print "Lambda ignored"
        print "Using WL fast subtree kernel"
        Vectorizer=WLVectorizer(r=max_radius,normalization=normalization)
    elif kernel=="ODDST":
        print "Using ST kernel"
        Vectorizer=ODDSTVectorizer(r=max_radius,l=la,normalization=normalization)
    elif kernel=="NSPDK":
        print "Using NSPDK kernel, lambda parameter interpreted as d"
        Vectorizer=NSPDKVectorizer(r=max_radius,d=int(la),normalization=normalization)
    else:
        print "Unrecognized kernel"
    features=Vectorizer.transform(g_it.graphs) #Parallel ,njobs
    #print GM
    # GMsvm=[]
    # for i in xrange(len(GM)):
    #     GMsvm.append([])
    #     GMsvm[i]=[i+1]
    #     GMsvm[i].extend(GM[i])
    # #print GMsvm
    # from sklearn import datasets
    # print "Saving Gram matrix"
    # #datasets.dump_svmlight_file(GMsvm,g_it.target, name+".svmlight")
    # datasets.dump_svmlight_file(np.array(GMsvm),g_it.target, name+".svmlight")
    # #Test manual dump
    #LEARN AUTOENCODER
    print "Extracted", features.shape[1], "features from",features.shape[0],"examples."
    n=features.shape[0]
    densefeat=features.todense()
    # 80/20 train/test split of the dense feature matrix
    x_train=densefeat[:int(n*0.8),:]
    # TODO: this is wrong, use slicing!
    x_train = x_train.reshape((len(x_train), np.prod(features.shape[1])))
    x_test=densefeat[int(n*0.8):,:]
    print x_train.shape
    print x_test.shape
    #AutoEncoder=DeepNN(x_train.shape[1],layerSize=[n_hidden[0],n_hidden[1],n_hidden[2]])
    AutoEncoder=DeepNNVariableLayers(x_train.shape[1],layerSize=n_hidden)
    AutoEncoder.model.fit(x_train, x_train,
                          nb_epoch=10,
                          batch_size=256,
                          shuffle=True,
                          validation_data=(x_test, x_test))
    # encode and decode some digits
    # note that we take them from the *test* set
    encoded_features = AutoEncoder.encoder.predict(densefeat)
    print "features encoded in", encoded_features.shape[1], "features"
    from sklearn import cross_validation
    from sklearn.svm import SVC, LinearSVC
    clf = LinearSVC(C=100,dual=True) #, class_weight='auto'
    #clf = SVC(C=1,kernel='rbf',gamma=0.001) #, class_weight='auto'
    #
    y_train=g_it.target
    # kf = cross_validation.StratifiedKFold(y_train, n_folds=10, shuffle=True,random_state=42)
    # scores=cross_validation.cross_val_score(
    #     clf, encoded_features, y_train, cv=kf, scoring='accuracy')
    # print scores
    # print "Inner AUROC: %0.4f (+/- %0.4f)" % (scores.mean(), scores.std())
    #print GM
    ##############################################################################
    # Train classifiers
    #
    # For an initial search, a logarithmic grid with basis
    # 10 is often helpful. Using a basis of 2, a finer
    # tuning can be achieved but at a much higher cost.
    from sklearn.cross_validation import StratifiedShuffleSplit
    from sklearn.grid_search import GridSearchCV
    C_range = np.logspace(-2, 4, 7)
    # gamma_range = np.logspace(-9, 3, 13)
    param_grid = dict( C=C_range)
    cv = StratifiedShuffleSplit(y_train, n_iter=10, test_size=0.2, random_state=42)
    grid = GridSearchCV(LinearSVC(), param_grid=param_grid, cv=cv,verbose=10)
    print "starting grid search"
    grid.fit(encoded_features, y_train)
    #
    print("The best parameters are %s with a score of %0.4f"
          % (grid.best_params_, grid.best_score_))
murphy214/berrl | berrl/pipewidgets.py | 2 | 31866 | '''
Module: pipehtml.py
A module to parse html for data in static html and for data to be updated in real time.
Created by: Bennett Murphy
email: murphy214@marshall.edu
'''
import json
import itertools
import os
from IPython.display import IFrame
import ipywidgets as widgets
from math import floor
import numpy as np
import pandas as pd
from pipegeojson import *
from pipehtml import *
import time
from IPython.display import display
from quickmaps import *
# making html and writing html block
def make_write_html():
    """Write the static index.html scaffold (map container plus the
    leaflet/mapbox/jquery script and style includes) into the current
    working directory. The generated page loads its map logic from
    index.js, which the other functions in this module produce."""
    block = '''
<html>
<head>
<meta charset=utf-8 />
<title>PipeGeoJSON Demo</title>
<meta name='viewport' content='initial-scale=1,maximum-scale=1,user-scalable=no' />
<script src="https://api.mapbox.com/mapbox.js/v2.2.4/mapbox.js"></script>
<script src='https://api.mapbox.com/mapbox.js/plugins/leaflet-omnivore/v0.2.0/leaflet-omnivore.min.js'></script>
<script src="http://code.jquery.com/jquery-1.11.3.min.js"></script>
<script src="sha256.js"></script>
<link href='https://api.mapbox.com/mapbox.js/v2.2.4/mapbox.css' rel='stylesheet' />
<style>
body { margin:0; padding:0; }
#map { position:absolute; top:0; bottom:0; width:100%; }
</style>
</head>
<body>
<style>
table, th, td {
border: 1px solid black;
}
</style>
<div id='map'></div>
<script src="index.js"></script>
</body>
</html>
'''
    # 'wb' with a str payload is fine under Python 2, which this module targets
    with open ('index.html','wb') as f:
        f.write(block)
# builds the opening javascript of index.js
def make_startblock(apikey):
    """Assemble the opening javascript for index.js: strict-mode header,
    the mapbox access token line, the map construction, and the polling
    check() helper used for live data refreshes."""
    header = """'use strict';\n\n/*global L */\n"""
    token_line = "L.mapbox.accessToken = '" + apikey + "';"
    map_setup = '''
var map = L.mapbox.map('map', 'mapbox.streets',{
zoom: 5
});
'''
    poll_helper = '''
function check(map,dataLayer) {
$.getJSON('http://localhost:8000/data.json',function(data) { redraw(data,map,dataLayer); });
function redraw(data,map,dataLayer) {
console.log(data['value'])
if (data['value'] == true) {
map.removeLayer(dataLayer)
add1();
}
else {
setTimeout(function() {
check(map,dataLayer)
},500);
}
}
}'''
    return ''.join([header, token_line, map_setup, poll_helper])
# maps a human color name to its marker hex value
def get_colors(color_input):
    """Translate a human color name to a hex color string.

    A raw '#rrggbb' literal is passed straight through; unknown names
    fall back to the default blue '#1766B5'.
    """
    palette = {
        'light green': '#36db04',
        'blue': '#1717b5',
        'red': '#fb0026',
        'yellow': '#f9fb00',
        'light blue': '#00f4fb',
        'orange': '#dd5a21',
        'purple': '#6617b5',
        'green': '#1a7e55',
        'brown': '#b56617',
        'pink': '#F08080',
        'default': '#1766B5',
    }
    # raw hex literal given directly by the caller
    if '#' in color_input and len(color_input) == 7:
        return color_input
    return palette.get(color_input, '#1766B5')
# builds the marker-styling javascript line for point layers
def get_colorline_marker(color_input):
    """Return the js line styling a point marker; a 'feature.properties'
    expression is spliced in verbatim, anything else is resolved to a
    quoted hex color via get_colors."""
    template = """ layer.setIcon(L.mapbox.marker.icon({'marker-color': %s,'marker-size': 'small'}))"""
    if 'feature.properties' in str(color_input):
        return template % color_input
    return template % ("'%s'" % get_colors(color_input))
# builds the setStyle javascript line for non-point layers
def get_colorline_marker2(color_input):
    """Return the js line styling a line/polygon layer; property-driven
    colors get weight 6, fixed colors get weight 3 (matching the
    original behavior)."""
    if 'feature.properties' in str(color_input):
        return """ layer.setStyle({color: %s, weight: 6, opacity: 1});""" % color_input
    return """ layer.setStyle({color: '%s', weight: 3, opacity: 1});""" % get_colors(color_input)
# the function actually used to make the styles table
# headers for each geojson property parsed in here
# html table code comes out
def make_rows(headers):
    """Build the javascript lines that assemble an html popup table.

    The first header starts the popupText variable with the opening
    <table> markup; every subsequent header appends one <tr> row.
    Returns the list of javascript lines.

    Fixes: the original fell off the end (returning None, which broke
    make_bindings) and built a malformed, never-used string for the
    last header; both removed.
    """
    varblock = []
    for row in headers:
        if row == headers[0]:
            newrow = """ var popupText = "<table><tr><th>%s: </th><td>" + feature.properties['%s']+"</td></tr>"; """ % (row, row)
        else:
            newrow = """ var popupText = popupText+ "<tr><th>%s: </th><td>" + feature.properties['%s']+"</td></tr>"; """ % (row, row)
        varblock.append(newrow)
    return varblock
# make_blockstr with color and elment options added (newer)
# wraps the appropriate table html into the javascript functions called
def making_blockstr(varblock,count,colorline,element,time):
    """Wrap the popup-table lines (*varblock*) in the onEachFeature
    javascript function for geojson layer number *count*.

    `element` selects point vs. line/polygon placement of *colorline*;
    a non-empty `time` switches to the realtime variant that re-polls
    via check(). Layer 1 additionally fits the map bounds.
    """
    # starting wrapper that comes before html table code
    start = """\n\tfunction addDataToMap%s(data, map) {\n\t\tvar dataLayer = L.geoJson(data, {\n\t\t\tonEachFeature: function(feature, layer) {""" % (count)
    # ending wrapper that comes after html table code
    if time == '':
        if count == 1:
            end = """
layer.bindPopup(popupText, {autoPan:false, maxHeight:500, maxWidth:350} ); }
dataLayer.addTo(map); };\n\t};"""
        else:
            end = """
layer.bindPopup(popupText, {autoPan:false, maxHeight:500, maxWidth:350})}})
dataLayer.addTo(map);
\n\t};\n\t}"""
    else:
        # realtime variants poll check() after adding the layer
        if count == 1:
            end="""
layer.bindPopup(popupText, {autoPan:false, maxHeight:500, maxWidth:350})}})\n\t\tconsole.log(map.fitBounds(dataLayer.getBounds()))\n\t\tdataLayer.addTo(map)\n\t\tcheck(map,dataLayer)\n\t}\n}\n"""
        else:
            end="""
layer.bindPopup(popupText, {autoPan:false, maxHeight:500, maxWidth:350})}})\n\t\tdataLayer.addTo(map)\n\t\tcheck(map,dataLayer)\n\t}\n}\n"""
    # iterates through each varblock and returns the entire bindings javascript block
    total = ''
    for row in varblock:
        total += row
    # points style the marker inline; other geometries get the setStyle
    # line on its own row
    if element == 'Point':
        return start + total + colorline + end
    else:
        return start + total + '\n' + colorline + end
# make bindings after color options were added
def make_bindings(headers,count,colorline,element,time):
    """Build the popup-table rows for *headers* and wrap them in the
    javascript layer-binding function for geojson layer *count*."""
    return making_blockstr(make_rows(headers), count, colorline, element, time)
# makes the javascript function to call and load all geojson
def async_function_call(maxcount):
    """Return javascript that defines add() invoking add1()..addN() for
    the N geojson layers, then calls add() immediately."""
    calls = ''.join('\tadd%s();\n' % str(number)
                    for number in range(1, maxcount + 1))
    return 'function add() {\n' + calls + '}\nadd();'
# given a list of file names and kwargs carried throughout returns a string of the function bindings for each element
def make_bindings_type(filenames,color_input,colorkey,file_dictionary,time,portlist):
    """Build the full index.js javascript: one addN()/addDataToMapN()
    pair per geojson file (served from localhost on its paired port),
    followed by the async add() dispatcher.

    color is resolved per-file from *file_dictionary* when given, or
    from a geojson property named *colorkey*, else from *color_input*.
    """
    # instantiating string the main string block for the javascript block of html code
    string = ''
    # iterating through each geojson filename
    count = 0
    for filename,port in itertools.izip(filenames,portlist):
        # NOTE(review): this clobbers the caller-supplied color_input on
        # every iteration -- confirm that is intended.
        color_input = ''
        count += 1
        # reading in geojson file into memory
        with open(filename) as data_file:
            data = json.load(data_file)
        #pprint(data)
        # getting the featuretype which will later dictate what javascript splices are needed
        data = data['features']
        data = data[0]
        featuretype = data['geometry']
        featuretype = featuretype['type']
        data = data['properties']
        # code for if the file_dictionary input isn't false
        #(i.e. getting the color inputs out of dictionary variable)
        if not file_dictionary==False:
            try:
                color_input=file_dictionary[filename]
            except Exception:
                color_input=''
        # logic for getting the colorline for different feature types
        # the point feature requires a different line of code
        if featuretype == 'Point':
            colorline = get_colorline_marker(color_input)
        else:
            colorline = get_colorline_marker2(color_input)
        # logic for if a color key is given
        # HINT look here for rgb raw color integration in a color line
        if not colorkey == '':
            # rewrite the key into a feature-property expression once,
            # on the first file only
            if filename == filenames[0]:
                colorkey = """feature.properties['%s']""" % colorkey
            if featuretype == 'Point':
                colorline = get_colorline_marker(str(colorkey))
            else:
                colorline = get_colorline_marker2(str(colorkey))
        # this may be able to be deleted
        # test later
        # NOTE(review): this repeats the default-color branch above and
        # looks redundant -- verify before removing.
        if file_dictionary == False and colorkey == '':
            if featuretype == 'Point':
                colorline = get_colorline_marker(color_input)
            else:
                colorline = get_colorline_marker2(color_input)
        # iterating through each header
        headers = []
        for row in data:
            headers.append(str(row))
        # section of javascript code dedicated to the adding the data layer
        # (the first layer also constructs the map and fits its bounds)
        if count == 1:
            blocky = """
function add%s() {
\n\tfunction addDataToMap%s(data, map) {
\t\tvar dataLayer = L.geoJson(data);
\t\tvar map = L.mapbox.map('map', 'mapbox.streets',{
\t\t\tzoom: 5
\t\t\t}).fitBounds(dataLayer.getBounds());
\t\tdataLayer.addTo(map)
\t}\n""" % (count,count)
        else:
            blocky = """
function add%s() {
\n\tfunction addDataToMap%s(data, map) {
\t\tvar dataLayer = L.geoJson(data);
\t\tdataLayer.addTo(map)
\t}\n""" % (count,count)
        # making the string section that locally links the geojson file to the html document
        loc = """\t$.getJSON('http://localhost:%s/%s',function(data) { addDataToMap%s(data,map); });""" % (port,filename,count)
        # creating block to be added to the total or constituent string block
        # NOTE(review): both branches below are identical -- kept as-is.
        if featuretype == 'Point':
            bindings = make_bindings(headers,count,colorline,featuretype,time)+'\n'
            stringblock = blocky + loc + bindings
        else:
            bindings = make_bindings(headers,count,colorline,featuretype,time)+'\n'
            stringblock = blocky + loc + bindings
        # adding the stringblock (one geojson file javascript block) to the total string block
        string += stringblock
    # adding async function to end of string block
    string = string + async_function_call(count)
    return string
# checks to see if a legends inputs values exist if so returns a splice of code instantiating legend variable
def check_legend(legend):
    """Return the js splice that attaches the legend control, or an empty
    string when no legend title was supplied (legend[0] == '')."""
    has_legend = legend[0] != ''
    return ('var map2 = map.legendControl.addLegend(document.getElementById("legend").innerHTML);'
            if has_legend else '')
# returns the legend starting block for intially formatting the area the legend will occupy
def make_top():
    """Return the css <style> header sizing each legend entry at a fixed
    20% width (i.e. assumes a five-entry legend; see make_top2 for the
    variable-width variant)."""
    return '''<style>
.legend label,
.legend span {
display:block;
float:left;
height:15px;
width:20%;
text-align:center;
font-size:9px;
color:#808080;
}
</style>'''
# makes the legend if variables within the create legend function indicate a legend variable was given
def make_legend(title,colors,labels):
    """Return the fixed five-entry html legend block for *title*.

    NOTE(review): assumes exactly five colors and five labels; fewer
    raises IndexError -- confirm callers always pass five of each.
    """
    # resolve the human color names into hex values
    colorhashs=[]
    for row in colors:
        colorhashs.append(get_colors(row))
    return '''
<div id='legend' style='display:none;'>
<strong>%s</strong>
<nav class='legend clearfix'>
<span style='background:%s;'></span>
<span style='background:%s;'></span>
<span style='background:%s;'></span>
<span style='background:%s;'></span>
<span style='background:%s;'></span>
<label>%s</label>
<label>%s</label>
<label>%s</label>
<label>%s</label>
<label>%s</label>
<small>Source: <a href="https://github.com/murphy214/berrl">Made using Berrl</a></small>
</div>
''' % (title,colorhashs[0],colorhashs[1],colorhashs[2],colorhashs[3],colorhashs[4],labels[0],labels[1],labels[2],labels[3],labels[4])
# returns the blocks of color backgrounds for a given list of colors
def make_colors_legend(colors):
    """Return one html <span> background block per legend color."""
    return ''.join("""\t<span style='background:%s;'></span>\n""" % get_colors(color)
                   for color in colors)
# returns the block of labels for a given list of label values
def make_labels_legend(labels):
    """Return one html <label> line per legend label value."""
    return ''.join("""\t<label>%s</label>\n""" % label for label in labels)
# attempting to make a more dynamic legend in the same fashion as above
def make_legend2(title,colors,labels):
    """Return an html legend supporting any number of colors; *labels*
    is reduced to five visible values (the rest become blank spacer
    labels via get_5labels)."""
    start = """
<div id='legend' style='display:none;'>
<strong>%s</strong>
<nav class='legend clearfix'>
""" % title
    # code for creating color lines here
    colorsblock = make_colors_legend(colors)
    # code for getting 5 labels out of any amount of labels given
    labels = get_5labels(labels)
    # code for creating label lines here
    # this may also contain spacer values for every x colors to label
    labelsblock = make_labels_legend(labels)
    end = """\t<small>Source: <a href="https://github.com/murphy214/berrl">Made using Berrl</a></small>
</div>
"""
    total = start + colorsblock + labelsblock + end
    return total
# returns the legend starting block for intially formatting the area the legend will occupy
# returns the legend starting block for intially formatting the area the legend will occupy
def make_top2(rangelist):
    """Return the <style> header giving each legend cell an equal width share."""
    cell_width = 100.0 / float(len(rangelist))
    css = '''<style>
        .legend label,
        .legend span {
          display:block;
          float:left;
          height:15px;
          width:xx%;
          text-align:center;
          font-size:9px;
          color:#808080;
          }
        </style>'''
    # the 'xx' placeholder avoids %-escaping issues inside the CSS
    return css.replace('xx', str(cell_width))
# generates 5 labels and then inserts dummy spaces in each label value not used
# may eventually accept a number of labels right now assumes 5 and returns adequate dummy labels for inbetween values
def get_5labels(rangelist):
    """Round the range values and keep five of them as visible labels.

    Every position that is not one of the five evenly spaced label slots is
    replaced with a space-only string wide enough to keep the visible labels
    aligned (see get_dummy_space_label).
    """
    # getting the round value in which all labels will be rounded
    roundvalue = determine_delta_magnitude(rangelist)
    # getting newrangelist
    newrangelist = get_rounded_rangelist(rangelist,roundvalue)
    # getting maximum character size
    maxchar = get_maxchar_range(newrangelist)
    # getting maximum width size
    if '.' in str(newrangelist[1]):
        maxwidth = get_max_width_size(maxchar,False)
    else:
        maxwidth = get_max_width_size(maxchar,True)
    # getting the space label that occupies the maximum label size
    spacelabel = get_dummy_space_label(maxwidth)
    # getting label positions
    labelpositions = [0]
    # NOTE(review): integer division under Python 2; under Python 3 this is a
    # float and ``count == row`` below would rarely match -- confirm Python 2.
    labeldelta = len(newrangelist)/5
    currentlabelposition = 0
    # adding the 3 in between labels to the label positions list
    # this code could be modified to support a integer with the number of labels you desire
    while not len(labelpositions) == 5:
        currentlabelposition += labeldelta
        labelpositions.append(currentlabelposition)
    # iterating through the newrangelist and appending the correpsondding label based upon
    # the above strucuture
    count = 0
    newlist = []
    for row in newrangelist:
        # the inner loop below reuses the name ``row``; the current range
        # value is saved in ``oldrow`` first
        oldrow = row
        ind = 0
        for row in labelpositions:
            if count == row:
                ind = 1
        if ind == 1:
            # show whole numbers without a trailing .0
            if int(oldrow) == float(oldrow):
                oldrow = int(oldrow)
            newlist.append(oldrow)
        elif ind == 0:
            newlist.append(spacelabel)
        count +=1
    return newlist
# creating function the max len value of the ranges given
# creating function the max len value of the ranges given
def get_maxchar_range(rangelist):
    """Length of the longest string representation among the range values
    (0 for an empty list)."""
    return max([0] + [len(str(value)) for value in rangelist])
# gets the value that the rangelist should be rounded to
# in attempt to maintain significant figures on the rangelist
# gets the value that the rangelist should be rounded to
# in attempt to maintain significant figures on the rangelist
def determine_delta_magnitude(rangelist):
    """Return the number of decimal places to round to so that the gap
    between the first two range values keeps a significant figure.

    rangelist: sequence whose first two entries define the step size.
    """
    # getting the rangedelta
    delta = rangelist[1] - rangelist[0]
    current = -15
    # BUG fix: ``oldcurrent`` was unbound (UnboundLocalError) whenever
    # delta <= 1e-15 (e.g. equal endpoints); seed it with the starting
    # exponent so that case now rounds to 15 decimal places.
    oldcurrent = current
    while 10**current < delta:
        oldcurrent = current
        current +=1
    roundvalue = oldcurrent * -1
    return roundvalue
# returns a rangelist with the rounded to the value determined from determine_delta_magnitude
# returns a rangelist with the rounded to the value determined from determine_delta_magnitude
def get_rounded_rangelist(rangelist, roundvalue):
    """Round every range value to ``roundvalue`` decimal places."""
    return [round(value, roundvalue) for value in rangelist]
# getting width point size from the maxchar value
# getting width point size from the maxchar value
def get_max_width_size(maxcharsize, intbool):
    """Point-width of the widest label: one digit-width per character, plus
    a narrower decimal-point width when the values are floats."""
    digit_px = 6.673828125
    point_px = 3.333984375
    if intbool == False:
        # floats: one of the characters is the decimal point
        return (maxcharsize - 1) * digit_px + point_px
    return maxcharsize * digit_px
# generates a label of only spaces to occupy the label positions
# while avoiding overlapping with previous labels
# generates a label of only spaces to occupy the label positions
# while avoiding overlapping with previous labels
def get_dummy_space_label(maxwidthsize):
    """Return a run of spaces at least ``maxwidthsize`` points wide
    (each space counts as 3.333984375 points)."""
    spaces = 0
    covered = 0
    while covered < maxwidthsize:
        covered += 3.333984375
        spaces += 1
    return ' ' * spaces
# creating legend instance if needed
# creating legend instance if needed
def create_legend(title, colors, labels):
    """Return the full legend HTML, or '' when no title was supplied."""
    if title == '':
        return ''
    return make_top2(colors) + '\n' + make_legend2(title, colors, labels)
# NOTE(review): module-level leftover that shadows the builtin ``list`` for
# everything defined below -- confirm whether anything still reads it.
list = ['file.geojson']*100
# given a number of ports and filenames
# returns list of ports corresponding to each filename
# given a number of ports and filenames
# returns list of ports corresponding to each filename
def make_portlist(filenames,numberofports):
    """Spread the filenames over ``numberofports`` consecutive ports from 8000.

    Returns one port per filename (parallel to ``filenames``) and, as a side
    effect, writes the distinct ports to ports.csv.
    """
    # files per port; NOTE(review): numberofports == 0 raises
    # ZeroDivisionError, and numberofports > len(filenames) puts every file
    # on 8000 -- confirm callers keep 1 <= numberofports <= len(filenames).
    delta = len(filenames) / numberofports
    count = 0
    current = 8000
    portlist = []
    for row in filenames:
        count += 1
        portlist.append(current)
        # move to the next port once this one has its share of files
        if count == delta:
            count = 0
            current += 1
    ports = np.unique(portlist).tolist()
    ports = pd.DataFrame(ports,columns=['PORTS'])
    ports.to_csv('ports.csv',index=False)
    return portlist
# makes the corresponding styled html for the map were about to load
def make_js(filenames,color_input,colorkey,apikey,file_dictionary,legend,time,number_ports):
# logic for development and fast use
if apikey == True:
apikey = 'pk.eyJ1IjoibXVycGh5MjE0IiwiYSI6ImNpam5kb3puZzAwZ2l0aG01ZW1uMTRjbnoifQ.5Znb4MArp7v3Wwrn6WFE6A'
# getting port list from filenames
portlist = make_portlist(filenames,number_ports)
# making start block
startblock = make_startblock(apikey)
# functions for creating legend block even if legend doesn't exist
newlegend = create_legend(legend[0],legend[1],legend[2])
# making the bindings (i.e. the portion of the code that creates the javascript)
bindings = make_bindings_type(filenames,color_input,colorkey,file_dictionary,time,portlist)
# making the legend check
checklegend = check_legend(legend)
# creating the constituent block combining all the above portions of the html code block
block = startblock + bindings + checklegend
# making initial data.json to load layer(s)
# updating json object that will be hashed
lastupdate = {'value':False}
with open('data.json','wb') as jsonfile:
json.dump(lastupdate,jsonfile)
return block
# collection feature collecting all the geojson within the current directory
# collection feature collecting all the geojson within the current directory
def collect():
    """Return the names of all .geojson files under the current directory
    (recursively)."""
    found = []
    for _dirpath, _subdirs, files in os.walk(os.getcwd()):
        found += [name for name in files if name.endswith(".geojson")]
    return found
# writes the html file to a document then opens it up in safari (beware it will call a terminal command)
# writes the html file to a document then opens it up in safari (beware it will call a terminal command)
def load(lines,filename):
    """Write ``lines`` to ``filename`` and open the map in Safari.

    NOTE(review): the shell command always opens index.html regardless of
    ``filename``, and `open -a Safari` only exists on macOS -- confirm both
    are intended.
    """
    with open(filename,'w') as f:
        f.writelines(lines)
        f.close()
    os.system('open -a Safari index.html')
def show(url):
    # Embed ``url`` in a 400x400 IPython IFrame for inline notebook display.
    return IFrame(url, width=400, height=400)
# THE FUNCTION YOU ACTUALLY USE WITH THIS MODULE
# THE FUNCTION YOU ACTUALLY USE WITH THIS MODULE
def loadparsehtmlwidget(filenames,apikey,**kwargs):
    """Top-level entry point: build the html/js for the given geojson layers.

    Parameters
    ----------
    filenames : list of geojson filenames to bind into the map
    apikey : Mapbox access token, or True to use the baked-in dev token
    **kwargs : color, colorkey, frame, file_dictionary, legend, time, test,
        number_ports -- all optional; defaults set below.

    Writes index.html (via make_write_html) and index.js as side effects;
    returns the localhost URL when frame=True, otherwise None.
    """
    color = ''
    colorkey = ''
    frame = False
    file_dictionary = False
    legend = ['','','']
    time = 5000
    test = False
    number_ports = 1
    # BUG fix: dict.iteritems() is Python-2-only; items() exists in both 2
    # and 3, so the entry point no longer crashes under Python 3.
    for key,value in kwargs.items():
        if key == 'color':
            color = str(value)
        if key == 'colorkey':
            colorkey = str(value)
        if key == 'frame':
            if value == True:
                frame = True
        if key == 'file_dictionary':
            file_dictionary = value
        if key == 'legend':
            legend = value
        if key == 'time':
            time = int(value)
        if key == 'test':
            test = value
        if key == 'number_ports':
            number_ports = value
    # writing static html block
    make_write_html()
    # getting the html block parsed from geojson dependent data
    block = make_js(filenames,color,colorkey,apikey,file_dictionary,legend,time,number_ports)
    if frame == True:
        # the ``with`` block closes the file; the explicit close was redundant
        with open('index.js','w') as f:
            f.write(block)
        return 'http://localhost:8000/index.html'
    elif test == True:
        # print-as-function is valid in both Python 2 and 3 for one argument
        print(block)
    else:
        load(block,'index.js')
# GOAL/IDEA
# beginning to assemble functions to assemble different widgets
# the goal being the inputs are lazy and one can input a dataset and
# something like a dict intslider,floatslider,and a field dropdown
# as a type then a value: either a tuble min and max value or a field to querryby
# instantiates the widget for certain types of widgets
# field is the column in which the widgeet will be sliced
# type is the type of widget that it is (i.e. intslider,floatslider,dropdown thing)
# dict is an input that will be returned with an updated dictionary from the input dict
def assemble_widget_dicts(field,values,widget_type,dictlist):
    """Append a widget descriptor for ``field`` to ``dictlist`` and return it.

    values: (min, max) pair for the slider types, or the list of categorical
    options for 'Dropdown'.  Each descriptor stores the instantiated
    ipywidgets object(s) under the 'widget' key.
    """
    # if an empty dictionary is input for dictlist overwrites an empty list
    if dictlist == {}:
        dictlist = []
    # instantiating widget for integer slider
    if widget_type == 'IntSlider':
        minslider = widgets.IntSlider(description='Min ' + str(field),min=values[0],max=values[1],continuous_update=False)
        maxslider = widgets.IntSlider(description='Max ' + str(field),min=values[0],max=values[1],value=values[1],continuous_update=False)
        dictentry = {'type':'IntSlider','field':str(field),'widget':[minslider,maxslider]}
        dictlist.append(dictentry)
    # instantiating widget for float slider
    elif widget_type == 'FloatSlider':
        # getting significant figures of delta between min and maxx
        magnitude = determine_delta_magnitude([values[0],values[1]])
        # getting stepsize determined by the magnitude of difference
        # between min and max
        stepsize = 10 ** -(magnitude + 2)
        # clamp the slider resolution at 1e-6
        if stepsize < 10**-6:
            stepsize = 10 ** -6
        # pad the endpoints by half a step so both extremes stay selectable
        minvalue = round(values[0]-(.5*stepsize),magnitude+1)
        maxvalue = round(values[1]+(.5*stepsize),magnitude+1)
        # setting min and max slider
        minslider = widgets.FloatSlider(description='Min ' + str(field),min=minvalue,max=maxvalue,step=stepsize,value=minvalue,continuous_update=False)
        maxslider = widgets.FloatSlider(description='Max ' + str(field),min=minvalue,max=maxvalue,step=stepsize,value=maxvalue,continuous_update=False)
        # adding dictentry which will be updated to the widget dictlist
        dictentry = {'type':'FloatSlider','field':str(field),'widget':[minslider,maxslider]}
        dictlist.append(dictentry)
    elif widget_type == 'Dropdown':
        # given a list of unique categorical values returns widget with dropdown
        # for each value given
        # NOTE(review): Python-2 print statement -- looks like leftover debug
        print values
        dropdownwidget = widgets.Dropdown(description=str(field), options=values)
        dropdownwidget.padding = 4
        dictentry = {'type':'Dropdown','field':str(field),'widget':dropdownwidget}
        dictlist.append(dictentry)
    return dictlist
#assemble_widget_dicts('GEOHASH',['dnvfp6g'],'Dropdown',{})
# filters rows between a range and a field
# the range can contain either a float or an int
# filters rows between a range and a field
# the range can contain either a float or an int
def on_value_change(min,max):
    """Slider callback: rewrite points.geojson with rows whose LAT lies in
    the open interval (min, max)."""
    global data
    # getting header
    header = data.columns.values.tolist()
    # NOTE(review): the filtered column is hard-coded to LAT here
    new = data[(data.LAT>min)&(data.LAT<max)]
    if min < max and not len(new) == 0:
        make_points(new,list=True,filename='points.geojson')
    else:
        # empty slice or inverted range: emit a blank dummy layer instead
        dummy = make_dummy(header,'points')
        parselist(dummy,'points.geojson')
'''
list range filter for integers and floats concurrency block
#int_range.observe(on_value_change, names='value')
#widgets.interact(on_value_change,min=int_range1,max=int_range2)
'''
'''
dropdownwidget concurrency block
dropdownwidget.observe(slice_by_category, names='on_dropdown')
widgets.interact(slice_by_category,on_dropdown=uniques)
'''
def slice_by_category(on_dropdown):
    """Dropdown callback: rewrite points.geojson with the rows whose VAR23C
    column equals the selected value, or a blank dummy layer when the slice
    is empty."""
    global data
    field = 'VAR23C'  # NOTE(review): hard-coded column name
    header = data.columns.values.tolist()
    new = data[data[field]==on_dropdown]
    # BUG fix: the emptiness check must test the filtered slice ``new``;
    # the original tested len(data), so a miss never produced the dummy layer.
    if len(new) == 0:
        make_dummy(header,'points')
    else:
        make_points(new,list=True,filename='points.geojson')
# function that takes a list of fields and a dict list of fields and returns a list
# dataframe of appropriate filter level
# function that takes a list of fields and a dict list of fields and returns a list
# dataframe of appropriate filter level
def get_df(field,fieldlist,filtereddict):
    """Return the dataframe filtered up to the field *preceding* ``field``
    in ``fieldlist`` (i.e. the upstream widget's output).

    NOTE(review): if ``field`` is the first entry, ``oldrow`` is unbound and
    this raises UnboundLocalError; callers appear to guard with
    ``if fieldlist[0] == field`` before calling -- confirm.
    """
    for row in fieldlist:
        if row == field:
            return filtereddict[oldrow]
        oldrow = row
# attempting to make a function to instantiate all widgets
# it will accept a dataframe, a dictionary containing widgets
# in which it will be sliced, due to required inputs of 1 on the
# widget functions will instantiate globals and functions within the
# same function,data will be progressively filtered from its og set
# geo feature corresponding to the make_f unctions in pipegeojson
# 'blocks'
# 'points'
# 'line'
# 'polygon'
# although it will support multiple lines n
def instance_widgets(data,dictlist,ouput_filename,geo_feature_type):
    """Create one ipywidgets control per entry of ``dictlist``, wire each to
    a callback that re-slices ``data`` and rewrites the geojson layer, and
    display everything in a Tab widget.

    NOTE(review): this function relies on module-level globals (filename,
    filtereddata, filtereddict, tabs, ...) shared by the nested callbacks,
    and declares ``global field`` *after* assigning ``field`` locally, which
    is only tolerated (with a SyntaxWarning) by Python 2 -- confirm the
    intended runtime is Python 2.
    """
    # instancing filename for global use
    global filename
    global initialdata
    global filtereddict
    global dictlistglobal
    initialdata = data
    filename = ouput_filename
    count = 0
    fieldlist = []
    filtereddict = {}
    widgetslist = []
    dictlistglobal = dictlist
    # iterating through each row in dictlist (each widget)
    for row in dictlist:
        # appending row to fieldlist
        fieldlist.append(row['field'])
        #print row,count
        #raw_input('ddd')
        # instancing a global var for geo_feature_type
        global geotype
        geotype = geo_feature_type
        widget_type = row['type']
        if widget_type == 'FloatSlider' or widget_type == 'IntSlider':
            # getting field and passing in filtereddata/fields
            # as global paramters to wrap the created fruncton
            field = row['field']
            global filtereddata
            global field
            global geotype
            global filename
            global fieldlist
            field = row['field']
            # the first slider filters the original dataframe directly
            if count == 0:
                # function that takes to min and max
                # then slices df appropriately
                def on_value_change_first(min,max):
                    global filtereddata
                    global field
                    global geotype
                    global filename
                    global filtereddict
                    global initialdata
                    global fieldlist
                    # getting header values
                    header = initialdata.columns.values.tolist()
                    # slicing the df by min/max
                    new = initialdata[(initialdata[field]>=min)&(initialdata[field]<=max)]
                    '''
                    if len(new) == 0:
                        make_dummy(header,geo_feature_type)
                    else:
                        make_type(new,filename,geo_feature_type)
                    '''
                    # a dummy layer is written first; the real slice is
                    # written by make_type() at the end of this callback
                    make_dummy(header,geo_feature_type)
                    # toggle data.json so the page notices the layer changed
                    lastupdate = {'value':True}
                    with open('data.json','wb') as jsonfile:
                        json.dump(lastupdate,jsonfile)
                    time.sleep(.5)
                    # updating json object that will be hashed
                    lastupdate = {'value':False}
                    with open('data.json','wb') as jsonfile:
                        json.dump(lastupdate,jsonfile)
                    filtereddata = new
                    if len(filtereddict) == 0:
                        filtereddict = {field:filtereddata}
                    else:
                        filtereddict[field] = filtereddata
                    #if dictlistglobal[-1]['field'] == field and len(widgetslist) == len(dictlistglobal) and oldrange == 0:
                    # once every widget exists, tighten the other sliders to
                    # the ranges present in the freshly filtered data
                    if len(widgetslist) == len(dictlistglobal):
                        count = 0
                        oldrow = fieldlist[0]
                        # code to update slices here
                        for row in fieldlist[:]:
                            count += 1
                            if not dictlistglobal[count-1]['type'] == 'Dropdown':
                                minval,maxval = filtereddata[row].min(),filtereddata[row].max()
                                testval = tabs.children[count-1].children[0].children[1].value - tabs.children[count-1].children[0].children[0].value
                                print (maxval - minval),testval
                                if (maxval - minval) < testval:
                                    tabs.children[count-1].children[0].children[0].value = minval
                                    tabs.children[count-1].children[0].children[1].value = maxval
                    make_type(new,filename,geo_feature_type)
                # getting slider 1 and slider2
                slider1,slider2 = row['widget']
                # instantiating widget with the desired range slices/function mapping
                on_value_change_first(initialdata[field].min(),initialdata[field].max())
                newwidget = widgets.interactive(on_value_change_first,min=slider1,max=slider2)
                newwidget = widgets.Box(children=[newwidget])
                widgetslist.append(newwidget)
            else:
                field = row['field']
                global tabs
                global oldrange
                oldrange = 0
                # function that takes to min and max
                # then slices df appropriately
                def on_value_change(min,max):
                    global filtereddata
                    global field
                    global geotype
                    global filename
                    global filtereddict
                    global fieldlist
                    global tabs
                    global oldrange
                    field = fieldlist[-1]
                    if not dictlistglobal[-1]['field'] == field:
                        oldrange = 0
                    # pick up the upstream widget's filtered dataframe
                    if fieldlist[0] == field:
                        filtereddata = filtereddict[field]
                    else:
                        #raw_input('xxx')
                        filtereddata = get_df(field,fieldlist,filtereddict)
                    # getting header value
                    header = filtereddata.columns.values.tolist()
                    # slicing the df by min/max
                    new = filtereddata[(filtereddata[field]>=min)&(filtereddata[field]<=max)]
                    '''
                    if len(new) == 0:
                        make_dummy(header,geo_feature_type)
                    else:
                        make_type(new,filename,geo_feature_type)
                    '''
                    make_dummy(header,geo_feature_type)
                    # toggle data.json so the page notices the layer changed
                    lastupdate = {'value':True}
                    with open('data.json','wb') as jsonfile:
                        json.dump(lastupdate,jsonfile)
                    time.sleep(.5)
                    # updating json object that will be hashed
                    lastupdate = {'value':False}
                    with open('data.json','wb') as jsonfile:
                        json.dump(lastupdate,jsonfile)
                    filtereddata = new
                    filtereddict[field] = filtereddata
                    #if not dictlistglobal[-1]['field'] == field:
                    #    oldrange = 0
                    #if dictlistglobal[-1]['field'] == field and len(widgetslist) == len(dictlistglobal) and oldrange == 0:
                    if len(widgetslist) == len(dictlistglobal):
                        count = 0
                        oldrow = fieldlist[0]
                        # code to update slices here
                        for row in fieldlist[:]:
                            count += 1
                            if not dictlistglobal[count-1]['type'] == 'Dropdown':
                                minval,maxval = filtereddata[row].min(),filtereddata[row].max()
                                testval = tabs.children[count-1].children[0].children[1].value - tabs.children[count-1].children[0].children[0].value
                                print (maxval - minval),testval
                                if (maxval - minval) < testval:
                                    tabs.children[count-1].children[0].children[0].value = minval
                                    tabs.children[count-1].children[0].children[1].value = maxval
                    make_type(new,filename,geo_feature_type)
                # getting slider 1 and slider2
                slider1,slider2 = row['widget']
                # instantiating widget with the desired range slices/function mapping
                on_value_change(initialdata[field].min(),initialdata[field].max())
                newwidget = widgets.interactive(on_value_change,min=slider1,max=slider2)
                newwidget = widgets.Box(children=[newwidget])
                widgetslist.append(newwidget)
        elif widget_type == 'Dropdown':
            global fieldcategory
            global filtereddata
            global geotype
            global filename
            global filtereddict
            global fieldlist
            fieldcategory = row['field']
            uniques = ['ALL'] + np.unique(data[fieldcategory]).tolist()
            # function that slices by category input by
            # dropdown box within widget
            def slice_by_category(on_dropdown):
                global filtereddata
                global fieldcategory
                global geo_feature_type
                global filename
                global filtereddict
                global fieldlist
                filtereddata = get_df(fieldcategory,fieldlist,filtereddict)
                # getting header
                header = filtereddata.columns.values.tolist()
                # slicing category by appropriate field
                if not on_dropdown == 'ALL':
                    new = filtereddata[filtereddata[fieldcategory]==on_dropdown]
                elif on_dropdown == 'ALL':
                    new = filtereddata
                # updating json object that will be hashed
                lastupdate = {'value':True}
                # checking to see if data actually has values
                if len(new) == 0:
                    make_dummy(header,geotype)
                else:
                    make_type(new,filename,geotype)
                with open('data.json','wb') as jsonfile:
                    json.dump(lastupdate,jsonfile)
                time.sleep(.5)
                # updating json object that will be hashed
                lastupdate = {'value':False}
                with open('data.json','wb') as jsonfile:
                    json.dump(lastupdate,jsonfile)
                filtereddata = new
                filtereddict[fieldcategory] = filtereddata
                print np.unique(new[fieldcategory])
            # getting drop down feature from current row in dictlist
            dropdownwidget = row['widget']
            # instantiating widget for dropdown categorical values in a field
            slice_by_category('ALL')
            dropdownwidget.observe(slice_by_category, names='on_dropdown')
            newwidget = widgets.interactive(slice_by_category,on_dropdown=uniques)
            newwidget = widgets.Box(children = [newwidget])
            widgetslist.append(newwidget)
        print count
        count += 1
    # one tab per widget, titled with its field name
    tabs = widgets.Tab(children=widgetslist)
    count = 0
    for row in fieldlist:
        tabs.set_title(count,row)
        count += 1
    display(tabs)
| apache-2.0 |
hsiaoyi0504/scikit-learn | sklearn/metrics/cluster/unsupervised.py | 230 | 8281 | """ Unsupervised evaluation metrics. """
# Authors: Robert Layton <robertlayton@gmail.com>
#
# License: BSD 3 clause
import numpy as np
from ...utils import check_random_state
from ..pairwise import pairwise_distances
def silhouette_score(X, labels, metric='euclidean', sample_size=None,
                     random_state=None, **kwds):
    """Mean Silhouette Coefficient over all samples.

    For each sample the coefficient is ``(b - a) / max(a, b)``, where ``a``
    is the mean intra-cluster distance and ``b`` the mean distance to the
    nearest cluster the sample does not belong to.  Scores lie in [-1, 1];
    values near 0 indicate overlapping clusters.  Only defined when
    ``2 <= n_labels <= n_samples - 1``.

    Parameters
    ----------
    X : distance matrix (if ``metric == "precomputed"``) or feature array.
    labels : array of predicted cluster labels, one per sample.
    metric : metric name accepted by ``pairwise_distances``, or a callable.
    sample_size : optional size of a random subset to score instead.
    random_state : seed / RandomState, used only when ``sample_size`` is set.
    **kwds : forwarded to the distance function.

    Returns
    -------
    float -- the mean Silhouette Coefficient.
    """
    n_labels = len(np.unique(labels))
    n_samples = X.shape[0]
    # silhouette is undefined for a single cluster or all-singleton clusters
    if not 1 < n_labels < n_samples:
        raise ValueError("Number of labels is %d. Valid values are 2 "
                         "to n_samples - 1 (inclusive)" % n_labels)
    if sample_size is not None:
        rng = check_random_state(random_state)
        subset = rng.permutation(n_samples)[:sample_size]
        labels = labels[subset]
        if metric == "precomputed":
            # keep both the rows and the columns of the sampled points
            X = X[subset][:, subset]
        else:
            X = X[subset]
    return np.mean(silhouette_samples(X, labels, metric=metric, **kwds))
def silhouette_samples(X, labels, metric='euclidean', **kwds):
    """Silhouette Coefficient of every sample.

    Per sample the value is ``(b - a) / max(a, b)`` with ``a`` the mean
    intra-cluster distance and ``b`` the mean nearest-cluster distance.
    Best value 1, worst -1; values near 0 indicate overlapping clusters.
    Only defined when ``2 <= n_labels <= n_samples - 1``.

    Parameters
    ----------
    X : distance matrix (if ``metric == "precomputed"``) or feature array.
    labels : array of cluster labels, one per sample.
    metric : metric name accepted by ``pairwise_distances``, or a callable.
    **kwds : forwarded to the distance function.

    Returns
    -------
    array of shape [n_samples] with the per-sample coefficients.
    """
    distances = pairwise_distances(X, metric=metric, **kwds)
    intra = []
    nearest = []
    for i in range(labels.shape[0]):
        row = distances[i]
        intra.append(_intra_cluster_distance(row, labels, i))
        nearest.append(_nearest_cluster_distance(row, labels, i))
    A = np.array(intra)
    B = np.array(nearest)
    return (B - A) / np.maximum(A, B)
def _intra_cluster_distance(distances_row, labels, i):
"""Calculate the mean intra-cluster distance for sample i.
Parameters
----------
distances_row : array, shape = [n_samples]
Pairwise distance matrix between sample i and each sample.
labels : array, shape = [n_samples]
label values for each sample
i : int
Sample index being calculated. It is excluded from calculation and
used to determine the current label
Returns
-------
a : float
Mean intra-cluster distance for sample i
"""
mask = labels == labels[i]
mask[i] = False
if not np.any(mask):
# cluster of size 1
return 0
a = np.mean(distances_row[mask])
return a
def _nearest_cluster_distance(distances_row, labels, i):
"""Calculate the mean nearest-cluster distance for sample i.
Parameters
----------
distances_row : array, shape = [n_samples]
Pairwise distance matrix between sample i and each sample.
labels : array, shape = [n_samples]
label values for each sample
i : int
Sample index being calculated. It is used to determine the current
label.
Returns
-------
b : float
Mean nearest-cluster distance for sample i
"""
label = labels[i]
b = np.min([np.mean(distances_row[labels == cur_label])
for cur_label in set(labels) if not cur_label == label])
return b
| bsd-3-clause |
IDSIA/sacred | sacred/optional.py | 1 | 1570 | #!/usr/bin/env python
# coding=utf-8
import importlib
from sacred.utils import modules_exist
from sacred.utils import get_package_version, parse_version
def optional_import(*package_names):
    """Try to import every named module; return (success, first_module).

    On any ImportError the result is (False, None); otherwise all modules
    are imported and the first one is returned alongside True.
    """
    modules = []
    try:
        for name in package_names:
            modules.append(importlib.import_module(name))
    except ImportError:
        return False, None
    return True, modules[0]
def get_tensorflow():
    """Import TensorFlow behind a v1-compatible interface.

    TF >= 1.13.1 is loaded through ``tensorflow.compat.v1``; older releases
    are imported directly with a deprecation warning.
    """
    if get_package_version("tensorflow") < parse_version("1.13.1"):
        import warnings

        warnings.warn(
            "Use of TensorFlow 1.12 and older is deprecated. "
            "Use Tensorflow 1.13 or newer instead.",
            DeprecationWarning,
        )
        import tensorflow as tf

        return tf
    import tensorflow.compat.v1 as tf

    return tf
# Get libc in a cross-platform way and use it to also flush the c stdio buffers
# credit to J.F. Sebastians SO answer from here:
# http://stackoverflow.com/a/22434262/1388435
try:
    import ctypes
    from ctypes.util import find_library
except ImportError:
    # ctypes may be unavailable on restricted builds; callers must
    # tolerate libc being None
    libc = None
else:
    try:
        libc = ctypes.cdll.msvcrt  # Windows: msvcrt is the C runtime
    except OSError:
        # POSIX: resolve and load the system C library (e.g. libc.so.6)
        libc = ctypes.cdll.LoadLibrary(find_library("c"))
# Soft-dependency probes: optional_import keeps the module object when the
# import succeeds, modules_exist records availability only.
has_numpy, np = optional_import("numpy")
has_yaml, yaml = optional_import("yaml")
has_pandas, pandas = optional_import("pandas")
has_sqlalchemy = modules_exist("sqlalchemy")
has_mako = modules_exist("mako")
has_tinydb = modules_exist("tinydb", "tinydb_serialization", "hashfs")
has_tensorflow = modules_exist("tensorflow")
| mit |
ilyes14/scikit-learn | sklearn/covariance/tests/test_robust_covariance.py | 213 | 3359 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Virgile Fritsch <virgile.fritsch@inria.fr>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.validation import NotFittedError
from sklearn import datasets
from sklearn.covariance import empirical_covariance, MinCovDet, \
EllipticEnvelope
X = datasets.load_iris().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_mcd():
    """Exercise the FastMCD implementation over several dataset regimes."""
    cases = [
        # (n_samples, n_features, n_outliers, tol_loc, tol_cov, tol_support)
        (100, 5, 0, 0.01, 0.1, 80),        # small, no outliers
        (100, 5, 20, 0.01, 0.01, 70),      # small, medium contamination
        (100, 5, 40, 0.1, 0.1, 50),        # small, strong contamination
        (1000, 5, 450, 0.1, 0.1, 540),     # medium data set
        (1700, 5, 800, 0.1, 0.1, 870),     # large data set
        (500, 1, 100, 0.001, 0.001, 350),  # 1D data set
    ]
    for args in cases:
        launch_mcd_on_dataset(*args)
def launch_mcd_on_dataset(n_samples, n_features, n_outliers, tol_loc, tol_cov,
                          tol_support):
    """Fit MinCovDet on gaussian data with injected outliers and compare the
    estimates against inlier-only references.

    tol_loc / tol_cov are mean-squared-error ceilings for the location and
    covariance estimates; tol_support is the minimum number of samples the
    MCD support must retain.
    """
    rand_gen = np.random.RandomState(0)
    data = rand_gen.randn(n_samples, n_features)
    # add some outliers
    outliers_index = rand_gen.permutation(n_samples)[:n_outliers]
    # +/-5 offsets per feature, chosen at random
    outliers_offset = 10. * \
        (rand_gen.randint(2, size=(n_outliers, n_features)) - 0.5)
    data[outliers_index] += outliers_offset
    inliers_mask = np.ones(n_samples).astype(bool)
    inliers_mask[outliers_index] = False
    pure_data = data[inliers_mask]
    # compute MCD by fitting an object
    mcd_fit = MinCovDet(random_state=rand_gen).fit(data)
    T = mcd_fit.location_
    S = mcd_fit.covariance_
    H = mcd_fit.support_
    # compare with the estimates learnt from the inliers
    error_location = np.mean((pure_data.mean(0) - T) ** 2)
    assert(error_location < tol_loc)
    error_cov = np.mean((empirical_covariance(pure_data) - S) ** 2)
    assert(error_cov < tol_cov)
    assert(np.sum(H) >= tol_support)
    # Mahalanobis distances of the training data equal the stored dist_
    assert_array_almost_equal(mcd_fit.mahalanobis(data), mcd_fit.dist_)
def test_mcd_issue1127():
    """Regression test: X of shape (3, 1), i.e. n_support == n_samples,
    must not crash MinCovDet."""
    X = np.random.RandomState(0).normal(size=(3, 1))
    MinCovDet().fit(X)
def test_outlier_detection():
    """Smoke-test EllipticEnvelope on random gaussian data."""
    rnd = np.random.RandomState(0)
    X = rnd.randn(100, 10)
    clf = EllipticEnvelope(contamination=0.1)
    # predicting before fitting must raise NotFittedError
    assert_raises(NotFittedError, clf.predict, X)
    assert_raises(NotFittedError, clf.decision_function, X)
    clf.fit(X)
    y_pred = clf.predict(X)
    decision = clf.decision_function(X, raw_values=True)
    decision_transformed = clf.decision_function(X, raw_values=False)
    # raw decision values coincide with the Mahalanobis distances
    assert_array_almost_equal(
        decision, clf.mahalanobis(X))
    assert_array_almost_equal(clf.mahalanobis(X), clf.dist_)
    # accuracy against the all-inlier labelling
    assert_almost_equal(clf.score(X, np.ones(100)),
                        (100 - y_pred[y_pred == -1].size) / 100.)
    # transformed decision is negative exactly for predicted outliers
    assert(sum(y_pred == -1) == sum(decision_transformed < 0))
| bsd-3-clause |
theoryno3/pylearn2 | pylearn2/cross_validation/tests/test_train_cv_extensions.py | 49 | 1681 | """
Tests for TrainCV extensions.
"""
import os
import tempfile
from pylearn2.config import yaml_parse
from pylearn2.testing.skip import skip_if_no_sklearn
def test_monitor_based_save_best_cv():
    """Test MonitorBasedSaveBestCV end to end on a tiny random dataset."""
    # Skip before allocating any resources if sklearn is unavailable.
    skip_if_no_sklearn()
    handle, filename = tempfile.mkstemp()
    # BUG fix: mkstemp returns an open OS-level fd that was previously leaked.
    os.close(handle)
    try:
        trainer = yaml_parse.load(test_yaml_monitor_based_save_best_cv %
                                  {'save_path': filename})
        trainer.main_loop()
    finally:
        # clean up even if training fails
        os.remove(filename)
test_yaml_monitor_based_save_best_cv = """
!obj:pylearn2.cross_validation.TrainCV {
dataset_iterator:
!obj:pylearn2.cross_validation.dataset_iterators.DatasetKFold {
dataset:
!obj:pylearn2.testing.datasets.random_one_hot_dense_design_matrix
{
rng: !obj:numpy.random.RandomState { seed: 1 },
num_examples: 100,
dim: 10,
num_classes: 2,
},
},
model: !obj:pylearn2.models.autoencoder.Autoencoder {
nvis: 10,
nhid: 8,
act_enc: sigmoid,
act_dec: linear
},
algorithm: !obj:pylearn2.training_algorithms.bgd.BGD {
batch_size: 50,
line_search_mode: exhaustive,
conjugate: 1,
termination_criterion:
!obj:pylearn2.termination_criteria.EpochCounter {
max_epochs: 1,
},
cost: !obj:pylearn2.costs.autoencoder.MeanSquaredReconstructionError {
},
},
cv_extensions: [
!obj:pylearn2.cross_validation.train_cv_extensions.MonitorBasedSaveBestCV {
channel_name: train_objective,
save_path: %(save_path)s,
},
],
}
"""
| bsd-3-clause |
Mecanon/morphing_wing | experimental/comparison_model/actuator.py | 5 | 15329 | # -*- coding: utf-8 -*-
"""
Created on Fri Apr 15 17:27:40 2016
@author: Pedro Leal
"""
import math
from scipy.optimize import newton
import numpy as np
import matplotlib.pyplot as plt
class actuator():
    """
    Actuator object where inputs:
    - -(n): is a dictionary with keys 'x' and 'y', the coordinates of
      the forwards end in the global coordinate system (origin at the
      leading edge)
    - +(p): is a dictionary with keys 'x' and 'y', the coordinates of
      the afterwards end in the global coordinate system (origin at the
      leading edge)
    - J: is a dictionary with keys 'x' and 'y', the coordinates of
      the joint in the global coordinate system (origin at the
      leading edge)
    - material: linear or SMA
    """
    def __init__(self, geo_props, J, R = None, area = None, zero_stress_length = None,
                 eps_0 = None, k = None, material = 'linear', design = 'A',
                 actuator_type = 'spring'):
        """
        Initiate class and its basic attributes:
        - geo_props: dictionary with the following keys:
            - 'x-'/'y-': global coordinates of the forwards (-, n) end
            - 'x+'/'y+': global coordinates of the afterwards (+, p) end
            - 'area' (optional): cross section area (falls back to `area`)
            - 'actuator_type' (optional): 'wire' or 'spring'; defaults to
              'wire' when absent.  NOTE(review): the `actuator_type`
              keyword argument is never consulted -- only geo_props is.
            - 'D', 'N' (optional): spring coil diameter / number of coils
            - 'pulley_position' ('up'/'down'): required for design 'B'
        - J: dictionary with keys 'x' and 'y', the joint position (global)
        - R: pulley radius (design 'B'); stored only when given
        - design:
            - A: em X (crossed configuration)
            - B: pulley
            - C: co-linear
        - eps_0: initial strain (epsilon); exactly one of eps_0 and
          zero_stress_length should be given, the other is derived
        - k: linear spring elastic coefficient (material == 'linear')
        """
        # Store end coordinates in the local (joint-centred) system.
        self.x_n = geo_props['x-'] - J['x']  # Local Coordinates
        self.y_n = geo_props['y-'] - J['y']  # Local Coordinates
        self.x_p = geo_props['x+'] - J['x']  # Local Coordinates
        self.y_p = geo_props['y+'] - J['y']  # Local Coordinates
        self.x_J = J['x']  # Global Coordinates
        self.y_J = J['y']  # Global Coordinates
        self.material = material
        self.design = design
        # Projections of original wire length r
        self.r_1_0 = self.x_p - self.x_n
        self.r_2_0 = self.y_p - self.y_n
        self.length_r_0 = math.sqrt(self.r_1_0**2 + self.r_2_0**2)
        # Initializing current wire length r
        self.r_1 = self.r_1_0
        self.r_2 = self.r_2_0
        self.length_r = self.length_r_0
        # Rigid body distances from the joint to each anchor point.
        self.r_n = math.sqrt(self.x_n**2 + self.y_n**2)
        self.r_p = math.sqrt(self.x_p**2 + self.y_p**2)
        # Define initial values for theta and force.
        self.theta = 0
        self.F = 0.
        # Type of actuator.  Dict lookups only raise KeyError, so the
        # original bare `except:` clauses are narrowed to KeyError.
        try:
            self.actuator_type = geo_props['actuator_type']
        except KeyError:
            self.actuator_type = "wire"
        if self.actuator_type == 'spring':
            # Initializing the variables of the spring.
            try:
                self.D = geo_props['D']
            except KeyError:
                self.D = None
            try:
                self.N = geo_props['N']
            except KeyError:
                self.N = None
        # Pulley radius (design 'B').  The attribute only exists when R is
        # given, matching the original behaviour (a duplicated assignment
        # at the end of the original __init__ was removed).
        if R is not None:
            self.R = R
        # Cross section area: prefer geo_props, fall back to the keyword.
        try:
            self.area = geo_props['area']
        except KeyError:
            self.area = area
        if self.design == 'B':
            self.pulley_position = geo_props['pulley_position']
        self.sigma = None
        # In case zero_stress_length is not defined and initial strain is
        # known, calculate the zero stress length. If defined, calculate
        # the initial strain.
        if zero_stress_length is None and eps_0 is not None:
            self.eps_0 = eps_0
            self.eps = self.eps_0
            self.zero_stress_length = self.length_r_0/(1 + self.eps)
        elif zero_stress_length is not None and eps_0 is None:
            self.zero_stress_length = zero_stress_length
            self.eps_0 = self.length_r/self.zero_stress_length - 1.
            self.eps = self.eps_0
        if material == 'linear':
            self.k = k

    def calculate_theta(self, theta_0 = 0.):
        """
        Calculate angle for given deformation epsilon via the newton method.

        :param theta_0: initial guess for the Newton iteration (design 'A').
        :returns: the solved angle for design 'A'; design 'B' only updates
                  self.theta and returns None (as in the original code).
        """
        if self.design == "A":
            def diff_eq(theta):
                # Analytic derivative of eq(theta) for Newton's method.
                sin = math.sin(theta)
                cos = math.cos(theta)
                diff = (2.*self.x_p*cos - 2.*self.y_p*sin)*(self.x_p*sin + \
                    self.y_p*cos - self.y_n) - (2.*self.x_p*sin + \
                    2.*self.y_p*cos)*(self.x_p*cos - self.x_n - self.y_p*sin)
                return diff
            def eq(theta):
                # Residual |r(theta)|^2 - (eta * L0)^2: zero when the
                # rotated geometry matches the current strain state.
                eta = (self.eps - self.eps_0) + 1.
                sin = math.sin(theta)
                cos = math.cos(theta)
                r_1 = self.x_p*cos - self.y_p*sin - self.x_n
                r_2 = self.y_p*cos + self.x_p*sin - self.y_n
                return r_1**2 + r_2**2 - (eta*self.length_r_0)**2
            self.theta = newton(eq, theta_0, diff_eq, maxiter = 100)
            # Keep the angle within one revolution.
            if abs(self.theta) > math.pi:
                self.theta = self.theta % (2.*math.pi)
            return self.theta
        if self.design == "B":
            # Pulley design: wire length maps linearly to rotation.
            self.r_1 = (self.eps + 1)*self.zero_stress_length
            self.theta = (self.r_1 - self.r_1_0)/self.R
            if self.pulley_position == 'up':
                self.theta = - self.theta

    def update(self, theta = None):
        """If vector length r or theta is changed, new coordinates (r_1,
        r_2, length_r) and the strain eps are recalculated."""
        if theta is not None:
            self.theta = theta
        if self.design == 'A':
            # Rotate the + end by theta and re-measure the wire vector.
            self.r_1 = self.x_p*math.cos(self.theta) - \
                self.y_p*math.sin(self.theta) - self.x_n
            self.r_2 = self.y_p*math.cos(self.theta) + \
                self.x_p*math.sin(self.theta) - self.y_n
            self.length_r = math.sqrt(self.r_1**2 + self.r_2**2)
            self.eps = self.length_r/self.zero_stress_length - 1.
        elif self.design == 'B':
            # Pulley design: arc length theta*R is added to / removed from
            # the straight segment depending on the pulley side.
            delta_r = self.theta*self.R
            if self.pulley_position == "up":
                self.r_1 = self.r_1_0 - delta_r
            elif self.pulley_position == "down":
                self.r_1 = self.r_1_0 + delta_r
            self.r_2 = 0.
            self.eps = self.r_1/self.zero_stress_length - 1.

    def calculate_force(self, source = 'strain'):
        """Update and return the actuator force self.F.

        - source == 'strain': linear material gives F = k * eps * L0
          (SMA is not implemented yet).
        - source == 'sigma': F = area * sigma.
        Designs other than 'A'/'B' leave self.F unchanged.
        """
        if self.design == 'A' or self.design == 'B':
            if source == 'strain':
                if self.material == 'linear':
                    self.F = self.k*self.eps*self.zero_stress_length
                elif self.material == 'SMA':
                    # SMA constitutive model still missing.
                    print("Put Edwin code here")
            # Calculate force from stress and cross section
            elif source == 'sigma':
                self.F = self.area * self.sigma
        return self.F

    def calculate_torque(self):
        """Calculate torque given the actuator force: r x F (where r is in
        global coordinates)."""
        # Components of the force vector along the (unit) wire direction.
        F_1 = - self.F*self.r_1/self.length_r
        F_2 = - self.F*self.r_2/self.length_r
        if self.design == 'A' or self.design == 'C':
            # Cross product of the rotated + end position with the force.
            self.torque = (self.x_p*math.cos(self.theta) - \
                self.y_p*math.sin(self.theta))*F_2 - \
                (self.y_p*math.cos(self.theta) + \
                self.x_p*math.sin(self.theta))*F_1
        elif self.design == 'B':
            # Because SMA positive force is actually negative (the contrary
            # of the linear) we need separate statements for calculating
            # torque.
            if self.material == "linear":
                self.torque = - self.y_p*(self.F)
            elif self.material == "SMA":
                self.torque = - self.y_p*(-self.F)
        return self.torque

    def plot_actuator(self):
        """Plot the actuator segment and its joint with matplotlib.
        Linear actuators are drawn in blue, SMA in red, the joint in green.
        """
        if self.material == 'linear':
            colour = 'b'
        elif self.material == 'SMA':
            colour = 'r'
        if self.actuator_type == "wire":
            plt.axes().set_aspect('equal')
            plt.scatter([self.x_n + self.x_J, self.x_n + self.x_J + self.r_1],
                        [self.y_n + self.y_J, self.y_n + self.y_J + self.r_2],
                        c=colour)
            plt.scatter([self.x_J], [self.y_J], c='g')
            plt.plot([self.x_n + self.x_J, self.x_n + self.x_J + self.r_1],
                     [self.y_n + self.y_J, self.y_n + self.y_J + self.r_2],
                     colour)

    def find_limits(self, y, theta_0 = 0):
        """The actuator has two major constraints:
        A - Because there is no physical sense of an actuator that has any
        part of it outside of the aircraft, we need to find the maximum
        theta and eps the actuator can have without this taking place.
        B - When r+ and r- are aligned, but + is between - and J, we
        have the minimum length possible for the actuator. Below this,
        it is quite unrealistic.
        The maximum and minimum theta is defined by the smallest of
        theta_A and theta_B.

        :param y: dict with keys 'l' and 'u' -- presumably the lower and
                  upper surface heights; TODO confirm against callers.
        :param theta_0: initial guess for the Newton iterations.
        """
        if self.design == 'A' or self.design == 'C':
            # (An unused inner helper, eq_theta_B, was removed as dead
            # code; constraint B is handled analytically below.)
            def diff_eq(theta):
                # Derivative of eq_theta_A with respect to theta.
                sin = math.sin(theta)
                cos = math.cos(theta)
                diff = -a*(-self.x_p*sin - self.y_p*cos) + self.x_p*cos - self.y_p*sin
                return diff
            def eq_theta_A(theta):
                # Zero when the rotated + end lies on the line of slope `a`
                # through the origin at the surface height.  `a` is the
                # closure variable assigned right before each newton() call.
                sin = math.sin(theta)
                cos = math.cos(theta)
                return -a*(self.x_p*cos - self.y_p*sin - 0) + \
                    self.x_p*sin + self.y_p*cos - y['l']
            # Constraint B
            if self.r_n > self.r_p:
                self.max_theta_B = math.atan2(self.y_n*self.x_p - self.x_n*self.y_p,
                                              self.x_n*self.x_p + self.y_n*self.y_p)
                self.max_theta_B = np.sign(self.max_theta_B) * (abs(self.max_theta_B) % (2*math.pi))
                if self.max_theta_B > 0.:
                    self.min_theta_B = self.max_theta_B
                    self.max_theta_B = self.max_theta_B - 2*math.pi
                else:
                    self.min_theta_B = self.max_theta_B + 2*math.pi
            else:
                self.max_theta_B = -math.pi/2.
                self.min_theta_B = math.pi/2.
            # Constraint A
            # Avoid division by zero for when x_n is at the origin
            if abs(self.x_n) > 1e-4:
                if self.r_p >= abs(y['l']):
                    a = (y['l'] - self.y_n)/(0. - self.x_n)
                    self.max_theta_A = newton(eq_theta_A, theta_0, diff_eq, maxiter = 1000)
                else:
                    self.max_theta_A = -math.pi/2.
                if self.r_p >= abs(y['u']):
                    a = (y['u'] - self.y_n)/(0. - self.x_n)
                    self.min_theta_A = newton(eq_theta_A, theta_0, diff_eq, maxiter = 1000)
                else:
                    self.min_theta_A = math.pi/2.
            else:
                self.max_theta_A = -math.pi/2.
                self.min_theta_A = math.pi/2.
            self.max_theta_A = np.sign(self.max_theta_A) * (abs(self.max_theta_A) % (2*math.pi))
            self.min_theta_A = np.sign(self.min_theta_A) * (abs(self.min_theta_A) % (2*math.pi))
            self.max_theta = max(self.max_theta_A, self.max_theta_B)
            self.min_theta = min(self.min_theta_A, self.min_theta_B)
            # In case of full transformation, we have the maximum eps
            r_1 = self.x_p*math.cos(self.max_theta) - \
                self.y_p*math.sin(self.max_theta) - self.x_n
            r_2 = self.y_p*math.cos(self.max_theta) + \
                self.x_p*math.sin(self.max_theta) - self.y_n
            length_r = math.sqrt(r_1**2 + r_2**2)
            self.max_eps = length_r/self.zero_stress_length - 1.
            r_1 = self.x_p*math.cos(self.min_theta) - \
                self.y_p*math.sin(self.min_theta) - self.x_n
            r_2 = self.y_p*math.cos(self.min_theta) + \
                self.x_p*math.sin(self.min_theta) - self.y_n
            length_r = math.sqrt(r_1**2 + r_2**2)
            self.min_eps = length_r/self.zero_stress_length - 1.
        elif self.design == 'B':
            if self.material == 'linear':
                self.max_theta = - self.x_p/self.R
                if self.pulley_position == "up":
                    self.max_theta = - self.max_theta
            else:
                print("The SMA spring/wire does not limit theta!!!")

    def check_crossing_joint(self, tol = 1e-3):
        """Does the actuator cross the joint? Should not happen.

        :returns: True when the line through both ends passes within `tol`
                  of the joint (x = 0 in local coordinates).
        """
        # Rotated coordinates of the + end.
        x_p = self.x_p*math.cos(self.theta) - \
            self.y_p*math.sin(self.theta)
        y_p = self.y_p*math.cos(self.theta) + \
            self.x_p*math.sin(self.theta)
        # NOTE(review): divides by (x_p - self.x_n); a vertical actuator
        # would raise ZeroDivisionError -- confirm callers avoid that case.
        B = (y_p - self.y_n)/(x_p - self.x_n)
        y_at_J = self.y_n - B * self.x_n
        return abs(y_at_J) < tol
| mit |
mbkumar/pymatgen | pymatgen/util/tests/test_plotting.py | 4 | 1230 | import unittest
from pymatgen.util.plotting import periodic_table_heatmap, van_arkel_triangle
from pymatgen.util.testing import PymatgenTest
import matplotlib
class FuncTestCase(PymatgenTest):
    def test_plot_periodic_heatmap(self):
        """Smoke-test periodic_table_heatmap across its keyword options."""
        random_data = {'Te': 0.11083818874391202, 'Au': 0.7575629917425387,
                       'Th': 1.2475885304040335, 'Ni': -2.0354391922547705}
        # Every call just has to complete without raising.
        option_sets = (
            {"cmap": "plasma"},
            {},
            {"max_row": 7},
            {"max_row": 10},
            {"cbar_label_size": 18},
            {"cmap_range": [0, 1]},
            {"cbar_label": 'Hello World'},
            {"blank_color": 'white'},
            {"value_format": '%.4f'},
        )
        for options in option_sets:
            plt = periodic_table_heatmap(random_data, **options)

    def test_van_arkel_triangle(self):
        """Smoke-test van_arkel_triangle with and without annotations."""
        random_list = [("Fe", "C"), ("Ni", "F")]
        plt = van_arkel_triangle(random_list)
        plt = van_arkel_triangle(random_list, annotate=True)
# Allow running this test module directly via the unittest runner.
if __name__ == "__main__":
    unittest.main()
| mit |
DailyActie/Surrogate-Model | 01-codes/scikit-learn-master/sklearn/preprocessing/data.py | 1 | 67256 | # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Eric Martin <eric@ericmart.in>
# Giorgio Patrini <giorgio.patrini@anu.edu.au>
# License: BSD 3 clause
import numbers
import warnings
from itertools import chain, combinations
import numpy as np
from scipy import sparse
from ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2)
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..utils import check_array
from ..utils import deprecated
from ..utils.extmath import _incremental_mean_and_var
from ..utils.extmath import row_norms
from ..utils.fixes import combinations_with_replacement as combinations_w_r
from ..utils.sparsefuncs import (inplace_column_scale,
mean_variance_axis, incr_mean_variance_axis,
min_max_axis)
from ..utils.validation import check_is_fitted, FLOAT_DTYPES
# Python 2/3 compatible iterator versions of zip/map/range (six.moves).
zip = six.moves.zip
map = six.moves.map
range = six.moves.range
# Public API of this module.
__all__ = [
    'Binarizer',
    'KernelCenterer',
    'MinMaxScaler',
    'MaxAbsScaler',
    'Normalizer',
    'OneHotEncoder',
    'RobustScaler',
    'StandardScaler',
    'add_dummy_feature',
    'binarize',
    'normalize',
    'scale',
    'robust_scale',
    'maxabs_scale',
    'minmax_scale',
]
# Warning emitted (0.17 deprecation cycle, error from 0.19) whenever a 1d
# array is passed where a 2d array is expected.
DEPRECATION_MSG_1D = (
    "Passing 1d arrays as data is deprecated in 0.17 and will "
    "raise ValueError in 0.19. Reshape your data either using "
    "X.reshape(-1, 1) if your data has a single feature or "
    "X.reshape(1, -1) if it contains a single sample."
)
def _handle_zeros_in_scale(scale, copy=True):
''' Makes sure that whenever scale is zero, we handle it correctly.
This happens in most scalers when we have constant features.'''
# if we are fitting on 1D arrays, scale might be a scalar
if np.isscalar(scale):
if scale == .0:
scale = 1.
return scale
elif isinstance(scale, np.ndarray):
if copy:
# New array to avoid side-effects
scale = scale.copy()
scale[scale == 0.0] = 1.0
return scale
def scale(X, axis=0, with_mean=True, with_std=True, copy=True):
    """Standardize a dataset along any axis
    Center to the mean and component wise scale to unit variance.
    Read more in the :ref:`User Guide <preprocessing_scaler>`.
    Parameters
    ----------
    X : {array-like, sparse matrix}
        The data to center and scale.
    axis : int (0 by default)
        axis used to compute the means and standard deviations along. If 0,
        independently standardize each feature, otherwise (if 1) standardize
        each sample.
    with_mean : boolean, True by default
        If True, center the data before scaling.
    with_std : boolean, True by default
        If True, scale the data to unit variance (or equivalently,
        unit standard deviation).
    copy : boolean, optional, default True
        set to False to perform inplace row normalization and avoid a
        copy (if the input is already a numpy array or a scipy.sparse
        CSC matrix and if axis is 1).
    Notes
    -----
    This implementation will refuse to center scipy.sparse matrices
    since it would make them non-sparse and would potentially crash the
    program with memory exhaustion problems.
    Instead the caller is expected to either set explicitly
    `with_mean=False` (in that case, only variance scaling will be
    performed on the features of the CSC matrix) or to call `X.toarray()`
    if he/she expects the materialized dense array to fit in memory.
    To avoid memory copy the caller should pass a CSC matrix.
    See also
    --------
    :class:`sklearn.preprocessing.StandardScaler` to perform centering and
    scaling using the ``Transformer`` API (e.g. as part of a preprocessing
    :class:`sklearn.pipeline.Pipeline`)
    """
    X = check_array(X, accept_sparse='csc', copy=copy, ensure_2d=False,
                    warn_on_dtype=True, estimator='the scale function',
                    dtype=FLOAT_DTYPES)
    if sparse.issparse(X):
        # Sparse path: only variance scaling along axis=0 is supported;
        # centering would densify the matrix (see Notes above).
        if with_mean:
            raise ValueError(
                "Cannot center sparse matrices: pass `with_mean=False` instead"
                " See docstring for motivation and alternatives.")
        if axis != 0:
            raise ValueError("Can only scale sparse matrix on axis=0, "
                             " got axis=%d" % axis)
        if with_std:
            _, var = mean_variance_axis(X, axis=0)
            # Zero-variance (constant) features get scale 1, so the
            # in-place division below leaves them unchanged.
            var = _handle_zeros_in_scale(var, copy=False)
            inplace_column_scale(X, 1 / np.sqrt(var))
    else:
        X = np.asarray(X)
        if with_mean:
            mean_ = np.mean(X, axis)
        if with_std:
            scale_ = np.std(X, axis)
        # Xr is a view on the original array that enables easy use of
        # broadcasting on the axis in which we are interested in
        Xr = np.rollaxis(X, axis)
        if with_mean:
            Xr -= mean_
            mean_1 = Xr.mean(axis=0)
            # Verify that mean_1 is 'close to zero'. If X contains very
            # large values, mean_1 can also be very large, due to a lack of
            # precision of mean_. In this case, a pre-scaling of the
            # concerned feature is efficient, for instance by its mean or
            # maximum.
            if not np.allclose(mean_1, 0):
                warnings.warn("Numerical issues were encountered "
                              "when centering the data "
                              "and might not be solved. Dataset may "
                              "contain too large values. You may need "
                              "to prescale your features.")
                Xr -= mean_1
        if with_std:
            scale_ = _handle_zeros_in_scale(scale_, copy=False)
            Xr /= scale_
            if with_mean:
                mean_2 = Xr.mean(axis=0)
                # If mean_2 is not 'close to zero', it comes from the fact that
                # scale_ is very small so that mean_2 = mean_1/scale_ > 0, even
                # if mean_1 was close to zero. The problem is thus essentially
                # due to the lack of precision of mean_. A solution is then to
                # subtract the mean again:
                if not np.allclose(mean_2, 0):
                    warnings.warn("Numerical issues were encountered "
                                  "when scaling the data "
                                  "and might not be solved. The standard "
                                  "deviation of the data is probably "
                                  "very close to 0. ")
                    Xr -= mean_2
    return X
class MinMaxScaler(BaseEstimator, TransformerMixin):
    """Transforms features by scaling each feature to a given range.
    This estimator scales and translates each feature individually such
    that it is in the given range on the training set, i.e. between
    zero and one.
    The transformation is given by::
        X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
        X_scaled = X_std * (max - min) + min
    where min, max = feature_range.
    This transformation is often used as an alternative to zero mean,
    unit variance scaling.
    Read more in the :ref:`User Guide <preprocessing_scaler>`.
    Parameters
    ----------
    feature_range: tuple (min, max), default=(0, 1)
        Desired range of transformed data.
    copy : boolean, optional, default True
        Set to False to perform inplace row normalization and avoid a
        copy (if the input is already a numpy array).
    Attributes
    ----------
    min_ : ndarray, shape (n_features,)
        Per feature adjustment for minimum.
    scale_ : ndarray, shape (n_features,)
        Per feature relative scaling of the data.
        .. versionadded:: 0.17
           *scale_* attribute.
    data_min_ : ndarray, shape (n_features,)
        Per feature minimum seen in the data
        .. versionadded:: 0.17
           *data_min_* instead of deprecated *data_min*.
    data_max_ : ndarray, shape (n_features,)
        Per feature maximum seen in the data
        .. versionadded:: 0.17
           *data_max_* instead of deprecated *data_max*.
    data_range_ : ndarray, shape (n_features,)
        Per feature range ``(data_max_ - data_min_)`` seen in the data
        .. versionadded:: 0.17
           *data_range_* instead of deprecated *data_range*.
    """
    def __init__(self, feature_range=(0, 1), copy=True):
        self.feature_range = feature_range
        self.copy = copy
    # Deprecated aliases kept for the 0.17 -> 0.19 deprecation cycle.
    @property
    @deprecated("Attribute data_range will be removed in "
                "0.19. Use ``data_range_`` instead")
    def data_range(self):
        return self.data_range_
    @property
    @deprecated("Attribute data_min will be removed in "
                "0.19. Use ``data_min_`` instead")
    def data_min(self):
        return self.data_min_
    def _reset(self):
        """Reset internal data-dependent state of the scaler, if necessary.
        __init__ parameters are not touched.
        """
        # Checking one attribute is enough, because they are all set together
        # in partial_fit
        if hasattr(self, 'scale_'):
            del self.scale_
            del self.min_
            del self.n_samples_seen_
            del self.data_min_
            del self.data_max_
            del self.data_range_
    def fit(self, X, y=None):
        """Compute the minimum and maximum to be used for later scaling.
        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            The data used to compute the per-feature minimum and maximum
            used for later scaling along the features axis.
        """
        # Reset internal state before fitting
        self._reset()
        return self.partial_fit(X, y)
    def partial_fit(self, X, y=None):
        """Online computation of min and max on X for later scaling.
        All of X is processed as a single batch. This is intended for cases
        when `fit` is not feasible due to very large number of `n_samples`
        or because X is read from a continuous stream.
        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            The data used to compute the mean and standard deviation
            used for later scaling along the features axis.
        y : Passthrough for ``Pipeline`` compatibility.
        """
        feature_range = self.feature_range
        if feature_range[0] >= feature_range[1]:
            raise ValueError("Minimum of desired feature range must be smaller"
                             " than maximum. Got %s." % str(feature_range))
        if sparse.issparse(X):
            raise TypeError("MinMaxScaler does no support sparse input. "
                            "You may consider to use MaxAbsScaler instead.")
        X = check_array(X, copy=self.copy, ensure_2d=False, warn_on_dtype=True,
                        estimator=self, dtype=FLOAT_DTYPES)
        if X.ndim == 1:
            warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
        data_min = np.min(X, axis=0)
        data_max = np.max(X, axis=0)
        # First pass
        if not hasattr(self, 'n_samples_seen_'):
            self.n_samples_seen_ = X.shape[0]
        # Next steps: fold this batch's extrema into the running extrema.
        else:
            data_min = np.minimum(self.data_min_, data_min)
            data_max = np.maximum(self.data_max_, data_max)
            self.n_samples_seen_ += X.shape[0]
        data_range = data_max - data_min
        # Constant features (zero range) get scale 1 via
        # _handle_zeros_in_scale, so transform maps them to feature_range[0].
        self.scale_ = ((feature_range[1] - feature_range[0]) /
                       _handle_zeros_in_scale(data_range))
        self.min_ = feature_range[0] - data_min * self.scale_
        self.data_min_ = data_min
        self.data_max_ = data_max
        self.data_range_ = data_range
        return self
    def transform(self, X):
        """Scaling features of X according to feature_range.
        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            Input data that will be transformed.
        """
        check_is_fitted(self, 'scale_')
        X = check_array(X, copy=self.copy, ensure_2d=False, dtype=FLOAT_DTYPES)
        if X.ndim == 1:
            warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
        # In-place affine map: X * scale_ + min_.
        X *= self.scale_
        X += self.min_
        return X
    def inverse_transform(self, X):
        """Undo the scaling of X according to feature_range.
        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            Input data that will be transformed. It cannot be sparse.
        """
        check_is_fitted(self, 'scale_')
        X = check_array(X, copy=self.copy, ensure_2d=False, dtype=FLOAT_DTYPES)
        if X.ndim == 1:
            warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
        # Inverse of the affine map applied by transform.
        X -= self.min_
        X /= self.scale_
        return X
def minmax_scale(X, feature_range=(0, 1), axis=0, copy=True):
    """Transforms features by scaling each feature to a given range.
    This estimator scales and translates each feature individually such
    that it is in the given range on the training set, i.e. between
    zero and one.
    The transformation is given by::
        X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
        X_scaled = X_std * (max - min) + min
    where min, max = feature_range.
    This transformation is often used as an alternative to zero mean,
    unit variance scaling.
    Read more in the :ref:`User Guide <preprocessing_scaler>`.
    .. versionadded:: 0.17
       *minmax_scale* function interface to :class:`sklearn.preprocessing.MinMaxScaler`.
    Parameters
    ----------
    feature_range: tuple (min, max), default=(0, 1)
        Desired range of transformed data.
    axis : int (0 by default)
        axis used to scale along. If 0, independently scale each feature,
        otherwise (if 1) scale each sample.
    copy : boolean, optional, default is True
        Set to False to perform inplace scaling and avoid a copy (if the input
        is already a numpy array).
    """
    # 1D input was accepted here before 0.17 and is still supported for
    # retro-compatibility, even though the scaler objects deprecate it.
    # We only inspect ndim here (copy=False); any requested copy happens
    # inside the MinMaxScaler itself.
    X = check_array(X, copy=False, ensure_2d=False, warn_on_dtype=True,
                    dtype=FLOAT_DTYPES)
    was_1d = X.ndim == 1
    if was_1d:
        # Promote to a single-feature column so the scaler accepts it.
        X = X.reshape(X.shape[0], 1)
    scaler = MinMaxScaler(feature_range=feature_range, copy=copy)
    # axis=1 is handled by scaling the transpose.
    X = scaler.fit_transform(X) if axis == 0 else scaler.fit_transform(X.T).T
    # Restore the original 1D shape if needed.
    return X.ravel() if was_1d else X
class StandardScaler(BaseEstimator, TransformerMixin):
    """Standardize features by removing the mean and scaling to unit variance
    Centering and scaling happen independently on each feature by computing
    the relevant statistics on the samples in the training set. Mean and
    standard deviation are then stored to be used on later data using the
    `transform` method.
    Standardization of a dataset is a common requirement for many
    machine learning estimators: they might behave badly if the
    individual feature do not more or less look like standard normally
    distributed data (e.g. Gaussian with 0 mean and unit variance).
    For instance many elements used in the objective function of
    a learning algorithm (such as the RBF kernel of Support Vector
    Machines or the L1 and L2 regularizers of linear models) assume that
    all features are centered around 0 and have variance in the same
    order. If a feature has a variance that is orders of magnitude larger
    that others, it might dominate the objective function and make the
    estimator unable to learn from other features correctly as expected.
    This scaler can also be applied to sparse CSR or CSC matrices by passing
    `with_mean=False` to avoid breaking the sparsity structure of the data.
    Read more in the :ref:`User Guide <preprocessing_scaler>`.
    Parameters
    ----------
    with_mean : boolean, True by default
        If True, center the data before scaling.
        This does not work (and will raise an exception) when attempted on
        sparse matrices, because centering them entails building a dense
        matrix which in common use cases is likely to be too large to fit in
        memory.
    with_std : boolean, True by default
        If True, scale the data to unit variance (or equivalently,
        unit standard deviation).
    copy : boolean, optional, default True
        If False, try to avoid a copy and do inplace scaling instead.
        This is not guaranteed to always work inplace; e.g. if the data is
        not a NumPy array or scipy.sparse CSR matrix, a copy may still be
        returned.
    Attributes
    ----------
    scale_ : ndarray, shape (n_features,)
        Per feature relative scaling of the data.
        .. versionadded:: 0.17
           *scale_* is recommended instead of deprecated *std_*.
    mean_ : array of floats with shape [n_features]
        The mean value for each feature in the training set.
    var_ : array of floats with shape [n_features]
        The variance for each feature in the training set. Used to compute
        `scale_`
    n_samples_seen_ : int
        The number of samples processed by the estimator. Will be reset on
        new calls to fit, but increments across ``partial_fit`` calls.
    See also
    --------
    :func:`sklearn.preprocessing.scale` to perform centering and
    scaling without using the ``Transformer`` object oriented API
    :class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
    to further remove the linear correlation across features.
    """
    def __init__(self, copy=True, with_mean=True, with_std=True):
        self.with_mean = with_mean
        self.with_std = with_std
        self.copy = copy
    # Deprecated alias kept for the 0.17 -> 0.19 deprecation cycle.
    @property
    @deprecated("Attribute ``std_`` will be removed in 0.19. Use ``scale_`` instead")
    def std_(self):
        return self.scale_
    def _reset(self):
        """Reset internal data-dependent state of the scaler, if necessary.
        __init__ parameters are not touched.
        """
        # Checking one attribute is enough, because they are all set together
        # in partial_fit
        if hasattr(self, 'scale_'):
            del self.scale_
            del self.n_samples_seen_
            del self.mean_
            del self.var_
    def fit(self, X, y=None):
        """Compute the mean and std to be used for later scaling.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape [n_samples, n_features]
            The data used to compute the mean and standard deviation
            used for later scaling along the features axis.
        y: Passthrough for ``Pipeline`` compatibility.
        """
        # Reset internal state before fitting
        self._reset()
        return self.partial_fit(X, y)
    def partial_fit(self, X, y=None):
        """Online computation of mean and std on X for later scaling.
        All of X is processed as a single batch. This is intended for cases
        when `fit` is not feasible due to very large number of `n_samples`
        or because X is read from a continuous stream.
        The algorithm for incremental mean and std is given in Equation 1.5a,b
        in Chan, Tony F., Gene H. Golub, and Randall J. LeVeque. "Algorithms
        for computing the sample variance: Analysis and recommendations."
        The American Statistician 37.3 (1983): 242-247:
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape [n_samples, n_features]
            The data used to compute the mean and standard deviation
            used for later scaling along the features axis.
        y: Passthrough for ``Pipeline`` compatibility.
        """
        X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
                        ensure_2d=False, warn_on_dtype=True,
                        estimator=self, dtype=FLOAT_DTYPES)
        if X.ndim == 1:
            warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
        # Even in the case of `with_mean=False`, we update the mean anyway
        # This is needed for the incremental computation of the var
        # See incr_mean_variance_axis and _incremental_mean_variance_axis
        if sparse.issparse(X):
            if self.with_mean:
                raise ValueError(
                    "Cannot center sparse matrices: pass `with_mean=False` "
                    "instead. See docstring for motivation and alternatives.")
            if self.with_std:
                # First pass: compute statistics from scratch.
                if not hasattr(self, 'n_samples_seen_'):
                    self.mean_, self.var_ = mean_variance_axis(X, axis=0)
                    self.n_samples_seen_ = X.shape[0]
                # Next passes: fold this batch into the running statistics.
                else:
                    self.mean_, self.var_, self.n_samples_seen_ = \
                        incr_mean_variance_axis(X, axis=0,
                                                last_mean=self.mean_,
                                                last_var=self.var_,
                                                last_n=self.n_samples_seen_)
            else:
                self.mean_ = None
                self.var_ = None
        else:
            # Dense path: initialize running statistics on the first pass,
            # then always update incrementally.
            if not hasattr(self, 'n_samples_seen_'):
                self.mean_ = .0
                self.n_samples_seen_ = 0
                if self.with_std:
                    self.var_ = .0
                else:
                    self.var_ = None
            self.mean_, self.var_, self.n_samples_seen_ = \
                _incremental_mean_and_var(X, self.mean_, self.var_,
                                          self.n_samples_seen_)
        if self.with_std:
            # Zero-variance features get scale 1 so transform is a no-op
            # for them instead of dividing by zero.
            self.scale_ = _handle_zeros_in_scale(np.sqrt(self.var_))
        else:
            self.scale_ = None
        return self
    def transform(self, X, y=None, copy=None):
        """Perform standardization by centering and scaling
        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            The data used to scale along the features axis.
        """
        check_is_fitted(self, 'scale_')
        copy = copy if copy is not None else self.copy
        X = check_array(X, accept_sparse='csr', copy=copy,
                        ensure_2d=False, warn_on_dtype=True,
                        estimator=self, dtype=FLOAT_DTYPES)
        if X.ndim == 1:
            warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
        if sparse.issparse(X):
            if self.with_mean:
                raise ValueError(
                    "Cannot center sparse matrices: pass `with_mean=False` "
                    "instead. See docstring for motivation and alternatives.")
            if self.scale_ is not None:
                inplace_column_scale(X, 1 / self.scale_)
        else:
            if self.with_mean:
                X -= self.mean_
            if self.with_std:
                X /= self.scale_
        return X
    def inverse_transform(self, X, copy=None):
        """Scale back the data to the original representation
        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            The data used to scale along the features axis.
        """
        check_is_fitted(self, 'scale_')
        copy = copy if copy is not None else self.copy
        if sparse.issparse(X):
            if self.with_mean:
                raise ValueError(
                    "Cannot uncenter sparse matrices: pass `with_mean=False` "
                    "instead See docstring for motivation and alternatives.")
            # Work in CSR; converting already produces a new matrix, so no
            # further copy is needed in that case.
            if not sparse.isspmatrix_csr(X):
                X = X.tocsr()
                copy = False
            if copy:
                X = X.copy()
            if self.scale_ is not None:
                inplace_column_scale(X, self.scale_)
        else:
            X = np.asarray(X)
            if copy:
                X = X.copy()
            # Inverse of transform: undo scaling first, then centering.
            if self.with_std:
                X *= self.scale_
            if self.with_mean:
                X += self.mean_
        return X
class MaxAbsScaler(BaseEstimator, TransformerMixin):
    """Scale each feature by its maximum absolute value.

    This estimator scales and translates each feature individually such
    that the maximal absolute value of each feature in the
    training set will be 1.0. It does not shift/center the data, and
    thus does not destroy any sparsity.

    This scaler can also be applied to sparse CSR or CSC matrices.

    .. versionadded:: 0.17

    Parameters
    ----------
    copy : boolean, optional, default is True
        Set to False to perform inplace scaling and avoid a copy (if the input
        is already a numpy array).

    Attributes
    ----------
    scale_ : ndarray, shape (n_features,)
        Per feature relative scaling of the data.

        .. versionadded:: 0.17
           *scale_* attribute.

    max_abs_ : ndarray, shape (n_features,)
        Per feature maximum absolute value.

    n_samples_seen_ : int
        The number of samples processed by the estimator. Will be reset on
        new calls to fit, but increments across ``partial_fit`` calls.
    """
    def __init__(self, copy=True):
        self.copy = copy
    def _reset(self):
        """Reset internal data-dependent state of the scaler, if necessary.

        __init__ parameters are not touched.
        """
        # Checking one attribute is enough, because they are all set together
        # in partial_fit
        if hasattr(self, 'scale_'):
            del self.scale_
            del self.n_samples_seen_
            del self.max_abs_
    def fit(self, X, y=None):
        """Compute the maximum absolute value to be used for later scaling.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape [n_samples, n_features]
            The data used to compute the per-feature minimum and maximum
            used for later scaling along the features axis.
        """
        # Reset internal state before fitting so that successive fit() calls
        # do not accumulate state the way partial_fit() does.
        self._reset()
        return self.partial_fit(X, y)
    def partial_fit(self, X, y=None):
        """Online computation of max absolute value of X for later scaling.

        All of X is processed as a single batch. This is intended for cases
        when `fit` is not feasible due to very large number of `n_samples`
        or because X is read from a continuous stream.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape [n_samples, n_features]
            The data used to compute the mean and standard deviation
            used for later scaling along the features axis.
        y : Passthrough for ``Pipeline`` compatibility.
        """
        X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
                        ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
        if X.ndim == 1:
            warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
        if sparse.issparse(X):
            # Sparse path: get per-column min and max without densifying,
            # then take the larger magnitude of the two.
            mins, maxs = min_max_axis(X, axis=0)
            max_abs = np.maximum(np.abs(mins), np.abs(maxs))
        else:
            max_abs = np.abs(X).max(axis=0)
        # First pass
        if not hasattr(self, 'n_samples_seen_'):
            self.n_samples_seen_ = X.shape[0]
        # Next passes: the running max_abs_ is merged with this batch's.
        else:
            max_abs = np.maximum(self.max_abs_, max_abs)
            self.n_samples_seen_ += X.shape[0]
        self.max_abs_ = max_abs
        # Columns whose max is exactly zero are left unscaled (scale 1).
        self.scale_ = _handle_zeros_in_scale(max_abs)
        return self
    def transform(self, X, y=None):
        """Scale the data

        Parameters
        ----------
        X : {array-like, sparse matrix}
            The data that should be scaled.
        """
        check_is_fitted(self, 'scale_')
        X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
                        ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
        if X.ndim == 1:
            warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
        if sparse.issparse(X):
            # Division by scale_ done in place on the sparse data array.
            inplace_column_scale(X, 1.0 / self.scale_)
        else:
            X /= self.scale_
        return X
    def inverse_transform(self, X):
        """Scale back the data to the original representation

        Parameters
        ----------
        X : {array-like, sparse matrix}
            The data that should be transformed back.
        """
        check_is_fitted(self, 'scale_')
        X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
                        ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
        if X.ndim == 1:
            warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
        if sparse.issparse(X):
            inplace_column_scale(X, self.scale_)
        else:
            X *= self.scale_
        return X
def maxabs_scale(X, axis=0, copy=True):
    """Scale each feature to the [-1, 1] range without breaking the sparsity.

    Each feature is divided by its maximal absolute value in the data, so
    the largest magnitude per feature becomes 1.0. No centering is done,
    which keeps sparse CSR/CSC matrices sparse.

    Parameters
    ----------
    axis : int (0 by default)
        axis used to scale along. If 0, independently scale each feature,
        otherwise (if 1) scale each sample.
    copy : boolean, optional, default is True
        Set to False to perform inplace scaling and avoid a copy (if the input
        is already a numpy array).
    """
    # 1D input is deprecated (since 0.17) for scaler objects, but this
    # function still accepts it for backward compatibility: promote to a
    # single-column 2D array, scale, then flatten again at the end.
    # Validation happens here without copying; the scaler object performs
    # the copy itself when requested.
    X = check_array(X, accept_sparse=('csr', 'csc'), copy=False,
                    ensure_2d=False, dtype=FLOAT_DTYPES)
    was_1d = X.ndim == 1
    if was_1d:
        X = X.reshape(X.shape[0], 1)

    scaler = MaxAbsScaler(copy=copy)
    if axis == 0:
        X = scaler.fit_transform(X)
    else:
        # Scale samples instead of features by working on the transpose.
        X = scaler.fit_transform(X.T).T

    return X.ravel() if was_1d else X
class RobustScaler(BaseEstimator, TransformerMixin):
    """Scale features using statistics that are robust to outliers.

    This Scaler removes the median and scales the data according to
    the Interquartile Range (IQR). The IQR is the range between the 1st
    quartile (25th quantile) and the 3rd quartile (75th quantile).

    Centering and scaling happen independently on each feature (or each
    sample, depending on the `axis` argument) by computing the relevant
    statistics on the samples in the training set. Median and interquartile
    range are then stored to be used on later data using the `transform`
    method.

    Standardization of a dataset is a common requirement for many
    machine learning estimators. Typically this is done by removing the mean
    and scaling to unit variance. However, outliers can often influence the
    sample mean / variance in a negative way. In such cases, the median and
    the interquartile range often give better results.

    .. versionadded:: 0.17

    Read more in the :ref:`User Guide <preprocessing_scaler>`.

    Parameters
    ----------
    with_centering : boolean, True by default
        If True, center the data before scaling.
        This does not work (and will raise an exception) when attempted on
        sparse matrices, because centering them entails building a dense
        matrix which in common use cases is likely to be too large to fit in
        memory.
    with_scaling : boolean, True by default
        If True, scale the data to interquartile range.
    copy : boolean, optional, default is True
        If False, try to avoid a copy and do inplace scaling instead.
        This is not guaranteed to always work inplace; e.g. if the data is
        not a NumPy array or scipy.sparse CSR matrix, a copy may still be
        returned.

    Attributes
    ----------
    center_ : array of floats
        The median value for each feature in the training set.
    scale_ : array of floats
        The (scaled) interquartile range for each feature in the training set.

        .. versionadded:: 0.17
           *scale_* attribute.

    See also
    --------
    :class:`sklearn.preprocessing.StandardScaler` to perform centering
    and scaling using mean and variance.
    :class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
    to further remove the linear correlation across features.

    Notes
    -----
    See examples/preprocessing/plot_robust_scaling.py for an example.
    http://en.wikipedia.org/wiki/Median_(statistics)
    http://en.wikipedia.org/wiki/Interquartile_range
    """
    def __init__(self, with_centering=True, with_scaling=True, copy=True):
        self.with_centering = with_centering
        self.with_scaling = with_scaling
        self.copy = copy

    def _check_array(self, X, copy):
        """Validate X; centering is refused for sparse matrices.

        Parameters
        ----------
        X : array-like or sparse matrix
            The data to validate.
        copy : bool
            Whether ``check_array`` should copy X.
        """
        # BUG FIX: honor the ``copy`` argument instead of always reading
        # self.copy. All current call sites pass self.copy, so behavior of
        # the existing callers is unchanged.
        X = check_array(X, accept_sparse=('csr', 'csc'), copy=copy,
                        ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
        if X.ndim == 1:
            warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
        if sparse.issparse(X):
            if self.with_centering:
                raise ValueError(
                    "Cannot center sparse matrices: use `with_centering=False`"
                    " instead. See docstring for motivation and alternatives.")
        return X

    def fit(self, X, y=None):
        """Compute the median and quantiles to be used for scaling.

        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            The data used to compute the median and quantiles
            used for later scaling along the features axis.
        """
        if sparse.issparse(X):
            raise TypeError("RobustScaler cannot be fitted on sparse inputs")
        # _check_array already warns once for deprecated 1D input; the
        # previous duplicate warning here was removed.
        X = self._check_array(X, self.copy)
        if self.with_centering:
            self.center_ = np.median(X, axis=0)
        if self.with_scaling:
            # IQR per feature: 75th minus 25th percentile.
            q = np.percentile(X, (25, 75), axis=0)
            self.scale_ = (q[1] - q[0])
            # Features with zero IQR are left unscaled (scale 1).
            self.scale_ = _handle_zeros_in_scale(self.scale_, copy=False)
        return self

    def transform(self, X, y=None):
        """Center and scale the data.

        Parameters
        ----------
        X : array-like
            The data used to scale along the specified axis.
        """
        if self.with_centering:
            check_is_fitted(self, 'center_')
        if self.with_scaling:
            check_is_fitted(self, 'scale_')
        X = self._check_array(X, self.copy)
        if sparse.issparse(X):
            # Only scaling is possible here; _check_array already rejected
            # sparse input when centering was requested.
            if self.with_scaling:
                inplace_column_scale(X, 1.0 / self.scale_)
        else:
            if self.with_centering:
                X -= self.center_
            if self.with_scaling:
                X /= self.scale_
        return X

    def inverse_transform(self, X):
        """Scale back the data to the original representation.

        Parameters
        ----------
        X : array-like
            The data used to scale along the specified axis.
        """
        if self.with_centering:
            check_is_fitted(self, 'center_')
        if self.with_scaling:
            check_is_fitted(self, 'scale_')
        X = self._check_array(X, self.copy)
        if sparse.issparse(X):
            if self.with_scaling:
                inplace_column_scale(X, self.scale_)
        else:
            # Invert in reverse order of transform: un-scale, then un-center.
            if self.with_scaling:
                X *= self.scale_
            if self.with_centering:
                X += self.center_
        return X
def robust_scale(X, axis=0, with_centering=True, with_scaling=True, copy=True):
    """Standardize a dataset along any axis.

    Centers to the median and scales component-wise by the interquartile
    range, delegating all computation to :class:`RobustScaler`.

    Read more in the :ref:`User Guide <preprocessing_scaler>`.

    Parameters
    ----------
    X : array-like
        The data to center and scale.
    axis : int (0 by default)
        axis used to compute the medians and IQR along. If 0,
        independently scale each feature, otherwise (if 1) scale
        each sample.
    with_centering : boolean, True by default
        If True, center the data before scaling.
    with_scaling : boolean, True by default
        If True, scale the data to unit variance (or equivalently,
        unit standard deviation).
    copy : boolean, optional, default is True
        set to False to perform inplace row normalization and avoid a
        copy (if the input is already a numpy array or a scipy.sparse
        CSR matrix and if axis is 1).

    Notes
    -----
    This implementation will refuse to center scipy.sparse matrices
    since it would make them non-sparse and would potentially crash the
    program with memory exhaustion problems.

    Instead the caller is expected to either set explicitly
    `with_centering=False` (in that case, only variance scaling will be
    performed on the features of the CSR matrix) or to call `X.toarray()`
    if he/she expects the materialized dense array to fit in memory.

    To avoid memory copy the caller should pass a CSR matrix.

    See also
    --------
    :class:`sklearn.preprocessing.RobustScaler` to perform centering and
    scaling using the ``Transformer`` API (e.g. as part of a preprocessing
    :class:`sklearn.pipeline.Pipeline`)
    """
    scaler = RobustScaler(with_centering=with_centering,
                          with_scaling=with_scaling, copy=copy)
    if axis != 0:
        # Per-sample scaling: run the per-feature scaler on the transpose.
        return scaler.fit_transform(X.T).T
    return scaler.fit_transform(X)
class PolynomialFeatures(BaseEstimator, TransformerMixin):
    """Generate polynomial and interaction features.

    Generate a new feature matrix consisting of all polynomial combinations
    of the features with degree less than or equal to the specified degree.
    For example, if an input sample is two dimensional and of the form
    [a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].

    Parameters
    ----------
    degree : integer
        The degree of the polynomial features. Default = 2.
    interaction_only : boolean, default = False
        If true, only interaction features are produced: features that are
        products of at most ``degree`` *distinct* input features (so not
        ``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.).
    include_bias : boolean
        If True (default), then include a bias column, the feature in which
        all polynomial powers are zero (i.e. a column of ones - acts as an
        intercept term in a linear model).

    Examples
    --------
    >>> X = np.arange(6).reshape(3, 2)
    >>> X
    array([[0, 1],
           [2, 3],
           [4, 5]])
    >>> poly = PolynomialFeatures(2)
    >>> poly.fit_transform(X)
    array([[  1.,   0.,   1.,   0.,   0.,   1.],
           [  1.,   2.,   3.,   4.,   6.,   9.],
           [  1.,   4.,   5.,  16.,  20.,  25.]])
    >>> poly = PolynomialFeatures(interaction_only=True)
    >>> poly.fit_transform(X)
    array([[  1.,   0.,   1.,   0.],
           [  1.,   2.,   3.,   6.],
           [  1.,   4.,   5.,  20.]])

    Attributes
    ----------
    powers_ : array, shape (n_input_features, n_output_features)
        powers_[i, j] is the exponent of the jth input in the ith output.
    n_input_features_ : int
        The total number of input features.
    n_output_features_ : int
        The total number of polynomial output features. The number of output
        features is computed by iterating over all suitably sized combinations
        of input features.

    Notes
    -----
    Be aware that the number of features in the output array scales
    polynomially in the number of features of the input array, and
    exponentially in the degree. High degrees can cause overfitting.

    See :ref:`examples/linear_model/plot_polynomial_interpolation.py
    <example_linear_model_plot_polynomial_interpolation.py>`
    """
    def __init__(self, degree=2, interaction_only=False, include_bias=True):
        self.degree = degree
        self.interaction_only = interaction_only
        self.include_bias = include_bias

    @staticmethod
    def _combinations(n_features, degree, interaction_only, include_bias):
        # Yield index tuples of length start..degree over the features.
        # Plain combinations never repeats an index (interaction-only);
        # combinations-with-replacement allows powers of a single feature.
        comb = (combinations if interaction_only else combinations_w_r)
        start = int(not include_bias)
        return chain.from_iterable(comb(range(n_features), i)
                                   for i in range(start, degree + 1))

    @property
    def powers_(self):
        check_is_fitted(self, 'n_input_features_')
        combinations = self._combinations(self.n_input_features_, self.degree,
                                          self.interaction_only,
                                          self.include_bias)
        # BUG FIX: np.vstack over a bare generator is deprecated and raises
        # in recent NumPy versions; build the list of rows first. Each row
        # counts how often every input feature appears in one combination,
        # i.e. its exponent in the corresponding output feature.
        return np.vstack([np.bincount(c, minlength=self.n_input_features_)
                          for c in combinations])

    def fit(self, X, y=None):
        """Compute number of output features."""
        n_samples, n_features = check_array(X).shape
        combinations = self._combinations(n_features, self.degree,
                                          self.interaction_only,
                                          self.include_bias)
        self.n_input_features_ = n_features
        self.n_output_features_ = sum(1 for _ in combinations)
        return self

    def transform(self, X, y=None):
        """Transform data to polynomial features.

        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            The data to transform, row by row.

        Returns
        -------
        XP : np.ndarray shape [n_samples, NP]
            The matrix of features, where NP is the number of polynomial
            features generated from the combination of inputs.
        """
        check_is_fitted(self, ['n_input_features_', 'n_output_features_'])
        X = check_array(X, dtype=FLOAT_DTYPES)
        n_samples, n_features = X.shape
        if n_features != self.n_input_features_:
            raise ValueError("X shape does not match training shape")
        # allocate output data
        XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype)
        combinations = self._combinations(n_features, self.degree,
                                          self.interaction_only,
                                          self.include_bias)
        for i, c in enumerate(combinations):
            # Product of the selected columns; an empty tuple yields the
            # bias column of ones.
            XP[:, i] = X[:, c].prod(1)
        return XP
def normalize(X, norm='l2', axis=1, copy=True, return_norm=False):
    """Scale input vectors individually to unit norm (vector length).

    Read more in the :ref:`User Guide <preprocessing_normalization>`.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape [n_samples, n_features]
        The data to normalize, element by element.
        scipy.sparse matrices should be in CSR format to avoid an
        un-necessary copy.
    norm : 'l1', 'l2', or 'max', optional ('l2' by default)
        The norm to use to normalize each non zero sample (or each non-zero
        feature if axis is 0).
    axis : 0 or 1, optional (1 by default)
        axis used to normalize the data along. If 1, independently normalize
        each sample, otherwise (if 0) normalize each feature.
    copy : boolean, optional, default True
        set to False to perform inplace row normalization and avoid a
        copy (if the input is already a numpy array or a scipy.sparse
        CSR matrix and if axis is 1).
    return_norm : boolean, default False
        whether to return the computed norms

    Returns
    -------
    X : normalized data (same container kind as the input)
    norms : returned only when ``return_norm`` is True.

    See also
    --------
    :class:`sklearn.preprocessing.Normalizer` to perform normalization
    using the ``Transformer`` API (e.g. as part of a preprocessing
    :class:`sklearn.pipeline.Pipeline`)
    """
    if norm not in ('l1', 'l2', 'max'):
        raise ValueError("'%s' is not a supported norm" % norm)
    if axis == 0:
        sparse_format = 'csc'
    elif axis == 1:
        sparse_format = 'csr'
    else:
        raise ValueError("'%d' is not a supported axis" % axis)

    # BUG FIX: the sparse l1/l2 paths below normalize in place without ever
    # materializing the norms; previously requesting them crashed with a
    # NameError. Fail early with an explicit error instead.
    if return_norm and sparse.issparse(X) and norm in ('l1', 'l2'):
        raise NotImplementedError("return_norm=True is not implemented for "
                                  "sparse matrices with norm 'l1' or 'l2'")

    X = check_array(X, sparse_format, copy=copy, warn_on_dtype=True,
                    estimator='the normalize function', dtype=FLOAT_DTYPES)
    if axis == 0:
        # Normalize features by normalizing the rows of the transpose.
        X = X.T

    if sparse.issparse(X):
        if norm == 'l1':
            inplace_csr_row_normalize_l1(X)
        elif norm == 'l2':
            inplace_csr_row_normalize_l2(X)
        elif norm == 'max':
            _, norms = min_max_axis(X, 1)
            # Expand the per-row max to one entry per stored value so the
            # sparse data array can be divided directly.
            norms = norms.repeat(np.diff(X.indptr))
            mask = norms != 0
            X.data[mask] /= norms[mask]
    else:
        if norm == 'l1':
            norms = np.abs(X).sum(axis=1)
        elif norm == 'l2':
            norms = row_norms(X)
        elif norm == 'max':
            norms = np.max(X, axis=1)
        # Rows with zero norm are left untouched (divided by 1).
        norms = _handle_zeros_in_scale(norms, copy=False)
        X /= norms[:, np.newaxis]

    if axis == 0:
        X = X.T

    if return_norm:
        return X, norms
    else:
        return X
class Normalizer(BaseEstimator, TransformerMixin):
    """Normalize samples individually to unit norm.

    Every row of the data matrix that has at least one non-zero entry is
    rescaled, independently of all other rows, so that its norm (l1 or l2)
    becomes one. Works on dense numpy arrays as well as scipy.sparse
    matrices (use CSR to avoid a copy/conversion).

    Unit-norm scaling is common for text classification and clustering:
    the dot product of two l2-normalized TF-IDF vectors is their cosine
    similarity, the base similarity metric of the Vector Space Model used
    throughout Information Retrieval.

    Read more in the :ref:`User Guide <preprocessing_normalization>`.

    Parameters
    ----------
    norm : 'l1', 'l2', or 'max', optional ('l2' by default)
        The norm to use to normalize each non zero sample.
    copy : boolean, optional, default True
        set to False to perform inplace row normalization and avoid a
        copy (if the input is already a numpy array or a scipy.sparse
        CSR matrix).

    Notes
    -----
    This estimator is stateless (besides constructor parameters); the
    fit method does nothing but is useful when used in a pipeline.

    See also
    --------
    :func:`sklearn.preprocessing.normalize` equivalent function
    without the object oriented API
    """
    def __init__(self, norm='l2', copy=True):
        self.norm = norm
        self.copy = copy

    def fit(self, X, y=None):
        """Do nothing and return the estimator unchanged.

        Exists only so the class satisfies the usual estimator API and can
        be placed in a pipeline; input is still validated.
        """
        check_array(X, accept_sparse='csr')
        return self

    def transform(self, X, y=None, copy=None):
        """Scale each non zero row of X to unit norm.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape [n_samples, n_features]
            The data to normalize, row by row. scipy.sparse matrices should be
            in CSR format to avoid an un-necessary copy.
        """
        if copy is None:
            copy = self.copy
        X = check_array(X, accept_sparse='csr')
        return normalize(X, norm=self.norm, axis=1, copy=copy)
def binarize(X, threshold=0.0, copy=True):
    """Boolean thresholding of array-like or scipy.sparse matrix.

    Entries strictly greater than ``threshold`` become 1, all others 0.

    Read more in the :ref:`User Guide <preprocessing_binarization>`.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape [n_samples, n_features]
        The data to binarize, element by element.
        scipy.sparse matrices should be in CSR or CSC format to avoid an
        un-necessary copy.
    threshold : float, optional (0.0 by default)
        Feature values below or equal to this are replaced by 0, above it by 1.
        Threshold may not be less than 0 for operations on sparse matrices.
    copy : boolean, optional, default True
        set to False to perform inplace binarization and avoid a copy
        (if the input is already a numpy array or a scipy.sparse CSR / CSC
        matrix and if axis is 1).

    See also
    --------
    :class:`sklearn.preprocessing.Binarizer` to perform binarization
    using the ``Transformer`` API (e.g. as part of a preprocessing
    :class:`sklearn.pipeline.Pipeline`)
    """
    X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy)
    if sparse.issparse(X):
        # A negative threshold would turn implicit zeros into ones, which
        # cannot be represented without densifying.
        if threshold < 0:
            raise ValueError('Cannot binarize a sparse matrix with threshold '
                             '< 0')
        above = X.data > threshold
        X.data[above] = 1
        X.data[~above] = 0
        # Stored values that became 0 are dropped from the structure.
        X.eliminate_zeros()
    else:
        above = X > threshold
        X[above] = 1
        X[~above] = 0
    return X
class Binarizer(BaseEstimator, TransformerMixin):
    """Binarize data (set feature values to 0 or 1) according to a threshold.

    Values strictly greater than the threshold map to 1; everything else
    maps to 0. With the default threshold of 0, only positive values map
    to 1.

    Binarization is a common operation on text count data, where one may
    care only about the presence or absence of a feature rather than its
    count. It also serves as a pre-processing step for estimators built on
    boolean random variables (e.g. Bernoulli-modelled features in a
    Bayesian setting).

    Read more in the :ref:`User Guide <preprocessing_binarization>`.

    Parameters
    ----------
    threshold : float, optional (0.0 by default)
        Feature values below or equal to this are replaced by 0, above it by 1.
        Threshold may not be less than 0 for operations on sparse matrices.
    copy : boolean, optional, default True
        set to False to perform inplace binarization and avoid a copy (if
        the input is already a numpy array or a scipy.sparse CSR matrix).

    Notes
    -----
    If the input is a sparse matrix, only the non-zero values are subject
    to update by the Binarizer class.

    This estimator is stateless (besides constructor parameters); the
    fit method does nothing but is useful when used in a pipeline.
    """
    def __init__(self, threshold=0.0, copy=True):
        self.threshold = threshold
        self.copy = copy

    def fit(self, X, y=None):
        """Do nothing and return the estimator unchanged.

        Exists only so the class satisfies the usual estimator API and can
        be placed in a pipeline; input is still validated.
        """
        check_array(X, accept_sparse='csr')
        return self

    def transform(self, X, y=None, copy=None):
        """Binarize each element of X.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape [n_samples, n_features]
            The data to binarize, element by element.
            scipy.sparse matrices should be in CSR format to avoid an
            un-necessary copy.
        """
        if copy is None:
            copy = self.copy
        return binarize(X, threshold=self.threshold, copy=copy)
class KernelCenterer(BaseEstimator, TransformerMixin):
    """Center a kernel matrix.

    If phi is the (implicit) feature map of a kernel
    K(x, z) = phi(x)^T phi(z), this transformer centers the mapped data to
    zero mean without ever computing phi explicitly. It is equivalent to
    centering phi(x) with
    sklearn.preprocessing.StandardScaler(with_std=False).

    Read more in the :ref:`User Guide <kernel_centering>`.
    """
    def fit(self, K, y=None):
        """Fit KernelCenterer.

        Parameters
        ----------
        K : numpy array of shape [n_samples, n_samples]
            Kernel matrix.

        Returns
        -------
        self : returns an instance of self.
        """
        K = check_array(K, dtype=FLOAT_DTYPES)
        n = K.shape[0]
        # Column means of the training kernel, then the grand mean.
        self.K_fit_rows_ = K.sum(axis=0) / n
        self.K_fit_all_ = self.K_fit_rows_.sum() / n
        return self

    def transform(self, K, y=None, copy=True):
        """Center kernel matrix.

        Parameters
        ----------
        K : numpy array of shape [n_samples1, n_samples2]
            Kernel matrix.
        copy : boolean, optional, default True
            Set to False to perform inplace computation.

        Returns
        -------
        K_new : numpy array of shape [n_samples1, n_samples2]
        """
        check_is_fitted(self, 'K_fit_all_')
        K = check_array(K, copy=copy, dtype=FLOAT_DTYPES)
        # Mean of each new row against the training samples, kept as a
        # column vector so it broadcasts over K.
        row_means = (K.sum(axis=1) /
                     self.K_fit_rows_.shape[0])[:, np.newaxis]
        # Double-centering formula: subtract training column means and new
        # row means, add back the training grand mean.
        K -= self.K_fit_rows_
        K -= row_means
        K += self.K_fit_all_
        return K
def add_dummy_feature(X, value=1.0):
    """Augment dataset with an additional dummy feature.

    This is useful for fitting an intercept term with implementations which
    cannot otherwise fit it directly.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape [n_samples, n_features]
        Data.
    value : float
        Value to use for the dummy feature.

    Returns
    -------
    X : {array, sparse matrix}, shape [n_samples, n_features + 1]
        Same data with dummy feature added as first column.

    Examples
    --------
    >>> from sklearn.preprocessing import add_dummy_feature
    >>> add_dummy_feature([[0, 1], [1, 0]])
    array([[ 1.,  0.,  1.],
           [ 1.,  1.,  0.]])
    """
    X = check_array(X, accept_sparse=['csc', 'csr', 'coo'], dtype=FLOAT_DTYPES)
    n_samples, n_features = X.shape
    out_shape = (n_samples, n_features + 1)
    if not sparse.issparse(X):
        dummy = np.ones((n_samples, 1)) * value
        return np.hstack((dummy, X))
    if sparse.isspmatrix_coo(X):
        # Existing columns shift one position right; the dummy feature
        # occupies column 0 in every row.
        cols = np.concatenate((np.zeros(n_samples), X.col + 1))
        rows = np.concatenate((np.arange(n_samples), X.row))
        vals = np.concatenate((np.ones(n_samples) * value, X.data))
        return sparse.coo_matrix((vals, (rows, cols)), out_shape)
    if sparse.isspmatrix_csc(X):
        # The prepended column stores n_samples explicit values, so every
        # index pointer moves up by n_samples; a fresh leading 0 marks the
        # start of the new first column.
        indptr = np.concatenate((np.array([0]), X.indptr + n_samples))
        indices = np.concatenate((np.arange(n_samples), X.indices))
        vals = np.concatenate((np.ones(n_samples) * value, X.data))
        return sparse.csc_matrix((vals, indices, indptr), out_shape)
    # Any other sparse format: route through COO, then convert back to the
    # original container class.
    return X.__class__(add_dummy_feature(X.tocoo(), value))
def _transform_selected(X, transform, selected="all", copy=True):
    """Apply a transform function to a subset of the features.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape [n_samples, n_features]
        Dense array or sparse matrix.
    transform : callable
        A callable transform(X) -> X_transformed
    copy : boolean, optional
        Copy X even if it could be avoided.
    selected : "all" or array of indices or mask
        Specify which features to apply the transform to.

    Returns
    -------
    X : array or sparse matrix, shape=(n_samples, n_features_new)
        Transformed columns followed by the untouched ones (selected
        features are always stacked to the left of the result).
    """
    if isinstance(selected, six.string_types) and selected == "all":
        return transform(X)

    X = check_array(X, accept_sparse='csc', copy=copy, dtype=FLOAT_DTYPES)

    if len(selected) == 0:
        return X

    n_features = X.shape[1]
    # Accept either index arrays or boolean masks via a common mask.
    mask = np.zeros(n_features, dtype=bool)
    mask[np.asarray(selected)] = True
    n_selected = mask.sum()

    if n_selected == 0:
        # No features selected.
        return X
    if n_selected == n_features:
        # All features selected.
        return transform(X)

    ind = np.arange(n_features)
    transformed = transform(X[:, ind[mask]])
    untouched = X[:, ind[~mask]]
    if sparse.issparse(transformed) or sparse.issparse(untouched):
        return sparse.hstack((transformed, untouched))
    return np.hstack((transformed, untouched))
class OneHotEncoder(BaseEstimator, TransformerMixin):
"""Encode categorical integer features using a one-hot aka one-of-K scheme.
The input to this transformer should be a matrix of integers, denoting
the values taken on by categorical (discrete) features. The output will be
a sparse matrix where each column corresponds to one possible value of one
feature. It is assumed that input features take on values in the range
[0, n_values).
This encoding is needed for feeding categorical data to many scikit-learn
estimators, notably linear models and SVMs with the standard kernels.
Read more in the :ref:`User Guide <preprocessing_categorical_features>`.
Parameters
----------
n_values : 'auto', int or array of ints
Number of values per feature.
- 'auto' : determine value range from training data.
- int : number of categorical values per feature.
Each feature value should be in ``range(n_values)``
- array : ``n_values[i]`` is the number of categorical values in
``X[:, i]``. Each feature value should be in ``range(n_values[i])``
categorical_features: "all" or array of indices or mask
Specify what features are treated as categorical.
- 'all' (default): All features are treated as categorical.
- array of indices: Array of categorical feature indices.
- mask: Array of length n_features and with dtype=bool.
Non-categorical features are always stacked to the right of the matrix.
dtype : number type, default=np.float
Desired dtype of output.
sparse : boolean, default=True
Will return sparse matrix if set True else will return an array.
handle_unknown : str, 'error' or 'ignore'
Whether to raise an error or ignore if a unknown categorical feature is
present during transform.
Attributes
----------
active_features_ : array
Indices for active features, meaning values that actually occur
in the training set. Only available when n_values is ``'auto'``.
feature_indices_ : array of shape (n_features,)
Indices to feature ranges.
Feature ``i`` in the original data is mapped to features
from ``feature_indices_[i]`` to ``feature_indices_[i+1]``
(and then potentially masked by `active_features_` afterwards)
n_values_ : array of shape (n_features,)
Maximum number of values per feature.
Examples
--------
Given a dataset with three features and two samples, we let the encoder
find the maximum value per feature and transform the data to a binary
one-hot encoding.
>>> from sklearn.preprocessing import OneHotEncoder
>>> enc = OneHotEncoder()
>>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], \
[1, 0, 2]]) # doctest: +ELLIPSIS
OneHotEncoder(categorical_features='all', dtype=<... 'numpy.float64'>,
handle_unknown='error', n_values='auto', sparse=True)
>>> enc.n_values_
array([2, 3, 4])
>>> enc.feature_indices_
array([0, 2, 5, 9])
>>> enc.transform([[0, 1, 1]]).toarray()
array([[ 1., 0., 0., 1., 0., 0., 1., 0., 0.]])
See also
--------
sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of
dictionary items (also handles string-valued features).
sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot
encoding of dictionary items or strings.
"""
def __init__(self, n_values="auto", categorical_features="all",
dtype=np.float64, sparse=True, handle_unknown='error'):
self.n_values = n_values
self.categorical_features = categorical_features
self.dtype = dtype
self.sparse = sparse
self.handle_unknown = handle_unknown
def fit(self, X, y=None):
"""Fit OneHotEncoder to X.
Parameters
----------
X : array-like, shape [n_samples, n_feature]
Input array of type int.
Returns
-------
self
"""
self.fit_transform(X)
return self
def _fit_transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
if self.n_values == 'auto':
n_values = np.max(X, axis=0) + 1
elif isinstance(self.n_values, numbers.Integral):
if (np.max(X, axis=0) >= self.n_values).any():
raise ValueError("Feature out of bounds for n_values=%d"
% self.n_values)
n_values = np.empty(n_features, dtype=np.int)
n_values.fill(self.n_values)
else:
try:
n_values = np.asarray(self.n_values, dtype=int)
except (ValueError, TypeError):
raise TypeError("Wrong type for parameter `n_values`. Expected"
" 'auto', int or array of ints, got %r"
% type(X))
if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]:
raise ValueError("Shape mismatch: if n_values is an array,"
" it has to be of shape (n_features,).")
self.n_values_ = n_values
n_values = np.hstack([[0], n_values])
indices = np.cumsum(n_values)
self.feature_indices_ = indices
column_indices = (X + indices[:-1]).ravel()
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)
data = np.ones(n_samples * n_features)
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
mask = np.array(out.sum(axis=0)).ravel() != 0
active_features = np.where(mask)[0]
out = out[:, active_features]
self.active_features_ = active_features
return out if self.sparse else out.toarray()
def fit_transform(self, X, y=None):
"""Fit OneHotEncoder to X, then transform X.
Equivalent to self.fit(X).transform(X), but more convenient and more
efficient. See fit for the parameters, transform for the return value.
"""
return _transform_selected(X, self._fit_transform,
self.categorical_features, copy=True)
    def _transform(self, X):
        """One-hot encode X using the category layout learned in fit.

        Assumes X contains only categorical features (non-negative ints).
        Raises ValueError on unknown categories unless
        ``handle_unknown == 'ignore'``.
        """
        X = check_array(X, dtype=np.int)
        if np.any(X < 0):
            raise ValueError("X needs to contain only non-negative integers.")
        n_samples, n_features = X.shape
        indices = self.feature_indices_
        if n_features != indices.shape[0] - 1:
            raise ValueError("X has different shape than during fitting."
                             " Expected %d, got %d."
                             % (indices.shape[0] - 1, n_features))
        # We use only those categorical features of X that are known using fit.
        # i.e lesser than n_values_ using mask.
        # This means, if self.handle_unknown is "ignore", the row_indices and
        # col_indices corresponding to the unknown categorical feature are
        # ignored.
        mask = (X < self.n_values_).ravel()
        if np.any(~mask):
            if self.handle_unknown not in ['error', 'ignore']:
                raise ValueError("handle_unknown should be either error or "
                                 "unknown got %s" % self.handle_unknown)
            if self.handle_unknown == 'error':
                raise ValueError("unknown categorical feature present %s "
                                 "during transform." % X.ravel()[~mask])
        # Same COO construction as _fit_transform, restricted to known values.
        column_indices = (X + indices[:-1]).ravel()[mask]
        row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
                                n_features)[mask]
        data = np.ones(np.sum(mask))
        out = sparse.coo_matrix((data, (row_indices, column_indices)),
                                shape=(n_samples, indices[-1]),
                                dtype=self.dtype).tocsr()
        if self.n_values == 'auto':
            # Keep the same active-column subset selected during fit.
            out = out[:, self.active_features_]
        return out if self.sparse else out.toarray()
    def transform(self, X):
        """Transform X using one-hot encoding.

        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            Input array of type int.

        Returns
        -------
        X_out : sparse matrix if sparse=True else a 2-d array, dtype=int
            Transformed input.
        """
        # Encode only the columns selected by `categorical_features`;
        # non-categorical columns are passed through unchanged.
        return _transform_selected(X, self._transform,
                                   self.categorical_features, copy=True)
| mit |
fbagirov/scikit-learn | examples/decomposition/plot_incremental_pca.py | 244 | 1878 | """
===============
Incremental PCA
===============
Incremental principal component analysis (IPCA) is typically used as a
replacement for principal component analysis (PCA) when the dataset to be
decomposed is too large to fit in memory. IPCA builds a low-rank approximation
for the input data using an amount of memory which is independent of the
number of input data samples. It is still dependent on the input data features,
but changing the batch size allows for control of memory usage.
This example serves as a visual check that IPCA is able to find a similar
projection of the data to PCA (to a sign flip), while only processing a
few samples at a time. This can be considered a "toy example", as IPCA is
intended for large datasets which do not fit in main memory, requiring
incremental approaches.
"""
print(__doc__)
# Authors: Kyle Kastner
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA, IncrementalPCA
# Load the iris data and project it to 2 components with both estimators.
iris = load_iris()
X = iris.data
y = iris.target
n_components = 2
# Incremental PCA processes the data in mini-batches of 10 samples.
ipca = IncrementalPCA(n_components=n_components, batch_size=10)
X_ipca = ipca.fit_transform(X)
# Ordinary (full-batch) PCA for comparison.
pca = PCA(n_components=n_components)
X_pca = pca.fit_transform(X)
# Plot both projections, colouring points by iris class.
for X_transformed, title in [(X_ipca, "Incremental PCA"), (X_pca, "PCA")]:
    plt.figure(figsize=(8, 8))
    for c, i, target_name in zip("rgb", [0, 1, 2], iris.target_names):
        plt.scatter(X_transformed[y == i, 0], X_transformed[y == i, 1],
                    c=c, label=target_name)
    if "Incremental" in title:
        # Compare magnitudes only: principal components are defined
        # only up to a sign flip.
        err = np.abs(np.abs(X_pca) - np.abs(X_ipca)).mean()
        plt.title(title + " of iris dataset\nMean absolute unsigned error "
                  "%.6f" % err)
    else:
        plt.title(title + " of iris dataset")
    plt.legend(loc="best")
    plt.axis([-4, 4, -1.5, 1.5])
plt.show()
| bsd-3-clause |
znes/HESYSOPT | hesysopt/restore_results.py | 1 | 1428 | # -*- coding: utf-8 -*-
"""
This module is used to configure the plotting. At the momemt it reads for
the default all results path and creates a multiindex dataframe. This is
used by the different plotting-modules. Also, colors are set here.
Note: This is rather ment to illustrate, how hesysopt results can be plotted,
than to depict a complete plotting ready to use library.
"""
import os
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
plt.style.use('ggplot')
def restore(scenarios=None):
    """Load per-scenario ``all_results.csv`` files into one multi-indexed frame.

    Reads ``~/hesysopt_simulation/<scenario>/results/all_results.csv`` for
    every scenario, concatenates them, restores the original multi-index and
    assigns a colour to each component and scenario for plotting.

    Parameters
    ----------
    scenarios : sequence of str, optional
        Scenario directory names. Defaults to ['1HBP', '2HBP', '4HBP'].

    Returns
    -------
    (main_df, scenarios, colors) where colors maps 'components' and
    'scenarios' to dicts of seaborn palette entries.
    """
    # Avoid a mutable default argument (shared across calls); build per call.
    if scenarios is None:
        scenarios = ['1HBP', '2HBP', '4HBP']
    homepath = os.path.expanduser('~')
    main_df = pd.DataFrame()
    for s in scenarios:
        resultspath = os.path.join(homepath, 'hesysopt_simulation', s, 'results',
                                   'all_results.csv')
        tmp = pd.read_csv(resultspath)
        # Tag every row with its scenario before concatenation.
        tmp['Scenario'] = s
        main_df = pd.concat([main_df, tmp])
    # restore orginial df multiindex
    main_df.set_index(['Scenario', 'bus_label', 'type', 'obj_label', 'datetime'],
                      inplace=True)
    # set colors
    colors = {}
    components = main_df.index.get_level_values('obj_label').unique()
    colors['components'] = dict(zip(components,
                                    sns.color_palette("coolwarm_r", len(components))))
    colors['scenarios'] = dict(zip(scenarios,
                                   sns.color_palette("muted", len(scenarios))))
    return main_df, scenarios, colors
| gpl-3.0 |
ccauet/scikit-optimize | benchmarks/bench_ml.py | 1 | 15685 |
"""
This code implements benchmark for the black box optimization algorithms,
applied to a task of optimizing parameters of ML algorithms for the task
of supervised learning.
The code implements benchmark on 4 datasets where parameters for 6 classes
of supervised models are tuned to optimize performance on datasets. Supervised
learning models implementations are taken from sklearn.
Regression learning task is solved on 2 datasets, and classification on the
rest of datasets. 3 model classes are regression models, and rest are
classification models.
"""
from collections import defaultdict
from datetime import datetime
import json
import sys
import math
if sys.version_info.major == 2:
# Python 2
from urllib2 import HTTPError
from urllib import urlopen
else:
from urllib.error import HTTPError
from urllib import urlopen
import numpy as np
from sklearn.base import ClassifierMixin
from sklearn.base import RegressorMixin
from sklearn.datasets import fetch_california_housing
from sklearn.datasets import fetch_mldata
from sklearn.datasets import load_boston
from sklearn.datasets import load_digits
from sklearn.externals.joblib import delayed
from sklearn.externals.joblib import Parallel
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.svm import SVR
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn.neural_network import MLPClassifier
from sklearn.neural_network import MLPRegressor
from sklearn.metrics import r2_score
from sklearn.metrics import log_loss
from skopt import gbrt_minimize
from skopt import gp_minimize
from skopt import forest_minimize
from skopt.space import Categorical
from skopt.space import Integer
from skopt.space import Real
MODEL_PARAMETERS = "model parameters"
MODEL_BACKEND = "model backend"
# functions below are used to apply non - linear maps to parameter values, eg
# -3.0 -> 0.001
def pow10map(x):
    """Map an exponent to the corresponding power of ten, e.g. -3.0 -> 0.001."""
    return math.pow(10.0, x)
def pow2intmap(x):
    """Map an exponent to the integer part of the corresponding power of two."""
    return int(math.pow(2.0, x))
def nop(x):
    """Identity mapping used for parameters that need no transformation."""
    value = x
    return value
# Hyperparameter search space for the MLP models. Each entry maps a
# parameter name to a pair (skopt dimension sampled by the optimizer,
# transform applied to the sampled value before it is passed to sklearn).
nnparams = {
    # up to 1024 neurons
    'hidden_layer_sizes': (Real(1.0, 10.0), pow2intmap),
    'activation': (Categorical(['identity', 'logistic', 'tanh', 'relu']), nop),
    'solver': (Categorical(['lbfgs', 'sgd', 'adam']), nop),
    'alpha': (Real(-5.0, -1), pow10map),
    'batch_size': (Real(5.0, 10.0), pow2intmap),
    'learning_rate': (Categorical(['constant', 'invscaling', 'adaptive']), nop),
    'max_iter': (Real(5.0, 8.0), pow2intmap),
    'learning_rate_init': (Real(-5.0, -1), pow10map),
    'power_t': (Real(0.01, 0.99), nop),
    'momentum': (Real(0.1, 0.98), nop),
    'nesterovs_momentum': (Categorical([True, False]), nop),
    'beta_1': (Real(0.1, 0.98), nop),
    'beta_2': (Real(0.1, 0.9999999), nop),
}
# Model class -> hyperparameter search space (same (dimension, transform)
# convention as nnparams above).
MODELS = {
    MLPRegressor: nnparams,
    SVR: {
        'C': (Real(-4.0, 4.0), pow10map),
        'epsilon': (Real(-4.0, 1.0), pow10map),
        'gamma': (Real(-4.0, 1.0), pow10map)},
    DecisionTreeRegressor: {
        'max_depth': (Real(1.0, 4.0), pow2intmap),
        'min_samples_split': (Real(1.0, 8.0), pow2intmap)},
    MLPClassifier: nnparams,
    SVC: {
        'C': (Real(-4.0, 4.0), pow10map),
        'gamma': (Real(-4.0, 1.0), pow10map)},
    DecisionTreeClassifier: {
        'max_depth': (Real(1.0, 4.0), pow2intmap),
        'min_samples_split': (Real(1.0, 8.0), pow2intmap)}
}
# every dataset should have a mapping to the mixin that can handle it.
DATASETS = {
    "Boston": RegressorMixin,
    "Housing": RegressorMixin,
    "digits": ClassifierMixin,
    "Climate Model Crashes": ClassifierMixin,
}
# bunch of dataset preprocessing functions below
def split_normalize(X, y, random_state):
    """Split (X, y) into training and validation parts and standardize X.

    Test data is assumed to be used after optimization. 70% of the samples
    go to training and the rest to validation; the scaler is fit on the
    training part only and applied to both.

    Parameters
    ----------
    * `X` [array-like, shape = (n_samples, n_features)]:
        Training data.
    * `y`: [array-like, shape = (n_samples)]:
        Target values.

    Returns
    -------
    X_train, y_train, X_test, y_test
    """
    X_tr, X_val, y_tr, y_val = train_test_split(
        X, y, train_size=0.7, random_state=random_state)
    scaler = StandardScaler()
    scaler.fit(X_tr, y_tr)
    X_tr = scaler.transform(X_tr)
    X_val = scaler.transform(X_val)
    return X_tr, y_tr, X_val, y_val
# this is used to process the output of fetch_mldata
def load_data_target(name):
    """Return (data, target) arrays for the named benchmark dataset.

    Parameters
    ----------
    * `name`: str
        One of "Boston", "Housing", "digits", "Climate Model Crashes".

    Raises
    ------
    ValueError for unrecognised dataset names.
    """
    if name == "Boston":
        data = load_boston()
    elif name == "Housing":
        data = fetch_california_housing()
        dataset_size = 1000  # this is necessary so that SVR does not slow down too much
        data["data"] = data["data"][:dataset_size]
        data["target"] = data["target"][:dataset_size]
    elif name == "digits":
        data = load_digits()
    elif name == "Climate Model Crashes":
        try:
            data = fetch_mldata("climate-model-simulation-crashes")
        except HTTPError:
            # Fall back to downloading from UCI directly.
            url = "https://archive.ics.uci.edu/ml/machine-learning-databases/00252/pop_failures.dat"
            # BUG FIX: urlopen().read() returns bytes on Python 3; decode
            # before splitting. Also skip the header line and any blank
            # trailing lines, which previously broke float() parsing.
            raw = urlopen(url).read().decode('ascii')
            rows = [line.split() for line in raw.split('\n')[1:] if line.strip()]
            samples = np.array([[float(v) for v in row] for row in rows])
            data = dict()
            data["data"] = samples[:, :-1]
            # np.int is a deprecated alias of the builtin int; use int.
            data["target"] = np.array(samples[:, -1], dtype=int)
    else:
        raise ValueError("dataset not supported.")
    return data["data"], data["target"]
class MLBench(object):
    """
    A class which is used to perform benchmarking of black box optimization
    algorithms on various machine learning problems.
    On instantiation, the dataset is loaded that is used for experimentation
    and is kept in memory in order to avoid delays due to reloading of data.
    Parameters
    ----------
    * `model`: scikit-learn estimator
        An instance of a sklearn estimator.
    * `dataset`: str
        Name of the dataset.
    * `random_state`: seed
        Initialization for the random number generator in numpy.
    """
    def __init__(self, model, dataset, random_state):
        # Load once and keep the standardized train/validation split cached.
        X, Y = load_data_target(dataset)
        self.X_train, self.y_train, self.X_test, self.y_test = split_normalize(
            X, Y, random_state)
        self.random_state = random_state
        self.model = model
        # (dimension, transform) search space for this model class.
        self.space = MODELS[model]
    def evaluate(self, point):
        """
        Fits model using the particular setting of hyperparameters and
        evaluates the model validation data.
        Parameters
        ----------
        * `point`: dict
            A mapping of parameter names to the corresponding values
        Returns
        -------
        * `score`: float
            Score (more is better!) for some specific point
        """
        X_train, y_train, X_test, y_test = (
            self.X_train, self.y_train, self.X_test, self.y_test)
        # apply transformation to model parameters, for example exp transformation
        point_mapped = {}
        for param, val in point.items():
            point_mapped[param] = self.space[param][1](val)
        model_instance = self.model(**point_mapped)
        if 'random_state' in model_instance.get_params():
            model_instance.set_params(random_state=self.random_state)
        # Floor for the objective; also used as the "failed fit" score.
        min_obj_val = -5.0
        # Infeasible parameters are expected to raise an exception, thus the try
        # catch below, infeasible parameters yield assumed smallest objective.
        try:
            model_instance.fit(X_train, y_train)
            if isinstance(model_instance, RegressorMixin): # r^2 metric
                y_predicted = model_instance.predict(X_test)
                score = r2_score(y_test, y_predicted)
            elif isinstance(model_instance, ClassifierMixin): # log loss
                y_predicted = model_instance.predict_proba(X_test)
                score = -log_loss(y_test, y_predicted) # in the context of this function, the higher score is better
            # avoid any kind of singularitites, eg probability being zero, and thus breaking the log_loss
            if math.isnan(score):
                score = min_obj_val
            score = max(score, min_obj_val) # this is necessary to avoid -inf or NaN
        except BaseException as ex:
            # `ex` is deliberately discarded: any failure maps to the floor.
            score = min_obj_val # on error: return assumed smallest value of objective function
        return score
# Markdown header for the benchmark summary table printed by
# calculate_performance (matches the README tables in scikit-optimize).
table_template = """|Blackbox Function| Minimum | Best minimum | Mean f_calls to min | Std f_calls to min | Fastest f_calls to min
------------------|------------|-----------|---------------------|--------------------|-----------------------
| """
def calculate_performance(all_data):
    """
    Calculates the performance metrics as found in "benchmarks" folder of
    scikit-optimize and prints them in console.
    Parameters
    ----------
    * `all_data`: dict
        Traces data collected during run of algorithms. For more details, see
        'evaluate_optimizer' function.
    """
    # Group traces by (algorithm, learning type) across models and datasets.
    sorted_traces = defaultdict(list)
    for model in all_data:
        for dataset in all_data[model]:
            for algorithm in all_data[model][dataset]:
                data = all_data[model][dataset][algorithm]
                # leave only best objective values at particular iteration
                best = [[v[-1] for v in d] for d in data]
                supervised_learning_type = "Regression" if ("Regressor" in model) else "Classification"
                # for every item in sorted_traces it is 2d array, where first dimension corresponds to
                # particular repeat of experiment, and second dimension corresponds to index
                # of optimization step during optimization
                key = (algorithm, supervised_learning_type)
                sorted_traces[key].append(best)
    # calculate averages
    for key in sorted_traces:
        # the meta objective: average over multiple tasks
        mean_obj_vals = np.mean(sorted_traces[key], axis=0)
        # Per-repeat best value and the iteration index where it was reached.
        minimums = np.min(mean_obj_vals, axis=1)
        f_calls = np.argmin(mean_obj_vals, axis=1)
        min_mean = np.mean(minimums)
        min_stdd = np.std(minimums)
        min_best = np.min(minimums)
        f_mean = np.mean(f_calls)
        f_stdd = np.std(f_calls)
        f_best = np.min(f_calls)
        def fmt(float_value):
            # three-decimal formatting used in every table cell
            return ("%.3f" % float_value)
        output = str(key[0]) + " | " + " | ".join(
            [fmt(min_mean) + " +/- " + fmt(min_stdd)] + [fmt(v) for v in [min_best, f_mean, f_stdd, f_best]])
        result = table_template + output
        print("")
        print(key[1])
        print(result)
def evaluate_optimizer(surrogate_minimize, model, dataset, n_calls, random_state):
    """
    Evaluates some estimator for the task of optimization of parameters of some
    model, given limited number of model evaluations.
    Parameters
    ----------
    * `surrogate_minimize`:
        Minimization function from skopt (eg gp_minimize) that is used
        to minimize the objective.
    * `model`: scikit-learn estimator.
        sklearn estimator used for parameter tuning.
    * `dataset`: str
        Name of dataset to train ML model on.
    * `n_calls`: int
        Budget of evaluations
    * `random_state`: seed
        Set the random number generator in numpy.
    Returns
    -------
    * `trace`: list of tuples
        (p, f(p), best), where p is a dictionary of the form "param name":value,
        and f(p) is performance achieved by the model for configuration p
        and best is the best value till that index.
        Such a list contains history of execution of optimization.
    """
    # below seed is necessary for processes which fork at the same time
    # so that random numbers generated in processes are different
    np.random.seed(random_state)
    problem = MLBench(model, dataset, random_state)
    space = problem.space
    # Sort parameter names so the dimension order is deterministic.
    dimensions_names = sorted(space)
    dimensions = [space[d][0] for d in dimensions_names]
    def objective(x):
        # convert list of dimension values to dictionary
        x = dict(zip(dimensions_names, x))
        # the result of "evaluate" is accuracy / r^2, which is the more the better
        y = -problem.evaluate(x)
        return y
    # optimization loop
    result = surrogate_minimize(objective, dimensions, n_calls=n_calls, random_state=random_state)
    # Build the (point, value, running best) trace from the skopt result.
    trace = []
    min_y = np.inf
    for x, y in zip(result.x_iters, result.func_vals):
        min_y = min(y, min_y)
        x_dct = dict(zip(dimensions_names, x))
        trace.append((x_dct, y, min_y))
    # progress marker: echoes the seed of the finished run
    print(random_state)
    return trace
def run(n_calls=32, n_runs=1, save_traces=True, n_jobs=1):
    """
    Main function used to run the experiments.
    Parameters
    ----------
    * `n_calls`: int
        Evaluation budget.
    * `n_runs`: int
        Number of times to repeat the optimization in order to average out noise.
    * `save_traces`: bool
        Whether or not to save data collected during optimization
    * `n_jobs`: int
        Number of different repeats of optimization to run in parallel.
    """
    surrogate_minimizers = [gbrt_minimize, forest_minimize, gp_minimize]
    selected_models = sorted(MODELS, key=lambda x: x.__name__)
    selected_datasets = (DATASETS.keys())
    # all the parameter values and objectives collected during execution are stored in list below
    all_data = {}
    for model in selected_models:
        all_data[model] = {}
        for dataset in selected_datasets:
            # only pair regressors with regression datasets and classifiers
            # with classification datasets
            if not issubclass(model, DATASETS[dataset]):
                continue
            all_data[model][dataset] = {}
            for surrogate_minimizer in surrogate_minimizers:
                print(surrogate_minimizer.__name__, model.__name__, dataset)
                seeds = np.random.randint(0, 2**30, n_runs)
                raw_trace = Parallel(n_jobs=n_jobs)(
                    delayed(evaluate_optimizer)(
                        surrogate_minimizer, model, dataset, n_calls, seed
                    ) for seed in seeds
                )
                all_data[model][dataset][surrogate_minimizer.__name__] = raw_trace
    # convert the model keys to strings so that results can be saved as json
    all_data = {k.__name__: v for k, v in all_data.items()}
    # dump the recorded objective values as json
    if save_traces:
        # BUG FIX: the original format "%m_%Y_%d_%H_%m_%s" repeated %m (month)
        # where minutes were clearly intended and used the non-portable,
        # lowercase %s; use %M (minute) and %S (second) instead.
        with open(datetime.now().strftime("%m_%Y_%d_%H_%M_%S") + '.json', 'w') as f:
            json.dump(all_data, f)
    calculate_performance(all_data)
if __name__ == "__main__":
    # Command-line entry point for the benchmark.
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--n_calls', nargs="?", default=50, type=int,
        help="Number of function calls.")
    parser.add_argument(
        '--n_runs', nargs="?", default=10, type=int,
        help="Number of re-runs of single algorithm on single instance of a "
             "problem, in order to average out the noise.")
    # NOTE(review): type=bool does NOT parse "False" as False — any non-empty
    # string is truthy (bool("False") is True); confirm intended CLI behavior.
    parser.add_argument(
        '--save_traces', nargs="?", default=False, type=bool,
        help="Whether to save pairs (point, objective, best_objective) obtained"
             " during experiments in a json file.")
    parser.add_argument(
        '--n_jobs', nargs="?", default=1, type=int,
        help="Number of worker processes used for the benchmark.")
    args = parser.parse_args()
    run(args.n_calls, args.n_runs, args.save_traces, args.n_jobs)
| bsd-3-clause |
datapythonista/pandas | pandas/tests/series/methods/test_dropna.py | 2 | 3488 | import numpy as np
import pytest
from pandas import (
DatetimeIndex,
IntervalIndex,
NaT,
Period,
Series,
Timestamp,
)
import pandas._testing as tm
class TestDropna:
    """Tests for Series.dropna across dtypes, indexes and call styles."""
    def test_dropna_empty(self):
        # dropna on an empty Series is a no-op; inplace=True returns None
        # per the pandas convention for in-place operations.
        ser = Series([], dtype=object)
        assert len(ser.dropna()) == 0
        return_value = ser.dropna(inplace=True)
        assert return_value is None
        assert len(ser) == 0
        # invalid axis
        msg = "No axis named 1 for object type Series"
        with pytest.raises(ValueError, match=msg):
            ser.dropna(axis=1)
    def test_dropna_preserve_name(self, datetime_series):
        # The .name attribute must survive dropna, both copying and in place.
        datetime_series[:5] = np.nan
        result = datetime_series.dropna()
        assert result.name == datetime_series.name
        name = datetime_series.name
        ts = datetime_series.copy()
        return_value = ts.dropna(inplace=True)
        assert return_value is None
        assert ts.name == name
    def test_dropna_no_nan(self):
        # With no missing values dropna returns an equal but distinct object.
        for ser in [
            Series([1, 2, 3], name="x"),
            Series([False, True, False], name="x"),
        ]:
            result = ser.dropna()
            tm.assert_series_equal(result, ser)
            assert result is not ser
            s2 = ser.copy()
            return_value = s2.dropna(inplace=True)
            assert return_value is None
            tm.assert_series_equal(s2, ser)
    def test_dropna_intervals(self):
        # A NaN interval in the IntervalIndex is dropped with its value.
        ser = Series(
            [np.nan, 1, 2, 3],
            IntervalIndex.from_arrays([np.nan, 0, 1, 2], [np.nan, 1, 2, 3]),
        )
        result = ser.dropna()
        expected = ser.iloc[1:]
        tm.assert_series_equal(result, expected)
    def test_dropna_period_dtype(self):
        # GH#13737
        ser = Series([Period("2011-01", freq="M"), Period("NaT", freq="M")])
        result = ser.dropna()
        expected = Series([Period("2011-01", freq="M")])
        tm.assert_series_equal(result, expected)
    def test_datetime64_tz_dropna(self):
        # DatetimeLikeBlock
        ser = Series(
            [
                Timestamp("2011-01-01 10:00"),
                NaT,
                Timestamp("2011-01-03 10:00"),
                NaT,
            ]
        )
        result = ser.dropna()
        expected = Series(
            [Timestamp("2011-01-01 10:00"), Timestamp("2011-01-03 10:00")], index=[0, 2]
        )
        tm.assert_series_equal(result, expected)
        # DatetimeTZBlock
        idx = DatetimeIndex(
            ["2011-01-01 10:00", NaT, "2011-01-03 10:00", NaT], tz="Asia/Tokyo"
        )
        ser = Series(idx)
        assert ser.dtype == "datetime64[ns, Asia/Tokyo]"
        result = ser.dropna()
        expected = Series(
            [
                Timestamp("2011-01-01 10:00", tz="Asia/Tokyo"),
                Timestamp("2011-01-03 10:00", tz="Asia/Tokyo"),
            ],
            index=[0, 2],
        )
        # timezone must be preserved through dropna
        assert result.dtype == "datetime64[ns, Asia/Tokyo]"
        tm.assert_series_equal(result, expected)
    def test_dropna_pos_args_deprecation(self):
        # https://github.com/pandas-dev/pandas/issues/41485
        ser = Series([1, 2, 3])
        msg = (
            r"In a future version of pandas all arguments of Series\.dropna "
            r"will be keyword-only"
        )
        with tm.assert_produces_warning(FutureWarning, match=msg):
            result = ser.dropna(0)
        expected = Series([1, 2, 3])
        tm.assert_series_equal(result, expected)
| bsd-3-clause |
ivastar/clear | grizli_reduction.py | 1 | 33227 | #!/home/rsimons/miniconda2/bin/python
import matplotlib
import time
import os
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import fits
import drizzlepac
import grizli
import glob
from grizli import utils
import importlib
from grizli.prep import process_direct_grism_visit
#from hsaquery import query, overlaps
from grizli.pipeline import auto_script
from grizli.multifit import GroupFLT, MultiBeam, get_redshift_fit_defaults
import os, sys, argparse
from grizli.pipeline import photoz
from astropy.table import Table
import eazy
from joblib import Parallel, delayed
from glob import glob
from mastquery import query, overlaps
import gc
plt.ioff()
plt.close('all')
def parse():
    '''
    Parse command line arguments for the CLEAR grizli extraction pipeline.

    Returns
    -------
    dict
        Mapping of option names to parsed values (via vars()); keys include
        the field name, magnitude/redshift limits, stage toggles and paths.
    '''
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter, description='''CLEAR grizli extractions.''')
    parser.add_argument('-field', '--field', default='GS1', help='field to extract')
    parser.add_argument('-mag_lim', '--mag_lim', type = int, default=25, help='field to extract')
    parser.add_argument('-mag_max', '--mag_max', type = int, default= 0, help='field to extract')
    parser.add_argument('-zr_min', '--zr_min', type = float, default= 0., help='field to extract')
    parser.add_argument('-zr_max', '--zr_max', type = float, default= 12., help='field to extract')
    # NOTE(review): the two options below use non-string defaults with no
    # action; passing a value on the CLI yields a (truthy) string — confirm.
    parser.add_argument('-do_files', '--do_files', default = True, help = 'bool to load files')
    parser.add_argument('-do_model', '--do_model', default = True, help = 'bool to model spectra')
    parser.add_argument('-run_parallel', '--run_parallel', action = "store_true", default = False, help = 'fit with photometry')
    parser.add_argument('-fwop', '--fwop', action = "store_true", default = False, help = 'fit with photometry')
    parser.add_argument('-do_retrieve', '--do_retrieve', action = "store_true", default = False, help = 'bool to retrieve files from MAST')
    parser.add_argument('-on_jase', '--on_jase', action = "store_true", default = False, help = 'bool to retrieve files from MAST')
    parser.add_argument('-do_prep', '--do_prep', action = "store_true", default = False, help = 'bool to PREP files with Grizli')
    parser.add_argument('-do_new_model', '--do_new_model', action = "store_true", default = False, help = 'bool to create new Grizli models')
    parser.add_argument('-do_beams', '--do_beams', action = "store_true", default = False, help = 'bool to write beams files')
    parser.add_argument('-do_fit', '--do_fit', action = "store_true", default = False, help = 'bool to fit modeled spectra')
    parser.add_argument('-use_psf', '--use_psf', action = "store_true", default = False, help = 'use psf extraction in fitting routine')
    parser.add_argument('-make_catalog', '--make_catalog', action = "store_true", default = False, help = 'use psf extraction in fitting routine')
    parser.add_argument('-use_phot', '--use_phot', action = "store_true", default = False, help = 'use psf extraction in fitting routine')
    parser.add_argument('-fit_min_id', '--fit_min_id', type = int, default = 0, help = 'ID to start on for the fit')
    parser.add_argument('-n_jobs', '--n_jobs', type = int, default = -1, help = 'number of threads')
    parser.add_argument('-id_choose', '--id_choose', type = int, default = None, help = 'ID to fit')
    parser.add_argument('-pso', '--pso', type = int, default = 1, help = 'phot_scale_order')
    parser.add_argument('-PATH_TO_RAW' , '--PATH_TO_RAW' , default = '/user/rsimons/grizli_extractions/RAW', help = 'path to RAW directory')
    parser.add_argument('-PATH_TO_PREP' , '--PATH_TO_PREP' , default = '/user/rsimons/grizli_extractions/PREP', help = 'path to prep directory')
    parser.add_argument('-PATH_TO_SCRIPTS', '--PATH_TO_SCRIPTS', default = '/user/rsimons/git/clear_local', help = 'path to scripts directory')
    parser.add_argument('-PATH_TO_CATS' , '--PATH_TO_CATS' , default = '/user/rsimons/grizli_extractions/Catalogs', help = 'path to catalog directory')
    parser.add_argument('-PATH_TO_HOME' , '--PATH_TO_HOME' , default = '/user/rsimons/grizli_extractions', help = 'path to home directory sans field')
    args = vars(parser.parse_args())
    return args
def readEazyBinary(MAIN_OUTPUT_FILE='photz', OUTPUT_DIRECTORY='./OUTPUT', CACHE_FILE='Same'):
    """
    Author: Gabe Brammer
    This function has been clipped from eazyPy.py in thethreedhst git respository
    https://github.com/gbrammer/threedhst/tree/master/threedhst
    tempfilt, coeffs, temp_sed, pz = readEazyBinary(MAIN_OUTPUT_FILE='photz', \
                                                    OUTPUT_DIRECTORY='./OUTPUT', \
                                                    CACHE_FILE = 'Same')
    Read Eazy BINARY_OUTPUTS files into structure data.
    If the BINARY_OUTPUTS files are not in './OUTPUT', provide either a relative or absolute path
    in the OUTPUT_DIRECTORY keyword.
    By default assumes that CACHE_FILE is MAIN_OUTPUT_FILE+'.tempfilt'.
    Specify the full filename if otherwise.

    Returns (tempfilt, coeffs, temp_sed, pz) dicts, or (-1, -1, -1, -1)
    when the .tempfilt cache file is missing.
    """
    #root='COSMOS/OUTPUT/cat3.4_default_lines_zp33sspNoU'
    root = OUTPUT_DIRECTORY+'/'+MAIN_OUTPUT_FILE
    ###### .tempfilt
    if CACHE_FILE == 'Same':
        CACHE_FILE = root+'.tempfilt'
    if os.path.exists(CACHE_FILE) is False:
        print(('File, %s, not found.' %(CACHE_FILE)))
        return -1,-1,-1,-1
    f = open(CACHE_FILE,'rb')
    # header: number of filters, templates, redshift grid points, objects
    s = np.fromfile(file=f,dtype=np.int32, count=4)
    NFILT=s[0]
    NTEMP=s[1]
    NZ=s[2]
    NOBJ=s[3]
    # template fluxes through each filter on the redshift grid
    tempfilt = np.fromfile(file=f,dtype=np.double,count=NFILT*NTEMP*NZ).reshape((NZ,NTEMP,NFILT)).transpose()
    lc = np.fromfile(file=f,dtype=np.double,count=NFILT)
    zgrid = np.fromfile(file=f,dtype=np.double,count=NZ)
    fnu = np.fromfile(file=f,dtype=np.double,count=NFILT*NOBJ).reshape((NOBJ,NFILT)).transpose()
    efnu = np.fromfile(file=f,dtype=np.double,count=NFILT*NOBJ).reshape((NOBJ,NFILT)).transpose()
    f.close()
    tempfilt = {'NFILT':NFILT,'NTEMP':NTEMP,'NZ':NZ,'NOBJ':NOBJ,\
                'tempfilt':tempfilt,'lc':lc,'zgrid':zgrid,'fnu':fnu,'efnu':efnu}
    ###### .coeff
    f = open(root+'.coeff','rb')
    s = np.fromfile(file=f,dtype=np.int32, count=4)
    NFILT=s[0]
    NTEMP=s[1]
    NZ=s[2]
    NOBJ=s[3]
    # best-fit template coefficients per object
    coeffs = np.fromfile(file=f,dtype=np.double,count=NTEMP*NOBJ).reshape((NOBJ,NTEMP)).transpose()
    izbest = np.fromfile(file=f,dtype=np.int32,count=NOBJ)
    tnorm = np.fromfile(file=f,dtype=np.double,count=NTEMP)
    f.close()
    coeffs = {'NFILT':NFILT,'NTEMP':NTEMP,'NZ':NZ,'NOBJ':NOBJ,\
              'coeffs':coeffs,'izbest':izbest,'tnorm':tnorm}
    ###### .temp_sed
    f = open(root+'.temp_sed','rb')
    s = np.fromfile(file=f,dtype=np.int32, count=3)
    NTEMP=s[0]
    NTEMPL=s[1]
    NZ=s[2]
    # template SEDs sampled at NTEMPL wavelengths, plus IGM absorption terms
    templam = np.fromfile(file=f,dtype=np.double,count=NTEMPL)
    temp_seds = np.fromfile(file=f,dtype=np.double,count=NTEMPL*NTEMP).reshape((NTEMP,NTEMPL)).transpose()
    da = np.fromfile(file=f,dtype=np.double,count=NZ)
    db = np.fromfile(file=f,dtype=np.double,count=NZ)
    f.close()
    temp_sed = {'NTEMP':NTEMP,'NTEMPL':NTEMPL,'NZ':NZ,\
                'templam':templam,'temp_seds':temp_seds,'da':da,'db':db}
    ###### .pz
    if os.path.exists(root+'.pz'):
        f = open(root+'.pz','rb')
        s = np.fromfile(file=f,dtype=np.int32, count=2)
        NZ=s[0]
        NOBJ=s[1]
        # chi-square of the fit on the redshift grid, per object
        chi2fit = np.fromfile(file=f,dtype=np.double,count=NZ*NOBJ).reshape((NOBJ,NZ)).transpose()
        ### This will break if APPLY_PRIOR No
        s = np.fromfile(file=f,dtype=np.int32, count=1)
        if len(s) > 0:
            # prior information is present in the file
            NK = s[0]
            kbins = np.fromfile(file=f,dtype=np.double,count=NK)
            priorzk = np.fromfile(file=f, dtype=np.double, count=NZ*NK).reshape((NK,NZ)).transpose()
            kidx = np.fromfile(file=f,dtype=np.int32,count=NOBJ)
            pz = {'NZ':NZ,'NOBJ':NOBJ,'NK':NK, 'chi2fit':chi2fit, 'kbins':kbins, 'priorzk':priorzk,'kidx':kidx}
        else:
            pz = None
        f.close()
    else:
        pz = None
    # dead code path kept from the original eazyPy implementation
    if False:
        f = open(root+'.zbin','rb')
        s = np.fromfile(file=f,dtype=np.int32, count=1)
        NOBJ=s[0]
        z_a = np.fromfile(file=f,dtype=np.double,count=NOBJ)
        z_p = np.fromfile(file=f,dtype=np.double,count=NOBJ)
        z_m1 = np.fromfile(file=f,dtype=np.double,count=NOBJ)
        z_m2 = np.fromfile(file=f,dtype=np.double,count=NOBJ)
        z_peak = np.fromfile(file=f,dtype=np.double,count=NOBJ)
        f.close()
    ###### Done.
    return tempfilt, coeffs, temp_sed, pz
class Pointing():
    """ Generalization of GN1, GS1, ERSPRIME, etc
    To change field-dependent catalog, seg map, ref image, and padding
    only need to change them here.

    Any field name containing 'N' is treated as GOODS-North, any containing
    'S' as GOODS-South; the corresponding reference catalogs, segmentation
    map, reference image and eazy-py parameter dict are attached as
    attributes.
    """
    def __init__(self, field, ref_filter):
        if 'N' in field.upper():
            # GOODS-North configuration
            self.pad = 200
            #self.radec_catalog = PATH_TO_CATS + '/goodsN_radec.cat'
            self.radec_catalog = PATH_TO_CATS + '/gdn_radec_f140_14_24.cat'
            self.seg_map = PATH_TO_CATS + '/Goods_N_plus_seg.fits'
            self.catalog = PATH_TO_CATS + '/goodsn-F105W-astrodrizzle-v4.4_drz_sub_plus.cat'
            #self.catalog = PATH_TO_CATS + '/goodsn-v4.4-withunmatched.cat'
            self.ref_image = PATH_TO_CATS + '/goodsn-F105W-astrodrizzle-v4.4_drz_sci.fits'
            #self.tempfilt, self.coeffs, self.temp_sed, self.pz = readEazyBinary(MAIN_OUTPUT_FILE='goodsn_3dhst.v4.4', OUTPUT_DIRECTORY=PATH_TO_CATS, CACHE_FILE='Same')
            self.params = {}
            #self.params['CATALOG_FILE'] = PATH_TO_CATS + '/{0}_3dhst.{1}.cats/Catalog/{0}_3dhst.{1}.cat'.format('goodsn', 'v4.3')
            # NOTE(review): the extra third/fourth .format() arguments below
            # are ignored by the two-placeholder template — confirm intent.
            self.params['CATALOG_FILE'] = PATH_TO_CATS + '/{0}_3dhst.{1}.cats/Catalog/{0}_3dhst.{1}.cat'.format('goodsn', 'v4.4', 'goodsn', 'v4.4')
            self.params['Z_STEP'] = 0.002
            self.params['Z_MAX'] = 4
            self.params['MAIN_OUTPUT_FILE'] = '{0}_3dhst.{1}.eazypy'.format('goodsn', 'v4.4')
            self.params['PRIOR_FILTER'] = 205
            # Milky Way extinction E(B-V) per 3D-HST field
            self.params['MW_EBV'] = {'aegis':0.0066, 'cosmos':0.0148, 'goodss':0.0069,
                                     'uds':0.0195, 'goodsn':0.0103}['goodsn']
            self.params['TEMPLATES_FILE'] = 'templates/fsps_full/tweak_fsps_QSF_12_v3.param'
            #self.translate_file = PATH_TO_CATS + '/{0}_3dhst.{1}.cats/Eazy/{0}_3dhst.{1}.translate'.format('goodsn', 'v4.3')
            self.translate_file = PATH_TO_CATS + '/{0}_{1}.translate'.format('goodsn', 'v4.4')
        elif 'S' in field.upper():
            # GOODS-South configuration
            self.pad = 200 # grizli default
            #self.radec_catalog = '../Catalogs/goodsS_radec.cat'
            #self.radec_catalog = PATH_TO_CATS + '/goodsS_radec.cat'
            self.radec_catalog = PATH_TO_CATS + '/gds_radec_f140_14_24.cat'
            self.seg_map = PATH_TO_CATS + '/Goods_S_plus_seg.fits'
            self.catalog = PATH_TO_CATS + '/goodss-F105W-astrodrizzle-v4.3_drz_sub_plus.cat'
            #self.catalog = PATH_TO_CATS + '/goodss-v4.4-withunmatched.cat'
            self.ref_image = PATH_TO_CATS + '/goodss-F105W-astrodrizzle-v4.3_drz_sci.fits'
            #self.tempfilt, self.coeffs, self.temp_sed, self.pz = readEazyBinary(MAIN_OUTPUT_FILE='goodss_3dhst.v4.3', OUTPUT_DIRECTORY=PATH_TO_CATS, CACHE_FILE='Same')
            self.params = {}
            #self.params['CATALOG_FILE'] = PATH_TO_CATS + '/{0}_3dhst.{1}.cats/Catalog/{0}_3dhst.{1}.cat'.format('goodss', 'v4.3')
            self.params['CATALOG_FILE'] = PATH_TO_CATS + '/{0}_3dhst.{1}.cats/Catalog/{0}_3dhst.{1}.cat'.format('goodss', 'v4.4', 'goodss', 'v4.4')
            self.params['Z_STEP'] = 0.002
            self.params['Z_MAX'] = 4
            self.params['MAIN_OUTPUT_FILE'] = '{0}_3dhst.{1}.eazypy'.format('goodss', 'v4.4')
            self.params['PRIOR_FILTER'] = 205
            # NOTE(review): keys 'goodsn' here even for the southern field,
            # mirroring the original code — confirm this is intentional.
            self.params['MW_EBV'] = {'aegis':0.0066, 'cosmos':0.0148, 'goodss':0.0069,
                                     'uds':0.0195, 'goodsn':0.0103}['goodsn']
            self.params['TEMPLATES_FILE'] = 'templates/fsps_full/tweak_fsps_QSF_12_v3.param'
            #self.translate_file = PATH_TO_CATS + '/{0}_3dhst.{1}.cats/Eazy/{0}_3dhst.{1}.translate'.format('goodss', 'v4.3')
            self.translate_file = PATH_TO_CATS + '/{0}_{1}.translate'.format('goodss', 'v4.4')
def grizli_getfiles(run = True):
    """Find raw FLT exposures and group them into visits.

    Parameters
    ----------
    run : bool
        If False, skip the search entirely.

    Returns
    -------
    visits, filters
        The lists returned by ``grizli.utils.parse_flt_files``, or
        ``(None, None)`` when ``run`` is False.
    """
    if not run:
        # Return a 2-tuple so `visits, filters = grizli_getfiles(...)` does
        # not raise TypeError when this step is skipped (the original
        # returned a bare None here, which callers cannot unpack).
        return None, None
    print('Running grizli_getfiles...')  # was a bare string (no-op) in the original
    os.chdir(PATH_TO_PREP)
    files = glob('%s/*flt.fits' % PATH_TO_RAW)
    info = grizli.utils.get_flt_info(files)
    visits, filters = grizli.utils.parse_flt_files(info=info, uniquename=True)
    return visits, filters
def grizli_prep(visits, field = '', run = True):
    """Astrometrically align and preprocess direct/grism visit pairs.

    For each direct-image visit whose filter matches a grism's reference
    filter (G102 -> F105W, G141 -> F140W), find the grism visit taken at
    the same pointing (same basename) and run
    ``process_direct_grism_visit`` on the pair, aligning to the field's
    radec catalog.

    Parameters
    ----------
    visits : list of dict
        Visit dictionaries from ``grizli.utils.parse_flt_files``.
    field : str
        Field name forwarded to ``Pointing``.
    run : bool
        If False, do nothing.
    """
    if not run:
        return
    print('Running grizli_prep...')  # was a bare string (no-op) in the original
    print('\n\n\n\n\n\n\n')
    product_names = np.array([visit['product'] for visit in visits])
    filter_names = np.array([visit['product'].split('-')[-1] for visit in visits])
    basenames = np.array([visit['product'].split('.')[0]+'.0' for visit in visits])
    for ref_grism, ref_filter in [('G102', 'F105W'), ('G141', 'F140W')]:
        print('Processing %s + %s visits'%(ref_grism, ref_filter))
        for v, visit in enumerate(visits):
            basename = basenames[v]
            filt1 = filter_names[v]
            field_in_contest = basename.split('-')[0]
            if ref_filter.lower() == filt1.lower():
                # Found a direct image; look for its grism counterpart at
                # the same pointing (computed once instead of twice).
                match = np.where((basenames == basename) &
                                 (filter_names == ref_grism.lower()))[0]
                if len(match) > 0:
                    grism_index = match[0]
                    p = Pointing(field = field, ref_filter = ref_filter)
                    radec_catalog = p.radec_catalog
                    print(field_in_contest, visits[grism_index], radec_catalog)
                    status = process_direct_grism_visit(direct = visit,
                                                        grism = visits[grism_index],
                                                        radec = radec_catalog,
                                                        align_mag_limits = [14, 24])
                else:
                    print('no grism associated with direct image %s'%basename)
    # NOTE(review): `filters` is not defined in this function; the original
    # relied on a module-level global of that name existing at call time.
    return visits, filters
def grizli_model(visits, field = '', ref_filter_1 = 'F105W', ref_grism_1 = 'G102', ref_filter_2 = 'F140W', ref_grism_2 = 'G141', run = True, new_model = False, mag_lim = 25):
    """Build (or reload) the grizli GroupFLT contamination model for a field.

    Collects every grism exposure paired with a direct image taken in either
    reference filter, then either computes and saves a new contamination
    model (``new_model=True``) or loads the previously saved one.

    Returns
    -------
    grp : GroupFLT instance, or None when ``run`` is False.
    """
    if not run:
        return None
    all_grism_files = []
    all_direct_files = []
    product_names = np.array([visit['product'] for visit in visits])
    filter_names = np.array([visit['product'].split('-')[-1] for visit in visits])
    basenames = np.array([visit['product'].split('.')[0]+'.0' for visit in visits])
    for v, visit in enumerate(visits):
        basename = basenames[v]
        filt1 = filter_names[v]
        if (ref_filter_1.lower() in filt1) or (ref_filter_2.lower() in filt1):
            all_direct_files.extend(visit['files'])
            # Grism exposures at the same pointing as this direct image.
            grism_index_1 = np.where((basenames == basename) & (filter_names == ref_grism_1.lower()))[0]
            grism_index_2 = np.where((basenames == basename) & (filter_names == ref_grism_2.lower()))[0]
            if len(grism_index_1) > 0: all_grism_files.extend(visits[grism_index_1[0]]['files'])
            if len(grism_index_2) > 0: all_grism_files.extend(visits[grism_index_2[0]]['files'])
    p = Pointing(field=field, ref_filter=ref_filter_1)
    if not new_model: print('Loading contamination models...')
    else: print('Initializing contamination models...')
    grp = GroupFLT(
        grism_files=all_grism_files,
        direct_files=[],
        ref_file = p.ref_image,
        seg_file = p.seg_map,
        catalog = p.catalog,
        pad=p.pad,
        cpu_count=4)
    if new_model:
        print('Computing contamination models with flat model...')
        # Honor the caller-supplied magnitude limit; the original hard-coded
        # 25 here, silently ignoring the mag_lim parameter.
        grp.compute_full_model(mag_limit=mag_lim, cpu_count = 4)
        print('Refine continuum/contamination models with poly_order polynomial, subtracting off contamination..')
        grp.refine_list(poly_order=2, mag_limits=[16, 24], verbose=False)
        print('Saving contamination models')
        grp.save_full_data()
    return grp
def grizli_beams(grp, id, min_id, mag, field = '', mag_lim = 35, mag_lim_lower = 35,fcontam = 0.2):
    """Extract the 2D spectral beams for one object and write a beams.fits file.

    Objects outside the magnitude window [mag_lim_lower, mag_lim] or with
    id <= min_id are skipped, as are objects with no overlapping beams.
    """
    # Guard clause replaces the original's single big conditional body.
    if not ((mag <= mag_lim) & (mag >= mag_lim_lower) & (id > min_id)):
        return
    cutouts = grp.get_beams(id, size=80)
    # Beam extraction can be done separately from fitting: the saved file is
    # reloaded later without needing the contamination models in memory.
    if cutouts == []:
        return
    print("beams: ", cutouts)
    multibeam = grizli.multifit.MultiBeam(cutouts, fcontam=fcontam, group_name=field)
    multibeam.write_master_fits()
def grizli_fit(id, min_id, mag, field = '', mag_lim = 35, mag_lim_lower = 35, run = True,
               id_choose = None, ref_filter = 'F105W', use_pz_prior = True, use_phot = True,
               scale_phot = True, templ0 = None, templ1 = None, ep = None, pline = None,
               fcontam = 0.2, phot_scale_order = 1, use_psf = False, fit_without_phot = True, zr = [0., 12.]):
    # Fit redshift and emission lines for one object from its saved
    # beams.fits file via grizli.fitting.run_all.  Objects that already have
    # a .full.fits output, fall outside [mag_lim_lower, mag_lim], or have
    # id <= min_id are skipped.
    # NOTE(review): `zr = [0., 12.]` is a mutable default argument; it is only
    # read here, but confirm no caller mutates it.
    if os.path.exists(field + '_' + '%.5i.full.fits'%id): return
    if (mag <= mag_lim) & (mag >=mag_lim_lower) & (id > min_id):
        # When id_choose is given, fit only that single object.
        if (id_choose is not None) & (id != id_choose): return
        #if os.path.isfile(field + '_' + '%.5i.stack.fits'%id): return
        if os.path.isfile(field + '_' + '%.5i.beams.fits'%id):
            print('Reading in beams.fits file for %.5i'%id)
            mb = grizli.multifit.MultiBeam(field + '_' + '%.5i.beams.fits'%id, fcontam=fcontam, group_name=field)
            wave = np.linspace(2000,2.5e4,100)
            try:
                print ('creating poly_templates...')
                poly_templates = grizli.utils.polynomial_templates(wave=wave, order=7,line=False)
                # Order-7 polynomial fit at z=0 used only for continuum subtraction.
                pfit = mb.template_at_z(z=0, templates=poly_templates, fit_background=True, fitter='lstsq', fwhm=1400, get_uncertainties=2)
            except:
                print ('exception in poly_templates...')
                return
            # Fit polynomial model for initial continuum subtraction
            if pfit != None:
                #try:
                try:
                    print ('drizzle_grisms_and_PAs...')
                    hdu, fig = mb.drizzle_grisms_and_PAs(size=32, fcontam=fcontam, flambda=False, scale=1,
                                                         pixfrac=0.5, kernel='point', make_figure=True, usewcs=False,
                                                         zfit=pfit,diff=True)
                    # Save drizzled ("stacked") 2D trace as PNG and FITS
                    fig.savefig('{0}_diff_{1:05d}.stack.png'.format(field, id))
                    hdu.writeto('{0}_diff_{1:05d}.stack.fits'.format(field, id), clobber=True)
                except:
                    # Stacking failures are non-fatal; the fit below still runs.
                    pass
                if use_pz_prior:
                    #use redshift prior from z_phot
                    # NOTE(review): `p` is not defined in this function; this
                    # branch relies on a module-level Pointing named `p`
                    # existing at call time (it is created in __main__).
                    prior = np.zeros((2, len(p.tempfilt['zgrid'])))
                    prior[0] = p.tempfilt['zgrid']
                    prior[1] = p.pz['chi2fit'][:,id]
                else:
                    prior = None
                # NOTE(review): `prior` computed above is never passed to
                # run_all below (both calls pass prior=None) — confirm intent.
                if fit_without_phot: phot = None
                else:
                    print ('reading phot...')
                    tab = utils.GTable()
                    tab['ra'], tab['dec'], tab['id'] = [mb.ra], [mb.dec], id
                    phot, ii, dd = ep.get_phot_dict(tab['ra'][0], tab['dec'][0])
                # Gabe suggests use_psf = True for point sources
                # NOTE(review): the try/except variant of this call is disabled
                # (`if False:`); only the bare call in the else branch runs.
                if False:
                    try:
                        out = grizli.fitting.run_all(
                            id,
                            t0=templ0,
                            t1=templ1,
                            fwhm=1200,
                            zr=zr, #zr=[0.0, 12.0], #suggests zr = [0, 12.0] if we want to extend redshift fit
                            dz=[0.004, 0.0005],
                            fitter='nnls',
                            group_name=field,# + '_%i'%phot_scale_order,
                            fit_stacks=False, #suggests fit_stacks = False, fit to FLT files
                            prior=None,
                            fcontam=fcontam, #suggests fcontam = 0.2
                            pline=pline,
                            mask_sn_limit=np.inf, #suggests mask_sn_limit = np.inf
                            fit_only_beams=True, #suggests fit_only_beams = True
                            fit_beams=False, #suggests fit_beams = False
                            root=field,
                            fit_trace_shift=False,
                            bad_pa_threshold = np.inf, #suggests bad_pa_threshold = np.inf
                            phot=phot,
                            verbose=True,
                            scale_photometry=phot_scale_order,
                            show_beams=True,
                            use_psf = use_psf) #default: False
                    except:
                        print ('----------------\n----------------\n----------------\n----------------\n----------------\n')
                        print ('EXCEPTION IN FIT', id, mag)
                        print ('----------------\n----------------\n----------------\n----------------\n----------------\n')
                        pass
                else:
                    out = grizli.fitting.run_all(
                        id,
                        t0=templ0,
                        t1=templ1,
                        fwhm=1200,
                        zr=zr, #zr=[0.0, 12.0], #suggests zr = [0, 12.0] if we want to extend redshift fit
                        dz=[0.004, 0.0005],
                        fitter='nnls',
                        group_name=field,# + '_%i'%phot_scale_order,
                        fit_stacks=False, #suggests fit_stacks = False, fit to FLT files
                        prior=None,
                        fcontam=fcontam, #suggests fcontam = 0.2
                        pline=pline,
                        mask_sn_limit=np.inf, #suggests mask_sn_limit = np.inf
                        fit_only_beams=True, #suggests fit_only_beams = True
                        fit_beams=False, #suggests fit_beams = False
                        root=field,
                        fit_trace_shift=False,
                        bad_pa_threshold = np.inf, #suggests bad_pa_threshold = np.inf
                        phot=phot,
                        verbose=True,
                        scale_photometry=phot_scale_order,
                        show_beams=True,
                        use_psf = use_psf) #default: False
                    print('Finished', id, mag)
            else: return
def retrieve_archival_data(field, retrieve_bool = False):
    """Query MAST for archival G102/G141 observations overlapping `field`.

    Runs a grizli pipeline query for proposal 14227 pointings at the target,
    finds overlapping visits in the relevant grisms/filters, and prints the
    proposal IDs found.  The actual download (auto_script.fetch_files) is
    left commented out.

    Parameters
    ----------
    field : str
        Target name passed to the MAST query.
    retrieve_bool : bool
        Do nothing when False.
    """
    if retrieve_bool == False: return
    os.chdir(HOME_PATH)
    parent = query.run_query(box = None, proposal_id = [14227], instruments=['WFC3/IR', 'ACS/WFC'],
                             filters = ['G102'], target_name = field)
    tabs = overlaps.find_overlaps(parent, buffer_arcmin=0.1,
                                  filters=['G102', 'G141'],
                                  instruments=['WFC3/IR','WFC3/UVIS','ACS/WFC'], close=False)
    pids = list(np.unique(tabs[0]['proposal_id']))
    tabs = overlaps.find_overlaps(parent, buffer_arcmin=0.1, proposal_id = pids,
                                  filters=['G102', 'G141', 'F098M', 'F105W', 'F125W', 'F140W'],
                                  instruments=['WFC3/IR','WFC3/UVIS','ACS/WFC'], close=False)
    footprint_fits_file = glob('*footprint.fits')[0]
    # BUG FIX: str.strip('_footprint.fits') removes any of those *characters*
    # from both ends of the name (e.g. leading/trailing 'f', 'i', 't', 's',
    # '.', '_', ...), not the literal suffix.  Slice the suffix off instead.
    suffix = '_footprint.fits'
    if footprint_fits_file.endswith(suffix):
        jtargname = footprint_fits_file[:-len(suffix)]
    else:
        jtargname = footprint_fits_file
    #auto_script.fetch_files(field_root=jtargname, HOME_PATH=HOME_PATH, remove_bad=True, reprocess_parallel=False)
    print (pids)
if __name__ == '__main__':
    # Driver: parse CLI options, then run the requested pipeline stages
    # (retrieve -> getfiles -> prep -> model -> beams -> catalog -> fit).
    # NOTE(review): `global` at module scope is a no-op; kept from the original.
    global PATH_TO_RAW, PATH_TO_PREP, PATH_TO_SCRIPTS, HOME_PATH, to_fits
    #to_fits = np.array([9116, 16736, 18108, 15610, 19451])
    args = parse()
    #to_fits = np.array([17829])
    #id_choose = 23116
    # Unpack command-line options into locals.
    field = args['field']
    run_parallel = args['run_parallel']
    mag_lim = args['mag_lim']
    mag_max = args['mag_max']
    files_bool = args['do_files']
    retrieve_bool = args['do_retrieve']
    prep_bool = args['do_prep']
    model_bool = args['do_model']
    on_jase = args['on_jase']
    new_model = args['do_new_model']
    fit_bool = args['do_fit']
    beams_bool = args['do_beams']
    use_psf = args['use_psf']
    fit_min_id = args['fit_min_id']
    n_jobs = args['n_jobs']
    id_choose = args['id_choose']
    phot_scale_order = args['pso']
    fit_without_phot = args['fwop']
    PATH_TO_SCRIPTS = args['PATH_TO_SCRIPTS']
    PATH_TO_CATS = args['PATH_TO_CATS']
    #PATH_TO_CATS = '/Users/rsimons/Desktop/clear/Catalogs'
    PATH_TO_HOME = args['PATH_TO_HOME']
    # NOTE(review): both branches assign identical paths, overriding the CLI
    # values either way — presumably one branch was meant to differ; confirm.
    if on_jase:
        PATH_TO_HOME = '/Users/rsimons/Desktop/clear/grizli_extractions'
        PATH_TO_SCRIPTS = '/Users/rsimons/Dropbox/git/clear_local'
    else:
        PATH_TO_HOME = '/Users/rsimons/Desktop/clear/grizli_extractions'
        PATH_TO_SCRIPTS = '/Users/rsimons/Dropbox/git/clear_local'
    HOME_PATH = PATH_TO_HOME + '/' + field
    make_catalog = args['make_catalog']
    # A phot_scale_order of -1 signals "no photometric scaling" downstream.
    if fit_without_phot: phot_scale_order = -1
    # Directory layout differs on the laptop ("jase") vs. the cluster.
    if on_jase:
        PATH_TO_PREP = glob(HOME_PATH + '/Prep')[0]
    else:
        PATH_TO_RAW = glob(HOME_PATH + '/*/RAW')[0]
        PATH_TO_PREP = glob(HOME_PATH + '/*/Prep')[0]
    # Echo the full run configuration.
    print('\n\n\n\n###################\nParameters\n\n')
    print('field ', field )
    print('mag_lim ', mag_lim )
    print('mag_max ', mag_max )
    print('files_bool ', files_bool )
    print('retrieve_bool ', retrieve_bool )
    print('prep_bool ', prep_bool )
    print('model_bool ', model_bool )
    print('new_model ', new_model )
    print('beams_bool ', beams_bool )
    print('fit_bool ', fit_bool )
    print('use_psf ', use_psf )
    print('fit_min_id ', fit_min_id )
    print('n_jobs ', n_jobs )
    print('id_choose ', id_choose )
    print('phot_scale_order ', phot_scale_order )
    print('fit_without_phot ', fit_without_phot )
    print('PATH_TO_SCRIPTS ', PATH_TO_SCRIPTS )
    print('PATH_TO_CATS ', PATH_TO_CATS )
    print('PATH_TO_HOME ', PATH_TO_HOME )
    print('HOME_PATH ', HOME_PATH )
    print('\n\n\n\n####################\n\n\n\n')
    if not os.path.isdir(HOME_PATH): os.system('mkdir %s'%HOME_PATH)
    print ('Changing to %s'%HOME_PATH)
    os.chdir(HOME_PATH)
    # Stage 1: optional MAST retrieval (returns None; `extra` is unused).
    extra = retrieve_archival_data(field = field, retrieve_bool = retrieve_bool)
    print ('Changing to %s'%PATH_TO_PREP)
    os.chdir(PATH_TO_PREP)
    # Stage 2: group raw FLT files into visits.
    visits, filters = grizli_getfiles(run = files_bool)
    # Stage 3: astrometric alignment / preprocessing.
    if prep_bool:
        grizli_prep(visits = visits, field = field, run = prep_bool)
    # Stage 4a: compute a fresh contamination model.
    if new_model:
        grp = grizli_model(visits, field = field, ref_filter_1 = 'F105W', ref_grism_1 = 'G102', ref_filter_2 = 'F140W', ref_grism_2 = 'G141',
                           run = model_bool, new_model = new_model, mag_lim = mag_lim)
    # Stage 4b: reload the saved model and extract beams for each object.
    if beams_bool:
        print ('making beams')
        grp = grizli_model(visits, field = field, ref_filter_1 = 'F105W', ref_grism_1 = 'G102', ref_filter_2 = 'F140W', ref_grism_2 = 'G141',
                           run = model_bool, new_model = False, mag_lim = mag_lim)
        Parallel(n_jobs = n_jobs, backend = 'threading')(delayed(grizli_beams)(grp, id = id, min_id = fit_min_id, mag = mag, field = field,
                                                                               mag_lim = mag_lim, mag_lim_lower = mag_max)
                                                         for id, mag in zip(np.array(grp.catalog['NUMBER']), np.array(grp.catalog['MAG_AUTO'])))
    # Stage 5: dump the (id, magnitude) catalog used later by the fit stage.
    if make_catalog:
        grp = grizli_model(visits, field = field, ref_filter_1 = 'F105W', ref_grism_1 = 'G102', ref_filter_2 = 'F140W', ref_grism_2 = 'G141',
                           run = model_bool, new_model = False, mag_lim = mag_lim)
        to_save = np.array([grp.catalog['NUMBER'], grp.catalog['MAG_AUTO']])
        np.save('/user/rsimons/grizli_extractions/Catalogs/model_catalogs/%s_catalog.npy'%field, to_save)
    # Stage 6: redshift/line fitting of the extracted beams.
    if fit_bool:
        print ('Changing to %s'%PATH_TO_PREP)
        os.chdir(PATH_TO_PREP)
        # Continuum + line-complex templates for the coarse redshift grid.
        templ0 = grizli.utils.load_templates(fwhm=1200, line_complexes=True, stars=False,
                                             full_line_list=None, continuum_list=None,
                                             fsps_templates=True)
        # Load individual line templates for fitting the line fluxes
        templ1 = grizli.utils.load_templates(fwhm=1200, line_complexes=False, stars=False,
                                             full_line_list=None, continuum_list=None,
                                             fsps_templates=True)
        #templ0, templ1 = grizli.utils.load_quasar_templates(uv_line_complex = False, broad_fwhm = 2800,
        #                                                    narrow_fwhm = 1000, fixed_narrow_lines = True)
        # Module-level `p` is also read by grizli_fit's use_pz_prior branch.
        p = Pointing(field = field, ref_filter = 'F105W')
        pline = {'kernel': 'point', 'pixfrac': 0.2, 'pixscale': 0.1, 'size': 16, 'wcs': None}
        if not fit_without_phot:
            # Set up EAZY photometric redshift support.
            eazy.symlink_eazy_inputs(path=os.path.dirname(eazy.__file__)+'/data')#, path_is_env=False)
            ez = eazy.photoz.PhotoZ(param_file=None, translate_file=p.translate_file,
                                    zeropoint_file=None, params=p.params,
                                    load_prior=True, load_products=False)
            ep = photoz.EazyPhot(ez, grizli_templates=templ0, zgrid=ez.zgrid)
        else:
            ep = None
        # Catalog saved by the make_catalog stage: row 0 = ids, row 1 = mags.
        cat_ = np.load('/user/rsimons/grizli_extractions/Catalogs/model_catalogs/%s_catalog.npy'%field)[()]
        nums = cat_[0]
        mags = cat_[1]
        if run_parallel:
            Parallel(n_jobs = n_jobs, backend = 'threading')(delayed(grizli_fit)(id = id, min_id = fit_min_id, mag = mag, field = field,
                                                                                 mag_lim = mag_lim, mag_lim_lower = mag_max, run = fit_bool,
                                                                                 id_choose = id_choose, use_pz_prior = False, use_phot = True,
                                                                                 scale_phot = True, templ0 = templ0, templ1 = templ1,
                                                                                 ep = ep, pline = pline, phot_scale_order = phot_scale_order, use_psf = use_psf, fit_without_phot = fit_without_phot,
                                                                                 zr = [args['zr_min'], args['zr_max']])
                                                             for id, mag in zip(nums.astype('int'), mags))
        # NOTE(review): this serial loop runs even after the parallel branch,
        # re-visiting every object; grizli_fit returns immediately for objects
        # whose .full.fits already exists, so it acts as a mop-up pass —
        # confirm whether an `else:` was intended instead.
        for id, mag in zip(nums.astype('int'), mags):
            grizli_fit(id = id, min_id = fit_min_id, mag = mag, field = field,
                       mag_lim = mag_lim, mag_lim_lower = mag_max, run = fit_bool,
                       id_choose = id_choose, use_pz_prior = False, use_phot = True,
                       scale_phot = True, templ0 = templ0, templ1 = templ1,
                       ep = ep, pline = pline, phot_scale_order = phot_scale_order, use_psf = use_psf, fit_without_phot = fit_without_phot,
                       zr = [args['zr_min'], args['zr_max']])
    print ('Changing to %s'%PATH_TO_SCRIPTS)
    os.chdir(PATH_TO_SCRIPTS)
| mit |
mhue/scikit-learn | sklearn/utils/graph.py | 289 | 6239 | """
Graph utilities and algorithms
Graphs are represented with their adjacency matrices, preferably using
sparse matrices.
"""
# Authors: Aric Hagberg <hagberg@lanl.gov>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Jake Vanderplas <vanderplas@astro.washington.edu>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from .validation import check_array
from .graph_shortest_path import graph_shortest_path
###############################################################################
# Path and connected component analysis.
# Code adapted from networkx
def single_source_shortest_path_length(graph, source, cutoff=None):
    """Breadth-first shortest path lengths from ``source``.

    Parameters
    ----------
    graph : sparse matrix or 2D array (preferably LIL matrix)
        Adjacency matrix of the graph.
    source : node label
        Starting node for the search.
    cutoff : integer, optional
        Depth to stop the search - only paths of length <= cutoff
        are returned.

    Returns
    -------
    dict mapping each reachable node to its hop distance from ``source``.

    Examples
    --------
    >>> from sklearn.utils.graph import single_source_shortest_path_length
    >>> import numpy as np
    >>> graph = np.array([[ 0, 1, 0, 0],
    ...                   [ 1, 0, 1, 0],
    ...                   [ 0, 1, 0, 1],
    ...                   [ 0, 0, 1, 0]])
    >>> single_source_shortest_path_length(graph, 0)
    {0: 0, 1: 1, 2: 2, 3: 3}
    >>> single_source_shortest_path_length(np.ones((6, 6)), 2)
    {0: 1, 1: 1, 2: 0, 3: 1, 4: 1, 5: 1}
    """
    # LIL format gives O(1) access to each node's neighbor list via .rows.
    if sparse.isspmatrix(graph):
        adjacency = graph.tolil()
    else:
        adjacency = sparse.lil_matrix(graph)
    path_length = {}     # node -> number of hops when first reached
    depth = 0            # current BFS level
    frontier = [source]  # nodes to expand at this level
    while frontier:
        # Expand the current level; collect the next one as a set.
        current, frontier = frontier, set()
        for node in current:
            if node in path_length:
                continue
            path_length[node] = depth
            frontier.update(adjacency.rows[node])
            if cutoff is not None and cutoff <= depth:
                break
        depth += 1
    return path_length
# Use SciPy's connected_components when this SciPy version exposes it on the
# sparse namespace; otherwise fall back to the implementation bundled with
# scikit-learn.
if hasattr(sparse, 'connected_components'):
    connected_components = sparse.connected_components
else:
    from .sparsetools import connected_components
###############################################################################
# Graph laplacian
def graph_laplacian(csgraph, normed=False, return_diag=False):
    """ Return the Laplacian matrix of a directed graph.

    For non-symmetric graphs the out-degree is used in the computation.

    Parameters
    ----------
    csgraph : array_like or sparse matrix, 2 dimensions
        compressed-sparse graph, with shape (N, N).
    normed : bool, optional
        If True, then compute normalized Laplacian.
    return_diag : bool, optional
        If True, then return diagonal as well as laplacian.

    Returns
    -------
    lap : ndarray
        The N x N laplacian matrix of graph.
    diag : ndarray
        The length-N diagonal of the laplacian matrix.
        diag is returned only if return_diag is True.

    Notes
    -----
    The Laplacian matrix of a graph is sometimes referred to as the
    "Kirchoff matrix" or the "admittance matrix", and is useful in many
    parts of spectral graph theory. In particular, the eigen-decomposition
    of the laplacian matrix can give insight into many properties of the graph.
    For non-symmetric directed graphs, the laplacian is computed using the
    out-degree of each node.
    """
    if csgraph.ndim != 2 or csgraph.shape[0] != csgraph.shape[1]:
        raise ValueError('csgraph must be a square matrix or array')
    # Normalization divides by sqrt of the degrees, so integer graphs must be
    # promoted to float first.  np.integer covers all signed and unsigned
    # integer dtypes; the original used np.int / np.uint, which were removed
    # from NumPy's namespace (NumPy >= 1.24) and raise AttributeError there.
    if normed and np.issubdtype(csgraph.dtype, np.integer):
        csgraph = check_array(csgraph, dtype=np.float64, accept_sparse=True)
    if sparse.isspmatrix(csgraph):
        return _laplacian_sparse(csgraph, normed=normed,
                                 return_diag=return_diag)
    else:
        return _laplacian_dense(csgraph, normed=normed,
                                return_diag=return_diag)
def _laplacian_sparse(graph, normed=False, return_diag=False):
    """Laplacian of a sparse adjacency matrix (helper for graph_laplacian)."""
    n_nodes = graph.shape[0]
    # Work in COO format; negating the graph also copies it.
    if not graph.format == 'coo':
        lap = (-graph).tocoo()
    else:
        lap = -graph.copy()
    diag_mask = (lap.row == lap.col)
    if not diag_mask.sum() == n_nodes:
        # The sparsity pattern of the matrix has holes on the diagonal,
        # we need to fix that
        diag_idx = lap.row[diag_mask]
        diagonal_holes = list(set(range(n_nodes)).difference(diag_idx))
        # Append explicit (i, i) entries for the missing diagonal positions.
        new_data = np.concatenate([lap.data, np.ones(len(diagonal_holes))])
        new_row = np.concatenate([lap.row, diagonal_holes])
        new_col = np.concatenate([lap.col, diagonal_holes])
        lap = sparse.coo_matrix((new_data, (new_row, new_col)),
                                shape=lap.shape)
        diag_mask = (lap.row == lap.col)
    # Zero the diagonal before computing degrees (drops self-loops).
    lap.data[diag_mask] = 0
    # Out-degree of each node: negated row sums of -A.
    w = -np.asarray(lap.sum(axis=1)).squeeze()
    if normed:
        w = np.sqrt(w)
        w_zeros = (w == 0)
        w[w_zeros] = 1  # avoid dividing by zero for isolated nodes
        # Scale entry (i, j) by 1 / sqrt(d_i * d_j).
        lap.data /= w[lap.row]
        lap.data /= w[lap.col]
        # Diagonal is 1 for connected nodes, 0 for isolated ones.
        lap.data[diag_mask] = (1 - w_zeros[lap.row[diag_mask]]).astype(
            lap.data.dtype)
    else:
        lap.data[diag_mask] = w[lap.row[diag_mask]]
    if return_diag:
        return lap, w
    return lap
def _laplacian_dense(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
lap = -np.asarray(graph) # minus sign leads to a copy
# set diagonal to zero
lap.flat[::n_nodes + 1] = 0
w = -lap.sum(axis=0)
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap /= w
lap /= w[:, np.newaxis]
lap.flat[::n_nodes + 1] = (1 - w_zeros).astype(lap.dtype)
else:
lap.flat[::n_nodes + 1] = w.astype(lap.dtype)
if return_diag:
return lap, w
return lap
| bsd-3-clause |
xavierwu/scikit-learn | examples/ensemble/plot_forest_importances_faces.py | 403 | 1519 | """
=================================================
Pixel importances with a parallel forest of trees
=================================================
This example shows the use of forests of trees to evaluate the importance
of the pixels in an image classification task (faces). The hotter the pixel,
the more important.
The code below also illustrates how the construction and the computation
of the predictions can be parallelized within multiple jobs.
"""
print(__doc__)
from time import time
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.ensemble import ExtraTreesClassifier
# Number of cores to use to perform parallel fitting of the forest model
n_jobs = 1
# Load the faces dataset
data = fetch_olivetti_faces()
# Flatten each face image into a 1-D feature vector (one feature per pixel).
X = data.images.reshape((len(data.images), -1))
y = data.target
mask = y < 5  # Limit to 5 classes
X = X[mask]
y = y[mask]
# Build a forest and compute the pixel importances
print("Fitting ExtraTreesClassifier on faces data with %d cores..." % n_jobs)
t0 = time()
forest = ExtraTreesClassifier(n_estimators=1000,
                              max_features=128,
                              n_jobs=n_jobs,
                              random_state=0)
forest.fit(X, y)
print("done in %0.3fs" % (time() - t0))
importances = forest.feature_importances_
# Reshape the flat per-pixel importances back to image shape for display.
importances = importances.reshape(data.images[0].shape)
# Plot pixel importances
plt.matshow(importances, cmap=plt.cm.hot)
plt.title("Pixel importances with forests of trees")
plt.show()
| bsd-3-clause |
ashtonwebster/tl_algs | tests/test_trbag.py | 1 | 2668 |
# coding: utf-8
import pandas as pd
from sklearn.datasets.samples_generator import make_blobs
from sklearn.ensemble import RandomForestClassifier
import random
from tl_algs import peters, weighted, trbag, tl_baseline, burak
RAND_SEED = 2016
random.seed(RAND_SEED) # change this to see new random data!
# randomly generate some data
X, domain_index = make_blobs(n_samples=1000, centers=5, n_features=2,
                             cluster_std=5, random_state=RAND_SEED)
# randomly assigning domain and label
all_instances = pd.DataFrame({"x_coord" : [x[0] for x in X],
                              "y_coord" : [x[1] for x in X],
                              "domain_index" : domain_index,
                              "label" : [random.choice([True,False]) for _ in X]},
                             columns = ['x_coord','y_coord','domain_index', 'label']
                             )
#arbitrarily set domain index 0 as target
test_set_domain = 0
# we are going to get fifty instances as test data
# note that this means that some of the training set has target instances!
test_set = all_instances[all_instances.domain_index == test_set_domain].sample(50, random_state=RAND_SEED)
test_set_X = test_set.loc[:, ["x_coord", "y_coord"]].reset_index(drop=True)
test_set_y = test_set.loc[:, ["label"]].reset_index(drop=True)
# gather all non-test indexes
train_pool = all_instances.iloc[all_instances.index.difference(test_set.index), ]
train_pool_X = train_pool.loc[:, ["x_coord", "y_coord"]].reset_index(drop=True)
train_pool_y = train_pool["label"].reset_index(drop=True)
train_pool_domain = train_pool.domain_index
# Build two TrBag learners that differ only in their filter function
# (trbag.mvv_filter vs. trbag.sc_trbag_filter); everything else is shared.
transfer_learners = [
    trbag.TrBag(test_set_X=test_set_X,
                test_set_domain=test_set_domain,
                train_pool_X=train_pool_X,
                train_pool_y=train_pool_y,
                train_pool_domain=train_pool_domain,
                Base_Classifier=RandomForestClassifier,
                sample_size=test_set_y.shape[0],
                validate_proportion=0.5,
                filter_func=trbag.mvv_filter,
                vote_func=trbag.mean_confidence_vote,
                rand_seed=RAND_SEED
                ),
    trbag.TrBag(test_set_X=test_set_X,
                test_set_domain=test_set_domain,
                train_pool_X=train_pool_X,
                train_pool_y=train_pool_y,
                train_pool_domain=train_pool_domain,
                Base_Classifier=RandomForestClassifier,
                sample_size=test_set_y.shape[0],
                validate_proportion=0.5,
                filter_func=trbag.sc_trbag_filter,
                vote_func=trbag.mean_confidence_vote,
                rand_seed=RAND_SEED
                )
    ]
# Train, filter, and evaluate each learner on the held-out target data.
for transfer_learner in transfer_learners:
    print(transfer_learner.train_filter_test())
| mit |
mbeyeler/pulse2percept | pulse2percept/datasets/nanduri2012.py | 1 | 3284 | """`load_nanduri2012`"""
from os.path import dirname, join
import numpy as np
try:
import pandas as pd
has_pandas = True
except ImportError:
has_pandas = False
def load_nanduri2012(electrodes=None, task=None, shuffle=False, random_state=0):
    """Load data from [Nanduri2012]_

    Load the threshold data described in [Nanduri2012]_. Average thresholds
    were extracted from the figures of the paper using WebplotDigitizer.

    ===================   =====================
    Retinal implants:                   Argus I
    Subjects:                                 1
    Number of samples:                       95
    Number of features:                      14
    ===================   =====================

    The dataset includes the following features:

    ====================  ================================================
    subject               Subject ID, S06
    implant               Argus I
    electrode             Electrode ID, A2, A4, B1, C1, C4, D2, D3, D4
    task                  'rate' or 'size'
    stim_dur              Stimulus duration (ms)
    freq                  Stimulus frequency (Hz)
    amp_factor            Stimulus amplitude ratio over threshold
    brightness            Patient rated brightness compared to reference
                          stimulus
    pulse_dur             Pulse duration (ms)
    pulse_type            'cathodicFirst'
    interphase_dur        Interphase gap (ms)
    varied_param          Whether this trial is a part of 'amp' or 'freq'
                          modulation
    ====================  ================================================

    .. versionadded:: 0.7

    Parameters
    ----------
    electrodes : str | list of strings | None, optional
        Select data from a single electrode or a list of electrodes.
        By default, all electrodes are selected.
    task : 'rate' | 'size' | None, optional
        Select only brightness-rating ('rate') or size-rating ('size')
        trials. By default, trials from both tasks are selected.
    shuffle : boolean, optional
        If True, the rows of the DataFrame are shuffled.
    random_state : int | numpy.random.RandomState | None, optional
        Determines random number generation for dataset shuffling. Pass an int
        for reproducible output across multiple function calls.

    Returns
    -------
    data : pd.DataFrame
        The selected subset of the dataset. (NOTE(review): the original
        docstring said "144x16" here, while the table above says 95 samples
        x 14 features -- confirm against data/nanduri2012.csv.)
    """
    if not has_pandas:
        raise ImportError("You do not have pandas installed. "
                          "You can install it via $ pip install pandas.")
    # Load data from CSV:
    module_path = dirname(__file__)
    file_path = join(module_path, 'data', 'nanduri2012.csv')
    df = pd.read_csv(file_path)
    # Select subset of data:
    # Start from an all-True row mask and AND in each filter.
    idx = np.ones_like(df.index, dtype=np.bool_)
    if electrodes is not None:
        if isinstance(electrodes, str):
            electrodes = [electrodes]
        # OR together the rows matching any requested electrode.
        idx_electrode = np.zeros_like(df.index, dtype=np.bool_)
        for electrode in electrodes:
            idx_electrode |= df.electrode == electrode
        idx &= idx_electrode
    df = df[idx]
    if task is not None:
        if task not in ['rate', 'size']:
            raise ValueError("Task must be one of rate or size")
        df = df[df['task'] == task]
    if shuffle:
        df = df.sample(n=len(df), random_state=random_state)
    return df.reset_index(drop=True)
| bsd-3-clause |
CforED/Machine-Learning | examples/neighbors/plot_regression.py | 349 | 1402 | """
============================
Nearest Neighbors regression
============================
Demonstrate the resolution of a regression problem
using a k-Nearest Neighbor and the interpolation of the
target using both barycenter and constant weights.
"""
print(__doc__)
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
#
# License: BSD 3 clause (C) INRIA
###############################################################################
# Generate sample data
import numpy as np
import matplotlib.pyplot as plt
from sklearn import neighbors
np.random.seed(0)
# 40 sorted sample points in [0, 5) and a dense grid of 500 query points.
X = np.sort(5 * np.random.rand(40, 1), axis=0)
T = np.linspace(0, 5, 500)[:, np.newaxis]
y = np.sin(X).ravel()
# Add noise to targets
y[::5] += 1 * (0.5 - np.random.rand(8))  # perturb every 5th of the 40 targets
###############################################################################
# Fit regression model
n_neighbors = 5
# One subplot per weighting scheme: uniform vs. inverse-distance weights.
for i, weights in enumerate(['uniform', 'distance']):
    knn = neighbors.KNeighborsRegressor(n_neighbors, weights=weights)
    y_ = knn.fit(X, y).predict(T)
    plt.subplot(2, 1, i + 1)
    plt.scatter(X, y, c='k', label='data')
    plt.plot(T, y_, c='g', label='prediction')
    plt.axis('tight')
    plt.legend()
    plt.title("KNeighborsRegressor (k = %i, weights = '%s')" % (n_neighbors,
                                                                weights))
plt.show()
| bsd-3-clause |
google-research/google-research | xirl/xirl/evaluators/emb_visualizer.py | 1 | 2628 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""2D embedding visualizer."""
from typing import List
from .base import Evaluator
from .base import EvaluatorOutput
import matplotlib.pyplot as plt
import numpy as np
from sklearn.decomposition import PCA
from xirl.models import SelfSupervisedOutput
class EmbeddingVisualizer(Evaluator):
  """Visualize a 2D PCA projection of sampled embedding sequences."""

  def __init__(self, num_seqs):
    """Constructor.

    Args:
      num_seqs: How many embedding sequences to visualize.
    """
    # (The original docstring claimed a ValueError for an invalid distance
    # metric, but this constructor performs no such validation.)
    super().__init__(inter_class=True)
    self.num_seqs = num_seqs

  def _gen_emb_plot(self, embs):
    """Create a pyplot plot and save to buffer."""
    # Scatter each 2D embedding sequence into a single figure, then render
    # the figure to an RGB numpy array.
    fig = plt.figure()
    for emb in embs:
      plt.scatter(emb[:, 0], emb[:, 1])
    fig.canvas.draw()
    # Drop the alpha channel from the RGBA canvas buffer.
    img_arr = np.array(fig.canvas.renderer.buffer_rgba())[:, :, :3]
    plt.close()
    return img_arr

  def evaluate(self, outs):
    """Project sampled embedding sequences to 2D with PCA and plot them.

    Args:
      outs: List of model outputs, each carrying an `embs` array.

    Returns:
      EvaluatorOutput holding the rendered scatter-plot image.
    """
    embs = [o.embs for o in outs]
    # Randomly sample the embedding sequences we'd like to plot.
    seq_idxs = np.random.choice(
        np.arange(len(embs)), size=self.num_seqs, replace=False)
    seq_embs = [embs[idx] for idx in seq_idxs]
    # Subsample embedding sequences to make them the same length.
    seq_lens = [s.shape[0] for s in seq_embs]
    min_len = np.min(seq_lens)
    same_length_embs = []
    for emb in seq_embs:
      emb_len = len(emb)
      # Evenly strided frame indices, clipped to valid range and truncated
      # to exactly min_len entries.
      stride = emb_len / min_len
      idxs = np.arange(0.0, emb_len, stride).round().astype(int)
      idxs = np.clip(idxs, a_min=0, a_max=emb_len - 1)
      idxs = idxs[:min_len]
      same_length_embs.append(emb[idxs])
    # Flatten embeddings to perform PCA.
    same_length_embs = np.stack(same_length_embs)
    num_seqs, seq_len, emb_dim = same_length_embs.shape
    embs_flat = same_length_embs.reshape(-1, emb_dim)
    embs_2d = PCA(n_components=2, random_state=0).fit_transform(embs_flat)
    embs_2d = embs_2d.reshape(num_seqs, seq_len, 2)
    image = self._gen_emb_plot(embs_2d)
    return EvaluatorOutput(image=image)
| apache-2.0 |
zycdragonball/tensorflow | tensorflow/python/estimator/canned/dnn_test.py | 20 | 16058 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for dnn.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import tempfile
import numpy as np
import six
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.estimator.canned import dnn
from tensorflow.python.estimator.canned import dnn_testing_utils
from tensorflow.python.estimator.canned import prediction_keys
from tensorflow.python.estimator.export import export
from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.estimator.inputs import pandas_io
from tensorflow.python.feature_column import feature_column
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.summary.writer import writer_cache
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import queue_runner
# Optional pandas dependency: the pandas_input_fn tests below are skipped
# (via HAS_PANDAS) when pandas is absent or fails to import.
try:
  # pylint: disable=g-import-not-at-top
  import pandas as pd
  HAS_PANDAS = True
except IOError:
  # Pandas writes a temporary file during import. If it fails, don't use pandas.
  HAS_PANDAS = False
except ImportError:
  HAS_PANDAS = False
def _dnn_classifier_fn(*args, **kwargs):
  """Factory forwarded to the shared test bases; builds a DNNClassifier."""
  classifier = dnn.DNNClassifier(*args, **kwargs)
  return classifier
class DNNModelFnTest(dnn_testing_utils.BaseDNNModelFnTest, test.TestCase):
  """Runs the shared model-fn test suite against dnn._dnn_model_fn."""

  def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
    # Explicit base-class calls (not super()): both bases need distinct args.
    test.TestCase.__init__(self, methodName)
    dnn_testing_utils.BaseDNNModelFnTest.__init__(self, dnn._dnn_model_fn)
class DNNClassifierEvaluateTest(
    dnn_testing_utils.BaseDNNClassifierEvaluateTest, test.TestCase):
  """Runs the shared classifier-evaluate suite against DNNClassifier."""

  def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
    test.TestCase.__init__(self, methodName)
    dnn_testing_utils.BaseDNNClassifierEvaluateTest.__init__(
        self, _dnn_classifier_fn)
class DNNClassifierPredictTest(
    dnn_testing_utils.BaseDNNClassifierPredictTest, test.TestCase):
  """Runs the shared classifier-predict suite against DNNClassifier."""

  def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
    test.TestCase.__init__(self, methodName)
    dnn_testing_utils.BaseDNNClassifierPredictTest.__init__(
        self, _dnn_classifier_fn)
class DNNClassifierTrainTest(
    dnn_testing_utils.BaseDNNClassifierTrainTest, test.TestCase):
  """Runs the shared classifier-train suite against DNNClassifier."""

  def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
    test.TestCase.__init__(self, methodName)
    dnn_testing_utils.BaseDNNClassifierTrainTest.__init__(
        self, _dnn_classifier_fn)
def _dnn_regressor_fn(*args, **kwargs):
  """Factory forwarded to the shared test bases; builds a DNNRegressor."""
  regressor = dnn.DNNRegressor(*args, **kwargs)
  return regressor
class DNNRegressorEvaluateTest(
    dnn_testing_utils.BaseDNNRegressorEvaluateTest, test.TestCase):
  """Runs the shared regressor-evaluate suite against DNNRegressor."""

  def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
    test.TestCase.__init__(self, methodName)
    dnn_testing_utils.BaseDNNRegressorEvaluateTest.__init__(
        self, _dnn_regressor_fn)
class DNNRegressorPredictTest(
    dnn_testing_utils.BaseDNNRegressorPredictTest, test.TestCase):
  """Runs the shared regressor-predict suite against DNNRegressor."""

  def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
    test.TestCase.__init__(self, methodName)
    dnn_testing_utils.BaseDNNRegressorPredictTest.__init__(
        self, _dnn_regressor_fn)
class DNNRegressorTrainTest(
    dnn_testing_utils.BaseDNNRegressorTrainTest, test.TestCase):
  """Runs the shared regressor-train suite against DNNRegressor."""

  def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
    test.TestCase.__init__(self, methodName)
    dnn_testing_utils.BaseDNNRegressorTrainTest.__init__(
        self, _dnn_regressor_fn)
def _queue_parsed_features(feature_map):
  """Stages parsed feature tensors through a FIFO queue.

  A QueueRunner enqueues all tensors from `feature_map`; a single dequeue op
  is returned so downstream input functions read the features via the queue.

  Args:
    feature_map: dict mapping feature names to tensors.

  Returns:
    dict mapping the same feature names to the dequeued tensors.
  """
  keys = []
  values = []
  for key, tensor in six.iteritems(feature_map):
    keys.append(key)
    values.append(tensor)
  dtypes = [tensor.dtype for tensor in values]
  input_queue = data_flow_ops.FIFOQueue(capacity=100, dtypes=dtypes)
  queue_runner.add_queue_runner(
      queue_runner.QueueRunner(input_queue, [input_queue.enqueue(values)]))
  dequeued = input_queue.dequeue()
  return dict(zip(keys, dequeued))
class DNNRegressorIntegrationTest(test.TestCase):
  """End-to-end DNNRegressor tests: train, evaluate, predict, export."""

  def setUp(self):
    # Fresh model directory per test so checkpoints do not leak across tests.
    self._model_dir = tempfile.mkdtemp()

  def tearDown(self):
    if self._model_dir:
      writer_cache.FileWriterCache.clear()
      shutil.rmtree(self._model_dir)

  def _test_complete_flow(
      self, train_input_fn, eval_input_fn, predict_input_fn, input_dimension,
      label_dimension, batch_size):
    # Shared driver: trains a tiny DNNRegressor, then verifies evaluation
    # keys, prediction shapes, and SavedModel export.
    feature_columns = [
        feature_column.numeric_column('x', shape=(input_dimension,))]
    est = dnn.DNNRegressor(
        hidden_units=(2, 2),
        feature_columns=feature_columns,
        label_dimension=label_dimension,
        model_dir=self._model_dir)

    # TRAIN
    num_steps = 10
    est.train(train_input_fn, steps=num_steps)

    # EVALUATE
    scores = est.evaluate(eval_input_fn)
    self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP])
    self.assertIn('loss', six.iterkeys(scores))

    # PREDICT
    predictions = np.array([
        x[prediction_keys.PredictionKeys.PREDICTIONS]
        for x in est.predict(predict_input_fn)
    ])
    self.assertAllEqual((batch_size, label_dimension), predictions.shape)

    # EXPORT
    feature_spec = feature_column.make_parse_example_spec(feature_columns)
    serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
        feature_spec)
    export_dir = est.export_savedmodel(tempfile.mkdtemp(),
                                       serving_input_receiver_fn)
    self.assertTrue(gfile.Exists(export_dir))

  def test_numpy_input_fn(self):
    """Tests complete flow with numpy_input_fn."""
    label_dimension = 2
    batch_size = 10
    data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
    data = data.reshape(batch_size, label_dimension)
    # learn y = x
    train_input_fn = numpy_io.numpy_input_fn(
        x={'x': data},
        y=data,
        batch_size=batch_size,
        num_epochs=None,
        shuffle=True)
    eval_input_fn = numpy_io.numpy_input_fn(
        x={'x': data},
        y=data,
        batch_size=batch_size,
        shuffle=False)
    predict_input_fn = numpy_io.numpy_input_fn(
        x={'x': data},
        batch_size=batch_size,
        shuffle=False)
    self._test_complete_flow(
        train_input_fn=train_input_fn,
        eval_input_fn=eval_input_fn,
        predict_input_fn=predict_input_fn,
        input_dimension=label_dimension,
        label_dimension=label_dimension,
        batch_size=batch_size)

  def test_pandas_input_fn(self):
    """Tests complete flow with pandas_input_fn."""
    if not HAS_PANDAS:
      return
    label_dimension = 1
    batch_size = 10
    data = np.linspace(0., 2., batch_size, dtype=np.float32)
    x = pd.DataFrame({'x': data})
    y = pd.Series(data)
    train_input_fn = pandas_io.pandas_input_fn(
        x=x,
        y=y,
        batch_size=batch_size,
        num_epochs=None,
        shuffle=True)
    eval_input_fn = pandas_io.pandas_input_fn(
        x=x,
        y=y,
        batch_size=batch_size,
        shuffle=False)
    predict_input_fn = pandas_io.pandas_input_fn(
        x=x,
        batch_size=batch_size,
        shuffle=False)
    self._test_complete_flow(
        train_input_fn=train_input_fn,
        eval_input_fn=eval_input_fn,
        predict_input_fn=predict_input_fn,
        input_dimension=label_dimension,
        label_dimension=label_dimension,
        batch_size=batch_size)

  def test_input_fn_from_parse_example(self):
    """Tests complete flow with input_fn constructed from parse_example."""
    label_dimension = 2
    batch_size = 10
    data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
    data = data.reshape(batch_size, label_dimension)
    # Serialize each row as a tf.Example with identical 'x' and 'y' features.
    serialized_examples = []
    for datum in data:
      example = example_pb2.Example(features=feature_pb2.Features(
          feature={
              'x': feature_pb2.Feature(
                  float_list=feature_pb2.FloatList(value=datum)),
              'y': feature_pb2.Feature(
                  float_list=feature_pb2.FloatList(value=datum)),
          }))
      serialized_examples.append(example.SerializeToString())
    feature_spec = {
        'x': parsing_ops.FixedLenFeature([label_dimension], dtypes.float32),
        'y': parsing_ops.FixedLenFeature([label_dimension], dtypes.float32),
    }
    def _train_input_fn():
      feature_map = parsing_ops.parse_example(serialized_examples, feature_spec)
      features = _queue_parsed_features(feature_map)
      labels = features.pop('y')
      return features, labels
    def _eval_input_fn():
      feature_map = parsing_ops.parse_example(
          input_lib.limit_epochs(serialized_examples, num_epochs=1),
          feature_spec)
      features = _queue_parsed_features(feature_map)
      labels = features.pop('y')
      return features, labels
    def _predict_input_fn():
      feature_map = parsing_ops.parse_example(
          input_lib.limit_epochs(serialized_examples, num_epochs=1),
          feature_spec)
      features = _queue_parsed_features(feature_map)
      features.pop('y')
      return features, None
    self._test_complete_flow(
        train_input_fn=_train_input_fn,
        eval_input_fn=_eval_input_fn,
        predict_input_fn=_predict_input_fn,
        input_dimension=label_dimension,
        label_dimension=label_dimension,
        batch_size=batch_size)
class DNNClassifierIntegrationTest(test.TestCase):
  """End-to-end DNNClassifier tests: train, evaluate, predict, export."""

  def setUp(self):
    # Fresh model directory per test so checkpoints do not leak across tests.
    self._model_dir = tempfile.mkdtemp()

  def tearDown(self):
    if self._model_dir:
      writer_cache.FileWriterCache.clear()
      shutil.rmtree(self._model_dir)

  def _as_label(self, data_in_float):
    # Rounds float feature values to the nearest int64 class id.
    return np.rint(data_in_float).astype(np.int64)

  def _test_complete_flow(
      self, train_input_fn, eval_input_fn, predict_input_fn, input_dimension,
      n_classes, batch_size):
    # Shared driver: trains a tiny DNNClassifier, then verifies evaluation
    # keys, predicted-probability shapes, and SavedModel export.
    feature_columns = [
        feature_column.numeric_column('x', shape=(input_dimension,))]
    est = dnn.DNNClassifier(
        hidden_units=(2, 2),
        feature_columns=feature_columns,
        n_classes=n_classes,
        model_dir=self._model_dir)

    # TRAIN
    num_steps = 10
    est.train(train_input_fn, steps=num_steps)

    # EVALUATE
    scores = est.evaluate(eval_input_fn)
    self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP])
    self.assertIn('loss', six.iterkeys(scores))

    # PREDICT
    predicted_proba = np.array([
        x[prediction_keys.PredictionKeys.PROBABILITIES]
        for x in est.predict(predict_input_fn)
    ])
    self.assertAllEqual((batch_size, n_classes), predicted_proba.shape)

    # EXPORT
    feature_spec = feature_column.make_parse_example_spec(feature_columns)
    serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
        feature_spec)
    export_dir = est.export_savedmodel(tempfile.mkdtemp(),
                                       serving_input_receiver_fn)
    self.assertTrue(gfile.Exists(export_dir))

  def test_numpy_input_fn(self):
    """Tests complete flow with numpy_input_fn."""
    n_classes = 3
    input_dimension = 2
    batch_size = 10
    data = np.linspace(
        0., n_classes - 1., batch_size * input_dimension, dtype=np.float32)
    x_data = data.reshape(batch_size, input_dimension)
    y_data = np.reshape(self._as_label(data[:batch_size]), (batch_size, 1))
    # learn y = x
    train_input_fn = numpy_io.numpy_input_fn(
        x={'x': x_data},
        y=y_data,
        batch_size=batch_size,
        num_epochs=None,
        shuffle=True)
    eval_input_fn = numpy_io.numpy_input_fn(
        x={'x': x_data},
        y=y_data,
        batch_size=batch_size,
        shuffle=False)
    predict_input_fn = numpy_io.numpy_input_fn(
        x={'x': x_data},
        batch_size=batch_size,
        shuffle=False)
    self._test_complete_flow(
        train_input_fn=train_input_fn,
        eval_input_fn=eval_input_fn,
        predict_input_fn=predict_input_fn,
        input_dimension=input_dimension,
        n_classes=n_classes,
        batch_size=batch_size)

  def test_pandas_input_fn(self):
    """Tests complete flow with pandas_input_fn."""
    if not HAS_PANDAS:
      return
    input_dimension = 1
    n_classes = 3
    batch_size = 10
    data = np.linspace(0., n_classes - 1., batch_size, dtype=np.float32)
    x = pd.DataFrame({'x': data})
    y = pd.Series(self._as_label(data))
    train_input_fn = pandas_io.pandas_input_fn(
        x=x,
        y=y,
        batch_size=batch_size,
        num_epochs=None,
        shuffle=True)
    eval_input_fn = pandas_io.pandas_input_fn(
        x=x,
        y=y,
        batch_size=batch_size,
        shuffle=False)
    predict_input_fn = pandas_io.pandas_input_fn(
        x=x,
        batch_size=batch_size,
        shuffle=False)
    self._test_complete_flow(
        train_input_fn=train_input_fn,
        eval_input_fn=eval_input_fn,
        predict_input_fn=predict_input_fn,
        input_dimension=input_dimension,
        n_classes=n_classes,
        batch_size=batch_size)

  def test_input_fn_from_parse_example(self):
    """Tests complete flow with input_fn constructed from parse_example."""
    input_dimension = 2
    n_classes = 3
    batch_size = 10
    data = np.linspace(
        0., n_classes - 1., batch_size * input_dimension, dtype=np.float32)
    data = data.reshape(batch_size, input_dimension)
    # Serialize each row as a tf.Example; 'y' is the rounded first element.
    serialized_examples = []
    for datum in data:
      example = example_pb2.Example(features=feature_pb2.Features(
          feature={
              'x':
                  feature_pb2.Feature(float_list=feature_pb2.FloatList(
                      value=datum)),
              'y':
                  feature_pb2.Feature(int64_list=feature_pb2.Int64List(
                      value=self._as_label(datum[:1]))),
          }))
      serialized_examples.append(example.SerializeToString())
    feature_spec = {
        'x': parsing_ops.FixedLenFeature([input_dimension], dtypes.float32),
        'y': parsing_ops.FixedLenFeature([1], dtypes.int64),
    }
    def _train_input_fn():
      feature_map = parsing_ops.parse_example(serialized_examples, feature_spec)
      features = _queue_parsed_features(feature_map)
      labels = features.pop('y')
      return features, labels
    def _eval_input_fn():
      feature_map = parsing_ops.parse_example(
          input_lib.limit_epochs(serialized_examples, num_epochs=1),
          feature_spec)
      features = _queue_parsed_features(feature_map)
      labels = features.pop('y')
      return features, labels
    def _predict_input_fn():
      feature_map = parsing_ops.parse_example(
          input_lib.limit_epochs(serialized_examples, num_epochs=1),
          feature_spec)
      features = _queue_parsed_features(feature_map)
      features.pop('y')
      return features, None
    self._test_complete_flow(
        train_input_fn=_train_input_fn,
        eval_input_fn=_eval_input_fn,
        predict_input_fn=_predict_input_fn,
        input_dimension=input_dimension,
        n_classes=n_classes,
        batch_size=batch_size)
if __name__ == '__main__':
  # Run every test case defined in this module.
  test.main()
| apache-2.0 |
calebfoss/tensorflow | tensorflow/contrib/learn/python/learn/estimators/__init__.py | 6 | 11427 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""An estimator is a rule for calculating an estimate of a given quantity.
# Estimators
* **Estimators** are used to train and evaluate TensorFlow models.
They support regression and classification problems.
* **Classifiers** are functions that have discrete outcomes.
* **Regressors** are functions that predict continuous values.
## Choosing the correct estimator
* For **Regression** problems use one of the following:
* `LinearRegressor`: Uses linear model.
* `DNNRegressor`: Uses DNN.
* `DNNLinearCombinedRegressor`: Uses Wide & Deep.
* `TensorForestEstimator`: Uses RandomForest. Use `.predict()` for
regression problems.
* `Estimator`: Use when you need a custom model.
* For **Classification** problems use one of the following:
* `LinearClassifier`: Multiclass classifier using Linear model.
* `DNNClassifier`: Multiclass classifier using DNN.
* `DNNLinearCombinedClassifier`: Multiclass classifier using Wide & Deep.
* `TensorForestEstimator`: Uses RandomForest. Use `.predict_proba()` when
using for binary classification problems.
* `SVM`: Binary classifier using linear SVMs.
* `LogisticRegressor`: Use when you need custom model for binary
classification.
* `Estimator`: Use when you need custom model for N class classification.
## Pre-canned Estimators
Pre-canned estimators are machine learning estimators premade for general
purpose problems. If you need more customization, you can always write your
own custom estimator as described in the section below.
Pre-canned estimators are tested and optimized for speed and quality.
### Define the feature columns
Here are some possible types of feature columns used as inputs to a pre-canned
estimator.
Feature columns may vary based on the estimator used. So you can see which
feature columns are fed to each estimator in the below section.
```python
sparse_feature_a = sparse_column_with_keys(
column_name="sparse_feature_a", keys=["AB", "CD", ...])
embedding_feature_a = embedding_column(
sparse_id_column=sparse_feature_a, dimension=3, combiner="sum")
sparse_feature_b = sparse_column_with_hash_bucket(
column_name="sparse_feature_b", hash_bucket_size=1000)
embedding_feature_b = embedding_column(
sparse_id_column=sparse_feature_b, dimension=16, combiner="sum")
crossed_feature_a_x_b = crossed_column(
columns=[sparse_feature_a, sparse_feature_b], hash_bucket_size=10000)
real_feature = real_valued_column("real_feature")
real_feature_buckets = bucketized_column(
source_column=real_feature,
boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
```
### Create the pre-canned estimator
DNNClassifier, DNNRegressor, and DNNLinearCombinedClassifier are all pretty
similar to each other in how you use them. You can easily plug in an
optimizer and/or regularization to those estimators.
#### DNNClassifier
A classifier for TensorFlow DNN models.
```python
my_features = [embedding_feature_a, embedding_feature_b]
estimator = DNNClassifier(
feature_columns=my_features,
hidden_units=[1024, 512, 256],
optimizer=tf.train.ProximalAdagradOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
```
#### DNNRegressor
A regressor for TensorFlow DNN models.
```python
my_features = [embedding_feature_a, embedding_feature_b]
estimator = DNNRegressor(
feature_columns=my_features,
hidden_units=[1024, 512, 256])
# Or estimator using the ProximalAdagradOptimizer optimizer with
# regularization.
estimator = DNNRegressor(
feature_columns=my_features,
hidden_units=[1024, 512, 256],
optimizer=tf.train.ProximalAdagradOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
```
#### DNNLinearCombinedClassifier
A classifier for TensorFlow Linear and DNN joined training models.
* Wide and deep model
* Multi class (2 by default)
```python
my_linear_features = [crossed_feature_a_x_b]
my_deep_features = [embedding_feature_a, embedding_feature_b]
estimator = DNNLinearCombinedClassifier(
# Common settings
n_classes=n_classes,
weight_column_name=weight_column_name,
# Wide settings
linear_feature_columns=my_linear_features,
linear_optimizer=tf.train.FtrlOptimizer(...),
# Deep settings
dnn_feature_columns=my_deep_features,
dnn_hidden_units=[1000, 500, 100],
dnn_optimizer=tf.train.AdagradOptimizer(...))
```
#### LinearClassifier
Train a linear model to classify instances into one of multiple possible
classes. When number of possible classes is 2, this is binary classification.
```python
my_features = [sparse_feature_b, crossed_feature_a_x_b]
estimator = LinearClassifier(
feature_columns=my_features,
optimizer=tf.train.FtrlOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
```
#### LinearRegressor
Train a linear regression model to predict a label value given observation of
feature values.
```python
my_features = [sparse_feature_b, crossed_feature_a_x_b]
estimator = LinearRegressor(
feature_columns=my_features)
```
### LogisticRegressor
Logistic regression estimator for binary classification.
```python
# See tf.contrib.learn.Estimator(...) for details on model_fn structure
def my_model_fn(...):
pass
estimator = LogisticRegressor(model_fn=my_model_fn)
# Input builders
def input_fn_train:
pass
estimator.fit(input_fn=input_fn_train)
estimator.predict(x=x)
```
#### SVM - Support Vector Machine
Support Vector Machine (SVM) model for binary classification.
Currently only linear SVMs are supported.
```python
my_features = [real_feature, sparse_feature_a]
estimator = SVM(
example_id_column='example_id',
feature_columns=my_features,
l2_regularization=10.0)
```
#### TensorForestEstimator
Supports regression and binary classification.
```python
params = tf.contrib.tensor_forest.python.tensor_forest.ForestHParams(
num_classes=2, num_features=40, num_trees=10, max_nodes=1000)
# Estimator using the default graph builder.
estimator = TensorForestEstimator(params, model_dir=model_dir)
# Or estimator using TrainingLossForest as the graph builder.
estimator = TensorForestEstimator(
params, graph_builder_class=tensor_forest.TrainingLossForest,
model_dir=model_dir)
# Input builders
def input_fn_train: # returns x, y
...
def input_fn_eval: # returns x, y
...
estimator.fit(input_fn=input_fn_train)
estimator.evaluate(input_fn=input_fn_eval)
estimator.predict(x=x)
```
### Use the estimator
There are two main functions for using estimators, one of which is for
training, and one of which is for evaluation.
You can specify different data sources for each one in order to use different
datasets for train and eval.
```python
# Input builders
def input_fn_train: # returns x, Y
...
estimator.fit(input_fn=input_fn_train)
def input_fn_eval: # returns x, Y
...
estimator.evaluate(input_fn=input_fn_eval)
estimator.predict(x=x)
```
## Creating Custom Estimator
To create a custom `Estimator`, provide a function to `Estimator`'s
constructor that builds your model (`model_fn`, below):
```python
estimator = tf.contrib.learn.Estimator(
model_fn=model_fn,
model_dir=model_dir) # Where the model's data (e.g., checkpoints)
# are saved.
```
Here is a skeleton of this function, with descriptions of its arguments and
return values in the accompanying tables:
```python
def model_fn(features, targets, mode, params):
# Logic to do the following:
# 1. Configure the model via TensorFlow operations
# 2. Define the loss function for training/evaluation
# 3. Define the training operation/optimizer
# 4. Generate predictions
return predictions, loss, train_op
```
You may use `mode` and check against
`tf.contrib.learn.ModeKeys.{TRAIN, EVAL, INFER}` to parameterize `model_fn`.
In the Further Reading section below, there is an end-to-end TensorFlow
tutorial for building a custom estimator.
## Additional Estimators
There is an additional estimator under
`tensorflow.contrib.factorization.python.ops`:
* Gaussian mixture model (GMM) clustering
## Further reading
For further reading, there are several tutorials with relevant topics,
including:
* [Overview of linear models](../../../tutorials/linear/overview.md)
* [Linear model tutorial](../../../tutorials/wide/index.md)
* [Wide and deep learning tutorial](../../../tutorials/wide_and_deep/index.md)
* [Custom estimator tutorial](../../../tutorials/estimators/index.md)
* [Building input functions](../../../tutorials/input_fn/index.md)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.estimators._sklearn import NotFittedError
from tensorflow.contrib.learn.python.learn.estimators.dnn import DNNClassifier
from tensorflow.contrib.learn.python.learn.estimators.dnn import DNNRegressor
from tensorflow.contrib.learn.python.learn.estimators.dnn_linear_combined import DNNLinearCombinedClassifier
from tensorflow.contrib.learn.python.learn.estimators.dnn_linear_combined import DNNLinearCombinedRegressor
from tensorflow.contrib.learn.python.learn.estimators.estimator import BaseEstimator
from tensorflow.contrib.learn.python.learn.estimators.estimator import Estimator
from tensorflow.contrib.learn.python.learn.estimators.estimator import infer_real_valued_columns_from_input
from tensorflow.contrib.learn.python.learn.estimators.estimator import infer_real_valued_columns_from_input_fn
from tensorflow.contrib.learn.python.learn.estimators.estimator import SKCompat
from tensorflow.contrib.learn.python.learn.estimators.kmeans import KMeansClustering
from tensorflow.contrib.learn.python.learn.estimators.linear import LinearClassifier
from tensorflow.contrib.learn.python.learn.estimators.linear import LinearRegressor
from tensorflow.contrib.learn.python.learn.estimators.logistic_regressor import LogisticRegressor
from tensorflow.contrib.learn.python.learn.estimators.metric_key import MetricKey
from tensorflow.contrib.learn.python.learn.estimators.model_fn import ModeKeys
from tensorflow.contrib.learn.python.learn.estimators.model_fn import ModelFnOps
from tensorflow.contrib.learn.python.learn.estimators.prediction_key import PredictionKey
from tensorflow.contrib.learn.python.learn.estimators.run_config import ClusterConfig
from tensorflow.contrib.learn.python.learn.estimators.run_config import Environment
from tensorflow.contrib.learn.python.learn.estimators.run_config import RunConfig
from tensorflow.contrib.learn.python.learn.estimators.run_config import TaskType
from tensorflow.contrib.learn.python.learn.estimators.svm import SVM
| apache-2.0 |
meduz/scikit-learn | benchmarks/bench_glmnet.py | 111 | 3890 | """
To run this, you'll need to have the following installed:
* glmnet-python
* scikit-learn (of course)
Does two benchmarks
First, we fix a training set and increase the number of
samples. Then we plot the computation time as function of
the number of samples.
In the second benchmark, we increase the number of dimensions of the
training set. Then we plot the computation time as function of
the number of dimensions.
In both cases, only 10% of the features are informative.
"""
import numpy as np
import gc
from time import time
from sklearn.datasets.samples_generator import make_regression
# Regularization strength shared by both Lasso implementations under test.
alpha = 0.1
# alpha = 0.01
def rmse(a, b):
    """Return the root-mean-squared error between arrays *a* and *b*."""
    diff = a - b
    return np.sqrt((diff ** 2).mean())
def bench(factory, X, Y, X_test, Y_test, ref_coef):
    """Fit factory(alpha=alpha) on (X, Y), print timing and quality metrics.

    Returns the wall-clock fit duration in seconds.
    """
    gc.collect()

    start = time()
    model = factory(alpha=alpha).fit(X, Y)
    elapsed = time() - start

    print("duration: %0.3fs" % elapsed)
    print("rmse: %f" % rmse(Y_test, model.predict(X_test)))
    print("mean coef abs diff: %f" % abs(ref_coef - model.coef_.ravel()).mean())
    return elapsed
if __name__ == '__main__':
    from glmnet.elastic_net import Lasso as GlmnetLasso
    from sklearn.linear_model import Lasso as ScikitLasso
    # Delayed import of matplotlib.pyplot
    import matplotlib.pyplot as plt

    # Benchmark 1: fixed feature count, growing number of training samples.
    scikit_results = []
    glmnet_results = []
    n = 20
    step = 500
    n_features = 1000
    # Floor division: make_regression's n_informative must be an int
    # (true division yields a float and breaks on Python 3).
    n_informative = n_features // 10
    n_test_samples = 1000
    for i in range(1, n + 1):
        print('==================')
        print('Iteration %s of %s' % (i, n))
        print('==================')

        X, Y, coef_ = make_regression(
            n_samples=(i * step) + n_test_samples, n_features=n_features,
            noise=0.1, n_informative=n_informative, coef=True)

        # Hold out the last n_test_samples rows for scoring.
        X_test = X[-n_test_samples:]
        Y_test = Y[-n_test_samples:]
        X = X[:(i * step)]
        Y = Y[:(i * step)]

        print("benchmarking scikit-learn: ")
        scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
        print("benchmarking glmnet: ")
        glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))

    plt.clf()
    # Iteration i trains on i * step samples, so the x axis starts at `step`
    # (the former range(0, ...) mislabeled every point by one step).
    xx = range(step, (n + 1) * step, step)
    plt.title('Lasso regression on sample dataset (%d features)' % n_features)
    plt.plot(xx, scikit_results, 'b-', label='scikit-learn')
    plt.plot(xx, glmnet_results, 'r-', label='glmnet')
    plt.legend()
    plt.xlabel('number of samples to classify')
    plt.ylabel('Time (s)')
    plt.show()

    # Benchmark 2: fixed number of samples, growing number of features.
    scikit_results = []
    glmnet_results = []
    n = 20
    step = 100
    n_samples = 500

    for i in range(1, n + 1):
        print('==================')
        print('Iteration %02d of %02d' % (i, n))
        print('==================')
        n_features = i * step
        n_informative = n_features // 10  # int for make_regression

        X, Y, coef_ = make_regression(
            n_samples=(i * step) + n_test_samples, n_features=n_features,
            noise=0.1, n_informative=n_informative, coef=True)

        X_test = X[-n_test_samples:]
        Y_test = Y[-n_test_samples:]
        X = X[:n_samples]
        Y = Y[:n_samples]

        print("benchmarking scikit-learn: ")
        scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
        print("benchmarking glmnet: ")
        glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))

    xx = np.arange(100, 100 + n * step, step)
    plt.figure('scikit-learn vs. glmnet benchmark results')
    plt.title('Regression in high dimensional spaces (%d samples)' % n_samples)
    plt.plot(xx, scikit_results, 'b-', label='scikit-learn')
    plt.plot(xx, glmnet_results, 'r-', label='glmnet')
    plt.legend()
    plt.xlabel('number of features')
    plt.ylabel('Time (s)')
    plt.axis('tight')
    plt.show()
| bsd-3-clause |
themrmax/scikit-learn | examples/feature_selection/plot_feature_selection_pipeline.py | 58 | 1049 | """
==================
Pipeline Anova SVM
==================
Simple usage of Pipeline that runs successively a univariate
feature selection with anova and then a C-SVM of the selected features.
"""
from sklearn import svm
from sklearn.datasets import samples_generator
from sklearn.feature_selection import SelectKBest, f_regression
from sklearn.pipeline import make_pipeline
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
print(__doc__)

# import some data to play with
X, y = samples_generator.make_classification(
    n_features=20, n_informative=3, n_redundant=0, n_classes=4,
    n_clusters_per_class=2)

X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)

# ANOVA SVM-C
# 1) anova filter, take 3 best ranked features
anova_filter = SelectKBest(f_regression, k=3)
# 2) svm
clf = svm.SVC(kernel='linear')

# Chain selection and classification; the pipeline fits both on the
# training split, then is scored on the held-out split.
anova_svm = make_pipeline(anova_filter, clf)
anova_svm.fit(X_train, y_train)
y_pred = anova_svm.predict(X_test)
print(classification_report(y_test, y_pred))
| bsd-3-clause |
kirel/political-affiliation-prediction | partyprograms.py | 2 | 3428 | # -*- coding: utf-8 -*-
import re
import cPickle
from classifier import Classifier
import json
from scipy import ones,argmax
from sklearn.metrics import classification_report,confusion_matrix
def partyprograms(folder='model'):
    """Classify German party programs section-by-section and dump results.

    Loads the pdftotext-converted 2013 election programs of SPD, CDU, LINKE
    and GRUENE from ``folder``/textdata, strips page furniture, splits each
    program into sections, predicts a party for each section and for each
    whole program, prints a confusion matrix and classification report, and
    writes texts and predictions as JSON.

    NOTE(review): relies on Python 2 semantics (``dict.keys().index`` and
    ``json.dump`` to a file opened in ``'wb'``) -- confirm before porting.

    folder: directory holding the trained model and the textdata/ inputs.
    """
    clf = Classifier(folder=folder)
    # converted with pdftotext
    text = {}
    bow = {}  # NOTE(review): unused -- candidate for removal.
    # from https://www.spd.de/linkableblob/96686/data/20130415_regierungsprogramm_2013_2017.pdf
    txt = open(folder+'/textdata/SPD_programm.txt').read()
    # remove page footer
    txt = re.sub(r'\W+Das Regierungsprogramm 2013 – 2017\W+\d+\W+','\n',txt)
    # split in sections (roman-numeral headings like "IV.2 ")
    txt = re.split('\n(IX|IV|V?I{0,3}\.\d? )',txt)
    text['spd'] = txt
    # from http://www.cdu.de/sites/default/files/media/dokumente/regierungsprogramm-2013-2017-langfassung-20130911.pdf
    txt = open(folder+'/textdata/CDU_programm.txt').read()
    # remove page footer
    txt = re.sub(r'\W+Gemeinsam erfolgreich für Deutschland | Regierungsprogramm 2013 – 2017\W+','\n',txt)
    # remove page numbers
    txt = re.sub(r'\n\d+\n',' ',txt)
    # get sections
    txt = re.split(r'\n\d\.\d?\W',txt)
    # remove sections without proper text
    txt = [t for t in txt if len(t)>1000]
    text['cdu'] = txt
    # from https://www.die-linke.de/fileadmin/download/wahlen2013/bundestagswahlprogramm/bundestagswahlprogramm2013_langfassung.pdf
    txt = open(folder+'/textdata/LINKE_programm.txt').read()
    # remove page numbers
    txt = re.sub(r'\n\d+\n',' ',txt)
    # get sections (blank-line separated)
    txt = re.split('\n\n+',txt)
    # remove sections without proper text
    txt = [t for t in txt if len(t)>1000]
    text['linke'] = txt
    # from http://www.gruene.de/fileadmin/user_upload/Dokumente/Wahlprogramm/Wahlprogramm-barrierefrei.pdf
    txt = open(folder+'/textdata/GRUENE_programm.txt').read()
    # remove page footer
    txt = re.sub(r'(\d+)?\W+Bundestagswahlprogramm 2013\nBündnis 90/Die Grünen\W+\d?\n','\n',txt)
    txt = re.sub(r'Teilhaben. Einmischen. Zukunft schaffen.','',txt)
    txt = re.sub(r'Zeit für den grünen Wandel','',txt)
    # remove page numbers
    txt = re.sub(r'\n\d+\n',' ',txt)
    # get sections
    txt = re.split(r'\n\d\.\d?\W',txt)
    # remove sections without proper text
    txt = [t for t in txt if len(t)>1000]
    text['gruene'] = txt
    json.dump(text,open(folder+'/textdata/programs.json', 'wb'),ensure_ascii=False)
    predictions,predictions_total = dict(),dict()
    Ytrue, Yhat = [],[]
    for key in text.keys():
        predictions[key] = []
        # for each paragraph separately
        for paragraph in text[key]:
            prediction = clf.predict(paragraph)['prediction']
            # take the party with the highest predicted probability
            idx = argmax([x['probability'] for x in prediction])
            Yhat.append(text.keys().index(prediction[idx]['party']))
            predictions[key].append(prediction)
        #predictions[key] = map(lambda x: clf.predict(x)['prediction'],text[key])
        # for the entire program at once
        predictions_total[key] = clf.predict(' '.join(text[key]))['prediction']
        Ytrue.extend(ones(len(text[key]))*text.keys().index(key))
    print(confusion_matrix(Ytrue,Yhat))
    print(classification_report(Ytrue,Yhat,target_names=text.keys()))
    json.dump(predictions,open(folder+'/textdata/predictions.json','wb'),ensure_ascii=False)
    json.dump(predictions_total,open(folder+'/textdata/predictions_total.json','wb'),ensure_ascii=False)
| mit |
yask123/scikit-learn | examples/cluster/plot_mean_shift.py | 351 | 1793 | """
=============================================
A demo of the mean-shift clustering algorithm
=============================================
Reference:
Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
feature space analysis". IEEE Transactions on Pattern Analysis and
Machine Intelligence. 2002. pp. 603-619.
"""
print(__doc__)

import numpy as np

from sklearn.cluster import MeanShift, estimate_bandwidth
from sklearn.datasets.samples_generator import make_blobs

###############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, _ = make_blobs(n_samples=10000, centers=centers, cluster_std=0.6)

###############################################################################
# Compute clustering with MeanShift

# The following bandwidth can be automatically detected using
# estimate_bandwidth; quantile=0.2 uses the median of pairwise distances
# over a 500-sample subset.
bandwidth = estimate_bandwidth(X, quantile=0.2, n_samples=500)

ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
ms.fit(X)
labels = ms.labels_
cluster_centers = ms.cluster_centers_

labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)

print("number of estimated clusters : %d" % n_clusters_)

###############################################################################
# Plot result
import matplotlib.pyplot as plt
from itertools import cycle

plt.figure(1)
plt.clf()

# One color per cluster (cycled if more clusters than colors).
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
    my_members = labels == k
    cluster_center = cluster_centers[k]
    plt.plot(X[my_members, 0], X[my_members, 1], col + '.')
    plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
             markeredgecolor='k', markersize=14)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| bsd-3-clause |
wazeerzulfikar/scikit-learn | sklearn/learning_curve.py | 8 | 15418 | """Utilities to evaluate models with respect to a variable
"""
# Author: Alexander Fabisch <afabisch@informatik.uni-bremen.de>
#
# License: BSD 3 clause
import warnings
import numpy as np
from .base import is_classifier, clone
from .cross_validation import check_cv
from .externals.joblib import Parallel, delayed
from .cross_validation import _safe_split, _score, _fit_and_score
from .metrics.scorer import check_scoring
from .utils import indexable
warnings.warn("This module was deprecated in version 0.18 in favor of the "
"model_selection module into which all the functions are moved."
" This module will be removed in 0.20",
DeprecationWarning)
__all__ = ['learning_curve', 'validation_curve']
def learning_curve(estimator, X, y, train_sizes=np.linspace(0.1, 1.0, 5),
                   cv=None, scoring=None, exploit_incremental_learning=False,
                   n_jobs=1, pre_dispatch="all", verbose=0,
                   error_score='raise'):
    """Learning curve.
    .. deprecated:: 0.18
        This module will be removed in 0.20.
        Use :func:`sklearn.model_selection.learning_curve` instead.
    Determines cross-validated training and test scores for different training
    set sizes.
    A cross-validation generator splits the whole dataset k times in training
    and test data. Subsets of the training set with varying sizes will be used
    to train the estimator and a score for each training subset size and the
    test set will be computed. Afterwards, the scores will be averaged over
    all k runs for each training subset size.
    Read more in the :ref:`User Guide <learning_curves>`.
    Parameters
    ----------
    estimator : object type that implements the "fit" and "predict" methods
        An object of that type which is cloned for each validation.
    X : array-like, shape (n_samples, n_features)
        Training vector, where n_samples is the number of samples and
        n_features is the number of features.
    y : array-like, shape (n_samples) or (n_samples, n_features), optional
        Target relative to X for classification or regression;
        None for unsupervised learning.
    train_sizes : array-like, shape (n_ticks,), dtype float or int
        Relative or absolute numbers of training examples that will be used to
        generate the learning curve. If the dtype is float, it is regarded as a
        fraction of the maximum size of the training set (that is determined
        by the selected validation method), i.e. it has to be within (0, 1].
        Otherwise it is interpreted as absolute sizes of the training sets.
        Note that for classification the number of samples usually have to
        be big enough to contain at least one sample from each class.
        (default: np.linspace(0.1, 1.0, 5))
    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
        - None, to use the default 3-fold cross-validation,
        - integer, to specify the number of folds.
        - An object to be used as a cross-validation generator.
        - An iterable yielding train/test splits.
        For integer/None inputs, if the estimator is a classifier and ``y`` is
        either binary or multiclass,
        :class:`sklearn.model_selection.StratifiedKFold` is used. In all
        other cases, :class:`sklearn.model_selection.KFold` is used.
        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.
    scoring : string, callable or None, optional, default: None
        A string (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.
    exploit_incremental_learning : boolean, optional, default: False
        If the estimator supports incremental learning, this will be
        used to speed up fitting for different training set sizes.
    n_jobs : integer, optional
        Number of jobs to run in parallel (default 1).
    pre_dispatch : integer or string, optional
        Number of predispatched jobs for parallel execution (default is
        all). The option can reduce the allocated memory. The string can
        be an expression like '2*n_jobs'.
    verbose : integer, optional
        Controls the verbosity: the higher, the more messages.
    error_score : 'raise' (default) or numeric
        Value to assign to the score if an error occurs in estimator fitting.
        If set to 'raise', the error is raised. If a numeric value is given,
        FitFailedWarning is raised. This parameter does not affect the refit
        step, which will always raise the error.
    Returns
    -------
    train_sizes_abs : array, shape = (n_unique_ticks,), dtype int
        Numbers of training examples that has been used to generate the
        learning curve. Note that the number of ticks might be less
        than n_ticks because duplicate entries will be removed.
    train_scores : array, shape (n_ticks, n_cv_folds)
        Scores on training sets.
    test_scores : array, shape (n_ticks, n_cv_folds)
        Scores on test set.
    Notes
    -----
    See :ref:`examples/model_selection/plot_learning_curve.py
    <sphx_glr_auto_examples_model_selection_plot_learning_curve.py>`
    """
    if exploit_incremental_learning and not hasattr(estimator, "partial_fit"):
        raise ValueError("An estimator must support the partial_fit interface "
                         "to exploit incremental learning")
    X, y = indexable(X, y)
    # Make a list since we will be iterating multiple times over the folds
    cv = list(check_cv(cv, X, y, classifier=is_classifier(estimator)))
    scorer = check_scoring(estimator, scoring=scoring)
    # HACK as long as boolean indices are allowed in cv generators
    if cv[0][0].dtype == bool:
        # convert boolean masks into integer index arrays
        new_cv = []
        for i in range(len(cv)):
            new_cv.append((np.nonzero(cv[i][0])[0], np.nonzero(cv[i][1])[0]))
        cv = new_cv
    n_max_training_samples = len(cv[0][0])
    # Because the lengths of folds can be significantly different, it is
    # not guaranteed that we use all of the available training data when we
    # use the first 'n_max_training_samples' samples.
    train_sizes_abs = _translate_train_sizes(train_sizes,
                                             n_max_training_samples)
    n_unique_ticks = train_sizes_abs.shape[0]
    if verbose > 0:
        print("[learning_curve] Training set sizes: " + str(train_sizes_abs))
    parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
                        verbose=verbose)
    if exploit_incremental_learning:
        # one task per fold; each task walks all train sizes with partial_fit
        # so work done on smaller subsets is reused for larger ones
        classes = np.unique(y) if is_classifier(estimator) else None
        out = parallel(delayed(_incremental_fit_estimator)(
            clone(estimator), X, y, classes, train, test, train_sizes_abs,
            scorer, verbose) for train, test in cv)
    else:
        # one task per (fold, train size): refit from scratch on the first
        # n_train_samples indices of the fold's training split
        out = parallel(delayed(_fit_and_score)(
            clone(estimator), X, y, scorer, train[:n_train_samples], test,
            verbose, parameters=None, fit_params=None, return_train_score=True,
            error_score=error_score)
            for train, test in cv for n_train_samples in train_sizes_abs)
        # keep only (train_score, test_score) of each result
        out = np.array(out)[:, :2]
        n_cv_folds = out.shape[0] // n_unique_ticks
        out = out.reshape(n_cv_folds, n_unique_ticks, 2)
    # -> shape (2, n_unique_ticks, n_cv_folds): [train_scores, test_scores]
    out = np.asarray(out).transpose((2, 1, 0))
    return train_sizes_abs, out[0], out[1]
def _translate_train_sizes(train_sizes, n_max_training_samples):
"""Determine absolute sizes of training subsets and validate 'train_sizes'.
Examples:
_translate_train_sizes([0.5, 1.0], 10) -> [5, 10]
_translate_train_sizes([5, 10], 10) -> [5, 10]
Parameters
----------
train_sizes : array-like, shape (n_ticks,), dtype float or int
Numbers of training examples that will be used to generate the
learning curve. If the dtype is float, it is regarded as a
fraction of 'n_max_training_samples', i.e. it has to be within (0, 1].
n_max_training_samples : int
Maximum number of training samples (upper bound of 'train_sizes').
Returns
-------
train_sizes_abs : array, shape (n_unique_ticks,), dtype int
Numbers of training examples that will be used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
"""
train_sizes_abs = np.asarray(train_sizes)
n_ticks = train_sizes_abs.shape[0]
n_min_required_samples = np.min(train_sizes_abs)
n_max_required_samples = np.max(train_sizes_abs)
if np.issubdtype(train_sizes_abs.dtype, np.float):
if n_min_required_samples <= 0.0 or n_max_required_samples > 1.0:
raise ValueError("train_sizes has been interpreted as fractions "
"of the maximum number of training samples and "
"must be within (0, 1], but is within [%f, %f]."
% (n_min_required_samples,
n_max_required_samples))
train_sizes_abs = (train_sizes_abs * n_max_training_samples).astype(
dtype=np.int, copy=False)
train_sizes_abs = np.clip(train_sizes_abs, 1,
n_max_training_samples)
else:
if (n_min_required_samples <= 0 or
n_max_required_samples > n_max_training_samples):
raise ValueError("train_sizes has been interpreted as absolute "
"numbers of training samples and must be within "
"(0, %d], but is within [%d, %d]."
% (n_max_training_samples,
n_min_required_samples,
n_max_required_samples))
train_sizes_abs = np.unique(train_sizes_abs)
if n_ticks > train_sizes_abs.shape[0]:
warnings.warn("Removed duplicate entries from 'train_sizes'. Number "
"of ticks will be less than the size of "
"'train_sizes' %d instead of %d)."
% (train_sizes_abs.shape[0], n_ticks), RuntimeWarning)
return train_sizes_abs
def _incremental_fit_estimator(estimator, X, y, classes, train, test,
                               train_sizes, scorer, verbose):
    """Train estimator on growing training subsets via partial_fit and
    record the train/test score after each increment."""
    scores_train, scores_test = [], []
    # np.split cuts ``train`` at every requested size; the trailing segment
    # (samples beyond the last size) is discarded.  Segment i therefore holds
    # exactly the *new* samples needed to grow to train_sizes[i].
    new_chunks = np.split(train, train_sizes)[:-1]
    for n_samples, chunk in zip(train_sizes, new_chunks):
        seen = train[:n_samples]
        X_seen, y_seen = _safe_split(estimator, X, y, seen)
        X_chunk, y_chunk = _safe_split(estimator, X, y, chunk)
        X_test, y_test = _safe_split(estimator, X, y, test, seen)
        # unsupervised estimators take no targets
        if y_chunk is None:
            estimator.partial_fit(X_chunk, classes=classes)
        else:
            estimator.partial_fit(X_chunk, y_chunk, classes=classes)
        scores_train.append(_score(estimator, X_seen, y_seen, scorer))
        scores_test.append(_score(estimator, X_test, y_test, scorer))
    return np.array((scores_train, scores_test)).T
def validation_curve(estimator, X, y, param_name, param_range, cv=None,
                     scoring=None, n_jobs=1, pre_dispatch="all", verbose=0):
    """Validation curve.
    .. deprecated:: 0.18
        This module will be removed in 0.20.
        Use :func:`sklearn.model_selection.validation_curve` instead.
    Determine training and test scores for varying parameter values.
    Compute scores for an estimator with different values of a specified
    parameter. This is similar to grid search with one parameter. However, this
    will also compute training scores and is merely a utility for plotting the
    results.
    Read more in the :ref:`User Guide <validation_curve>`.
    Parameters
    ----------
    estimator : object type that implements the "fit" and "predict" methods
        An object of that type which is cloned for each validation.
    X : array-like, shape (n_samples, n_features)
        Training vector, where n_samples is the number of samples and
        n_features is the number of features.
    y : array-like, shape (n_samples) or (n_samples, n_features), optional
        Target relative to X for classification or regression;
        None for unsupervised learning.
    param_name : string
        Name of the parameter that will be varied.
    param_range : array-like, shape (n_values,)
        The values of the parameter that will be evaluated.
    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
        - None, to use the default 3-fold cross-validation,
        - integer, to specify the number of folds.
        - An object to be used as a cross-validation generator.
        - An iterable yielding train/test splits.
        For integer/None inputs, if the estimator is a classifier and ``y`` is
        either binary or multiclass,
        :class:`sklearn.model_selection.StratifiedKFold` is used. In all
        other cases, :class:`sklearn.model_selection.KFold` is used.
        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.
    scoring : string, callable or None, optional, default: None
        A string (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.
    n_jobs : integer, optional
        Number of jobs to run in parallel (default 1).
    pre_dispatch : integer or string, optional
        Number of predispatched jobs for parallel execution (default is
        all). The option can reduce the allocated memory. The string can
        be an expression like '2*n_jobs'.
    verbose : integer, optional
        Controls the verbosity: the higher, the more messages.
    Returns
    -------
    train_scores : array, shape (n_ticks, n_cv_folds)
        Scores on training sets.
    test_scores : array, shape (n_ticks, n_cv_folds)
        Scores on test set.
    Notes
    -----
    See
    :ref:`examples/model_selection/plot_validation_curve.py
    <sphx_glr_auto_examples_model_selection_plot_validation_curve.py>`
    """
    X, y = indexable(X, y)
    cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
    scorer = check_scoring(estimator, scoring=scoring)
    parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
                        verbose=verbose)
    # one fit per (fold, parameter value); each result starts with
    # (train_score, test_score), which is all we keep below
    out = parallel(delayed(_fit_and_score)(
        clone(estimator), X, y, scorer, train, test, verbose,
        parameters={param_name: v}, fit_params=None, return_train_score=True)
        for train, test in cv for v in param_range)
    out = np.asarray(out)[:, :2]
    n_params = len(param_range)
    n_cv_folds = out.shape[0] // n_params
    # -> shape (2, n_params, n_cv_folds): row 0 train scores, row 1 test
    out = out.reshape(n_cv_folds, n_params, 2).transpose((2, 1, 0))
    return out[0], out[1]
| bsd-3-clause |
okadate/romspy | romspy/tplot/tplot_param.py | 1 | 4044 | # coding: utf-8
# (c) 2016-01-27 Teruhisa Okada
import netCDF4
import matplotlib.pyplot as plt
from matplotlib.dates import DateFormatter
from matplotlib.offsetbox import AnchoredText
import numpy as np
import pandas as pd
import glob
import romspy
def tplot_param(inifiles, vname, ax=plt.gca()):
for inifile in inifiles:
print inifile,
nc = netCDF4.Dataset(inifile, 'r')
param = nc[vname][:]
#param = np.exp(param)
time = nc['ocean_time'][:]
time = netCDF4.num2date(time, romspy.JST)
print time, param
if 'params' not in locals():
default = param[-1]
params = param[0]
times = time[0]
else:
params = np.append(params, param[0])
times = np.append(times, time[0])
ax.plot(times, params, 'o-', label='opt param')
ax.set_ylabel(vname)
ax.xaxis.set_major_formatter(DateFormatter('%m/%d'))
moving_avg(times, params, ax, window=3)
moving_avg(times, params, ax, window=7)
ax.legend()
pmean = np.mean(params)
pmedian = np.median(params)
#ax.text(0.1,0.1,'mean={}'.format(pmean), transform=ax.transAxes)
text = AnchoredText('mean={:.2e}'.format(pmean), loc=2)
ax.add_artist(text)
ax.plot([times[0], times[-1]], [default, default], '-', alpha=0.5, label='default')
def moving_avg(times, params, ax, window=3):
    """Overlay a centered moving average of ``params`` on ``ax``.

    Parameters
    ----------
    times : sequence of datetimes
        Index for the parameter values.
    params : sequence of float
        Parameter values to smooth.
    ax : matplotlib axes
        Axes to draw the dashed average line on.
    window : int
        Size (in samples) of the centered rolling window.
    """
    df = pd.DataFrame(data={'param':params}, index=times)
    #df = df.resample('mean', )
    # FIX: ``pd.rolling_mean`` was removed in pandas 0.18+; the
    # ``DataFrame.rolling`` accessor computes the identical centered mean.
    df = df.rolling(window=window, min_periods=1, center=True).mean()
    ax.plot(df.index, df.param.values, '--', label='{}d-avg'.format(window))
def _get_files(tmpfile, hours):
Nfiles = len(glob.glob(tmpfile))
tmpfile = tmpfile.replace('*','{}')
outfiles = [tmpfile.format(i) for i in range(0,hours*Nfiles,hours)]
return outfiles
def _plot6(inifiles):
    """Plot the six optimized parameters (P01, P04..P08), one per subplot,
    and return the array of axes."""
    fig, ax = plt.subplots(6, 1, figsize=(12, 12))
    vnames = ['P01', 'P04', 'P05', 'P06', 'P07', 'P08']
    for axis, vname in zip(ax, vnames):
        tplot_param(inifiles, vname, ax=axis)
    return ax
def _get_pfactor(test):
if '0001' in test:
return 0.001
elif '0005' in test:
return 0.005
elif '001' in test:
return 0.01
elif '005' in test:
return 0.05
elif '01' in test:
return 0.1
def main(test, hours=24):
    """Build and save the parameter time-series figure for one DA test case.

    Parameters
    ----------
    test : str
        Name of the test run; also selects the figure layout/title below.
    hours : int
        Spacing in hours between consecutive ini files.
    """
    inifiles = _get_files('/home/okada/ism-i/apps/OB500P/testDA/{}/output/ob500_ini_*.nc'.format(test), hours=hours)
    figfile = '/home/okada/Dropbox/Figures/2016_param/tplot_param_{}.png'.format(test)
    if test == 'param2':
        # two-parameter experiment: only P01 and P04
        fig, ax = plt.subplots(2,1)
        tplot_param(inifiles, 'P01', ax=ax[0])
        tplot_param(inifiles, 'P04', ax=ax[1])
        ax[0].set_title('4dvar(ini+param), window=1day, pfactor=0.1')
    elif 'param3' in test:
        # six-parameter experiments; pfactor is encoded in the test name
        ax = _plot6(inifiles)
        pfactor = _get_pfactor(test)
        ax[0].set_title('4dvar(ini+param), window=1day, pfactor={}'.format(pfactor))
    elif 'param4' in test:
        ax = _plot6(inifiles)
        pfactor = _get_pfactor(test)
        ax[0].set_title('4dvar(param), window=1day, pfactor={}'.format(pfactor))
    elif 'param5' in test:
        # pfactor not encoded in these names; shown as '*'
        ax = _plot6(inifiles)
        pfactor = '*'
        ax[0].set_title('4dvar(ini+param), window=1day, pfactor={}'.format(pfactor))
    elif 'param6' in test:
        ax = _plot6(inifiles)
        pfactor = '*'
        ax[0].set_title('4dvar(param), window=7day, pfactor={}'.format(pfactor))
    romspy.savefig(figfile)
    #plt.show()
if __name__ == '__main__':
    # seaborn import restyles the matplotlib defaults used by the plots
    import seaborn as sns
    # earlier experiment runs kept for reference:
    #main('param5-05')
    #main('param5-01')
    #main('param5-005')
    #main('param5-001')
    #main('param5-01-hev')
    #main('param5-001-hev')
    #main('param5-001-7days', hours=24*7)
    #main('param6-p01-1', hours=24*7)
    #main('param6-p001-1', hours=24*7)
    #main('param6R-p01-7', hours=24*7)
    #main('param6R-p001-7', hours=24*7)
    main('param6-ini', hours=24)
| mit |
sebalander/trilateration | trilatera.py | 1 | 11639 | '''
practicar trilateracion
'''
# %%
import numpy as np
import numpy.linalg as ln
import matplotlib.pyplot as plt
import numdifftools as ndf
from scipy.special import chdtri
# %%
kml_file = "/home/sebalander/Code/VisionUNQextra/trilateration/trilat.kml"
# %%
# Parse point names and (lon, lat) coordinates out of the KML placemarks.
# FIX: use a context manager so the file handle is closed.
with open(kml_file, 'r') as fh:
    texto = fh.read()
names = list()
data = list()
for line in texto.splitlines():
    line = line.replace("\t","")
    # FIX: ``line.find(...) is not -1`` compared ints by *identity*, which
    # only works because CPython caches small ints and raises a
    # SyntaxWarning on Python >= 3.8; use a proper ``!=`` comparison.
    if line.find("name") != -1:
        name = line[6:8]
        #print(name)
    if line.find("coordinates") != -1:
        coords = line[13:-14]
        lon, lat, _ = coords.split(sep=',')
        names.append(name)
        data.append([float(lon), float(lat)])
        #print(data[-1])
data = np.array(data).T
xGPS = data.T
# plot data gathered from xml file
fig, ax = plt.subplots()
ax.scatter(data[0], data[1])
# FIX: the upper limits used ``- 2e-5`` like the lower ones, which placed
# the axis maximum *below* the largest data value and clipped points.
ax.set_xlim([min(data[0]) - 2e-5, max(data[0]) + 2e-5])
ax.set_ylim([min(data[1]) - 2e-5, max(data[1]) + 2e-5])
for i, tx in enumerate(names):
    ax.annotate(tx, (data[0,i], data[1,i]))
# %% distances measured with a Bosch GLM 250 VF laser meter
# Db holds the hand-entered measurements (three repeats per pair);
# dg holds the same distances read off Google Earth.
Db = np.zeros((8, 8, 3), dtype=float)
Db[0, 1] = [8.6237, 8.6243, 8.6206]
Db[0, 2] = [7.0895, 7.0952, 7.0842]
Db[0, 3] = [13.097, 13.104, 13.107]
Db[0, 4] = [18.644, 18.642, 18.641]
Db[0, 5] = [24.630, 24.649, 24.670]
Db[0, 6] = [25.223, 25.218, 25.219]
Db[0, 7] = [41.425, 41.391, 41.401]
Db[1, 2] = [3.8999, 3.8961, 3.89755] # third repeat missing; filled with the average
Db[1, 3] = [14.584, 14.619, 14.6015] # third repeat filled with the average
Db[1, 4] = [17.723, 17.745, 17.775]
Db[1, 5] = [25.771, 25.752, 25.752]
Db[1, 6] = [22.799, 22.791, 22.793]
Db[1, 7] = [41.820, 41.827, 41.826]
Db[2, 3] = [10.678, 10.687, 10.682]
Db[2, 4] = [14.287, 14.262, 14.278]
Db[2, 5] = [22.016, 22.003, 22.002]
Db[2, 6] = [19.961, 19.962, 19.964]
Db[2, 7] = [38.281, 38.289, 38.282] # * measurement shifted 37 cm because of an obstacle
Db[3, 4] = [6.3853, 6.3895, 6.3888]
Db[3, 5] = [11.640, 11.645, 11.644]
Db[3, 6] = [13.599, 13.596, 13.606]
Db[3, 7] = [28.374, 28.371, 28.366]
Db[4, 5] = [8.8504, 8.8416, 8.8448]
Db[4, 6] = [7.2463, 7.2536, 7.2526]
Db[4, 7] = [24.088, 24.086, 24.087]
Db[5, 6] = [10.523, 10.521, 10.522]
Db[5, 7] = [16.798, 16.801, 16.797]
Db[6, 7] = [20.794, 20.786, 20.788]
Db -= 0.055 # subtract 5.5 cm: measured from the base instead of the center
indAux = np.arange(len(Db))
Db[indAux, indAux] = 0.0
dg = np.zeros(Db.shape[:2], dtype=float)
dg[0, 1] = 8.27
dg[0, 2] = 6.85
dg[0, 3] = 13.01
dg[0, 4] = 18.5
dg[0, 5] = 24.79
dg[0, 6] = 25.07
dg[0, 7] = 41.22
dg[1, 2] = 3.65
dg[1, 3] = 14.53
dg[1, 4] = 17.63
dg[1, 5] = 26.00
dg[1, 6] = 22.82
dg[1, 7] = 41.65
dg[2, 3] = 10.89
dg[2, 4] = 14.53
dg[2, 5] = 22.55
dg[2, 6] = 20.22
dg[2, 7] = 38.43
dg[3, 4] = 6.37
dg[3, 5] = 11.97
dg[3, 6] = 13.30
dg[3, 7] = 28.26
dg[4, 5] = 9.17
dg[4, 6] = 6.93
dg[4, 7] = 24.05
dg[5, 6] = 10.40
dg[5, 7] = 16.41
dg[6, 7] = 20.49
# make the matrices symmetric to avoid index bookkeeping later
triuInd = np.triu_indices(8)
dg.T[triuInd] = dg[triuInd]
Db.transpose([1,0,2])[triuInd] = Db[triuInd]
# average the three repeats into a single bosch distance matrix
db = np.mean(Db, axis=2)
# plot a side-by-side comparison of the two distance matrices
plt.figure()
plt.imshow(np.hstack([db, dg]))
#plt.figure()
#plt.imshow(db - dg)
#
#plt.figure()
#plt.imshow((db - dg) / db)
# %% simple trilateration to obtain initial conditions
# builds the array with the coordinates of every point
def trilateracion(d, signos=None):
    """Recover 2D point coordinates from a symmetric distance matrix.

    Point 0 is pinned at the origin and point 1 on the positive x axis;
    each remaining point is triangulated against those two anchors.  The
    sign of every y coordinate is ambiguous; ``signos`` (array of +-1 for
    points 2..n-1) can be supplied to flip selected points.
    """
    d2 = d**2
    n_pts = d.shape[0]
    X = np.zeros((n_pts, 2), dtype=float)
    # point 0 stays at (0, 0); point 1 sits on the x axis at distance d01
    X[1, 0] = d[0, 1]
    # classic two-anchor trilateration for the remaining points
    X[2:, 0] = (d2[0, 1] + d2[0, 2:] - d2[1, 2:]) / 2 / d[0, 1]
    X[2:, 1] = np.sqrt(d2[0, 2:] - X[2:, 0]**2)
    if signos is not None:
        X[2:, 1] *= signos
    return X
# initial coordinate estimates from both distance sources
Xb = trilateracion(db)
Xg = trilateracion(dg)
fig, ax = plt.subplots()
ax.scatter(Xb.T[0], Xb.T[1], label='bosch')
ax.scatter(Xg.T[0], Xg.T[1], label='google-earth')
for i, tx in enumerate(names):
    ax.annotate(tx, (Xb[i, 0], Xb[i, 1]))
ax.legend()
# %% error function helpers
def x2flat(x):
    """Pack the free coordinates of ``x`` into a flat parameter vector.

    Point 0 is pinned to the origin and point 1 to the x axis, so only
    x[1, 0] plus the coordinates of points 2.. are free parameters.
    """
    free_tail = np.reshape(x[2:], -1)
    return np.concatenate([[x[1, 0]], free_tail])
def flat2x(xF):
    """Inverse of ``x2flat``: rebuild the (n, 2) coordinate array from the
    flat parameter vector."""
    anchors = np.array([[0.0, 0.0], [xF[0], 0.0]])
    free_pts = np.reshape(xF[1:], (-1, 2))
    return np.concatenate([anchors, free_pts])
def distEr(d1, d2):
    """Root-mean-square difference between two distance vectors."""
    residual = d1 - d2
    return np.sqrt(np.mean(residual ** 2))
def dists(xF):
    """Pairwise distances (upper triangle, row-major order) of the point
    configuration encoded by the flat vector ``xF``."""
    # unpack the flat vector: origin, x-axis anchor, then free (x, y) pairs
    pts = np.concatenate([np.array([[0.0, 0.0], [xF[0], 0.0]]),
                          np.reshape(xF[1:], (-1, 2))])
    n = pts.shape[0]
    d = np.zeros((n, n), dtype=float)
    for row in range(n - 1):
        # distances from point ``row`` to every later point
        d[row, row + 1:] = ln.norm(pts[row + 1:] - pts[row], axis=1)
    return d[np.triu_indices(n, k=1)]
# flat (k=1 upper-triangle) indices used to pull the measured distances
# out of the 8x8 matrices
upInd = np.triu_indices_from(db, k=1)
## defino el jacobiano de las distancias vs x
#Jd = ndf.Jacobian(dists)
#
#def newtonOpt(Xb, db, ep=1e-15):
# errores = list()
# d = db[upInd]
#
# #print("cond inicial")
# xF = x2flat(Xb)
# D = dists(xF)
# errores.append(distEr(d, D))
# #print(errores[-1])
#
# # hago un paso
# j = Jd(xF)
# xFp = xF + ln.pinv(j).dot(d - D)
# D = dists(xFp)
# errores.append(distEr(d, D))
# xF = xFp
#
# # mientras las correcciones sean mayores aun umbral
# while np.mean(np.abs(xFp - xF)) > ep:
# # for i in range(10):
# #print("paso")
# j = Jd(xF)
# xFp = xFp + ln.pinv(j).dot(d - D)
# D = dists(xF)
# errores.append(distEr(d, D))
# xF = xFp
# #print(errores[-1])
#
# return flat2x(xFp), errores
#
#xBOpt, e1 = newtonOpt(Xb, db)
#xGOpt, e2 = newtonOpt(Xg, dg)
#
## %%
#fig, ax = plt.subplots()
#ax.scatter(xBOpt.T[0], xBOpt.T[1], label='bosch optimo')
#ax.scatter(Xb.T[0], Xb.T[1], label='bosch inicial')
#for i, tx in enumerate(names):
# ax.annotate(tx, (xBOpt[i, 0], xBOpt[i, 1]))
#ax.legend()
#
#
#fig, ax = plt.subplots()
#ax.scatter(xGOpt.T[0], xGOpt.T[1], label='google earth optimo')
#ax.scatter(Xg.T[0], Xg.T[1], label='google earth inicial')
#for i, tx in enumerate(names):
# ax.annotate(tx, (xGOpt[i, 0], xGOpt[i, 1]))
#ax.legend()
#
#
#
#fig, ax = plt.subplots()
#ax.scatter(xBOpt.T[0], xBOpt.T[1], label='bosch optimo')
#ax.scatter(xGOpt.T[0], xGOpt.T[1], label='google-earth optimo')
#for i, tx in enumerate(names):
# ax.annotate(tx, (xBOpt[i, 0], xBOpt[i, 1]))
#ax.legend()
# %% quick look at the discrepancy between the two distance sources
dbFlat = db[upInd]
dgFlat = dg[upInd]
dif = dgFlat - dbFlat
plt.figure()
plt.scatter(dbFlat, dgFlat - dbFlat)
# NOTE: the covariance below is computed but neither stored nor printed
np.cov(dif)
# %% now work out the uncertainty in all this and optimize
# scalar error function for the optimizer
def distEr2(d1, d2):
    """Sum of squared differences between two distance vectors."""
    residual = d1 - d2
    return np.sum(residual * residual)
def xEr(xF, d):
    """Scalar fitting error of the flat configuration ``xF`` against the
    measured distance vector ``d``."""
    return distEr2(d, dists(xF))
# numerical Jacobian and Hessian of the scalar error (via numdifftools)
Jex = ndf.Jacobian(xEr)
Hex = ndf.Hessian(xEr)
def newtonOptE2(x, db, ep=1e-10):
    """Newton iteration on the scalar error ``xEr`` to fit point positions
    to the measured distance matrix.

    Parameters
    ----------
    x : (n, 2) array
        Initial coordinate estimate (e.g. from ``trilateracion``).
    db : (n, n) array
        Symmetric measured distance matrix.
    ep : float
        Convergence threshold on the largest coordinate update.

    Returns
    -------
    (coords, errores)
        Optimized (n, 2) coordinates and the RMS-error history.
    """
    errores = list()
    d = db[upInd]
    #print("cond inicial")
    xF = x2flat(x)
    D = dists(xF)
    errores.append(distEr(d, D))
    # first Newton step
    # FIX: the Hessian/Jacobian used to be evaluated against the module
    # global ``dbFlat`` instead of ``d`` (the flattened ``db`` argument),
    # so the function silently ignored its own input; use ``d``.
    A = Hex(xF, d)
    B = Jex(xF, d)
    l = np.real(ln.eig(A)[0])
    print('autovals ', np.max(l), np.min(l))
    dX = - ln.inv(A).dot(B.T)
    xFp = xF + dX[:,0]
    D = dists(xFp)
    errores.append(distEr(d, D))
    # iterate while the largest coordinate correction exceeds the threshold
    e = np.max(np.abs(xFp - xF))
    print('correcciones ', e)
    while e > ep:
        xF = xFp
        A = Hex(xF, d)
        B = Jex(xF, d)
        l = np.real(ln.eig(A)[0])
        print('autovals ', np.max(l), np.min(l))
        dX = - ln.inv(A).dot(B.T)
        xFp = xF + dX[:,0]
        D = dists(xFp)
        errores.append(distEr(d, D))
        e = np.max(np.abs(xFp - xF))
        print('correcciones ', e)
    # (the trailing ``xF = xFp`` of the original loop body was dead code --
    # xF is rebound at the top of the loop and unused after it)
    return flat2x(xFp), errores
# %%
#xF = x2flat(xBOpt)
#xEr(xF, dbFlat)
# run the optimization on the bosch distances
xbOptE2, e2 = newtonOptE2(Xb, db)
xbOptE2Flat = x2flat(xbOptE2)
Hopt = Hex(xbOptE2Flat, dbFlat) # Hessian at the optimum
# NOTE(review): H^-1 is used directly as a covariance estimate below; a
# residual-variance scale factor may be missing -- confirm.
Sopt = ln.inv(Hopt)
# plot the covariance matrix
plt.matshow(Sopt)
fig, ax = plt.subplots()
for i, tx in enumerate(names):
    ax.annotate(tx, (xbOptE2[i, 0], xbOptE2[i, 1]))
ax.scatter(Xb.T[0], Xb.T[1], label='bosch inicial')
ax.scatter(xbOptE2.T[0], xbOptE2.T[1], label='bosch optimizado')
#ax.scatter(xGOpt.T[0], xGOpt.T[1], label='distancias google earth')
ax.legend()
# %%
# reference circle used to draw confidence ellipses
fi = np.linspace(0, 2*np.pi, 100)
# NOTE(review): chdtri(2, 0.5) is the *50%* quantile of a 2-dof chi-square,
# but the original comment claimed 90% coverage -- confirm intended level.
r = np.sqrt(chdtri(2, 0.5)) # radius so that the chosen fraction falls inside
# r = 1
Xcirc = np.array([np.cos(fi), np.sin(fi)]) * r
def unit2CovTransf(C):
    """Return a matrix T with T.dot(T.T) == C.

    T maps samples of a unit normal onto samples with covariance C:

        Xnorm = np.random.randn(2, n)
        X = unit2CovTransf(C).dot(Xnorm)   # cov(X) ~ C
    """
    eigvals, eigvecs = ln.eig(C)
    # scale each eigenvector column by the square root of its eigenvalue
    return np.sqrt(eigvals.real) * eigvecs
def plotEllipse(ax, C, mux, muy, col):
    """Draw the confidence ellipse of covariance ``C`` centered at
    (mux, muy), together with its two principal semi-axes.

    Relies on the module-level reference circle ``Xcirc`` and radius ``r``.
    """
    T = unit2CovTransf(C)
    # map the reference circle through T to obtain the ellipse outline
    ex, ey = np.dot(T, Xcirc)
    ax.plot(ex + mux, ey + muy, c=col, lw=0.5)
    # principal axes are the (scaled) columns of T
    axis1, axis2 = r * T.T
    ax.plot([mux, mux + axis1[0]], [muy, muy + axis1[1]], c=col, lw=0.5)
    ax.plot([mux, mux + axis2[0]], [muy, muy + axis2[1]], c=col, lw=0.5)
# %% plot the confidence ellipses of the optimized positions
xbOptE2
Sopt
# NOTE: the two bare expressions above are notebook-style inspection
# leftovers; they have no effect when run as a script.
col = 'b'
fig, ax = plt.subplots()
for i, tx in enumerate(names):
    ax.annotate(tx, (xbOptE2[i, 0], xbOptE2[i, 1]))
ax.scatter(xbOptE2[:,0], xbOptE2[:,1])
# point 1 only has an x degree of freedom -> horizontal error bar taken
# from the first diagonal element of the covariance
ax.errorbar(xbOptE2[1,0], xbOptE2[1,1], xerr=np.sqrt(Sopt[0,0]), yerr=0)
ax.set_aspect('equal')
for i in range(2, len(xbOptE2)):
    print(i)
    # flat parameter layout: slot 0 is x of point 1, then (x, y) pairs for
    # points 2.. ; point i occupies flat slots 2*i-3 .. 2*i-2
    i1 = 2 * i - 3
    i2 = i1 + 2
    C = Sopt[i1:i2,i1:i2]
    print(C)
    plotEllipse(ax, C, xbOptE2[i,0], xbOptE2[i,1], col)
ax.legend()
# %% find the best rotation/translation/scale mapping one frame to the other
def findrototras(x1, x2):
    """Least-squares similarity transform from ``x1`` to ``x2``.

    Both inputs are (n, 2) coordinate arrays.  Returns (R, T) such that
    x2 ~= R.dot(x1.T) + T, with R = [[a, b], [-b, a]] (rotation + uniform
    scale) and T a (2, 1) translation.
    """
    px, py = x1.T
    ones_col = np.ones_like(px)
    zeros_col = np.zeros_like(px)
    # linear-system rows for the x and y equations of every point
    rows_x = np.array([px, py, ones_col, zeros_col])
    rows_y = np.array([py, -px, zeros_col, ones_col])
    A = np.hstack((rows_x, rows_y)).T
    print(rows_x.shape, A.shape)
    Y = np.hstack((x2[:, 0], x2[:, 1]))
    print(Y.shape)
    # least-squares solution for (a, b, tx, ty)
    a, b, tx, ty = ln.pinv(A).dot(Y)
    R = np.array([[a, b],
                  [-b, a]])
    T = np.array([[tx], [ty]])
    return R, T
# %%
# fit the similarity transform and map the GPS (lon, lat) points into the
# optimized bosch coordinate frame
gps2b = findrototras(xGPS, xbOptE2)
xBFromGPS = (np.dot(gps2b[0], xGPS.T) + gps2b[1]).T
# %%
# overlay the optimized bosch points and the transformed GPS points
fig, ax = plt.subplots()
for i, tx in enumerate(names):
    ax.annotate(tx, (xbOptE2[i, 0], xbOptE2[i, 1]))
ax.scatter(xbOptE2.T[0], xbOptE2.T[1], label='bosch optimizado')
ax.scatter(xBFromGPS.T[0], xBFromGPS.T[1], label='google earth to bosch')
#ax.scatter(xGOpt.T[0], xGOpt.T[1], label='distancias google earth')
ax.legend()
| gpl-2.0 |
mjgrav2001/scikit-learn | sklearn/metrics/ranking.py | 75 | 25426 | """Metrics to assess performance on classification task given scores
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Arnaud Joly <a.joly@ulg.ac.be>
# Jochen Wersdorfer <jochen@wersdoerfer.de>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Joel Nothman <joel.nothman@gmail.com>
# Noel Dawe <noel@dawe.me>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import csr_matrix
from ..utils import check_consistent_length
from ..utils import column_or_1d, check_array
from ..utils.multiclass import type_of_target
from ..utils.fixes import isclose
from ..utils.fixes import bincount
from ..utils.stats import rankdata
from ..utils.sparsefuncs import count_nonzero
from .base import _average_binary_score
from .base import UndefinedMetricWarning
def auc(x, y, reorder=False):
    """Compute Area Under the Curve (AUC) using the trapezoidal rule.

    This is a general-purpose integrator for a curve given as sample
    points.  For the area under a ROC curve, see :func:`roc_auc_score`.

    Parameters
    ----------
    x : array, shape = [n]
        x coordinates.
    y : array, shape = [n]
        y coordinates.
    reorder : boolean, optional (default=False)
        If True, assume that the curve is ascending in the case of ties,
        as for an ROC curve. If the curve is non-ascending, the result
        will be wrong.

    Returns
    -------
    auc : float

    See also
    --------
    roc_auc_score : Computes the area under the ROC curve
    precision_recall_curve :
        Compute precision-recall pairs for different probability thresholds
    """
    check_consistent_length(x, y)
    x = column_or_1d(x)
    y = column_or_1d(y)
    if x.shape[0] < 2:
        raise ValueError('At least 2 points are needed to compute'
                         ' area under curve, but x.shape = %s' % x.shape)
    sign = 1
    if reorder:
        # sort by x, breaking ties with y, so trapezoids are well ordered
        order = np.lexsort((y, x))
        x, y = x[order], y[order]
    else:
        dx = np.diff(x)
        if np.any(dx < 0):
            if np.all(dx <= 0):
                # x is monotonically decreasing: integrate backwards
                sign = -1
            else:
                raise ValueError("Reordering is not turned on, and "
                                 "the x array is not increasing: %s" % x)
    return sign * np.trapz(y, x)
def average_precision_score(y_true, y_score, average="macro",
                            sample_weight=None):
    """Compute average precision (AP) from prediction scores

    This score corresponds to the area under the precision-recall curve.

    Note: this implementation is restricted to the binary classification task
    or multilabel classification task.

    Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.

    Parameters
    ----------
    y_true : array, shape = [n_samples] or [n_samples, n_classes]
        True binary labels in binary label indicators.

    y_score : array, shape = [n_samples] or [n_samples, n_classes]
        Target scores, can either be probability estimates of the positive
        class, confidence values, or binary decisions.

    average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
        If ``None``, the scores for each class are returned. Otherwise,
        this determines the type of averaging performed on the data:

        ``'micro'``:
            Calculate metrics globally by considering each element of the label
            indicator matrix as a label.
        ``'macro'``:
            Calculate metrics for each label, and find their unweighted
            mean.  This does not take label imbalance into account.
        ``'weighted'``:
            Calculate metrics for each label, and find their average, weighted
            by support (the number of true instances for each label).
        ``'samples'``:
            Calculate metrics for each instance, and find their average.

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    average_precision : float

    References
    ----------
    .. [1] `Wikipedia entry for the Average precision
           <http://en.wikipedia.org/wiki/Average_precision>`_

    See also
    --------
    roc_auc_score : Area under the ROC curve

    precision_recall_curve :
        Compute precision-recall pairs for different probability thresholds

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.metrics import average_precision_score
    >>> y_true = np.array([0, 0, 1, 1])
    >>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
    >>> average_precision_score(y_true, y_scores)  # doctest: +ELLIPSIS
    0.79...

    """
    def _ap_for_one_label(y_true, y_score, sample_weight=None):
        # Average precision for a single binary problem is the area under
        # its precision-recall curve.
        precision, recall, _ = precision_recall_curve(
            y_true, y_score, sample_weight=sample_weight)
        return auc(recall, precision)

    # Delegate the binary / multilabel averaging machinery to the shared
    # helper, which applies ``_ap_for_one_label`` per column as needed.
    return _average_binary_score(_ap_for_one_label, y_true, y_score,
                                 average, sample_weight=sample_weight)
def roc_auc_score(y_true, y_score, average="macro", sample_weight=None):
    """Compute Area Under the Curve (AUC) from prediction scores

    Note: this implementation is restricted to the binary classification task
    or multilabel classification task in label indicator format.

    Read more in the :ref:`User Guide <roc_metrics>`.

    Parameters
    ----------
    y_true : array, shape = [n_samples] or [n_samples, n_classes]
        True binary labels in binary label indicators.

    y_score : array, shape = [n_samples] or [n_samples, n_classes]
        Target scores, can either be probability estimates of the positive
        class, confidence values, or binary decisions.

    average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
        If ``None``, the scores for each class are returned. Otherwise,
        this determines the type of averaging performed on the data:

        ``'micro'``:
            Calculate metrics globally by considering each element of the label
            indicator matrix as a label.
        ``'macro'``:
            Calculate metrics for each label, and find their unweighted
            mean.  This does not take label imbalance into account.
        ``'weighted'``:
            Calculate metrics for each label, and find their average, weighted
            by support (the number of true instances for each label).
        ``'samples'``:
            Calculate metrics for each instance, and find their average.

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    auc : float

    References
    ----------
    .. [1] `Wikipedia entry for the Receiver operating characteristic
           <http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_

    See also
    --------
    average_precision_score : Area under the precision-recall curve

    roc_curve : Compute Receiver operating characteristic (ROC)

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.metrics import roc_auc_score
    >>> y_true = np.array([0, 0, 1, 1])
    >>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
    >>> roc_auc_score(y_true, y_scores)
    0.75

    """
    def _auc_for_one_label(y_true, y_score, sample_weight=None):
        # ROC AUC is undefined when y_true contains a single class: the
        # curve degenerates to a point.
        if len(np.unique(y_true)) != 2:
            raise ValueError("Only one class present in y_true. ROC AUC score "
                             "is not defined in that case.")
        fpr, tpr, _ = roc_curve(y_true, y_score,
                                sample_weight=sample_weight)
        # reorder=True because a ROC curve is ascending with possible ties.
        return auc(fpr, tpr, reorder=True)

    return _average_binary_score(
        _auc_for_one_label, y_true, y_score, average,
        sample_weight=sample_weight)
def _binary_clf_curve(y_true, y_score, pos_label=None, sample_weight=None):
    """Calculate true and false positives per binary classification threshold.

    Parameters
    ----------
    y_true : array, shape = [n_samples]
        True targets of binary classification

    y_score : array, shape = [n_samples]
        Estimated probabilities or decision function

    pos_label : int, optional (default=None)
        The label of the positive class

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    fps : array, shape = [n_thresholds]
        A count of false positives, at index i being the number of negative
        samples assigned a score >= thresholds[i]. The total number of
        negative samples is equal to fps[-1] (thus true negatives are given by
        fps[-1] - fps).

    tps : array, shape = [n_thresholds := len(np.unique(y_score))]
        An increasing count of true positives, at index i being the number
        of positive samples assigned a score >= thresholds[i]. The total
        number of positive samples is equal to tps[-1] (thus false negatives
        are given by tps[-1] - tps).

    thresholds : array, shape = [n_thresholds]
        Decreasing score values.
    """
    check_consistent_length(y_true, y_score)
    y_true = column_or_1d(y_true)
    y_score = column_or_1d(y_score)
    if sample_weight is not None:
        sample_weight = column_or_1d(sample_weight)

    # ensure binary classification if pos_label is not specified; the
    # accepted implicit encodings are {0, 1}, {-1, 1} or a single one of
    # those labels.
    classes = np.unique(y_true)
    if (pos_label is None and
        not (np.all(classes == [0, 1]) or
             np.all(classes == [-1, 1]) or
             np.all(classes == [0]) or
             np.all(classes == [-1]) or
             np.all(classes == [1]))):
        raise ValueError("Data is not binary and pos_label is not specified")
    elif pos_label is None:
        pos_label = 1.

    # make y_true a boolean vector (True == positive class)
    y_true = (y_true == pos_label)

    # sort scores and corresponding truth values in decreasing score order;
    # mergesort is a stable sort, so samples with tied scores keep a
    # deterministic relative order.
    desc_score_indices = np.argsort(y_score, kind="mergesort")[::-1]
    y_score = y_score[desc_score_indices]
    y_true = y_true[desc_score_indices]
    if sample_weight is not None:
        weight = sample_weight[desc_score_indices]
    else:
        # scalar 1. broadcasts in the weighted cumsum below
        weight = 1.

    # y_score typically has many tied values. Here we extract
    # the indices associated with the distinct values. We also
    # concatenate a value for the end of the curve.
    # We need to use isclose to avoid spurious repeated thresholds
    # stemming from floating point roundoff errors.
    distinct_value_indices = np.where(np.logical_not(isclose(
        np.diff(y_score), 0)))[0]
    threshold_idxs = np.r_[distinct_value_indices, y_true.size - 1]

    # accumulate the true positives with decreasing threshold
    tps = (y_true * weight).cumsum()[threshold_idxs]
    if sample_weight is not None:
        fps = weight.cumsum()[threshold_idxs] - tps
    else:
        # unweighted: threshold_idxs + 1 samples have been seen at index i,
        # so false positives are that count minus the true positives.
        fps = 1 + threshold_idxs - tps
    return fps, tps, y_score[threshold_idxs]
def precision_recall_curve(y_true, probas_pred, pos_label=None,
                           sample_weight=None):
    """Compute precision-recall pairs for different probability thresholds

    Note: this implementation is restricted to the binary classification task.

    The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
    true positives and ``fp`` the number of false positives. The precision is
    intuitively the ability of the classifier not to label as positive a sample
    that is negative.

    The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
    true positives and ``fn`` the number of false negatives. The recall is
    intuitively the ability of the classifier to find all the positive samples.

    The last precision and recall values are 1. and 0. respectively and do not
    have a corresponding threshold.  This ensures that the graph starts on the
    x axis.

    Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.

    Parameters
    ----------
    y_true : array, shape = [n_samples]
        True targets of binary classification in range {-1, 1} or {0, 1}.

    probas_pred : array, shape = [n_samples]
        Estimated probabilities or decision function.

    pos_label : int, optional (default=None)
        The label of the positive class

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    precision : array, shape = [n_thresholds + 1]
        Precision values such that element i is the precision of
        predictions with score >= thresholds[i] and the last element is 1.

    recall : array, shape = [n_thresholds + 1]
        Decreasing recall values such that element i is the recall of
        predictions with score >= thresholds[i] and the last element is 0.

    thresholds : array, shape = [n_thresholds := len(np.unique(probas_pred))]
        Increasing thresholds on the decision function used to compute
        precision and recall.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.metrics import precision_recall_curve
    >>> y_true = np.array([0, 0, 1, 1])
    >>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
    >>> precision, recall, thresholds = precision_recall_curve(
    ...     y_true, y_scores)
    >>> precision  # doctest: +ELLIPSIS
    array([ 0.66...,  0.5       ,  1.        ,  1.        ])
    >>> recall
    array([ 1. ,  0.5,  0.5,  0. ])
    >>> thresholds
    array([ 0.35,  0.4 ,  0.8 ])

    """
    fps, tps, thresholds = _binary_clf_curve(y_true, probas_pred,
                                             pos_label=pos_label,
                                             sample_weight=sample_weight)

    precision = tps / (tps + fps)
    recall = tps / tps[-1]

    # Truncate the curve at the first point where full recall is reached
    # (later thresholds add no information), reverse it so recall is
    # decreasing, and append the conventional (precision=1, recall=0) end
    # point, which has no associated threshold.
    full_recall_idx = tps.searchsorted(tps[-1])
    reverse = slice(full_recall_idx, None, -1)
    return (np.r_[precision[reverse], 1],
            np.r_[recall[reverse], 0],
            thresholds[reverse])
def roc_curve(y_true, y_score, pos_label=None, sample_weight=None):
    """Compute Receiver operating characteristic (ROC)

    Note: this implementation is restricted to the binary classification task.

    Read more in the :ref:`User Guide <roc_metrics>`.

    Parameters
    ----------
    y_true : array, shape = [n_samples]
        True binary labels in range {0, 1} or {-1, 1}.  If labels are not
        binary, pos_label should be explicitly given.

    y_score : array, shape = [n_samples]
        Target scores, can either be probability estimates of the positive
        class or confidence values.

    pos_label : int
        Label considered as positive and others are considered negative.

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    fpr : array, shape = [>2]
        Increasing false positive rates such that element i is the false
        positive rate of predictions with score >= thresholds[i].

    tpr : array, shape = [>2]
        Increasing true positive rates such that element i is the true
        positive rate of predictions with score >= thresholds[i].

    thresholds : array, shape = [n_thresholds]
        Decreasing thresholds on the decision function used to compute
        fpr and tpr. `thresholds[0]` represents no instances being predicted
        and is arbitrarily set to `max(y_score) + 1`.

    See also
    --------
    roc_auc_score : Compute Area Under the Curve (AUC) from prediction scores

    Notes
    -----
    Since the thresholds are sorted from low to high values, they
    are reversed upon returning them to ensure they correspond to both ``fpr``
    and ``tpr``, which are sorted in reversed order during their calculation.

    References
    ----------
    .. [1] `Wikipedia entry for the Receiver operating characteristic
            <http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn import metrics
    >>> y = np.array([1, 1, 2, 2])
    >>> scores = np.array([0.1, 0.4, 0.35, 0.8])
    >>> fpr, tpr, thresholds = metrics.roc_curve(y, scores, pos_label=2)
    >>> fpr
    array([ 0. ,  0.5,  0.5,  1. ])
    >>> tpr
    array([ 0.5,  0.5,  1. ,  1. ])
    >>> thresholds
    array([ 0.8 ,  0.4 ,  0.35,  0.1 ])

    """
    fps, tps, thresholds = _binary_clf_curve(
        y_true, y_score, pos_label=pos_label, sample_weight=sample_weight)

    if tps.size == 0 or fps[0] != 0:
        # Add an extra threshold position if necessary, so that the curve
        # always starts at (fpr=0, tpr=0); the synthetic threshold is one
        # above the highest score, i.e. "no instance predicted positive".
        tps = np.r_[0, tps]
        fps = np.r_[0, fps]
        thresholds = np.r_[thresholds[0] + 1, thresholds]

    if fps[-1] <= 0:
        # no negatives at all: the false positive rate is undefined
        warnings.warn("No negative samples in y_true, "
                      "false positive value should be meaningless",
                      UndefinedMetricWarning)
        fpr = np.repeat(np.nan, fps.shape)
    else:
        fpr = fps / fps[-1]

    if tps[-1] <= 0:
        # no positives at all: the true positive rate is undefined
        warnings.warn("No positive samples in y_true, "
                      "true positive value should be meaningless",
                      UndefinedMetricWarning)
        tpr = np.repeat(np.nan, tps.shape)
    else:
        tpr = tps / tps[-1]

    return fpr, tpr, thresholds
def label_ranking_average_precision_score(y_true, y_score):
    """Compute ranking-based average precision

    Label ranking average precision (LRAP) is the average over each ground
    truth label assigned to each sample, of the ratio of true vs. total
    labels with lower score.

    This metric is used in multilabel ranking problem, where the goal
    is to give better rank to the labels associated to each sample.

    The obtained score is always strictly greater than 0 and
    the best value is 1.

    Read more in the :ref:`User Guide <label_ranking_average_precision>`.

    Parameters
    ----------
    y_true : array or sparse matrix, shape = [n_samples, n_labels]
        True binary labels in binary indicator format.

    y_score : array, shape = [n_samples, n_labels]
        Target scores, can either be probability estimates of the positive
        class, confidence values, or binary decisions.

    Returns
    -------
    score : float

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.metrics import label_ranking_average_precision_score
    >>> y_true = np.array([[1, 0, 0], [0, 0, 1]])
    >>> y_score = np.array([[0.75, 0.5, 1], [1, 0.2, 0.1]])
    >>> label_ranking_average_precision_score(y_true, y_score) \
        # doctest: +ELLIPSIS
    0.416...

    """
    check_consistent_length(y_true, y_score)
    y_true = check_array(y_true, ensure_2d=False)
    y_score = check_array(y_score, ensure_2d=False)

    if y_true.shape != y_score.shape:
        raise ValueError("y_true and y_score have different shape")

    # Handle badly formated array and the degenerate case with one label
    y_type = type_of_target(y_true)
    if (y_type != "multilabel-indicator" and
            not (y_type == "binary" and y_true.ndim == 2)):
        raise ValueError("{0} format is not supported".format(y_type))

    # CSR gives direct access to the indices of relevant labels per row.
    y_true = csr_matrix(y_true)
    # Negate so that "better score" ranks first under ascending rankdata.
    y_score = -y_score

    n_samples, n_labels = y_true.shape

    score_sum = 0.
    for i in range(n_samples):
        start, stop = y_true.indptr[i], y_true.indptr[i + 1]
        relevant = y_true.indices[start:stop]

        if relevant.size == 0 or relevant.size == n_labels:
            # If all labels are relevant or unrelevant, the score is also
            # equal to 1. The label ranking has no meaning.
            score_sum += 1.
            continue

        scores_i = y_score[i]
        # rank of each relevant label among all labels, and among the
        # relevant labels only; 'max' assigns ties the highest rank.
        rank = rankdata(scores_i, 'max')[relevant]
        L = rankdata(scores_i[relevant], 'max')
        score_sum += (L / rank).mean()

    return score_sum / n_samples
def coverage_error(y_true, y_score, sample_weight=None):
    """Coverage error measure

    Compute how far we need to go through the ranked scores to cover all
    true labels. The best value is equal to the average number
    of labels in ``y_true`` per sample.

    Ties in ``y_scores`` are broken by giving maximal rank that would have
    been assigned to all tied values.

    Read more in the :ref:`User Guide <coverage_error>`.

    Parameters
    ----------
    y_true : array, shape = [n_samples, n_labels]
        True binary labels in binary indicator format.

    y_score : array, shape = [n_samples, n_labels]
        Target scores, can either be probability estimates of the positive
        class, confidence values, or binary decisions.

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    coverage_error : float

    References
    ----------
    .. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).
           Mining multi-label data. In Data mining and knowledge discovery
           handbook (pp. 667-685). Springer US.

    """
    y_true = check_array(y_true, ensure_2d=False)
    y_score = check_array(y_score, ensure_2d=False)
    check_consistent_length(y_true, y_score, sample_weight)

    y_type = type_of_target(y_true)
    if y_type != "multilabel-indicator":
        raise ValueError("{0} format is not supported".format(y_type))

    if y_true.shape != y_score.shape:
        raise ValueError("y_true and y_score have different shape")

    # Mask out the scores of irrelevant labels; the minimum of what remains
    # is the score of the worst-ranked relevant label for each sample.
    relevant_scores = np.ma.masked_array(y_score, mask=np.logical_not(y_true))
    lowest_relevant = relevant_scores.min(axis=1).reshape((-1, 1))

    # Coverage is the number of labels scored at least as high as the worst
    # relevant one; samples with no relevant labels contribute 0.
    coverage = (y_score >= lowest_relevant).sum(axis=1)
    coverage = coverage.filled(0)

    return np.average(coverage, weights=sample_weight)
def label_ranking_loss(y_true, y_score, sample_weight=None):
    """Compute Ranking loss measure

    Compute the average number of label pairs that are incorrectly ordered
    given y_score weighted by the size of the label set and the number of
    labels not in the label set.

    This is similar to the error set size, but weighted by the number of
    relevant and irrelevant labels. The best performance is achieved with
    a ranking loss of zero.

    Read more in the :ref:`User Guide <label_ranking_loss>`.

    Parameters
    ----------
    y_true : array or sparse matrix, shape = [n_samples, n_labels]
        True binary labels in binary indicator format.

    y_score : array, shape = [n_samples, n_labels]
        Target scores, can either be probability estimates of the positive
        class, confidence values, or binary decisions.

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    loss : float

    References
    ----------
    .. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).
           Mining multi-label data. In Data mining and knowledge discovery
           handbook (pp. 667-685). Springer US.

    """
    y_true = check_array(y_true, ensure_2d=False, accept_sparse='csr')
    y_score = check_array(y_score, ensure_2d=False)
    check_consistent_length(y_true, y_score, sample_weight)

    y_type = type_of_target(y_true)
    if y_type not in ("multilabel-indicator",):
        raise ValueError("{0} format is not supported".format(y_type))

    if y_true.shape != y_score.shape:
        raise ValueError("y_true and y_score have different shape")

    n_samples, n_labels = y_true.shape

    # CSR format gives direct access to each row's relevant label indices.
    y_true = csr_matrix(y_true)

    loss = np.zeros(n_samples)
    for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):
        # Sort and bin the label scores: unique_inverse maps each label to
        # the rank bin of its score within this sample.
        unique_scores, unique_inverse = np.unique(y_score[i],
                                                  return_inverse=True)
        # histogram of relevant (true) labels per score bin ...
        true_at_reversed_rank = bincount(
            unique_inverse[y_true.indices[start:stop]],
            minlength=len(unique_scores))
        # ... and of all labels per score bin; the difference gives the
        # irrelevant (false) labels per bin.
        all_at_reversed_rank = bincount(unique_inverse,
                                        minlength=len(unique_scores))
        false_at_reversed_rank = all_at_reversed_rank - true_at_reversed_rank

        # if the scores are ordered, it's possible to count the number of
        # incorrectly ordered pairs in linear time by cumulatively counting
        # how many false labels of a given score have a score higher than the
        # accumulated true labels with lower score.
        loss[i] = np.dot(true_at_reversed_rank.cumsum(),
                         false_at_reversed_rank)

    n_positives = count_nonzero(y_true, axis=1)
    # Normalize by the number of (relevant, irrelevant) pairs; the errstate
    # guard silences the 0/0 produced by degenerate samples handled below.
    with np.errstate(divide="ignore", invalid="ignore"):
        loss /= ((n_labels - n_positives) * n_positives)

    # When there are no positive or no negative labels, those values should
    # be considered as correct, i.e. the ranking doesn't matter.
    loss[np.logical_or(n_positives == 0, n_positives == n_labels)] = 0.

    return np.average(loss, weights=sample_weight)
| bsd-3-clause |
ssaeger/scikit-learn | examples/ensemble/plot_adaboost_multiclass.py | 354 | 4124 | """
=====================================
Multi-class AdaBoosted Decision Trees
=====================================
This example reproduces Figure 1 of Zhu et al [1] and shows how boosting can
improve prediction accuracy on a multi-class problem. The classification
dataset is constructed by taking a ten-dimensional standard normal distribution
and defining three classes separated by nested concentric ten-dimensional
spheres such that roughly equal numbers of samples are in each class (quantiles
of the :math:`\chi^2` distribution).
The performance of the SAMME and SAMME.R [1] algorithms are compared. SAMME.R
uses the probability estimates to update the additive model, while SAMME uses
the classifications only. As the example illustrates, the SAMME.R algorithm
typically converges faster than SAMME, achieving a lower test error with fewer
boosting iterations. The error of each algorithm on the test set after each
boosting iteration is shown on the left, the classification error on the test
set of each tree is shown in the middle, and the boost weight of each tree is
shown on the right. All trees have a weight of one in the SAMME.R algorithm and
therefore are not shown.
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
print(__doc__)

# Author: Noel Dawe <noel.dawe@gmail.com>
#
# License: BSD 3 clause

from sklearn.externals.six.moves import zip

import matplotlib.pyplot as plt

from sklearn.datasets import make_gaussian_quantiles
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import accuracy_score
from sklearn.tree import DecisionTreeClassifier

# Three classes defined by nested concentric ten-dimensional spheres
# (quantiles of the chi^2 distribution), as in Zhu et al. Figure 1.
X, y = make_gaussian_quantiles(n_samples=13000, n_features=10,
                               n_classes=3, random_state=1)

# First n_split samples for training, the rest for testing.
n_split = 3000

X_train, X_test = X[:n_split], X[n_split:]
y_train, y_test = y[:n_split], y[n_split:]

# SAMME.R (default algorithm): boosting on class probability estimates.
bdt_real = AdaBoostClassifier(
    DecisionTreeClassifier(max_depth=2),
    n_estimators=600,
    learning_rate=1)

# SAMME: boosting on discrete class predictions only.
bdt_discrete = AdaBoostClassifier(
    DecisionTreeClassifier(max_depth=2),
    n_estimators=600,
    learning_rate=1.5,
    algorithm="SAMME")

bdt_real.fit(X_train, y_train)
bdt_discrete.fit(X_train, y_train)

# Record the test error after each boosting iteration via staged_predict,
# which yields predictions of the partial ensemble at every stage.
real_test_errors = []
discrete_test_errors = []

for real_test_predict, discrete_train_predict in zip(
        bdt_real.staged_predict(X_test), bdt_discrete.staged_predict(X_test)):
    real_test_errors.append(
        1. - accuracy_score(real_test_predict, y_test))
    discrete_test_errors.append(
        1. - accuracy_score(discrete_train_predict, y_test))

n_trees_discrete = len(bdt_discrete)
n_trees_real = len(bdt_real)

# Boosting might terminate early, but the following arrays are always
# n_estimators long. We crop them to the actual number of trees here:
discrete_estimator_errors = bdt_discrete.estimator_errors_[:n_trees_discrete]
real_estimator_errors = bdt_real.estimator_errors_[:n_trees_real]
discrete_estimator_weights = bdt_discrete.estimator_weights_[:n_trees_discrete]

plt.figure(figsize=(15, 5))

# Left panel: test error of both algorithms per boosting iteration.
plt.subplot(131)
plt.plot(range(1, n_trees_discrete + 1),
         discrete_test_errors, c='black', label='SAMME')
plt.plot(range(1, n_trees_real + 1),
         real_test_errors, c='black',
         linestyle='dashed', label='SAMME.R')
plt.legend()
plt.ylim(0.18, 0.62)
plt.ylabel('Test Error')
plt.xlabel('Number of Trees')

# Middle panel: per-tree classification error for each algorithm.
plt.subplot(132)
plt.plot(range(1, n_trees_discrete + 1), discrete_estimator_errors,
         "b", label='SAMME', alpha=.5)
plt.plot(range(1, n_trees_real + 1), real_estimator_errors,
         "r", label='SAMME.R', alpha=.5)
plt.legend()
plt.ylabel('Error')
plt.xlabel('Number of Trees')
plt.ylim((.2,
          max(real_estimator_errors.max(),
              discrete_estimator_errors.max()) * 1.2))
plt.xlim((-20, len(bdt_discrete) + 20))

# Right panel: per-tree boost weight (SAMME only; SAMME.R weights are
# all one and therefore not plotted).
plt.subplot(133)
plt.plot(range(1, n_trees_discrete + 1), discrete_estimator_weights,
         "b", label='SAMME')
plt.legend()
plt.ylabel('Weight')
plt.xlabel('Number of Trees')
plt.ylim((0, discrete_estimator_weights.max() * 1.2))
plt.xlim((-20, n_trees_discrete + 20))

# prevent overlapping y-axis labels
plt.subplots_adjust(wspace=0.25)
plt.show()
| bsd-3-clause |
AdityaSoni19031997/Machine-Learning | Classifying_datasets/MNIST/neural_networks.py | 1 | 4282 | '''
Using ANN Classifying Handwritten Digits
My first attempt to build a neural network....for evaluating the famous MNIST Digit Classification"
-Aditya Soni
'''
#Import Statements
import numpy as np # for fast calculations
import matplotlib.pyplot as plt # for plotiing
import scipy.special # for sigmoid function
from sklearn.metrics import confusion_matrix
k = list()   # predicted digit labels collected over the test set
k_ = list()  # corresponding true digit labels from the test set
class NeuralNetworks:
    """A minimal three-layer (input/hidden/output) feed-forward neural
    network trained with plain stochastic gradient descent and
    backpropagation.

    Weights are initialised from a normal distribution with standard
    deviation ``n_nodes ** -0.5`` of the receiving layer, and the logistic
    sigmoid is used as the activation function for both layers.
    """

    def __init__(self, inputnodes, hiddennodes, outputnodes, learningrate):
        """Create the network.

        Parameters
        ----------
        inputnodes : int
            Number of input features (e.g. 784 for 28x28 MNIST images).
        hiddennodes : int
            Number of units in the single hidden layer.
        outputnodes : int
            Number of output classes.
        learningrate : float
            SGD step size used in ``train``.
        """
        self.inodes = inputnodes
        self.hnodes = hiddennodes
        self.onodes = outputnodes
        self.lr = learningrate

        # weight matrices: wih maps input -> hidden, who maps hidden -> output
        self.wih = np.random.normal(0.0, pow(self.hnodes, -0.5),
                                    (self.hnodes, self.inodes))
        self.who = np.random.normal(0.0, pow(self.onodes, -0.5),
                                    (self.onodes, self.hnodes))

        # logistic sigmoid 1 / (1 + exp(-x)); expit is already vectorized,
        # so no lambda wrapper is needed
        self.activation_function = scipy.special.expit

    def _forward(self, inputs):
        """Run one forward pass; `inputs` is a column vector.

        Returns (hidden_outputs, final_outputs), both column vectors.
        """
        hidden_outputs = self.activation_function(np.dot(self.wih, inputs))
        final_outputs = self.activation_function(np.dot(self.who,
                                                        hidden_outputs))
        return hidden_outputs, final_outputs

    def train(self, input_list, target_list):
        """Perform one SGD step on a single (input, target) example.

        Parameters
        ----------
        input_list : sequence of float
            Feature values for one sample.
        target_list : sequence of float
            Desired activation for each output node (e.g. one-hot-ish
            vector with 0.99 for the true class and 0.01 elsewhere).
        """
        # convert inputs/targets to column vectors
        inputs = np.array(input_list, ndmin=2).T
        targets = np.array(target_list, ndmin=2).T

        hidden_outputs, final_outputs = self._forward(inputs)

        # output-layer error, then backpropagate it through the
        # hidden->output weights to attribute error to the hidden layer
        output_errors = targets - final_outputs
        hidden_errors = np.dot(self.who.T, output_errors)

        # gradient step: eta * error * y * (1 - y) * upstream_activation.T
        # (y * (1 - y) is the derivative of the sigmoid)
        self.who += self.lr * np.dot(
            output_errors * final_outputs * (1 - final_outputs),
            hidden_outputs.T)
        self.wih += self.lr * np.dot(
            hidden_errors * hidden_outputs * (1 - hidden_outputs),
            inputs.T)

    def query(self, input_list):
        """Return the network's output activations for one sample.

        Parameters
        ----------
        input_list : sequence of float
            Feature values for one sample.

        Returns
        -------
        numpy.ndarray of shape (outputnodes, 1)
            Sigmoid activations in (0, 1); the argmax is the predicted
            class.
        """
        inputs = np.array(input_list, ndmin=2).T
        _, final_outputs = self._forward(inputs)
        return final_outputs
# NOTE: this script is Python 2 (print statements) and expects the MNIST
# CSV files at the hard-coded Windows paths below.
input_nodes = 784  # 28*28 pixels per image, one feature per pixel
hidden_nodes = 300
output_nodes = 10  # digits 0-9
learning_rate = 0.15

# creating an instance of the network
n = NeuralNetworks(input_nodes, hidden_nodes, output_nodes, learning_rate)

# load the training set; each CSV row is: label, pixel_0, ..., pixel_783
train_data_f = open("C:\Python27\mnist\mnist_train.csv", 'r')
train_data_all = train_data_f.readlines()
train_data_f.close()

# one SGD pass over the training data
for rec in train_data_all:
    all_val = rec.split(',')
    # rescale pixels from [0, 255] to (0.01, 1.0) to keep sigmoid inputs
    # away from the saturated 0 region
    inputs = (np.asfarray(all_val[1:]) / 255.0 * .99) + 0.01
    # soft one-hot target: 0.99 for the true digit, 0.01 elsewhere
    targets = np.zeros(output_nodes) + 0.01
    targets[int(all_val[0])] = 0.99
    n.train(inputs, targets)

# load the test set
test_data_f = open("C:\Python27\mnist\mnist_test.csv", 'r')
test_data_all = test_data_f.readlines()
test_data_f.close()

# evaluate: predicted label is the index of the strongest output node
for rec in test_data_all:
    all_val = rec.split(',')
    p = (n.query((np.asfarray(all_val[1:]) / 255 * .99) + 0.01))
    #print max(list(p)) , list(p).index(max(list(p)))
    k.append(list(p).index(max(list(p))))
    k_.append(int(all_val[0]))

# confusion matrix and overall accuracy (trace / number of test samples;
# assumes the test set has exactly 10000 rows -- TODO confirm)
print confusion_matrix(k_, k)
print np.trace(np.asarray(confusion_matrix(k_, k))) / 10000.0

# 1st test: show the first test image and the network's raw outputs
all_values = test_data_all[0].split(',')
print(all_values[0])
img_array = np.asfarray(all_values[1:]).reshape(28, 28)
plt.imshow(img_array, cmap='Greys', interpolation="None")
plt.show()
print n.query((np.asfarray(all_values[1:]) / 255 * .99) + 0.01)

# 2nd test: same for the 100th test image
all_values = test_data_all[99].split(',')
print(all_values[0])
img_array = np.asfarray(all_values[1:]).reshape(28, 28)
plt.imshow(img_array, cmap='Greys', interpolation="None")
plt.show()
print n.query((np.asfarray(all_values[1:]) / 255 * .99) + 0.01)
| mit |
godrayz/trading-with-python | lib/csvDatabase.py | 77 | 6045 | # -*- coding: utf-8 -*-
"""
intraday data handlers in csv format.
@author: jev
"""
from __future__ import division
import pandas as pd
import datetime as dt
import os
from extra import ProgressBar
dateFormat = "%Y%m%d"  # date format for converting filenames to dates
dateTimeFormat = "%Y%m%d %H:%M:%S"  # timestamp format used in csv rows
def fileName2date(fName):
    '''convert filename to date'''
    # Filenames look like SYMBOL_YYYYMMDD.csv: strip the extension, then
    # take the part after the underscore and parse it with dateFormat.
    base = os.path.splitext(fName)[0]
    dateStr = base.split('_')[1]
    return dt.datetime.strptime(dateStr, dateFormat).date()
def parseDateTime(dateTimeStr):
    '''parse a "YYYYMMDD HH:MM:SS" string into a datetime object'''
    return dt.datetime.strptime(dateTimeStr, dateTimeFormat)
def loadCsv(fName):
    ''' load DataFrame from csv file '''
    # Expected layout: first column is a timestamp (used as the index),
    # remaining columns are numeric; the first line is the header.
    with open(fName, 'r') as f:
        lines = f.readlines()

    columns = [c.strip() for c in lines[0].strip().split(',')[1:]]

    index = []
    col_values = [[] for _ in columns]
    for line in lines[1:]:
        cells = line.rstrip().split(',')
        index.append(parseDateTime(cells[0]))
        for pos, cell in enumerate(cells[1:]):
            col_values[pos].append(float(cell))

    return pd.DataFrame(data=dict(zip(columns, col_values)),
                        index=pd.Index(index))
class HistDataCsv(object):
    '''class for working with historic database in .csv format

    One directory per symbol; each file in it holds one day of intraday
    data and is named SYMBOL_YYYYMMDD.csv.
    NOTE: Python 2 code; uses pandas APIs (``DataFrame.from_csv``,
    ``set_value``) that are removed in modern pandas.
    '''
    def __init__(self, symbol, dbDir, autoCreateDir=False):
        # Directory layout: <dbDir>/<symbol>/<symbol>_<YYYYMMDD>.csv
        self.symbol = symbol
        self.dbDir = os.path.normpath(os.path.join(dbDir, symbol))

        if not os.path.exists(self.dbDir) and autoCreateDir:
            print 'Creating data directory ', self.dbDir
            os.mkdir(self.dbDir)

        # available dates, derived from the filenames present on disk
        self.dates = []
        for fName in os.listdir(self.dbDir):
            self.dates.append(fileName2date(fName))

    def saveData(self, date, df, lowerCaseColumns=True):
        ''' add data to database'''
        if lowerCaseColumns:  # this should provide consistency to column names. All lowercase
            df.columns = [c.lower() for c in df.columns]

        s = self.symbol + '_' + date.strftime(dateFormat) + '.csv'  # file name
        dest = os.path.join(self.dbDir, s)  # full path destination
        print 'Saving data to: ', dest
        df.to_csv(dest)

    def loadDate(self, date):
        ''' load a single date as a DataFrame '''
        s = self.symbol + '_' + date.strftime(dateFormat) + '.csv'  # file name

        df = pd.DataFrame.from_csv(os.path.join(self.dbDir, s))
        # strip whitespace that may surround column names in the csv header
        cols = [col.strip() for col in df.columns.tolist()]
        df.columns = cols
        #df = loadCsv(os.path.join(self.dbDir,s))

        return df

    def loadDates(self, dates):
        ''' load multiple dates, concatenating to one DataFrame '''
        tmp = []
        print 'Loading multiple dates for ', self.symbol
        p = ProgressBar(len(dates))

        for i, date in enumerate(dates):
            tmp.append(self.loadDate(date))
            p.animate(i + 1)

        print ''
        return pd.concat(tmp)

    def createOHLC(self):
        ''' create daily ohlc bars from the stored intraday data

        Uses the 'wap' column for high/low -- presumably the (volume)
        weighted average price from the data feed; TODO confirm.
        Days that fail to load or lack the expected columns are skipped
        with a message rather than aborting the whole conversion.
        '''
        ohlc = pd.DataFrame(index=self.dates, columns=['open', 'high', 'low', 'close'])

        for date in self.dates:
            print 'Processing', date
            try:
                df = self.loadDate(date)

                ohlc.set_value(date, 'open', df['open'][0])
                ohlc.set_value(date, 'high', df['wap'].max())
                ohlc.set_value(date, 'low', df['wap'].min())
                ohlc.set_value(date, 'close', df['close'][-1])

            except Exception as e:
                print 'Could not convert:', e

        return ohlc

    def __repr__(self):
        return '{symbol} dataset with {nrDates} days of data'.format(symbol=self.symbol, nrDates=len(self.dates))
class HistDatabase(object):
    ''' class working with multiple symbols at once

    Wraps one HistDataCsv per symbol subdirectory found in dataDir.
    NOTE: Python 2 code (dict.iteritems/itervalues); ``pd.WidePanel``
    has been removed from modern pandas.
    '''
    def __init__(self, dataDir):
        # get symbols from directory names
        symbols = []
        for l in os.listdir(dataDir):
            if os.path.isdir(os.path.join(dataDir, l)):
                symbols.append(l)

        # build dataset
        self.csv = {}  # dict of HistDataCsv handlers, keyed by symbol
        for symbol in symbols:
            self.csv[symbol] = HistDataCsv(symbol, dataDir)

    def loadDates(self, dates=None):
        '''
        get data for all symbols as wide panel

        provide a dates list. If no dates list is provided, common dates are used.
        '''
        if dates is None: dates = self.commonDates

        tmp = {}

        for k, v in self.csv.iteritems():
            tmp[k] = v.loadDates(dates)

        return pd.WidePanel(tmp)

    def toHDF(self, dataFile, dates=None):
        ''' write wide panel data to a hdfstore file '''

        if dates is None: dates = self.commonDates
        store = pd.HDFStore(dataFile)
        wp = self.loadDates(dates)

        store['data'] = wp
        store.close()

    @property
    def commonDates(self):
        ''' return dates common for all symbols '''
        t = [v.dates for v in self.csv.itervalues()]  # get all dates in a list
        # intersect the first symbol's dates with all the others
        d = list(set(t[0]).intersection(*t[1:]))
        return sorted(d)

    def __repr__(self):
        s = '-----Hist CSV Database-----\n'
        for k, v in self.csv.iteritems():
            s += (str(v) + '\n')
        return s
#--------------------
if __name__=='__main__':
    # smoke test: load one day of data for two symbols and pair their closes
    # (hard-coded Windows data directory; adjust to the local setup)
    dbDir =os.path.normpath('D:/data/30sec')
    vxx = HistDataCsv('VXX',dbDir)
    spy = HistDataCsv('SPY',dbDir)
    #
    date = dt.date(2012,8,31)
    print date
    #
    # side-by-side close prices of the two symbols for that date
    pair = pd.DataFrame({'SPY':spy.loadDate(date)['close'],'VXX':vxx.loadDate(date)['close']})
print pair.tail() | bsd-3-clause |
madjelan/scikit-learn | examples/linear_model/plot_ols_ridge_variance.py | 387 | 2060 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Ordinary Least Squares and Ridge Regression Variance
=========================================================
Due to the few points in each dimension and the straight
line that linear regression uses to follow these points
as well as it can, noise on the observations will cause
great variance as shown in the first plot. Every line's slope
can vary quite a bit for each prediction due to the noise
induced in the observations.
Ridge regression is basically minimizing a penalised version
of the least-squares function. The penalisation shrinks the
value of the regression coefficients.
Despite the few data points in each dimension, the slope
of the prediction is much more stable and the variance
in the line itself is greatly reduced, in comparison to that
of the standard linear regression.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# Two training points and two test abscissae are enough to show the effect.
X_train = np.c_[.5, 1].T
y_train = [.5, 1]
X_test = np.c_[0, 2].T

np.random.seed(0)

# The two estimators whose prediction variance is being compared.
classifiers = dict(ols=linear_model.LinearRegression(),
                   ridge=linear_model.Ridge(alpha=.1))

for fignum, (name, clf) in enumerate(classifiers.items(), start=1):
    plt.figure(fignum, figsize=(4, 3))
    plt.clf()
    plt.title(name)
    ax = plt.axes([.12, .12, .8, .8])

    # Refit on six jittered copies of the training data to visualize how much
    # the fitted line moves under observation noise (grey lines/points).
    for _ in range(6):
        noisy_X = .1 * np.random.normal(size=(2, 1)) + X_train
        clf.fit(noisy_X, y_train)
        ax.plot(X_test, clf.predict(X_test), color='.5')
        ax.scatter(noisy_X, y_train, s=3, c='.5', marker='o', zorder=10)

    # Reference fit on the clean training data (blue line, red crosses).
    clf.fit(X_train, y_train)
    ax.plot(X_test, clf.predict(X_test), linewidth=2, color='blue')
    ax.scatter(X_train, y_train, s=30, c='r', marker='+', zorder=10)

    ax.set_xticks(())
    ax.set_yticks(())
    ax.set_ylim((0, 1.6))
    ax.set_xlabel('X')
    ax.set_ylabel('y')
    ax.set_xlim(0, 2)

plt.show()
| bsd-3-clause |
cgre-aachen/gempy | gempy/utils/extract_geomodeller_data.py | 1 | 9895 |
from pylab import *
import copy
import pandas as pn
import gempy as gp
import numpy as np
try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
class ReadGeoModellerXML:
    def __init__(self, fp):
        """
        Reads in and parses a GeoModeller XML file to extract interface and orientation data and the overall model
        settings (e.g. extent and sequential pile). It uses ElementTree to parse the XML and the tree's root can
        be accessed using self.root for more direct access to the file.

        Todo: - extract faults

        Args:
            fp (str): Filepath for the GeoModeller xml file to be read.
        """
        self.tree = ET.ElementTree(file=fp)  # load xml as tree
        self.root = self.tree.getroot()
        # XML namespaces used throughout GeoModeller files
        self.xmlns = "http://www.geomodeller.com/geo"
        self.gml = "http://www.opengis.net/gml"

        self.extent = self._get_extent()
        self.data = self.extract_data()
        self.series = list(self.data.keys())
        self.stratigraphic_column, self.surface_points, self.orientations = self.get_dataframes()
        self.series_info = self._get_series_fmt_dict()
        self.faults = self.get_faults()
        self.series_distribution = self.get_series_distribution()
        self.fault_matrix = self.get_fault_matrix()

    def _get_extent(self):
        """
        Extracts model extent from ElementTree root and returns it as tuple of floats.

        Returns:
            tuple: Model extent as (xmin, xmax, ymin, ymax, zmin, zmax).
        """
        # positions of the extent nodes are fixed by the GeoModeller schema
        xy = self.root[0][0][0][0].attrib
        z = self.root[0][0][0][1].attrib
        return tuple(np.array([xy["Xmin"], xy["Xmax"],
                               xy["Ymin"], xy["Ymax"],
                               z["Zmin"], z["Zmax"]]).astype(float))

    def extract_data(self):
        """
        Extracts relevant data from the GeoModeller XML file ElementTree root (self.root) and returns it as a dictionary.

        Returns:
            (dict): Data dictionary, keyed by series name.
        """
        data = {}
        for s in self.get_psc():
            sn = s.get("name")
            data[sn] = {}  # create a dict for each series
            data[sn]["formations"] = []
            data[sn]["InfluencedByFault"] = []
            data[sn]["relation"] = s.get("relation")  # add relation, whatever that is

            for c in s:
                if c.tag == "{" + self.xmlns + "}Data":  # append formation names to list of formations
                    data[sn]["formations"].append(c.get("Name"))

                if c.tag == "{" + self.xmlns + "}InfluencedByFault":  # add fault influences
                    data[sn]["InfluencedByFault"].append(c.get("Name"))

                if c.tag == "{" + self.xmlns + "}PotentialField":
                    data[sn]["gradients"] = []
                    data[sn]["interfaces"] = []
                    data[sn]["interfaces_counters"] = []
                    data[sn]["solutions"] = []
                    data[sn]["constraints"] = []

                    for cc in c:
                        # COVARIANCE
                        if cc.tag == "{" + self.xmlns + "}covariance":
                            data[sn]["covariance"] = cc.attrib
                        # GRADIENTS (orientation measurements)
                        if cc.tag == "{" + self.xmlns + "}Gradients":
                            for gr in cc:
                                data[sn]["gradients"].append([gr.get("Gx"), gr.get("Gy"), gr.get("Gz"),
                                                              gr.get("XGr"), gr.get("YGr"), gr.get("ZGr")])
                        # INTERFACES (surface points)
                        if cc.tag == "{" + self.xmlns + "}Points":
                            for co in cc:
                                data[sn]["interfaces"].append([float(co[0].text), float(co[1].text), float(co[2].text)])
                        # INTERFACE COUNTERS (points per formation)
                        if cc.tag == "{" + self.xmlns + "}InterfacePoints":
                            for ip in cc:
                                data[sn]["interfaces_counters"].append([int(ip.get("npnt")), int(ip.get("pnt"))])
                        # CONSTRAINTS
                        if cc.tag == "{" + self.xmlns + "}Constraints":
                            for co in cc:
                                data[sn]["constraints"].append(float(co.get("value")))
                        # SOLUTIONS
                        if cc.tag == "{" + self.xmlns + "}Solutions":
                            for sol in cc:
                                data[sn]["solutions"].append(float(sol.get("sol")))

                        if cc.tag == "{" + self.xmlns + "}ModelFaults":
                            print('hey')  # FIXME(review): leftover debug output

                    # convert from str to float
                    data[sn]["gradients"] = np.array(data[sn]["gradients"]).astype(float)
                    data[sn]["interfaces"] = np.array(data[sn]["interfaces"]).astype(float)
                    data[sn]["interfaces_counters"] = np.array(data[sn]["interfaces_counters"]).astype(float)
                    data[sn]["solutions"] = np.array(data[sn]["solutions"]).astype(float)

        return data

    def get_dataframes(self):
        """Assemble the stratigraphic pile and the surface-point and
        orientation DataFrames from the extracted data dictionary.

        Returns:
            tuple: (strat_pile dict, surface_points DataFrame, orientations DataFrame)
        """
        strat_pile = dict.fromkeys(self.series)
        surface_points = pn.DataFrame()
        orientations = pn.DataFrame()

        for serie in self.series:
            strat_pile[serie] = self.data[serie]['formations']
            interf_s = self.data[serie].get('interfaces')
            orient_s = self.data[serie].get('gradients')
            formations = self.data[serie].get('formations')

            if interf_s is not None:
                interf = pn.DataFrame(columns=['X', 'Y', 'Z'], data=interf_s)
                interf['series'] = serie
                if len(formations) > 1:
                    # expand the per-formation point counters into one
                    # formation label per interface point
                    interf_formations = []
                    for j, fmt in enumerate(formations):
                        for n in range(int(self.data[serie].get('interfaces_counters')[j, 0])):
                            interf_formations.append(fmt)
                    interf['formation'] = interf_formations
                else:
                    interf['formation'] = formations[0]
                # BUG FIX: pn.DataFrame.append was deprecated and removed in
                # pandas 2.0; pn.concat is the drop-in equivalent here
                surface_points = pn.concat([surface_points, interf])

            if orient_s is not None:
                orient = pn.DataFrame(columns=['G_x', 'G_y', 'G_z', 'X', 'Y', 'Z'], data=orient_s)
                orient['series'] = serie
                orient['formation'] = formations[0]  # formation is wrong here but does not matter for orientations
                orientations = pn.concat([orientations, orient])

        return strat_pile, surface_points, orientations

    def get_psc(self):
        """Returns the ProjectStratigraphicColumn tree element used for several data extractions."""
        return self.root.find("{" + self.xmlns + "}GeologicalModel").find(
            "{" + self.xmlns + "}ProjectStratigraphicColumn")

    def get_order_formations(self):
        """Flatten the series->formations mapping into one ordered list."""
        order_formations = []
        for entry in self.series_distribution.values():
            if type(entry) is str:
                order_formations.append(entry)
            elif type(entry) is tuple:
                for e in entry:
                    order_formations.append(e)
        return order_formations

    def get_faults(self):
        """
        Extracts fault names from ElementTree root.

        Returns:
            tuple: Fault names (str) ordered as in the GeoModeller XML.
        """
        faults = []
        for c in self.root[2]:
            faults.append(c.get("Name"))
        return tuple(faults)

    def get_series_distribution(self):
        """
        Combines faults and stratigraphic series into an unordered dictionary as keys and maps the correct
        formations to them as a list value. Faults series get a list of their own string assigned as formation.

        Returns:
            (dict): maps Series (str) -> Formations (str or tuple of str)
        """
        series_distribution = {}
        for key in self.series_info.keys():
            fmts = self.series_info[key]["formations"]
            if len(fmts) == 1:
                series_distribution[key] = fmts[0]
            else:
                series_distribution[key] = tuple(fmts)

        # fault series are identified by name and map onto themselves
        for f in self.stratigraphic_column:
            if "Fault" in f or "fault" in f:
                series_distribution[f] = f

        return series_distribution

    def _get_series_fmt_dict(self):
        """Map every series to its formations and fault influences."""
        sp = {}
        for i, s in enumerate(self.stratigraphic_column):  # loop over all series
            fmts = []  # init formation storage list
            influenced_by = []  # init influenced by list
            # consistency: reuse get_psc() instead of repeating the find chain
            for c in self.get_psc()[i]:
                if "Data" in c.tag:
                    fmts.append(c.attrib["Name"])
                elif "InfluencedByFault" in c.tag:
                    influenced_by.append(c.attrib["Name"])
            sp[s] = {}
            sp[s]["formations"] = fmts
            sp[s]["InfluencedByFault"] = influenced_by

        return sp

    def _where_do_faults_stop(self):
        """Map each fault name to the list of fault names it stops on."""
        fstop = {}
        for i, f in enumerate(self.root[2]):
            stops_on = []
            for c in self.root[2][i][2:]:
                stops_on.append(c.get("Name"))
            fstop[f.get("Name")] = stops_on

        return fstop

    def get_fault_matrix(self):
        """Binary (n_faults, n_faults) matrix; entry (i, j) == 1 when fault i
        stops on fault j."""
        nf = len(self.faults)
        fm = np.zeros((nf, nf))  # zero matrix of n_faults^2
        fstop = self._where_do_faults_stop()
        for i, f in enumerate(self.faults):
            for fs in fstop[f]:
                j = np.where(np.array(self.faults) == fs)[0][0]
                fm[i, j] = 1

        return fm
| lgpl-3.0 |
followthesheep/galpy | galpy/snapshot_src/Snapshot.py | 2 | 13530 | import numpy as nu
from galpy.orbit import Orbit
from galpy.potential_src.planarPotential import RZToplanarPotential
import galpy.util.bovy_plot as plot
from directnbody import direct_nbody
class Snapshot(object):
    """General snapshot = collection of particles class"""
    # Axis labels for the plotting routines (shared by plot and plot3d)
    _labeldict = {'t': r'$t$', 'R': r'$R$', 'vR': r'$v_R$', 'vT': r'$v_T$',
                  'z': r'$z$', 'vz': r'$v_z$', 'phi': r'$\phi$',
                  'x': r'$x$', 'y': r'$y$', 'vx': r'$v_x$', 'vy': r'$v_y$'}

    def __init__(self, *args, **kwargs):
        """
        NAME:
           __init__
        PURPOSE:
           initialize a snapshot object
        INPUT:
           Initialize using:
              1) list of orbits, list of masses (second positional argument
                 or masses= keyword)
           Coming soon:
              2) observations
              3) DFs to draw from
        OUTPUT:
        HISTORY:
           2011-02-02 - Started - Bovy
        """
        if isinstance(args[0], list) and isinstance(args[0][0], Orbit):
            self.orbits = args[0]
            # BUG FIX 1: kwargs.has_key was removed in Python 3; use 'in'.
            # BUG FIX 2: also accept masses as the second positional argument,
            # which is how the internal integrators construct Snapshots;
            # previously those masses were silently ignored and reset to ones.
            if len(args) > 1:
                self.masses = args[1]
            elif 'masses' in kwargs:
                self.masses = kwargs['masses']
            else:
                self.masses = nu.ones(len(self.orbits))
        return None

    def integrate(self, t, pot=None, method='test-particle',
                  **kwargs):
        """
        NAME:
           integrate
        PURPOSE:
           integrate the snapshot in time
        INPUT:
           t - numpy.array of times to save the snapshots at (must start at 0)
           pot= potential object or list of such objects (default=None)
           method= method to use ('test-particle' or 'direct-python' for now)
        OUTPUT:
           list of snapshots at times t
        HISTORY:
           2011-02-02 - Written - Bovy (NYU)
        """
        if method.lower() == 'test-particle':
            return self._integrate_test_particle(t, pot)
        elif method.lower() == 'direct-python':
            return self._integrate_direct_python(t, pot, **kwargs)

    def _integrate_test_particle(self, t, pot):
        """Integrate the snapshot as a set of test particles in an external \
        potential"""
        # Integrate each orbit independently (no self-gravity)
        for o in self.orbits:
            o.integrate(t, pot)
        # Repackage: one Snapshot per requested time
        out = []
        for ii in range(len(t)):
            outOrbits = []
            for o in self.orbits:
                outOrbits.append(o(t[ii]))
            out.append(Snapshot(outOrbits, self.masses))
        return out

    @staticmethod
    def _rect_to_cyl(qx, qy, px, py):
        """Convert rectangular position (qx, qy) and velocity (px, py) to
        cylindrical (R, phi, vR, vT)."""
        R = nu.sqrt(qx**2. + qy**2.)
        phi = nu.arccos(qx / R)
        if qy < 0.:
            phi = 2. * nu.pi - phi
        vR = px * nu.cos(phi) + py * nu.sin(phi)
        vT = py * nu.cos(phi) - px * nu.sin(phi)
        return R, phi, vR, vT

    def _integrate_direct_python(self, t, pot, **kwargs):
        """Integrate the snapshot using a direct force summation method \
        written entirely in python"""
        # Prepare input for direct_nbody: rectangular phase-space coordinates
        q = []
        p = []
        nq = len(self.orbits)
        dim = self.orbits[0].dim()
        if pot is None:
            thispot = None
        elif dim == 2:
            # planar orbits need the planar version of the potential
            thispot = RZToplanarPotential(pot)
        else:
            thispot = pot
        for ii in range(nq):
            # Transform each orbit to the rectangular frame
            o = self.orbits[ii]
            if dim == 1:
                thisq = nu.array([o.x()]).flatten()
                thisp = nu.array([o.vx()]).flatten()
            elif dim == 2:
                thisq = nu.array([o.x(), o.y()]).flatten()
                thisp = nu.array([o.vx(), o.vy()]).flatten()
            elif dim == 3:
                thisq = nu.array([o.x(), o.y(), o.z()]).flatten()
                thisp = nu.array([o.vx(), o.vy(), o.vz()]).flatten()
            q.append(thisq)
            p.append(thisp)
        # Run simulation
        nbody_out = direct_nbody(q, p, self.masses, t, pot=thispot, **kwargs)
        # Post-process output: convert the rectangular output back to the
        # (cylindrical) initial-condition format Orbit expects
        nt = len(nbody_out)
        out = []
        for ii in range(nt):
            qs = nbody_out[ii][0]
            ps = nbody_out[ii][1]
            snap_orbits = []
            for jj in range(nq):
                if dim == 3:
                    R, phi, vR, vT = self._rect_to_cyl(qs[jj][0], qs[jj][1],
                                                       ps[jj][0], ps[jj][1])
                    vxvv = nu.zeros(dim * 2)
                    vxvv[0] = R
                    vxvv[1] = vR
                    vxvv[2] = vT
                    vxvv[3] = qs[jj][2]
                    vxvv[4] = ps[jj][2]
                    vxvv[5] = phi
                elif dim == 2:
                    R, phi, vR, vT = self._rect_to_cyl(qs[jj][0], qs[jj][1],
                                                       ps[jj][0], ps[jj][1])
                    vxvv = nu.zeros(dim * 2)
                    vxvv[0] = R
                    vxvv[1] = vR
                    vxvv[2] = vT
                    vxvv[3] = phi
                else:  # dim == 1
                    vxvv = [qs[jj], ps[jj]]
                snap_orbits.append(Orbit(vxvv))
            out.append(Snapshot(snap_orbits, self.masses))
        return out

    # Plotting
    def _orbit_values(self, d):
        """Evaluate phase-space dimension d ('R', 'vR', 'x', 'phi', ...) for
        every orbit and return the values as a list."""
        # getattr-based dispatch replaces the original long if/elif chains;
        # an unsupported dimension now raises a clear AttributeError
        return [getattr(o, d)() for o in self.orbits]

    def plot(self, *args, **kwargs):
        """
        NAME:
           plot
        PURPOSE:
           plot the snapshot (with reasonable defaults)
        INPUT:
           d1= first dimension to plot ('x', 'y', 'R', 'vR', 'vT', 'z', 'vz', ...)
           d2= second dimension to plot
           matplotlib.plot inputs+bovy_plot.plot inputs
        OUTPUT:
           sends plot to output device
        HISTORY:
           2011-02-06 - Written based on Orbit's plot
        """
        # Fill in defaults based on the orbits' phase-space dimensionality
        if 'd1' not in kwargs and 'd2' not in kwargs:
            ndim = len(self.orbits[0].vxvv)
            if ndim == 3:
                d1, d2 = 'R', 'vR'
            elif ndim == 4:
                d1, d2 = 'x', 'y'
            elif ndim == 2:
                d1, d2 = 'x', 'vx'
            else:  # 5 or 6 phase-space dimensions
                d1, d2 = 'R', 'z'
        elif 'd1' not in kwargs:
            # NOTE: a snapshot's orbits carry no time accessor, so 't' fails
            # downstream (as it did before this rewrite)
            d1 = 't'
            d2 = kwargs.pop('d2')
        elif 'd2' not in kwargs:
            d1 = kwargs.pop('d1')
            d2 = 't'
        else:
            d1 = kwargs.pop('d1')
            d2 = kwargs.pop('d2')
        # Get x and y
        x = self._orbit_values(d1)
        y = self._orbit_values(d2)
        # Plot
        if 'xlabel' not in kwargs:
            kwargs['xlabel'] = self._labeldict[d1]
        if 'ylabel' not in kwargs:
            kwargs['ylabel'] = self._labeldict[d2]
        if len(args) == 0:
            args = (',',)
        plot.bovy_plot(x, y, *args, **kwargs)

    def plot3d(self, *args, **kwargs):
        """
        NAME:
           plot3d
        PURPOSE:
           plot the snapshot in 3D (with reasonable defaults)
        INPUT:
           d1= first dimension to plot ('x', 'y', 'R', 'vR', 'vT', 'z', 'vz', ...)
           d2= second dimension to plot
           d3= third dimension to plot
           matplotlib.plot inputs+bovy_plot.plot3d inputs
        OUTPUT:
           sends plot to output device
        HISTORY:
           2011-02-06 - Written based on Orbit's plot3d
        """
        # Defaults: either all three dimensions are given or none are
        if 'd1' not in kwargs and 'd2' not in kwargs and 'd3' not in kwargs:
            ndim = len(self.orbits[0].vxvv)
            if ndim == 3:
                d1, d2, d3 = 'R', 'vR', 'vT'
            elif ndim == 4:
                d1, d2, d3 = 'x', 'y', 'vR'
            elif ndim == 2:
                raise AttributeError("Cannot plot 3D aspects of 1D orbits")
            elif ndim == 5:
                d1, d2, d3 = 'R', 'vR', 'z'
            else:  # ndim == 6
                d1, d2, d3 = 'x', 'y', 'z'
        elif not ('d1' in kwargs and 'd2' in kwargs and 'd3' in kwargs):
            raise AttributeError("Please provide 'd1', 'd2', and 'd3'")
        else:
            d1 = kwargs.pop('d1')
            d2 = kwargs.pop('d2')
            d3 = kwargs.pop('d3')
        # Get x, y, and z
        x = self._orbit_values(d1)
        y = self._orbit_values(d2)
        z = self._orbit_values(d3)
        # Plot
        if 'xlabel' not in kwargs:
            kwargs['xlabel'] = self._labeldict[d1]
        if 'ylabel' not in kwargs:
            kwargs['ylabel'] = self._labeldict[d2]
        if 'zlabel' not in kwargs:
            kwargs['zlabel'] = self._labeldict[d3]
        if len(args) == 0:
            args = (',',)
        plot.bovy_plot3d(x, y, z, *args, **kwargs)

    # Pickling: a snapshot is fully described by its orbits and masses
    def __getstate__(self):
        return (self.orbits, self.masses)

    def __setstate__(self, state):
        self.orbits = state[0]
        self.masses = state[1]
| bsd-3-clause |
mjvakili/gambly | code/tests/test_data.py | 1 | 5063 | '''
testing how the model fits the data
'''
from __future__ import (division, print_function, absolute_import,
unicode_literals)
import numpy as np
import matplotlib.pyplot as plt
import os.path as path
import time
from Corrfunc import _countpairs
from Corrfunc.utils import read_catalog
# --- Local ---
# --- halotools ---
from halotools.sim_manager import CachedHaloCatalog
from halotools.empirical_models import HodModelFactory
from halotools.empirical_models import TrivialPhaseSpace, Zheng07Cens
from halotools.empirical_models import Zheng07Sats , HeavisideAssembias
from halotools.empirical_models import PrebuiltHodModelFactory
from halotools.mock_observables import tpcf
from halotools.empirical_models.factories.mock_helpers import three_dim_pos_bundle
from halotools.mock_observables import FoFGroups
from halotools.mock_observables.pair_counters import npairs_3d
from halotools.mock_observables.catalog_analysis_helpers import return_xyz_formatted_array
import matplotlib.pyplot as plt
from ChangTools.plotting import prettyplot
from ChangTools.plotting import prettycolors
from ChangTools.plotting import prettyplot
from ChangTools.plotting import prettycolors
from Corrfunc.utils import read_catalog
from halotools.empirical_models import NFWPhaseSpace
from halotools.empirical_models import enforce_periodicity_of_box
prettyplot()
class AssembiasZheng07Sats(Zheng07Sats, HeavisideAssembias):
    # Zheng07 satellite occupation model decorated with heaviside assembly
    # bias: HeavisideAssembias wraps the 'mean_occupation' method.
    def __init__(self, **kwargs):
        """Zheng07 satellites (Mr < -21 threshold) with assembly bias on the
        mean occupation; extra kwargs are forwarded to HeavisideAssembias."""
        Zheng07Sats.__init__(self, threshold = -21)
        HeavisideAssembias.__init__(self,
            method_name_to_decorate = 'mean_occupation',
            lower_assembias_bound = 0.,
            upper_assembias_bound = np.inf,
            **kwargs)
# HOD model components: Zheng07 centrals plus assembly-biased satellites,
# both for the Mr < -21 luminosity-threshold sample.
cens_occ_model = Zheng07Cens(threshold = -21)
cens_prof_model = TrivialPhaseSpace()
sats_occ_model = AssembiasZheng07Sats()
sats_prof_model = NFWPhaseSpace()
# composite HOD model used by main() below
model= HodModelFactory(
    centrals_occupation = cens_occ_model,
    centrals_profile = cens_prof_model,
    satellites_occupation = sats_occ_model,
    satellites_profile = sats_prof_model)
def main():
    """Populate a mock galaxy catalog with the (assembly-biased) HOD model,
    measure wp(rp) with Corrfunc and compare it against the SDSS data."""
    # jackknife covariance of the data measurements (wp followed by xi bins)
    cov = np.loadtxt("../../data/wpxicov_dr72_bright0_mr21.0_z0.159_nj400")
    # finite-volume correction factors for the two simulation box sizes
    f_MD = (1. + 71.74*10**6. / (1000.)**3.)
    f_bol = (1. + 71.74*10.**6. / (250.)**3.)
    print("covariance correction factor=" , f_bol/f_MD)
    cov = cov*f_bol/f_MD
    # best-fit HOD parameters; assembly-bias strength switched off (0.0)
    model.param_dict['logM0'] = 12.59
    model.param_dict['sigma_logM'] = 0.49
    model.param_dict['logMmin'] = 12.78
    model.param_dict['alpha'] = 1.14
    model.param_dict['logM1'] = 13.99
    model.param_dict['mean_occupation_satellites_assembias_param1'] = 0.0
    halocat = CachedHaloCatalog(simname = 'bolplanck', redshift = 0, halo_finder = 'rockstar')
    model.populate_mock(halocat, enforce_PBC = True)
    pos = three_dim_pos_bundle(model.mock.galaxy_table, 'x', 'y', 'z')
    # sanity check: compare mock number density against the data value
    print("modelnumber density=" , len(pos)/halocat.Lbox**3.)
    print("data number density=" , 1.16*10**-3.)
    x = model.mock.galaxy_table['x']
    y = model.mock.galaxy_table['y']
    z = model.mock.galaxy_table['z']
    vz = model.mock.galaxy_table['vz']
    # applying RSD (redshift-space distortion along the z axis)
    pos = return_xyz_formatted_array(x, y, z, velocity = vz, velocity_distortion_dimension = 'z')
    # enforcing PBC after the distortion moved galaxies outside the box
    pos = enforce_periodicity_of_box(pos, halocat.Lbox)
    tstart = time.time()
    t0 = tstart
    # Corrfunc expects float32 coordinate arrays
    pos = pos.astype(np.float32)
    x, y, z = pos[:,0] , pos[:,1] , pos[:,2]
    t1 = time.time()
    print("Done reading the data - time taken = {0:10.1f} seconds"
          .format(t1 - t0))
    print("Beginning Correlation functions calculations")
    boxsize = halocat.Lbox
    nthreads = 4
    pimax = 40.0  # line-of-sight integration depth for wp
    binfile = path.join(path.dirname(path.abspath(__file__)),
                        "../../", "bin")
    autocorr = 1
    numbins_to_print = 12  # number of rp bins measured/reported
    print("\nRunning 2-D projected correlation function wp(rp)")
    results_wp = _countpairs.countpairs_wp(boxsize, pimax, nthreads,
                                           binfile, x, y, z)
    print("\n# ****** wp: first {0} bins ******* "
          .format(numbins_to_print))
    print("# rmin rmax rpavg wp npairs")
    print("##########################################################")
    for ibin in range(numbins_to_print):
        items = results_wp[ibin]
        print("{0:12.4f} {1:12.4f} {2:10.4f} {3:10.1f} {4:10d}"
              .format(items[0], items[1], items[2], items[3], items[4]))
    print("-----------------------------------------------------------")
    # data wp values are the second column of the measurement file
    data_wp = np.loadtxt("../../data/wpxi_dr72_bright0_mr21.0_z0.159_nj400")[:,1]
    print(data_wp.shape)
    # wp errors come from the first 12 diagonal entries of the covariance
    data_wp_error = np.sqrt(np.diag(cov)[:12])
    print(data_wp_error.shape)
    rbins = np.loadtxt(binfile)
    rs = np.mean(rbins , axis = 1)  # bin centers
    # overlay the model wp on the data with error bars, log-log axes
    plt.figure(figsize=(10,10))
    plt.errorbar(rs , data_wp , data_wp_error , fmt=".k" , capsize = 2)
    plt.plot(rs , np.array(results_wp)[:,3])
    plt.loglog()
    plt.savefig("wp.pdf")

if __name__ == "__main__":
    main()
| mit |
nvoron23/scikit-learn | sklearn/grid_search.py | 61 | 37197 | """
The :mod:`sklearn.grid_search` includes utilities to fine-tune the parameters
of an estimator.
"""
from __future__ import print_function
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>,
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
from collections import Mapping, namedtuple, Sized
from functools import partial, reduce
from itertools import product
import operator
import warnings
import numpy as np
from .base import BaseEstimator, is_classifier, clone
from .base import MetaEstimatorMixin, ChangedBehaviorWarning
from .cross_validation import check_cv
from .cross_validation import _fit_and_score
from .externals.joblib import Parallel, delayed
from .externals import six
from .utils import check_random_state
from .utils.random import sample_without_replacement
from .utils.validation import _num_samples, indexable
from .utils.metaestimators import if_delegate_has_method
from .metrics.scorer import check_scoring
__all__ = ['GridSearchCV', 'ParameterGrid', 'fit_grid_point',
'ParameterSampler', 'RandomizedSearchCV']
class ParameterGrid(object):
    """Grid of parameters with a discrete number of values for each.

    Can be used to iterate over parameter value combinations with the
    Python built-in function iter; also supports ``len()`` and positional
    indexing.

    Read more in the :ref:`User Guide <grid_search>`.

    Parameters
    ----------
    param_grid : dict of string to sequence, or sequence of such
        The parameter grid to explore, as a dictionary mapping estimator
        parameters to sequences of allowed values.

        An empty dict signifies default parameters.

        A sequence of dicts signifies a sequence of grids to search, and is
        useful to avoid exploring parameter combinations that make no sense
        or have no effect. See the examples below.

    Examples
    --------
    >>> from sklearn.grid_search import ParameterGrid
    >>> param_grid = {'a': [1, 2], 'b': [True, False]}
    >>> list(ParameterGrid(param_grid)) == (
    ...    [{'a': 1, 'b': True}, {'a': 1, 'b': False},
    ...     {'a': 2, 'b': True}, {'a': 2, 'b': False}])
    True

    >>> grid = [{'kernel': ['linear']}, {'kernel': ['rbf'], 'gamma': [1, 10]}]
    >>> list(ParameterGrid(grid)) == [{'kernel': 'linear'},
    ...                               {'kernel': 'rbf', 'gamma': 1},
    ...                               {'kernel': 'rbf', 'gamma': 10}]
    True
    >>> ParameterGrid(grid)[1] == {'kernel': 'rbf', 'gamma': 1}
    True

    See also
    --------
    :class:`GridSearchCV`:
        uses ``ParameterGrid`` to perform a full parallelized parameter search.
    """

    def __init__(self, param_grid):
        if isinstance(param_grid, Mapping):
            # wrap dictionary in a singleton list to support either dict
            # or list of dicts
            param_grid = [param_grid]
        self.param_grid = param_grid

    def __iter__(self):
        """Iterate over the points in the grid.

        Returns
        -------
        params : iterator over dict of string to any
            Yields dictionaries mapping each estimator parameter to one of its
            allowed values.
        """
        for p in self.param_grid:
            # Always sort the keys of a dictionary, for reproducibility
            items = sorted(p.items())
            if not items:
                yield {}
            else:
                keys, values = zip(*items)
                for v in product(*values):
                    params = dict(zip(keys, v))
                    yield params

    def __len__(self):
        """Number of points on the grid."""
        # BUG FIX: replaced partial(reduce, operator.mul)/np.product with a
        # plain loop product; np.product was removed in NumPy 2.0 and the
        # pure-python version handles arbitrary sequence lengths anyway.
        n_points = 0
        for p in self.param_grid:
            if p:
                size = 1
                for v in p.values():
                    size *= len(v)
                n_points += size
            else:
                n_points += 1  # an empty grid contributes one point
        return n_points

    def __getitem__(self, ind):
        """Get the parameters that would be ``ind``th in iteration

        Parameters
        ----------
        ind : int
            The iteration index

        Returns
        -------
        params : dict of string to any
            Equal to list(self)[ind]
        """
        # This is used to make discrete sampling without replacement memory
        # efficient.
        for sub_grid in self.param_grid:
            # XXX: could memoize information used here
            if not sub_grid:
                if ind == 0:
                    return {}
                else:
                    ind -= 1
                    continue

            # Reverse so most frequent cycling parameter comes first
            keys, values_lists = zip(*sorted(sub_grid.items())[::-1])
            sizes = [len(v_list) for v_list in values_lists]
            # BUG FIX: np.product was removed in NumPy 2.0; compute the
            # sub-grid size with a plain loop instead.
            total = 1
            for size in sizes:
                total *= size

            if ind >= total:
                # Try the next grid
                ind -= total
            else:
                out = {}
                for key, v_list, n in zip(keys, values_lists, sizes):
                    ind, offset = divmod(ind, n)
                    out[key] = v_list[offset]
                return out

        raise IndexError('ParameterGrid index out of range')
class ParameterSampler(object):
    """Generator yielding randomly sampled parameter settings.

    Parameter combinations are drawn from the given distributions. When every
    entry of ``param_distributions`` is a plain sequence, the finite grid is
    sampled without replacement; as soon as at least one entry is a
    distribution object (anything exposing ``rvs``), sampling is done with
    replacement. Continuous distributions are highly recommended for
    continuous parameters.

    Note that as of SciPy 0.12, the ``scipy.stats.distributions`` do not
    accept a custom RNG instance and always use the singleton RNG from
    ``numpy.random``, so setting ``random_state`` cannot guarantee a
    deterministic iteration when such distributions are used.

    Read more in the :ref:`User Guide <grid_search>`.

    Parameters
    ----------
    param_distributions : dict
        Maps parameter names either to a list of values (sampled uniformly)
        or to a distribution object providing an ``rvs`` method.

    n_iter : integer
        Number of parameter settings that are produced.

    random_state : int or RandomState
        Pseudo random number generator state used for random uniform sampling
        from lists of possible values instead of scipy.stats distributions.

    Returns
    -------
    params : dict of string to any
        **Yields** dictionaries mapping each estimator parameter to
        a sampled value.
    """

    def __init__(self, param_distributions, n_iter, random_state=None):
        self.param_distributions = param_distributions
        self.n_iter = n_iter
        self.random_state = random_state

    def __iter__(self):
        rng = check_random_state(self.random_state)
        # Sampling without replacement only makes sense on a finite grid,
        # i.e. when no value is a distribution object with an rvs method.
        finite_grid = np.all([not hasattr(v, "rvs")
                              for v in self.param_distributions.values()])
        if finite_grid:
            # look up sampled parameter settings in parameter grid
            param_grid = ParameterGrid(self.param_distributions)
            grid_size = len(param_grid)
            if grid_size < self.n_iter:
                raise ValueError(
                    "The total space of parameters %d is smaller "
                    "than n_iter=%d." % (grid_size, self.n_iter)
                    + " For exhaustive searches, use GridSearchCV.")
            for idx in sample_without_replacement(grid_size, self.n_iter,
                                                  random_state=rng):
                yield param_grid[idx]
            return
        # Sort the keys so repeated runs with a fixed seed are reproducible.
        sorted_dists = sorted(self.param_distributions.items())
        for _ in six.moves.range(self.n_iter):
            sampled = dict()
            for name, dist in sorted_dists:
                if hasattr(dist, "rvs"):
                    sampled[name] = dist.rvs()
                else:
                    sampled[name] = dist[rng.randint(len(dist))]
            yield sampled

    def __len__(self):
        """Number of points that will be sampled."""
        return self.n_iter
def fit_grid_point(X, y, estimator, parameters, train, test, scorer,
                   verbose, error_score='raise', **fit_params):
    """Fit a clone of ``estimator`` on one train/test split with one
    parameter setting and score it.

    Parameters
    ----------
    X : array-like, sparse matrix or list
        Input data.

    y : array-like or None
        Targets for input data.

    estimator : estimator object
        This estimator will be cloned and then fitted.

    parameters : dict
        Parameters to be set on estimator for this grid point.

    train : ndarray, dtype int or bool
        Boolean mask or indices for training set.

    test : ndarray, dtype int or bool
        Boolean mask or indices for test set.

    scorer : callable or None.
        If provided must be a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.

    verbose : int
        Verbosity level.

    **fit_params : kwargs
        Additional parameter passed to the fit function of the estimator.

    error_score : 'raise' (default) or numeric
        Value to assign to the score if an error occurs in estimator fitting.
        If set to 'raise', the error is raised. If a numeric value is given,
        FitFailedWarning is raised. This parameter does not affect the refit
        step, which will always raise the error.

    Returns
    -------
    score : float
        Score of this parameter setting on given training / test split.

    parameters : dict
        The parameters that have been evaluated.

    n_samples_test : int
        Number of test samples in this split.
    """
    # _fit_and_score does the heavy lifting; its third return value (the
    # scoring time) is not part of this function's contract and is dropped.
    fit_result = _fit_and_score(estimator, X, y, scorer, train, test,
                                verbose, parameters, fit_params, error_score)
    score, n_samples_test = fit_result[0], fit_result[1]
    return score, parameters, n_samples_test
def _check_param_grid(param_grid):
if hasattr(param_grid, 'items'):
param_grid = [param_grid]
for p in param_grid:
for v in p.values():
if isinstance(v, np.ndarray) and v.ndim > 1:
raise ValueError("Parameter array should be one-dimensional.")
check = [isinstance(v, k) for k in (list, tuple, np.ndarray)]
if True not in check:
raise ValueError("Parameter values should be a list.")
if len(v) == 0:
raise ValueError("Parameter values should be a non-empty "
"list.")
class _CVScoreTuple (namedtuple('_CVScoreTuple',
('parameters',
'mean_validation_score',
'cv_validation_scores'))):
# A raw namedtuple is very memory efficient as it packs the attributes
# in a struct to get rid of the __dict__ of attributes in particular it
# does not copy the string for the keys on each instance.
# By deriving a namedtuple class just to introduce the __repr__ method we
# would also reintroduce the __dict__ on the instance. By telling the
# Python interpreter that this subclass uses static __slots__ instead of
# dynamic attributes. Furthermore we don't need any additional slot in the
# subclass so we set __slots__ to the empty tuple.
__slots__ = ()
def __repr__(self):
"""Simple custom repr to summarize the main info"""
return "mean: {0:.5f}, std: {1:.5f}, params: {2}".format(
self.mean_validation_score,
np.std(self.cv_validation_scores),
self.parameters)
class BaseSearchCV(six.with_metaclass(ABCMeta, BaseEstimator,
                                      MetaEstimatorMixin)):
    """Base class for hyper parameter search with cross-validation."""
    @abstractmethod
    def __init__(self, estimator, scoring=None,
                 fit_params=None, n_jobs=1, iid=True,
                 refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs',
                 error_score='raise'):
        self.scoring = scoring
        self.estimator = estimator
        self.n_jobs = n_jobs
        self.fit_params = fit_params if fit_params is not None else {}
        self.iid = iid
        self.refit = refit
        self.cv = cv
        self.verbose = verbose
        self.pre_dispatch = pre_dispatch
        self.error_score = error_score
    @property
    def _estimator_type(self):
        # Mirror the wrapped estimator's type ("classifier"/"regressor") so
        # helpers such as is_classifier work on the search object itself.
        return self.estimator._estimator_type
    def score(self, X, y=None):
        """Returns the score on the given data, if the estimator has been refit
        This uses the score defined by ``scoring`` where provided, and the
        ``best_estimator_.score`` method otherwise.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Input data, where n_samples is the number of samples and
            n_features is the number of features.
        y : array-like, shape = [n_samples] or [n_samples, n_output], optional
            Target relative to X for classification or regression;
            None for unsupervised learning.
        Returns
        -------
        score : float
        Notes
        -----
        * The long-standing behavior of this method changed in version 0.16.
        * It no longer uses the metric provided by ``estimator.score`` if the
          ``scoring`` parameter was set when fitting.
        """
        if self.scorer_ is None:
            raise ValueError("No score function explicitly defined, "
                             "and the estimator doesn't provide one %s"
                             % self.best_estimator_)
        if self.scoring is not None and hasattr(self.best_estimator_, 'score'):
            warnings.warn("The long-standing behavior to use the estimator's "
                          "score function in {0}.score has changed. The "
                          "scoring parameter is now used."
                          "".format(self.__class__.__name__),
                          ChangedBehaviorWarning)
        return self.scorer_(self.best_estimator_, X, y)
    @if_delegate_has_method(delegate='estimator')
    def predict(self, X):
        """Call predict on the estimator with the best found parameters.
        Only available if ``refit=True`` and the underlying estimator supports
        ``predict``.
        Parameters
        -----------
        X : indexable, length n_samples
            Must fulfill the input assumptions of the
            underlying estimator.
        """
        return self.best_estimator_.predict(X)
    @if_delegate_has_method(delegate='estimator')
    def predict_proba(self, X):
        """Call predict_proba on the estimator with the best found parameters.
        Only available if ``refit=True`` and the underlying estimator supports
        ``predict_proba``.
        Parameters
        -----------
        X : indexable, length n_samples
            Must fulfill the input assumptions of the
            underlying estimator.
        """
        return self.best_estimator_.predict_proba(X)
    @if_delegate_has_method(delegate='estimator')
    def predict_log_proba(self, X):
        """Call predict_log_proba on the estimator with the best found parameters.
        Only available if ``refit=True`` and the underlying estimator supports
        ``predict_log_proba``.
        Parameters
        -----------
        X : indexable, length n_samples
            Must fulfill the input assumptions of the
            underlying estimator.
        """
        return self.best_estimator_.predict_log_proba(X)
    @if_delegate_has_method(delegate='estimator')
    def decision_function(self, X):
        """Call decision_function on the estimator with the best found parameters.
        Only available if ``refit=True`` and the underlying estimator supports
        ``decision_function``.
        Parameters
        -----------
        X : indexable, length n_samples
            Must fulfill the input assumptions of the
            underlying estimator.
        """
        return self.best_estimator_.decision_function(X)
    @if_delegate_has_method(delegate='estimator')
    def transform(self, X):
        """Call transform on the estimator with the best found parameters.
        Only available if the underlying estimator supports ``transform`` and
        ``refit=True``.
        Parameters
        -----------
        X : indexable, length n_samples
            Must fulfill the input assumptions of the
            underlying estimator.
        """
        return self.best_estimator_.transform(X)
    @if_delegate_has_method(delegate='estimator')
    def inverse_transform(self, Xt):
        """Call inverse_transform on the estimator with the best found parameters.
        Only available if the underlying estimator implements ``inverse_transform`` and
        ``refit=True``.
        Parameters
        -----------
        Xt : indexable, length n_samples
            Must fulfill the input assumptions of the
            underlying estimator.
        """
        # BUG FIX: this previously delegated to ``transform``, silently
        # applying the forward transformation instead of inverting it.
        return self.best_estimator_.inverse_transform(Xt)
    def _fit(self, X, y, parameter_iterable):
        """Actual fitting,  performing the search over parameters."""
        estimator = self.estimator
        cv = self.cv
        self.scorer_ = check_scoring(self.estimator, scoring=self.scoring)
        n_samples = _num_samples(X)
        X, y = indexable(X, y)
        if y is not None:
            if len(y) != n_samples:
                raise ValueError('Target variable (y) has a different number '
                                 'of samples (%i) than data (X: %i samples)'
                                 % (len(y), n_samples))
        cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
        if self.verbose > 0:
            if isinstance(parameter_iterable, Sized):
                n_candidates = len(parameter_iterable)
                print("Fitting {0} folds for each of {1} candidates, totalling"
                      " {2} fits".format(len(cv), n_candidates,
                                         n_candidates * len(cv)))
        base_estimator = clone(self.estimator)
        pre_dispatch = self.pre_dispatch
        out = Parallel(
            n_jobs=self.n_jobs, verbose=self.verbose,
            pre_dispatch=pre_dispatch
        )(
            delayed(_fit_and_score)(clone(base_estimator), X, y, self.scorer_,
                                    train, test, self.verbose, parameters,
                                    self.fit_params, return_parameters=True,
                                    error_score=self.error_score)
            for parameters in parameter_iterable
            for train, test in cv)
        # Each entry of `out` is a 4-tuple:
        # (score, n_test_samples, scoring_time, parameters)
        n_fits = len(out)
        n_folds = len(cv)
        scores = list()
        grid_scores = list()
        # Results for one candidate occupy n_folds consecutive entries.
        for grid_start in range(0, n_fits, n_folds):
            n_test_samples = 0
            score = 0
            all_scores = []
            for this_score, this_n_test_samples, _, parameters in \
                    out[grid_start:grid_start + n_folds]:
                all_scores.append(this_score)
                if self.iid:
                    # Weight each fold's score by its test-set size.
                    this_score *= this_n_test_samples
                    n_test_samples += this_n_test_samples
                score += this_score
            if self.iid:
                score /= float(n_test_samples)
            else:
                score /= float(n_folds)
            scores.append((score, parameters))
            # TODO: shall we also store the test_fold_sizes?
            grid_scores.append(_CVScoreTuple(
                parameters,
                score,
                np.array(all_scores)))
        # Store the computed scores
        self.grid_scores_ = grid_scores
        # Find the best parameters by comparing on the mean validation score:
        # note that `sorted` is deterministic in the way it breaks ties
        best = sorted(grid_scores, key=lambda x: x.mean_validation_score,
                      reverse=True)[0]
        self.best_params_ = best.parameters
        self.best_score_ = best.mean_validation_score
        if self.refit:
            # fit the best estimator using the entire dataset
            # clone first to work around broken estimators
            best_estimator = clone(base_estimator).set_params(
                **best.parameters)
            if y is not None:
                best_estimator.fit(X, y, **self.fit_params)
            else:
                best_estimator.fit(X, **self.fit_params)
            self.best_estimator_ = best_estimator
        return self
class GridSearchCV(BaseSearchCV):
    """Exhaustive search over specified parameter values for an estimator.
    Important members are fit, predict.
    GridSearchCV implements a "fit" method and a "predict" method like
    any classifier except that the parameters of the classifier
    used to predict is optimized by cross-validation.
    Read more in the :ref:`User Guide <grid_search>`.
    Parameters
    ----------
    estimator : object type that implements the "fit" and "predict" methods
        An object of that type is instantiated for each grid point.
    param_grid : dict or list of dictionaries
        Dictionary with parameters names (string) as keys and lists of
        parameter settings to try as values, or a list of such
        dictionaries, in which case the grids spanned by each dictionary
        in the list are explored. This enables searching over any sequence
        of parameter settings.
    scoring : string, callable or None, optional, default: None
        A string (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.
    fit_params : dict, optional
        Parameters to pass to the fit method.
    n_jobs : int, default 1
        Number of jobs to run in parallel.
    pre_dispatch : int, or string, optional
        Controls the number of jobs that get dispatched during parallel
        execution. Reducing this number can be useful to avoid an
        explosion of memory consumption when more jobs get dispatched
        than CPUs can process. This parameter can be:
            - None, in which case all the jobs are immediately
              created and spawned. Use this for lightweight and
              fast-running jobs, to avoid delays due to on-demand
              spawning of the jobs
            - An int, giving the exact number of total jobs that are
              spawned
            - A string, giving an expression as a function of n_jobs,
              as in '2*n_jobs'
    iid : boolean, default=True
        If True, the data is assumed to be identically distributed across
        the folds, and the loss minimized is the total loss per sample,
        and not the mean loss across the folds.
    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
            - None, to use the default 3-fold cross-validation,
            - integer, to specify the number of folds.
            - An object to be used as a cross-validation generator.
            - An iterable yielding train/test splits.
        For integer/None inputs, if the estimator is a classifier and ``y``
        is either binary or multiclass, :class:`StratifiedKFold` is used.
        In all other cases, :class:`KFold` is used.
        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.
    refit : boolean, default=True
        Refit the best estimator with the entire dataset.
        If "False", it is impossible to make predictions using
        this GridSearchCV instance after fitting.
    verbose : integer
        Controls the verbosity: the higher, the more messages.
    error_score : 'raise' (default) or numeric
        Value to assign to the score if an error occurs in estimator fitting.
        If set to 'raise', the error is raised. If a numeric value is given,
        FitFailedWarning is raised. This parameter does not affect the refit
        step, which will always raise the error.
    Examples
    --------
    >>> from sklearn import svm, grid_search, datasets
    >>> iris = datasets.load_iris()
    >>> parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]}
    >>> svr = svm.SVC()
    >>> clf = grid_search.GridSearchCV(svr, parameters)
    >>> clf.fit(iris.data, iris.target)
    ...                             # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
    GridSearchCV(cv=None, error_score=...,
           estimator=SVC(C=1.0, cache_size=..., class_weight=..., coef0=...,
                         decision_function_shape=None, degree=..., gamma=...,
                         kernel='rbf', max_iter=-1, probability=False,
                         random_state=None, shrinking=True, tol=...,
                         verbose=False),
           fit_params={}, iid=..., n_jobs=1,
           param_grid=..., pre_dispatch=..., refit=...,
           scoring=..., verbose=...)
    Attributes
    ----------
    grid_scores_ : list of named tuples
        Contains scores for all parameter combinations in param_grid.
        Each entry corresponds to one parameter setting.
        Each named tuple has the attributes:
            * ``parameters``, a dict of parameter settings
            * ``mean_validation_score``, the mean score over the
              cross-validation folds
            * ``cv_validation_scores``, the list of scores for each fold
    best_estimator_ : estimator
        Estimator that was chosen by the search, i.e. estimator
        which gave highest score (or smallest loss if specified)
        on the left out data. Not available if refit=False.
    best_score_ : float
        Score of best_estimator on the left out data.
    best_params_ : dict
        Parameter setting that gave the best results on the hold out data.
    scorer_ : function
        Scorer function used on the held out data to choose the best
        parameters for the model.
    Notes
    ------
    The parameters selected are those that maximize the score of the left out
    data, unless an explicit score is passed in which case it is used instead.
    If `n_jobs` was set to a value higher than one, the data is copied for each
    point in the grid (and not `n_jobs` times). This is done for efficiency
    reasons if individual jobs take very little time, but may raise errors if
    the dataset is large and not enough memory is available. A workaround in
    this case is to set `pre_dispatch`. Then, the memory is copied only
    `pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
    n_jobs`.
    See Also
    ---------
    :class:`ParameterGrid`:
        generates all the combinations of a hyperparameter grid.
    :func:`sklearn.cross_validation.train_test_split`:
        utility function to split the data into a development set usable
        for fitting a GridSearchCV instance and an evaluation set for
        its final evaluation.
    :func:`sklearn.metrics.make_scorer`:
        Make a scorer from a performance metric or loss function.
    """
    def __init__(self, estimator, param_grid, scoring=None, fit_params=None,
                 n_jobs=1, iid=True, refit=True, cv=None, verbose=0,
                 pre_dispatch='2*n_jobs', error_score='raise'):
        super(GridSearchCV, self).__init__(
            estimator, scoring, fit_params, n_jobs, iid,
            refit, cv, verbose, pre_dispatch, error_score)
        self.param_grid = param_grid
        # Validate eagerly so a mis-specified grid fails at construction
        # time rather than deep inside a parallel fit.
        _check_param_grid(param_grid)
    def fit(self, X, y=None):
        """Run fit with all sets of parameters.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
            n_features is the number of features.
        y : array-like, shape = [n_samples] or [n_samples, n_output], optional
            Target relative to X for classification or regression;
            None for unsupervised learning.
        """
        # ParameterGrid enumerates the full Cartesian product of settings.
        return self._fit(X, y, ParameterGrid(self.param_grid))
class RandomizedSearchCV(BaseSearchCV):
    """Randomized search on hyper parameters.
    RandomizedSearchCV implements a "fit" method and a "predict" method like
    any classifier except that the parameters of the classifier
    used to predict is optimized by cross-validation.
    In contrast to GridSearchCV, not all parameter values are tried out, but
    rather a fixed number of parameter settings is sampled from the specified
    distributions. The number of parameter settings that are tried is
    given by n_iter.
    If all parameters are presented as a list,
    sampling without replacement is performed. If at least one parameter
    is given as a distribution, sampling with replacement is used.
    It is highly recommended to use continuous distributions for continuous
    parameters.
    Read more in the :ref:`User Guide <randomized_parameter_search>`.
    Parameters
    ----------
    estimator : object type that implements the "fit" and "predict" methods
        An object of that type is instantiated for each parameter setting.
    param_distributions : dict
        Dictionary with parameters names (string) as keys and distributions
        or lists of parameters to try. Distributions must provide a ``rvs``
        method for sampling (such as those from scipy.stats.distributions).
        If a list is given, it is sampled uniformly.
    n_iter : int, default=10
        Number of parameter settings that are sampled. n_iter trades
        off runtime vs quality of the solution.
    scoring : string, callable or None, optional, default: None
        A string (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.
    fit_params : dict, optional
        Parameters to pass to the fit method.
    n_jobs : int, default=1
        Number of jobs to run in parallel.
    pre_dispatch : int, or string, optional
        Controls the number of jobs that get dispatched during parallel
        execution. Reducing this number can be useful to avoid an
        explosion of memory consumption when more jobs get dispatched
        than CPUs can process. This parameter can be:
            - None, in which case all the jobs are immediately
              created and spawned. Use this for lightweight and
              fast-running jobs, to avoid delays due to on-demand
              spawning of the jobs
            - An int, giving the exact number of total jobs that are
              spawned
            - A string, giving an expression as a function of n_jobs,
              as in '2*n_jobs'
    iid : boolean, default=True
        If True, the data is assumed to be identically distributed across
        the folds, and the loss minimized is the total loss per sample,
        and not the mean loss across the folds.
    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
            - None, to use the default 3-fold cross-validation,
            - integer, to specify the number of folds.
            - An object to be used as a cross-validation generator.
            - An iterable yielding train/test splits.
        For integer/None inputs, if the estimator is a classifier and ``y``
        is either binary or multiclass, :class:`StratifiedKFold` is used.
        In all other cases, :class:`KFold` is used.
        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.
    refit : boolean, default=True
        Refit the best estimator with the entire dataset.
        If "False", it is impossible to make predictions using
        this RandomizedSearchCV instance after fitting.
    verbose : integer
        Controls the verbosity: the higher, the more messages.
    random_state : int or RandomState
        Pseudo random number generator state used for random uniform sampling
        from lists of possible values instead of scipy.stats distributions.
    error_score : 'raise' (default) or numeric
        Value to assign to the score if an error occurs in estimator fitting.
        If set to 'raise', the error is raised. If a numeric value is given,
        FitFailedWarning is raised. This parameter does not affect the refit
        step, which will always raise the error.
    Attributes
    ----------
    grid_scores_ : list of named tuples
        Contains scores for all parameter combinations in param_grid.
        Each entry corresponds to one parameter setting.
        Each named tuple has the attributes:
            * ``parameters``, a dict of parameter settings
            * ``mean_validation_score``, the mean score over the
              cross-validation folds
            * ``cv_validation_scores``, the list of scores for each fold
    best_estimator_ : estimator
        Estimator that was chosen by the search, i.e. estimator
        which gave highest score (or smallest loss if specified)
        on the left out data. Not available if refit=False.
    best_score_ : float
        Score of best_estimator on the left out data.
    best_params_ : dict
        Parameter setting that gave the best results on the hold out data.
    Notes
    -----
    The parameters selected are those that maximize the score of the held-out
    data, according to the scoring parameter.
    If `n_jobs` was set to a value higher than one, the data is copied for each
    parameter setting(and not `n_jobs` times). This is done for efficiency
    reasons if individual jobs take very little time, but may raise errors if
    the dataset is large and not enough memory is available. A workaround in
    this case is to set `pre_dispatch`. Then, the memory is copied only
    `pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
    n_jobs`.
    See Also
    --------
    :class:`GridSearchCV`:
        Does exhaustive search over a grid of parameters.
    :class:`ParameterSampler`:
        A generator over parameter settings, constructed from
        param_distributions.
    """
    def __init__(self, estimator, param_distributions, n_iter=10, scoring=None,
                 fit_params=None, n_jobs=1, iid=True, refit=True, cv=None,
                 verbose=0, pre_dispatch='2*n_jobs', random_state=None,
                 error_score='raise'):
        self.param_distributions = param_distributions
        self.n_iter = n_iter
        self.random_state = random_state
        super(RandomizedSearchCV, self).__init__(
            estimator=estimator, scoring=scoring, fit_params=fit_params,
            n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose,
            pre_dispatch=pre_dispatch, error_score=error_score)
    def fit(self, X, y=None):
        """Run fit on the estimator with randomly drawn parameters.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
            n_features is the number of features.
        y : array-like, shape = [n_samples] or [n_samples, n_output], optional
            Target relative to X for classification or regression;
            None for unsupervised learning.
        """
        # ParameterSampler lazily draws n_iter candidate settings; the
        # shared _fit machinery then evaluates them like any grid.
        sampled_params = ParameterSampler(self.param_distributions,
                                          self.n_iter,
                                          random_state=self.random_state)
        return self._fit(X, y, sampled_params)
| bsd-3-clause |
marcocaccin/scikit-learn | sklearn/svm/classes.py | 6 | 40597 | import warnings
import numpy as np
from .base import _fit_liblinear, BaseSVC, BaseLibSVM
from ..base import BaseEstimator, RegressorMixin
from ..linear_model.base import LinearClassifierMixin, SparseCoefMixin, \
LinearModel
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_X_y
from ..utils.validation import _num_samples
from ..utils.multiclass import check_classification_targets
class LinearSVC(BaseEstimator, LinearClassifierMixin,
                _LearntSelectorMixin, SparseCoefMixin):
    """Linear Support Vector Classification.
    Similar to SVC with parameter kernel='linear', but implemented in terms of
    liblinear rather than libsvm, so it has more flexibility in the choice of
    penalties and loss functions and should scale better to large numbers of
    samples.
    This class supports both dense and sparse input and the multiclass support
    is handled according to a one-vs-the-rest scheme.
    Read more in the :ref:`User Guide <svm_classification>`.
    Parameters
    ----------
    C : float, optional (default=1.0)
        Penalty parameter C of the error term.
    loss : string, 'hinge' or 'squared_hinge' (default='squared_hinge')
        Specifies the loss function. 'hinge' is the standard SVM loss
        (used e.g. by the SVC class) while 'squared_hinge' is the
        square of the hinge loss.
    penalty : string, 'l1' or 'l2' (default='l2')
        Specifies the norm used in the penalization. The 'l2'
        penalty is the standard used in SVC. The 'l1' leads to ``coef_``
        vectors that are sparse.
    dual : bool, (default=True)
        Select the algorithm to either solve the dual or primal
        optimization problem. Prefer dual=False when n_samples > n_features.
    tol : float, optional (default=1e-4)
        Tolerance for stopping criteria.
    multi_class : string, 'ovr' or 'crammer_singer' (default='ovr')
        Determines the multi-class strategy if `y` contains more than
        two classes.
        ``"ovr"`` trains n_classes one-vs-rest classifiers, while ``"crammer_singer"``
        optimizes a joint objective over all classes.
        While `crammer_singer` is interesting from a theoretical perspective
        as it is consistent, it is seldom used in practice as it rarely leads
        to better accuracy and is more expensive to compute.
        If ``"crammer_singer"`` is chosen, the options loss, penalty and dual will
        be ignored.
    fit_intercept : boolean, optional (default=True)
        Whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (i.e. data is expected to be already centered).
    intercept_scaling : float, optional (default=1)
        When self.fit_intercept is True, instance vector x becomes
        ``[x, self.intercept_scaling]``,
        i.e. a "synthetic" feature with constant value equals to
        intercept_scaling is appended to the instance vector.
        The intercept becomes intercept_scaling * synthetic feature weight
        Note! the synthetic feature weight is subject to l1/l2 regularization
        as all other features.
        To lessen the effect of regularization on synthetic feature weight
        (and therefore on the intercept) intercept_scaling has to be increased.
    class_weight : {dict, 'balanced'}, optional
        Set the parameter C of class i to ``class_weight[i]*C`` for
        SVC. If not given, all classes are supposed to have
        weight one.
        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``
    verbose : int, (default=0)
        Enable verbose output. Note that this setting takes advantage of a
        per-process runtime setting in liblinear that, if enabled, may not work
        properly in a multithreaded context.
    random_state : int seed, RandomState instance, or None (default=None)
        The seed of the pseudo random number generator to use when
        shuffling the data.
    max_iter : int, (default=1000)
        The maximum number of iterations to be run.
    Attributes
    ----------
    coef_ : array, shape = [n_features] if n_classes == 2 else [n_classes, n_features]
        Weights assigned to the features (coefficients in the primal
        problem). This is only available in the case of a linear kernel.
        ``coef_`` is a readonly property derived from ``raw_coef_`` that
        follows the internal memory layout of liblinear.
    intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
        Constants in decision function.
    Notes
    -----
    The underlying C implementation uses a random number generator to
    select features when fitting the model. It is thus not uncommon
    to have slightly different results for the same input data. If
    that happens, try with a smaller ``tol`` parameter.
    The underlying implementation, liblinear, uses a sparse internal
    representation for the data that will incur a memory copy.
    Predict output may not match that of standalone liblinear in certain
    cases. See :ref:`differences from liblinear <liblinear_differences>`
    in the narrative documentation.
    References
    ----------
    `LIBLINEAR: A Library for Large Linear Classification
    <http://www.csie.ntu.edu.tw/~cjlin/liblinear/>`__
    See also
    --------
    SVC
        Implementation of Support Vector Machine classifier using libsvm:
        the kernel can be non-linear but its SMO algorithm does not
        scale to large number of samples as LinearSVC does.
        Furthermore SVC multi-class mode is implemented using one
        vs one scheme while LinearSVC uses one vs the rest. It is
        possible to implement one vs the rest with SVC by using the
        :class:`sklearn.multiclass.OneVsRestClassifier` wrapper.
        Finally SVC can fit dense data without memory copy if the input
        is C-contiguous. Sparse data will still incur memory copy though.
    sklearn.linear_model.SGDClassifier
        SGDClassifier can optimize the same cost function as LinearSVC
        by adjusting the penalty and loss parameters. In addition it requires
        less memory, allows incremental (online) learning, and implements
        various loss functions and regularization regimes.
    """
    def __init__(self, penalty='l2', loss='squared_hinge', dual=True, tol=1e-4,
                 C=1.0, multi_class='ovr', fit_intercept=True,
                 intercept_scaling=1, class_weight=None, verbose=0,
                 random_state=None, max_iter=1000):
        self.dual = dual
        self.tol = tol
        self.C = C
        self.multi_class = multi_class
        self.fit_intercept = fit_intercept
        self.intercept_scaling = intercept_scaling
        self.class_weight = class_weight
        self.verbose = verbose
        self.random_state = random_state
        self.max_iter = max_iter
        self.penalty = penalty
        self.loss = loss
    def fit(self, X, y):
        """Fit the model according to the given training data.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
            n_features is the number of features.
        y : array-like, shape = [n_samples]
            Target vector relative to X
        Returns
        -------
        self : object
            Returns self.
        """
        # FIXME Remove l1/l2 support in 1.0 -----------------------------------
        # Deprecation shim: map the legacy loss names onto the new ones and
        # warn, while keeping old user code working until removal.
        loss_l = self.loss.lower()
        msg = ("loss='%s' has been deprecated in favor of "
               "loss='%s' as of 0.16. Backward compatibility"
               " for the loss='%s' will be removed in %s")
        # FIXME change loss_l --> self.loss after 0.18
        if loss_l in ('l1', 'l2'):
            old_loss = self.loss
            self.loss = {'l1': 'hinge', 'l2': 'squared_hinge'}.get(loss_l)
            warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'),
                          DeprecationWarning)
        # ---------------------------------------------------------------------
        if self.C < 0:
            raise ValueError("Penalty term must be positive; got (C=%r)"
                             % self.C)
        X, y = check_X_y(X, y, accept_sparse='csr',
                         dtype=np.float64, order="C")
        check_classification_targets(y)
        self.classes_ = np.unique(y)
        self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
            X, y, self.C, self.fit_intercept, self.intercept_scaling,
            self.class_weight, self.penalty, self.dual, self.verbose,
            self.max_iter, self.tol, self.random_state, self.multi_class,
            self.loss)
        if self.multi_class == "crammer_singer" and len(self.classes_) == 2:
            # crammer_singer yields one coefficient row per class even in the
            # binary case; collapse to the conventional single-row form.
            self.coef_ = (self.coef_[1] - self.coef_[0]).reshape(1, -1)
            if self.fit_intercept:
                intercept = self.intercept_[1] - self.intercept_[0]
                self.intercept_ = np.array([intercept])
        return self
class LinearSVR(LinearModel, RegressorMixin):
"""Linear Support Vector Regression.
Similar to SVR with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better to large numbers of
samples.
This class supports both dense and sparse input.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term. The penalty is a squared
l2 penalty. The bigger this parameter, the less regularization is used.
loss : string, 'epsilon_insensitive' or 'squared_epsilon_insensitive' (default='epsilon_insensitive')
Specifies the loss function. 'l1' is the epsilon-insensitive loss
(standard SVR) while 'l2' is the squared epsilon-insensitive loss.
epsilon : float, optional (default=0.1)
Epsilon parameter in the epsilon-insensitive loss function. Note
that the value of this parameter depends on the scale of the target
variable y. If unsure, set ``epsilon=0``.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria.
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
When self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
verbose : int, (default=0)
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int seed, RandomState instance, or None (default=None)
The seed of the pseudo random number generator to use when
shuffling the data.
max_iter : int, (default=1000)
The maximum number of iterations to be run.
Attributes
----------
coef_ : array, shape = [n_features] if n_classes == 2 else [n_classes, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
See also
--------
LinearSVC
Implementation of Support Vector Machine classifier using the
same library as this class (liblinear).
SVR
Implementation of Support Vector Machine regression using libsvm:
the kernel can be non-linear but its SMO algorithm does not
scale to large number of samples as LinearSVC does.
sklearn.linear_model.SGDRegressor
SGDRegressor can optimize the same cost function as LinearSVR
by adjusting the penalty and loss parameters. In addition it requires
less memory, allows incremental (online) learning, and implements
various loss functions and regularization regimes.
"""
def __init__(self, epsilon=0.0, tol=1e-4, C=1.0,
loss='epsilon_insensitive', fit_intercept=True,
intercept_scaling=1., dual=True, verbose=0,
random_state=None, max_iter=1000):
self.tol = tol
self.C = C
self.epsilon = epsilon
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.verbose = verbose
self.random_state = random_state
self.max_iter = max_iter
self.dual = dual
self.loss = loss
    def fit(self, X, y):
        """Fit the model according to the given training data.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
            n_features is the number of features.
        y : array-like, shape = [n_samples]
            Target vector relative to X
        Returns
        -------
        self : object
            Returns self.
        """
        # FIXME Remove l1/l2 support in 1.0 -----------------------------------
        # Legacy loss names 'l1'/'l2' are remapped to the modern
        # epsilon-insensitive names before use; a DeprecationWarning is
        # emitted so callers can migrate.  Note this mutates self.loss.
        loss_l = self.loss.lower()
        msg = ("loss='%s' has been deprecated in favor of "
               "loss='%s' as of 0.16. Backward compatibility"
               " for the loss='%s' will be removed in %s")
        # FIXME change loss_l --> self.loss after 0.18
        if loss_l in ('l1', 'l2'):
            old_loss = self.loss
            self.loss = {'l1': 'epsilon_insensitive',
                         'l2': 'squared_epsilon_insensitive'
                         }.get(loss_l)
            warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'),
                          DeprecationWarning)
        # ---------------------------------------------------------------------
        if self.C < 0:
            raise ValueError("Penalty term must be positive; got (C=%r)"
                             % self.C)
        # liblinear requires C-ordered float64 input; sparse input is
        # converted to CSR.
        X, y = check_X_y(X, y, accept_sparse='csr',
                         dtype=np.float64, order="C")
        penalty = 'l2'  # SVR only accepts l2 penalty
        self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
            X, y, self.C, self.fit_intercept, self.intercept_scaling,
            None, penalty, self.dual, self.verbose,
            self.max_iter, self.tol, self.random_state, loss=self.loss,
            epsilon=self.epsilon)
        # liblinear returns a 2-D coefficient array; regression exposes a
        # flat 1-D coef_ vector.
        self.coef_ = self.coef_.ravel()
        return self
class SVC(BaseSVC):
    """C-Support Vector Classification.
    The implementation is based on libsvm. The fit time complexity
    is more than quadratic with the number of samples which makes it hard
    to scale to dataset with more than a couple of 10000 samples.
    The multiclass support is handled according to a one-vs-one scheme.
    For details on the precise mathematical formulation of the provided
    kernel functions and how `gamma`, `coef0` and `degree` affect each
    other, see the corresponding section in the narrative documentation:
    :ref:`svm_kernels`.
    Read more in the :ref:`User Guide <svm_classification>`.
    Parameters
    ----------
    C : float, optional (default=1.0)
        Penalty parameter C of the error term.
    kernel : string, optional (default='rbf')
         Specifies the kernel type to be used in the algorithm.
         It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
         a callable.
         If none is given, 'rbf' will be used. If a callable is given it is
         used to pre-compute the kernel matrix from data matrices; that matrix
         should be an array of shape ``(n_samples, n_samples)``.
    degree : int, optional (default=3)
        Degree of the polynomial kernel function ('poly').
        Ignored by all other kernels.
    gamma : float, optional (default='auto')
        Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
        If gamma is 'auto' then 1/n_features will be used instead.
    coef0 : float, optional (default=0.0)
        Independent term in kernel function.
        It is only significant in 'poly' and 'sigmoid'.
    probability : boolean, optional (default=False)
        Whether to enable probability estimates. This must be enabled prior
        to calling `fit`, and will slow down that method.
    shrinking : boolean, optional (default=True)
        Whether to use the shrinking heuristic.
    tol : float, optional (default=1e-3)
        Tolerance for stopping criterion.
    cache_size : float, optional
        Specify the size of the kernel cache (in MB).
    class_weight : {dict, 'balanced'}, optional
        Set the parameter C of class i to class_weight[i]*C for
        SVC. If not given, all classes are supposed to have
        weight one.
        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``
    verbose : bool, default: False
        Enable verbose output. Note that this setting takes advantage of a
        per-process runtime setting in libsvm that, if enabled, may not work
        properly in a multithreaded context.
    max_iter : int, optional (default=-1)
        Hard limit on iterations within solver, or -1 for no limit.
    decision_function_shape : 'ovo', 'ovr' or None, default=None
        Whether to return a one-vs-rest ('ovr') decision function of shape
        (n_samples, n_classes) as all other classifiers, or the original
        one-vs-one ('ovo') decision function of libsvm which has shape
        (n_samples, n_classes * (n_classes - 1) / 2).
        The default of None will currently behave as 'ovo' for backward
        compatibility and raise a deprecation warning, but will change to
        'ovr' in 0.18.
        .. versionadded:: 0.17
           *decision_function_shape='ovr'* is recommended.
        .. versionchanged:: 0.17
           Deprecated *decision_function_shape='ovo' and None*.
    random_state : int seed, RandomState instance, or None (default)
        The seed of the pseudo random number generator to use when
        shuffling the data for probability estimation.
    Attributes
    ----------
    support_ : array-like, shape = [n_SV]
        Indices of support vectors.
    support_vectors_ : array-like, shape = [n_SV, n_features]
        Support vectors.
    n_support_ : array-like, dtype=int32, shape = [n_class]
        Number of support vectors for each class.
    dual_coef_ : array, shape = [n_class-1, n_SV]
        Coefficients of the support vector in the decision function.
        For multiclass, coefficient for all 1-vs-1 classifiers.
        The layout of the coefficients in the multiclass case is somewhat
        non-trivial. See the section about multi-class classification in the
        SVM section of the User Guide for details.
    coef_ : array, shape = [n_class-1, n_features]
        Weights assigned to the features (coefficients in the primal
        problem). This is only available in the case of a linear kernel.
        `coef_` is a readonly property derived from `dual_coef_` and
        `support_vectors_`.
    intercept_ : array, shape = [n_class * (n_class-1) / 2]
        Constants in decision function.
    Examples
    --------
    >>> import numpy as np
    >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
    >>> y = np.array([1, 1, 2, 2])
    >>> from sklearn.svm import SVC
    >>> clf = SVC()
    >>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
    SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,
        decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
        max_iter=-1, probability=False, random_state=None, shrinking=True,
        tol=0.001, verbose=False)
    >>> print(clf.predict([[-0.8, -1]]))
    [1]
    See also
    --------
    SVR
        Support Vector Machine for Regression implemented using libsvm.
    LinearSVC
        Scalable Linear Support Vector Machine for classification
        implemented using liblinear. Check the See also section of
        LinearSVC for more comparison element.
    """
    def __init__(self, C=1.0, kernel='rbf', degree=3, gamma='auto',
                 coef0=0.0, shrinking=True, probability=False,
                 tol=1e-3, cache_size=200, class_weight=None,
                 verbose=False, max_iter=-1, decision_function_shape=None,
                 random_state=None):
        # All hyper-parameters are forwarded verbatim to the shared libsvm
        # base class; impl='c_svc' selects C-SVC, and nu=0. is an unused
        # placeholder (nu only applies to the nu-parameterized variants).
        super(SVC, self).__init__(
            impl='c_svc', kernel=kernel, degree=degree, gamma=gamma,
            coef0=coef0, tol=tol, C=C, nu=0., shrinking=shrinking,
            probability=probability, cache_size=cache_size,
            class_weight=class_weight, verbose=verbose, max_iter=max_iter,
            decision_function_shape=decision_function_shape,
            random_state=random_state)
class NuSVC(BaseSVC):
    """Nu-Support Vector Classification.
    Similar to SVC but uses a parameter to control the number of support
    vectors.
    The implementation is based on libsvm.
    Read more in the :ref:`User Guide <svm_classification>`.
    Parameters
    ----------
    nu : float, optional (default=0.5)
        An upper bound on the fraction of training errors and a lower
        bound of the fraction of support vectors. Should be in the
        interval (0, 1].
    kernel : string, optional (default='rbf')
         Specifies the kernel type to be used in the algorithm.
         It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
         a callable.
         If none is given, 'rbf' will be used. If a callable is given it is
         used to precompute the kernel matrix.
    degree : int, optional (default=3)
        Degree of the polynomial kernel function ('poly').
        Ignored by all other kernels.
    gamma : float, optional (default='auto')
        Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
        If gamma is 'auto' then 1/n_features will be used instead.
    coef0 : float, optional (default=0.0)
        Independent term in kernel function.
        It is only significant in 'poly' and 'sigmoid'.
    probability : boolean, optional (default=False)
        Whether to enable probability estimates. This must be enabled prior
        to calling `fit`, and will slow down that method.
    shrinking : boolean, optional (default=True)
        Whether to use the shrinking heuristic.
    tol : float, optional (default=1e-3)
        Tolerance for stopping criterion.
    cache_size : float, optional
        Specify the size of the kernel cache (in MB).
    class_weight : {dict, 'auto'}, optional
        Set the parameter C of class i to class_weight[i]*C for
        SVC. If not given, all classes are supposed to have
        weight one. The 'auto' mode uses the values of y to
        automatically adjust weights inversely proportional to
        class frequencies.
    verbose : bool, default: False
        Enable verbose output. Note that this setting takes advantage of a
        per-process runtime setting in libsvm that, if enabled, may not work
        properly in a multithreaded context.
    max_iter : int, optional (default=-1)
        Hard limit on iterations within solver, or -1 for no limit.
    decision_function_shape : 'ovo', 'ovr' or None, default=None
        Whether to return a one-vs-rest ('ovr') decision function of shape
        (n_samples, n_classes) as all other classifiers, or the original
        one-vs-one ('ovo') decision function of libsvm which has shape
        (n_samples, n_classes * (n_classes - 1) / 2).
        The default of None will currently behave as 'ovo' for backward
        compatibility and raise a deprecation warning, but will change to
        'ovr' in 0.18.
        .. versionadded:: 0.17
           *decision_function_shape='ovr'* is recommended.
        .. versionchanged:: 0.17
           Deprecated *decision_function_shape='ovo' and None*.
    random_state : int seed, RandomState instance, or None (default)
        The seed of the pseudo random number generator to use when
        shuffling the data for probability estimation.
    Attributes
    ----------
    support_ : array-like, shape = [n_SV]
        Indices of support vectors.
    support_vectors_ : array-like, shape = [n_SV, n_features]
        Support vectors.
    n_support_ : array-like, dtype=int32, shape = [n_class]
        Number of support vectors for each class.
    dual_coef_ : array, shape = [n_class-1, n_SV]
        Coefficients of the support vector in the decision function.
        For multiclass, coefficient for all 1-vs-1 classifiers.
        The layout of the coefficients in the multiclass case is somewhat
        non-trivial. See the section about multi-class classification in
        the SVM section of the User Guide for details.
    coef_ : array, shape = [n_class-1, n_features]
        Weights assigned to the features (coefficients in the primal
        problem). This is only available in the case of a linear kernel.
        `coef_` is readonly property derived from `dual_coef_` and
        `support_vectors_`.
    intercept_ : array, shape = [n_class * (n_class-1) / 2]
        Constants in decision function.
    Examples
    --------
    >>> import numpy as np
    >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
    >>> y = np.array([1, 1, 2, 2])
    >>> from sklearn.svm import NuSVC
    >>> clf = NuSVC()
    >>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
    NuSVC(cache_size=200, class_weight=None, coef0=0.0,
          decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
          max_iter=-1, nu=0.5, probability=False, random_state=None,
          shrinking=True, tol=0.001, verbose=False)
    >>> print(clf.predict([[-0.8, -1]]))
    [1]
    See also
    --------
    SVC
        Support Vector Machine for classification using libsvm.
    LinearSVC
        Scalable linear Support Vector Machine for classification using
        liblinear.
    """
    def __init__(self, nu=0.5, kernel='rbf', degree=3, gamma='auto',
                 coef0=0.0, shrinking=True, probability=False,
                 tol=1e-3, cache_size=200, class_weight=None, verbose=False,
                 max_iter=-1, decision_function_shape=None, random_state=None):
        # impl='nu_svc' selects nu-SVC; nu replaces C in this formulation,
        # so C=0. is merely an unused placeholder required by the shared
        # libsvm base-class signature.
        super(NuSVC, self).__init__(
            impl='nu_svc', kernel=kernel, degree=degree, gamma=gamma,
            coef0=coef0, tol=tol, C=0., nu=nu, shrinking=shrinking,
            probability=probability, cache_size=cache_size,
            class_weight=class_weight, verbose=verbose, max_iter=max_iter,
            decision_function_shape=decision_function_shape,
            random_state=random_state)
class SVR(BaseLibSVM, RegressorMixin):
    """Epsilon-Support Vector Regression.
    The free parameters in the model are C and epsilon.
    The implementation is based on libsvm.
    Read more in the :ref:`User Guide <svm_regression>`.
    Parameters
    ----------
    C : float, optional (default=1.0)
        Penalty parameter C of the error term.
    epsilon : float, optional (default=0.1)
         Epsilon in the epsilon-SVR model. It specifies the epsilon-tube
         within which no penalty is associated in the training loss function
         with points predicted within a distance epsilon from the actual
         value.
    kernel : string, optional (default='rbf')
         Specifies the kernel type to be used in the algorithm.
         It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
         a callable.
         If none is given, 'rbf' will be used. If a callable is given it is
         used to precompute the kernel matrix.
    degree : int, optional (default=3)
        Degree of the polynomial kernel function ('poly').
        Ignored by all other kernels.
    gamma : float, optional (default='auto')
        Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
        If gamma is 'auto' then 1/n_features will be used instead.
    coef0 : float, optional (default=0.0)
        Independent term in kernel function.
        It is only significant in 'poly' and 'sigmoid'.
    shrinking : boolean, optional (default=True)
        Whether to use the shrinking heuristic.
    tol : float, optional (default=1e-3)
        Tolerance for stopping criterion.
    cache_size : float, optional
        Specify the size of the kernel cache (in MB).
    verbose : bool, default: False
        Enable verbose output. Note that this setting takes advantage of a
        per-process runtime setting in libsvm that, if enabled, may not work
        properly in a multithreaded context.
    max_iter : int, optional (default=-1)
        Hard limit on iterations within solver, or -1 for no limit.
    Attributes
    ----------
    support_ : array-like, shape = [n_SV]
        Indices of support vectors.
    support_vectors_ : array-like, shape = [nSV, n_features]
        Support vectors.
    dual_coef_ : array, shape = [1, n_SV]
        Coefficients of the support vector in the decision function.
    coef_ : array, shape = [1, n_features]
        Weights assigned to the features (coefficients in the primal
        problem). This is only available in the case of a linear kernel.
        `coef_` is readonly property derived from `dual_coef_` and
        `support_vectors_`.
    intercept_ : array, shape = [1]
        Constants in decision function.
    Examples
    --------
    >>> from sklearn.svm import SVR
    >>> import numpy as np
    >>> n_samples, n_features = 10, 5
    >>> np.random.seed(0)
    >>> y = np.random.randn(n_samples)
    >>> X = np.random.randn(n_samples, n_features)
    >>> clf = SVR(C=1.0, epsilon=0.2)
    >>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
    SVR(C=1.0, cache_size=200, coef0=0.0, degree=3, epsilon=0.2, gamma='auto',
        kernel='rbf', max_iter=-1, shrinking=True, tol=0.001, verbose=False)
    See also
    --------
    NuSVR
        Support Vector Machine for regression implemented using libsvm
        using a parameter to control the number of support vectors.
    LinearSVR
        Scalable Linear Support Vector Machine for regression
        implemented using liblinear.
    """
    def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
                 tol=1e-3, C=1.0, epsilon=0.1, shrinking=True,
                 cache_size=200, verbose=False, max_iter=-1):
        # 'epsilon_svr' selects epsilon-SVR; nu=0. is an unused placeholder
        # (nu only applies to NuSVR), and probability/class_weight do not
        # apply to regression so they are pinned to False/None.
        super(SVR, self).__init__(
            'epsilon_svr', kernel=kernel, degree=degree, gamma=gamma,
            coef0=coef0, tol=tol, C=C, nu=0., epsilon=epsilon, verbose=verbose,
            shrinking=shrinking, probability=False, cache_size=cache_size,
            class_weight=None, max_iter=max_iter, random_state=None)
class NuSVR(BaseLibSVM, RegressorMixin):
    """Nu Support Vector Regression.
    Similar to NuSVC, for regression, uses a parameter nu to control
    the number of support vectors. However, unlike NuSVC, where nu
    replaces C, here nu replaces the parameter epsilon of epsilon-SVR.
    The implementation is based on libsvm.
    Read more in the :ref:`User Guide <svm_regression>`.
    Parameters
    ----------
    C : float, optional (default=1.0)
        Penalty parameter C of the error term.
    nu : float, optional
        An upper bound on the fraction of training errors and a lower bound of
        the fraction of support vectors. Should be in the interval (0, 1].  By
        default 0.5 will be taken.
    kernel : string, optional (default='rbf')
         Specifies the kernel type to be used in the algorithm.
         It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
         a callable.
         If none is given, 'rbf' will be used. If a callable is given it is
         used to precompute the kernel matrix.
    degree : int, optional (default=3)
        Degree of the polynomial kernel function ('poly').
        Ignored by all other kernels.
    gamma : float, optional (default='auto')
        Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
        If gamma is 'auto' then 1/n_features will be used instead.
    coef0 : float, optional (default=0.0)
        Independent term in kernel function.
        It is only significant in 'poly' and 'sigmoid'.
    shrinking : boolean, optional (default=True)
        Whether to use the shrinking heuristic.
    tol : float, optional (default=1e-3)
        Tolerance for stopping criterion.
    cache_size : float, optional
        Specify the size of the kernel cache (in MB).
    verbose : bool, default: False
        Enable verbose output. Note that this setting takes advantage of a
        per-process runtime setting in libsvm that, if enabled, may not work
        properly in a multithreaded context.
    max_iter : int, optional (default=-1)
        Hard limit on iterations within solver, or -1 for no limit.
    Attributes
    ----------
    support_ : array-like, shape = [n_SV]
        Indices of support vectors.
    support_vectors_ : array-like, shape = [nSV, n_features]
        Support vectors.
    dual_coef_ : array, shape = [1, n_SV]
        Coefficients of the support vector in the decision function.
    coef_ : array, shape = [1, n_features]
        Weights assigned to the features (coefficients in the primal
        problem). This is only available in the case of a linear kernel.
        `coef_` is readonly property derived from `dual_coef_` and
        `support_vectors_`.
    intercept_ : array, shape = [1]
        Constants in decision function.
    Examples
    --------
    >>> from sklearn.svm import NuSVR
    >>> import numpy as np
    >>> n_samples, n_features = 10, 5
    >>> np.random.seed(0)
    >>> y = np.random.randn(n_samples)
    >>> X = np.random.randn(n_samples, n_features)
    >>> clf = NuSVR(C=1.0, nu=0.1)
    >>> clf.fit(X, y)  #doctest: +NORMALIZE_WHITESPACE
    NuSVR(C=1.0, cache_size=200, coef0=0.0, degree=3, gamma='auto',
          kernel='rbf', max_iter=-1, nu=0.1, shrinking=True, tol=0.001,
          verbose=False)
    See also
    --------
    NuSVC
        Support Vector Machine for classification implemented with libsvm
        with a parameter to control the number of support vectors.
    SVR
        epsilon Support Vector Machine for regression implemented with libsvm.
    """
    def __init__(self, nu=0.5, C=1.0, kernel='rbf', degree=3,
                 gamma='auto', coef0=0.0, shrinking=True, tol=1e-3,
                 cache_size=200, verbose=False, max_iter=-1):
        # 'nu_svr' selects nu-SVR; nu replaces epsilon in this formulation,
        # so epsilon=0. is an unused placeholder required by the shared
        # libsvm base-class signature.
        super(NuSVR, self).__init__(
            'nu_svr', kernel=kernel, degree=degree, gamma=gamma, coef0=coef0,
            tol=tol, C=C, nu=nu, epsilon=0., shrinking=shrinking,
            probability=False, cache_size=cache_size, class_weight=None,
            verbose=verbose, max_iter=max_iter, random_state=None)
class OneClassSVM(BaseLibSVM):
    """Unsupervised Outlier Detection.
    Estimate the support of a high-dimensional distribution.
    The implementation is based on libsvm.
    Read more in the :ref:`User Guide <svm_outlier_detection>`.
    Parameters
    ----------
    kernel : string, optional (default='rbf')
         Specifies the kernel type to be used in the algorithm.
         It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
         a callable.
         If none is given, 'rbf' will be used. If a callable is given it is
         used to precompute the kernel matrix.
    nu : float, optional
        An upper bound on the fraction of training
        errors and a lower bound of the fraction of support
        vectors. Should be in the interval (0, 1]. By default 0.5
        will be taken.
    degree : int, optional (default=3)
        Degree of the polynomial kernel function ('poly').
        Ignored by all other kernels.
    gamma : float, optional (default='auto')
        Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
        If gamma is 'auto' then 1/n_features will be used instead.
    coef0 : float, optional (default=0.0)
        Independent term in kernel function.
        It is only significant in 'poly' and 'sigmoid'.
    tol : float, optional
        Tolerance for stopping criterion.
    shrinking : boolean, optional
        Whether to use the shrinking heuristic.
    cache_size : float, optional
        Specify the size of the kernel cache (in MB).
    verbose : bool, default: False
        Enable verbose output. Note that this setting takes advantage of a
        per-process runtime setting in libsvm that, if enabled, may not work
        properly in a multithreaded context.
    max_iter : int, optional (default=-1)
        Hard limit on iterations within solver, or -1 for no limit.
    random_state : int seed, RandomState instance, or None (default)
        The seed of the pseudo random number generator to use when
        shuffling the data for probability estimation.
    Attributes
    ----------
    support_ : array-like, shape = [n_SV]
        Indices of support vectors.
    support_vectors_ : array-like, shape = [nSV, n_features]
        Support vectors.
    dual_coef_ : array, shape = [n_classes-1, n_SV]
        Coefficients of the support vectors in the decision function.
    coef_ : array, shape = [n_classes-1, n_features]
        Weights assigned to the features (coefficients in the primal
        problem). This is only available in the case of a linear kernel.
        `coef_` is readonly property derived from `dual_coef_` and
        `support_vectors_`
    intercept_ : array, shape = [n_classes-1]
        Constants in decision function.
    """
    def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
                 tol=1e-3, nu=0.5, shrinking=True, cache_size=200,
                 verbose=False, max_iter=-1, random_state=None):
        # Positional base-class arguments, in order:
        # impl='one_class', kernel, degree, gamma, coef0, tol, C=0. (unused),
        # nu, epsilon=0. (unused), shrinking, probability=False, cache_size,
        # class_weight=None, verbose, max_iter, random_state.
        super(OneClassSVM, self).__init__(
            'one_class', kernel, degree, gamma, coef0, tol, 0., nu, 0.,
            shrinking, False, cache_size, None, verbose, max_iter,
            random_state)
    def fit(self, X, y=None, sample_weight=None, **params):
        """
        Detects the soft boundary of the set of samples X.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Set of samples, where n_samples is the number of samples and
            n_features is the number of features.
        sample_weight : array-like, shape (n_samples,)
            Per-sample weights. Rescale C per sample. Higher weights
            force the classifier to put more emphasis on these points.
        Returns
        -------
        self : object
            Returns self.
        Notes
        -----
        If X is not a C-ordered contiguous array it is copied.
        """
        # One-class SVM is unsupervised: a dummy target of all ones is
        # passed to satisfy the supervised base-class fit signature.
        super(OneClassSVM, self).fit(X, np.ones(_num_samples(X)), sample_weight=sample_weight,
                                     **params)
        return self
    def decision_function(self, X):
        """Distance of the samples X to the separating hyperplane.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
        Returns
        -------
        X : array-like, shape (n_samples,)
            Returns the decision function of the samples.
        """
        # Thin public wrapper around the base-class implementation.
        dec = self._decision_function(X)
        return dec
| bsd-3-clause |
mikeireland/opticstools | playground/nuller_with_phase.py | 1 | 1258 | """
The CSV input files came from WebPlotDigitizer and Harry's plots.
"""
import numpy as np
import matplotlib.pyplot as plt
import scipy.optimize as op
imbalance = np.genfromtxt('harry_imbalance.csv', delimiter=',')
phase_deg = np.genfromtxt('harry_phase.csv', delimiter=',')
#Wavelength range
wave = np.linspace(3.7,4.3,51)
def worst_null(p, wave, imbalance, phase_deg, return_nulls=False):
    """Worst-case interferometric null depth in dB for a linear phase model.

    Parameters
    ----------
    p : sequence of 2 floats
        Phase model: p[0] is a constant offset (degrees) and p[1] a slope
        in degrees per micron about the band centre.
    wave : array-like
        Wavelengths (microns) at which to evaluate the null.
    imbalance : array of shape (n, 2)
        Columns of (wavelength, flux imbalance) to be interpolated.
    phase_deg : array of shape (n, 2)
        Columns of (wavelength, phase in degrees) to be interpolated.
    return_nulls : bool, optional
        If True, return the per-wavelength null depths instead of the
        worst value in dB.

    Returns
    -------
    float or ndarray
        Worst null depth in dB (more negative is a deeper null), or the
        per-wavelength null-depth array when ``return_nulls`` is True.
    """
    band_centre = 0.5 * (wave[0] + wave[-1])
    model_phase = p[0] + p[1] * (wave - band_centre)
    # Split unit flux between the two arms according to the interpolated
    # imbalance curve.
    bright = 0.5 * (1 + np.interp(wave, imbalance[:, 0], imbalance[:, 1]))
    faint = 1 - bright
    residual_deg = np.interp(wave, phase_deg[:, 0], phase_deg[:, 1]) - model_phase
    # Null depth is the squared magnitude of the residual electric field.
    field = np.sqrt(bright) - np.sqrt(faint) * np.exp(1j * np.radians(residual_deg))
    nulls = np.abs(field) ** 2
    if return_nulls:
        return nulls
    return 10 * np.log10(nulls.max())
# Fit the linear phase model (offset + slope) that minimises the worst-case
# null depth across the band.
best_p = op.minimize(worst_null, [90, 0], args=(wave, imbalance, phase_deg), method='Nelder-Mead')
# Per-wavelength null depths at the optimal phase model.
nulls = worst_null(best_p.x, wave, imbalance, phase_deg, return_nulls=True)
#Now make a plot of the null depth with the sign convention of Harry's plots.
plt.clf()
# Negate so deeper nulls appear as larger extinction values (dB).
plt.plot(wave, -10*np.log10(nulls))
plt.xlabel('Wavelength (microns)')
plt.ylabel('Extinction (dB)') | mit |
henrykironde/scikit-learn | sklearn/cluster/setup.py | 263 | 1449 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
    """Build configuration for the ``sklearn.cluster`` C/C++ extensions.

    Returns a ``numpy.distutils`` Configuration describing the
    ``_dbscan_inner``, ``_hierarchical`` and ``_k_means`` extension modules.
    """
    from numpy.distutils.misc_util import Configuration
    cblas_libs, blas_info = get_blas_info()
    libraries = []
    if os.name == 'posix':
        # On POSIX the C math library must be linked explicitly.
        cblas_libs.append('m')
        libraries.append('m')
    config = Configuration('cluster', parent_package, top_path)
    config.add_extension('_dbscan_inner',
                         sources=['_dbscan_inner.cpp'],
                         include_dirs=[numpy.get_include()],
                         language="c++")
    config.add_extension('_hierarchical',
                         sources=['_hierarchical.cpp'],
                         language="c++",
                         include_dirs=[numpy.get_include()],
                         libraries=libraries)
    # _k_means links against CBLAS; the pop() calls remove keys from
    # blas_info so they are not passed twice via **blas_info below.
    config.add_extension(
        '_k_means',
        libraries=cblas_libs,
        sources=['_k_means.c'],
        include_dirs=[join('..', 'src', 'cblas'),
                      numpy.get_include(),
                      blas_info.pop('include_dirs', [])],
        extra_compile_args=blas_info.pop('extra_compile_args', []),
        **blas_info
    )
    return config
if __name__ == '__main__':
    # Allow building this subpackage standalone via numpy.distutils.
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
| bsd-3-clause |
plissonf/scikit-learn | sklearn/cluster/bicluster.py | 211 | 19443 | """Spectral biclustering algorithms.
Authors : Kemal Eren
License: BSD 3 clause
"""
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import dia_matrix
from scipy.sparse import issparse
from . import KMeans, MiniBatchKMeans
from ..base import BaseEstimator, BiclusterMixin
from ..externals import six
from ..utils.arpack import eigsh, svds
from ..utils.extmath import (make_nonnegative, norm, randomized_svd,
safe_sparse_dot)
from ..utils.validation import assert_all_finite, check_array
__all__ = ['SpectralCoclustering',
'SpectralBiclustering']
def _scale_normalize(X):
    """Normalize ``X`` by scaling rows and columns independently.

    Returns the normalized matrix together with the row and column
    scaling factors.
    """
    X = make_nonnegative(X)

    def _inv_sqrt_sums(axis):
        # 1 / sqrt(row or column sums); slices that sum to zero produce
        # nan, which is mapped to a zero scaling factor.
        scale = np.asarray(1.0 / np.sqrt(X.sum(axis=axis))).squeeze()
        return np.where(np.isnan(scale), 0, scale)

    row_diag = _inv_sqrt_sums(1)
    col_diag = _inv_sqrt_sums(0)
    if issparse(X):
        n_rows, n_cols = X.shape
        left = dia_matrix((row_diag, [0]), shape=(n_rows, n_rows))
        right = dia_matrix((col_diag, [0]), shape=(n_cols, n_cols))
        an = left * X * right
    else:
        an = row_diag[:, np.newaxis] * X * col_diag
    return an, row_diag, col_diag
def _bistochastic_normalize(X, max_iter=1000, tol=1e-5):
    """Normalize rows and columns of ``X`` simultaneously so that all
    rows sum to one constant and all columns sum to a different
    constant.

    Parameters
    ----------
    X : array-like or sparse matrix
        Matrix to normalize; made non-negative first.
    max_iter : int, optional
        Maximum number of alternating scaling iterations.
    tol : float, optional
        Convergence threshold on the norm of the change between
        successive iterates.

    Returns
    -------
    X_scaled : array or sparse matrix
        The bistochastically normalized matrix.
    """
    # According to paper, this can also be done more efficiently with
    # deviation reduction and balancing algorithms.
    X = make_nonnegative(X)
    X_scaled = X
    for _ in range(max_iter):
        X_new, _, _ = _scale_normalize(X_scaled)
        if issparse(X):
            # BUG FIX: compare successive iterates (X_scaled vs X_new).
            # The original compared X_scaled against the ORIGINAL X, so the
            # sparse convergence test never measured iteration-to-iteration
            # change and the loop could run all max_iter iterations (or stop
            # on a meaningless criterion).
            dist = norm(X_scaled.data - X_new.data)
        else:
            dist = norm(X_scaled - X_new)
        X_scaled = X_new
        if dist < tol:
            break
    return X_scaled
def _log_normalize(X):
    """Normalize ``X`` according to Kluger's log-interactions scheme."""
    X = make_nonnegative(X, min_value=1)
    if issparse(X):
        raise ValueError("Cannot compute log of a sparse matrix,"
                         " because log(x) diverges to -infinity as x"
                         " goes to 0.")
    log_X = np.log(X)
    grand_mean = log_X.mean()
    row_means = log_X.mean(axis=1)[:, np.newaxis]
    col_means = log_X.mean(axis=0)
    # Double-centre the log matrix, then restore the grand mean.
    return log_X - row_means - col_means + grand_mean
class BaseSpectral(six.with_metaclass(ABCMeta, BaseEstimator,
                                      BiclusterMixin)):
    """Base class for spectral biclustering.

    Concrete subclasses implement ``_fit`` and may extend
    ``_check_parameters``; this class provides shared parameter storage,
    input validation, SVD computation and k-means clustering helpers.
    """
    @abstractmethod
    def __init__(self, n_clusters=3, svd_method="randomized",
                 n_svd_vecs=None, mini_batch=False, init="k-means++",
                 n_init=10, n_jobs=1, random_state=None):
        # Per scikit-learn convention, store constructor arguments
        # verbatim; validation is deferred to fit().
        self.n_clusters = n_clusters
        self.svd_method = svd_method
        self.n_svd_vecs = n_svd_vecs
        self.mini_batch = mini_batch
        self.init = init
        self.n_init = n_init
        self.n_jobs = n_jobs
        self.random_state = random_state
    def _check_parameters(self):
        # Validate hyper-parameters shared by all spectral biclustering
        # estimators; subclasses may add further checks.
        legal_svd_methods = ('randomized', 'arpack')
        if self.svd_method not in legal_svd_methods:
            raise ValueError("Unknown SVD method: '{0}'. svd_method must be"
                             " one of {1}.".format(self.svd_method,
                                                   legal_svd_methods))
    def fit(self, X):
        """Creates a biclustering for X.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
        Returns
        -------
        self : object
            Returns the fitted estimator, enabling method chaining
            (standard scikit-learn convention; the original returned None).
        """
        X = check_array(X, accept_sparse='csr', dtype=np.float64)
        self._check_parameters()
        self._fit(X)
        return self
    def _svd(self, array, n_components, n_discard):
        """Returns first `n_components` left and right singular
        vectors u and v, discarding the first `n_discard`.
        """
        if self.svd_method == 'randomized':
            kwargs = {}
            if self.n_svd_vecs is not None:
                kwargs['n_oversamples'] = self.n_svd_vecs
            u, _, vt = randomized_svd(array, n_components,
                                      random_state=self.random_state,
                                      **kwargs)
        elif self.svd_method == 'arpack':
            u, _, vt = svds(array, k=n_components, ncv=self.n_svd_vecs)
            if np.any(np.isnan(vt)):
                # some eigenvalues of A * A.T are negative, causing
                # sqrt() to be np.nan. This causes some vectors in vt
                # to be np.nan.
                _, v = eigsh(safe_sparse_dot(array.T, array),
                             ncv=self.n_svd_vecs)
                vt = v.T
            if np.any(np.isnan(u)):
                _, u = eigsh(safe_sparse_dot(array, array.T),
                             ncv=self.n_svd_vecs)
        assert_all_finite(u)
        assert_all_finite(vt)
        u = u[:, n_discard:]
        vt = vt[n_discard:]
        return u, vt.T
    def _k_means(self, data, n_clusters):
        # Cluster the projected data; mini-batch trades exactness for speed.
        if self.mini_batch:
            model = MiniBatchKMeans(n_clusters,
                                    init=self.init,
                                    n_init=self.n_init,
                                    random_state=self.random_state)
        else:
            model = KMeans(n_clusters, init=self.init,
                           n_init=self.n_init, n_jobs=self.n_jobs,
                           random_state=self.random_state)
        model.fit(data)
        centroid = model.cluster_centers_
        labels = model.labels_
        return centroid, labels
class SpectralCoclustering(BaseSpectral):
"""Spectral Co-Clustering algorithm (Dhillon, 2001).
Clusters rows and columns of an array `X` to solve the relaxed
normalized cut of the bipartite graph created from `X` as follows:
the edge between row vertex `i` and column vertex `j` has weight
`X[i, j]`.
The resulting bicluster structure is block-diagonal, since each
row and each column belongs to exactly one bicluster.
Supports sparse matrices, as long as they are nonnegative.
Read more in the :ref:`User Guide <spectral_coclustering>`.
Parameters
----------
n_clusters : integer, optional, default: 3
The number of biclusters to find.
svd_method : string, optional, default: 'randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', use
:func:`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', use
:func:`sklearn.utils.arpack.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, optional, default: None
Number of vectors to use in calculating the SVD. Corresponds
to `ncv` when `svd_method=arpack` and `n_oversamples` when
`svd_method` is 'randomized`.
mini_batch : bool, optional, default: False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random' or an ndarray}
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, optional, default: 10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used by the K-Means
initialization.
Attributes
----------
rows_ : array-like, shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
columns_ : array-like, shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
row_labels_ : array-like, shape (n_rows,)
The bicluster label of each row.
column_labels_ : array-like, shape (n_cols,)
The bicluster label of each column.
References
----------
* Dhillon, Inderjit S, 2001. `Co-clustering documents and words using
bipartite spectral graph partitioning
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.140.3011>`__.
"""
def __init__(self, n_clusters=3, svd_method='randomized',
             n_svd_vecs=None, mini_batch=False, init='k-means++',
             n_init=10, n_jobs=1, random_state=None):
    # All hyperparameter storage and validation is delegated to the
    # BaseSpectral parent.  Arguments are forwarded positionally, so
    # their order must match the base-class __init__ signature exactly.
    super(SpectralCoclustering, self).__init__(n_clusters,
                                               svd_method,
                                               n_svd_vecs,
                                               mini_batch,
                                               init,
                                               n_init,
                                               n_jobs,
                                               random_state)
def _fit(self, X):
    """Co-cluster rows and columns of ``X`` (Dhillon's spectral algorithm).

    Normalizes ``X``, takes the leading singular vectors of the result,
    stacks the scaled row/column embeddings into one matrix ``z`` and
    k-means clusters rows and columns jointly.  Sets ``row_labels_``,
    ``column_labels_``, ``rows_`` and ``columns_``.
    """
    normalized_data, row_diag, col_diag = _scale_normalize(X)
    # Number of singular vectors needed to separate n_clusters clusters.
    n_sv = 1 + int(np.ceil(np.log2(self.n_clusters)))
    u, v = self._svd(normalized_data, n_sv, n_discard=1)
    z = np.vstack((row_diag[:, np.newaxis] * u,
                   col_diag[:, np.newaxis] * v))

    _, labels = self._k_means(z, self.n_clusters)

    # z stacks rows first, then columns, so the label vector splits at n_rows.
    n_rows = X.shape[0]
    self.row_labels_ = labels[:n_rows]
    self.column_labels_ = labels[n_rows:]

    # BUG FIX: np.vstack requires a sequence of arrays; passing a bare
    # generator is deprecated and raises on recent NumPy releases, so
    # the boolean indicator rows are built as list comprehensions.
    self.rows_ = np.vstack([self.row_labels_ == c
                            for c in range(self.n_clusters)])
    self.columns_ = np.vstack([self.column_labels_ == c
                               for c in range(self.n_clusters)])
class SpectralBiclustering(BaseSpectral):
    """Spectral biclustering (Kluger, 2003).

    Partitions rows and columns under the assumption that the data has
    an underlying checkerboard structure. For instance, if there are
    two row partitions and three column partitions, each row will
    belong to three biclusters, and each column will belong to two
    biclusters. The outer product of the corresponding row and column
    label vectors gives this checkerboard structure.

    Read more in the :ref:`User Guide <spectral_biclustering>`.

    Parameters
    ----------
    n_clusters : integer or tuple (n_row_clusters, n_column_clusters)
        The number of row and column clusters in the checkerboard
        structure.

    method : string, optional, default: 'bistochastic'
        Method of normalizing and converting singular vectors into
        biclusters. May be one of 'scale', 'bistochastic', or 'log'.
        The authors recommend using 'log'. If the data is sparse,
        however, log normalization will not work, which is why the
        default is 'bistochastic'. CAUTION: if `method='log'`, the
        data must not be sparse.

    n_components : integer, optional, default: 6
        Number of singular vectors to check.

    n_best : integer, optional, default: 3
        Number of best singular vectors to which to project the data
        for clustering.

    svd_method : string, optional, default: 'randomized'
        Selects the algorithm for finding singular vectors. May be
        'randomized' or 'arpack'. If 'randomized', uses
        `sklearn.utils.extmath.randomized_svd`, which may be faster
        for large matrices. If 'arpack', uses
        `sklearn.utils.arpack.svds`, which is more accurate, but
        possibly slower in some cases.

    n_svd_vecs : int, optional, default: None
        Number of vectors to use in calculating the SVD. Corresponds
        to `ncv` when `svd_method=arpack` and `n_oversamples` when
        `svd_method` is 'randomized`.

    mini_batch : bool, optional, default: False
        Whether to use mini-batch k-means, which is faster but may get
        different results.

    init : {'k-means++', 'random' or an ndarray}
        Method for initialization of k-means algorithm; defaults to
        'k-means++'.

    n_init : int, optional, default: 10
        Number of random initializations that are tried with the
        k-means algorithm.

        If mini-batch k-means is used, the best initialization is
        chosen and the algorithm runs once. Otherwise, the algorithm
        is run for each initialization and the best solution chosen.

    n_jobs : int, optional, default: 1
        The number of jobs to use for the computation. This works by breaking
        down the pairwise matrix into n_jobs even slices and computing them in
        parallel.

        If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
        are used.

    random_state : int seed, RandomState instance, or None (default)
        A pseudo random number generator used by the K-Means
        initialization.

    Attributes
    ----------
    rows_ : array-like, shape (n_row_clusters, n_rows)
        Results of the clustering. `rows[i, r]` is True if
        cluster `i` contains row `r`. Available only after calling ``fit``.

    columns_ : array-like, shape (n_column_clusters, n_columns)
        Results of the clustering, like `rows`.

    row_labels_ : array-like, shape (n_rows,)
        Row partition labels.

    column_labels_ : array-like, shape (n_cols,)
        Column partition labels.

    References
    ----------

    * Kluger, Yuval, et. al., 2003. `Spectral biclustering of microarray
      data: coclustering genes and conditions
      <http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.135.1608>`__.

    """
    def __init__(self, n_clusters=3, method='bistochastic',
                 n_components=6, n_best=3, svd_method='randomized',
                 n_svd_vecs=None, mini_batch=False, init='k-means++',
                 n_init=10, n_jobs=1, random_state=None):
        super(SpectralBiclustering, self).__init__(n_clusters,
                                                   svd_method,
                                                   n_svd_vecs,
                                                   mini_batch,
                                                   init,
                                                   n_init,
                                                   n_jobs,
                                                   random_state)
        self.method = method
        self.n_components = n_components
        self.n_best = n_best

    def _check_parameters(self):
        """Validate ``method``, ``n_clusters``, ``n_components`` and ``n_best``."""
        super(SpectralBiclustering, self)._check_parameters()
        legal_methods = ('bistochastic', 'scale', 'log')
        if self.method not in legal_methods:
            raise ValueError("Unknown method: '{0}'. method must be"
                             " one of {1}.".format(self.method, legal_methods))
        try:
            int(self.n_clusters)
        except TypeError:
            try:
                r, c = self.n_clusters
                int(r)
                int(c)
            except (ValueError, TypeError):
                # BUG FIX: the message contained a '{}' placeholder but
                # .format() was never called, so the offending value was
                # never shown to the user.
                raise ValueError("Incorrect parameter n_clusters has value:"
                                 " {}. It should either be a single integer"
                                 " or an iterable with two integers:"
                                 " (n_row_clusters, n_column_clusters)"
                                 .format(self.n_clusters))
        if self.n_components < 1:
            raise ValueError("Parameter n_components must be greater than 0,"
                             " but its value is {}".format(self.n_components))
        if self.n_best < 1:
            raise ValueError("Parameter n_best must be greater than 0,"
                             " but its value is {}".format(self.n_best))
        if self.n_best > self.n_components:
            raise ValueError("n_best cannot be larger than"
                             " n_components, but {} > {}"
                             "".format(self.n_best, self.n_components))

    def _fit(self, X):
        """Find the checkerboard biclustering of ``X``."""
        n_sv = self.n_components
        if self.method == 'bistochastic':
            normalized_data = _bistochastic_normalize(X)
            n_sv += 1
        elif self.method == 'scale':
            normalized_data, _, _ = _scale_normalize(X)
            n_sv += 1
        elif self.method == 'log':
            normalized_data = _log_normalize(X)
        # For 'log' the first singular vectors are kept; the other
        # normalizations make the leading pair trivial, so discard one.
        n_discard = 0 if self.method == 'log' else 1
        u, v = self._svd(normalized_data, n_sv, n_discard)
        ut = u.T
        vt = v.T

        try:
            n_row_clusters, n_col_clusters = self.n_clusters
        except TypeError:
            n_row_clusters = n_col_clusters = self.n_clusters

        best_ut = self._fit_best_piecewise(ut, self.n_best,
                                           n_row_clusters)

        best_vt = self._fit_best_piecewise(vt, self.n_best,
                                           n_col_clusters)

        self.row_labels_ = self._project_and_cluster(X, best_vt.T,
                                                     n_row_clusters)

        self.column_labels_ = self._project_and_cluster(X.T, best_ut.T,
                                                        n_col_clusters)

        # BUG FIX: np.vstack needs a sequence of arrays; generators are
        # deprecated and raise on recent NumPy releases, so the
        # indicator rows are built with list comprehensions.
        self.rows_ = np.vstack([self.row_labels_ == label
                                for label in range(n_row_clusters)
                                for _ in range(n_col_clusters)])
        self.columns_ = np.vstack([self.column_labels_ == label
                                   for _ in range(n_row_clusters)
                                   for label in range(n_col_clusters)])

    def _fit_best_piecewise(self, vectors, n_best, n_clusters):
        """Find the ``n_best`` vectors that are best approximated by piecewise
        constant vectors.

        The piecewise vectors are found by k-means; the best is chosen
        according to Euclidean distance.

        """
        def make_piecewise(v):
            centroid, labels = self._k_means(v.reshape(-1, 1), n_clusters)
            return centroid[labels].ravel()
        piecewise_vectors = np.apply_along_axis(make_piecewise,
                                                axis=1, arr=vectors)
        dists = np.apply_along_axis(norm, axis=1,
                                    arr=(vectors - piecewise_vectors))
        result = vectors[np.argsort(dists)[:n_best]]
        return result

    def _project_and_cluster(self, data, vectors, n_clusters):
        """Project ``data`` to ``vectors`` and cluster the result."""
        projected = safe_sparse_dot(data, vectors)
        _, labels = self._k_means(projected, n_clusters)
        return labels
| bsd-3-clause |
jmorton/yatsm | yatsm/classifiers/diagnostics.py | 1 | 7252 | import logging
import numpy as np
import scipy.ndimage
from sklearn.utils import check_random_state
# from sklearn.cross_validation import KFold, StratifiedKFold
logger = logging.getLogger('yatsm')
def kfold_scores(X, y, algo, kf_generator):
    """Run k-fold cross-validation and report the score mean/std.

    Args:
        X (np.ndarray): feature matrix used in classification
        y (np.ndarray): labels, one per row of X
        algo (sklearn classifier): estimator with fit()/score()
        kf_generator (sklearn CV generator): yields (train, test) index
            arrays and exposes an ``n_folds`` attribute

    Returns:
        (mean, std): mean and standard deviation of per-fold scores
    """
    fold_scores = np.zeros(kf_generator.n_folds)
    for fold, (train, test) in enumerate(kf_generator):
        fitted = algo.fit(X[train, :], y[train])
        fold_scores[fold] = fitted.score(X[test, :], y[test])

    mean, std = fold_scores.mean(), fold_scores.std()
    logger.info('scores: {0}'.format(fold_scores))
    logger.info('score mean/std: {0}/{1}'.format(mean, std))

    return mean, std
class SpatialKFold(object):
    """ Spatial cross validation iterator

    Training data samples physically located next to test samples are likely to
    be strongly related due to spatial autocorrelation. This violation of
    independence will artificially inflate crossvalidated measures of
    algorithm performance.

    Provides training and testing indices to split data into training and
    testing sets. Splits a "Region of Interest" image into k consecutive
    folds. Each fold is used as a validation set once while k - 1 remaining
    folds form the training set.

    Parameters:
      y (np.ndarray): Labeled features
      row (np.ndarray): Row (y) pixel location for each `y`
      col (np.ndarray): Column (x) pixel location for each `y`
      n_folds (int, optional): Number of folds (default: 3)
      shuffle (bool, optional): Shuffle the unique training data regions before
        splitting into batches (default: False)
      random_state (None, int, or np.random.RandomState): Pseudo-random number
        generator to use for random sampling. If None, default to numpy RNG
        for shuffling

    """
    shuffle = False

    def __init__(self, y, row, col, n_folds=3, shuffle=False,
                 random_state=None):
        self.y = y
        self.row = row
        self.col = col
        self.n_folds = n_folds
        if shuffle:
            self.shuffle = True
            self.rng = check_random_state(random_state)
        self._recreate_labels()

    def __iter__(self):
        # Distribute the n spatial regions over n_folds folds, the first
        # (n % n_folds) folds receiving one extra region.
        # FIX: np.int was removed from NumPy (>= 1.24); use builtin int.
        fold_sizes = (self.n // self.n_folds) * np.ones(self.n_folds,
                                                        dtype=int)
        fold_sizes[:self.n % self.n_folds] += 1
        current = 0
        ind = np.arange(self.y.size)
        for fold_size in fold_sizes:
            start, stop = current, current + fold_size
            test_i = self._labels_to_indices(self.labels[start:stop])
            # NOTE(review): this yields (test, train), while kfold_scores()
            # in this module unpacks (train, test) -- confirm the intended
            # order with callers before relying on either.
            yield ind[test_i], ind[~test_i]
            current = stop

    def _recreate_labels(self):
        """ Internal method to label regions of `self.y` from pixel locations
        """
        # Rasterize the samples back into an image so connected regions of
        # identical labels can be found with scipy.ndimage.label.
        roi = np.zeros((self.row.max() + 1, self.col.max() + 1),
                       dtype=self.y.dtype)
        roi[self.row, self.col] = self.y

        self.labeled, _ = scipy.ndimage.label(roi)
        self.labels = np.unique(self.labeled[self.labeled != 0])
        self.n = self.labels.size

        if self.shuffle:
            self.rng.shuffle(self.labels)

        self.indices = []

    def _labels_to_indices(self, labels):
        # Boolean mask over the samples whose (row, col) location falls in
        # any of the given spatial region labels.
        # FIX: np.in1d is deprecated in modern NumPy; np.isin is equivalent
        # here (1-D inputs / reshape preserved).
        lab_row, lab_col = np.where(np.isin(
            self.labeled, labels).reshape(self.labeled.shape))

        return np.logical_and(np.isin(self.row, lab_row),
                              np.isin(self.col, lab_col))
class SpatialKFold_ROI(object):
    """ Spatial cross validation iterator on ROI images

    Training data samples physically located next to test samples are likely to
    be strongly related due to spatial autocorrelation. This violation of
    independence will artificially inflate crossvalidated measures of
    algorithm performance.

    Provides training and testing indices to split data into training and
    testing sets. Splits a "Region of Interest" image into k consecutive
    folds. Each fold is used as a validation set once while k - 1 remaining
    folds form the training set.

    Parameters:
      roi (np.ndarray): "Region of interest" matrix providing training data
        samples of some class
      n_folds (int, optional): Number of folds (default: 3)
      mask_values (int, list, tuple, or np.ndarray, optional): one or more
        values within roi to ignore from sampling (default: [0])
      shuffle (bool, optional): Shuffle the unique training data regions before
        splitting into batches (default: False)
      random_state (None, int, or np.random.RandomState): Pseudo-random number
        generator to use for random sampling. If None, default to numpy RNG
        for shuffling

    """
    shuffle = False

    # mask_values=[0] is a mutable default but is only ever read, never
    # mutated, so it is safe; kept for backward compatibility.
    def __init__(self, roi, n_folds=3, mask_values=[0], shuffle=False,
                 random_state=None):
        self.roi = roi
        self.n_folds = n_folds
        if isinstance(mask_values, (float, int)):
            self.mask_values = np.array([mask_values])
        elif isinstance(mask_values, (list, tuple)):
            self.mask_values = np.array(mask_values)
        elif isinstance(mask_values, np.ndarray):
            self.mask_values = mask_values
        else:
            raise TypeError('mask_values must be float, int, list, tuple,'
                            ' or np.ndarray')
        if shuffle:
            self.shuffle = True
            self.rng = check_random_state(random_state)
        self._label_roi()

    def __iter__(self):
        n = self.n
        n_folds = self.n_folds
        # FIX: np.int was removed from NumPy (>= 1.24); use builtin int.
        fold_sizes = (n // n_folds) * np.ones(n_folds, dtype=int)
        fold_sizes[:n % n_folds] += 1
        current = 0
        for fold_size in fold_sizes:
            start, stop = current, current + fold_size
            test_labels = self.labels[start:stop]
            # BUG FIX: the training set must be the k-1 *remaining* folds on
            # both sides of the test fold. The original used only
            # labels[stop:], so training sets shrank every iteration and the
            # last fold trained on nothing.
            train_labels = np.concatenate((self.labels[:start],
                                           self.labels[stop:]))
            test_i = np.isin(self.indices[:, 0], test_labels)
            train_i = np.isin(self.indices[:, 0], train_labels)
            # Yields ((test_rows, test_cols), (train_rows, train_cols)).
            yield ((self.indices[test_i, 1], self.indices[test_i, 2]),
                   (self.indices[train_i, 1], self.indices[train_i, 2]))
            current = stop

    def _label_roi(self):
        """ Internal method to label region of interest image
        """
        labeled, n = scipy.ndimage.label(self.roi)
        labels = np.unique(labeled)
        # Drop masked values (e.g. background 0) from the usable regions.
        self.labels = labels[~np.isin(labels, self.mask_values)]
        self.n = self.labels.size

        # One (region_label, row, col) triple per unmasked pixel.
        n_samples = (~np.isin(self.roi, self.mask_values)).sum()
        self.indices = np.zeros((n_samples, 3), dtype=int)

        _start = 0
        for l in self.labels:
            _n = (labeled == l).sum()
            _row, _col = np.where(labeled == l)
            self.indices[_start:_start + _n, 0] = l
            self.indices[_start:_start + _n, 1] = _row
            self.indices[_start:_start + _n, 2] = _col
            _start += _n

        if self.shuffle:
            self.rng.shuffle(self.labels)
| mit |
certik/hermes1d | examples/system_neutronics_fixedsrc2/plot.py | 3 | 1210 | import matplotlib.pyplot as plt
import numpy as np
# Plot the computed two-group solution (solution.gp_0 / solution.gp_1)
# against a per-region constant reference solution for a 7-region problem.

# material data -- one entry per region (7 regions).
# Naming suggests two-group diffusion data (D = diffusion coefficient,
# S = removal, S12 = group 1->2 transfer, nSf = nu * fission) -- presumed
# from the variable names and the reference formula below; confirm against
# the example's problem definition.
Q = [0.0, 1.5, 1.8, 1.5, 1.8, 1.8, 1.5]
D1 = 7*[1.2]
D2 = 7*[0.4]
S1 = 7*[0.03]
S2 = [0.1, 0.2, 0.25, 0.2, 0.25, 0.25, 0.2]
S12= [0.02] + 6*[0.015]
nSf1 = [0.005] + 6*[0.0075]
nSf2 = 7*[0.1]

fig = plt.figure()
# one axes for each group
ax1 = fig.add_subplot(211)
ax1.grid(True)
ax1.axhline(0, color='black', lw=2)
ax2 = fig.add_subplot(212, sharex=ax1)
ax2.grid(True)
ax2.axhline(0, color='black', lw=2)

# computed solution
# group 1
data = np.loadtxt("solution.gp_0")
x = data[:, 0]
y = data[:, 1]
ax1.plot(x,y,label='approximate')
# group 2
data = np.loadtxt("solution.gp_1")
x = data[:, 0]
y = data[:, 1]
ax2.plot(x,y,label='approximate')

# analytic solution (valid only with certain distance from interfaces)
phi1 = np.array([]) # group 1
phi2 = np.array([]) # group 2
for i in range(0,7,1):
    # 100 sample points per region; the reference flux is constant within
    # each region, and the group-2 flux follows from the group-1 flux.
    phi1loc = np.array(100*[ Q[i]/(S1[i] - nSf1[i] - nSf2[i]*S12[i]/S2[i]) ])
    phi1 = np.append(phi1, phi1loc)
    phi2 = np.append(phi2, S12[i]/S2[i]*phi1loc)
# 7 regions * 100 points -> x axis spans [0, 700)
x = np.arange(0, 700, 1)
ax1.plot(x, phi1, 'k--', label='reference')
ax2.plot(x, phi2, 'k--', label='reference')

plt.axes(ax1)
plt.legend()
plt.axes(ax2)
plt.legend()
plt.show()
| bsd-3-clause |
makelove/OpenCV-Python-Tutorial | ch15-图像阈值/15.简单阈值threshold.py | 1 | 1439 | # -*- coding: utf-8 -*-
'''
Simple thresholding.

When a pixel value is above the threshold we assign it a new value
(perhaps white); otherwise we assign it a different value (perhaps black).
The function for this is cv2.threshold().

The first argument is the source image, which should be a grayscale image.
The second argument is the threshold value used to classify the pixel values.
The third argument is the new value assigned to pixels whose value is above
(or, for some modes, below) the threshold.
OpenCV provides several different thresholding methods, selected by the
fourth argument. The methods include:
  - cv2.THRESH_BINARY
  - cv2.THRESH_BINARY_INV
  - cv2.THRESH_TRUNC
  - cv2.THRESH_TOZERO
  - cv2.THRESH_TOZERO_INV
'''
import cv2
import numpy as np
from matplotlib import pyplot as plt

# Load the gradient test image as grayscale (flag 0).
img = cv2.imread('grey-gradient.jpg', 0)

# Apply each thresholding mode with threshold 127 and max value 255.
ret, thresh1 = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)
ret, thresh2 = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY_INV)
ret, thresh3 = cv2.threshold(img, 127, 255, cv2.THRESH_TRUNC)
ret, thresh4 = cv2.threshold(img, 127, 255, cv2.THRESH_TOZERO)
ret, thresh5 = cv2.threshold(img, 127, 255, cv2.THRESH_TOZERO_INV)

# Show the original and the five thresholded results in a 2x3 grid.
titles = ['Original Image', 'BINARY', 'BINARY_INV', 'TRUNC', 'TOZERO', 'TOZERO_INV']
images = [img, thresh1, thresh2, thresh3, thresh4, thresh5]
for i in range(6):
    plt.subplot(2, 3, i + 1), plt.imshow(images[i], 'gray')
    plt.title(titles[i])
    plt.xticks([]), plt.yticks([])  # hide axis ticks
plt.show()
| mit |
nmayorov/scikit-learn | sklearn/utils/tests/test_murmurhash.py | 65 | 2838 | # Author: Olivier Grisel <olivier.grisel@ensta.org>
#
# License: BSD 3 clause
import numpy as np
from sklearn.externals.six import b, u
from sklearn.utils.murmurhash import murmurhash3_32
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from nose.tools import assert_equal, assert_true
def test_mmhash3_int():
    """Known-answer checks for hashing a plain int with various seeds."""
    # seed defaults to 0, and positive=False is itself the default, so all
    # three keyword spellings must agree for both seed values.
    for extra in ({}, {'positive': False}):
        assert_equal(murmurhash3_32(3, **extra), 847579505)
        assert_equal(murmurhash3_32(3, seed=0, **extra), 847579505)
        assert_equal(murmurhash3_32(3, seed=42, **extra), -1823081949)

    # positive=True reinterprets the 32-bit result as unsigned.
    assert_equal(murmurhash3_32(3, positive=True), 847579505)
    assert_equal(murmurhash3_32(3, seed=0, positive=True), 847579505)
    assert_equal(murmurhash3_32(3, seed=42, positive=True), 2471885347)
def test_mmhash3_int_array():
    """Vectorized hashing of an int array matches element-wise hashing."""
    rng = np.random.RandomState(42)
    keys = rng.randint(-5342534, 345345, size=3 * 2 * 1).astype(np.int32)
    keys = keys.reshape((3, 2, 1))

    for seed in [0, 42]:
        # signed variant: compare against scalar hashing of each element
        expected = np.array([murmurhash3_32(int(k), seed)
                             for k in keys.flat]).reshape(keys.shape)
        assert_array_equal(murmurhash3_32(keys, seed), expected)

        # unsigned variant
        expected_pos = np.array([murmurhash3_32(k, seed, positive=True)
                                 for k in keys.flat]).reshape(keys.shape)
        assert_array_equal(murmurhash3_32(keys, seed, positive=True),
                           expected_pos)
def test_mmhash3_bytes():
    """Known-answer checks for byte-string hashing, signed and unsigned."""
    cases = [(0, -156908512, 4138058784),
             (42, -1322301282, 2972666014)]
    for seed, signed, unsigned in cases:
        assert_equal(murmurhash3_32(b('foo'), seed), signed)
        assert_equal(murmurhash3_32(b('foo'), seed, positive=True), unsigned)
def test_mmhash3_unicode():
    """Unicode strings hash identically to their UTF-8 byte equivalents."""
    cases = [(0, -156908512, 4138058784),
             (42, -1322301282, 2972666014)]
    for seed, signed, unsigned in cases:
        assert_equal(murmurhash3_32(u('foo'), seed), signed)
        assert_equal(murmurhash3_32(u('foo'), seed, positive=True), unsigned)
def test_no_collision_on_byte_range():
    """Hashes of ' ' * 0 .. ' ' * 99 must be pairwise distinct."""
    previous_hashes = set()
    for i in range(100):
        h = murmurhash3_32(' ' * i, 0)
        assert_true(h not in previous_hashes,
                    "Found collision on growing empty string")
        # BUG FIX: the seen-set was never populated, so the collision
        # check above could never fire; record each hash.
        previous_hashes.add(h)
def test_uniform_distribution():
    """Hashed integers should fall (almost) uniformly into 10 buckets."""
    n_bins, n_samples = 10, 100000
    counts = np.zeros(n_bins, dtype=np.float64)

    for sample in range(n_samples):
        counts[murmurhash3_32(sample, positive=True) % n_bins] += 1

    observed = counts / n_samples
    expected = np.ones(n_bins) / n_bins
    # ratios should be ~1 in every bucket (2 decimal places)
    assert_array_almost_equal(observed / expected, np.ones(n_bins), 2)
| bsd-3-clause |
pystockhub/book | ch18/day03/Kiwoom.py | 2 | 8383 | import sys
from PyQt5.QtWidgets import *
from PyQt5.QAxContainer import *
from PyQt5.QtCore import *
import time
import pandas as pd
import sqlite3
TR_REQ_TIME_INTERVAL = 0.2
class Kiwoom(QAxWidget):
    """Qt wrapper around the Kiwoom Securities OpenAPI+ ActiveX control.

    All broker communication is event driven: requests go out through
    ``dynamicCall`` and answers arrive via ``OnReceive*`` COM events, so
    local ``QEventLoop`` instances are used to block until the matching
    event fires.  TR field names (e.g. "일자") are fixed Korean
    identifiers defined by the Kiwoom API and must not be altered.
    """
    def __init__(self):
        super().__init__()
        self._create_kiwoom_instance()
        self._set_signal_slots()

    def _create_kiwoom_instance(self):
        # Bind this widget to the registered OpenAPI+ COM control.
        self.setControl("KHOPENAPI.KHOpenAPICtrl.1")

    def _set_signal_slots(self):
        # Wire COM events to handlers: login result, TR answers, and
        # order-execution (chejan) notifications.
        self.OnEventConnect.connect(self._event_connect)
        self.OnReceiveTrData.connect(self._receive_tr_data)
        self.OnReceiveChejanData.connect(self._receive_chejan_data)

    def comm_connect(self):
        """Open the login dialog and block until login completes."""
        self.dynamicCall("CommConnect()")
        self.login_event_loop = QEventLoop()
        self.login_event_loop.exec_()

    def _event_connect(self, err_code):
        # err_code 0 means the login succeeded; any other value is failure.
        if err_code == 0:
            print("connected")
        else:
            print("disconnected")

        self.login_event_loop.exit()

    def get_code_list_by_market(self, market):
        """Return ticker codes for *market* (API returns ';'-joined text)."""
        code_list = self.dynamicCall("GetCodeListByMarket(QString)", market)
        code_list = code_list.split(';')
        # drop the empty string left by the trailing ';'
        return code_list[:-1]

    def get_master_code_name(self, code):
        """Return the human-readable issue name for a ticker *code*."""
        code_name = self.dynamicCall("GetMasterCodeName(QString)", code)
        return code_name

    def get_connect_state(self):
        # Connection state as reported by the control (nonzero = connected).
        ret = self.dynamicCall("GetConnectState()")
        return ret

    def get_login_info(self, tag):
        """Query session info; e.g. tag "ACCNO" returns the account numbers."""
        ret = self.dynamicCall("GetLoginInfo(QString)", tag)
        return ret

    def set_input_value(self, id, value):
        # Stage one TR input field before calling comm_rq_data().
        # NOTE: parameter 'id' shadows the builtin (kept for API parity).
        self.dynamicCall("SetInputValue(QString, QString)", id, value)

    def comm_rq_data(self, rqname, trcode, next, screen_no):
        """Send the staged TR request and block until its data arrives."""
        self.dynamicCall("CommRqData(QString, QString, int, QString)", rqname, trcode, next, screen_no)
        self.tr_event_loop = QEventLoop()
        self.tr_event_loop.exec_()

    def _comm_get_data(self, code, real_type, field_name, index, item_name):
        # Fetch a single field from the TR answer currently being delivered;
        # the API pads values with whitespace, hence the strip().
        ret = self.dynamicCall("CommGetData(QString, QString, QString, int, QString)", code,
                               real_type, field_name, index, item_name)
        return ret.strip()

    def _get_repeat_cnt(self, trcode, rqname):
        # Number of repeated (multi-row) records in the current TR answer.
        ret = self.dynamicCall("GetRepeatCnt(QString, QString)", trcode, rqname)
        return ret

    def send_order(self, rqname, screen_no, acc_no, order_type, code, quantity, price, hoga, order_no):
        """Place/modify/cancel an order; order_type selects the action."""
        self.dynamicCall("SendOrder(QString, QString, QString, int, QString, int, int, QString, QString)",
                         [rqname, screen_no, acc_no, order_type, code, quantity, price, hoga, order_no])

    def get_chejan_data(self, fid):
        # Read one field (by numeric FID) from the latest chejan event.
        ret = self.dynamicCall("GetChejanData(int)", fid)
        return ret

    def get_server_gubun(self):
        # Distinguishes the mock-investment server from the real one.
        ret = self.dynamicCall("KOA_Functions(QString, QString)", "GetServerGubun", "")
        return ret

    def _receive_chejan_data(self, gubun, item_cnt, fid_list):
        # Debug dump of order-execution events.  FIDs: 9203 = order number,
        # 302 = issue name, 900 = order quantity, 901 = order price.
        print(gubun)
        print(self.get_chejan_data(9203))
        print(self.get_chejan_data(302))
        print(self.get_chejan_data(900))
        print(self.get_chejan_data(901))

    def _receive_tr_data(self, screen_no, rqname, trcode, record_name, next, unused1, unused2, unused3, unused4):
        # 'next' == '2' means more paged data remains on the server.
        # NOTE: parameter 'next' shadows the builtin (kept for API parity).
        if next == '2':
            self.remained_data = True
        else:
            self.remained_data = False

        # Dispatch to the parser for the TR that was requested.
        if rqname == "opt10081_req":
            self._opt10081(rqname, trcode)
        elif rqname == "opw00001_req":
            self._opw00001(rqname, trcode)
        elif rqname == "opw00018_req":
            self._opw00018(rqname, trcode)

        try:
            self.tr_event_loop.exit()
        except AttributeError:
            # A TR answer can arrive before comm_rq_data() created the loop.
            pass

    @staticmethod
    def change_format(data):
        """Format a raw numeric API string with thousands separators.

        Leading zeros (and a leading '-') are stripped first; the sign is
        re-attached at the end.
        """
        strip_data = data.lstrip('-0')
        if strip_data == '' or strip_data == '.00':
            strip_data = '0'

        try:
            format_data = format(int(strip_data), ',d')
        except:
            # NOTE(review): bare except -- presumably guarding ValueError for
            # non-integer strings; consider narrowing to ValueError.
            format_data = format(float(strip_data))

        if data.startswith('-'):
            format_data = '-' + format_data

        return format_data

    @staticmethod
    def change_format2(data):
        """Strip leading zeros from a rate string, keeping sign and decimals."""
        strip_data = data.lstrip('-0')

        if strip_data == '':
            strip_data = '0'

        if strip_data.startswith('.'):
            strip_data = '0' + strip_data

        if data.startswith('-'):
            strip_data = '-' + strip_data

        return strip_data

    def _opw00001(self, rqname, trcode):
        # opw00001: estimated d+2 deposit for the account.
        d2_deposit = self._comm_get_data(trcode, "", rqname, 0, "d+2추정예수금")
        self.d2_deposit = Kiwoom.change_format(d2_deposit)

    def _opt10081(self, rqname, trcode):
        # opt10081: daily OHLCV candles.  Appends onto self.ohlcv lists,
        # which the caller is expected to have initialised beforehand.
        data_cnt = self._get_repeat_cnt(trcode, rqname)

        for i in range(data_cnt):
            # Field names (date/open/high/low/close/volume in Korean).
            date = self._comm_get_data(trcode, "", rqname, i, "일자")
            open = self._comm_get_data(trcode, "", rqname, i, "시가")
            high = self._comm_get_data(trcode, "", rqname, i, "고가")
            low = self._comm_get_data(trcode, "", rqname, i, "저가")
            close = self._comm_get_data(trcode, "", rqname, i, "현재가")
            volume = self._comm_get_data(trcode, "", rqname, i, "거래량")

            self.ohlcv['date'].append(date)
            self.ohlcv['open'].append(int(open))
            self.ohlcv['high'].append(int(high))
            self.ohlcv['low'].append(int(low))
            self.ohlcv['close'].append(int(close))
            self.ohlcv['volume'].append(int(volume))

    def reset_opw00018_output(self):
        # Must be called before requesting opw00018.
        self.opw00018_output = {'single': [], 'multi': []}

    def _opw00018(self, rqname, trcode):
        """Parse the opw00018 answer: account totals plus per-holding rows."""
        # single data: account-level totals
        total_purchase_price = self._comm_get_data(trcode, "", rqname, 0, "총매입금액")
        total_eval_price = self._comm_get_data(trcode, "", rqname, 0, "총평가금액")
        total_eval_profit_loss_price = self._comm_get_data(trcode, "", rqname, 0, "총평가손익금액")
        total_earning_rate = self._comm_get_data(trcode, "", rqname, 0, "총수익률(%)")
        estimated_deposit = self._comm_get_data(trcode, "", rqname, 0, "추정예탁자산")

        self.opw00018_output['single'].append(Kiwoom.change_format(total_purchase_price))
        self.opw00018_output['single'].append(Kiwoom.change_format(total_eval_price))
        self.opw00018_output['single'].append(Kiwoom.change_format(total_eval_profit_loss_price))

        total_earning_rate = Kiwoom.change_format(total_earning_rate)

        if self.get_server_gubun():
            # NOTE(review): presumably the mock server reports the rate
            # scaled by 100 -- confirm; a truthy check on the returned
            # string is what selects this branch.
            total_earning_rate = float(total_earning_rate) / 100
            total_earning_rate = str(total_earning_rate)

        self.opw00018_output['single'].append(total_earning_rate)
        self.opw00018_output['single'].append(Kiwoom.change_format(estimated_deposit))

        # multi data: one row per holding
        rows = self._get_repeat_cnt(trcode, rqname)
        for i in range(rows):
            name = self._comm_get_data(trcode, "", rqname, i, "종목명")
            quantity = self._comm_get_data(trcode, "", rqname, i, "보유수량")
            purchase_price = self._comm_get_data(trcode, "", rqname, i, "매입가")
            current_price = self._comm_get_data(trcode, "", rqname, i, "현재가")
            eval_profit_loss_price = self._comm_get_data(trcode, "", rqname, i, "평가손익")
            earning_rate = self._comm_get_data(trcode, "", rqname, i, "수익률(%)")

            quantity = Kiwoom.change_format(quantity)
            purchase_price = Kiwoom.change_format(purchase_price)
            current_price = Kiwoom.change_format(current_price)
            eval_profit_loss_price = Kiwoom.change_format(eval_profit_loss_price)
            earning_rate = Kiwoom.change_format2(earning_rate)

            self.opw00018_output['multi'].append([name, quantity, purchase_price, current_price, eval_profit_loss_price,
                                                  earning_rate])
if __name__ == "__main__":
app = QApplication(sys.argv)
kiwoom = Kiwoom()
kiwoom.comm_connect()
kiwoom.reset_opw00018_output()
account_number = kiwoom.get_login_info("ACCNO")
account_number = account_number.split(';')[0]
kiwoom.set_input_value("계좌번호", account_number)
kiwoom.comm_rq_data("opw00018_req", "opw00018", 0, "2000")
print(kiwoom.opw00018_output['single'])
print(kiwoom.opw00018_output['multi'])
| mit |
mattjj/pyhsmm-factorial | example.py | 2 | 2924 | from __future__ import division
import numpy as np
np.seterr(divide='ignore')
from matplotlib import pyplot as plt
import pyhsmm
from pyhsmm.util.text import progprint_xrange
import models
import util as futil
# Factorial HSMM demo: generate data from two independent HSMM chains whose
# emissions sum, then recover the decomposition by Gibbs sampling.

T = 400      # length of the generated observation sequence
Nmax = 10    # number of states per chain used during inference

# observation distributions used to generate data
true_obsdistns_chain1 = [
    pyhsmm.basic.distributions.ScalarGaussianNonconjNIX(
        None,None,None,None, # no hyperparameters since we won't resample
        mu=0,sigmasq=0.01),
    pyhsmm.basic.distributions.ScalarGaussianNonconjNIX(
        None,None,None,None,
        mu=10,sigmasq=0.01),
]
true_obsdistns_chain2 = [
    pyhsmm.basic.distributions.ScalarGaussianNonconjNIX(
        None,None,None,None,
        mu=20,sigmasq=0.01),
    pyhsmm.basic.distributions.ScalarGaussianNonconjNIX(
        None,None,None,None,
        mu=30,sigmasq=0.01),
]

# observation hyperparameters used during inference
obshypparamss = [
    dict(mu_0=5.,tausq_0=10.**2,sigmasq_0=0.01,nu_0=100.),
    dict(mu_0=25.,tausq_0=10.**2,sigmasq_0=0.01,nu_0=100.),
]

# duration hyperparameters used both for data generation and inference
durhypparamss = [
    dict(alpha_0=20*20,beta_0=20.),
    dict(alpha_0=20*75,beta_0=20.),
]

# ground-truth generative model: one component HSMM per chain
truemodel = models.Factorial([models.FactorialComponentHSMM(
    init_state_concentration=2.,
    alpha=2.,gamma=4.,
    obs_distns=od,
    dur_distns=[pyhsmm.basic.distributions.PoissonDuration(**durhypparams) for hi in range(len(od))])
    for od,durhypparams in zip([true_obsdistns_chain1,true_obsdistns_chain2],durhypparamss)])

# sumobs is the summed emission the sampler sees; allobs/allstates are
# the per-chain ground truth used only for plotting.
sumobs, allobs, allstates = truemodel.generate(T)
plt.figure(); plt.plot(sumobs); plt.title('summed data')
plt.figure(); plt.plot(truemodel.states_list[0].museqs); plt.title('true decomposition')

### estimate changepoints (threshold should probably be a function of the empirical variance, or something)
changepoints = futil.indicators_to_changepoints(np.concatenate(((0,),np.abs(np.diff(sumobs)) > 1)))
futil.plot_with_changepoints(sumobs,changepoints)

### construct posterior model
posteriormodel = models.Factorial([models.FactorialComponentHSMMPossibleChangepoints(
    init_state_concentration=2.,
    alpha=1.,gamma=4.,
    obs_distns=[pyhsmm.basic.distributions.ScalarGaussianNonconjNIX(**obshypparams) for idx in range(Nmax)],
    dur_distns=[pyhsmm.basic.distributions.PoissonDuration(**durhypparams) for idx in range(Nmax)],
    trunc=200)
    for obshypparams, durhypparams in zip(obshypparamss,durhypparamss)])
posteriormodel.add_data(data=sumobs,changepoints=changepoints)

# Gibbs sampling: each outer iteration runs nsubiter inner resampling
# sweeps, then plots the currently inferred per-chain mean sequences.
nsubiter=25
for itr in progprint_xrange(10):
    posteriormodel.resample_model(min_extra_noise=0.1,max_extra_noise=100.**2,niter=nsubiter)
    plt.figure(); plt.plot(posteriormodel.states_list[0].museqs);
    plt.title('sampled after %d iterations' % ((itr+1)))

plt.show()
| mit |
wood-b/CompBook | project1/sectD/ete_hist.py | 2 | 1207 | import sys
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
from scipy import stats
__author__ = "Brandon Wood"
file = open(sys.argv[1])
HEADER_LINES = 0
for x in range(HEADER_LINES):
file.readline()
ete_list = []
for line in file:
tokens = line.split()
ete_list.append(float(tokens[0]))
plt.rc('savefig',dpi=500)
#print ete_list
"""____________________________Stats__________________________________"""
avg = np.average(ete_list)
print avg
#print ete_list[1]
#mode = stats.mode(ete_list)
#print mode
stdev = np.std(ete_list)
print stdev
ete_sq = np.square(ete_list)
mean_sq_ete = np.average(ete_sq)
print mean_sq_ete
#print ete_sq[1]
#plot root mean squared
#rms = np.sqrt(ete_sq)
#rms_avg = np.average(rms)
#rms_stdev = np.std(rms)
#print rms_avg
#print rms_stdev
"""_____________________________Histogram_______________________________"""
plt.figure()
plt.ylabel('$P(R)$')
plt.xlabel('$end-to-end$ $distance$ $R$$(\AA)$')
n, bins, patches = plt.hist(ete_list, 75, normed=1, alpha=0.5)
y = mlab.normpdf(bins, avg, stdev)
plt.plot(bins, y, 'r--')
plt.legend(['$Gaussian$ $Distribution$'], loc='best')
plt.savefig('ete_hist.png', bbox_inches='tight')
| bsd-3-clause |
joshloyal/scikit-learn | sklearn/datasets/tests/test_mldata.py | 384 | 5221 | """Test functionality of mldata fetching utilities."""
import os
import shutil
import tempfile
import scipy as sp
from sklearn import datasets
from sklearn.datasets import mldata_filename, fetch_mldata
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.utils.testing import mock_mldata_urlopen
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import with_setup
from sklearn.utils.testing import assert_array_equal
tmpdir = None
def setup_tmpdata():
    """Create a scratch data_home with the 'mldata' subdirectory.

    Stores the path in the module-level ``tmpdir`` so tests (and the
    matching teardown) can find it.
    """
    # create temporary dir
    global tmpdir
    tmpdir = tempfile.mkdtemp()
    os.makedirs(os.path.join(tmpdir, 'mldata'))
def teardown_tmpdata():
    """Remove the scratch data_home created by setup_tmpdata, if any."""
    # remove temporary dir
    if tmpdir is not None:
        shutil.rmtree(tmpdir)
def test_mldata_filename():
    """Dataset names are slugified before being used as cache filenames."""
    assert_equal(mldata_filename('datasets-UCI iris'), 'datasets-uci-iris')
    assert_equal(mldata_filename('news20.binary'), 'news20binary')
    assert_equal(mldata_filename('book-crossing-ratings-1.0'),
                 'book-crossing-ratings-10')
    assert_equal(mldata_filename('Nile Water Level'), 'nile-water-level')
    assert_equal(mldata_filename('MNIST (original)'), 'mnist-original')
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_download():
    """Test that fetch_mldata is able to download and cache a data set."""
    real_urlopen = datasets.mldata.urlopen
    fake_server = {
        'mock': {
            'label': sp.ones((150,)),
            'data': sp.ones((150, 4)),
        },
    }
    datasets.mldata.urlopen = mock_mldata_urlopen(fake_server)
    try:
        mock = fetch_mldata('mock', data_home=tmpdir)
        for field in ("COL_NAMES", "DESCR", "target", "data"):
            assert_in(field, mock)

        assert_equal(mock.target.shape, (150,))
        assert_equal(mock.data.shape, (150, 4))

        # unknown dataset names propagate the server's HTTP error
        assert_raises(datasets.mldata.HTTPError,
                      fetch_mldata, 'not_existing_name')
    finally:
        datasets.mldata.urlopen = real_urlopen
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_fetch_one_column():
    """A dataset with a single array yields 'data' but no 'target'."""
    real_urlopen = datasets.mldata.urlopen
    try:
        dataname = 'onecol'
        # fake data set served through the mocked urlopen
        x = sp.arange(6).reshape(2, 3)
        datasets.mldata.urlopen = mock_mldata_urlopen({dataname: {'x': x}})

        dset = fetch_mldata(dataname, data_home=tmpdir)
        for field in ("COL_NAMES", "DESCR", "data"):
            assert_in(field, dset)
        assert_not_in("target", dset)

        assert_equal(dset.data.shape, (2, 3))
        assert_array_equal(dset.data, x)

        # transposing the data array
        dset = fetch_mldata(dataname, transpose_data=False, data_home=tmpdir)
        assert_equal(dset.data.shape, (3, 2))
    finally:
        datasets.mldata.urlopen = real_urlopen
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_fetch_multiple_column():
    """Column selection for multi-array datasets: by default heuristics,
    by column order, by numeric index, and by explicit name."""
    _urlopen_ref = datasets.mldata.urlopen
    try:
        # create fake data set in cache
        x = sp.arange(6).reshape(2, 3)
        y = sp.array([1, -1])
        z = sp.arange(12).reshape(4, 3)

        # by default: columns named 'label'/'data' map to target/data,
        # everything else is exposed under its own name.
        dataname = 'threecol-default'
        datasets.mldata.urlopen = mock_mldata_urlopen({
            dataname: (
                {
                    'label': y,
                    'data': x,
                    'z': z,
                },
                ['z', 'data', 'label'],
            ),
        })

        dset = fetch_mldata(dataname, data_home=tmpdir)
        for n in ["COL_NAMES", "DESCR", "target", "data", "z"]:
            assert_in(n, dset)
        assert_not_in("x", dset)
        assert_not_in("y", dset)

        assert_array_equal(dset.data, x)
        assert_array_equal(dset.target, y)
        # extra columns come back transposed
        assert_array_equal(dset.z, z.T)

        # by order: without recognized names the column order decides
        # which arrays become target and data.
        dataname = 'threecol-order'
        datasets.mldata.urlopen = mock_mldata_urlopen({
            dataname: ({'y': y, 'x': x, 'z': z},
                       ['y', 'x', 'z']), })

        dset = fetch_mldata(dataname, data_home=tmpdir)
        for n in ["COL_NAMES", "DESCR", "target", "data", "z"]:
            assert_in(n, dset)
        assert_not_in("x", dset)
        assert_not_in("y", dset)

        assert_array_equal(dset.data, x)
        assert_array_equal(dset.target, y)
        assert_array_equal(dset.z, z.T)

        # by number: target_name/data_name given as column indices
        dataname = 'threecol-number'
        datasets.mldata.urlopen = mock_mldata_urlopen({
            dataname: ({'y': y, 'x': x, 'z': z},
                       ['z', 'x', 'y']),
        })

        dset = fetch_mldata(dataname, target_name=2, data_name=0,
                            data_home=tmpdir)
        for n in ["COL_NAMES", "DESCR", "target", "data", "x"]:
            assert_in(n, dset)
        assert_not_in("y", dset)
        assert_not_in("z", dset)

        assert_array_equal(dset.data, z)
        assert_array_equal(dset.target, y)

        # by name: target_name/data_name given as column names
        dset = fetch_mldata(dataname, target_name='y', data_name='z',
                            data_home=tmpdir)
        for n in ["COL_NAMES", "DESCR", "target", "data", "x"]:
            assert_in(n, dset)
        assert_not_in("y", dset)
        assert_not_in("z", dset)
    finally:
        datasets.mldata.urlopen = _urlopen_ref
| bsd-3-clause |
moonbury/pythonanywhere | github/MasteringMLWithScikit-learn/8365OS_07_Codes/pca-3d-plot.py | 3 | 1392 | import matplotlib
# NOTE(review): the Qt4Agg backend was removed from modern matplotlib;
# switch to 'QtAgg' if this line raises in your environment.
matplotlib.use('Qt4Agg')
import numpy as np
import pylab as pl
from mpl_toolkits.mplot3d import Axes3D

from sklearn import decomposition
from sklearn import datasets

# Fix the RNG so the example renders identically on every run.
np.random.seed(5)

centers = [[1, 1], [-1, -1], [1, -1]]  # unused leftover from a clustering variant

# Project the 4-D iris measurements onto their first 3 principal components.
iris = datasets.load_iris()
X = iris.data
y = iris.target

fig = pl.figure(1, figsize=(4, 3))
pl.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
pl.cla()
pca = decomposition.PCA(n_components=3)
pca.fit(X)
X = pca.transform(X)

# Label each species at the centroid of its cluster in PCA space.
for name, label in [('Setosa', 0), ('Versicolour', 1), ('Virginica', 2)]:
    ax.text3D(X[y == label, 0].mean(),
              X[y == label, 1].mean() + 1.5,
              X[y == label, 2].mean(), name,
              horizontalalignment='center',
              bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))
# Reorder the labels to have colors matching the cluster results
# (the `np.float` alias was removed from NumPy; use the builtin float)
y = np.choose(y, [1, 2, 0]).astype(float)
# the 'spectral' colormap was renamed 'nipy_spectral' in matplotlib 2.2
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=y, cmap=pl.cm.nipy_spectral)

# Leftover scaffolding for drawing the principal plane; computed but
# never plotted below.
x_surf = [X[:, 0].min(), X[:, 0].max(),
          X[:, 0].min(), X[:, 0].max()]
y_surf = [X[:, 0].max(), X[:, 0].max(),
          X[:, 0].min(), X[:, 0].min()]
x_surf = np.array(x_surf)
y_surf = np.array(y_surf)

# transform() requires 2-D input: pass single-row slices and unwrap row 0.
v0 = pca.transform(pca.components_[:1])[0]
v0 /= v0[-1]
v1 = pca.transform(pca.components_[1:2])[0]
v1 /= v1[-1]

# The w_xaxis/w_yaxis/w_zaxis accessors were removed in matplotlib 3.8;
# hide the tick labels through the portable setters instead.
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_zticklabels([])

pl.show()
pl.show() | gpl-3.0 |
meduz/scikit-learn | examples/ensemble/plot_forest_iris.py | 335 | 6271 | """
====================================================================
Plot the decision surfaces of ensembles of trees on the iris dataset
====================================================================
Plot the decision surfaces of forests of randomized trees trained on pairs of
features of the iris dataset.
This plot compares the decision surfaces learned by a decision tree classifier
(first column), by a random forest classifier (second column), by an extra-
trees classifier (third column) and by an AdaBoost classifier (fourth column).
In the first row, the classifiers are built using the sepal width and the sepal
length features only, on the second row using the petal length and sepal length
only, and on the third row using the petal width and the petal length only.
In descending order of quality, when trained (outside of this example) on all
4 features using 30 estimators and scored using 10 fold cross validation, we see::
ExtraTreesClassifier() # 0.95 score
RandomForestClassifier() # 0.94 score
AdaBoost(DecisionTree(max_depth=3)) # 0.94 score
DecisionTree(max_depth=None) # 0.94 score
Increasing `max_depth` for AdaBoost lowers the standard deviation of the scores (but
the average score does not improve).
See the console's output for further details about each model.
In this example you might try to:
1) vary the ``max_depth`` for the ``DecisionTreeClassifier`` and
``AdaBoostClassifier``, perhaps try ``max_depth=3`` for the
``DecisionTreeClassifier`` or ``max_depth=None`` for ``AdaBoostClassifier``
2) vary ``n_estimators``
It is worth noting that RandomForests and ExtraTrees can be fitted in parallel
on many cores as each tree is built independently of the others. AdaBoost's
samples are built sequentially and so do not use multiple cores.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import clone
from sklearn.datasets import load_iris
from sklearn.ensemble import (RandomForestClassifier, ExtraTreesClassifier,
AdaBoostClassifier)
from sklearn.externals.six.moves import xrange
from sklearn.tree import DecisionTreeClassifier
# Parameters
n_classes = 3
n_estimators = 30
plot_colors = "ryb"
cmap = plt.cm.RdYlBu
plot_step = 0.02  # fine step width for decision surface contours
plot_step_coarser = 0.5  # step widths for coarse classifier guesses
RANDOM_SEED = 13  # fix the seed on each iteration

# Load data
iris = load_iris()

plot_idx = 1

# One classifier per column of the figure.
models = [DecisionTreeClassifier(max_depth=None),
          RandomForestClassifier(n_estimators=n_estimators),
          ExtraTreesClassifier(n_estimators=n_estimators),
          AdaBoostClassifier(DecisionTreeClassifier(max_depth=3),
                             n_estimators=n_estimators)]

# One row of the figure per feature pair.
for pair in ([0, 1], [0, 2], [2, 3]):
    for model in models:
        # We only take the two corresponding features
        X = iris.data[:, pair]
        y = iris.target

        # Shuffle
        idx = np.arange(X.shape[0])
        np.random.seed(RANDOM_SEED)
        np.random.shuffle(idx)
        X = X[idx]
        y = y[idx]

        # Standardize
        mean = X.mean(axis=0)
        std = X.std(axis=0)
        X = (X - mean) / std

        # Train
        # NOTE(review): the clone is discarded on the next line -- `model`
        # itself is fitted, and the plotting code below relies on exactly
        # that (it calls model.predict / model.estimators_). If this is
        # changed to fit the clone, update those plotting calls as well.
        clf = clone(model)
        clf = model.fit(X, y)

        scores = clf.score(X, y)
        # Create a title for each column and the console by using str() and
        # slicing away useless parts of the string
        model_title = str(type(model)).split(".")[-1][:-2][:-len("Classifier")]
        model_details = model_title
        if hasattr(model, "estimators_"):
            model_details += " with {} estimators".format(len(model.estimators_))
        print( model_details + " with features", pair, "has a score of", scores )

        plt.subplot(3, 4, plot_idx)
        if plot_idx <= len(models):
            # Add a title at the top of each column
            plt.title(model_title)

        # Now plot the decision boundary using a fine mesh as input to a
        # filled contour plot
        x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
        y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
        xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
                             np.arange(y_min, y_max, plot_step))

        # Plot either a single DecisionTreeClassifier or alpha blend the
        # decision surfaces of the ensemble of classifiers
        if isinstance(model, DecisionTreeClassifier):
            Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
            Z = Z.reshape(xx.shape)
            cs = plt.contourf(xx, yy, Z, cmap=cmap)
        else:
            # Choose alpha blend level with respect to the number of estimators
            # that are in use (noting that AdaBoost can use fewer estimators
            # than its maximum if it achieves a good enough fit early on)
            estimator_alpha = 1.0 / len(model.estimators_)
            for tree in model.estimators_:
                Z = tree.predict(np.c_[xx.ravel(), yy.ravel()])
                Z = Z.reshape(xx.shape)
                cs = plt.contourf(xx, yy, Z, alpha=estimator_alpha, cmap=cmap)

        # Build a coarser grid to plot a set of ensemble classifications
        # to show how these are different to what we see in the decision
        # surfaces. These points are regularly space and do not have a black outline
        xx_coarser, yy_coarser = np.meshgrid(np.arange(x_min, x_max, plot_step_coarser),
                                             np.arange(y_min, y_max, plot_step_coarser))
        Z_points_coarser = model.predict(np.c_[xx_coarser.ravel(), yy_coarser.ravel()]).reshape(xx_coarser.shape)
        cs_points = plt.scatter(xx_coarser, yy_coarser, s=15, c=Z_points_coarser, cmap=cmap, edgecolors="none")

        # Plot the training points, these are clustered together and have a
        # black outline
        # NOTE(review): xrange comes from sklearn.externals.six, which was
        # removed in scikit-learn 0.23; the builtin range is the Python 3
        # replacement.
        for i, c in zip(xrange(n_classes), plot_colors):
            idx = np.where(y == i)
            plt.scatter(X[idx, 0], X[idx, 1], c=c, label=iris.target_names[i],
                        cmap=cmap)

        plot_idx += 1  # move on to the next plot in sequence

plt.suptitle("Classifiers on feature subsets of the Iris dataset")
plt.axis("tight")

plt.show()
| bsd-3-clause |
ky822/scikit-learn | examples/linear_model/plot_sparse_recovery.py | 243 | 7461 | """
============================================================
Sparse recovery: feature selection for sparse linear models
============================================================
Given a small number of observations, we want to recover which features
of X are relevant to explain y. For this :ref:`sparse linear models
<l1_feature_selection>` can outperform standard statistical tests if the
true model is sparse, i.e. if a small fraction of the features are
relevant.
As detailed in :ref:`the compressive sensing notes
<compressive_sensing>`, the ability of L1-based approach to identify the
relevant variables depends on the sparsity of the ground truth, the
number of samples, the number of features, the conditioning of the
design matrix on the signal subspace, the amount of noise, and the
absolute value of the smallest non-zero coefficient [Wainwright2006]
(http://statistics.berkeley.edu/tech-reports/709.pdf).
Here we keep all parameters constant and vary the conditioning of the
design matrix. For a well-conditioned design matrix (small mutual
incoherence) we are exactly in compressive sensing conditions (i.i.d
Gaussian sensing matrix), and L1-recovery with the Lasso performs very
well. For an ill-conditioned matrix (high mutual incoherence),
regressors are very correlated, and the Lasso randomly selects one.
However, randomized-Lasso can recover the ground truth well.
In each situation, we first vary the alpha parameter setting the sparsity
of the estimated model and look at the stability scores of the randomized
Lasso. This analysis, knowing the ground truth, shows an optimal regime
in which relevant features stand out from the irrelevant ones. If alpha
is chosen too small, non-relevant variables enter the model. On the
opposite, if alpha is selected too large, the Lasso is equivalent to
stepwise regression, and thus brings no advantage over a univariate
F-test.
In a second time, we set alpha and compare the performance of different
feature selection methods, using the area under curve (AUC) of the
precision-recall.
"""
print(__doc__)
# Author: Alexandre Gramfort and Gael Varoquaux
# License: BSD 3 clause
import warnings
import matplotlib.pyplot as plt
import numpy as np
from scipy import linalg
from sklearn.linear_model import (RandomizedLasso, lasso_stability_path,
LassoLarsCV)
from sklearn.feature_selection import f_regression
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import auc, precision_recall_curve
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.utils.extmath import pinvh
from sklearn.utils import ConvergenceWarning
def mutual_incoherence(X_relevant, X_irelevant):
    """Mutual incoherence, as defined by formula (26a) of [Wainwright2006]."""
    # Gram matrix of the relevant design columns and its (pseudo-)inverse.
    gram = np.dot(X_relevant.T, X_relevant)
    cross = np.dot(X_irelevant.T, X_relevant)
    projector = np.dot(cross, pinvh(gram))
    # Largest L1 norm over the rows of the projection matrix.
    row_l1_norms = np.abs(projector).sum(axis=1)
    return np.max(row_l1_norms)
# Two regimes: a well-conditioned design (1) and an ill-conditioned,
# highly correlated design (1e-4).
# NOTE(review): lasso_stability_path, RandomizedLasso and
# sklearn.utils.extmath.pinvh were all removed from scikit-learn (0.21+);
# this example only runs on older releases.
for conditioning in (1, 1e-4):
    ###########################################################################
    # Simulate regression data with a correlated design
    n_features = 501
    n_relevant_features = 3
    noise_level = .2
    coef_min = .2
    # The Donoho-Tanner phase transition is around n_samples=25: below we
    # will completely fail to recover in the well-conditioned case
    n_samples = 25
    block_size = n_relevant_features

    rng = np.random.RandomState(42)

    # The coefficients of our model
    coef = np.zeros(n_features)
    coef[:n_relevant_features] = coef_min + rng.rand(n_relevant_features)

    # The correlation of our design: variables correlated by blocs of 3
    corr = np.zeros((n_features, n_features))
    for i in range(0, n_features, block_size):
        corr[i:i + block_size, i:i + block_size] = 1 - conditioning
    corr.flat[::n_features + 1] = 1
    corr = linalg.cholesky(corr)

    # Our design
    X = rng.normal(size=(n_samples, n_features))
    X = np.dot(X, corr)
    # Keep [Wainwright2006] (26c) constant
    X[:n_relevant_features] /= np.abs(
        linalg.svdvals(X[:n_relevant_features])).max()
    X = StandardScaler().fit_transform(X.copy())

    # The output variable
    y = np.dot(X, coef)
    y /= np.std(y)
    # We scale the added noise as a function of the average correlation
    # between the design and the output variable
    y += noise_level * rng.normal(size=n_samples)
    mi = mutual_incoherence(X[:, :n_relevant_features],
                            X[:, n_relevant_features:])

    ###########################################################################
    # Plot stability selection path, using a high eps for early stopping
    # of the path, to save computation time
    alpha_grid, scores_path = lasso_stability_path(X, y, random_state=42,
                                                   eps=0.05)

    plt.figure()
    # We plot the path as a function of alpha/alpha_max to the power 1/3: the
    # power 1/3 scales the path less brutally than the log, and enables to
    # see the progression along the path
    hg = plt.plot(alpha_grid[1:] ** .333, scores_path[coef != 0].T[1:], 'r')
    hb = plt.plot(alpha_grid[1:] ** .333, scores_path[coef == 0].T[1:], 'k')
    ymin, ymax = plt.ylim()
    plt.xlabel(r'$(\alpha / \alpha_{max})^{1/3}$')
    plt.ylabel('Stability score: proportion of times selected')
    plt.title('Stability Scores Path - Mutual incoherence: %.1f' % mi)
    plt.axis('tight')
    plt.legend((hg[0], hb[0]), ('relevant features', 'irrelevant features'),
               loc='best')

    ###########################################################################
    # Plot the estimated stability scores for a given alpha

    # Use 6-fold cross-validation rather than the default 3-fold: it leads to
    # a better choice of alpha:
    # Stop the user warnings outputs- they are not necessary for the example
    # as it is specifically set up to be challenging.
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', UserWarning)
        warnings.simplefilter('ignore', ConvergenceWarning)
        lars_cv = LassoLarsCV(cv=6).fit(X, y)

    # Run the RandomizedLasso: we use a paths going down to .1*alpha_max
    # to avoid exploring the regime in which very noisy variables enter
    # the model
    alphas = np.linspace(lars_cv.alphas_[0], .1 * lars_cv.alphas_[0], 6)
    clf = RandomizedLasso(alpha=alphas, random_state=42).fit(X, y)
    trees = ExtraTreesRegressor(100).fit(X, y)
    # Compare with F-score
    F, _ = f_regression(X, y)

    plt.figure()
    for name, score in [('F-test', F),
                        ('Stability selection', clf.scores_),
                        ('Lasso coefs', np.abs(lars_cv.coef_)),
                        ('Trees', trees.feature_importances_),
                        ]:
        precision, recall, thresholds = precision_recall_curve(coef != 0,
                                                               score)
        plt.semilogy(np.maximum(score / np.max(score), 1e-4),
                     label="%s. AUC: %.3f" % (name, auc(recall, precision)))
    plt.plot(np.where(coef != 0)[0], [2e-4] * n_relevant_features, 'mo',
             label="Ground truth")
    plt.xlabel("Features")
    plt.ylabel("Score")
    # Plot only the 100 first coefficients
    plt.xlim(0, 100)
    plt.legend(loc='best')
    plt.title('Feature selection scores - Mutual incoherence: %.1f'
              % mi)

plt.show()
| bsd-3-clause |
heplesser/nest-simulator | pynest/examples/spatial/ctx_2n.py | 20 | 2192 | # -*- coding: utf-8 -*-
#
# ctx_2n.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Create a 4x3 grid with one pyramidal cell and one interneuron at each position
-------------------------------------------------------------------------------
BCCN Tutorial @ CNS*09
Hans Ekkehard Plesser, UMB
"""
import nest
import matplotlib.pyplot as plt
import numpy as np
nest.ResetKernel()

# Two cell types laid out on identical 4x3 grids.
nest.CopyModel('iaf_psc_alpha', 'pyr')
nest.CopyModel('iaf_psc_alpha', 'in')

pos = nest.spatial.grid(shape=[4, 3], extent=[2., 1.5])

ctx_pyr = nest.Create('pyr', positions=pos)
ctx_in = nest.Create('in', positions=pos)

nest.PrintNodes()

# extract position information
ppyr = nest.GetPosition(ctx_pyr)
pin = nest.GetPosition(ctx_in)

ppyr_x = np.array([x for x, y in ppyr])
ppyr_y = np.array([y for x, y in ppyr])

pin_x = np.array([x for x, y in pin])
pin_y = np.array([y for x, y in pin])

# plot
plt.clf()
# BUG FIX: the pyramidal markers previously used pin_x while ppyr_x was
# computed but never used. Both populations share the same grid, so the
# values are identical, but each population should plot its own positions.
plt.plot(ppyr_x - 0.05, ppyr_y - 0.05, 'bo', markersize=20,
         label='Pyramidal', zorder=2)
plt.plot(pin_x + 0.05, pin_y + 0.05, 'ro', markersize=20,
         label='Interneuron', zorder=2)
plt.plot(ppyr_x, ppyr_y, 'o', markerfacecolor=(0.7, 0.7, 0.7),
         markersize=60, markeredgewidth=0, zorder=1, label='_nolegend_')

# beautify
plt.axis([-1.0, 1.0, -1.0, 1.0])
# plt.axes() with no arguments creates a brand-new axes in modern
# matplotlib (implicit reuse was deprecated and removed); operate on the
# current axes instead.
ax = plt.gca()
ax.set_aspect('equal', 'box')
ax.set_xticks((-0.75, -0.25, 0.25, 0.75))
ax.set_yticks((-0.5, 0, 0.5))
plt.grid(True)
plt.xlabel('4 Columns, Extent: 1.5')
plt.ylabel('3 Rows, Extent: 1.0')
plt.legend(numpoints=1)

plt.show()

# plt.savefig('ctx_2n.png')
| gpl-2.0 |
lilleswing/deepchem | deepchem/data/datasets.py | 1 | 99890 | """
Contains wrapper class for datasets.
"""
import json
import os
import math
import random
import logging
import tempfile
import time
import shutil
import multiprocessing
from ast import literal_eval as make_tuple
from typing import Any, Dict, Iterable, Iterator, List, Optional, Sequence, Tuple, Union
import numpy as np
import pandas as pd
import deepchem as dc
from deepchem.utils.typing import OneOrMany, Shape
from deepchem.utils.data_utils import save_to_disk, load_from_disk, load_image_files
# Type alias for the tuples produced by Dataset iterators: (X, y, w, ids).
Batch = Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]

logger = logging.getLogger(__name__)  # module-level logger for this module
def sparsify_features(X: np.ndarray) -> np.ndarray:
    """Extracts a sparse feature representation from dense feature array.

    Each sample of `X` is reduced to a `(nonzero_inds, nonzero_vals)`
    pair holding the indices of its nonzero entries along the first axis
    and the entries themselves.

    Parameters
    ----------
    X: np.ndarray
      A numpy array of shape `(n_samples, ...)`.

    Returns
    -------
    np.ndarray
      A numpy array with `dtype=object` whose i-th element is the
      `(nonzero_inds, nonzero_vals)` pair for the i-th sample of `X`.
    """
    sparse_rows = []
    for row in X:
        nonzero_inds = np.nonzero(row)[0]
        sparse_rows.append((nonzero_inds, row[nonzero_inds]))
    return np.array(sparse_rows, dtype=object)
def densify_features(X_sparse: np.ndarray, num_features: int) -> np.ndarray:
    """Expands sparse feature representation to dense feature array.

    Inverse of `sparsify_features`: each entry of `X_sparse` is a
    `(nonzero_inds, nonzero_vals)` pair describing one row of the dense
    output. Only flat `(n_samples, num_features)` arrays can be
    reconstructed; multidimensional dense arrays are not supported.

    Parameters
    ----------
    X_sparse: np.ndarray
      Must have `dtype=object`. `X_sparse[i]` must be a tuple of nonzero
      indices and values.
    num_features: int
      Number of features in dense array.

    Returns
    -------
    np.ndarray
      A numpy array of shape `(n_samples, num_features)`.
    """
    dense = np.zeros((len(X_sparse), num_features))
    for row, (nonzero_inds, nonzero_vals) in zip(dense, X_sparse):
        # Fill the row in place; indices may arrive as floats, so coerce.
        row[nonzero_inds.astype(int)] = nonzero_vals
    return dense
def pad_features(batch_size: int, X_b: np.ndarray) -> np.ndarray:
    """Pads a batch of features to have precisely batch_size elements.

    Given an array of features with length less than or equal to
    batch-size, pads it to `batch_size` length. It does this by
    repeating the original features in tiled fashion. For illustration,
    suppose that `len(X_b) == 3` and `batch_size == 10`.

    >>> X_b = np.arange(3)
    >>> X_b
    array([0, 1, 2])
    >>> batch_size = 10
    >>> X_manual = np.array([0, 1, 2, 0, 1, 2, 0, 1, 2, 0])
    >>> X_out = pad_features(batch_size, X_b)
    >>> assert (X_manual == X_out).all()

    This function is similar to `pad_batch` but doesn't handle labels
    `y` or weights `w` and is intended to be used for inference-time
    query processing.

    Parameters
    ----------
    batch_size: int
      The number of datapoints in a batch
    X_b: np.ndarray
      Must be such that `0 < len(X_b) <= batch_size`

    Returns
    -------
    X_out: np.ndarray
      A numpy array with `len(X_out) == batch_size`.

    Raises
    ------
    ValueError
      If `X_b` is empty or longer than `batch_size`.
    """
    num_samples = len(X_b)
    if num_samples == 0:
        # Guard: the tiling below would otherwise never make progress on
        # empty input (the original fill loop hung forever in that case).
        raise ValueError("Cannot pad an empty array")
    if num_samples > batch_size:
        raise ValueError("Cannot pad an array longer than `batch_size`")
    if num_samples == batch_size:
        return X_b
    # Repeat the original samples cyclically until the batch is full;
    # fancy indexing preserves dtype and any trailing feature dimensions.
    tiled_indices = np.arange(batch_size) % num_samples
    return X_b[tiled_indices]
def pad_batch(batch_size: int, X_b: np.ndarray, y_b: np.ndarray,
              w_b: np.ndarray, ids_b: np.ndarray) -> "Batch":
    """Pads batch to have size precisely batch_size elements.

    Given arrays of features `X_b`, labels `y_b`, weights `w_b`, and
    identifiers `ids_b` all with length less than or equal to
    batch-size, pads them to `batch_size` length. It does this by
    repeating the original entries in tiled fashion. Note that `X_b,
    y_b, w_b, ids_b` must all have the same length.

    Parameters
    ----------
    batch_size: int
      The number of datapoints in a batch
    X_b: np.ndarray
      Must be such that `0 < len(X_b) <= batch_size`
    y_b: np.ndarray
      Must be such that `len(y_b) <= batch_size`; may be None.
    w_b: np.ndarray
      Must be such that `len(w_b) <= batch_size`; may be None.
    ids_b: np.ndarray
      Must be such that `len(ids_b) <= batch_size`

    Returns
    -------
    Batch
      The batch is a tuple of `(X_out, y_out, w_out, ids_out)`,
      all numpy arrays with length `batch_size`.

    Raises
    ------
    ValueError
      If the batch is empty or longer than `batch_size`.
    """
    num_samples = len(X_b)
    if num_samples == 0:
        # Guard: tiling an empty batch would otherwise never terminate.
        raise ValueError("Cannot pad an empty batch")
    if num_samples > batch_size:
        # Explicit check, mirroring pad_features; previously this case
        # failed with an opaque broadcast error (or silently truncated
        # when w_b was None).
        raise ValueError("Cannot pad a batch longer than `batch_size`")
    if num_samples == batch_size:
        return (X_b, y_b, w_b, ids_b)
    # Indices that repeat the original samples cyclically up to batch_size;
    # fancy indexing preserves dtype and trailing dimensions.
    tiled = np.arange(batch_size) % num_samples
    X_out = X_b[tiled]
    y_out = None if y_b is None else y_b[tiled]
    ids_out = ids_b[tiled]
    if w_b is None:
        w_out = None
    else:
        # Only the first copy of each sample keeps its weight; the padded
        # repetitions get weight zero so they do not contribute to the
        # training loss.
        w_out = np.zeros((batch_size,) + w_b.shape[1:], dtype=w_b.dtype)
        w_out[:num_samples] = w_b
    return (X_out, y_out, w_out, ids_out)
class Dataset(object):
    """Abstract base class for datasets defined by X, y, w elements.

    `Dataset` objects are used to store representations of a dataset as
    used in a machine learning task. Datasets contain features `X`,
    labels `y`, weights `w` and identifiers `ids`. Different subclasses
    of `Dataset` may choose to hold `X, y, w, ids` in memory or on disk.

    The `Dataset` class attempts to provide for strong interoperability
    with other machine learning representations for datasets.
    Interconversion methods allow for `Dataset` objects to be converted
    to and from numpy arrays, pandas dataframes, tensorflow datasets,
    and pytorch datasets (only to and not from for pytorch at present).

    Note that you can never instantiate a `Dataset` object directly.
    Instead you will need to instantiate one of the concrete subclasses.
    """

    def __init__(self) -> None:
        # Abstract base class: concrete subclasses define construction.
        raise NotImplementedError()

    def __len__(self) -> int:
        """Get the number of elements in the dataset.

        Returns
        -------
        int
          The number of elements in the dataset.
        """
        raise NotImplementedError()

    def get_shape(self) -> Tuple[Shape, Shape, Shape, Shape]:
        """Get the shape of the dataset.

        Returns four tuples, giving the shape of the X, y, w, and ids
        arrays.

        Returns
        -------
        Tuple
          The tuple contains four elements, which are the shapes of
          the X, y, w, and ids arrays.
        """
        raise NotImplementedError()

    def get_task_names(self) -> np.ndarray:
        """Get the names of the tasks associated with this dataset."""
        raise NotImplementedError()

    @property
    def X(self) -> np.ndarray:
        """Get the X vector for this dataset as a single numpy array.

        Returns
        -------
        np.ndarray
          A numpy array of features `X`.

        Notes
        -----
        If data is stored on disk, accessing this field may involve loading
        data from disk and could potentially be slow. Using
        `iterbatches()` or `itersamples()` may be more efficient for
        larger datasets.
        """
        raise NotImplementedError()

    @property
    def y(self) -> np.ndarray:
        """Get the y vector for this dataset as a single numpy array.

        Returns
        -------
        np.ndarray
          A numpy array of labels `y`.

        Notes
        -----
        If data is stored on disk, accessing this field may involve loading
        data from disk and could potentially be slow. Using
        `iterbatches()` or `itersamples()` may be more efficient for
        larger datasets.
        """
        raise NotImplementedError()

    @property
    def ids(self) -> np.ndarray:
        """Get the ids vector for this dataset as a single numpy array.

        Returns
        -------
        np.ndarray
          A numpy array of identifiers `ids`.

        Notes
        -----
        If data is stored on disk, accessing this field may involve loading
        data from disk and could potentially be slow. Using
        `iterbatches()` or `itersamples()` may be more efficient for
        larger datasets.
        """
        raise NotImplementedError()

    @property
    def w(self) -> np.ndarray:
        """Get the weight vector for this dataset as a single numpy array.

        Returns
        -------
        np.ndarray
          A numpy array of weights `w`.

        Notes
        -----
        If data is stored on disk, accessing this field may involve loading
        data from disk and could potentially be slow. Using
        `iterbatches()` or `itersamples()` may be more efficient for
        larger datasets.
        """
        raise NotImplementedError()
def __repr__(self) -> str:
    """Convert self to REPL print representation."""
    threshold = dc.utils.get_print_threshold()
    task_str = np.array2string(
        np.array(self.get_task_names()), threshold=threshold)
    X_shape, y_shape, w_shape, _ = self.get_shape()
    shape_strs = (str(X_shape), str(y_shape), str(w_shape))
    name = self.__class__.__name__
    if self.__len__() < dc.utils.get_max_print_size():
        # Small datasets: also include the ids in the representation.
        id_str = np.array2string(self.ids, threshold=threshold)
        return "<%s X.shape: %s, y.shape: %s, w.shape: %s, ids: %s, task_names: %s>" % (
            (name,) + shape_strs + (id_str, task_str))
    # Large datasets: omit the ids to keep the output compact.
    return "<%s X.shape: %s, y.shape: %s, w.shape: %s, task_names: %s>" % (
        (name,) + shape_strs + (task_str,))

def __str__(self) -> str:
    """Convert self to str representation."""
    return self.__repr__()
def iterbatches(self,
                batch_size: Optional[int] = None,
                epochs: int = 1,
                deterministic: bool = False,
                pad_batches: bool = False) -> Iterator[Batch]:
    """Get an object that iterates over minibatches from the dataset.

    Each minibatch is returned as a tuple of four numpy arrays:
    `(X, y, w, ids)`.

    Parameters
    ----------
    batch_size: int, optional (default None)
      Number of elements in each batch.
    epochs: int, optional (default 1)
      Number of epochs to walk over dataset.
    deterministic: bool, optional (default False)
      If True, follow deterministic order.
    pad_batches: bool, optional (default False)
      If True, pad each batch to `batch_size`.

    Returns
    -------
    Iterator[Batch]
      Generator which yields tuples of four numpy arrays `(X, y, w, ids)`.
    """
    # Subclasses must override.
    raise NotImplementedError()

def itersamples(self) -> Iterator[Batch]:
    """Get an object that iterates over the samples in the dataset.

    Examples
    --------
    >>> dataset = NumpyDataset(np.ones((2,2)))
    >>> for x, y, w, id in dataset.itersamples():
    ...   print(x.tolist(), y.tolist(), w.tolist(), id)
    [1.0, 1.0] [0.0] [0.0] 0
    [1.0, 1.0] [0.0] [0.0] 1
    """
    # Subclasses must override.
    raise NotImplementedError()

def transform(self, transformer: "dc.trans.Transformer", **args) -> "Dataset":
    """Construct a new dataset by applying a transformation to every sample in this dataset.

    The argument is a function that can be called as follows:
    >> newx, newy, neww = fn(x, y, w)

    It might be called only once with the whole dataset, or multiple
    times with different subsets of the data. Each time it is called,
    it should transform the samples and return the transformed data.

    Parameters
    ----------
    transformer: dc.trans.Transformer
      The transformation to apply to each sample in the dataset.

    Returns
    -------
    Dataset
      A newly constructed Dataset object.
    """
    # Subclasses must override.
    raise NotImplementedError()

def select(self, indices: Sequence[int],
           select_dir: Optional[str] = None) -> "Dataset":
    """Creates a new dataset from a selection of indices from self.

    Parameters
    ----------
    indices: Sequence
      List of indices to select.
    select_dir: str, optional (default None)
      Path to new directory that the selected indices will be copied to.
    """
    # Subclasses must override.
    raise NotImplementedError()
def get_statistics(self, X_stats: bool = True,
                   y_stats: bool = True) -> Tuple[float, ...]:
    """Compute and return statistics of this dataset.

    Streams over `self.itersamples()` with Welford's online algorithm,
    so means and standard deviations can be computed even for datasets
    that do not fit in memory.

    Parameters
    ----------
    X_stats: bool, optional (default True)
      If True, compute feature-level mean and standard deviations.
    y_stats: bool, optional (default True)
      If True, compute label-level mean and standard deviations.

    Returns
    -------
    Tuple
      If `X_stats == True`, returns `(X_means, X_stds)`. If `y_stats == True`,
      returns `(y_means, y_stds)`. If both are true, returns
      `(X_means, X_stds, y_means, y_stds)`.
    """
    count = 0
    x_mean = 0.0
    x_m2 = 0.0
    y_mean = 0.0
    y_m2 = 0.0
    for X, y, _, _ in self.itersamples():
        count += 1
        if X_stats:
            x_delta = X - x_mean
            x_mean = x_mean + x_delta / count
            x_m2 = x_m2 + x_delta * (X - x_mean)
        if y_stats:
            y_delta = y - y_mean
            y_mean = y_mean + y_delta / count
            y_m2 = y_m2 + y_delta * (y - y_mean)
    if count < 2:
        # Too few samples for a meaningful spread estimate.
        x_std = 0.0
        y_std = 0
    else:
        x_std = np.sqrt(x_m2 / count)
        y_std = np.sqrt(y_m2 / count)
    parts = []
    if X_stats:
        parts.extend([x_mean, x_std])
    if y_stats:
        parts.extend([y_mean, y_std])
    return tuple(parts)
def make_tf_dataset(self,
                    batch_size: int = 100,
                    epochs: int = 1,
                    deterministic: bool = False,
                    pad_batches: bool = False):
    """Create a tf.data.Dataset that iterates over the data in this Dataset.

    Each value returned by the Dataset's iterator is a tuple of (X, y,
    w) for one batch.

    Parameters
    ----------
    batch_size: int, default 100
      The number of samples to include in each batch.
    epochs: int, default 1
      The number of times to iterate over the Dataset.
    deterministic: bool, default False
      If True, the data is produced in order. If False, a different
      random permutation of the data is used for each epoch.
    pad_batches: bool, default False
      If True, batches are padded as necessary to make the size of
      each batch exactly equal batch_size.

    Returns
    -------
    tf.data.Dataset
      TensorFlow Dataset that iterates over the same data.

    Raises
    ------
    ImportError
      If TensorFlow is not installed.

    Notes
    -----
    This class requires TensorFlow to be installed.
    """
    try:
        import tensorflow as tf
    except ImportError as e:
        # Previously a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit and masked unrelated import-time
        # failures; catch only ImportError and chain the original cause.
        raise ImportError(
            "This method requires TensorFlow to be installed.") from e

    # Retrieve the first sample so we can determine the dtypes.
    X, y, w, ids = next(self.itersamples())
    dtypes = (tf.as_dtype(X.dtype), tf.as_dtype(y.dtype), tf.as_dtype(w.dtype))
    shapes = (tf.TensorShape([None] + list(X.shape)),
              tf.TensorShape([None] + list(y.shape)),
              tf.TensorShape([None] + list(w.shape)))

    # Generator bridging this Dataset's minibatches into the tf pipeline.
    def gen_data():
        for X, y, w, ids in self.iterbatches(batch_size, epochs, deterministic,
                                             pad_batches):
            yield (X, y, w)

    return tf.data.Dataset.from_generator(gen_data, dtypes, shapes)
def make_pytorch_dataset(self,
                         epochs: int = 1,
                         deterministic: bool = False,
                         batch_size: Optional[int] = None):
    """Create a torch.utils.data.IterableDataset that iterates over the data in this Dataset.

    Each value returned by the Dataset's iterator is a tuple of (X, y, w, id)
    containing the data for one batch, or for a single sample if batch_size is None.

    Parameters
    ----------
    epochs: int, default 1
        The number of times to iterate over the Dataset.
    deterministic: bool, default False
        If True, the data is produced in order. If False, a different
        random permutation of the data is used for each epoch.
    batch_size: int, optional (default None)
        The number of samples to return in each batch. If None, each returned
        value is a single sample.

    Returns
    -------
    torch.utils.data.IterableDataset
        `torch.utils.data.IterableDataset` that iterates over the data in
        this dataset.

    Notes
    -----
    This method requires PyTorch to be installed.
    """
    # Abstract hook: concrete subclasses supply the actual PyTorch adapter.
    raise NotImplementedError()
def to_dataframe(self) -> pd.DataFrame:
    """Construct a pandas DataFrame containing the data from this Dataset.

    Returns
    -------
    pd.DataFrame
        Pandas dataframe. If there is only a single feature per datapoint,
        will have column "X" else will have columns "X1,X2,..." for
        features. If there is only a single label per datapoint, will
        have column "y" else will have columns "y1,y2,..." for labels. If
        there is only a single weight per datapoint will have column "w"
        else will have columns "w1,w2,...". Will have column "ids" for
        identifiers.
    """

    def _as_frame(values, prefix):
        # A 1-D array, or a 2-D array with one column, gets the bare prefix
        # as its column name; wider arrays get numbered columns "X1", "X2", …
        if len(values.shape) == 1 or values.shape[1] == 1:
            names = [prefix]
        else:
            names = [f'{prefix}{i+1}' for i in range(values.shape[1])]
        return pd.DataFrame(values, columns=names)

    frames = [
        _as_frame(self.X, 'X'),
        _as_frame(self.y, 'y'),
        _as_frame(self.w, 'w'),
        pd.DataFrame(self.ids, columns=['ids'])
    ]
    return pd.concat(frames, axis=1, sort=False)
@staticmethod
def from_dataframe(df: pd.DataFrame,
                   X: Optional[OneOrMany[str]] = None,
                   y: Optional[OneOrMany[str]] = None,
                   w: Optional[OneOrMany[str]] = None,
                   ids: Optional[str] = None):
    """Construct a Dataset from the contents of a pandas DataFrame.

    Parameters
    ----------
    df: pd.DataFrame
        The pandas DataFrame
    X: str or List[str], optional (default None)
        The name of the column or columns containing the X array. If
        this is None, it will look for default column names that match
        those produced by to_dataframe().
    y: str or List[str], optional (default None)
        The name of the column or columns containing the y array. If
        this is None, it will look for default column names that match
        those produced by to_dataframe().
    w: str or List[str], optional (default None)
        The name of the column or columns containing the w array. If
        this is None, it will look for default column names that match
        those produced by to_dataframe().
    ids: str, optional (default None)
        The name of the column containing the ids. If this is None, it
        will look for default column names that match those produced by
        to_dataframe().
    """

    def extract(selection, prefix):
        # Extraction was previously copy/pasted three times for X, y and w;
        # this helper keeps the three paths identical by construction.
        # Precedence: explicit column selection, then the bare default
        # column, then the numbered columns ("X1", "X2", ...) emitted by
        # to_dataframe(). Result is always made 2-D.
        if selection is not None:
            values = df[selection]
        elif prefix in df.columns:
            values = df[prefix]
        else:
            columns = []
            i = 1
            while f'{prefix}{i}' in df.columns:
                columns.append(f'{prefix}{i}')
                i += 1
            values = df[columns]
        if len(values.shape) == 1:
            values = np.expand_dims(values, 1)
        return values

    X_val = extract(X, 'X')
    y_val = extract(y, 'y')
    w_val = extract(w, 'w')
    # Find the ids (never reshaped; may legitimately be absent).
    if ids is not None:
        ids_val = df[ids]
    elif 'ids' in df.columns:
        ids_val = df['ids']
    else:
        ids_val = None
    return NumpyDataset(X_val, y_val, w_val, ids_val)
class NumpyDataset(Dataset):
    """A Dataset defined by in-memory numpy arrays.

    This subclass of `Dataset` stores arrays `X,y,w,ids` in memory as
    numpy arrays. This makes it very easy to construct `NumpyDataset`
    objects.

    Examples
    --------
    >>> import numpy as np
    >>> dataset = NumpyDataset(X=np.random.rand(5, 3), y=np.random.rand(5,), ids=np.arange(5))
    """

    def __init__(self,
                 X: np.ndarray,
                 y: Optional[np.ndarray] = None,
                 w: Optional[np.ndarray] = None,
                 ids: Optional[np.ndarray] = None,
                 n_tasks: int = 1) -> None:
        """Initialize this object.

        Parameters
        ----------
        X: np.ndarray
            Input features. A numpy array of shape `(n_samples,...)`.
        y: np.ndarray, optional (default None)
            Labels. A numpy array of shape `(n_samples, ...)`. Note that each label can
            have an arbitrary shape.
        w: np.ndarray, optional (default None)
            Weights. Should either be 1D array of shape `(n_samples,)` or if
            there's more than one task, of shape `(n_samples, n_tasks)`.
        ids: np.ndarray, optional (default None)
            Identifiers. A numpy array of shape `(n_samples,)`
        n_tasks: int, default 1
            Number of learning tasks.
        """
        n_samples = len(X)
        # Robustness fix: defaults are applied even when `X` is empty, so an
        # empty dataset gets valid zero-length y/w/ids arrays instead of the
        # previous behavior (None labels -> crash in __len__/itersamples).
        if y is None:
            # Set labels to be zero, with zero weights
            y = np.zeros((n_samples, n_tasks), np.float32)
            w = np.zeros((n_samples, 1), np.float32)
        if ids is None:
            ids = np.arange(n_samples)
        if not isinstance(X, np.ndarray):
            X = np.array(X)
        if not isinstance(y, np.ndarray):
            y = np.array(y)
        if w is None:
            if len(y.shape) == 1:
                w = np.ones(y.shape[0], np.float32)
            else:
                w = np.ones((y.shape[0], 1), np.float32)
        if not isinstance(w, np.ndarray):
            w = np.array(w)
        self._X = X
        self._y = y
        self._w = w
        self._ids = np.array(ids, dtype=object)

    def __len__(self) -> int:
        """Get the number of elements in the dataset."""
        return len(self._y)

    def get_shape(self) -> Tuple[Shape, Shape, Shape, Shape]:
        """Get the shape of the dataset.

        Returns four tuples, giving the shape of the X, y, w, and ids arrays.
        """
        return self._X.shape, self._y.shape, self._w.shape, self._ids.shape

    def get_task_names(self) -> np.ndarray:
        """Get the names of the tasks associated with this dataset."""
        # 1-D labels are treated as a single task named 0.
        if len(self._y.shape) < 2:
            return np.array([0])
        return np.arange(self._y.shape[1])

    @property
    def X(self) -> np.ndarray:
        """Get the X vector for this dataset as a single numpy array."""
        return self._X

    @property
    def y(self) -> np.ndarray:
        """Get the y vector for this dataset as a single numpy array."""
        return self._y

    @property
    def ids(self) -> np.ndarray:
        """Get the ids vector for this dataset as a single numpy array."""
        return self._ids

    @property
    def w(self) -> np.ndarray:
        """Get the weight vector for this dataset as a single numpy array."""
        return self._w

    def iterbatches(self,
                    batch_size: Optional[int] = None,
                    epochs: int = 1,
                    deterministic: bool = False,
                    pad_batches: bool = False) -> Iterator[Batch]:
        """Get an object that iterates over minibatches from the dataset.

        Each minibatch is returned as a tuple of four numpy arrays:
        `(X, y, w, ids)`.

        Parameters
        ----------
        batch_size: int, optional (default None)
            Number of elements in each batch. If None, the whole dataset is
            yielded as one batch per epoch.
        epochs: int, default 1
            Number of epochs to walk over dataset.
        deterministic: bool, optional (default False)
            If True, follow deterministic order.
        pad_batches: bool, optional (default False)
            If True, pad each batch to `batch_size`.

        Returns
        -------
        Iterator[Batch]
            Generator which yields tuples of four numpy arrays `(X, y, w, ids)`.
        """

        def iterate(dataset: NumpyDataset, batch_size: Optional[int],
                    epochs: int, deterministic: bool, pad_batches: bool):
            n_samples = dataset._X.shape[0]
            if deterministic:
                sample_perm = np.arange(n_samples)
            if batch_size is None:
                batch_size = n_samples
            for epoch in range(epochs):
                if not deterministic:
                    # Fresh shuffle every epoch.
                    sample_perm = np.random.permutation(n_samples)
                batch_idx = 0
                # Fix: `np.math` was an undocumented alias removed in
                # NumPy 2.0; use np.ceil instead of np.math.ceil.
                num_batches = int(np.ceil(n_samples / batch_size))
                while batch_idx < num_batches:
                    start = batch_idx * batch_size
                    end = min(n_samples, (batch_idx + 1) * batch_size)
                    indices = range(start, end)
                    perm_indices = sample_perm[indices]
                    X_batch = dataset._X[perm_indices]
                    y_batch = dataset._y[perm_indices]
                    w_batch = dataset._w[perm_indices]
                    ids_batch = dataset._ids[perm_indices]
                    if pad_batches:
                        (X_batch, y_batch, w_batch, ids_batch) = pad_batch(
                            batch_size, X_batch, y_batch, w_batch, ids_batch)
                    batch_idx += 1
                    yield (X_batch, y_batch, w_batch, ids_batch)

        return iterate(self, batch_size, epochs, deterministic, pad_batches)

    def itersamples(self) -> Iterator[Batch]:
        """Get an object that iterates over the samples in the dataset.

        Returns
        -------
        Iterator[Batch]
            Iterator which yields tuples of four numpy arrays `(X, y, w, ids)`.

        Examples
        --------
        >>> dataset = NumpyDataset(np.ones((2,2)))
        >>> for x, y, w, id in dataset.itersamples():
        ...   print(x.tolist(), y.tolist(), w.tolist(), id)
        [1.0, 1.0] [0.0] [0.0] 0
        [1.0, 1.0] [0.0] [0.0] 1
        """
        n_samples = self._X.shape[0]
        return ((self._X[i], self._y[i], self._w[i], self._ids[i])
                for i in range(n_samples))

    def transform(self, transformer: "dc.trans.Transformer",
                  **args) -> "NumpyDataset":
        """Construct a new dataset by applying a transformation to every sample in this dataset.

        The argument is a function that can be called as follows:
        >> newx, newy, neww = fn(x, y, w)

        It might be called only once with the whole dataset, or multiple
        times with different subsets of the data. Each time it is called,
        it should transform the samples and return the transformed data.

        Parameters
        ----------
        transformer: dc.trans.Transformer
            The transformation to apply to each sample in the dataset

        Returns
        -------
        NumpyDataset
            A newly constructed NumpyDataset object
        """
        newx, newy, neww, newids = transformer.transform_array(
            self._X, self._y, self._w, self._ids)
        return NumpyDataset(newx, newy, neww, newids)

    def select(self, indices: Sequence[int],
               select_dir: Optional[str] = None) -> "NumpyDataset":
        """Creates a new dataset from a selection of indices from self.

        Parameters
        ----------
        indices: List[int]
            List of indices to select.
        select_dir: str, optional (default None)
            Used to provide same API as `DiskDataset`. Ignored since
            `NumpyDataset` is purely in-memory.

        Returns
        -------
        NumpyDataset
            A selected NumpyDataset object
        """
        X = self.X[indices]
        y = self.y[indices]
        w = self.w[indices]
        ids = self.ids[indices]
        return NumpyDataset(X, y, w, ids)

    def make_pytorch_dataset(self,
                             epochs: int = 1,
                             deterministic: bool = False,
                             batch_size: Optional[int] = None):
        """Create a torch.utils.data.IterableDataset that iterates over the data in this Dataset.

        Each value returned by the Dataset's iterator is a tuple of (X, y, w, id)
        containing the data for one batch, or for a single sample if batch_size is None.

        Parameters
        ----------
        epochs: int, default 1
            The number of times to iterate over the Dataset
        deterministic: bool, default False
            If True, the data is produced in order. If False, a different
            random permutation of the data is used for each epoch.
        batch_size: int, optional (default None)
            The number of samples to return in each batch. If None, each returned
            value is a single sample.

        Returns
        -------
        torch.utils.data.IterableDataset
            `torch.utils.data.IterableDataset` that iterates over the data in
            this dataset.

        Notes
        -----
        This method requires PyTorch to be installed.
        """
        try:
            from deepchem.data.pytorch_datasets import _TorchNumpyDataset
        except ImportError as e:
            # Fix: bare `except:` replaced; only a failed import is expected.
            raise ImportError(
                "This method requires PyTorch to be installed.") from e
        pytorch_ds = _TorchNumpyDataset(
            numpy_dataset=self,
            epochs=epochs,
            deterministic=deterministic,
            batch_size=batch_size)
        return pytorch_ds

    @staticmethod
    def from_DiskDataset(ds: "DiskDataset") -> "NumpyDataset":
        """Convert DiskDataset to NumpyDataset.

        Parameters
        ----------
        ds: DiskDataset
            DiskDataset to transform to NumpyDataset.

        Returns
        -------
        NumpyDataset
            A new NumpyDataset created from DiskDataset.
        """
        return NumpyDataset(ds.X, ds.y, ds.w, ds.ids)

    def to_json(self, fname: str) -> None:
        """Dump NumpyDataset to the json file .

        Parameters
        ----------
        fname: str
            The name of the json file.
        """
        # Bug fix: this method was decorated with @staticmethod while still
        # taking `self`, so an instance call `ds.to_json(path)` bound the
        # path to `self` and raised a TypeError. It is an instance method.
        d = {
            'X': self.X.tolist(),
            'y': self.y.tolist(),
            'w': self.w.tolist(),
            'ids': self.ids.tolist()
        }
        with open(fname, 'w') as fout:
            json.dump(d, fout)

    @staticmethod
    def from_json(fname: str) -> "NumpyDataset":
        """Create NumpyDataset from the json file.

        Parameters
        ----------
        fname: str
            The name of the json file.

        Returns
        -------
        NumpyDataset
            A new NumpyDataset created from the json file.
        """
        with open(fname) as fin:
            d = json.load(fin)
        return NumpyDataset(d['X'], d['y'], d['w'], d['ids'])

    @staticmethod
    def merge(datasets: Sequence[Dataset]) -> "NumpyDataset":
        """Merge multiple NumpyDatasets.

        Parameters
        ----------
        datasets: List[Dataset]
            List of datasets to merge.

        Returns
        -------
        NumpyDataset
            A single NumpyDataset containing all the samples from all datasets.
        """
        X, y, w, ids = (datasets[0].X, datasets[0].y, datasets[0].w,
                        datasets[0].ids)
        for dataset in datasets[1:]:
            X = np.concatenate([X, dataset.X], axis=0)
            y = np.concatenate([y, dataset.y], axis=0)
            w = np.concatenate([w, dataset.w], axis=0)
            ids = np.concatenate([ids, dataset.ids], axis=0)
        # Robustness fix: a 1-D y has no second axis; treat it as a single
        # task instead of raising IndexError on y.shape[1].
        n_tasks = y.shape[1] if y.ndim > 1 else 1
        return NumpyDataset(X, y, w, ids, n_tasks=n_tasks)
class _Shard(object):
def __init__(self, X, y, w, ids):
self.X = X
self.y = y
self.w = w
self.ids = ids
class DiskDataset(Dataset):
"""
A Dataset that is stored as a set of files on disk.
The DiskDataset is the workhorse class of DeepChem that facilitates analyses
on large datasets. Use this class whenever you're working with a large
dataset that can't be easily manipulated in RAM.
On disk, a `DiskDataset` has a simple structure. All files for a given
`DiskDataset` are stored in a `data_dir`. The contents of `data_dir` should
be laid out as follows:
data_dir/
|
---> metadata.csv.gzip
|
---> tasks.json
|
---> shard-0-X.npy
|
---> shard-0-y.npy
|
---> shard-0-w.npy
|
---> shard-0-ids.npy
|
---> shard-1-X.npy
.
.
.
The metadata is constructed by static method
`DiskDataset._construct_metadata` and saved to disk by
`DiskDataset._save_metadata`. The metadata itself consists of a csv file
which has columns `('ids', 'X', 'y', 'w', 'ids_shape', 'X_shape', 'y_shape',
'w_shape')`. `tasks.json` consists of a list of task names for this dataset.
The actual data is stored in `.npy` files (numpy array files) of the form
'shard-0-X.npy', 'shard-0-y.npy', etc.
The basic structure of `DiskDataset` is quite robust and will likely serve
you well for datasets up to about 100 GB or larger. However note that
`DiskDataset` has not been tested for very large datasets at the terabyte
range and beyond. You may be better served by implementing a custom
`Dataset` class for those use cases.
Examples
--------
Let's walk through a simple example of constructing a new `DiskDataset`.
>>> import deepchem as dc
>>> import numpy as np
>>> X = np.random.rand(10, 10)
>>> dataset = dc.data.DiskDataset.from_numpy(X)
If you have already saved a `DiskDataset` to `data_dir`, you can reinitialize it with
>> data_dir = "/path/to/my/data"
>> dataset = dc.data.DiskDataset(data_dir)
Once you have a dataset you can access its attributes as follows
>>> X = np.random.rand(10, 10)
>>> y = np.random.rand(10,)
>>> w = np.ones_like(y)
>>> dataset = dc.data.DiskDataset.from_numpy(X)
>>> X, y, w = dataset.X, dataset.y, dataset.w
One thing to beware of is that `dataset.X`, `dataset.y`, `dataset.w` are
loading data from disk! If you have a large dataset, these operations can be
extremely slow. Instead try iterating through the dataset instead.
>>> for (xi, yi, wi, idi) in dataset.itersamples():
... pass
Attributes
----------
data_dir: str
Location of directory where this `DiskDataset` is stored to disk
metadata_df: pd.DataFrame
Pandas Dataframe holding metadata for this `DiskDataset`
legacy_metadata: bool
Whether this `DiskDataset` uses legacy format.
Notes
-----
`DiskDataset` originally had a simpler metadata format without shape
information. Older `DiskDataset` objects had metadata files with columns
`('ids', 'X', 'y', 'w')` and no additional shape columns. `DiskDataset`
maintains backwards compatibility with this older metadata format, but we
recommend for performance reasons not using legacy metadata for new
projects.
"""
def __init__(self, data_dir: str) -> None:
    """Load a constructed DiskDataset from disk

    Note that this method cannot construct a new disk dataset. Instead use
    static methods `DiskDataset.create_dataset` or `DiskDataset.from_numpy`
    for that purpose. Use this constructor instead to load a `DiskDataset`
    that has already been created on disk.

    Parameters
    ----------
    data_dir: str
        Location on disk of an existing `DiskDataset`.
    """
    self.data_dir = data_dir
    logger.info("Loading dataset from disk.")
    self.tasks, self.metadata_df = self.load_metadata()
    # Distinguish the legacy 4-column metadata layout from the current
    # 8-column layout (which additionally records per-shard array shapes).
    # List equality already implies the length check the original code
    # performed separately.
    legacy_columns = ['ids', 'X', 'y', 'w']
    current_columns = legacy_columns + [
        'ids_shape', 'X_shape', 'y_shape', 'w_shape'
    ]
    columns = list(self.metadata_df.columns)
    if columns == legacy_columns:
        # Typo fixed in the message below (was "metatadata").
        logger.info(
            "Detected legacy metadata on disk. You can upgrade from legacy metadata "
            "to the more efficient current metadata by resharding this dataset "
            "by calling the reshard() method of this object.")
        self.legacy_metadata = True
    elif columns == current_columns:
        self.legacy_metadata = False
    else:
        raise ValueError(
            "Malformed metadata on disk. Metadata must have columns 'ids', 'X', 'y', 'w', "
            "'ids_shape', 'X_shape', 'y_shape', 'w_shape' (or if in legacy metadata format,"
            "columns 'ids', 'X', 'y', 'w')")
    # Small in-memory cache of recently loaded shards, bounded by
    # _memory_cache_size bytes.
    self._cached_shards: Optional[List] = None
    self._memory_cache_size = 20 * (1 << 20)  # 20 MB
    self._cache_used = 0
@staticmethod
def create_dataset(shard_generator: Iterable[Batch],
                   data_dir: Optional[str] = None,
                   tasks: Optional[Sequence] = None) -> "DiskDataset":
    """Creates a new DiskDataset

    Parameters
    ----------
    shard_generator: Iterable[Batch]
        An iterable (either a list or generator) that provides tuples of data
        (X, y, w, ids). Each tuple will be written to a separate shard on disk.
    data_dir: str, optional (default None)
        Filename for data directory. Creates a temp directory if none specified.
    tasks: Sequence, optional (default None)
        List of tasks for this dataset. An empty list is used if omitted.

    Returns
    -------
    DiskDataset
        A new `DiskDataset` constructed from the given data
    """
    # Fix: the original used the mutable default `tasks=[]`, a classic
    # Python pitfall (the list object is shared between calls).
    if tasks is None:
        tasks = []
    if data_dir is None:
        data_dir = tempfile.mkdtemp()
    elif not os.path.exists(data_dir):
        os.makedirs(data_dir)
    metadata_rows = []
    time1 = time.time()
    for shard_num, (X, y, w, ids) in enumerate(shard_generator):
        basename = "shard-%d" % shard_num
        metadata_rows.append(
            DiskDataset.write_data_to_disk(data_dir, basename, tasks, X, y, w,
                                           ids))
    metadata_df = DiskDataset._construct_metadata(metadata_rows)
    DiskDataset._save_metadata(metadata_df, data_dir, tasks)
    time2 = time.time()
    logger.info("TIMING: dataset construction took %0.3f s" % (time2 - time1))
    return DiskDataset(data_dir)
def load_metadata(self) -> Tuple[List[str], pd.DataFrame]:
    """Helper method that loads metadata from disk.

    Tries the current on-disk format first (tasks.json +
    metadata.csv.gzip); on any failure falls back to the obsolete joblib
    format, converting and re-saving it in the current format.

    Returns
    -------
    Tuple[List[str], pd.DataFrame]
        The task names and the metadata dataframe.

    Raises
    ------
    ValueError
        If no metadata in either format is found in `self.data_dir`.
    """
    try:
        tasks_filename, metadata_filename = self._get_metadata_filename()
        with open(tasks_filename) as fin:
            tasks = json.load(fin)
        metadata_df = pd.read_csv(metadata_filename, compression='gzip')
        # Normalize NaN cells (e.g. missing filenames) to None.
        metadata_df = metadata_df.where((pd.notnull(metadata_df)), None)
        return tasks, metadata_df
    except Exception:
        # Deliberate best-effort: any failure here means "not current
        # format"; fall through to the legacy loader below.
        pass
    # Load obsolete format -> save in new format
    metadata_filename = os.path.join(self.data_dir, "metadata.joblib")
    if os.path.exists(metadata_filename):
        tasks, metadata_df = load_from_disk(metadata_filename)
        # Drop columns the current format no longer carries.
        del metadata_df['task_names']
        del metadata_df['basename']
        DiskDataset._save_metadata(metadata_df, self.data_dir, tasks)
        return tasks, metadata_df
    raise ValueError("No Metadata Found On Disk")
@staticmethod
def _save_metadata(metadata_df: pd.DataFrame, data_dir: str,
tasks: Optional[Sequence]) -> None:
"""Saves the metadata for a DiskDataset
Parameters
----------
metadata_df: pd.DataFrame
The dataframe which will be written to disk.
data_dir: str
Directory to store metadata.
tasks: Sequence, optional
Tasks of DiskDataset. If `None`, an empty list of tasks is written to
disk.
"""
if tasks is None:
tasks = []
elif isinstance(tasks, np.ndarray):
tasks = tasks.tolist()
metadata_filename = os.path.join(data_dir, "metadata.csv.gzip")
tasks_filename = os.path.join(data_dir, "tasks.json")
with open(tasks_filename, 'w') as fout:
json.dump(tasks, fout)
metadata_df.to_csv(metadata_filename, index=False, compression='gzip')
@staticmethod
def _construct_metadata(metadata_entries: List) -> pd.DataFrame:
"""Construct a dataframe containing metadata.
Parameters
----------
metadata_entries: List
`metadata_entries` should have elements returned by write_data_to_disk
above.
Returns
-------
pd.DataFrame
A Pandas Dataframe object contains metadata.
"""
columns = ('ids', 'X', 'y', 'w', 'ids_shape', 'X_shape', 'y_shape',
'w_shape')
metadata_df = pd.DataFrame(metadata_entries, columns=columns)
return metadata_df
@staticmethod
def write_data_to_disk(
        data_dir: str,
        basename: str,
        tasks: np.ndarray,
        X: Optional[np.ndarray] = None,
        y: Optional[np.ndarray] = None,
        w: Optional[np.ndarray] = None,
        ids: Optional[np.ndarray] = None) -> List[Optional[str]]:
    """Static helper method to write data to disk.

    This helper method is used to write a shard of data to disk.

    Parameters
    ----------
    data_dir: str
        Data directory to write shard to.
    basename: str
        Basename for the shard in question.
    tasks: np.ndarray
        The names of the tasks in question.
    X: np.ndarray, optional (default None)
        The features array.
    y: np.ndarray, optional (default None)
        The labels array.
    w: np.ndarray, optional (default None)
        The weights array.
    ids: np.ndarray, optional (default None)
        The identifiers array.

    Returns
    -------
    List[Optional[str]]
        List with values `[out_ids, out_X, out_y, out_w, out_ids_shape,
        out_X_shape, out_y_shape, out_w_shape]` with filenames of locations to
        disk which these respective arrays were written.
    """

    def save_array(array, suffix):
        # The original repeated this save-and-record logic four times for
        # X/y/w/ids; the helper keeps all four paths identical.
        # Returns (filename, shape), or (None, None) when the array is absent.
        if array is None:
            return None, None
        out_name = "%s-%s.npy" % (basename, suffix)
        save_to_disk(array, os.path.join(data_dir, out_name))
        return out_name, array.shape

    out_X, out_X_shape = save_array(X, "X")
    out_y, out_y_shape = save_array(y, "y")
    out_w, out_w_shape = save_array(w, "w")
    out_ids, out_ids_shape = save_array(ids, "ids")
    # note that this corresponds to the _construct_metadata column order
    return [
        out_ids, out_X, out_y, out_w, out_ids_shape, out_X_shape, out_y_shape,
        out_w_shape
    ]
def save_to_disk(self) -> None:
    """Save dataset to disk.

    Persists the current metadata (tasks + per-shard file table) and
    invalidates the in-memory shard cache, since cached entries may no
    longer match what is on disk.
    """
    DiskDataset._save_metadata(self.metadata_df, self.data_dir, self.tasks)
    self._cached_shards = None
def move(self, new_data_dir: str,
         delete_if_exists: Optional[bool] = True) -> None:
    """Moves dataset to new directory.

    Parameters
    ----------
    new_data_dir: str
        The new directory name to move this to dataset to.
    delete_if_exists: bool, optional (default True)
        If this option is set, delete the destination directory if it exists
        before moving. This is set to True by default to be backwards compatible
        with behavior in earlier versions of DeepChem.

    Notes
    -----
    This is a stateful operation! `self.data_dir` will be moved into
    `new_data_dir`. If `delete_if_exists` is set to `True` (by default this is
    set `True`), then `new_data_dir` is deleted if it's a pre-existing
    directory.
    """
    if delete_if_exists and os.path.isdir(new_data_dir):
        shutil.rmtree(new_data_dir)
    shutil.move(self.data_dir, new_data_dir)
    # When the destination was wiped (or never existed), shutil.move renamed
    # data_dir itself to new_data_dir; otherwise data_dir was moved *inside*
    # new_data_dir and keeps its basename.
    if delete_if_exists:
        self.data_dir = new_data_dir
    else:
        self.data_dir = os.path.join(new_data_dir,
                                     os.path.basename(self.data_dir))
def copy(self, new_data_dir: str) -> "DiskDataset":
    """Copies dataset to new directory.

    Parameters
    ----------
    new_data_dir: str
        The new directory name to copy this to dataset to.

    Returns
    -------
    DiskDataset
        A copied DiskDataset object.

    Notes
    -----
    This is a stateful operation! Any data at `new_data_dir` will be deleted
    and `self.data_dir` will be deep copied into `new_data_dir`.
    """
    # Wipe any pre-existing destination so the copy is exact.
    if os.path.isdir(new_data_dir):
        shutil.rmtree(new_data_dir)
    shutil.copytree(self.data_dir, new_data_dir)
    # Load the copy from disk as an independent DiskDataset.
    return DiskDataset(new_data_dir)
def get_task_names(self) -> np.ndarray:
    """Gets learning tasks associated with this dataset."""
    # Task names were loaded from tasks.json by load_metadata().
    return self.tasks
def reshard(self, shard_size: int) -> None:
    """Reshards data to have specified shard size.

    Parameters
    ----------
    shard_size: int
        The size of shard.

    Examples
    --------
    >>> import deepchem as dc
    >>> import numpy as np
    >>> X = np.random.rand(100, 10)
    >>> d = dc.data.DiskDataset.from_numpy(X)
    >>> d.reshard(shard_size=10)
    >>> d.get_number_shards()
    10

    Notes
    -----
    If this `DiskDataset` is in `legacy_metadata` format, reshard will
    convert this dataset to have non-legacy metadata.
    """
    # Create temp directory to store resharded version
    reshard_dir = tempfile.mkdtemp()
    n_shards = self.get_number_shards()
    # Get correct shapes for y/w
    tasks = self.get_task_names()
    _, y_shape, w_shape, _ = self.get_shape()
    # NOTE(review): for 1-D y/w this produces (1, n_tasks); only the
    # trailing dims y_shape[1:] are used below, so the leading value looks
    # like a placeholder — confirm against get_shape()'s contract.
    if len(y_shape) == 1:
        y_shape = (len(y_shape), len(tasks))
    if len(w_shape) == 1:
        w_shape = (len(w_shape), len(tasks))

    # Write data in new shards
    def generator():
        # Accumulators carry leftover rows between input shards so output
        # shards come out exactly shard_size long (except possibly the last).
        X_next = np.zeros((0,) + self.get_data_shape())
        y_next = np.zeros((0,) + y_shape[1:])
        w_next = np.zeros((0,) + w_shape[1:])
        ids_next = np.zeros((0,), dtype=object)
        for shard_num, (X, y, w, ids) in enumerate(self.itershards()):
            logger.info("Resharding shard %d/%d" % (shard_num + 1, n_shards))
            # Handle shapes
            X = np.reshape(X, (len(X),) + self.get_data_shape())
            # Note that this means that DiskDataset resharding currently doesn't
            # work for datasets that aren't regression/classification.
            y = np.reshape(y, (len(y),) + y_shape[1:])
            w = np.reshape(w, (len(w),) + w_shape[1:])
            X_next = np.concatenate([X_next, X], axis=0)
            y_next = np.concatenate([y_next, y], axis=0)
            w_next = np.concatenate([w_next, w], axis=0)
            ids_next = np.concatenate([ids_next, ids])
            # Emit full shards as soon as enough rows have accumulated.
            while len(X_next) > shard_size:
                X_batch, X_next = X_next[:shard_size], X_next[shard_size:]
                y_batch, y_next = y_next[:shard_size], y_next[shard_size:]
                w_batch, w_next = w_next[:shard_size], w_next[shard_size:]
                ids_batch, ids_next = ids_next[:shard_size], ids_next[shard_size:]
                yield (X_batch, y_batch, w_batch, ids_batch)
        # Handle spillover from last shard
        yield (X_next, y_next, w_next, ids_next)

    resharded_dataset = DiskDataset.create_dataset(
        generator(), data_dir=reshard_dir, tasks=self.tasks)
    # Replace the on-disk contents with the freshly resharded copy.
    shutil.rmtree(self.data_dir)
    shutil.move(reshard_dir, self.data_dir)
    # Should have updated to non-legacy metadata
    self.legacy_metadata = False
    self.metadata_df = resharded_dataset.metadata_df
    # Note that this resets the cache internally
    self.save_to_disk()
def get_data_shape(self) -> Shape:
    """Gets array shape of datapoints in this dataset."""
    if not len(self.metadata_df):
        raise ValueError("No data in dataset.")
    if not self.legacy_metadata:
        # Current metadata records shapes directly; no disk read needed.
        X_shape, _, _, _ = self.get_shape()
        return X_shape[1:]
    # Legacy metadata stores no shape info: read one shard's X from disk
    # and drop the leading sample dimension.
    first_row = next(self.metadata_df.iterrows())[1]
    sample_X = load_from_disk(os.path.join(self.data_dir, first_row['X']))
    return np.shape(sample_X)[1:]
def get_shard_size(self) -> int:
    """Gets size of shards on disk."""
    if not len(self.metadata_df):
        raise ValueError("No data in dataset.")
    # Read the first shard's label array; its length is the shard size.
    first_row = next(self.metadata_df.iterrows())[1]
    sample_y = load_from_disk(os.path.join(self.data_dir, first_row['y']))
    return len(sample_y)
def _get_metadata_filename(self) -> Tuple[str, str]:
"""Get standard location for metadata file."""
metadata_filename = os.path.join(self.data_dir, "metadata.csv.gzip")
tasks_filename = os.path.join(self.data_dir, "tasks.json")
return tasks_filename, metadata_filename
def get_number_shards(self) -> int:
    """Returns the number of shards for this dataset."""
    # One metadata row is written per shard, so the row count is the
    # shard count.
    return len(self.metadata_df.index)
def itershards(self) -> Iterator[Batch]:
    """Return an object that iterates over all shards in dataset.

    Datasets are stored in sharded fashion on disk. Each call to next() for the
    generator defined by this function returns the data from a particular shard.
    The order of shards returned is guaranteed to remain fixed.

    Returns
    -------
    Iterator[Batch]
        Generator which yields tuples of four numpy arrays `(X, y, w, ids)`.
    """
    num_shards = self.get_number_shards()
    return (self.get_shard(shard_index) for shard_index in range(num_shards))
def iterbatches(self,
                batch_size: Optional[int] = None,
                epochs: int = 1,
                deterministic: bool = False,
                pad_batches: bool = False) -> Iterator[Batch]:
    """ Get an object that iterates over minibatches from the dataset.

    It is guaranteed that the number of batches returned is
    `math.ceil(len(dataset)/batch_size)`. Each minibatch is returned as
    a tuple of four numpy arrays: `(X, y, w, ids)`.

    Parameters
    ----------
    batch_size: int, optional (default None)
        Number of elements in a batch. If None, then it yields batches
        with size equal to the size of each individual shard.
    epoch: int, default 1
        Number of epochs to walk over dataset
    deterministic: bool, default False
        Whether or not we should should shuffle each shard before
        generating the batches. Note that this is only local in the
        sense that it does not ever mix between different shards.
    pad_batches: bool, default False
        Whether or not we should pad the last batch, globally, such that
        it has exactly batch_size elements.

    Returns
    -------
    Iterator[Batch]
        Generator which yields tuples of four numpy arrays `(X, y, w, ids)`.
    """
    # Iterating over all batches is just iterating over every shard.
    all_shards = list(range(self.get_number_shards()))
    return self._iterbatches_from_shards(all_shards, batch_size, epochs,
                                         deterministic, pad_batches)
def _iterbatches_from_shards(self,
                             shard_indices: Sequence[int],
                             batch_size: Optional[int] = None,
                             epochs: int = 1,
                             deterministic: bool = False,
                             pad_batches: bool = False) -> Iterator[Batch]:
    """Get an object that iterates over batches from a restricted set of shards.

    Shards are prefetched one ahead on a single worker thread while the
    current shard is being sliced into batches. When a shard does not divide
    evenly into `batch_size`, the leftover rows are carried over and
    prepended to the next shard so that (except possibly at the very end)
    every yielded batch has exactly `batch_size` rows.
    """

    def iterate(dataset: DiskDataset, batch_size: Optional[int], epochs: int):
        num_shards = len(shard_indices)
        if deterministic:
            shard_perm = np.arange(num_shards)

        # (ytz): Depending on the application, thread-based pools may be faster
        # than process based pools, since process based pools need to pickle/serialize
        # objects as an extra overhead. Also, as hideously as un-thread safe this looks,
        # we're actually protected by the GIL.
        # mp.dummy aliases ThreadPool to Pool
        pool = multiprocessing.dummy.Pool(1)

        if batch_size is None:
            num_global_batches = num_shards
        else:
            num_global_batches = math.ceil(dataset.get_shape()[0][0] / batch_size)

        for epoch in range(epochs):
            if not deterministic:
                # Shuffle shard order each epoch.
                shard_perm = np.random.permutation(num_shards)
            # Kick off the async load of the first shard.
            next_shard = pool.apply_async(dataset.get_shard,
                                          (shard_indices[shard_perm[0]],))
            cur_global_batch = 0
            cur_shard = 0
            carry = None  # leftover rows from the previous shard, if any

            while cur_global_batch < num_global_batches:
                X, y, w, ids = next_shard.get()
                # Prefetch the following shard while we slice this one.
                if cur_shard < num_shards - 1:
                    next_shard = pool.apply_async(
                        dataset.get_shard, (shard_indices[shard_perm[cur_shard + 1]],))
                elif epoch == epochs - 1:
                    # Last shard of the last epoch: no more work for the pool.
                    pool.close()

                if carry is not None:
                    # Prepend the leftover rows carried from the prior shard.
                    X = np.concatenate([carry[0], X], axis=0)
                    if y is not None:
                        y = np.concatenate([carry[1], y], axis=0)
                    if w is not None:
                        w = np.concatenate([carry[2], w], axis=0)
                    ids = np.concatenate([carry[3], ids], axis=0)
                    carry = None

                n_shard_samples = X.shape[0]
                cur_local_batch = 0
                if batch_size is None:
                    # batch_size None means each shard becomes one batch.
                    shard_batch_size = n_shard_samples
                else:
                    shard_batch_size = batch_size

                if n_shard_samples == 0:
                    # Empty shard: skip it entirely.
                    cur_shard += 1
                    if batch_size is None:
                        cur_global_batch += 1
                    continue

                num_local_batches = math.ceil(n_shard_samples / shard_batch_size)
                if not deterministic:
                    # Shuffle only within the shard (never across shards).
                    sample_perm = np.random.permutation(n_shard_samples)
                else:
                    sample_perm = np.arange(n_shard_samples)

                while cur_local_batch < num_local_batches:
                    start = cur_local_batch * shard_batch_size
                    end = min(n_shard_samples, (cur_local_batch + 1) * shard_batch_size)
                    indices = range(start, end)
                    perm_indices = sample_perm[indices]
                    X_b = X[perm_indices]

                    if y is not None:
                        y_b = y[perm_indices]
                    else:
                        y_b = None

                    if w is not None:
                        w_b = w[perm_indices]
                    else:
                        w_b = None

                    ids_b = ids[perm_indices]

                    assert len(X_b) <= shard_batch_size
                    if len(X_b) < shard_batch_size and cur_shard != num_shards - 1:
                        # Short batch mid-stream: carry it into the next shard
                        # rather than yielding an undersized batch.
                        assert carry is None
                        carry = [X_b, y_b, w_b, ids_b]
                    else:
                        # (ytz): this skips everything except possibly the last shard
                        if pad_batches:
                            (X_b, y_b, w_b, ids_b) = pad_batch(shard_batch_size, X_b, y_b,
                                                               w_b, ids_b)
                        yield X_b, y_b, w_b, ids_b
                        cur_global_batch += 1
                    cur_local_batch += 1
                cur_shard += 1

    return iterate(self, batch_size, epochs)
def itersamples(self) -> Iterator[Batch]:
"""Get an object that iterates over the samples in the dataset.
Returns
-------
Iterator[Batch]
Generator which yields tuples of four numpy arrays `(X, y, w, ids)`.
Examples
--------
>>> dataset = DiskDataset.from_numpy(np.ones((2,2)), np.ones((2,1)))
>>> for x, y, w, id in dataset.itersamples():
... print(x.tolist(), y.tolist(), w.tolist(), id)
[1.0, 1.0] [1.0] [1.0] 0
[1.0, 1.0] [1.0] [1.0] 1
"""
def iterate(dataset):
for (X_shard, y_shard, w_shard, ids_shard) in dataset.itershards():
n_samples = X_shard.shape[0]
for i in range(n_samples):
def sanitize(elem):
if elem is None:
return None
else:
return elem[i]
yield map(sanitize, [X_shard, y_shard, w_shard, ids_shard])
return iterate(self)
  def transform(self,
                transformer: "dc.trans.Transformer",
                parallel: bool = False,
                out_dir: Optional[str] = None,
                **args) -> "DiskDataset":
    """Construct a new dataset by applying a transformation to every sample in this dataset.

    The argument is a function that can be called as follows:
    >> newx, newy, neww = fn(x, y, w)

    It might be called only once with the whole dataset, or multiple times
    with different subsets of the data. Each time it is called, it should
    transform the samples and return the transformed data.

    Parameters
    ----------
    transformer: dc.trans.Transformer
      The transformation to apply to each sample in the dataset.
    parallel: bool, default False
      If True, use multiple processes to transform the dataset in parallel.
      Each shard is transformed by a separate worker process.
    out_dir: str, optional (default None)
      The directory to save the new dataset in. If this is omitted, a
      temporary directory is created automatically.

    Returns
    -------
    DiskDataset
      A newly constructed Dataset object
    """
    # NOTE(review): **args is accepted but never used in this body --
    # presumably kept for API compatibility with other Dataset.transform
    # implementations; confirm before removing.
    if out_dir is None:
      out_dir = tempfile.mkdtemp()
    tasks = self.get_task_names()
    n_shards = self.get_number_shards()
    time1 = time.time()
    if parallel:
      results = []
      pool = multiprocessing.Pool()
      for i in range(self.get_number_shards()):
        row = self.metadata_df.iloc[i]
        # Resolve on-disk file paths for this shard; y/w may be absent.
        X_file = os.path.join(self.data_dir, row['X'])
        if row['y'] is not None:
          y_file: Optional[str] = os.path.join(self.data_dir, row['y'])
        else:
          y_file = None
        if row['w'] is not None:
          w_file: Optional[str] = os.path.join(self.data_dir, row['w'])
        else:
          w_file = None
        ids_file = os.path.join(self.data_dir, row['ids'])
        # Each worker loads, transforms and writes its shard independently
        # and returns the metadata row for the new shard.
        results.append(
            pool.apply_async(DiskDataset._transform_shard,
                             (transformer, i, X_file, y_file, w_file, ids_file,
                              out_dir, tasks)))
      pool.close()
      # Gather per-shard metadata rows and assemble the new dataset.
      metadata_rows = [r.get() for r in results]
      metadata_df = DiskDataset._construct_metadata(metadata_rows)
      DiskDataset._save_metadata(metadata_df, out_dir, tasks)
      dataset = DiskDataset(out_dir)
    else:

      def generator():
        # Sequential path: transform shard-by-shard and stream results into
        # create_dataset.
        for shard_num, row in self.metadata_df.iterrows():
          logger.info("Transforming shard %d/%d" % (shard_num, n_shards))
          X, y, w, ids = self.get_shard(shard_num)
          newx, newy, neww, newids = transformer.transform_array(X, y, w, ids)
          yield (newx, newy, neww, newids)

      dataset = DiskDataset.create_dataset(
          generator(), data_dir=out_dir, tasks=tasks)
    time2 = time.time()
    logger.info("TIMING: transforming took %0.3f s" % (time2 - time1))
    return dataset
@staticmethod
def _transform_shard(transformer: "dc.trans.Transformer", shard_num: int,
X_file: str, y_file: str, w_file: str, ids_file: str,
out_dir: str, tasks: np.ndarray) -> List[Optional[str]]:
"""This is called by transform() to transform a single shard."""
X = None if X_file is None else np.array(load_from_disk(X_file))
y = None if y_file is None else np.array(load_from_disk(y_file))
w = None if w_file is None else np.array(load_from_disk(w_file))
ids = np.array(load_from_disk(ids_file))
X, y, w, ids = transformer.transform_array(X, y, w, ids)
basename = "shard-%d" % shard_num
return DiskDataset.write_data_to_disk(out_dir, basename, tasks, X, y, w,
ids)
def make_pytorch_dataset(self,
epochs: int = 1,
deterministic: bool = False,
batch_size: Optional[int] = None):
"""Create a torch.utils.data.IterableDataset that iterates over the data in this Dataset.
Each value returned by the Dataset's iterator is a tuple of (X, y, w, id)
containing the data for one batch, or for a single sample if batch_size is None.
Parameters
----------
epochs: int, default 1
The number of times to iterate over the Dataset
deterministic: bool, default False
If True, the data is produced in order. If False, a different
random permutation of the data is used for each epoch.
batch_size: int, optional (default None)
The number of samples to return in each batch. If None, each returned
value is a single sample.
Returns
-------
torch.utils.data.IterableDataset
`torch.utils.data.IterableDataset` that iterates over the data in
this dataset.
Notes
-----
This method requires PyTorch to be installed.
"""
try:
from deepchem.data.pytorch_datasets import _TorchDiskDataset
except:
raise ImportError("This method requires PyTorch to be installed.")
pytorch_ds = _TorchDiskDataset(
disk_dataset=self,
epochs=epochs,
deterministic=deterministic,
batch_size=batch_size)
return pytorch_ds
@staticmethod
def from_numpy(X: np.ndarray,
y: Optional[np.ndarray] = None,
w: Optional[np.ndarray] = None,
ids: Optional[np.ndarray] = None,
tasks: Optional[Sequence] = None,
data_dir: Optional[str] = None) -> "DiskDataset":
"""Creates a DiskDataset object from specified Numpy arrays.
Parameters
----------
X: np.ndarray
Feature array.
y: np.ndarray, optional (default None)
Labels array.
w: np.ndarray, optional (default None)
Weights array.
ids: np.ndarray, optional (default None)
Identifiers array.
tasks: Sequence, optional (default None)
Tasks in this dataset
data_dir: str, optional (default None)
The directory to write this dataset to. If none is specified, will use
a temporary directory instead.
Returns
-------
DiskDataset
A new `DiskDataset` constructed from the provided information.
"""
# To unify shape handling so from_numpy behaves like NumpyDataset, we just
# make a NumpyDataset under the hood
dataset = NumpyDataset(X, y, w, ids)
if tasks is None:
tasks = dataset.get_task_names()
# raw_data = (X, y, w, ids)
return DiskDataset.create_dataset(
[(dataset.X, dataset.y, dataset.w, dataset.ids)],
data_dir=data_dir,
tasks=tasks)
@staticmethod
def merge(datasets: Iterable["Dataset"],
merge_dir: Optional[str] = None) -> "DiskDataset":
"""Merges provided datasets into a merged dataset.
Parameters
----------
datasets: Iterable[Dataset]
List of datasets to merge.
merge_dir: str, optional (default None)
The new directory path to store the merged DiskDataset.
Returns
-------
DiskDataset
A merged DiskDataset.
"""
if merge_dir is not None:
if not os.path.exists(merge_dir):
os.makedirs(merge_dir)
else:
merge_dir = tempfile.mkdtemp()
# Protect against generator exhaustion
datasets = list(datasets)
# This ensures tasks are consistent for all datasets
tasks = []
for dataset in datasets:
try:
tasks.append(dataset.tasks) # type: ignore
except AttributeError:
pass
if tasks:
task_tuples = [tuple(task_list) for task_list in tasks]
if len(tasks) < len(datasets) or len(set(task_tuples)) > 1:
raise ValueError(
'Cannot merge datasets with different task specifications')
merge_tasks = tasks[0]
else:
merge_tasks = []
def generator():
for ind, dataset in enumerate(datasets):
logger.info("Merging in dataset %d/%d" % (ind, len(datasets)))
X, y, w, ids = (dataset.X, dataset.y, dataset.w, dataset.ids)
yield (X, y, w, ids)
return DiskDataset.create_dataset(
generator(), data_dir=merge_dir, tasks=merge_tasks)
def subset(self, shard_nums: Sequence[int],
subset_dir: Optional[str] = None) -> "DiskDataset":
"""Creates a subset of the original dataset on disk.
Parameters
----------
shard_nums: Sequence[int]
The indices of shard to extract from the original DiskDataset.
subset_dir: str, optional (default None)
The new directory path to store the subset DiskDataset.
Returns
-------
DiskDataset
A subset DiskDataset.
"""
if subset_dir is not None:
if not os.path.exists(subset_dir):
os.makedirs(subset_dir)
else:
subset_dir = tempfile.mkdtemp()
tasks = self.get_task_names()
def generator():
for shard_num, row in self.metadata_df.iterrows():
if shard_num not in shard_nums:
continue
X, y, w, ids = self.get_shard(shard_num)
yield (X, y, w, ids)
return DiskDataset.create_dataset(
generator(), data_dir=subset_dir, tasks=tasks)
def sparse_shuffle(self) -> None:
"""Shuffling that exploits data sparsity to shuffle large datasets.
If feature vectors are sparse, say circular fingerprints or any other
representation that contains few nonzero values, it can be possible to
exploit the sparsity of the vector to simplify shuffles. This method
implements a sparse shuffle by compressing sparse feature vectors down
into a compressed representation, then shuffles this compressed dataset in
memory and writes the results to disk.
Notes
-----
This method only works for 1-dimensional feature vectors (does not work
for tensorial featurizations). Note that this shuffle is performed in
place.
"""
time1 = time.time()
shard_size = self.get_shard_size()
num_shards = self.get_number_shards()
X_sparses: List[np.ndarray] = []
ys: List[np.ndarray] = []
ws: List[np.ndarray] = []
ids: List[np.ndarray] = []
num_features = -1
for i in range(num_shards):
logger.info("Sparsifying shard %d/%d" % (i, num_shards))
(X_s, y_s, w_s, ids_s) = self.get_shard(i)
if num_features == -1:
num_features = X_s.shape[1]
X_sparse = sparsify_features(X_s)
X_sparses, ys, ws, ids = (X_sparses + [X_sparse], ys + [y_s], ws + [w_s],
ids + [np.atleast_1d(np.squeeze(ids_s))])
# Get full dataset in memory
(X_sparse, y, w, ids) = (np.vstack(X_sparses), np.vstack(ys), np.vstack(ws),
np.concatenate(ids))
# Shuffle in memory
num_samples = len(X_sparse)
permutation = np.random.permutation(num_samples)
X_sparse, y, w, ids = (X_sparse[permutation], y[permutation],
w[permutation], ids[permutation])
# Write shuffled shards out to disk
for i in range(num_shards):
logger.info("Sparse shuffling shard %d/%d" % (i, num_shards))
start, stop = i * shard_size, (i + 1) * shard_size
(X_sparse_s, y_s, w_s, ids_s) = (X_sparse[start:stop], y[start:stop],
w[start:stop], ids[start:stop])
X_s = densify_features(X_sparse_s, num_features)
self.set_shard(i, X_s, y_s, w_s, ids_s)
time2 = time.time()
logger.info("TIMING: sparse_shuffle took %0.3f s" % (time2 - time1))
def complete_shuffle(self, data_dir: Optional[str] = None) -> Dataset:
"""Completely shuffle across all data, across all shards.
Notes
-----
The algorithm used for this complete shuffle is O(N^2) where N is the
number of shards. It simply constructs each shard of the output dataset
one at a time. Since the complete shuffle can take a long time, it's
useful to watch the logging output. Each shuffled shard is constructed
using select() which logs as it selects from each original shard. This
will results in O(N^2) logging statements, one for each extraction of
shuffled shard i's contributions from original shard j.
Parameters
----------
data_dir: Optional[str], (default None)
Directory to write the shuffled dataset to. If none is specified a
temporary directory will be used.
Returns
-------
DiskDataset
A DiskDataset whose data is a randomly shuffled version of this dataset.
"""
N = len(self)
perm = np.random.permutation(N)
shard_size = self.get_shard_size()
return self.select(perm, data_dir, shard_size)
def shuffle_each_shard(self,
shard_basenames: Optional[List[str]] = None) -> None:
"""Shuffles elements within each shard of the dataset.
Parameters
----------
shard_basenames: List[str], optional (default None)
The basenames for each shard. If this isn't specified, will assume the
basenames of form "shard-i" used by `create_dataset` and `reshard`.
"""
tasks = self.get_task_names()
# Shuffle the arrays corresponding to each row in metadata_df
n_rows = len(self.metadata_df.index)
if shard_basenames is not None:
if len(shard_basenames) != n_rows:
raise ValueError(
"shard_basenames must provide a basename for each shard in this DiskDataset."
)
else:
shard_basenames = ["shard-%d" % shard_num for shard_num in range(n_rows)]
for i, basename in zip(range(n_rows), shard_basenames):
logger.info("Shuffling shard %d/%d" % (i, n_rows))
X, y, w, ids = self.get_shard(i)
n = X.shape[0]
permutation = np.random.permutation(n)
X, y, w, ids = (X[permutation], y[permutation], w[permutation],
ids[permutation])
DiskDataset.write_data_to_disk(self.data_dir, basename, tasks, X, y, w,
ids)
# Reset cache
self._cached_shards = None
def shuffle_shards(self) -> None:
"""Shuffles the order of the shards for this dataset."""
metadata_rows = self.metadata_df.values.tolist()
random.shuffle(metadata_rows)
self.metadata_df = DiskDataset._construct_metadata(metadata_rows)
self.save_to_disk()
  def get_shard(self, i: int) -> Batch:
    """Retrieves data for the i-th shard from disk.

    Results are served from (and added to) an in-memory shard cache when
    possible; see the inline caching note below.

    Parameters
    ----------
    i: int
      Shard index for shard to retrieve batch from.

    Returns
    -------
    Batch
      A batch data for i-th shard, as a tuple `(X, y, w, ids)`. `y` and `w`
      are None when the metadata has no corresponding entry.
    """
    # See if we have a cached copy of this shard.
    if self._cached_shards is None:
      # Lazily initialize one cache slot per shard plus a byte counter.
      self._cached_shards = [None] * self.get_number_shards()
      self._cache_used = 0
    if self._cached_shards[i] is not None:
      shard = self._cached_shards[i]
      return (shard.X, shard.y, shard.w, shard.ids)
    # We don't, so load it from disk.
    row = self.metadata_df.iloc[i]
    X = np.array(load_from_disk(os.path.join(self.data_dir, row['X'])))
    if row['y'] is not None:
      y = np.array(load_from_disk(os.path.join(self.data_dir, row['y'])))
    else:
      y = None
    if row['w'] is not None:
      # TODO (ytz): Under what condition does this exist but the file itself doesn't?
      w_filename = os.path.join(self.data_dir, row['w'])
      if os.path.exists(w_filename):
        w = np.array(load_from_disk(w_filename))
      else:
        # Metadata claims weights exist but the file is missing: fall back
        # to unit weights shaped to match the labels.
        if len(y.shape) == 1:
          w = np.ones(y.shape[0], np.float32)
        else:
          w = np.ones((y.shape[0], 1), np.float32)
    else:
      w = None
    ids = np.array(
        load_from_disk(os.path.join(self.data_dir, row['ids'])), dtype=object)
    # Try to cache this shard for later use. Since the normal usage pattern is
    # a series of passes through the whole dataset, there's no point doing
    # anything fancy. It never makes sense to evict another shard from the
    # cache to make room for this one, because we'll probably want that other
    # shard again before the next time we want this one. So just cache as many
    # as we can and then stop.
    shard = _Shard(X, y, w, ids)
    shard_size = X.nbytes + ids.nbytes
    if y is not None:
      shard_size += y.nbytes
    if w is not None:
      shard_size += w.nbytes
    if self._cache_used + shard_size < self._memory_cache_size:
      self._cached_shards[i] = shard
      self._cache_used += shard_size
    return (shard.X, shard.y, shard.w, shard.ids)
def get_shard_ids(self, i: int) -> np.ndarray:
"""Retrieves the list of IDs for the i-th shard from disk.
Parameters
----------
i: int
Shard index for shard to retrieve weights from.
Returns
-------
np.ndarray
A numpy array of ids for i-th shard.
"""
if self._cached_shards is not None and self._cached_shards[i] is not None:
return self._cached_shards[i].ids
row = self.metadata_df.iloc[i]
return np.array(
load_from_disk(os.path.join(self.data_dir, row['ids'])), dtype=object)
def get_shard_y(self, i: int) -> np.ndarray:
"""Retrieves the labels for the i-th shard from disk.
Parameters
----------
i: int
Shard index for shard to retrieve labels from.
Returns
-------
np.ndarray
A numpy array of labels for i-th shard.
"""
if self._cached_shards is not None and self._cached_shards[i] is not None:
return self._cached_shards[i].y
row = self.metadata_df.iloc[i]
return np.array(
load_from_disk(os.path.join(self.data_dir, row['y'])), dtype=object)
def get_shard_w(self, i: int) -> np.ndarray:
"""Retrieves the weights for the i-th shard from disk.
Parameters
----------
i: int
Shard index for shard to retrieve weights from.
Returns
-------
np.ndarray
A numpy array of weights for i-th shard.
"""
if self._cached_shards is not None and self._cached_shards[i] is not None:
return self._cached_shards[i].w
row = self.metadata_df.iloc[i]
return np.array(
load_from_disk(os.path.join(self.data_dir, row['w'])), dtype=object)
def add_shard(self,
X: np.ndarray,
y: Optional[np.ndarray] = None,
w: Optional[np.ndarray] = None,
ids: Optional[np.ndarray] = None) -> None:
"""Adds a data shard.
Parameters
----------
X: np.ndarray
Feature array.
y: np.ndarray, optioanl (default None)
Labels array.
w: np.ndarray, optioanl (default None)
Weights array.
ids: np.ndarray, optioanl (default None)
Identifiers array.
"""
metadata_rows = self.metadata_df.values.tolist()
shard_num = len(metadata_rows)
basename = "shard-%d" % shard_num
tasks = self.get_task_names()
metadata_rows.append(
DiskDataset.write_data_to_disk(self.data_dir, basename, tasks, X, y, w,
ids))
self.metadata_df = DiskDataset._construct_metadata(metadata_rows)
self.save_to_disk()
def set_shard(self,
shard_num: int,
X: np.ndarray,
y: Optional[np.ndarray] = None,
w: Optional[np.ndarray] = None,
ids: Optional[np.ndarray] = None) -> None:
"""Writes data shard to disk.
Parameters
----------
shard_num: int
Shard index for shard to set new data.
X: np.ndarray
Feature array.
y: np.ndarray, optioanl (default None)
Labels array.
w: np.ndarray, optioanl (default None)
Weights array.
ids: np.ndarray, optioanl (default None)
Identifiers array.
"""
basename = "shard-%d" % shard_num
tasks = self.get_task_names()
DiskDataset.write_data_to_disk(self.data_dir, basename, tasks, X, y, w, ids)
self._cached_shards = None
  def select(self,
             indices: Sequence[int],
             select_dir: Optional[str] = None,
             select_shard_size: Optional[int] = None,
             output_numpy_dataset: Optional[bool] = False) -> Dataset:
    """Creates a new dataset from a selection of indices from self.

    Examples
    --------
    >>> import numpy as np
    >>> X = np.random.rand(10, 10)
    >>> dataset = dc.data.DiskDataset.from_numpy(X)
    >>> selected = dataset.select([1, 3, 4])
    >>> len(selected)
    3

    Parameters
    ----------
    indices: Sequence
      List of indices to select.
    select_dir: str, optional (default None)
      Path to new directory that the selected indices will be copied to.
    select_shard_size: Optional[int], (default None)
      If specified, the shard-size to use for output selected `DiskDataset`.
      If not output_numpy_dataset, then this is set to this current dataset's
      shard size if not manually specified.
    output_numpy_dataset: Optional[bool], (default False)
      If True, output an in-memory `NumpyDataset` instead of a `DiskDataset`.
      Note that `select_dir` and `select_shard_size` must be `None` if this
      is `True`

    Returns
    -------
    Dataset
      A dataset containing the selected samples. The default dataset is `DiskDataset`.
      If `output_numpy_dataset` is True, the dataset is `NumpyDataset`.

    Raises
    ------
    ValueError
      If `output_numpy_dataset` is True while `select_dir` or
      `select_shard_size` is also given.
    """
    if output_numpy_dataset and (select_dir is not None or
                                 select_shard_size is not None):
      raise ValueError(
          "If output_numpy_dataset is set, then select_dir and select_shard_size must both be None"
      )
    if output_numpy_dataset:
      # When outputting a NumpyDataset, we have 1 in-memory shard
      select_shard_size = len(indices)
    else:
      if select_dir is not None:
        if not os.path.exists(select_dir):
          os.makedirs(select_dir)
      else:
        select_dir = tempfile.mkdtemp()
      if select_shard_size is None:
        select_shard_size = self.get_shard_size()
    # Handle edge case with empty indices
    if not len(indices):
      if not output_numpy_dataset:
        return DiskDataset.create_dataset([], data_dir=select_dir)
      else:
        return NumpyDataset(
            np.array([]), np.array([]), np.array([]), np.array([]))
    N = len(indices)
    indices = np.array(indices).astype(int)
    tasks = self.get_task_names()
    n_shards = self.get_number_shards()

    # We use two loops here. The outer while loop walks over selection shards
    # (the chunks of the indices to select that should go into separate
    # output shards), while the inner for loop walks over the shards in the
    # source datasets to select out the shard indices from that source shard

    def generator():
      start = 0
      select_shard_num = 0
      while start < N:
        logger.info(
            "Constructing selection output shard %d" % (select_shard_num + 1))
        end = min(start + select_shard_size, N)
        select_shard_indices = indices[start:end]
        # Sorting the requested indices lets us sweep the source shards
        # once, in order; the original request order is restored below.
        sorted_indices = np.array(sorted(select_shard_indices)).astype(int)

        Xs, ys, ws, ids_s = [], [], [], []
        # `count` tracks the global offset of the current source shard;
        # `indices_count` tracks how many sorted indices are consumed.
        count, indices_count = 0, 0
        for shard_num in range(self.get_number_shards()):
          logger.info(
              "Selecting from input shard %d/%d for selection output shard %d" %
              (shard_num + 1, n_shards, select_shard_num + 1))
          if self.legacy_metadata:
            ids = self.get_shard_ids(shard_num)
            shard_len = len(ids)
          else:
            shard_X_shape, _, _, _ = self._get_shard_shape(shard_num)
            if len(shard_X_shape) > 0:
              shard_len = shard_X_shape[0]
            else:
              shard_len = 0
          # Find indices which rest in this shard
          num_shard_elts = 0
          while sorted_indices[indices_count +
                               num_shard_elts] < count + shard_len:
            num_shard_elts += 1
            if (indices_count + num_shard_elts) >= len(sorted_indices):
              break
          if num_shard_elts == 0:
            count += shard_len
            continue
          else:
            X, y, w, ids = self.get_shard(shard_num)
            # Need to offset indices to fit within shard_size
            shard_inds = sorted_indices[indices_count:indices_count +
                                        num_shard_elts] - count
            # Handle empty case where no data from this shard needed
            X_sel = X[shard_inds]
            # Handle the case of datasets with y/w missing
            if y is not None:
              y_sel = y[shard_inds]
            else:
              y_sel = None
            if w is not None:
              w_sel = w[shard_inds]
            else:
              w_sel = None
            ids_sel = ids[shard_inds]
            Xs.append(X_sel)
            ys.append(y_sel)
            ws.append(w_sel)
            ids_s.append(ids_sel)
            indices_count += num_shard_elts
            count += shard_len
            # Break if all indices have been used up already
            if indices_count >= len(sorted_indices):
              break
        # Note these will be in the sorted order
        X = np.concatenate(Xs, axis=0)
        y = np.concatenate(ys, axis=0)
        w = np.concatenate(ws, axis=0)
        ids = np.concatenate(ids_s, axis=0)
        # We need to recover the original ordering. We can do this by using
        # np.where to find the locatios of the original indices in the sorted
        # indices.
        reverted_indices = np.array(
            # We know there's only one match for np.where since this is a
            # permutation, so the [0][0] pulls out the exact match location.
            [
                np.where(sorted_indices == orig_index)[0][0]
                for orig_index in select_shard_indices
            ])
        X, y, w, ids = X[reverted_indices], y[reverted_indices], w[
            reverted_indices], ids[reverted_indices]
        yield (X, y, w, ids)
        start = end
        select_shard_num += 1

    if not output_numpy_dataset:
      return DiskDataset.create_dataset(
          generator(), data_dir=select_dir, tasks=tasks)
    else:
      # Single in-memory shard by construction (select_shard_size == N).
      X, y, w, ids = next(generator())
      return NumpyDataset(X, y, w, ids)
@property
def ids(self) -> np.ndarray:
"""Get the ids vector for this dataset as a single numpy array."""
if len(self) == 0:
return np.array([])
ids = []
for i in range(self.get_number_shards()):
ids.append(np.atleast_1d(np.squeeze(self.get_shard_ids(i))))
return np.concatenate(ids)
@property
def X(self) -> np.ndarray:
"""Get the X vector for this dataset as a single numpy array."""
Xs = []
one_dimensional = False
for (X_b, _, _, _) in self.itershards():
Xs.append(X_b)
if len(X_b.shape) == 1:
one_dimensional = True
if not one_dimensional:
return np.vstack(Xs)
else:
return np.concatenate(Xs)
@property
def y(self) -> np.ndarray:
"""Get the y vector for this dataset as a single numpy array."""
if len(self) == 0:
return np.array([])
ys = []
one_dimensional = False
for i in range(self.get_number_shards()):
y_b = self.get_shard_y(i)
ys.append(y_b)
if len(y_b.shape) == 1:
one_dimensional = True
if not one_dimensional:
return np.vstack(ys)
else:
return np.concatenate(ys)
@property
def w(self) -> np.ndarray:
"""Get the weight vector for this dataset as a single numpy array."""
ws = []
one_dimensional = False
for i in range(self.get_number_shards()):
w_b = self.get_shard_w(i)
ws.append(w_b)
if len(w_b.shape) == 1:
one_dimensional = True
if not one_dimensional:
return np.vstack(ws)
else:
return np.concatenate(ws)
  @property
  def memory_cache_size(self) -> int:
    """Get the size of the memory cache for this dataset, measured in bytes."""
    return self._memory_cache_size

  @memory_cache_size.setter
  def memory_cache_size(self, size: int) -> None:
    """Set the size of the memory cache for this dataset, measured in bytes."""
    self._memory_cache_size = size
    # If the cache already exceeds the new limit, drop it entirely.
    if self._cache_used > size:
      self._cached_shards = None
def __len__(self) -> int:
"""Finds number of elements in dataset."""
total = 0
for _, row in self.metadata_df.iterrows():
y = load_from_disk(os.path.join(self.data_dir, row['ids']))
total += len(y)
return total
  def _get_shard_shape(self,
                       shard_num: int) -> Tuple[Shape, Shape, Shape, Shape]:
    """Finds the shape of the specified shard.

    Reads the `*_shape` columns of the metadata (new metadata format only)
    and parses them back into tuples.

    Parameters
    ----------
    shard_num: int
      Index of the shard to look up.

    Returns
    -------
    Tuple[Shape, Shape, Shape, Shape]
      Shapes of the X, y, w and ids arrays of the shard. Empty tuples stand
      in for missing components.

    Raises
    ------
    ValueError
      If this dataset uses the legacy metadata format (which carries no
      shape columns).
    """
    if self.legacy_metadata:
      raise ValueError(
          "This function requires the new metadata format to be called. Please reshard this dataset by calling the reshard() method."
      )
    n_tasks = len(self.get_task_names())
    row = self.metadata_df.iloc[shard_num]
    # Shapes are stored as strings in the metadata and parsed back into
    # tuples via make_tuple.
    if row['X_shape'] is not None:
      shard_X_shape = make_tuple(str(row['X_shape']))
    else:
      shard_X_shape = tuple()
    if n_tasks > 0:
      if row['y_shape'] is not None:
        shard_y_shape = make_tuple(str(row['y_shape']))
      else:
        shard_y_shape = tuple()
      if row['w_shape'] is not None:
        shard_w_shape = make_tuple(str(row['w_shape']))
      else:
        shard_w_shape = tuple()
    else:
      # Task-less datasets carry no labels or weights.
      shard_y_shape = tuple()
      shard_w_shape = tuple()
    if row['ids_shape'] is not None:
      shard_ids_shape = make_tuple(str(row['ids_shape']))
    else:
      shard_ids_shape = tuple()
    # Round-trip through np.array to normalize entries before returning
    # plain tuples.
    X_shape, y_shape, w_shape, ids_shape = tuple(
        np.array(shard_X_shape)), tuple(np.array(shard_y_shape)), tuple(
            np.array(shard_w_shape)), tuple(np.array(shard_ids_shape))
    return X_shape, y_shape, w_shape, ids_shape
  def get_shape(self) -> Tuple[Shape, Shape, Shape, Shape]:
    """Finds shape of dataset.

    Returns four tuples, giving the shape of the X, y, w, and ids arrays.
    """
    n_tasks = len(self.get_task_names())
    n_rows = len(self.metadata_df.index)
    # If shape metadata is available use it to directly compute shape from
    # metadata
    if not self.legacy_metadata:
      for shard_num in range(n_rows):
        shard_X_shape, shard_y_shape, shard_w_shape, shard_ids_shape = self._get_shard_shape(
            shard_num)
        if shard_num == 0:
          # Initialize accumulators from the first shard's shapes.
          X_shape, y_shape, w_shape, ids_shape = np.array(
              shard_X_shape), np.array(shard_y_shape), np.array(
                  shard_w_shape), np.array(shard_ids_shape)
        else:
          # Only the leading (sample-count) dimension grows across shards.
          X_shape[0] += shard_X_shape[0]
          if n_tasks > 0:
            y_shape[0] += shard_y_shape[0]
            w_shape[0] += shard_w_shape[0]
          ids_shape[0] += shard_ids_shape[0]
      return tuple(X_shape), tuple(y_shape), tuple(w_shape), tuple(ids_shape)
    # In absense of shape metadata, fall back to loading data from disk to
    # find shape.
    else:
      for shard_num, (X, y, w, ids) in enumerate(self.itershards()):
        if shard_num == 0:
          X_shape = np.array(X.shape)
          if n_tasks > 0:
            y_shape = np.array(y.shape)
            w_shape = np.array(w.shape)
          else:
            # Task-less datasets report empty y/w shapes.
            y_shape = tuple()
            w_shape = tuple()
          ids_shape = np.array(ids.shape)
        else:
          # Accumulate only the sample-count dimension across shards.
          X_shape[0] += np.array(X.shape)[0]
          if n_tasks > 0:
            y_shape[0] += np.array(y.shape)[0]
            w_shape[0] += np.array(w.shape)[0]
          ids_shape[0] += np.array(ids.shape)[0]
      return tuple(X_shape), tuple(y_shape), tuple(w_shape), tuple(ids_shape)
  def get_label_means(self) -> pd.DataFrame:
    """Return pandas series of label means."""
    # NOTE(review): this selects a single metadata column, which yields a
    # pd.Series -- the pd.DataFrame annotation looks inaccurate; confirm
    # against callers before changing it.
    return self.metadata_df["y_means"]
  def get_label_stds(self) -> pd.DataFrame:
    """Return pandas series of label stds."""
    # NOTE(review): single-column selection yields a pd.Series -- the
    # pd.DataFrame annotation looks inaccurate; confirm before changing.
    return self.metadata_df["y_stds"]
class ImageDataset(Dataset):
"""A Dataset that loads data from image files on disk."""
  def __init__(self,
               X: Union[np.ndarray, List[str]],
               y: Optional[Union[np.ndarray, List[str]]],
               w: Optional[np.ndarray] = None,
               ids: Optional[np.ndarray] = None) -> None:
    """Create a dataset whose X and/or y array is defined by image files on disk.

    Parameters
    ----------
    X: np.ndarray or List[str]
      The dataset's input data. This may be either a single NumPy
      array directly containing the data, or a list containing the
      paths to the image files
    y: np.ndarray or List[str]
      The dataset's labels. This may be either a single NumPy array
      directly containing the data, or a list containing the paths to
      the image files
    w: np.ndarray, optional (default None)
      a 1D or 2D array containing the weights for each sample or
      sample/task pair
    ids: np.ndarray, optional (default None)
      the sample IDs
    """
    n_samples = len(X)
    if y is None:
      # With no labels supplied, substitute all-zero labels of matching
      # length.
      y = np.zeros((n_samples,))
    self._X_shape = self._find_array_shape(X)
    self._y_shape = self._find_array_shape(y)
    if w is None:
      # Default to unit weights, shaped to match the labels.
      if len(self._y_shape) == 0:
        # Case n_samples should be 1
        if n_samples != 1:
          raise ValueError("y can only be a scalar if n_samples == 1")
        w = np.ones_like(y)
      elif len(self._y_shape) == 1:
        w = np.ones(self._y_shape[0], np.float32)
      else:
        w = np.ones((self._y_shape[0], 1), np.float32)
    if ids is None:
      # Fall back to file paths (when either X or y is a path list) as
      # identifiers, else use positional indices.
      if not isinstance(X, np.ndarray):
        ids = X
      elif not isinstance(y, np.ndarray):
        ids = y
      else:
        ids = np.arange(n_samples)
    # X/y may remain lists of filenames; images are loaded lazily on access.
    self._X = X
    self._y = y
    self._w: np.ndarray = w
    self._ids = np.array(ids, dtype=object)
def _find_array_shape(self, array: Sequence) -> Shape:
if isinstance(array, np.ndarray):
return array.shape
image_shape = load_image_files([array[0]]).shape[1:]
return np.concatenate([[len(array)], image_shape])
  def __len__(self) -> int:
    """Get the number of elements in the dataset."""
    # The leading dimension of X's shape is the sample count.
    return self._X_shape[0]
def get_shape(self) -> Tuple[Shape, Shape, Shape, Shape]:
"""Get the shape of the dataset.
Returns four tuples, giving the shape of the X, y, w, and ids arrays.
"""
return self._X_shape, self._y_shape, self._w.shape, self._ids.shape
def get_task_names(self) -> np.ndarray:
"""Get the names of the tasks associated with this dataset."""
if len(self._y_shape) < 2:
return np.array([0])
return np.arange(self._y_shape[1])
@property
def X(self) -> np.ndarray:
"""Get the X vector for this dataset as a single numpy array."""
if isinstance(self._X, np.ndarray):
return self._X
return load_image_files(self._X)
@property
def y(self) -> np.ndarray:
"""Get the y vector for this dataset as a single numpy array."""
if isinstance(self._y, np.ndarray):
return self._y
return load_image_files(self._y)
  @property
  def ids(self) -> np.ndarray:
    """Get the ids vector for this dataset as a single numpy array."""
    # Already normalized to an object-dtype array by __init__.
    return self._ids
  @property
  def w(self) -> np.ndarray:
    """Get the weight vector for this dataset as a single numpy array."""
    # Filled with unit weights by __init__ when not provided by the caller.
    return self._w
def iterbatches(self,
batch_size: Optional[int] = None,
epochs: int = 1,
deterministic: bool = False,
pad_batches: bool = False) -> Iterator[Batch]:
"""Get an object that iterates over minibatches from the dataset.
Each minibatch is returned as a tuple of four numpy arrays:
`(X, y, w, ids)`.
Parameters
----------
batch_size: int, optional (default None)
Number of elements in each batch.
epochs: int, default 1
Number of epochs to walk over dataset.
deterministic: bool, default False
If True, follow deterministic order.
pad_batches: bool, default False
If True, pad each batch to `batch_size`.
Returns
-------
Iterator[Batch]
Generator which yields tuples of four numpy arrays `(X, y, w, ids)`.
"""
def iterate(dataset, batch_size, epochs, deterministic, pad_batches):
n_samples = dataset._X_shape[0]
if deterministic:
sample_perm = np.arange(n_samples)
if batch_size is None:
batch_size = n_samples
for epoch in range(epochs):
if not deterministic:
sample_perm = np.random.permutation(n_samples)
batch_idx = 0
num_batches = np.math.ceil(n_samples / batch_size)
while batch_idx < num_batches:
start = batch_idx * batch_size
end = min(n_samples, (batch_idx + 1) * batch_size)
indices = range(start, end)
perm_indices = sample_perm[indices]
if isinstance(dataset._X, np.ndarray):
X_batch = dataset._X[perm_indices]
else:
X_batch = load_image_files([dataset._X[i] for i in perm_indices])
if isinstance(dataset._y, np.ndarray):
y_batch = dataset._y[perm_indices]
else:
y_batch = load_image_files([dataset._y[i] for i in perm_indices])
w_batch = dataset._w[perm_indices]
ids_batch = dataset._ids[perm_indices]
if pad_batches:
(X_batch, y_batch, w_batch, ids_batch) = pad_batch(
batch_size, X_batch, y_batch, w_batch, ids_batch)
batch_idx += 1
yield (X_batch, y_batch, w_batch, ids_batch)
return iterate(self, batch_size, epochs, deterministic, pad_batches)
def _get_image(self, array: Union[np.ndarray, List[str]],
indices: Union[int, np.ndarray]) -> np.ndarray:
"""Method for loading an image
Parameters
----------
array: Union[np.ndarray, List[str]]
A numpy array which contains images or List of image filenames
indices: Union[int, np.ndarray]
Index you want to get the images
Returns
-------
np.ndarray
Loaded images
"""
if isinstance(array, np.ndarray):
return array[indices]
if isinstance(indices, np.ndarray):
return load_image_files([array[i] for i in indices])
return load_image_files([array[indices]])[0]
def itersamples(self) -> Iterator[Batch]:
"""Get an object that iterates over the samples in the dataset.
Returns
-------
Iterator[Batch]
Iterator which yields tuples of four numpy arrays `(X, y, w, ids)`.
"""
n_samples = self._X_shape[0]
return ((self._get_image(self._X, i), self._get_image(self._y, i),
self._w[i], self._ids[i]) for i in range(n_samples))
  def transform(
      self,
      transformer: "dc.trans.Transformer",
      **args,
  ) -> "NumpyDataset":
    """Construct a new dataset by applying a transformation to every sample in this dataset.

    The transformer is invoked through its `transform_array` method, which
    returns four arrays:

    >> newx, newy, neww, newids = transformer.transform_array(x, y, w, ids)

    It might be called only once with the whole dataset, or multiple times with
    different subsets of the data. Each time it is called, it should transform
    the samples and return the transformed data.

    Parameters
    ----------
    transformer: dc.trans.Transformer
      The transformation to apply to each sample in the dataset

    Returns
    -------
    NumpyDataset
      A newly constructed NumpyDataset object
    """
    # Note: the transformed result is materialized fully in memory as a
    # NumpyDataset, even when this dataset is filename-backed.
    newx, newy, neww, newids = transformer.transform_array(
        self.X, self.y, self.w, self.ids)
    return NumpyDataset(newx, newy, neww, newids)
def select(self, indices: Sequence[int],
select_dir: Optional[str] = None) -> "ImageDataset":
"""Creates a new dataset from a selection of indices from self.
Parameters
----------
indices: Sequence
List of indices to select.
select_dir: str, optional (default None)
Used to provide same API as `DiskDataset`. Ignored since
`ImageDataset` is purely in-memory.
Returns
-------
ImageDataset
A selected ImageDataset object
"""
if isinstance(self._X, np.ndarray):
X = self._X[indices]
else:
X = [self._X[i] for i in indices]
if isinstance(self._y, np.ndarray):
y = self._y[indices]
else:
y = [self._y[i] for i in indices]
w = self._w[indices]
ids = self._ids[indices]
return ImageDataset(X, y, w, ids)
def make_pytorch_dataset(self,
epochs: int = 1,
deterministic: bool = False,
batch_size: Optional[int] = None):
"""Create a torch.utils.data.IterableDataset that iterates over the data in this Dataset.
Each value returned by the Dataset's iterator is a tuple of (X, y, w, id)
containing the data for one batch, or for a single sample if batch_size is None.
Parameters
----------
epochs: int, default 1
The number of times to iterate over the Dataset.
deterministic: bool, default False
If True, the data is produced in order. If False, a different
random permutation of the data is used for each epoch.
batch_size: int, optional (default None)
The number of samples to return in each batch. If None, each returned
value is a single sample.
Returns
-------
torch.utils.data.IterableDataset
`torch.utils.data.IterableDataset` that iterates over the data in
this dataset.
Notes
-----
This method requires PyTorch to be installed.
"""
try:
from deepchem.data.pytorch_datasets import _TorchImageDataset
except:
raise ValueError("This method requires PyTorch to be installed.")
pytorch_ds = _TorchImageDataset(
image_dataset=self,
epochs=epochs,
deterministic=deterministic,
batch_size=batch_size)
return pytorch_ds
class Databag(object):
  """A utility class to iterate through multiple datasets together.

  A `Databag` is useful when you have multiple datasets that you want
  to iterate in locksteps. This might be easiest to grasp with a
  simple code example.

  >>> ones_dataset = NumpyDataset(X=np.ones((5, 3)))
  >>> zeros_dataset = NumpyDataset(X=np.zeros((5, 3)))
  >>> databag = Databag({"ones": ones_dataset, "zeros": zeros_dataset})
  >>> for sample_dict in databag.iterbatches(batch_size=1):
  ...   print(sample_dict)
  {'ones': array([[1., 1., 1.]]), 'zeros': array([[0., 0., 0.]])}
  {'ones': array([[1., 1., 1.]]), 'zeros': array([[0., 0., 0.]])}
  {'ones': array([[1., 1., 1.]]), 'zeros': array([[0., 0., 0.]])}
  {'ones': array([[1., 1., 1.]]), 'zeros': array([[0., 0., 0.]])}
  {'ones': array([[1., 1., 1.]]), 'zeros': array([[0., 0., 0.]])}

  Note how we get a batch at a time from each of the datasets in the
  `Databag`. This can be useful for training models that combine data
  from multiple `Dataset` objects at a time.
  """

  def __init__(self, datasets: Optional[Dict[Any, Dataset]] = None) -> None:
    """Initialize this `Databag`.

    Parameters
    ----------
    datasets: dict, optional (default None)
      A dictionary mapping keys to `Dataset` objects.
    """
    self.datasets = {} if datasets is None else datasets

  def add_dataset(self, key: Any, dataset: Dataset) -> None:
    """Register `dataset` under `key`, replacing any previous entry.

    Parameters
    ----------
    key: Any, hashable value
      Key to be added
    dataset: Dataset
      The dataset that `key` should point to.
    """
    self.datasets[key] = dataset

  def iterbatches(self, **kwargs) -> Iterator[Dict[str, np.ndarray]]:
    """Loop through all internal datasets in the same order.

    Parameters
    ----------
    batch_size: int
      Number of samples from each dataset to return
    epochs: int
      Number of times to loop through the datasets
    pad_batches: bool
      Should all batches==batch_size

    Returns
    -------
    Iterator[Dict[str, np.ndarray]]
      Generator which yields a dictionary {key: dataset.X[batch]}
    """
    keys = list(self.datasets)
    epochs = kwargs.pop('epochs', 1)
    # Lockstep iteration requires every dataset to yield batches in a
    # reproducible order.
    kwargs['deterministic'] = True
    for _ in range(epochs):
      per_dataset_iters = [
          self.datasets[key].iterbatches(**kwargs) for key in keys
      ]
      for batches in zip(*per_dataset_iters):
        yield {key: batch[0] for key, batch in zip(keys, batches)}
| mit |
tom-f-oconnell/multi_tracker | scripts/hdf5_to_csv.py | 1 | 1208 | #!/usr/bin/env python
from __future__ import print_function
from os.path import join, splitext
import glob
import pandas as pd
#import matplotlib.pyplot as plt
import multi_tracker_analysis as mta
def main():
    """Convert each *_trackedobjects.hdf5 file in the experiment directory
    into a CSV with epoch time and x/y positions."""
    #experiment_dir = 'choice_20210129_162648'
    experiment_dir = '.'

    def single_match(suffix):
        # Exactly one file with this suffix is expected in the experiment dir.
        matches = glob.glob(join(experiment_dir, '*' + suffix))
        assert len(matches) == 1
        return matches[0]

    stimuli_csv = single_match('_stimuli.csv')
    sdf = pd.read_csv(stimuli_csv)

    for hdf5_fname in glob.glob(join(experiment_dir, '*_trackedobjects.hdf5')):
        print(hdf5_fname)
        # TODO suppress load message about config files we aren't using.
        # seems the latter is None. but what else could it be?
        df, _ = mta.read_hdf5_file_to_pandas.load_and_preprocess_data(
            hdf5_fname
        )
        out_csv = splitext(hdf5_fname)[0] + '.csv'
        # TODO TODO TODO also add speed / velocity columns after checking they
        # are correct / fixing if need be
        df.to_csv(out_csv,
                  columns=['time_epoch', 'position_x', 'position_y'],
                  index=False)


if __name__ == '__main__':
    main()
| mit |
ml-lab/neuralnilm | neuralnilm/data/stridesource.py | 4 | 6512 | from __future__ import print_function, division
from copy import copy
from datetime import timedelta
import numpy as np
import pandas as pd
import nilmtk
from nilmtk.timeframegroup import TimeFrameGroup
from nilmtk.timeframe import TimeFrame
from neuralnilm.data.source import Sequence
from neuralnilm.utils import check_windows
from neuralnilm.data.source import Source
from neuralnilm.consts import DATA_FOLD_NAMES
import logging
logger = logging.getLogger(__name__)
class StrideSource(Source):
    """Data source that slides a fixed-length window across each building.

    Consecutive sequences within a building start `stride` samples apart;
    with the default stride == seq_length the windows tile the data without
    overlap.

    Attributes
    ----------
    data : dict
        Structure example:
        {<train | unseen_appliances | unseen_activations_of_seen_appliances>: {
            <building_name>: pd.DataFrame of with 2 cols: mains, target
        }}
    _num_seqs : pd.Series with 2-level hierarchical index
        L0 : train, unseen_appliances, unseen_activations_of_seen_appliances
        L1 : building_names
    """
    def __init__(self, target_appliance,
                 seq_length, filename, windows, sample_period,
                 stride=None,
                 rng_seed=None):
        # windows maps {fold: {building_i: (start, end)}}; validated below.
        self.target_appliance = target_appliance
        self.seq_length = seq_length
        self.filename = filename
        check_windows(windows)
        self.windows = windows
        self.sample_period = sample_period
        # Default to non-overlapping windows (stride == window length).
        self.stride = self.seq_length if stride is None else stride
        self._reset()
        super(StrideSource, self).__init__(rng_seed=rng_seed)
        # stop validation only when we've gone through all validation data
        self.num_batches_for_validation = None
        self._load_data_into_memory()
        self._compute_num_sequences_per_building()
    def _reset(self):
        # Drop any previously loaded data and per-building sequence counts.
        self.data = {}
        self._num_seqs = pd.Series()
    def _load_data_into_memory(self):
        # For every fold and building, load an aligned DataFrame with
        # 'mains' and 'target' power columns resampled to sample_period.
        # NOTE(review): dict.iteritems() makes this module Python 2 only.
        logger.info("Loading NILMTK data...")
        # Load dataset
        dataset = nilmtk.DataSet(self.filename)
        for fold, buildings_and_windows in self.windows.iteritems():
            for building_i, window in buildings_and_windows.iteritems():
                dataset.set_window(*window)
                elec = dataset.buildings[building_i].elec
                building_name = (
                    dataset.metadata['name'] +
                    '_building_{}'.format(building_i))
                # Mains
                logger.info(
                    "Loading data for {}...".format(building_name))
                mains_meter = elec.mains()
                mains_good_sections = mains_meter.good_sections()
                appliance_meter = elec[self.target_appliance]
                # Only keep sections where BOTH mains and appliance are good.
                good_sections = appliance_meter.good_sections(
                    sections=mains_good_sections)
                def load_data(meter):
                    return meter.power_series_all_data(
                        sample_period=self.sample_period,
                        sections=good_sections).astype(np.float32).dropna()
                mains_data = load_data(mains_meter)
                appliance_data = load_data(appliance_meter)
                # Inner-join on the timestamp index; rows missing either
                # series are dropped.
                df = pd.DataFrame(
                    {'mains': mains_data, 'target': appliance_data},
                    dtype=np.float32).dropna()
                del mains_data
                del appliance_data
                if not df.empty:
                    self.data.setdefault(fold, {})[building_name] = df
                    logger.info(
                        "Loaded data from building {} for fold {}"
                        " from {} to {}."
                        .format(building_name, fold, df.index[0], df.index[-1]))
        dataset.store.close()
        logger.info("Done loading NILMTK mains data.")
    def _compute_num_sequences_per_building(self):
        # Build the (fold, building) -> number-of-windows Series used by
        # get_sequence() to walk every sequence of a fold in order.
        index = []
        all_num_seqs = []
        for fold, buildings in self.data.iteritems():
            for building_name, df in buildings.iteritems():
                remainder = len(df) - self.seq_length
                # Ceiling division: a final, partially-filled window is kept
                # (it is zero-padded by get_sequence).
                num_seqs = np.ceil(remainder / self.stride) + 1
                num_seqs = max(0 if df.empty else 1, int(num_seqs))
                index.append((fold, building_name))
                all_num_seqs.append(num_seqs)
        multi_index = pd.MultiIndex.from_tuples(
            index, names=["fold", "building_name"])
        self._num_seqs = pd.Series(all_num_seqs, multi_index)
    def get_sequence(self, fold='train', enable_all_appliances=False):
        # NOTE(review): despite the name, this is a generator; it yields one
        # zero-padded Sequence per stride position, for every building of the
        # fold in order.
        if enable_all_appliances:
            raise ValueError("`enable_all_appliances` is not implemented yet"
                             " for StrideSource!")
        # select building
        # Cumulative counts mark where each building's sequence range ends.
        building_divisions = self._num_seqs[fold].cumsum()
        total_seq_for_fold = self._num_seqs[fold].sum()
        building_row_i = 0
        building_name = building_divisions.index[0]
        prev_division = 0
        for seq_i in range(total_seq_for_fold):
            if seq_i == building_divisions.iloc[building_row_i]:
                # Crossed into the next building's range of sequence indices.
                prev_division = seq_i
                building_row_i += 1
                building_name = building_divisions.index[building_row_i]
            seq_i_for_building = seq_i - prev_division
            start_i = seq_i_for_building * self.stride
            end_i = start_i + self.seq_length
            data_for_seq = self.data[fold][building_name].iloc[start_i:end_i]
            def get_data(col):
                # Right-pad short (end-of-building) windows with zeros and add
                # a trailing singleton feature axis.
                data = data_for_seq[col].values
                n_zeros_to_pad = self.seq_length - len(data)
                data = np.pad(
                    data, pad_width=(0, n_zeros_to_pad), mode='constant')
                return data[:, np.newaxis]
            seq = Sequence(self.seq_length)
            seq.input = get_data('mains')
            seq.target = get_data('target')
            assert len(seq.input) == self.seq_length
            assert len(seq.target) == self.seq_length
            # Set mask: padded timesteps get zero weight.
            seq.weights = np.ones((self.seq_length, 1), dtype=np.float32)
            n_zeros_to_pad = self.seq_length - len(data_for_seq)
            if n_zeros_to_pad > 0:
                seq.weights[-n_zeros_to_pad:, 0] = 0
            # Set metadata
            seq.metadata = {
                'seq_i': seq_i,
                'building_name': building_name,
                'total_num_sequences': total_seq_for_fold,
                'start_date': data_for_seq.index[0],
                'end_date': data_for_seq.index[-1]
            }
            yield seq
    @classmethod
    def _attrs_to_remove_for_report(cls):
        # Large / unpicklable attributes stripped before report serialization.
        return ['data', '_num_seqs', 'rng']
| apache-2.0 |
cduvedi/CS229-project | feature_extraction/eigen_faces_refactored.py | 1 | 5401 | import os
import csv
import numpy
import scipy
import random
import pylab as pl
from scipy import linalg
from scipy.misc import toimage
from sklearn.cross_validation import train_test_split
from sklearn.datasets import fetch_lfw_people
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.decomposition import RandomizedPCA
from sklearn import svm
from sklearn.svm import SVC
#====================================================
# Terminology used:
# Sample/DataSample -> (Y,[Xs])
# FeatureVector -> [Xs]
# Label -> Y
#====================================================
#====================================================
# main() function in the script
#====================================================
def main():
    """Run the full eigenfaces pipeline: load data, fit PCA, project both
    splits, train a grid-searched SVM, and score it on test and train."""
    train = readLabelledDataFromCSV('/root/CS229-project/Datasets/train_small.csv')
    test = readLabelledDataFromCSV('/root/CS229-project/Datasets/test_small.csv')
    pca = extractPCA(train, test)
    trainFeatures = mapRawFeaturesToPCAFeatures(train, pca)
    writeFeatureVectorsToFile('train.feat', trainFeatures)
    classifier = trainSVM(trainFeatures, train.labels)
    testFeatures = mapRawFeaturesToPCAFeatures(test, pca)
    testSVM(testFeatures, test.labels, classifier)
    testSVM(trainFeatures, train.labels, classifier)
#====================================================
# Helper functions.
#====================================================
def readLabelledDataFromCSV(fileName):
    """Read labelled samples from a CSV file into a LabelledData container.

    Each row is expected to be: label, "f1 f2 f3 ...", extra-field.
    The label is parsed as an int and the space-separated feature string as
    a list of floats.
    """
    labelledData = LabelledData()
    # Context manager guarantees the file is closed (the old code leaked the
    # handle), and print() works on both Python 2 and 3 (was a py2 statement).
    with open(fileName, 'r') as fileHandle:
        reader = csv.reader(fileHandle)
        for row in reader:
            labelStr, featureStr, tp = row
            label = int(labelStr)
            features = [float(x) for x in featureStr.split(' ')]
            labelledData.addDataSample(label, features)
    # Debug purposes: per-class sample counts for the 7 expression classes.
    for i in range(0, 7):
        print(len(labelledData.labelToFeatureVectors[i]))
    return labelledData
def extractPCA(labelledData, testData):
    """Fit a whitened RandomizedPCA on the training vectors and mean-centre
    both the training and test feature vectors in place.

    Returns the fitted PCA object (65 components).
    """
    #randomFeatureVectors = labelledData.getRandomFeatureVectors(numSamplesPerLabel = 10)
    randomFeatureVectors = labelledData.featureVectors
    randomFaceArray = numpy.array(randomFeatureVectors)
    faceMean = numpy.mean(randomFaceArray, 0)
    facesAdjusted = randomFaceArray - faceMean
    # BUG FIX: the original training loop computed the difference but never
    # assigned it back (missing `=`), so only the test set was centred.
    for i in range(0, len(labelledData.featureVectors)):
        for j in range(0, len(labelledData.featureVectors[i])):
            labelledData.featureVectors[i][j] = labelledData.featureVectors[i][j] - faceMean[j]
    for i in range(0, len(testData.featureVectors)):
        for j in range(0, len(testData.featureVectors[i])):
            testData.featureVectors[i][j] = testData.featureVectors[i][j] - faceMean[j]
    # NOTE(review): RandomizedPCA is deprecated in modern scikit-learn; the
    # replacement is PCA(svd_solver='randomized').
    pca = RandomizedPCA(n_components = 65, whiten=True).fit(facesAdjusted)
    print("PCA completed")
    print(len(pca.components_))
    return pca
def mapRawFeaturesToPCAFeatures(labelledData, pca):
    """Project a dataset's raw feature vectors into the fitted PCA space."""
    rawVectors = labelledData.featureVectors
    return pca.transform(rawVectors)
def readFeatureVectorsFromFile(fileName):
    """Read feature vectors previously written by writeFeatureVectorsToFile.

    Returns one string per line (the str() representation each vector was
    written with).

    NOTE(review): this was an unimplemented `pass` stub that returned None;
    no caller in this module relied on that, so implementing the read
    counterpart is backward compatible.
    """
    with open(fileName, 'r') as fileHandle:
        return [line.rstrip('\n') for line in fileHandle]
def writeFeatureVectorsToFile(fileName, featureVectors):
    """Write one feature vector per line (its str() representation) to fileName.

    Uses a context manager so the file is closed even if a write fails.
    """
    with open(fileName, 'w') as fileHandle:
        for v in featureVectors:
            fileHandle.write("%s\n" % v)
def trainSVM(featureVectors, labels):
    """Grid-search an RBF-kernel SVM over C and gamma and fit it.

    Returns the fitted GridSearchCV object (usable directly for predict()).
    """
    param_grid = {'C': [1e3, 5e3], 'gamma': [0.0001, 0.1], }
    #param_grid = {'C': [1e3, 5e3, 1e4, 5e4, 1e5], 'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1], }
    expr_classifier = GridSearchCV(svm.SVC(kernel='rbf', class_weight='auto'), param_grid)
    # expr_classifier = svm.SVC(C = 0.0001)
    expr_classifier.fit(featureVectors, labels)
    # print("Number of support vectors")
    # print(expr_classifier.n_support_)
    print("Best estimator found by grid search:")
    # BUG FIX: previously referenced an undefined name `clf` (NameError).
    print(expr_classifier.best_estimator_)
    return expr_classifier
def testSVM(testVectors, labels, classifier):
numCorrect = 0
index = 0
fileHandle = open('labels.txt', 'w')
for testVector in testVectors:
predictedLabel = classifier.predict(testVector)
fileHandle.write("%s \t %s \n" % (predictedLabel, labels[index]))
if (predictedLabel == labels[index]):
numCorrect = numCorrect + 1
index = index + 1
print numCorrect
fileHandle.close()
#====================================================
# LabelledData class
# Stores a set of labelled data points
#====================================================
class LabelledData():
    """Container for labelled feature vectors.

    Terminology: a sample is (Y, [Xs]); labels holds the Ys, featureVectors
    the parallel [Xs], and labelToFeatureVectors groups the Xs by label
    (expression classes 0-6).
    """
    def __init__(self):
        self.labels = []            # Y values, one per sample
        self.featureVectors = []    # X vectors, parallel to labels
        self.labelToFeatureVectors = {}  # {Y1: [X1,X2,..], Y2: [X5,X6,..]}
        for i in range(7):
            self.labelToFeatureVectors[i] = []

    def getSample(self, index):
        """Return (label, featureVector) at index, or ("", []) if out of range."""
        # BUG FIX: previously compared against the bare name `labels`
        # (NameError) instead of self.labels.
        if (index < len(self.labels)):
            return (self.labels[index], self.featureVectors[index])
        # BUG FIX: the old warning concatenated str + int (TypeError).
        print("[WARNING]: Index (%d) greater than number of samples (%d) in the dataset"
              % (index, len(self.labels)))
        return ("", [])

    def addDataSample(self, label, features):
        """Append one (label, features) sample to all three structures."""
        self.labels.append(label)
        self.featureVectors.append(features)
        self.labelToFeatureVectors[label].append(features)

    def getRandomFeatureVectors(self, numSamplesPerLabel):
        """Return numSamplesPerLabel randomly chosen vectors for each label.

        Labels with numSamplesPerLabel or fewer samples are skipped, matching
        the original behaviour.
        """
        randomFeatureVectors = []
        for label in self.labelToFeatureVectors:
            featureVectors = self.labelToFeatureVectors[label]
            numSamples = len(featureVectors)
            if (numSamples > numSamplesPerLabel):
                # BUG FIX: the sample size was hard-coded to 10 instead of
                # using the numSamplesPerLabel parameter.
                randomIndices = random.sample(range(numSamples), numSamplesPerLabel)
                for i in randomIndices:
                    randomFeatureVectors.append(featureVectors[i])
        return randomFeatureVectors

    def size(self):
        """Return the number of stored samples."""
        # BUG FIX: previously referenced the bare name `labels` (NameError).
        return len(self.labels)
if __name__ == '__main__':
    # Guard the entry point so importing this module (e.g. to reuse
    # LabelledData) no longer triggers a full PCA + SVM training run.
    main()
| gpl-2.0 |
sgrieve/LH_Paper_Plotting | Plotting_Code/Figure_8_revision.py | 1 | 5620 | # -*- coding: utf-8 -*-
"""
Copyright (C) 2015 Stuart W.D Grieve 2015
Developer can be contacted by s.grieve _at_ ed.ac.uk
This program is free software;
you can redistribute it and/or modify it under the terms of the
GNU General Public License as published by the Free Software Foundation;
either version 2 of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY;
without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the
GNU General Public License along with this program;
if not, write to:
Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301
USA
Script to generate Figure 5 from Grieve et al. (2015)
Input data is generated using LH_Driver.cpp
Parameters to be modified are highlighted by comments
@author: SWDG
"""
def mm_to_inch(mm):
    """Convert millimetres to inches.

    Uses the exact definition 1 inch == 25.4 mm; the previous hard-coded
    factor 0.0393700787 was a truncated 1/25.4.
    """
    return mm / 25.4
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import rcParams
import MuddPyStatsTools as mpy
import string
# Set up fonts for plots
rcParams['font.family'] = 'sans-serif'
rcParams['font.sans-serif'] = ['arial']
rcParams['font.size'] = 10
rcParams['xtick.direction'] = 'out'
rcParams['ytick.direction'] = 'out'
#================ modifyable parameters start here ====================
#paths to the data and to save the figure to
path = '' #path to the folder contaning the hilltopdata files
Filenames = ['C:/Users/Stuart/Dropbox/final_data_for_plotting/nc/NC_HilltopData_RAW.csv','C:/Users/Stuart/Dropbox/final_data_for_plotting/or/OR_HilltopData_RAW.csv','C:/Users/Stuart/Dropbox/final_data_for_plotting/gm/GM_HilltopData_RAW.csv','C:/Users/Stuart/Dropbox/final_data_for_plotting/cr/CR2_HilltopData_RAW.csv',] #names of the hilltopdata files
figpath = 'C:/Users/Stuart/Desktop/FR/final_figures/' #path to save the final figures
#plot style parameters: one entry per file in Filenames (axis limits and
#tick spacings are tuned per field site)
xmaxes = [1300,450,700,800]
ymaxes = [5000,9500,2300,17000]
xsteps = [400,100,150,200]
ysteps = [1500,2000,500,3000]
title_moves = [0.,0.04,0.,0.]
#plot labels
locations = ['Coweeta','Oregon Coast Range','Gabilan Mesa','Sierra Nevada']
fig_labels = list(string.ascii_lowercase)[:4] #generate subplot labels
#================ modifyable parameters end here ====================
fig = plt.figure()
for subplot_count, (filename,location,xmax,ymax,xstep,ystep,title_move,labels) in enumerate(zip(Filenames,locations,xmaxes,ymaxes,xsteps,ysteps,title_moves,fig_labels)):
    #load the hilltopdata file to get the LH data (first line is a header)
    with open(path+filename,'r') as f:
        f.readline()
        data = f.readlines()
    LH = []
    for d in data:
        if 'fail' not in d:
            split = d.split(',')
            relief = float(split[4])
            lh = float(split[5])
            hilltop_slope = float(split[8])
            #filter out spurious traces: very short hillslopes, very low
            #relief, or oversteepened hilltops
            if (lh > 2.0):
                if (relief > 2.0):
                    if (hilltop_slope < 1.2):
                        LH.append(lh)
    #get the median absolute devaition
    MAD = mpy.calculate_MedianAbsoluteDeviation(LH)
    #set up the 4 subplots
    ax = plt.subplot(2,2,subplot_count + 1)
    #plot the histogram and get the patches so we can colour them
    n,bins,patches = plt.hist(LH,bins=200,color='k',linewidth=0)
    #Add a title with the location name
    ax.text(.5+title_move,.9,location, horizontalalignment='center', transform=ax.transAxes,fontsize=12)
    #get the median -/+ median devaition
    MinMAD = np.median(LH)-MAD
    MaxMAD = np.median(LH)+MAD
    #color the bins that fall within +/- MAD of the median (bins outside the
    #MAD band are faded via alpha)
    #http://stackoverflow.com/questions/6352740/matplotlib-label-each-bin
    for patch, rightside, leftside in zip(patches, bins[1:], bins[:-1]):
        if rightside < MinMAD:
            patch.set_alpha(0.4)
        elif leftside > MaxMAD:
            patch.set_alpha(0.4)
    #Insert dashed red line at median
    plt.vlines(np.median(LH),0,ymax,label='Median', color='r',linewidth=1,linestyle='dashed')
    #set the x and y max based on the input params
    plt.xlim(0,xmax)
    plt.ylim(0,ymax)
    #format the ticks to only appear on the bottom and left axes
    plt.tick_params(axis='x', which='both', top='off',length=2)
    plt.tick_params(axis='y', which='both', right='off',length=2)
    #configure tick spacing based on the defined spacings given
    ax.xaxis.set_ticks(np.arange(0,xmax+1,xstep))
    ax.yaxis.set_ticks(np.arange(0,ymax+1,ystep))
    #annotate the plot with the median and MAD and the subplot label
    plt.annotate('Median = '+str(int(round(np.median(LH),0)))+' m\nMAD = '+str(int(round(MAD,0)))+' m', xy=(0.55, 0.4), xycoords='axes fraction', fontsize=10, horizontalalignment='left', verticalalignment='top')
    plt.annotate(labels, xy=(0.95, 0.95), xycoords='axes fraction', fontsize=10, horizontalalignment='left', verticalalignment='top')
#spacing of the plots
plt.subplots_adjust(hspace = 0.25)
#x and y axis labels
fig.text(0.5, 0.02, 'Hillslope Length (m)', ha='center', va='center', size=12)
fig.text(0.06, 0.5, 'Count', ha='center', va='center', rotation='vertical', size=12)
#set the size of the plot to be saved. These are the JGR sizes:
#quarter page = 95*115
#half page = 190*115 (horizontal) 95*230 (vertical)
#full page = 190*230
fig.set_size_inches(mm_to_inch(190), mm_to_inch(115))
plt.savefig(figpath+'Figure_5_raw.png', dpi = 500) #change to *.tif for submission
arcyfelix/Machine-Learning-For-Trading | 34_correlation.py | 1 | 2422 | import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
''' Read: http://pandas.pydata.org/pandas-docs/stable/api.html#api-dataframe-stats '''
def symbol_to_path(symbol, base_dir = 'data'):
    """Return the path of the CSV file for a ticker symbol inside base_dir."""
    filename = "{}.csv".format(str(symbol))
    return os.path.join(base_dir, filename)
def dates_creator(start_date='2009-01-01', end_date='2015-12-31'):
    """Build the analysis date range.

    Generalized from the previous hard-coded window: the defaults preserve
    the original 2009-01-01 .. 2015-12-31 behaviour, but callers can now
    supply any start/end dates.
    """
    return pd.date_range(start_date, end_date)
def get_data(symbols, dates):
    """Read 'Adj Close' prices for the given symbols into one DataFrame.

    SPY is always loaded first as the market reference, and rows where SPY
    has no data (non-trading days) are dropped.

    BUG FIX: previously the caller's `symbols` list was mutated in place via
    insert(); we now work on a copy, which is behaviour-compatible for the
    module-level usage (its list already contains 'SPY').
    """
    df = pd.DataFrame(index = dates)
    if 'SPY' not in symbols: # adding SPY as the main reference
        symbols = ['SPY'] + list(symbols)
    for symbol in symbols:
        df_temp = pd.read_csv(symbol_to_path(symbol),
                              index_col = 'Date',
                              parse_dates = True,
                              usecols = ['Date', 'Adj Close'],
                              na_values = ['nan'])
        df_temp = df_temp.rename(columns = {'Adj Close': symbol})
        df = df.join(df_temp)
        if symbol == 'SPY':
            # Keep only dates on which the market traded.
            df = df.dropna(subset = ['SPY'])
    print(df)
    return df
def plot(df):
    """Plot every price column of df on one labelled chart and show it."""
    axes = df.plot(title = 'Stock prices', fontsize = 12)
    axes.set_xlabel('Date')
    axes.set_ylabel('Price')
    plt.show()
def get_daily_returns(df):
daily_returns = df.copy()
# Calculating daily returns
daily_returns[1:] = (df / df.shift(1)) - 1
# Setting daily returns for row 0 to 0.
daily_returns.ix[0, :] = 0
return daily_returns
def show_scatter(df, x, y):
    """Scatter-plot column y against column x, overlay the least-squares
    regression line, and print the fitted beta (slope) and alpha (intercept)."""
    df.plot(kind = 'scatter', x= x, y= y)
    beta, alpha = calculate_alpha_beta(df, x, y)
    # Regression line: y = beta * x + alpha, evaluated at every x value.
    fitted_line = beta * df[x] + alpha
    plt.plot(df[x], fitted_line, '-', color = 'r')
    plt.show()
    print('Beta for', y + ':')
    print(beta)
    print('Alpha for', y + ':')
    print(alpha)
def calculate_alpha_beta(df, x, y):
    """Least-squares fit of y = beta * x + alpha; returns (beta, alpha)."""
    coefficients = np.polyfit(df[x], df[y], 1)  # degree-1 (linear) fit
    beta, alpha = coefficients
    return beta, alpha
def calculate_correlation(df):
    '''Print the pairwise correlation matrix of df's columns using the most
    common method -> Pearson.'''
    correlation_matrix = df.corr(method = 'pearson')
    print(correlation_matrix)
# Tickers to analyse; SPY (the S&P 500 ETF) is the market benchmark and is
# always loaded first by get_data().
symbols = ['SPY', 'IBM', 'AAPL']
if __name__ == "__main__":
    # Load prices, derive daily returns, plot both, then print the Pearson
    # correlation matrix of the daily returns.
    dates = dates_creator()
    df = get_data(symbols, dates)
    daily_returns = get_daily_returns(df)
    plot(df)
    plot(daily_returns)
    calculate_correlation(daily_returns)
| apache-2.0 |
nhejazi/scikit-learn | sklearn/manifold/t_sne.py | 3 | 35216 | # Author: Alexander Fabisch -- <afabisch@informatik.uni-bremen.de>
# Author: Christopher Moody <chrisemoody@gmail.com>
# Author: Nick Travers <nickt@squareup.com>
# License: BSD 3 clause (C) 2014
# This is the exact and Barnes-Hut t-SNE implementation. There are other
# modifications of the algorithm:
# * Fast Optimization for t-SNE:
# http://cseweb.ucsd.edu/~lvdmaaten/workshops/nips2010/papers/vandermaaten.pdf
from time import time
import numpy as np
from scipy import linalg
import scipy.sparse as sp
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
from scipy.sparse import csr_matrix
from ..neighbors import NearestNeighbors
from ..base import BaseEstimator
from ..utils import check_array
from ..utils import check_random_state
from ..decomposition import PCA
from ..metrics.pairwise import pairwise_distances
from . import _utils
from . import _barnes_hut_tsne
from ..externals.six import string_types
from ..utils import deprecated
MACHINE_EPSILON = np.finfo(np.double).eps
def _joint_probabilities(distances, desired_perplexity, verbose):
    """Compute joint probabilities p_ij from distances.

    Parameters
    ----------
    distances : array, shape (n_samples * (n_samples-1) / 2,)
        Distances of samples are stored as condensed matrices, i.e.
        we omit the diagonal and duplicate entries and store everything
        in a one-dimensional array.

    desired_perplexity : float
        Desired perplexity of the joint probability distributions.

    verbose : int
        Verbosity level.

    Returns
    -------
    P : array, shape (n_samples * (n_samples-1) / 2,)
        Condensed joint probability matrix.
    """
    # Binary-search per-sample bandwidths so each conditional distribution
    # approximately matches the desired perplexity.
    distances = distances.astype(np.float32, copy=False)
    cond_P = _utils._binary_search_perplexity(
        distances, None, desired_perplexity, verbose)
    # Symmetrize, then normalise into a valid joint distribution, clamping
    # away from zero to keep downstream logs finite.
    sym_P = cond_P + cond_P.T
    normalisation = np.maximum(np.sum(sym_P), MACHINE_EPSILON)
    return np.maximum(squareform(sym_P) / normalisation, MACHINE_EPSILON)
def _joint_probabilities_nn(distances, neighbors, desired_perplexity, verbose):
    """Compute joint probabilities p_ij from distances using just nearest
    neighbors.

    This method is approximately equal to _joint_probabilities. The latter
    is O(N), but limiting the joint probability to nearest neighbors improves
    this substantially to O(uN).

    Parameters
    ----------
    distances : array, shape (n_samples, k)
        Distances of samples to its k nearest neighbors.

    neighbors : array, shape (n_samples, k)
        Indices of the k nearest-neighbors for each samples.

    desired_perplexity : float
        Desired perplexity of the joint probability distributions.

    verbose : int
        Verbosity level.

    Returns
    -------
    P : csr sparse matrix, shape (n_samples, n_samples)
        Condensed joint probability matrix with only nearest neighbors.
    """
    t0 = time()
    # Compute conditional probabilities such that they approximately match
    # the desired perplexity
    n_samples, k = neighbors.shape
    distances = distances.astype(np.float32, copy=False)
    neighbors = neighbors.astype(np.int64, copy=False)
    conditional_P = _utils._binary_search_perplexity(
        distances, neighbors, desired_perplexity, verbose)
    assert np.all(np.isfinite(conditional_P)), \
        "All probabilities should be finite"

    # Symmetrize the joint probability distribution using sparse operations.
    # Each row of the CSR matrix has exactly k entries, hence the regular
    # indptr of range(0, n_samples * k + 1, k).
    P = csr_matrix((conditional_P.ravel(), neighbors.ravel(),
                    range(0, n_samples * k + 1, k)),
                   shape=(n_samples, n_samples))
    P = P + P.T

    # Normalize the joint probability distribution
    sum_P = np.maximum(P.sum(), MACHINE_EPSILON)
    P /= sum_P

    assert np.all(np.abs(P.data) <= 1.0)
    if verbose >= 2:
        duration = time() - t0
        print("[t-SNE] Computed conditional probabilities in {:.3f}s"
              .format(duration))
    return P
def _kl_divergence(params, P, degrees_of_freedom, n_samples, n_components,
                   skip_num_points=0):
    """t-SNE objective function: gradient of the KL divergence
    of p_ijs and q_ijs and the absolute error.

    Parameters
    ----------
    params : array, shape (n_params,)
        Unraveled embedding.

    P : array, shape (n_samples * (n_samples-1) / 2,)
        Condensed joint probability matrix.

    degrees_of_freedom : float
        Degrees of freedom of the Student's-t distribution.

    n_samples : int
        Number of samples.

    n_components : int
        Dimension of the embedded space.

    skip_num_points : int (optional, default:0)
        Gradient rows below this index are not computed; useful when
        transforming new data with the original embedding held fixed.

    Returns
    -------
    kl_divergence : float
        Kullback-Leibler divergence of p_ij and q_ij.

    grad : array, shape (n_params,)
        Unraveled gradient of the Kullback-Leibler divergence with respect to
        the embedding.
    """
    X_embedded = params.reshape(n_samples, n_components)

    # Q is a heavy-tailed Student's t-distribution over pairwise squared
    # Euclidean distances, computed in place on the condensed vector.
    kernel = pdist(X_embedded, "sqeuclidean")
    kernel += 1.
    kernel /= degrees_of_freedom
    kernel **= (degrees_of_freedom + 1.0) / -2.0
    Q = np.maximum(kernel / (2.0 * np.sum(kernel)), MACHINE_EPSILON)

    # Objective C = KL(P || Q); np.dot(x, y) beats np.sum(x * y) since it
    # calls BLAS.
    kl_divergence = 2.0 * np.dot(P, np.log(np.maximum(P, MACHINE_EPSILON) / Q))

    # Gradient dC/dY. pdist always returns double precision, so the gradient
    # buffer is allocated with the embedding's own dtype.
    grad = np.ndarray((n_samples, n_components), dtype=params.dtype)
    PQd = squareform((P - Q) * kernel)
    for i in range(skip_num_points, n_samples):
        grad[i] = np.dot(np.ravel(PQd[i], order='K'),
                         X_embedded[i] - X_embedded)
    scale = 2.0 * (degrees_of_freedom + 1.0) / degrees_of_freedom
    return kl_divergence, grad.ravel() * scale
def _kl_divergence_bh(params, P, degrees_of_freedom, n_samples, n_components,
                      angle=0.5, skip_num_points=0, verbose=False):
    """t-SNE objective function: KL divergence of p_ijs and q_ijs.

    Uses Barnes-Hut tree methods to calculate the gradient that
    runs in O(NlogN) instead of O(N^2)

    Parameters
    ----------
    params : array, shape (n_params,)
        Unraveled embedding.

    P : csr sparse matrix, shape (n_samples, n_sample)
        Sparse approximate joint probability matrix, computed only for the
        k nearest-neighbors and symmetrized.

    degrees_of_freedom : float
        Degrees of freedom of the Student's-t distribution.

    n_samples : int
        Number of samples.

    n_components : int
        Dimension of the embedded space.

    angle : float (default: 0.5)
        This is the trade-off between speed and accuracy for Barnes-Hut T-SNE.
        'angle' is the angular size (referred to as theta in [3]) of a distant
        node as measured from a point. If this size is below 'angle' then it is
        used as a summary node of all points contained within it.
        This method is not very sensitive to changes in this parameter
        in the range of 0.2 - 0.8. Angle less than 0.2 has quickly increasing
        computation time and angle greater 0.8 has quickly increasing error.

    skip_num_points : int (optional, default:0)
        This does not compute the gradient for points with indices below
        `skip_num_points`. This is useful when computing transforms of new
        data where you'd like to keep the old data fixed.

    verbose : int
        Verbosity level.

    Returns
    -------
    kl_divergence : float
        Kullback-Leibler divergence of p_ij and q_ij.

    grad : array, shape (n_params,)
        Unraveled gradient of the Kullback-Leibler divergence with respect to
        the embedding.
    """
    params = params.astype(np.float32, copy=False)
    X_embedded = params.reshape(n_samples, n_components)

    # Unpack the CSR structure into the flat arrays the Cython kernel expects.
    val_P = P.data.astype(np.float32, copy=False)
    neighbors = P.indices.astype(np.int64, copy=False)
    indptr = P.indptr.astype(np.int64, copy=False)

    # The Barnes-Hut routine fills `grad` in place and returns the KL error.
    grad = np.zeros(X_embedded.shape, dtype=np.float32)
    error = _barnes_hut_tsne.gradient(val_P, X_embedded, neighbors, indptr,
                                      grad, angle, n_components, verbose,
                                      dof=degrees_of_freedom)
    c = 2.0 * (degrees_of_freedom + 1.0) / degrees_of_freedom
    grad = grad.ravel()
    grad *= c
    return error, grad
def _gradient_descent(objective, p0, it, n_iter,
n_iter_check=1, n_iter_without_progress=300,
momentum=0.8, learning_rate=200.0, min_gain=0.01,
min_grad_norm=1e-7, verbose=0, args=None, kwargs=None):
"""Batch gradient descent with momentum and individual gains.
Parameters
----------
objective : function or callable
Should return a tuple of cost and gradient for a given parameter
vector. When expensive to compute, the cost can optionally
be None and can be computed every n_iter_check steps using
the objective_error function.
p0 : array-like, shape (n_params,)
Initial parameter vector.
it : int
Current number of iterations (this function will be called more than
once during the optimization).
n_iter : int
Maximum number of gradient descent iterations.
n_iter_check : int
Number of iterations before evaluating the global error. If the error
is sufficiently low, we abort the optimization.
n_iter_without_progress : int, optional (default: 300)
Maximum number of iterations without progress before we abort the
optimization.
momentum : float, within (0.0, 1.0), optional (default: 0.8)
The momentum generates a weight for previous gradients that decays
exponentially.
learning_rate : float, optional (default: 200.0)
The learning rate for t-SNE is usually in the range [10.0, 1000.0]. If
the learning rate is too high, the data may look like a 'ball' with any
point approximately equidistant from its nearest neighbours. If the
learning rate is too low, most points may look compressed in a dense
cloud with few outliers.
min_gain : float, optional (default: 0.01)
Minimum individual gain for each parameter.
min_grad_norm : float, optional (default: 1e-7)
If the gradient norm is below this threshold, the optimization will
be aborted.
verbose : int, optional (default: 0)
Verbosity level.
args : sequence
Arguments to pass to objective function.
kwargs : dict
Keyword arguments to pass to objective function.
Returns
-------
p : array, shape (n_params,)
Optimum parameters.
error : float
Optimum.
i : int
Last iteration.
"""
if args is None:
args = []
if kwargs is None:
kwargs = {}
p = p0.copy().ravel()
update = np.zeros_like(p)
gains = np.ones_like(p)
error = np.finfo(np.float).max
best_error = np.finfo(np.float).max
best_iter = i = it
tic = time()
for i in range(it, n_iter):
error, grad = objective(p, *args, **kwargs)
grad_norm = linalg.norm(grad)
inc = update * grad < 0.0
dec = np.invert(inc)
gains[inc] += 0.2
gains[dec] *= 0.8
np.clip(gains, min_gain, np.inf, out=gains)
grad *= gains
update = momentum * update - learning_rate * grad
p += update
if (i + 1) % n_iter_check == 0:
toc = time()
duration = toc - tic
tic = toc
if verbose >= 2:
print("[t-SNE] Iteration %d: error = %.7f,"
" gradient norm = %.7f"
" (%s iterations in %0.3fs)"
% (i + 1, error, grad_norm, n_iter_check, duration))
if error < best_error:
best_error = error
best_iter = i
elif i - best_iter > n_iter_without_progress:
if verbose >= 2:
print("[t-SNE] Iteration %d: did not make any progress "
"during the last %d episodes. Finished."
% (i + 1, n_iter_without_progress))
break
if grad_norm <= min_grad_norm:
if verbose >= 2:
print("[t-SNE] Iteration %d: gradient norm %f. Finished."
% (i + 1, grad_norm))
break
return p, error, i
def trustworthiness(X, X_embedded, n_neighbors=5, precomputed=False):
    r"""Express to what extent the local structure is retained.
    The trustworthiness is within [0, 1]. It is defined as
    .. math::
        T(k) = 1 - \frac{2}{nk (2n - 3k - 1)} \sum^n_{i=1}
            \sum_{j \in U^{(k)}_i} (r(i, j) - k)
    where :math:`r(i, j)` is the rank of the embedded datapoint j
    according to the pairwise distances between the embedded datapoints,
    :math:`U^{(k)}_i` is the set of points that are in the k nearest
    neighbors in the embedded space but not in the original space.
    * "Neighborhood Preservation in Nonlinear Projection Methods: An
      Experimental Study" J. Venna, S. Kaski
    * "Learning a Parametric Embedding by Preserving Local Structure"
      L.J.P. van der Maaten
    Parameters
    ----------
    X : array, shape (n_samples, n_features) or (n_samples, n_samples)
        If the metric is 'precomputed' X must be a square distance
        matrix. Otherwise it contains a sample per row.
    X_embedded : array, shape (n_samples, n_components)
        Embedding of the training data in low-dimensional space.
    n_neighbors : int, optional (default: 5)
        Number of neighbors k that will be considered.
    precomputed : bool, optional (default: False)
        Set this flag if X is a precomputed square distance matrix.
    Returns
    -------
    trustworthiness : float
        Trustworthiness of the low-dimensional embedding.
    """
    # Squared pairwise distances in the input space (or use them as given).
    if precomputed:
        dist_X = X
    else:
        dist_X = pairwise_distances(X, squared=True)
    dist_X_embedded = pairwise_distances(X_embedded, squared=True)
    # Full neighbor ordering in the input space; for the embedding keep only
    # the k nearest neighbors, dropping column 0 (each point's self-distance).
    ind_X = np.argsort(dist_X, axis=1)
    ind_X_embedded = np.argsort(dist_X_embedded, axis=1)[:, 1:n_neighbors + 1]
    n_samples = X.shape[0]
    # Accumulate the penalty: for each embedded neighbor, how far beyond rank
    # k it sits in the original-space ordering (only positive excesses count).
    penalty = 0.0
    for sample_idx in range(n_samples):
        for neighbor in ind_X_embedded[sample_idx]:
            rank_excess = np.where(ind_X[sample_idx] == neighbor)[0][0] \
                - n_neighbors
            if rank_excess > 0:
                penalty += rank_excess
    normalizer = 2.0 / (n_samples * n_neighbors *
                        (2.0 * n_samples - 3.0 * n_neighbors - 1.0))
    return 1.0 - penalty * normalizer
class TSNE(BaseEstimator):
    """t-distributed Stochastic Neighbor Embedding.
    t-SNE [1] is a tool to visualize high-dimensional data. It converts
    similarities between data points to joint probabilities and tries
    to minimize the Kullback-Leibler divergence between the joint
    probabilities of the low-dimensional embedding and the
    high-dimensional data. t-SNE has a cost function that is not convex,
    i.e. with different initializations we can get different results.
    It is highly recommended to use another dimensionality reduction
    method (e.g. PCA for dense data or TruncatedSVD for sparse data)
    to reduce the number of dimensions to a reasonable amount (e.g. 50)
    if the number of features is very high. This will suppress some
    noise and speed up the computation of pairwise distances between
    samples. For more tips see Laurens van der Maaten's FAQ [2].
    Read more in the :ref:`User Guide <t_sne>`.
    Parameters
    ----------
    n_components : int, optional (default: 2)
        Dimension of the embedded space.
    perplexity : float, optional (default: 30)
        The perplexity is related to the number of nearest neighbors that
        is used in other manifold learning algorithms. Larger datasets
        usually require a larger perplexity. Consider selecting a value
        between 5 and 50. The choice is not extremely critical since t-SNE
        is quite insensitive to this parameter.
    early_exaggeration : float, optional (default: 12.0)
        Controls how tight natural clusters in the original space are in
        the embedded space and how much space will be between them. For
        larger values, the space between natural clusters will be larger
        in the embedded space. Again, the choice of this parameter is not
        very critical. If the cost function increases during initial
        optimization, the early exaggeration factor or the learning rate
        might be too high.
    learning_rate : float, optional (default: 200.0)
        The learning rate for t-SNE is usually in the range [10.0, 1000.0]. If
        the learning rate is too high, the data may look like a 'ball' with any
        point approximately equidistant from its nearest neighbours. If the
        learning rate is too low, most points may look compressed in a dense
        cloud with few outliers. If the cost function gets stuck in a bad local
        minimum increasing the learning rate may help.
    n_iter : int, optional (default: 1000)
        Maximum number of iterations for the optimization. Should be at
        least 250.
    n_iter_without_progress : int, optional (default: 300)
        Maximum number of iterations without progress before we abort the
        optimization, used after 250 initial iterations with early
        exaggeration. Note that progress is only checked every 50 iterations so
        this value is rounded to the next multiple of 50.
        .. versionadded:: 0.17
           parameter *n_iter_without_progress* to control stopping criteria.
    min_grad_norm : float, optional (default: 1e-7)
        If the gradient norm is below this threshold, the optimization will
        be stopped.
    metric : string or callable, optional
        The metric to use when calculating distance between instances in a
        feature array. If metric is a string, it must be one of the options
        allowed by scipy.spatial.distance.pdist for its metric parameter, or
        a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
        If metric is "precomputed", X is assumed to be a distance matrix.
        Alternatively, if metric is a callable function, it is called on each
        pair of instances (rows) and the resulting value recorded. The callable
        should take two arrays from X as input and return a value indicating
        the distance between them. The default is "euclidean" which is
        interpreted as squared euclidean distance.
    init : string or numpy array, optional (default: "random")
        Initialization of embedding. Possible options are 'random', 'pca',
        and a numpy array of shape (n_samples, n_components).
        PCA initialization cannot be used with precomputed distances and is
        usually more globally stable than random initialization.
    verbose : int, optional (default: 0)
        Verbosity level.
    random_state : int, RandomState instance or None, optional (default: None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`. Note that different initializations might result in
        different local minima of the cost function.
    method : string (default: 'barnes_hut')
        By default the gradient calculation algorithm uses Barnes-Hut
        approximation running in O(NlogN) time. method='exact'
        will run on the slower, but exact, algorithm in O(N^2) time. The
        exact algorithm should be used when nearest-neighbor errors need
        to be better than 3%. However, the exact method cannot scale to
        millions of examples.
        .. versionadded:: 0.17
           Approximate optimization *method* via the Barnes-Hut.
    angle : float (default: 0.5)
        Only used if method='barnes_hut'
        This is the trade-off between speed and accuracy for Barnes-Hut T-SNE.
        'angle' is the angular size (referred to as theta in [3]) of a distant
        node as measured from a point. If this size is below 'angle' then it is
        used as a summary node of all points contained within it.
        This method is not very sensitive to changes in this parameter
        in the range of 0.2 - 0.8. Angle less than 0.2 has quickly increasing
        computation time and angle greater 0.8 has quickly increasing error.
    Attributes
    ----------
    embedding_ : array-like, shape (n_samples, n_components)
        Stores the embedding vectors.
    kl_divergence_ : float
        Kullback-Leibler divergence after optimization.
    n_iter_ : int
        Number of iterations run.
    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.manifold import TSNE
    >>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
    >>> X_embedded = TSNE(n_components=2).fit_transform(X)
    >>> X_embedded.shape
    (4, 2)
    References
    ----------
    [1] van der Maaten, L.J.P.; Hinton, G.E. Visualizing High-Dimensional Data
        Using t-SNE. Journal of Machine Learning Research 9:2579-2605, 2008.
    [2] van der Maaten, L.J.P. t-Distributed Stochastic Neighbor Embedding
        http://homepage.tudelft.nl/19j49/t-SNE.html
    [3] L.J.P. van der Maaten. Accelerating t-SNE using Tree-Based Algorithms.
        Journal of Machine Learning Research 15(Oct):3221-3245, 2014.
        http://lvdmaaten.github.io/publications/papers/JMLR_2014.pdf
    """
    # Control the number of exploration iterations with early_exaggeration on
    _EXPLORATION_N_ITER = 250
    # Control the number of iterations between progress checks
    _N_ITER_CHECK = 50
    def __init__(self, n_components=2, perplexity=30.0,
                 early_exaggeration=12.0, learning_rate=200.0, n_iter=1000,
                 n_iter_without_progress=300, min_grad_norm=1e-7,
                 metric="euclidean", init="random", verbose=0,
                 random_state=None, method='barnes_hut', angle=0.5):
        # Parameters are stored verbatim (scikit-learn convention); all
        # validation happens in _fit so that get_params/set_params round-trip.
        self.n_components = n_components
        self.perplexity = perplexity
        self.early_exaggeration = early_exaggeration
        self.learning_rate = learning_rate
        self.n_iter = n_iter
        self.n_iter_without_progress = n_iter_without_progress
        self.min_grad_norm = min_grad_norm
        self.metric = metric
        self.init = init
        self.verbose = verbose
        self.random_state = random_state
        self.method = method
        self.angle = angle
    def _fit(self, X, skip_num_points=0):
        """Fit the model using X as training data.
        Note that sparse arrays can only be handled by method='exact'.
        It is recommended that you convert your sparse array to dense
        (e.g. `X.toarray()`) if it fits in memory, or otherwise using a
        dimensionality reduction technique (e.g. TruncatedSVD).
        Parameters
        ----------
        X : array, shape (n_samples, n_features) or (n_samples, n_samples)
            If the metric is 'precomputed' X must be a square distance
            matrix. Otherwise it contains a sample per row. Note that this
            when method='barnes_hut', X cannot be a sparse array and if need be
            will be converted to a 32 bit float array. Method='exact' allows
            sparse arrays and 64bit floating point inputs.
        skip_num_points : int (optional, default:0)
            This does not compute the gradient for points with indices below
            `skip_num_points`. This is useful when computing transforms of new
            data where you'd like to keep the old data fixed.
        """
        # --- Parameter validation -------------------------------------------
        if self.method not in ['barnes_hut', 'exact']:
            raise ValueError("'method' must be 'barnes_hut' or 'exact'")
        if self.angle < 0.0 or self.angle > 1.0:
            raise ValueError("'angle' must be between 0.0 - 1.0")
        if self.metric == "precomputed":
            if isinstance(self.init, string_types) and self.init == 'pca':
                raise ValueError("The parameter init=\"pca\" cannot be "
                                 "used with metric=\"precomputed\".")
            if X.shape[0] != X.shape[1]:
                raise ValueError("X should be a square distance matrix")
            if np.any(X < 0):
                raise ValueError("All distances should be positive, the "
                                 "precomputed distances given as X is not "
                                 "correct")
        if self.method == 'barnes_hut' and sp.issparse(X):
            raise TypeError('A sparse matrix was passed, but dense '
                            'data is required for method="barnes_hut". Use '
                            'X.toarray() to convert to a dense numpy array if '
                            'the array is small enough for it to fit in '
                            'memory. Otherwise consider dimensionality '
                            'reduction techniques (e.g. TruncatedSVD)')
        else:
            X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
                            dtype=[np.float32, np.float64])
        if self.method == 'barnes_hut' and self.n_components > 3:
            # The Barnes-Hut implementation partitions space with a
            # quad-tree (2D) or oct-tree (3D), so higher dimensions are out.
            raise ValueError("'n_components' should be inferior to 4 for the "
                             "barnes_hut algorithm as it relies on "
                             "quad-tree or oct-tree.")
        random_state = check_random_state(self.random_state)
        if self.early_exaggeration < 1.0:
            raise ValueError("early_exaggeration must be at least 1, but is {}"
                             .format(self.early_exaggeration))
        if self.n_iter < 250:
            raise ValueError("n_iter should be at least 250")
        # --- Build the input-space joint probabilities P --------------------
        n_samples = X.shape[0]
        neighbors_nn = None
        if self.method == "exact":
            # Retrieve the distance matrix, either using the precomputed one or
            # computing it.
            if self.metric == "precomputed":
                distances = X
            else:
                if self.verbose:
                    print("[t-SNE] Computing pairwise distances...")
                if self.metric == "euclidean":
                    distances = pairwise_distances(X, metric=self.metric,
                                                   squared=True)
                else:
                    distances = pairwise_distances(X, metric=self.metric)
                if np.any(distances < 0):
                    raise ValueError("All distances should be positive, the "
                                     "metric given is not correct")
            # compute the joint probability distribution for the input space
            P = _joint_probabilities(distances, self.perplexity, self.verbose)
            assert np.all(np.isfinite(P)), "All probabilities should be finite"
            assert np.all(P >= 0), "All probabilities should be non-negative"
            assert np.all(P <= 1), ("All probabilities should be less "
                                    "or then equal to one")
        else:
            # Compute the number of nearest neighbors to find.
            # LvdM uses 3 * perplexity as the number of neighbors.
            # In the event that we have very small # of points
            # set the neighbors to n - 1.
            k = min(n_samples - 1, int(3. * self.perplexity + 1))
            if self.verbose:
                print("[t-SNE] Computing {} nearest neighbors...".format(k))
            # Find the nearest neighbors for every point
            neighbors_method = 'ball_tree'
            if (self.metric == 'precomputed'):
                neighbors_method = 'brute'
            knn = NearestNeighbors(algorithm=neighbors_method, n_neighbors=k,
                                   metric=self.metric)
            t0 = time()
            knn.fit(X)
            duration = time() - t0
            if self.verbose:
                print("[t-SNE] Indexed {} samples in {:.3f}s...".format(
                    n_samples, duration))
            t0 = time()
            # kneighbors(None) queries the fitted training points themselves,
            # excluding each point from its own neighbor list.
            distances_nn, neighbors_nn = knn.kneighbors(
                None, n_neighbors=k)
            duration = time() - t0
            if self.verbose:
                print("[t-SNE] Computed neighbors for {} samples in {:.3f}s..."
                      .format(n_samples, duration))
            # Free the memory used by the ball_tree
            del knn
            if self.metric == "euclidean":
                # knn return the euclidean distance but we need it squared
                # to be consistent with the 'exact' method. Note that the
                # the method was derived using the euclidean method as in the
                # input space. Not sure of the implication of using a different
                # metric.
                distances_nn **= 2
            # compute the joint probability distribution for the input space
            P = _joint_probabilities_nn(distances_nn, neighbors_nn,
                                        self.perplexity, self.verbose)
        # --- Initialize the embedding ---------------------------------------
        if isinstance(self.init, np.ndarray):
            X_embedded = self.init
        elif self.init == 'pca':
            pca = PCA(n_components=self.n_components, svd_solver='randomized',
                      random_state=random_state)
            X_embedded = pca.fit_transform(X).astype(np.float32, copy=False)
        elif self.init == 'random':
            # The embedding is initialized with iid samples from Gaussians with
            # standard deviation 1e-4.
            X_embedded = 1e-4 * random_state.randn(
                n_samples, self.n_components).astype(np.float32)
        else:
            raise ValueError("'init' must be 'pca', 'random', or "
                             "a numpy array")
        # Degrees of freedom of the Student's t-distribution. The suggestion
        # degrees_of_freedom = n_components - 1 comes from
        # "Learning a Parametric Embedding by Preserving Local Structure"
        # Laurens van der Maaten, 2009.
        degrees_of_freedom = max(self.n_components - 1.0, 1)
        return self._tsne(P, degrees_of_freedom, n_samples, random_state,
                          X_embedded=X_embedded,
                          neighbors=neighbors_nn,
                          skip_num_points=skip_num_points)
    @property
    @deprecated("Attribute n_iter_final was deprecated in version 0.19 and "
                "will be removed in 0.21. Use ``n_iter_`` instead")
    def n_iter_final(self):
        # Deprecated alias kept for backward compatibility; see n_iter_.
        return self.n_iter_
    def _tsne(self, P, degrees_of_freedom, n_samples, random_state, X_embedded,
              neighbors=None, skip_num_points=0):
        """Runs t-SNE."""
        # t-SNE minimizes the Kullback-Leibler divergence of the Gaussians P
        # and the Student's t-distributions Q. The optimization algorithm that
        # we use is batch gradient descent with two stages:
        # * initial optimization with early exaggeration and momentum at 0.5
        # * final optimization with momentum at 0.8
        params = X_embedded.ravel()
        opt_args = {
            "it": 0,
            "n_iter_check": self._N_ITER_CHECK,
            "min_grad_norm": self.min_grad_norm,
            "learning_rate": self.learning_rate,
            "verbose": self.verbose,
            "kwargs": dict(skip_num_points=skip_num_points),
            "args": [P, degrees_of_freedom, n_samples, self.n_components],
            "n_iter_without_progress": self._EXPLORATION_N_ITER,
            "n_iter": self._EXPLORATION_N_ITER,
            "momentum": 0.5,
        }
        if self.method == 'barnes_hut':
            obj_func = _kl_divergence_bh
            opt_args['kwargs']['angle'] = self.angle
            # Repeat verbose argument for _kl_divergence_bh
            opt_args['kwargs']['verbose'] = self.verbose
        else:
            obj_func = _kl_divergence
        # Learning schedule (part 1): do 250 iterations with lower momentum but
        # higher learning rate controlled via the early exaggeration parameter
        P *= self.early_exaggeration
        params, kl_divergence, it = _gradient_descent(obj_func, params,
                                                      **opt_args)
        if self.verbose:
            print("[t-SNE] KL divergence after %d iterations with early "
                  "exaggeration: %f" % (it + 1, kl_divergence))
        # Learning schedule (part 2): disable early exaggeration and finish
        # optimization with a higher momentum at 0.8
        P /= self.early_exaggeration
        remaining = self.n_iter - self._EXPLORATION_N_ITER
        if it < self._EXPLORATION_N_ITER or remaining > 0:
            opt_args['n_iter'] = self.n_iter
            opt_args['it'] = it + 1
            opt_args['momentum'] = 0.8
            opt_args['n_iter_without_progress'] = self.n_iter_without_progress
            params, kl_divergence, it = _gradient_descent(obj_func, params,
                                                          **opt_args)
        # Save the final number of iterations
        self.n_iter_ = it
        if self.verbose:
            print("[t-SNE] Error after %d iterations: %f"
                  % (it + 1, kl_divergence))
        X_embedded = params.reshape(n_samples, self.n_components)
        self.kl_divergence_ = kl_divergence
        return X_embedded
    def fit_transform(self, X, y=None):
        """Fit X into an embedded space and return that transformed
        output.
        Parameters
        ----------
        X : array, shape (n_samples, n_features) or (n_samples, n_samples)
            If the metric is 'precomputed' X must be a square distance
            matrix. Otherwise it contains a sample per row.
        Returns
        -------
        X_new : array, shape (n_samples, n_components)
            Embedding of the training data in low-dimensional space.
        """
        embedding = self._fit(X)
        self.embedding_ = embedding
        return self.embedding_
    def fit(self, X, y=None):
        """Fit X into an embedded space.
        Parameters
        ----------
        X : array, shape (n_samples, n_features) or (n_samples, n_samples)
            If the metric is 'precomputed' X must be a square distance
            matrix. Otherwise it contains a sample per row. If the method
            is 'exact', X may be a sparse matrix of type 'csr', 'csc'
            or 'coo'.
        """
        self.fit_transform(X)
        return self
| bsd-3-clause |
ilyes14/scikit-learn | sklearn/cluster/tests/test_hierarchical.py | 230 | 19795 | """
Several basic tests for hierarchical clustering procedures
"""
# Authors: Vincent Michel, 2010, Gael Varoquaux 2012,
# Matteo Visconti di Oleggio Castello 2014
# License: BSD 3 clause
from tempfile import mkdtemp
import shutil
from functools import partial
import numpy as np
from scipy import sparse
from scipy.cluster import hierarchy
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.cluster import ward_tree
from sklearn.cluster import AgglomerativeClustering, FeatureAgglomeration
from sklearn.cluster.hierarchical import (_hc_cut, _TREE_BUILDERS,
linkage_tree)
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.metrics.pairwise import PAIRED_DISTANCES, cosine_distances,\
manhattan_distances, pairwise_distances
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.neighbors.graph import kneighbors_graph
from sklearn.cluster._hierarchical import average_merge, max_merge
from sklearn.utils.fast_dict import IntFloatDict
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns
def test_linkage_misc():
    # Assorted sanity checks on the linkage helpers.
    random_state = np.random.RandomState(42)
    data = random_state.normal(size=(5, 5))
    # Unknown linkage names and wrongly-shaped connectivity are rejected.
    assert_raises(ValueError, AgglomerativeClustering(linkage='foo').fit,
                  data)
    assert_raises(ValueError, linkage_tree, data, linkage='foo')
    assert_raises(ValueError, linkage_tree, data,
                  connectivity=np.ones((4, 4)))
    # Smoke test FeatureAgglomeration
    FeatureAgglomeration().fit(data)
    # A precomputed distance matrix must yield the same tree as letting
    # linkage_tree compute the matching metric itself.
    cosine_dist = cosine_distances(data)
    precomputed_tree = linkage_tree(cosine_dist, affinity="precomputed")
    assert_array_equal(precomputed_tree[0],
                       linkage_tree(data, affinity="cosine")[0])
    # A callable affinity must match its string-named counterpart.
    callable_tree = linkage_tree(data, affinity=manhattan_distances)
    assert_array_equal(callable_tree[0],
                       linkage_tree(data, affinity="manhattan")[0])
def test_structured_linkage_tree():
    # Check that we obtain the correct solution for structured linkage trees.
    rng = np.random.RandomState(0)
    # BUG FIX: ``np.bool`` was a deprecated alias of the builtin ``bool``
    # and was removed in NumPy 1.24; use the builtin directly.
    mask = np.ones([10, 10], dtype=bool)
    # Avoiding a mask with only 'True' entries
    mask[4:7, 4:7] = 0
    X = rng.randn(50, 100)
    connectivity = grid_to_graph(*mask.shape)
    for tree_builder in _TREE_BUILDERS.values():
        children, n_components, n_leaves, parent = \
            tree_builder(X.T, connectivity)
        # A full binary merge tree over n_leaves leaves has 2*n_leaves - 1
        # nodes in total (leaves + internal merges).
        n_nodes = 2 * X.shape[1] - 1
        assert_true(len(children) + n_leaves == n_nodes)
        # Check that ward_tree raises a ValueError with a connectivity matrix
        # of the wrong shape
        assert_raises(ValueError,
                      tree_builder, X.T, np.ones((4, 4)))
        # Check that fitting with no samples raises an error
        assert_raises(ValueError,
                      tree_builder, X.T[:0], connectivity)
def test_unstructured_linkage_tree():
    # Check that we obtain the correct solution for unstructured linkage trees.
    rng = np.random.RandomState(0)
    X = rng.randn(50, 100)
    # A full binary merge tree over X.shape[1] leaves has this many nodes.
    expected_n_nodes = 2 * X.shape[1] - 1
    for this_X in (X, X[0]):
        # Specifying a number of clusters without connectivity only serves
        # to trigger the warning path, which is what we exercise here.
        with ignore_warnings():
            children, n_nodes, n_leaves, parent = assert_warns(
                UserWarning, ward_tree, this_X.T, n_clusters=10)
        assert_equal(len(children) + n_leaves, expected_n_nodes)
    for tree_builder in _TREE_BUILDERS.values():
        for this_X in (X, X[0]):
            with ignore_warnings():
                children, n_nodes, n_leaves, parent = assert_warns(
                    UserWarning, tree_builder, this_X.T, n_clusters=10)
            assert_equal(len(children) + n_leaves, expected_n_nodes)
def test_height_linkage_tree():
    # Check that the height of the results of linkage tree is sorted.
    rng = np.random.RandomState(0)
    # BUG FIX: ``np.bool`` was a deprecated alias of the builtin ``bool``
    # and was removed in NumPy 1.24; use the builtin directly.
    mask = np.ones([10, 10], dtype=bool)
    X = rng.randn(50, 100)
    connectivity = grid_to_graph(*mask.shape)
    for linkage_func in _TREE_BUILDERS.values():
        children, n_nodes, n_leaves, parent = linkage_func(X.T, connectivity)
        # A full binary merge tree has 2*n_leaves - 1 nodes in total.
        n_nodes = 2 * X.shape[1] - 1
        assert_true(len(children) + n_leaves == n_nodes)
def test_agglomerative_clustering():
    # Check that we obtain the correct number of clusters with
    # agglomerative clustering.
    rng = np.random.RandomState(0)
    # BUG FIX: ``np.bool`` was a deprecated alias of the builtin ``bool``
    # and was removed in NumPy 1.24; use the builtin directly.
    mask = np.ones([10, 10], dtype=bool)
    n_samples = 100
    X = rng.randn(n_samples, 50)
    connectivity = grid_to_graph(*mask.shape)
    for linkage in ("ward", "complete", "average"):
        clustering = AgglomerativeClustering(n_clusters=10,
                                             connectivity=connectivity,
                                             linkage=linkage)
        clustering.fit(X)
        # test caching
        try:
            tempdir = mkdtemp()
            clustering = AgglomerativeClustering(
                n_clusters=10, connectivity=connectivity,
                memory=tempdir,
                linkage=linkage)
            clustering.fit(X)
            labels = clustering.labels_
            assert_true(np.size(np.unique(labels)) == 10)
        finally:
            shutil.rmtree(tempdir)
        # Turn caching off now
        clustering = AgglomerativeClustering(
            n_clusters=10, connectivity=connectivity, linkage=linkage)
        # Check that we obtain the same solution with early-stopping of the
        # tree building
        clustering.compute_full_tree = False
        clustering.fit(X)
        assert_almost_equal(normalized_mutual_info_score(clustering.labels_,
                                                         labels), 1)
        clustering.connectivity = None
        clustering.fit(X)
        assert_true(np.size(np.unique(clustering.labels_)) == 10)
        # Check that an error is raised when the connectivity shape does not
        # match the data (truncated 10x10 sparse matrix vs 100 samples).
        clustering = AgglomerativeClustering(
            n_clusters=10,
            connectivity=sparse.lil_matrix(
                connectivity.toarray()[:10, :10]),
            linkage=linkage)
        assert_raises(ValueError, clustering.fit, X)
    # Test that using ward with another metric than euclidean raises an
    # exception
    clustering = AgglomerativeClustering(
        n_clusters=10,
        connectivity=connectivity.toarray(),
        affinity="manhattan",
        linkage="ward")
    assert_raises(ValueError, clustering.fit, X)
    # Test using another metric than euclidean works with linkage complete
    for affinity in PAIRED_DISTANCES.keys():
        # Compare our (structured) implementation to scipy
        clustering = AgglomerativeClustering(
            n_clusters=10,
            connectivity=np.ones((n_samples, n_samples)),
            affinity=affinity,
            linkage="complete")
        clustering.fit(X)
        clustering2 = AgglomerativeClustering(
            n_clusters=10,
            connectivity=None,
            affinity=affinity,
            linkage="complete")
        clustering2.fit(X)
        assert_almost_equal(normalized_mutual_info_score(clustering2.labels_,
                                                         clustering.labels_),
                            1)
    # Test that using a distance matrix (affinity = 'precomputed') has same
    # results (with connectivity constraints)
    clustering = AgglomerativeClustering(n_clusters=10,
                                         connectivity=connectivity,
                                         linkage="complete")
    clustering.fit(X)
    X_dist = pairwise_distances(X)
    clustering2 = AgglomerativeClustering(n_clusters=10,
                                          connectivity=connectivity,
                                          affinity='precomputed',
                                          linkage="complete")
    clustering2.fit(X_dist)
    assert_array_equal(clustering.labels_, clustering2.labels_)
def test_ward_agglomeration():
    # Check that we obtain the correct solution in a simplistic case
    rng = np.random.RandomState(0)
    # BUG FIX: ``np.bool`` was a deprecated alias of the builtin ``bool``
    # and was removed in NumPy 1.24; use the builtin directly.
    mask = np.ones([10, 10], dtype=bool)
    X = rng.randn(50, 100)
    connectivity = grid_to_graph(*mask.shape)
    agglo = FeatureAgglomeration(n_clusters=5, connectivity=connectivity)
    agglo.fit(X)
    assert_true(np.size(np.unique(agglo.labels_)) == 5)
    # transform reduces to 5 cluster features; inverse_transform maps back,
    # so each row of the reconstruction takes at most 5 distinct values.
    X_red = agglo.transform(X)
    assert_true(X_red.shape[1] == 5)
    X_full = agglo.inverse_transform(X_red)
    assert_true(np.unique(X_full[0]).size == 5)
    assert_array_almost_equal(agglo.transform(X_full), X_red)
    # Check that fitting with no samples raises a ValueError
    assert_raises(ValueError, agglo.fit, X[:0])
def assess_same_labelling(cut1, cut2):
    """Assert that two label vectors induce the same partition.

    Builds the co-clustering matrix of each labelling (entry [i, j] is 1
    iff samples i and j share a label), which is invariant to label
    permutation, and asserts both matrices are equal. Used for comparison
    with scipy.

    Raises AssertionError if the partitions differ.
    """
    co_clust = []
    for cut in [cut1, cut2]:
        n = len(cut)
        k = cut.max() + 1
        # One-hot encode the labels, then ecut @ ecut.T gives co-membership.
        ecut = np.zeros((n, k))
        ecut[np.arange(n), cut] = 1
        co_clust.append(np.dot(ecut, ecut.T))
    # Plain assert instead of the long-deprecated (and since removed)
    # sklearn.utils.testing.assert_true helper; both raise AssertionError.
    assert (co_clust[0] == co_clust[1]).all()
def test_scikit_vs_scipy():
    # Test scikit linkage with full connectivity (i.e. unstructured) vs scipy
    n, p, k = 10, 5, 3
    rng = np.random.RandomState(0)
    # Not using a lil_matrix here, just to check that non sparse
    # matrices are well handled
    connectivity = np.ones((n, n))
    for linkage in _TREE_BUILDERS.keys():
        for i in range(5):
            X = .1 * rng.normal(size=(n, p))
            X -= 4. * np.arange(n)[:, np.newaxis]
            X -= X.mean(axis=1)[:, np.newaxis]
            out = hierarchy.linkage(X, method=linkage)
            # BUG FIX: ``np.int`` was a deprecated alias of the builtin
            # ``int`` and was removed in NumPy 1.24; use the builtin.
            children_ = out[:, :2].astype(int)
            children, _, n_leaves, _ = _TREE_BUILDERS[linkage](X, connectivity)
            # Cutting both trees at k clusters must give the same partition
            # (up to label permutation).
            cut = _hc_cut(k, children, n_leaves)
            cut_ = _hc_cut(k, children_, n_leaves)
            assess_same_labelling(cut, cut_)
    # Test error management in _hc_cut
    assert_raises(ValueError, _hc_cut, n_leaves + 1, children, n_leaves)
def test_connectivity_propagation():
    # Connectivity must be propagated correctly through merges in the ward
    # tree; if propagation is broken, fit() crashes with an IndexError.
    points = np.array([(.014, .120), (.014, .099), (.014, .097),
                       (.017, .153), (.017, .153), (.018, .153),
                       (.018, .153), (.018, .153), (.018, .153),
                       (.018, .153), (.018, .153), (.018, .153),
                       (.018, .152), (.018, .149), (.018, .144)])
    graph = kneighbors_graph(points, 10, include_self=False)
    model = AgglomerativeClustering(n_clusters=4, connectivity=graph,
                                    linkage='ward')
    model.fit(points)
def test_ward_tree_children_order():
    # Structured and unstructured ward_tree must report children in the same
    # order; checked on five random datasets.
    n, p = 10, 5
    rng = np.random.RandomState(0)
    connectivity = np.ones((n, n))
    for _ in range(5):
        X = .1 * rng.normal(size=(n, p))
        X -= 4. * np.arange(n)[:, np.newaxis]
        X -= X.mean(axis=1)[:, np.newaxis]
        unstructured_children = ward_tree(X)[0]
        structured_children = ward_tree(X, connectivity=connectivity)[0]
        assert_array_equal(unstructured_children, structured_children)
def test_ward_linkage_tree_return_distance():
    """Test the ``return_distance`` option on linkage and ward trees.

    First checks that, with ``return_distance=True``, structured and
    unstructured clustering give the same children and distances on random
    data; then checks the exact distances against known scipy ground truth.
    """
    # test that return_distance when set true, gives same
    # output on both structured and unstructured clustering.
    n, p = 10, 5
    rng = np.random.RandomState(0)

    connectivity = np.ones((n, n))
    for i in range(5):
        X = .1 * rng.normal(size=(n, p))
        X -= 4. * np.arange(n)[:, np.newaxis]
        X -= X.mean(axis=1)[:, np.newaxis]

        out_unstructured = ward_tree(X, return_distance=True)
        out_structured = ward_tree(X, connectivity=connectivity,
                                   return_distance=True)

        # get children
        children_unstructured = out_unstructured[0]
        children_structured = out_structured[0]

        # check if we got the same clusters
        assert_array_equal(children_unstructured, children_structured)

        # check if the distances are the same
        dist_unstructured = out_unstructured[-1]
        dist_structured = out_structured[-1]

        assert_array_almost_equal(dist_unstructured, dist_structured)

        for linkage in ['average', 'complete']:
            # NOTE(review): the trailing ``[-1]`` selects only the *last*
            # element of the linkage_tree result, so the ``[0]`` lookups below
            # index into that element rather than into the full result tuple.
            # This looks suspicious — confirm against the linkage_tree return
            # signature before relying on the "children" comparison here.
            structured_items = linkage_tree(
                X, connectivity=connectivity, linkage=linkage,
                return_distance=True)[-1]
            unstructured_items = linkage_tree(
                X, linkage=linkage, return_distance=True)[-1]
            structured_dist = structured_items[-1]
            unstructured_dist = unstructured_items[-1]
            structured_children = structured_items[0]
            unstructured_children = unstructured_items[0]
            assert_array_almost_equal(structured_dist, unstructured_dist)
            assert_array_almost_equal(
                structured_children, unstructured_children)

    # test on the following dataset where we know the truth
    # taken from scipy/cluster/tests/hierarchy_test_data.py
    X = np.array([[1.43054825, -7.5693489],
                  [6.95887839, 6.82293382],
                  [2.87137846, -9.68248579],
                  [7.87974764, -6.05485803],
                  [8.24018364, -6.09495602],
                  [7.39020262, 8.54004355]])
    # truth
    linkage_X_ward = np.array([[3., 4., 0.36265956, 2.],
                               [1., 5., 1.77045373, 2.],
                               [0., 2., 2.55760419, 2.],
                               [6., 8., 9.10208346, 4.],
                               [7., 9., 24.7784379, 6.]])

    linkage_X_complete = np.array(
        [[3., 4., 0.36265956, 2.],
         [1., 5., 1.77045373, 2.],
         [0., 2., 2.55760419, 2.],
         [6., 8., 6.96742194, 4.],
         [7., 9., 18.77445997, 6.]])

    linkage_X_average = np.array(
        [[3., 4., 0.36265956, 2.],
         [1., 5., 1.77045373, 2.],
         [0., 2., 2.55760419, 2.],
         [6., 8., 6.55832839, 4.],
         [7., 9., 15.44089605, 6.]])

    n_samples, n_features = np.shape(X)
    connectivity_X = np.ones((n_samples, n_samples))

    out_X_unstructured = ward_tree(X, return_distance=True)
    out_X_structured = ward_tree(X, connectivity=connectivity_X,
                                 return_distance=True)

    # check that the labels are the same
    assert_array_equal(linkage_X_ward[:, :2], out_X_unstructured[0])
    assert_array_equal(linkage_X_ward[:, :2], out_X_structured[0])

    # check that the distances are correct
    assert_array_almost_equal(linkage_X_ward[:, 2], out_X_unstructured[4])
    assert_array_almost_equal(linkage_X_ward[:, 2], out_X_structured[4])

    linkage_options = ['complete', 'average']
    X_linkage_truth = [linkage_X_complete, linkage_X_average]
    for (linkage, X_truth) in zip(linkage_options, X_linkage_truth):
        out_X_unstructured = linkage_tree(
            X, return_distance=True, linkage=linkage)
        out_X_structured = linkage_tree(
            X, connectivity=connectivity_X, linkage=linkage,
            return_distance=True)

        # check that the labels are the same
        assert_array_equal(X_truth[:, :2], out_X_unstructured[0])
        assert_array_equal(X_truth[:, :2], out_X_structured[0])

        # check that the distances are correct
        assert_array_almost_equal(X_truth[:, 2], out_X_unstructured[4])
        assert_array_almost_equal(X_truth[:, 2], out_X_structured[4])
def test_connectivity_fixing_non_lil():
    """Non-regression test: a non item-assignable connectivity matrix with
    more than one component must be repaired (with a warning), not crash."""
    data = np.array([[0, 0], [1, 1]])
    # A mask with two disconnected components forces the connectivity fix-up.
    mask = np.array([[True, False], [False, True]])
    graph = grid_to_graph(n_x=2, n_y=2, mask=mask)
    model = AgglomerativeClustering(connectivity=graph, linkage='ward')
    assert_warns(UserWarning, model.fit, data)
def test_int_float_dict():
    """Smoke-test the IntFloatDict mapping and its merge helpers."""
    rng = np.random.RandomState(0)
    keys = np.unique(rng.randint(100, size=10).astype(np.intp))
    values = rng.rand(len(keys))

    mapping = IntFloatDict(keys, values)
    # Every inserted key must map back to exactly the value it was given.
    for key, expected in zip(keys, values):
        assert mapping[key] == expected

    even_keys = np.arange(50).astype(np.intp)[::2]
    even_values = (0.5 * np.ones(50))[::2]
    other = IntFloatDict(even_keys, even_values)

    # Complete smoke test: the merge helpers must run without raising.
    mask = np.ones(100, dtype=np.intp)
    max_merge(mapping, other, mask=mask, n_a=1, n_b=1)
    average_merge(mapping, other, mask=mask, n_a=1, n_b=1)
def test_connectivity_callable():
    """A callable connectivity must give the same labels as the
    equivalent precomputed graph."""
    rng = np.random.RandomState(0)
    X = rng.rand(20, 5)
    precomputed = kneighbors_graph(X, 3, include_self=False)
    model_graph = AgglomerativeClustering(connectivity=precomputed)
    model_callable = AgglomerativeClustering(
        connectivity=partial(kneighbors_graph, n_neighbors=3,
                             include_self=False))
    model_graph.fit(X)
    model_callable.fit(X)
    assert_array_equal(model_graph.labels_, model_callable.labels_)
def test_connectivity_ignores_diagonal():
    """Self-edges (the diagonal of the connectivity graph) must not
    change the clustering result."""
    rng = np.random.RandomState(0)
    X = rng.rand(20, 5)
    graph_no_self = kneighbors_graph(X, 3, include_self=False)
    graph_with_self = kneighbors_graph(X, 3, include_self=True)
    model_no_self = AgglomerativeClustering(connectivity=graph_no_self)
    model_with_self = AgglomerativeClustering(connectivity=graph_with_self)
    model_no_self.fit(X)
    model_with_self.fit(X)
    assert_array_equal(model_no_self.labels_, model_with_self.labels_)
def test_compute_full_tree():
    """Check the heuristic deciding whether the full merge tree is built.

    With few requested clusters the full tree (n_samples - 1 merges) is
    computed; with many requested clusters merging stops early, leaving
    n_samples - n_clusters merges.
    """
    # Test that the full tree is computed if n_clusters is small
    rng = np.random.RandomState(0)
    X = rng.randn(10, 2)
    connectivity = kneighbors_graph(X, 5, include_self=False)

    # When n_clusters is less, the full tree should be built
    # that is the number of merges should be n_samples - 1
    agc = AgglomerativeClustering(n_clusters=2, connectivity=connectivity)
    agc.fit(X)
    n_samples = X.shape[0]
    n_nodes = agc.children_.shape[0]
    assert_equal(n_nodes, n_samples - 1)

    # When n_clusters is large, greater than max of 100 and 0.02 * n_samples.
    # we should stop when there are n_clusters.
    n_clusters = 101
    X = rng.randn(200, 2)
    connectivity = kneighbors_graph(X, 10, include_self=False)
    agc = AgglomerativeClustering(n_clusters=n_clusters,
                                  connectivity=connectivity)
    agc.fit(X)
    n_samples = X.shape[0]
    n_nodes = agc.children_.shape[0]
    assert_equal(n_nodes, n_samples - n_clusters)
def test_n_components():
    """Every tree builder must report five connected components for an
    identity connectivity matrix (five isolated samples)."""
    rng = np.random.RandomState(0)
    X = rng.rand(5, 5)

    # The identity matrix connects each sample only to itself.
    connectivity = np.eye(5)

    for builder in _TREE_BUILDERS.values():
        n_components = ignore_warnings(builder)(X, connectivity)[1]
        assert_equal(n_components, 5)
def test_agg_n_clusters():
    """``fit`` must raise ValueError with an explicit message when
    ``n_clusters`` is not a positive integer."""
    rng = np.random.RandomState(0)
    X = rng.rand(20, 10)
    for bad_n_clusters in (-1, 0):
        model = AgglomerativeClustering(n_clusters=bad_n_clusters)
        expected = ("n_clusters should be an integer greater than 0."
                    " %s was provided." % str(model.n_clusters))
        assert_raise_message(ValueError, expected, model.fit, X)
| bsd-3-clause |
julienmalard/Tinamit | tinamit/geog/mapa.py | 1 | 14476 | import os
import matplotlib.pyplot as dib
import numpy as np
import shapefile as sf
from matplotlib import colors, cm
from matplotlib.axes import Axes
from matplotlib.backends.backend_agg import FigureCanvasAgg as TelaFigura
from matplotlib.figure import Figure as Figura
from tinamit.config import _
from ..mod import ResultadosSimul, ResultadosGrupo
def dibujar_mapa(formas, archivo=None, título=None, fig=None, args_color=None):
    """
    Draw a map made of one or several shapes.

    Parameters
    ----------
    formas: list of Forma or Forma
        The shape(s) to include in the map.
    archivo: str
        Where to save the figure. If ``None``, the figure is not saved.
    título: str
        Title of the map.
    fig: matplotlib.Figure
        Figure to draw on. If ``None``, an off-screen (Agg) figure is created.
    args_color: dict
        Extra keyword arguments forwarded to the colorbar of dynamic shapes.

    Returns
    -------
    tuple[Figure, Axes]
        The figure and its axes.
    """
    # Accept a single shape as well as a list of shapes.
    formas = [formas] if isinstance(formas, Forma) else formas
    if not fig:
        fig = Figura()
        TelaFigura(fig)
    ejes = fig.add_subplot(111)
    # Equal aspect so geographic coordinates are not distorted.
    ejes.set_aspect('equal')
    for frm in formas:
        frm.dibujar(ejes, fig, args_color=args_color)
    if título is not None:
        ejes.set_title(título)
    if archivo:
        fig.savefig(archivo, dpi=500)
    return fig, ejes
def dibujar_mapa_de_res(
        forma_dinámica, res, var, t, escala=None, título='', directorio=None, otras_formas=None
):
    """
    Draw the results of a simulation on a map, one figure per time step.

    Parameters
    ----------
    forma_dinámica: FormaDinámica
        Shape whose colours will vary with the results.
    res: ResultadosSimul or ResultadosGrupo
        The results to draw.
    var: str
        The variable of interest.
    t: int or tuple or range or list
        The time step(s) at which to draw the results.
    escala: tuple
        Colour range. If ``None``, it is taken from the results themselves.
    título: str
        Title of the figure(s).
    directorio: str
        Where to save the figure(s); if ``None``, figures are shown instead.
    otras_formas: list of FormaEstática or FormaEstática
        Additional static shapes to include on the map.
    """
    título = título or res.nombre
    otras_formas = otras_formas or []
    if isinstance(otras_formas, FormaEstática):
        otras_formas = [otras_formas]

    def _extr_i(matr, i):
        # Integer index -> positional step; otherwise select by date label.
        return matr[i] if isinstance(i, int) else matr.sel(**{_('fecha'): i}).values

    if isinstance(res, ResultadosSimul):
        # Single simulation: one value array for the variable.
        res_var = res[var].vals
        unids = res[var].var.unid
        escala = escala or (np.min(res_var.values), np.max(res_var.values))

        def _extr_res_i(i):
            return _extr_i(res_var, i)
    else:
        # Group of simulations: one value array per member; the colour scale
        # covers the whole group so figures are comparable.
        unids = list(res.values())[0][var].var.unid
        todos_vals = np.array([res_lg[var].vals for res_lg in res.values()])
        escala = escala or (np.min(todos_vals), np.max(todos_vals))

        def _extr_res_i(i):
            return {lg: _extr_i(res_lg[var].vals, i) for lg, res_lg in res.items()}

    if isinstance(t, int):
        pasos = range(t, t + 1)
    else:
        pasos = t

    for i_p in pasos:
        título_i = '{}_{}'.format(título, i_p)
        res_i = _extr_res_i(i_p)
        forma_dinámica.estab_valores(res_i, escala_valores=escala, unidades=unids)
        arch_i = os.path.join(directorio, título_i) if directorio is not None else None
        # When saving to disk, use an off-screen figure; otherwise open a
        # regular pyplot figure for interactive display.
        fig_i = None if directorio else dib.figure()
        dibujar_mapa([forma_dinámica, *otras_formas], archivo=arch_i, título=título_i, fig=fig_i)
class Forma(object):
    """
    Parent class for every shape that can be drawn on a map.
    """

    def __init__(símismo, archivo, llenar, alpha, **argsll):
        # codif = detectar_codif(os.path.splitext(archivo)[0] + '.dbf')
        símismo.forma = sf.Reader(archivo, **argsll)  # , encoding=codif)
        símismo.llenar = llenar  # fill polygons vs. outline only
        símismo.alpha = alpha  # opacity of the drawn shape

    def dibujar(símismo, ejes, fig, args_color=None):
        """
        Add the shape to the figure. Must be implemented by subclasses.

        Parameters
        ----------
        ejes:
            The axes of the figure.
        fig:
            The figure.
        """
        raise NotImplementedError

    def _dibujar_frm(símismo, ejes, color):
        # ``color`` is either a single colour (applied to every polygon) or
        # an ndarray with one colour per polygon.
        for i, frm in enumerate(símismo.forma.shapes()):
            puntos = frm.points
            partes = frm.parts
            for ip, i0 in enumerate(partes):  # For each part of the shape
                # The part index list gives start offsets; the end of a part
                # is one before the next start (or the last point overall).
                if ip < len(partes) - 1:
                    i1 = partes[ip + 1] - 1
                else:
                    i1 = len(puntos)
                seg = puntos[i0:i1 + 1]
                x_lon = np.zeros((len(seg), 1))
                y_lat = np.zeros((len(seg), 1))
                for j in range(len(seg)):
                    x_lon[j] = seg[j][0]
                    y_lat[j] = seg[j][1]
                clr = color[i] if isinstance(color, np.ndarray) else color
                if símismo.llenar:
                    ejes.fill(x_lon, y_lat, color=clr, alpha=símismo.alpha)
                else:
                    ejes.plot(x_lon, y_lat, color=clr, alpha=símismo.alpha)
class FormaEstática(Forma):
    """
    Base class for static map shapes, whose colours never change.
    """

    def __init__(símismo, archivo, color, llenar, alpha):
        super().__init__(archivo, llenar=llenar, alpha=alpha)
        símismo.color = color

    def dibujar(símismo, ejes, fig, args_color=None):
        """Draw the shape with its fixed colour (``args_color`` is unused)."""
        símismo._dibujar_frm(ejes, color=símismo.color)
class FormaDinámica(Forma):
    """
    Shape whose polygon colours are assigned from numeric values.
    """

    def __init__(símismo, archivo, escala_colores=None, llenar=True, alpha=1, **argsll):
        """
        Parameters
        ----------
        archivo: str
            The ``.shp`` file.
        escala_colores: list or tuple or str or int or None
            Two colours defining a colour scale. A single colour is paired
            with white; ``-1`` reverses the default scale.
        llenar: bool
            Whether to fill the shape or only draw its outline.
        alpha: float or int
            Opacity of the shape interior. Only applies if ``llenar`` is
            ``False``.
        """
        super().__init__(archivo, llenar=llenar, alpha=alpha, **argsll)
        símismo.escala_colores = símismo._resolver_colores(escala_colores)
        # One value per polygon; NaN marks "no data" (drawn white).
        símismo.valores = np.full(len(símismo.forma.shapes()), np.nan)
        símismo.unidades = None
        símismo.escala = None

    def estab_valores(símismo, valores, escala_valores=None, unidades=None):
        """
        Set the values used for colouring.

        Parameters
        ----------
        valores: np.ndarray or dict
            The values to draw. Must match the size of the ``.shp`` file.
        escala_valores: tuple or list or None
            The colour-scale range. If ``None``, it is fitted to the values.
        unidades: str, optional
            The units (shown on the colorbar).
        """
        símismo.unidades = unidades
        símismo._llenar_valores(valores)
        if escala_valores is None:
            if np.all(np.isnan(símismo.valores)):
                escala_valores = (0, 1)
            else:
                escala_valores = (np.nanmin(símismo.valores), np.nanmax(símismo.valores))
            # Avoid a zero-width range (division by zero when normalising).
            if escala_valores[0] == escala_valores[1]:
                escala_valores = (escala_valores[0] - 0.5, escala_valores[0] + 0.5)
        símismo.escala = escala_valores

    def dibujar(símismo, ejes, fig, args_color=None):
        args_color = args_color or {}
        # Normalise values to [0, 1] for the colour map.
        vals_norm = (símismo.valores - símismo.escala[0]) / (símismo.escala[1] - símismo.escala[0])
        d_clrs = _gen_d_mapacolores(colores=símismo.escala_colores)
        mapa_color = colors.LinearSegmentedColormap('mapa_color', d_clrs)
        norm = colors.Normalize(vmin=símismo.escala[0], vmax=símismo.escala[1])
        cpick = cm.ScalarMappable(norm=norm, cmap=mapa_color)
        cpick.set_array(np.array([]))
        v_cols = mapa_color(vals_norm)
        # Polygons with missing values are drawn white (RGBA all ones).
        v_cols[np.isnan(vals_norm)] = 1
        símismo._dibujar_frm(ejes=ejes, color=v_cols)
        if símismo.unidades is not None:
            fig.colorbar(cpick, label=símismo.unidades, ax=ejes, **args_color)
        else:
            fig.colorbar(cpick, ax=ejes, **args_color)

    @staticmethod
    def _resolver_colores(colores):
        # Translate the user-supplied colour spec into a list of hex colours.
        if colores is None:
            return ['#FF6666', '#FFCC66', '#00CC66']
        elif colores == -1:
            # Reversed default scale.
            return ['#00CC66', '#FFCC66', '#FF6666']
        elif isinstance(colores, str):
            # A single colour is paired with white.
            return ['#FFFFFF', colores]
        return colores

    def _extraer_col(símismo, col):
        # Return the values of attribute column ``col`` for every polygon.
        nombres_attr = [field[0] for field in símismo.forma.fields[1:]]
        try:
            return [x.record[nombres_attr.index(col)] for x in símismo.forma.shapeRecords()]
        except ValueError:
            raise ValueError(_('La columna "{}" no existe en la base de datos.').format(col))

    def _llenar_valores(símismo, valores):
        # How values are matched to polygons is subclass-specific.
        raise NotImplementedError
class FormaDinámicaNumérica(FormaDinámica):
    """
    Dynamic shape whose values are matched to the ``.shp`` polygons by their
    position in the value array (optionally remapped through an id column).
    """

    def __init__(símismo, archivo, col_id=None, escala_colores=None, llenar=True, alpha=1):
        """
        Parameters
        ----------
        archivo: str
            The ``.shp`` file.
        col_id: str, optional
            The attribute column with each polygon's number. If ``None``,
            polygons are numbered by their order in the ``.shp`` file.
        escala_colores: list or tuple or str or int or None
            Two colours defining a colour scale. A single colour is paired
            with white; ``-1`` reverses the default scale.
        llenar: bool
            Whether to fill the shape or only draw its outline.
        alpha: float or int
            Opacity of the shape interior. Only applies if ``llenar`` is
            ``False``.
        """
        super().__init__(archivo, escala_colores, llenar, alpha)
        if col_id is not None:
            ids = np.array(símismo._extraer_col(col_id))
            # Shift ids so that they start at 0 and can index the value array.
            símismo.ids = ids - np.min(ids)
        else:
            símismo.ids = None

    def _llenar_valores(símismo, valores):
        if símismo.ids is None:
            # No id column: values are already in polygon order.
            símismo.valores[:] = valores
        else:
            # Reorder the values through the polygon ids.
            símismo.valores[:] = valores[símismo.ids]
class FormaDinámicaNombrada(FormaDinámica):
    """
    Dynamic shape whose values are matched to the ``.shp`` polygons by
    their key in the value dictionary.
    """

    def __init__(símismo, archivo, col_id, escala_colores=None, llenar=True, alpha=1, **argsll):
        """
        Parameters
        ----------
        archivo: str
            The ``.shp`` file.
        col_id: str
            The attribute column in the ``.shp`` file with each polygon's name.
        escala_colores: list or tuple or str or int or None
            Two colours defining a colour scale. A single colour is paired
            with white; ``-1`` reverses the default scale.
        llenar: bool
            Whether to fill the shape or only draw its outline.
        alpha: float or int
            Opacity of the shape interior. Only applies if ``llenar`` is
            ``False``.
        """
        super().__init__(archivo, escala_colores, llenar, alpha, **argsll)
        # Normalise polygon names to strings so dictionary keys match.
        símismo.ids = [str(x) for x in símismo._extraer_col(col_id)]

    def _llenar_valores(símismo, valores):
        # Polygons without an entry in ``valores`` keep NaN (drawn white).
        símismo.valores[:] = np.nan
        for id_, val in valores.items():
            # Raises ValueError if ``id_`` does not name a polygon.
            i = símismo.ids.index(id_)
            símismo.valores[i] = val
class Agua(FormaEstática):
    """
    Static shape representing bodies of water.
    """

    def __init__(símismo, archivo, llenar=True):
        """
        Parameters
        ----------
        archivo: str
            The ``.shp`` file.
        llenar: bool
            Whether to fill the water body or only outline it.
        """
        super().__init__(archivo=archivo, llenar=llenar, color='#13A6DD', alpha=0.5)
class Bosque(FormaEstática):
    """
    Static shape representing forested areas.
    """

    def __init__(símismo, archivo):
        super().__init__(archivo=archivo, llenar=True, color='#33A02C', alpha=0.7)
class Calle(FormaEstática):
    """
    Static shape representing streets (drawn as outlines only).
    """

    def __init__(símismo, archivo):
        super().__init__(archivo=archivo, llenar=False, color='#585763', alpha=1)
class Ciudad(FormaEstática):
    """
    Static shape representing urban areas.
    """

    def __init__(símismo, archivo):
        super().__init__(archivo=archivo, llenar=True, color='#FB9A99', alpha=1)
class OtraForma(FormaEstática):
    """
    Static shape for areas not covered by the other available shapes.
    """

    def __init__(símismo, archivo):
        super().__init__(archivo=archivo, llenar=True, color='#FFECB3', alpha=1)
def _hex_a_rva(hx):
"""
Convierte colores RVA a Hex.
Parameters
----------
hx: str
El valor hex.
Returns
-------
tuple
El valor rva.
"""
return tuple(int(hx.lstrip('#')[i:i + 2], 16) for i in (0, 2, 4))
def _gen_d_mapacolores(colores):
    """
    Generate a colour-map segment dictionary for MatPlotLib.

    Parameters
    ----------
    colores: list
        A list of hex colours, evenly spaced along the scale.

    Returns
    -------
    dict
        A ``segmentdata`` dictionary suitable for
        ``matplotlib.colors.LinearSegmentedColormap``.
    """
    rva = [_hex_a_rva(c) for c in colores]
    n = len(colores)

    def _canal(canal):
        # One (anchor, value_below, value_above) triple per colour; anchors
        # are evenly spaced in [0, 1] and channel values scaled to [0, 1].
        return tuple(
            (round(i / (n - 1), 2), rva[i][canal] / 255, rva[i][canal] / 255)
            for i in range(n)
        )

    return {'red': _canal(0), 'green': _canal(1), 'blue': _canal(2)}
| gpl-3.0 |
IndraVikas/scikit-learn | examples/cluster/plot_ward_structured_vs_unstructured.py | 320 | 3369 | """
===========================================================
Hierarchical clustering: structured vs unstructured ward
===========================================================
Example builds a swiss roll dataset and runs
hierarchical clustering on their position.
For more information, see :ref:`hierarchical_clustering`.
In a first step, the hierarchical clustering is performed without connectivity
constraints on the structure and is solely based on distance, whereas in
a second step the clustering is restricted to the k-Nearest Neighbors
graph: it's a hierarchical clustering with structure prior.
Some of the clusters learned without connectivity constraints do not
respect the structure of the swiss roll and extend across different folds of
the manifolds. On the opposite, when opposing connectivity constraints,
the clusters form a nice parcellation of the swiss roll.
"""
# Authors : Vincent Michel, 2010
# Alexandre Gramfort, 2010
# Gael Varoquaux, 2010
# License: BSD 3 clause
print(__doc__)

import time as time
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3
from sklearn.cluster import AgglomerativeClustering
from sklearn.datasets.samples_generator import make_swiss_roll

###############################################################################
# Generate data (swiss roll dataset)
n_samples = 1500
noise = 0.05
X, _ = make_swiss_roll(n_samples, noise)
# Make it thinner
X[:, 1] *= .5

###############################################################################
# Compute clustering
print("Compute unstructured hierarchical clustering...")
st = time.time()
ward = AgglomerativeClustering(n_clusters=6, linkage='ward').fit(X)
elapsed_time = time.time() - st
label = ward.labels_
print("Elapsed time: %.2fs" % elapsed_time)
print("Number of points: %i" % label.size)

###############################################################################
# Plot result
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(label):
    # Bug fix: ``np.float`` was deprecated (NumPy 1.20) and removed (1.24);
    # the builtin ``float`` is identical and matches the second plot below.
    ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2],
              'o', color=plt.cm.jet(float(l) / np.max(label + 1)))
plt.title('Without connectivity constraints (time %.2fs)' % elapsed_time)


###############################################################################
# Define the structure A of the data. Here a 10 nearest neighbors
from sklearn.neighbors import kneighbors_graph
connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)

###############################################################################
# Compute clustering
print("Compute structured hierarchical clustering...")
st = time.time()
ward = AgglomerativeClustering(n_clusters=6, connectivity=connectivity,
                               linkage='ward').fit(X)
elapsed_time = time.time() - st
label = ward.labels_
print("Elapsed time: %.2fs" % elapsed_time)
print("Number of points: %i" % label.size)

###############################################################################
# Plot result
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(label):
    ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2],
              'o', color=plt.cm.jet(float(l) / np.max(label + 1)))
plt.title('With connectivity constraints (time %.2fs)' % elapsed_time)

plt.show()
| bsd-3-clause |
fnielsen/dasem | dasem/data.py | 1 | 1750 | """Data.
Functions to read datasets from the data subdirectory.
"""
from os.path import join, split
from pandas import read_csv
def four_words():
    """Read and return four words odd-one-out dataset.

    Returns
    -------
    df : pandas.DataFrame
        Dataframe with the four-word odd-one-out rows.

    Examples
    --------
    >>> df = four_words()
    >>> df.loc[0, 'word4'] == 'stol'
    True

    """
    filename = join(split(__file__)[0], 'data', 'four_words.csv')
    df = read_csv(filename, encoding='utf-8')
    return df
def verbal_analogies():
    """Read and return verbal analogies dataset.

    Returns
    -------
    df : pandas.DataFrame
        Dataframe with verbal analogies.

    Examples
    --------
    >>> df = verbal_analogies()
    >>> df.ix[0, :].tolist() == ['mand', 'kvinde', 'dreng', 'pige']
    True

    """
    data_filename = join(split(__file__)[0], 'data', 'verbal_analogies.csv')
    return read_csv(data_filename, encoding='utf-8')
def wordsim353(include_problems=False):
    """Read and return wordsim353 dataset.

    Parameters
    ----------
    include_problems : bool, optional
        Whether to keep rows whose Danish/English translation is
        problematic [default: False].

    Returns
    -------
    df : pandas.DataFrame
        Dataframe with Danish wordsim353 data.

    Examples
    --------
    >>> df = wordsim353(include_problems=True)
    >>> df.shape
    (353, 6)

    References
    ----------
    The WordSimilarity-353 Test Collection,
    http://www.cs.technion.ac.il/~gabr/resources/data/wordsim353/

    """
    data_filename = join(split(__file__)[0], 'data', 'wordsim353-da',
                         'combined.csv')
    data = read_csv(data_filename, encoding='utf-8')
    if include_problems:
        return data
    # Rows flagged with Problem == 1 have dubious translations.
    return data[data.Problem != 1]
| apache-2.0 |
chrisburr/scikit-learn | sklearn/tests/test_kernel_approximation.py | 244 | 7588 | import numpy as np
from scipy.sparse import csr_matrix
from sklearn.utils.testing import assert_array_equal, assert_equal, assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal, assert_raises
from sklearn.utils.testing import assert_less_equal
from sklearn.metrics.pairwise import kernel_metrics
from sklearn.kernel_approximation import RBFSampler
from sklearn.kernel_approximation import AdditiveChi2Sampler
from sklearn.kernel_approximation import SkewedChi2Sampler
from sklearn.kernel_approximation import Nystroem
from sklearn.metrics.pairwise import polynomial_kernel, rbf_kernel
# generate data
rng = np.random.RandomState(0)
X = rng.random_sample(size=(300, 50))
Y = rng.random_sample(size=(300, 50))

# Normalise each row to sum to 1: the chi-squared kernels tested below
# expect non-negative, histogram-like inputs.
X /= X.sum(axis=1)[:, np.newaxis]
Y /= Y.sum(axis=1)[:, np.newaxis]
def test_additive_chi2_sampler():
    """AdditiveChi2Sampler must approximate the additive chi-squared
    kernel on random histogram-like data, accept sparse input, and
    validate its parameters."""
    # compute exact kernel
    # abbreviations for an easier formula
    X_ = X[:, np.newaxis, :]
    Y_ = Y[np.newaxis, :, :]
    large_kernel = 2 * X_ * Y_ / (X_ + Y_)

    # reduce to n_samples_x x n_samples_y by summing over features
    kernel = (large_kernel.sum(axis=2))

    # approximate kernel mapping
    transform = AdditiveChi2Sampler(sample_steps=3)
    X_trans = transform.fit_transform(X)
    Y_trans = transform.transform(Y)
    kernel_approx = np.dot(X_trans, Y_trans.T)

    assert_array_almost_equal(kernel, kernel_approx, 1)

    # Sparse input must give the same feature map as dense input.
    X_sp_trans = transform.fit_transform(csr_matrix(X))
    Y_sp_trans = transform.transform(csr_matrix(Y))

    assert_array_equal(X_trans, X_sp_trans.A)
    assert_array_equal(Y_trans, Y_sp_trans.A)

    # test error is raised on negative input
    Y_neg = Y.copy()
    Y_neg[0, 0] = -1
    assert_raises(ValueError, transform.transform, Y_neg)

    # test error on invalid sample_steps
    transform = AdditiveChi2Sampler(sample_steps=4)
    assert_raises(ValueError, transform.fit, X)

    # test that the sample interval is set correctly
    sample_steps_available = [1, 2, 3]
    for sample_steps in sample_steps_available:

        # test that the sample_interval is initialized correctly
        transform = AdditiveChi2Sampler(sample_steps=sample_steps)
        assert_equal(transform.sample_interval, None)

        # test that the sample_interval is changed in the fit method
        transform.fit(X)
        assert_not_equal(transform.sample_interval_, None)

    # test that an explicitly provided sample_interval is kept verbatim
    sample_interval = 0.3
    transform = AdditiveChi2Sampler(sample_steps=4,
                                    sample_interval=sample_interval)
    assert_equal(transform.sample_interval, sample_interval)
    transform.fit(X)
    assert_equal(transform.sample_interval_, sample_interval)
def test_skewed_chi2_sampler():
    """SkewedChi2Sampler must approximate the skewed chi-squared kernel
    on random histogram-like data."""
    # compute exact kernel
    c = 0.03
    # abbreviations for an easier formula
    X_c = (X + c)[:, np.newaxis, :]
    Y_c = (Y + c)[np.newaxis, :, :]

    # we do it in log-space in the hope that it's more stable
    # this array is n_samples_x x n_samples_y big x n_features
    log_kernel = ((np.log(X_c) / 2.) + (np.log(Y_c) / 2.) + np.log(2.) -
                  np.log(X_c + Y_c))
    # reduce to n_samples_x x n_samples_y by summing over features in log-space
    kernel = np.exp(log_kernel.sum(axis=2))

    # approximate kernel mapping
    transform = SkewedChi2Sampler(skewedness=c, n_components=1000,
                                  random_state=42)
    X_trans = transform.fit_transform(X)
    Y_trans = transform.transform(Y)

    kernel_approx = np.dot(X_trans, Y_trans.T)
    assert_array_almost_equal(kernel, kernel_approx, 1)

    # test error is raised on negative input
    Y_neg = Y.copy()
    Y_neg[0, 0] = -1
    assert_raises(ValueError, transform.transform, Y_neg)
def test_rbf_sampler():
    """RBFSampler's random feature map must approximate the exact RBF
    kernel on random data."""
    gamma = 10.
    exact_kernel = rbf_kernel(X, Y, gamma=gamma)

    # approximate kernel mapping
    sampler = RBFSampler(gamma=gamma, n_components=1000, random_state=42)
    X_mapped = sampler.fit_transform(X)
    Y_mapped = sampler.transform(Y)
    approx_kernel = np.dot(X_mapped, Y_mapped.T)

    error = exact_kernel - approx_kernel
    assert_less_equal(np.abs(np.mean(error)), 0.01)  # close to unbiased
    np.abs(error, out=error)
    assert_less_equal(np.max(error), 0.1)  # nothing too far off
    assert_less_equal(np.mean(error), 0.05)  # mean is fairly close
def test_input_validation():
    """Regression test: kernel approximation transformers must accept
    plain Python lists (no assertions; old versions simply crashed)."""
    data = [[1, 2], [3, 4], [5, 6]]
    for transformer in (AdditiveChi2Sampler(), SkewedChi2Sampler(),
                        RBFSampler()):
        transformer.fit(data).transform(data)

    sparse_data = csr_matrix(data)
    RBFSampler().fit(sparse_data).transform(sparse_data)
def test_nystroem_approximation():
    """Basic sanity checks for the Nystroem feature-map approximation."""
    rnd = np.random.RandomState(0)
    X = rnd.uniform(size=(10, 4))

    # With n_components = n_samples this is exact
    X_transformed = Nystroem(n_components=X.shape[0]).fit_transform(X)
    K = rbf_kernel(X)
    assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)

    trans = Nystroem(n_components=2, random_state=rnd)
    X_transformed = trans.fit(X).transform(X)
    assert_equal(X_transformed.shape, (X.shape[0], 2))

    # test callable kernel
    # Idiom fix (PEP 8, E731): use a def instead of assigning a lambda
    # to a name; behaviour is identical.
    def linear_kernel(X, Y):
        return np.dot(X, Y.T)

    trans = Nystroem(n_components=2, kernel=linear_kernel, random_state=rnd)
    X_transformed = trans.fit(X).transform(X)
    assert_equal(X_transformed.shape, (X.shape[0], 2))

    # test that available kernels fit and transform
    kernels_available = kernel_metrics()
    for kern in kernels_available:
        trans = Nystroem(n_components=2, kernel=kern, random_state=rnd)
        X_transformed = trans.fit(X).transform(X)
        assert_equal(X_transformed.shape, (X.shape[0], 2))
def test_nystroem_singular_kernel():
    """Nystroem must work when the kernel matrix is singular.

    Duplicated samples make the RBF kernel matrix rank-deficient; the
    approximation must still reproduce it and produce finite features.
    """
    rng = np.random.RandomState(0)
    X = rng.rand(10, 20)
    X = np.vstack([X] * 2)  # duplicate samples

    gamma = 100
    N = Nystroem(gamma=gamma, n_components=X.shape[0]).fit(X)
    X_transformed = N.transform(X)

    K = rbf_kernel(X, gamma=gamma)

    assert_array_almost_equal(K, np.dot(X_transformed, X_transformed.T))
    # Bug fix: the finiteness check previously asserted on the unrelated
    # module-level ``Y`` (trivially finite by construction); the intent is
    # to verify the transformed features themselves contain no NaN/inf.
    assert_true(np.all(np.isfinite(X_transformed)))
def test_nystroem_poly_kernel_params():
    """Non-regression: Nystroem must forward kernel parameters other
    than gamma (here ``degree`` and ``coef0``)."""
    rnd = np.random.RandomState(37)
    data = rnd.uniform(size=(10, 4))
    exact_kernel = polynomial_kernel(data, degree=3.1, coef0=.1)
    # n_components == n_samples makes the approximation exact.
    features = Nystroem(kernel="polynomial", n_components=data.shape[0],
                        degree=3.1, coef0=.1).fit_transform(data)
    assert_array_almost_equal(np.dot(features, features.T), exact_kernel)
def test_nystroem_callable():
    """Nystroem must accept a callable kernel (with extra kernel_params)
    and evaluate it once per unordered pair of distinct samples."""
    rnd = np.random.RandomState(42)
    n_samples = 10
    X = rnd.uniform(size=(n_samples, 4))

    def logging_histogram_kernel(x, y, log):
        """Histogram kernel that writes to a log."""
        log.append(1)
        return np.minimum(x, y).sum()

    kernel_log = []
    X = list(X)  # test input validation
    Nystroem(kernel=logging_histogram_kernel,
             n_components=(n_samples - 1),
             kernel_params={'log': kernel_log}).fit(X)
    # One kernel evaluation per unordered pair of distinct samples.
    assert_equal(len(kernel_log), n_samples * (n_samples - 1) / 2)
| bsd-3-clause |
hugobowne/scikit-learn | examples/cluster/plot_face_segmentation.py | 71 | 2839 | """
===================================================
Segmenting the picture of a raccoon face in regions
===================================================
This example uses :ref:`spectral_clustering` on a graph created from
voxel-to-voxel difference on an image to break this image into multiple
partly-homogeneous regions.
This procedure (spectral clustering on an image) is an efficient
approximate solution for finding normalized graph cuts.
There are two options to assign labels:
* with 'kmeans' spectral clustering will cluster samples in the embedding space
using a kmeans algorithm
* whereas 'discrete' will iteratively search for the closest partition
space to the embedding space.
"""
print(__doc__)

# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>, Brian Cheung
# License: BSD 3 clause

import time

import numpy as np
import scipy as sp
import matplotlib.pyplot as plt

from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
from sklearn.utils.testing import SkipTest
from sklearn.utils.fixes import sp_version

# scipy.misc.face() only exists from SciPy 0.12 on.
if sp_version < (0, 12):
    raise SkipTest("Skipping because SciPy version earlier than 0.12.0 and "
                   "thus does not include the scipy.misc.face() image.")


# load the raccoon face as a numpy array
try:
    face = sp.face(gray=True)
except AttributeError:
    # Newer versions of scipy have face in misc
    from scipy import misc
    face = misc.face(gray=True)

# Resize it to 10% of the original size to speed up the processing
# NOTE(review): scipy.misc.imresize was removed in SciPy 1.3 — this script
# requires an older SciPy; confirm the pinned version before running.
face = sp.misc.imresize(face, 0.10) / 255.

# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(face)

# Take a decreasing function of the gradient: an exponential
# The smaller beta is, the more independent the segmentation is of the
# actual image. For beta=1, the segmentation is close to a voronoi
beta = 5
eps = 1e-6
graph.data = np.exp(-beta * graph.data / graph.data.std()) + eps

# Apply spectral clustering (this step goes much faster if you have pyamg
# installed)
N_REGIONS = 25

#############################################################################
# Visualize the resulting regions
for assign_labels in ('kmeans', 'discretize'):
    t0 = time.time()
    labels = spectral_clustering(graph, n_clusters=N_REGIONS,
                                 assign_labels=assign_labels, random_state=1)
    t1 = time.time()
    labels = labels.reshape(face.shape)

    plt.figure(figsize=(5, 5))
    plt.imshow(face, cmap=plt.cm.gray)
    for l in range(N_REGIONS):
        # NOTE(review): ``contours=1`` is not a documented plt.contour
        # keyword (the level list is the second positional argument /
        # ``levels``); also ``plt.cm.spectral`` was removed in Matplotlib
        # 2.2 (renamed ``Spectral``). Verify against the pinned Matplotlib.
        plt.contour(labels == l, contours=1,
                    colors=[plt.cm.spectral(l / float(N_REGIONS))])
    plt.xticks(())
    plt.yticks(())
    title = 'Spectral clustering: %s, %.2fs' % (assign_labels, (t1 - t0))
    print(title)
    plt.title(title)
plt.show()
| bsd-3-clause |
ulisespereira/LearningSequences | Plasticity/popModel/sequence_norm_bf_adaptation.py | 1 | 5722 | import numpy as np
from scipy import sparse
from scipy.integrate import odeint
import matplotlib.pyplot as plt
import math as mt
from stimulus import *
from myintegrator import *
import cProfile
import json
# this is the transfer function
def phi(x, theta, uc):
    """Piecewise-linear transfer function.

    Zero below ``theta``, linear with slope ``nu`` (module global) between
    ``theta`` and ``uc``, and saturated at ``nu*(uc-theta)`` above ``uc``.
    """
    below = x < theta
    above = x > uc
    out = nu * (x - theta)
    out[below] = 0.
    out[above] = nu * (uc - theta)
    return out
def mytauInv(x):  # time scale function synapses
    """Per-population inverse learning time constant.

    Returns 1/tau_learning wherever the activity exceeds the module-level
    threshold ``thres``, and 0 elsewhere (plasticity gated by activity).
    """
    rate = np.zeros(len(x))
    active = x > thres
    rate[active] = 1 / tau_learning
    return rate
def winf(x_hist):
    """Hebbian fixed-point weight matrix.

    Pre-synaptic factor comes from the oldest delayed activity
    (``x_hist[0]``), post-synaptic factor from the current activity
    (``x_hist[-1]``); both are sigmoidal in the activity, scaled so the
    product is bounded by the module global ``wmax``.
    """
    pre_u, post_u = x_hist[0], x_hist[-1]
    size = len(pre_u)
    ones = np.ones(size)
    pre_factor = 0.5 * (ones + np.tanh(a_pre * pre_u + b_pre))
    post_factor = ones + np.tanh(a_post * post_u + b_post)
    return (wmax / 2.) * np.outer(post_factor, pre_factor)
#function for the field
#x_hist is the 'historic' of the x during the delay period the zero is the oldest and the -1 is the newest
def tauWinv(x_hist):
    """Element-wise inverse learning rates for the weight update.

    Multiplicative gating: a synapse learns only when both its delayed
    pre-synaptic and current post-synaptic populations are above threshold
    (see mytauInv).
    """
    pre_u = x_hist[0]
    post_u = x_hist[-1]
    # alternative additive form: np.add.outer(1/mytau(post_u), 1/mytau(pre_u))
    return np.outer(mytauInv(post_u), mytauInv(pre_u))
def field(t, a, x_hist, W):
    """Right-hand side of the delayed rate dynamics.

    Returns the time derivatives of (adaptation, activity, weights).
    ``x_hist`` holds the activity history over the delay window; module
    globals supply all parameters (tau, theta, uc, w_inh, beta, tau_a, ...).
    """
    u_now = x_hist[-1]
    rates = phi(u_now, theta, uc)  # phi is pure, so evaluate it only once
    recurrent = W.dot(rates)
    inhibition = w_inh * np.dot(r1_matrix, rates)
    field_u = (1 / tau) * (mystim.stim(t) + recurrent - u_now - inhibition) - a
    field_a = (beta * u_now - a) / tau_a
    field_w = np.multiply(tauWinv(x_hist), winf(x_hist) - W)
    return field_a, field_u, field_w
# ---------------------------------------------------------------------------
# Simulation parameters (module-level globals read by phi/winf/tauWinv/field)
# ---------------------------------------------------------------------------

# open parameters of the model
n=20  # number of populations
delay=0.5
tau=1.   # timescale of populations
w_i=2.1  # total inhibitory coupling (divided by n below)
nu=4.    # transfer-function gain (see phi)
theta=-0.  # transfer-function threshold
uc=10.     # transfer-function saturation point
wmax=2.0   # maximum synaptic weight (see winf)
thres=8.   # plasticity threshold (see mytauInv)
beta=0.1   # adaptation strength
tau_a=5.   # adaptation time constant

# parameters stimulation
dt=0.1
delta=0.5
lagStim=60.
times=160
period=.1
amp=120.

a_post=0.5
b_post=-3.0
a_pre=0.5
b_pre=-3.0
tau_learning=10.

# comment this lines if you don t want to load or save the parameters of the simulation
#name='failure_learning2.param'
#save_param(name)
#load_param(name)

w_inh=w_i/n
r1_matrix=np.ones((n,n))
# one one-hot stimulation pattern per population
patterns=np.identity(n)
patterns=[patterns[:,i] for i in range(n)]
mystim=stimulus(patterns,lagStim,delta,period,times)
mystim.inten=amp

# integrator
npts=int(np.floor(delay/dt)+1)  # number of history points spanning the delay
tmax=times*(lagStim+n*(period+delta))+40

# initial conditions (one row of history per delay point)
a0=np.zeros((npts,n))
x0=0.01*np.ones((npts,n))
W0=[(wmax/n)*np.ones((n,n)) for i in range(npts)]

# learning phase: integrate the delayed system with stimulation and plasticity
theintegrator=myintegrator(delay,dt,n,tmax)
theintegrator.fast=False
adapt,rowsum,u,Wdiag,Woffdiag,connectivity,W01,t=theintegrator.DDE_Norm_additive_adapt(field,a0,x0,W0)

# new instance of the model
# with no stimulation
# and using W01 (the matrix after learning)
# to see if sequences arise
#w_i=0.8
#w_inh=w_i/n
#beta=0.
tmax=200.
mystim.inten=0.  # stimulation switched off for the recall run
a0=np.zeros(n)
a0=np.array([a0 for i in range(npts)])
x0=np.zeros(n)
x0[0]=9.5   # kick only the first population to trigger the sequence
x0=np.array([x0 for i in range(npts)])
W0=[W01 for i in range(npts)]
theintegrator_test=myintegrator(delay,dt,n,tmax)
theintegrator_test.fast=False
adapt_test,rowsum_test,u_test,Wdiag_test,Woffdiag_test,Connectivity_test,W0_test,t_test=theintegrator_test.DDE_Norm_additive_adapt(field,a0,x0,W0)
# ---------------------------------------------------------------------------
# Plotting (six panels: activity, synapses, learned matrix, recall sequence)
# ---------------------------------------------------------------------------
figure=plt.figure()

## dynamics of the populations during learning
current=figure.add_subplot(321)
#for i in range(0,3):
current.plot(t,u[:,:],'b')
mystim.inten=1.  # stimulus trace replotted with amplitude 1 for visibility
current.plot(t,[mystim.stim(x) for x in t],'r')
current.set_ylim([-5,40])
current.set_xlabel('Time')
current.set_ylabel('Exc.Pop.Current')

# same dynamics, zoomed on a late stimulation episode
current1=figure.add_subplot(322)
current1.plot(t,u[:,:],'b')
mystim.inten=2.
current1.plot(t,[mystim.stim(x) for x in t],'r')
current1.set_xlim([11120,11260])
current1.set_ylim([-5,40])
current1.set_xlabel('Time')
current1.set_ylabel('Exc.Pop.Current')

## dynamics of the synapses (every entry of the connectivity matrix)
synapses=figure.add_subplot(323)
for i in range(20):
    for j in range(20):
        synapses.plot(t,connectivity[:,i,j])
synapses.plot(t,rowsum,color='r')  # red: row-sum (normalization) trace
synapses.set_ylim([0,2.5])
synapses.set_xlabel('Time')
synapses.set_ylabel('Connections')

# same synaptic traces, zoomed on the same late episode
synapses1=figure.add_subplot(324)
for i in range(20):
    for j in range(20):
        synapses1.plot(t,connectivity[:,i,j])
synapses1.plot(t,rowsum,color='r')
synapses1.set_xlim([11120,11260])
synapses1.set_ylim([0,2.5])
synapses1.set_xlabel('Time')
synapses1.set_ylabel('Connections')

print W01  # Python 2 print statement: dump the learned weight matrix
matrix_AL=figure.add_subplot(325)
matrix_AL.matshow(W01)
#np.save('connectivity',W01)
#np.save('dynamics',u_test)

## outcome of the recall run: does a sequence arise?
outcome=figure.add_subplot(326)
outcome.plot(t_test,u_test[:,:],'b')
outcome.set_ylim([-5,8])
outcome.set_xlim([0,20])
outcome.set_xlabel('Time')
outcome.set_ylabel('Exc.Pop.Current')
plt.show()
#figure.savefig('holi.png')
# script to save the parameters: see save_param/load_param below
def save_param(name):  # naive helper to persist the simulation parameters
    """Serialize the module-level simulation parameters to ``name`` as a
    flat JSON list (positional; see load_param for the index -> name map).

    Fix vs. the original: the file handle is now closed (``with``), so the
    JSON is guaranteed to be flushed to disk; the 20 appends are collapsed
    into a single list literal.
    """
    myparams = [n, delay, tau, w_i, nu, theta, uc, wmax, thres,
                dt, delta, lagStim, times, period, amp,
                a_post, b_post, a_pre, b_pre, tau_learning]
    with open(name, 'w+') as myfile:
        json.dump(myparams, myfile)
def load_param(name):
    """Load simulation parameters from ``name`` (the positional JSON list
    written by save_param) and install them as module globals.

    Fixes vs. the original:
      * ``wmax`` was read from ``parameter[7]`` (NameError typo) -- now
        ``parameters[7]``;
      * the misspelled ``logStim`` is corrected to ``lagStim``, the name the
        rest of the module actually uses;
      * ``a_post``, ``b_post``, ``a_pre``, ``b_pre`` and ``tau_learning``
        are declared ``global`` -- previously those assignments only created
        locals and silently had no effect;
      * the file is closed via ``with``.
    """
    global n, delay, tau, w_i, nu, theta, uc, wmax, thres, dt, delta, \
        lagStim, times, period, amp, a_post, b_post, a_pre, b_pre, \
        tau_learning
    with open(name) as myfile:
        parameters = json.load(myfile)
    n = parameters[0]
    delay = parameters[1]
    tau = parameters[2]
    w_i = parameters[3]
    nu = parameters[4]
    theta = parameters[5]
    uc = parameters[6]
    wmax = parameters[7]
    thres = parameters[8]
    dt = parameters[9]
    delta = parameters[10]
    lagStim = parameters[11]
    times = parameters[12]
    period = parameters[13]
    amp = parameters[14]
    a_post = parameters[15]
    b_post = parameters[16]
    a_pre = parameters[17]
    b_pre = parameters[18]
    tau_learning = parameters[19]
| gpl-2.0 |
justincassidy/scikit-learn | examples/cluster/plot_kmeans_assumptions.py | 270 | 2040 | """
====================================
Demonstration of k-means assumptions
====================================
This example is meant to illustrate situations where k-means will produce
unintuitive and possibly unexpected clusters. In the first three plots, the
input data does not conform to some implicit assumption that k-means makes and
undesirable clusters are produced as a result. In the last plot, k-means
returns intuitive clusters despite unevenly sized blobs.
"""
print(__doc__)

# Author: Phil Roth <mr.phil.roth@gmail.com>
# License: BSD 3 clause

import numpy as np
import matplotlib.pyplot as plt

from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs

plt.figure(figsize=(12, 12))

n_samples = 1500
random_state = 170
X, y = make_blobs(n_samples=n_samples, random_state=random_state)

# Incorrect number of clusters: k=2 requested for data drawn from 3 blobs
y_pred = KMeans(n_clusters=2, random_state=random_state).fit_predict(X)

plt.subplot(221)
plt.scatter(X[:, 0], X[:, 1], c=y_pred)
plt.title("Incorrect Number of Blobs")

# Anisotropicly distributed data: a linear shear of the same blobs breaks
# k-means' implicit assumption of isotropic clusters
transformation = [[ 0.60834549, -0.63667341], [-0.40887718, 0.85253229]]
X_aniso = np.dot(X, transformation)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_aniso)

plt.subplot(222)
plt.scatter(X_aniso[:, 0], X_aniso[:, 1], c=y_pred)
plt.title("Anisotropicly Distributed Blobs")

# Different variance per blob
X_varied, y_varied = make_blobs(n_samples=n_samples,
                                cluster_std=[1.0, 2.5, 0.5],
                                random_state=random_state)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_varied)

plt.subplot(223)
plt.scatter(X_varied[:, 0], X_varied[:, 1], c=y_pred)
plt.title("Unequal Variance")

# Unevenly sized blobs: keep only 500/100/10 points of the three blobs
X_filtered = np.vstack((X[y == 0][:500], X[y == 1][:100], X[y == 2][:10]))
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_filtered)

plt.subplot(224)
plt.scatter(X_filtered[:, 0], X_filtered[:, 1], c=y_pred)
plt.title("Unevenly Sized Blobs")

plt.show()
| bsd-3-clause |
0todd0000/spm1d | spm1d/examples/nonparam/1d/roi/ex_anova2.py | 1 | 1477 |
import numpy as np
from matplotlib import pyplot
import spm1d


#(0) Load dataset (alternative 2-way ANOVA datasets left for convenience):
dataset = spm1d.data.uv1d.anova2.SPM1D_ANOVA2_2x2()
# dataset = spm1d.data.uv1d.anova2.SPM1D_ANOVA2_2x3()
# dataset = spm1d.data.uv1d.anova2.SPM1D_ANOVA2_3x3()
# dataset = spm1d.data.uv1d.anova2.SPM1D_ANOVA2_3x4()
# dataset = spm1d.data.uv1d.anova2.SPM1D_ANOVA2_3x5()
# dataset = spm1d.data.uv1d.anova2.SPM1D_ANOVA2_4x4()
# dataset = spm1d.data.uv1d.anova2.SPM1D_ANOVA2_4x5()
y,A,B = dataset.get_data()


#(0a) Create region of interest(ROI):
# boolean mask over the 1D field; only nodes 25..44 enter the analysis
roi = np.array( [False]*y.shape[1] )
roi[25:45] = True


#(1) Conduct non-parametric (permutation) test:
np.random.seed(0)   # fixed seed so the 200 permutation iterations are reproducible
alpha = 0.05
FFn = spm1d.stats.nonparam.anova2(y, A, B, roi=roi)
FFni = FFn.inference(alpha, iterations=200)
print( FFni )


#(2) Compare with the parametric result on the same ROI:
FF = spm1d.stats.anova2(y, A, B, equal_var=True, roi=roi)
FFi = FF.inference(alpha)
print( FFi )


#(3) Plot (three panels -- presumably main effects A, B and the interaction):
pyplot.close('all')
pyplot.figure(figsize=(15,4))
for i,(Fi,Fni) in enumerate( zip(FFi,FFni) ):
    ax = pyplot.subplot(1,3,i+1)
    Fni.plot(ax=ax)
    Fni.plot_threshold_label(ax=ax, fontsize=8)
    Fni.plot_p_values(ax=ax, size=10)
    # overlay the parametric threshold for visual comparison
    ax.axhline( Fi.zstar, color='orange', linestyle='--', label='Parametric threshold')
    # ensure both thresholds stay visible even when nothing is significant
    if (Fi.zstar > Fi.z.max()) and (Fi.zstar>Fni.zstar):
        ax.set_ylim(0, Fi.zstar+1)
    if i==0:
        ax.legend(fontsize=10, loc='best')
    ax.set_title( Fni.effect )
pyplot.show()
| gpl-3.0 |
ZenDevelopmentSystems/scikit-learn | examples/linear_model/plot_ridge_path.py | 254 | 1655 | """
===========================================================
Plot Ridge coefficients as a function of the regularization
===========================================================
Shows the effect of collinearity in the coefficients of an estimator.
.. currentmodule:: sklearn.linear_model
:class:`Ridge` Regression is the estimator used in this example.
Each color represents a different feature of the
coefficient vector, and this is displayed as a function of the
regularization parameter.
At the end of the path, as alpha tends toward zero
and the solution tends towards the ordinary least squares, coefficients
exhibit big oscillations.
"""
# Author: Fabian Pedregosa -- <fabian.pedregosa@inria.fr>
# License: BSD 3 clause

print(__doc__)

import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model

# X is the 10x10 Hilbert matrix (badly conditioned, so the coefficients are
# very sensitive to the regularization strength)
X = 1. / (np.arange(1, 11) + np.arange(0, 10)[:, np.newaxis])
y = np.ones(10)

###############################################################################
# Compute paths: refit the same Ridge estimator over a log-spaced alpha grid

n_alphas = 200
alphas = np.logspace(-10, -2, n_alphas)
clf = linear_model.Ridge(fit_intercept=False)

coefs = []
for a in alphas:
    clf.set_params(alpha=a)
    clf.fit(X, y)
    coefs.append(clf.coef_)

###############################################################################
# Display results

ax = plt.gca()
# NOTE(review): Axes.set_color_cycle was deprecated in matplotlib 1.5 in
# favor of set_prop_cycle -- confirm the matplotlib version this targets.
ax.set_color_cycle(['b', 'r', 'g', 'c', 'k', 'y', 'm'])

ax.plot(alphas, coefs)
ax.set_xscale('log')
ax.set_xlim(ax.get_xlim()[::-1])  # reverse axis: strong regularization on the left
plt.xlabel('alpha')
plt.ylabel('weights')
plt.title('Ridge coefficients as a function of the regularization')
plt.axis('tight')
plt.show()
| bsd-3-clause |
Ghalko/osf.io | scripts/analytics/utils.py | 30 | 1244 | # -*- coding: utf-8 -*-
import os
import unicodecsv as csv
from bson import ObjectId
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import requests
from website import util
def oid_to_datetime(oid):
    """Return the timestamp encoded in ``oid`` via bson's
    ``ObjectId.generation_time`` (``oid`` may be anything the ObjectId
    constructor accepts)."""
    return ObjectId(oid).generation_time
def mkdirp(path):
    """Create ``path`` like ``mkdir -p``: make intermediate directories as
    needed and succeed silently when the directory already exists.

    Fix vs. the original: the bare ``except OSError: pass`` swallowed every
    failure (permissions, read-only filesystem, path is a file, ...); now
    only the already-a-directory case is ignored.
    """
    try:
        os.makedirs(path)
    except OSError:
        # Tolerate only "already exists as a directory"; re-raise real errors.
        if not os.path.isdir(path):
            raise
def plot_dates(dates, *args, **kwargs):
    """Plot date histogram.

    ``dates`` is an iterable of datetime-like objects; extra positional and
    keyword arguments are forwarded to ``Axes.hist``. Returns the Figure.
    """
    fig = plt.figure()
    ax = fig.add_subplot(111)

    # histogram over matplotlib's float date numbers
    ax.hist(
        [mdates.date2num(each) for each in dates],
        *args, **kwargs
    )

    fig.autofmt_xdate()  # rotate/align x tick labels so dates fit
    ax.format_xdata = mdates.DateFormatter('%Y-%m-%d')
    ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))

    return fig
def make_csv(fp, rows, headers=None):
    """Write ``rows`` to the open file ``fp`` as CSV, preceded by an
    optional (truthy) header row."""
    out = csv.writer(fp)
    if headers:
        out.writerow(headers)
    for row in rows:
        out.writerow(row)
def send_file(app, name, content_type, file_like, node, user):
    """Upload file to OSF.

    Rewinds ``file_like`` first so the whole stream is sent regardless of
    prior reads, builds the waterbutler upload URL inside a test request
    context (waterbutler_url_for needs a Flask request context), then PUTs
    the stream with the given Content-Type.
    """
    file_like.seek(0)
    with app.test_request_context():
        upload_url = util.waterbutler_url_for('upload', 'osfstorage', name, node, user=user)
        requests.put(
            upload_url,
            data=file_like,
            headers={'Content-Type': content_type},
        )
| apache-2.0 |
vovojh/gem5 | util/stats/barchart.py | 90 | 12472 | # Copyright (c) 2005-2006 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
# Lisa Hsu
import matplotlib, pylab
from matplotlib.font_manager import FontProperties
from matplotlib.numerix import array, arange, reshape, shape, transpose, zeros
from matplotlib.numerix import Float
from matplotlib.ticker import NullLocator
matplotlib.interactive(False)
from chart import ChartOptions
class BarChart(ChartOptions):
    """Grouped/stacked bar chart built on matplotlib (Python 2 codebase).

    Presentation options (colormap, chart_size, figure_size, ticks, legend,
    title, ...) come from the ChartOptions base class; data is assigned via
    the ``data``/``err`` properties, which accept 1, 2, or 3 dimensional
    matrices (see set_data for the interpretation of each).
    """

    def __init__(self, default=None, **kwargs):
        super(BarChart, self).__init__(default, **kwargs)
        # inputdata/inputerr hold the user-supplied matrices; chartdata/
        # charterr hold the transposed form that graph() consumes.
        self.inputdata = None
        self.chartdata = None
        self.inputerr = None
        self.charterr = None

    def gen_colors(self, count):
        """Return ``count`` colors sampled evenly from self.colormap."""
        cmap = matplotlib.cm.get_cmap(self.colormap)
        if count == 1:
            return cmap([ 0.5 ])

        # for few bars, sample from a fixed 5-point grid so colors stay
        # stable as bars are added
        if count < 5:
            return cmap(arange(5) / float(4))[:count]

        return cmap(arange(count) / float(count - 1))

    # The input data format does not match the data format that the
    # graph function takes because it is intuitive. The conversion
    # from input data format to chart data format depends on the
    # dimensionality of the input data. Check here for the
    # dimensionality and correctness of the input data
    def set_data(self, data):
        """Validate ``data`` (1-3d) and precompute the chart-format copy."""
        if data is None:
            self.inputdata = None
            self.chartdata = None
            return

        data = array(data)
        dim = len(shape(data))
        if dim not in (1, 2, 3):
            raise AttributeError, "Input data must be a 1, 2, or 3d matrix"
        self.inputdata = data

        # If the input data is a 1d matrix, then it describes a
        # standard bar chart.
        if dim == 1:
            self.chartdata = array([[data]])

        # If the input data is a 2d matrix, then it describes a bar
        # chart with groups. The matrix being an array of groups of
        # bars.
        if dim == 2:
            self.chartdata = transpose([data], axes=(2,0,1))

        # If the input data is a 3d matrix, then it describes an array
        # of groups of bars with each bar being an array of stacked
        # values.
        if dim == 3:
            self.chartdata = transpose(data, axes=(1,2,0))

    def get_data(self):
        return self.inputdata

    data = property(get_data, set_data)

    def set_err(self, err):
        """Validate error-bar data; must match ``data``'s dimensionality."""
        if err is None:
            self.inputerr = None
            self.charterr = None
            return

        err = array(err)
        dim = len(shape(err))
        if dim not in (1, 2, 3):
            raise AttributeError, "Input err must be a 1, 2, or 3d matrix"
        self.inputerr = err

        # same transpositions as set_data, so err lines up with data
        if dim == 1:
            self.charterr = array([[err]])

        if dim == 2:
            self.charterr = transpose([err], axes=(2,0,1))

        if dim == 3:
            self.charterr = transpose(err, axes=(1,2,0))

    def get_err(self):
        return self.inputerr

    err = property(get_err, set_err)

    # Graph the chart data.
    # Input is a 3d matrix that describes a plot that has multiple
    # groups, multiple bars in each group, and multiple values stacked
    # in each bar. The underlying bar() function expects a sequence of
    # bars in the same stack location and same group location, so the
    # organization of the matrix is that the inner most sequence
    # represents one of these bar groups, then those are grouped
    # together to make one full stack of bars in each group, and then
    # the outer most layer describes the groups. Here is an example
    # data set and how it gets plotted as a result.
    #
    # e.g. data = [[[10,11,12], [13,14,15], [16,17,18], [19,20,21]],
    #              [[22,23,24], [25,26,27], [28,29,30], [31,32,33]]]
    #
    # will plot like this:
    #
    #    19 31    20 32    21 33
    #    16 28    17 29    18 30
    #    13 25    14 26    15 27
    #    10 22    11 23    12 24
    #
    # Because this arrangement is rather conterintuitive, the rearrange
    # function takes various matricies and arranges them to fit this
    # profile.
    #
    # This code deals with one of the dimensions in the matrix being
    # one wide.
    #
    def graph(self):
        """Render the chart into self.figure/self.axes (call before savefig)."""
        if self.chartdata is None:
            raise AttributeError, "Data not set for bar chart!"

        dim = len(shape(self.inputdata))
        cshape = shape(self.chartdata)
        if self.charterr is not None and shape(self.charterr) != cshape:
            raise AttributeError, 'Dimensions of error and data do not match'

        # one color per stack segment / bar / group depending on dim
        if dim == 1:
            colors = self.gen_colors(cshape[2])
            colors = [ [ colors ] * cshape[1] ] * cshape[0]

        if dim == 2:
            colors = self.gen_colors(cshape[0])
            colors = [ [ [ c ] * cshape[2] ] * cshape[1] for c in colors ]

        if dim == 3:
            colors = self.gen_colors(cshape[1])
            colors = [ [ [ c ] * cshape[2] for c in colors ] ] * cshape[0]

        colors = array(colors)
        self.figure = pylab.figure(figsize=self.chart_size)

        # when sub-ticks are requested, a second (meta) axes carries the
        # group labels while the inner axes carries the bars themselves
        outer_axes = None
        inner_axes = None
        if self.xsubticks is not None:
            color = self.figure.get_facecolor()
            self.metaaxes = self.figure.add_axes(self.figure_size,
                                                 axisbg=color, frameon=False)
            for tick in self.metaaxes.xaxis.majorTicks:
                tick.tick1On = False
                tick.tick2On = False
            self.metaaxes.set_yticklabels([])
            self.metaaxes.set_yticks([])
            size = [0] * 4
            size[0] = self.figure_size[0]
            size[1] = self.figure_size[1] + .12
            size[2] = self.figure_size[2]
            size[3] = self.figure_size[3] - .12
            self.axes = self.figure.add_axes(size)
            outer_axes = self.metaaxes
            inner_axes = self.axes
        else:
            self.axes = self.figure.add_axes(self.figure_size)
            outer_axes = self.axes
            inner_axes = self.axes

        bars_in_group = len(self.chartdata)

        # leave a one-bar-wide gap between groups
        width = 1.0 / ( bars_in_group + 1)
        center = width / 2

        bars = []
        for i,stackdata in enumerate(self.chartdata):
            bottom = array([0.0] * len(stackdata[0]), Float)
            stack = []
            for j,bardata in enumerate(stackdata):
                bardata = array(bardata)
                ind = arange(len(bardata)) + i * width + center
                yerr = None
                if self.charterr is not None:
                    yerr = self.charterr[i][j]
                bar = self.axes.bar(ind, bardata, width, bottom=bottom,
                                    color=colors[i][j], yerr=yerr)
                if self.xsubticks is not None:
                    # zero-height bars keep the meta axes' x scale in sync
                    self.metaaxes.bar(ind, [0] * len(bardata), width)
                stack.append(bar)
                bottom += bardata
            bars.append(stack)

        if self.xlabel is not None:
            outer_axes.set_xlabel(self.xlabel)

        if self.ylabel is not None:
            inner_axes.set_ylabel(self.ylabel)

        if self.yticks is not None:
            ymin, ymax = self.axes.get_ylim()
            nticks = float(len(self.yticks))
            ticks = arange(nticks) / (nticks - 1) * (ymax - ymin) + ymin
            inner_axes.set_yticks(ticks)
            inner_axes.set_yticklabels(self.yticks)
        elif self.ylim is not None:
            inner_axes.set_ylim(self.ylim)

        if self.xticks is not None:
            outer_axes.set_xticks(arange(cshape[2]) + .5)
            outer_axes.set_xticklabels(self.xticks)

        if self.xsubticks is not None:
            numticks = (cshape[0] + 1) * cshape[2]
            inner_axes.set_xticks(arange(numticks) * width + 2 * center)
            xsubticks = list(self.xsubticks) + [ '' ]
            inner_axes.set_xticklabels(xsubticks * cshape[2], fontsize=7,
                                       rotation=30)

        if self.legend is not None:
            # pick one representative bar patch per legend entry
            if dim == 1:
                lbars = bars[0][0]
            if dim == 2:
                lbars = [ bars[i][0][0] for i in xrange(len(bars))]
            if dim == 3:
                number = len(bars[0])
                lbars = [ bars[0][number - j - 1][0] for j in xrange(number)]

            if self.fig_legend:
                self.figure.legend(lbars, self.legend, self.legend_loc,
                                   prop=FontProperties(size=self.legend_size))
            else:
                self.axes.legend(lbars, self.legend, self.legend_loc,
                                 prop=FontProperties(size=self.legend_size))

        if self.title is not None:
            self.axes.set_title(self.title)

    def savefig(self, name):
        """Write the rendered figure to ``name`` (format from extension)."""
        self.figure.savefig(name)

    def savecsv(self, name):
        """Dump the 1d/2d input data to ``name`` as CSV; 3d is unsupported."""
        f = file(name, 'w')
        data = array(self.inputdata)
        dim = len(data.shape)

        if dim == 1:
            #if self.xlabel:
            #    f.write(', '.join(list(self.xlabel)) + '\n')
            f.write(', '.join([ '%f' % val for val in data]) + '\n')
        if dim == 2:
            #if self.xlabel:
            #    f.write(', '.join([''] + list(self.xlabel)) + '\n')
            for i,row in enumerate(data):
                ylabel = []
                #if self.ylabel:
                #    ylabel = [ self.ylabel[i] ]
                f.write(', '.join(ylabel + [ '%f' % v for v in row]) + '\n')
        if dim == 3:
            f.write("don't do 3D csv files\n")
            pass
        f.close()
if __name__ == '__main__':
    # Self-test: build a chart of a given shape and write it to /tmp.
    from random import randrange
    import random, sys

    # NOTE(review): dim and number appear unused below -- leftovers?
    dim = 3
    number = 5

    # optional command-line override of the data shape (up to 3 dims)
    args = sys.argv[1:]
    if len(args) > 3:
        sys.exit("invalid number of arguments")
    elif len(args) > 0:
        myshape = [ int(x) for x in args ]
    else:
        myshape = [ 3, 4, 8 ]

    # generate a data matrix of the given shape
    size = reduce(lambda x,y: x*y, myshape)
    #data = [ random.randrange(size - i) + 10 for i in xrange(size) ]
    data = [ float(i)/100.0 for i in xrange(size) ]
    data = reshape(data, myshape)

    # setup some test bar charts
    if True:
        chart1 = BarChart()
        chart1.data = data

        chart1.xlabel = 'Benchmark'
        chart1.ylabel = 'Bandwidth (GBps)'
        chart1.legend = [ 'x%d' % x for x in xrange(myshape[-1]) ]
        chart1.xticks = [ 'xtick%d' % x for x in xrange(myshape[0]) ]
        chart1.title = 'this is the title'
        if len(myshape) > 2:
            chart1.xsubticks = [ '%d' % x for x in xrange(myshape[1]) ]
        chart1.graph()
        chart1.savefig('/tmp/test1.png')
        chart1.savefig('/tmp/test1.ps')
        chart1.savefig('/tmp/test1.eps')
        chart1.savecsv('/tmp/test1.csv')

    # disabled second example (grayscale colormap)
    if False:
        chart2 = BarChart()
        chart2.data = data
        chart2.colormap = 'gray'
        chart2.graph()
        chart2.savefig('/tmp/test2.png')
        chart2.savefig('/tmp/test2.ps')

    # pylab.show()
| bsd-3-clause |
caidongyun/pylearn2 | pylearn2/scripts/datasets/browse_norb.py | 44 | 15741 | #!/usr/bin/env python
"""
A browser for the NORB and small NORB datasets. Navigate the images by
choosing the values for the label vector. Note that for the 'big' NORB
dataset, you can only set the first 5 label dimensions. You can then cycle
through the 3-12 images that fit those labels.
"""
import sys
import os
import argparse
import numpy
import warnings
# matplotlib is imported defensively: the module can still be imported
# without it, but plotting (pyplot.subplots in main) will then fail.
try:
    import matplotlib
    from matplotlib import pyplot
except ImportError as import_error:
    warnings.warn("Can't use this script without matplotlib.")
    # sentinels keep later attribute access failing loudly instead of
    # failing at import time
    matplotlib = None
    pyplot = None
from pylearn2.datasets.new_norb import NORB
from pylearn2.utils import safe_zip, serial
def _parse_args():
    """
    Parse and validate command-line arguments.

    Exactly one of two forms is accepted: ``--pkl <file.pkl>`` alone, or
    both ``--which_norb`` and ``--which_set``; any other combination prints
    a message and exits with status 1.
    """
    parser = argparse.ArgumentParser(
        description="Browser for NORB dataset.")

    parser.add_argument('--which_norb',
                        type=str,
                        required=False,
                        choices=('big', 'small'),
                        help="'Selects the (big) NORB, or the Small NORB.")

    parser.add_argument('--which_set',
                        type=str,
                        required=False,
                        choices=('train', 'test', 'both'),
                        help="'train', or 'test'")

    parser.add_argument('--pkl',
                        type=str,
                        required=False,
                        help=".pkl file of NORB dataset")

    parser.add_argument('--stereo_viewer',
                        action='store_true',
                        help="Swaps left and right stereo images, so you "
                        "can see them in 3D by crossing your eyes.")

    parser.add_argument('--no_norm',
                        action='store_true',
                        help="Don't normalize pixel values")

    result = parser.parse_args()

    # --pkl is mutually exclusive with --which_norb/--which_set
    if (result.pkl is not None) == (result.which_norb is not None or
                                    result.which_set is not None):
        print("Must supply either --pkl, or both --which_norb and "
              "--which_set.")
        sys.exit(1)

    # --which_norb and --which_set must be supplied together
    if (result.which_norb is None) != (result.which_set is None):
        print("When not supplying --pkl, you must supply both "
              "--which_norb and --which_set.")
        sys.exit(1)

    if result.pkl is not None:
        if not result.pkl.endswith('.pkl'):
            print("--pkl must be a filename that ends in .pkl")
            sys.exit(1)

        if not os.path.isfile(result.pkl):
            print("couldn't find --pkl file '%s'" % result.pkl)
            sys.exit(1)

    return result
def _make_grid_to_short_label(dataset):
"""
Returns an array x such that x[a][b] gives label index a's b'th unique
value. In other words, it maps label grid indices a, b to the
corresponding label value.
"""
unique_values = [sorted(list(frozenset(column)))
for column
in dataset.y[:, :5].transpose()]
# If dataset contains blank images, removes the '-1' labels
# corresponding to blank images, since they aren't contained in the
# label grid.
category_index = dataset.label_name_to_index['category']
unique_categories = unique_values[category_index]
category_to_name = dataset.label_to_value_funcs[category_index]
if any(category_to_name(category) == 'blank'
for category in unique_categories):
for d in range(1, len(unique_values)):
assert unique_values[d][0] == -1, ("unique_values: %s" %
str(unique_values))
unique_values[d] = unique_values[d][1:]
return unique_values
def _get_blank_label(dataset):
"""
Returns the label vector associated with blank images.
If dataset is a Small NORB (i.e. it has no blank images), this returns
None.
"""
category_index = dataset.label_name_to_index['category']
category_to_name = dataset.label_to_value_funcs[category_index]
blank_label = 5
try:
blank_name = category_to_name(blank_label)
except ValueError:
# Returns None if there is no 'blank' category (e.g. if we're using
# the small NORB dataset.
return None
assert blank_name == 'blank'
blank_rowmask = dataset.y[:, category_index] == blank_label
blank_labels = dataset.y[blank_rowmask, :]
if not blank_rowmask.any():
return None
if not numpy.all(blank_labels[0, :] == blank_labels[1:, :]):
raise ValueError("Expected all labels of category 'blank' to have "
"the same value, but they differed.")
return blank_labels[0, :].copy()
def _make_label_to_row_indices(labels):
"""
Returns a map from short labels (the first 5 elements of the label
vector) to the list of row indices of rows in the dense design matrix
with that label.
For Small NORB, all unique short labels have exactly one row index.
For big NORB, a short label can have 0-N row indices.
"""
result = {}
for row_index, label in enumerate(labels):
short_label = tuple(label[:5])
if result.get(short_label, None) is None:
result[short_label] = []
result[short_label].append(row_index)
return result
def main():
"""Top-level function."""
args = _parse_args()
if args.pkl is not None:
dataset = serial.load(args.pkl)
else:
dataset = NORB(args.which_norb, args.which_set)
# Indexes into the first 5 labels, which live on a 5-D grid.
grid_indices = [0, ] * 5
grid_to_short_label = _make_grid_to_short_label(dataset)
# Maps 5-D label vector to a list of row indices for dataset.X, dataset.y
# that have those labels.
label_to_row_indices = _make_label_to_row_indices(dataset.y)
# Indexes into the row index lists returned by label_to_row_indices.
object_image_index = [0, ]
blank_image_index = [0, ]
blank_label = _get_blank_label(dataset)
# Index into grid_indices currently being edited
grid_dimension = [0, ]
dataset_is_stereo = 's' in dataset.view_converter.axes
figure, all_axes = pyplot.subplots(1,
3 if dataset_is_stereo else 2,
squeeze=True,
figsize=(10, 3.5))
set_name = (os.path.split(args.pkl)[1] if args.which_set is None
else "%sing set" % args.which_set)
figure.canvas.set_window_title("NORB dataset (%s)" % set_name)
label_text = figure.suptitle('Up/down arrows choose label, '
'left/right arrows change it',
x=0.1,
horizontalalignment="left")
# Hides axes' tick marks
for axes in all_axes:
axes.get_xaxis().set_visible(False)
axes.get_yaxis().set_visible(False)
text_axes, image_axes = (all_axes[0], all_axes[1:])
image_captions = (('left', 'right') if dataset_is_stereo
else ('mono image', ))
if args.stereo_viewer:
image_captions = tuple(reversed(image_captions))
for image_ax, caption in safe_zip(image_axes, image_captions):
image_ax.set_title(caption)
text_axes.set_frame_on(False) # Hides background of text_axes
def is_blank(grid_indices):
assert len(grid_indices) == 5
assert all(x >= 0 for x in grid_indices)
ci = dataset.label_name_to_index['category'] # category index
category = grid_to_short_label[ci][grid_indices[ci]]
category_name = dataset.label_to_value_funcs[ci](category)
return category_name == 'blank'
def get_short_label(grid_indices):
"""
Returns the first 5 elements of the label vector pointed to by
grid_indices. We use the first 5, since they're the labels used by
both the 'big' and Small NORB datasets.
"""
# Need to special-case the 'blank' category, since it lies outside of
# the grid.
if is_blank(grid_indices): # won't happen with SmallNORB
return tuple(blank_label[:5])
else:
return tuple(grid_to_short_label[i][g]
for i, g in enumerate(grid_indices))
def get_row_indices(grid_indices):
short_label = get_short_label(grid_indices)
return label_to_row_indices.get(short_label, None)
axes_to_pixels = {}
def redraw(redraw_text, redraw_images):
row_indices = get_row_indices(grid_indices)
if row_indices is None:
row_index = None
image_index = 0
num_images = 0
else:
image_index = (blank_image_index
if is_blank(grid_indices)
else object_image_index)[0]
row_index = row_indices[image_index]
num_images = len(row_indices)
def draw_text():
if row_indices is None:
padding_length = dataset.y.shape[1] - len(grid_indices)
current_label = (tuple(get_short_label(grid_indices)) +
(0, ) * padding_length)
else:
current_label = dataset.y[row_index, :]
label_names = dataset.label_index_to_name
label_values = [label_to_value(label) for label_to_value, label
in safe_zip(dataset.label_to_value_funcs,
current_label)]
lines = ['%s: %s' % (t, v)
for t, v
in safe_zip(label_names, label_values)]
if dataset.y.shape[1] > 5:
# Inserts image number & blank line between editable and
# fixed labels.
lines = (lines[:5] +
['No such image' if num_images == 0
else 'image: %d of %d' % (image_index + 1,
num_images),
'\n'] +
lines[5:])
# prepends the current index's line with an arrow.
lines[grid_dimension[0]] = '==> ' + lines[grid_dimension[0]]
text_axes.clear()
# "transAxes": 0, 0 = bottom-left, 1, 1 at upper-right.
text_axes.text(0, 0.5, # coords
'\n'.join(lines),
verticalalignment='center',
transform=text_axes.transAxes)
def draw_images():
if row_indices is None:
for axis in image_axes:
axis.clear()
else:
data_row = dataset.X[row_index:row_index + 1, :]
axes_names = dataset.view_converter.axes
assert len(axes_names) in (4, 5)
assert axes_names[0] == 'b'
assert axes_names[-3] == 0
assert axes_names[-2] == 1
assert axes_names[-1] == 'c'
def draw_image(image, axes):
assert len(image.shape) == 2
norm = matplotlib.colors.NoNorm() if args.no_norm else None
axes_to_pixels[axes] = image
axes.imshow(image, norm=norm, cmap='gray')
if 's' in axes_names:
image_pair = \
dataset.get_topological_view(mat=data_row,
single_tensor=True)
# Shaves off the singleton dimensions
# (batch # and channel #), leaving just 's', 0, and 1.
image_pair = tuple(image_pair[0, :, :, :, 0])
if args.stereo_viewer:
image_pair = tuple(reversed(image_pair))
for axis, image in safe_zip(image_axes, image_pair):
draw_image(image, axis)
else:
image = dataset.get_topological_view(mat=data_row)
image = image[0, :, :, 0]
draw_image(image, image_axes[0])
if redraw_text:
draw_text()
if redraw_images:
draw_images()
figure.canvas.draw()
default_status_text = ("mouseover image%s for pixel values" %
("" if len(image_axes) == 1 else "s"))
status_text = figure.text(0.5, 0.1, default_status_text)
def on_mouse_motion(event):
    """Show the pixel value under the cursor in the figure's status text.

    Closure deps (from the enclosing viewer function): status_text,
    default_status_text, image_axes, axes_to_pixels, figure.
    """
    original_text = status_text.get_text()
    if event.inaxes not in image_axes:
        status_text.set_text(default_status_text)
    else:
        pixels = axes_to_pixels[event.inaxes]
        # xdata/ydata are float axes coords; round to the nearest pixel.
        row = int(event.ydata + .5)
        col = int(event.xdata + .5)
        status_text.set_text("Pixel value: %g" % pixels[row, col])
    # Bug fix: the original wrote `status_text.get_text != original_text`,
    # comparing the bound method object to a string -- always True, so the
    # canvas was redrawn on every mouse move. Call the method instead.
    if status_text.get_text() != original_text:
        figure.canvas.draw()
def on_key_press(event):
def add_mod(arg, step, size):
return (arg + size + step) % size
def incr_index_type(step):
num_dimensions = len(grid_indices)
if dataset.y.shape[1] > 5:
# If dataset is big NORB, add one for the image index
num_dimensions += 1
grid_dimension[0] = add_mod(grid_dimension[0],
step,
num_dimensions)
def incr_index(step):
assert step in (0, -1, 1), ("Step was %d" % step)
image_index = (blank_image_index
if is_blank(grid_indices)
else object_image_index)
if grid_dimension[0] == 5: # i.e. the image index
row_indices = get_row_indices(grid_indices)
if row_indices is None:
image_index[0] = 0
else:
# increment the image index
image_index[0] = add_mod(image_index[0],
step,
len(row_indices))
else:
# increment one of the grid indices
gd = grid_dimension[0]
grid_indices[gd] = add_mod(grid_indices[gd],
step,
len(grid_to_short_label[gd]))
row_indices = get_row_indices(grid_indices)
if row_indices is None:
image_index[0] = 0
else:
# some grid indices have 2 images instead of 3.
image_index[0] = min(image_index[0], len(row_indices))
# Disables left/right key if we're currently showing a blank,
# and the current index type is neither 'category' (0) nor
# 'image number' (5)
disable_left_right = (is_blank(grid_indices) and
not (grid_dimension[0] in (0, 5)))
if event.key == 'up':
incr_index_type(-1)
redraw(True, False)
elif event.key == 'down':
incr_index_type(1)
redraw(True, False)
elif event.key == 'q':
sys.exit(0)
elif not disable_left_right:
if event.key == 'left':
incr_index(-1)
redraw(True, True)
elif event.key == 'right':
incr_index(1)
redraw(True, True)
figure.canvas.mpl_connect('key_press_event', on_key_press)
figure.canvas.mpl_connect('motion_notify_event', on_mouse_motion)
redraw(True, True)
pyplot.show()
if __name__ == '__main__':
main()
| bsd-3-clause |
ChanderG/scikit-learn | benchmarks/bench_plot_fastkmeans.py | 294 | 4676 | from __future__ import print_function
from collections import defaultdict
from time import time
import numpy as np
from numpy import random as nr
from sklearn.cluster.k_means_ import KMeans, MiniBatchKMeans
def compute_bench(samples_range, features_range):
    """Benchmark KMeans vs. MiniBatchKMeans over a grid of dataset sizes.

    Parameters
    ----------
    samples_range : sequence of int
        Sample counts to benchmark.
    features_range : sequence of int
        Feature counts to benchmark.

    Returns
    -------
    defaultdict(list)
        Fit time and final inertia for both estimators, keyed by
        'kmeans_speed', 'kmeans_quality', 'MiniBatchKMeans Speed' and
        'MiniBatchKMeans Quality', appended in grid-iteration order.
    """
    it = 0
    results = defaultdict(lambda: [])
    chunk = 100  # mini-batch size for MiniBatchKMeans
    max_it = len(samples_range) * len(features_range)
    for n_samples in samples_range:
        for n_features in features_range:
            it += 1
            print('==============================')
            print('Iteration %03d of %03d' % (it, max_it))
            print('==============================')
            print()
            # Fix: nr.random_integers was deprecated in numpy 1.11 and
            # removed in 1.17. randint with an exclusive upper bound of 51
            # draws from exactly the same inclusive [-50, 50] range.
            data = nr.randint(-50, 51, (n_samples, n_features))

            print('K-Means')
            tstart = time()
            kmeans = KMeans(init='k-means++', n_clusters=10).fit(data)
            delta = time() - tstart
            print("Speed: %0.3fs" % delta)
            print("Inertia: %0.5f" % kmeans.inertia_)
            print()
            results['kmeans_speed'].append(delta)
            results['kmeans_quality'].append(kmeans.inertia_)

            print('Fast K-Means')
            # let's prepare the data in small chunks
            mbkmeans = MiniBatchKMeans(init='k-means++',
                                       n_clusters=10,
                                       batch_size=chunk)
            tstart = time()
            mbkmeans.fit(data)
            delta = time() - tstart
            print("Speed: %0.3fs" % delta)
            print("Inertia: %f" % mbkmeans.inertia_)
            print()
            print()
            results['MiniBatchKMeans Speed'].append(delta)
            results['MiniBatchKMeans Quality'].append(mbkmeans.inertia_)

    return results
def compute_bench_2(chunks):
    """Benchmark MiniBatchKMeans on a fixed 8-blob dataset for each batch size.

    Parameters
    ----------
    chunks : sequence of int
        Batch sizes to benchmark.

    Returns
    -------
    defaultdict(list)
        Fit time and final inertia per batch size, keyed by
        'MiniBatchKMeans Speed' and 'MiniBatchKMeans Quality'.
    """
    results = defaultdict(lambda: [])
    # NOTE(review): despite the name, n_features is the number of 2-D
    # samples generated per cluster center below.
    n_features = 50000
    means = np.array([[1, 1], [-1, -1], [1, -1], [-1, 1],
                      [0.5, 0.5], [0.75, -0.5], [-1, 0.75], [1, 0]])
    X = np.empty((0, 2))
    for i in range(8):
        X = np.r_[X, means[i] + 0.8 * np.random.randn(n_features, 2)]
    max_it = len(chunks)
    it = 0
    for chunk in chunks:
        it += 1
        print('==============================')
        print('Iteration %03d of %03d' % (it, max_it))
        print('==============================')
        print()
        print('Fast K-Means')
        tstart = time()
        mbkmeans = MiniBatchKMeans(init='k-means++',
                                   n_clusters=8,
                                   batch_size=chunk)
        mbkmeans.fit(X)
        delta = time() - tstart
        print("Speed: %0.3fs" % delta)
        # Fix: the original format string was "Inertia: %0.3fs" -- inertia
        # is a unitless sum of squared distances, not seconds.
        print("Inertia: %0.3f" % mbkmeans.inertia_)
        print()
        results['MiniBatchKMeans Speed'].append(delta)
        results['MiniBatchKMeans Quality'].append(mbkmeans.inertia_)
    return results
if __name__ == '__main__':
    from mpl_toolkits.mplot3d import axes3d  # register the 3d projection
    import matplotlib.pyplot as plt

    # Benchmark grids: small sample counts, wide feature range, and a sweep
    # of mini-batch sizes for the second experiment.
    # NOTE(review): np.int and dict.iteritems() below are Python 2 /
    # pre-1.20 numpy only; this script does not run unmodified on Python 3.
    samples_range = np.linspace(50, 150, 5).astype(np.int)
    features_range = np.linspace(150, 50000, 5).astype(np.int)
    chunks = np.linspace(500, 10000, 15).astype(np.int)

    results = compute_bench(samples_range, features_range)
    results_2 = compute_bench_2(chunks)

    # NOTE(review): the "speed" substring test is case-sensitive, so the
    # 'MiniBatchKMeans Speed' series (capital S) falls into max_inertia,
    # not max_time -- the z-limits of the two subplots are therefore mixed.
    max_time = max([max(i) for i in [t for (label, t) in results.iteritems()
                                     if "speed" in label]])
    max_inertia = max([max(i) for i in [
        t for (label, t) in results.iteritems()
        if "speed" not in label]])

    fig = plt.figure('scikit-learn K-Means benchmark results')
    # Surface plots over the (n_samples, n_features) grid: timings on the
    # left subplot, inertia on the right.
    for c, (label, timings) in zip('brcy',
                                   sorted(results.iteritems())):
        if 'speed' in label:
            ax = fig.add_subplot(2, 2, 1, projection='3d')
            ax.set_zlim3d(0.0, max_time * 1.1)
        else:
            ax = fig.add_subplot(2, 2, 2, projection='3d')
            ax.set_zlim3d(0.0, max_inertia * 1.1)
        X, Y = np.meshgrid(samples_range, features_range)
        Z = np.asarray(timings).reshape(samples_range.shape[0],
                                        features_range.shape[0])
        ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.5)
        ax.set_xlabel('n_samples')
        ax.set_ylabel('n_features')

    # Line plots of the batch-size sweep (speed and quality vs. chunk size).
    i = 0
    for c, (label, timings) in zip('br',
                                   sorted(results_2.iteritems())):
        i += 1
        ax = fig.add_subplot(2, 2, i + 2)
        y = np.asarray(timings)
        ax.plot(chunks, y, color=c, alpha=0.8)
        ax.set_xlabel('Chunks')
        ax.set_ylabel(label)

    plt.show()
| bsd-3-clause |
happyx2/asspy | asspy/lexrank.py | 1 | 6324 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import getopt
import codecs
import collections
import numpy
import networkx
import re
from sklearn.feature_extraction import DictVectorizer
from sklearn.metrics import pairwise_distances
import tools
from misc.divrank import divrank, divrank_scipy
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.stem import PorterStemmer, WordNetLemmatizer
from nltk import pos_tag
lemmatiser = WordNetLemmatizer()
def lexrank(sentences, continuous=False, sim_threshold=0.1, alpha=0.9,
            use_divrank=False, divrank_alpha=0.25):
    '''
    compute centrality score of sentences.
    Args:
      sentences: [u'Hi.', u'I am fine.', ... ]
      continuous: if True, apply continuous LexRank. (see reference)
      sim_threshold: if continuous is False and smilarity is greater or
        equal to sim_threshold, link the sentences.
      alpha: the damping factor of PageRank and DivRank
      divrank: if True, apply DivRank instead of PageRank
      divrank_alpha: strength of self-link [0.0-1.0]
        (it's not the damping factor, see divrank.py)
    Returns: tuple
      (
        {
          # sentence index -> score
          0: 0.003,
          1: 0.002,
          ...
        },
        similarity_matrix
      )
    Reference:
      Günes Erkan and Dragomir R. Radev.
      LexRank: graph-based lexical centrality as salience in text
      summarization. (section 3)
      http://www.cs.cmu.edu/afs/cs/project/jair/pub/volume22/erkan04a-html/erkan04a.html
    '''
    # configure ranker
    ranker_params = {'max_iter': 1000}
    if use_divrank:
        ranker = divrank_scipy
        ranker_params['alpha'] = divrank_alpha
        ranker_params['d'] = alpha
    else:
        ranker = networkx.pagerank_scipy
        ranker_params['alpha'] = alpha

    graph = networkx.DiGraph()

    # sentence -> tf
    # Each sentence becomes a bag-of-words Counter; verbs, nouns and
    # adjectives are lemmatized (POS tags VB*/NN*/JJ*), everything else is
    # kept verbatim. Tokens are utf-8 encoded byte strings (Python 2 style).
    sent_tf_list = []
    for sent in sentences:
        tokens = word_tokenize(sent)
        tokens_pos = pos_tag(tokens)
        words = []
        for i in range(len(tokens_pos)):
            if( re.match(r'VB', tokens_pos[i][1])):
                words.extend ([lemmatiser.lemmatize(tokens_pos[i][0], pos="v").encode('utf-8')])
            elif( re.match(r'NN', tokens_pos[i][1])):
                words.extend ([lemmatiser.lemmatize(tokens_pos[i][0], pos="n").encode('utf-8')])
            elif( re.match(r'JJ', tokens_pos[i][1])):
                words.extend ([lemmatiser.lemmatize(tokens_pos[i][0], pos="a").encode('utf-8')])
            else:
                words.extend([tokens_pos[i][0].encode('utf-8')])
        tf = collections.Counter(words)
        sent_tf_list.append(tf)

    sent_vectorizer = DictVectorizer(sparse=True)
    sent_vecs = sent_vectorizer.fit_transform(sent_tf_list)

    # compute similarities between senteces
    # (cosine similarity = 1 - cosine distance, computed pairwise)
    sim_mat = 1 - pairwise_distances(sent_vecs, sent_vecs, metric='cosine')

    # Continuous LexRank keeps every positive-similarity edge (weighted);
    # the thresholded variant links sentences with an unweighted edge when
    # similarity >= sim_threshold.
    if continuous:
        linked_rows, linked_cols = numpy.where(sim_mat > 0)
    else:
        linked_rows, linked_cols = numpy.where(sim_mat >= sim_threshold)

    # create similarity graph (self-loops are skipped)
    # NOTE(review): add_edge(i, j, {'weight': w}) is the networkx 1.x
    # positional-attribute API; networkx >= 2.0 requires weight=w. Confirm
    # the pinned networkx version before upgrading.
    graph.add_nodes_from(range(sent_vecs.shape[0]))
    for i, j in zip(linked_rows, linked_cols):
        if i == j:
            continue
        weight = sim_mat[i,j] if continuous else 1.0
        graph.add_edge(i, j, {'weight': weight})

    scores = ranker(graph, **ranker_params)
    return scores, sim_mat
def summarize(text, sent_limit=None, char_limit=None, imp_require=None,
              debug=True, **lexrank_params):
    '''
    Summarize text by extracting the highest-LexRank sentences.

    Args:
      text: text to be summarized (unicode string)
      sent_limit: summary length (the number of sentences)
      char_limit: summary length (the number of characters)
      imp_require: cumulative LexRank score [0.0-1.0]
      debug: if True, debug_info carries the sentence list and scores
      **lexrank_params: forwarded unchanged to lexrank()

    Returns:
      (list of extracted sentences in document order, debug_info dict)
    '''
    debug_info = {}
    sentences = sent_tokenize(text)
    scores, sim_mat = lexrank(sentences, **lexrank_params)
    # Fix: scores.itervalues() is Python-2-only; values() behaves
    # identically under both Python 2 and 3 for this sum.
    sum_scores = sum(scores.values())
    acc_scores = 0.0
    indexes = set()
    num_sent, num_char = 0, 0
    # Visit sentences from highest to lowest centrality and stop at the
    # first limit that is exceeded.
    for i in sorted(scores, key=lambda i: scores[i], reverse=True):
        num_sent += 1
        num_char += len(sentences[i])
        if sent_limit is not None and num_sent > sent_limit:
            break
        if char_limit is not None and num_char > char_limit:
            break
        if imp_require is not None and acc_scores / sum_scores >= imp_require:
            break
        indexes.add(i)
        acc_scores += scores[i]
    if len(indexes) > 0:
        # Re-emit selected sentences in their original document order.
        summary_sents = [sentences[i] for i in sorted(indexes)]
    else:
        # Limits were hit before anything was selected: fall back to all.
        summary_sents = sentences
    if debug:
        debug_info.update({
            'sentences': sentences, 'scores': scores
        })
    return summary_sents, debug_info
if __name__ == '__main__':
    # Command-line driver (Python 2: uses print statements and .decode()).
    _usage = '''
Usage:
 python lexrank.py -f <file_name> [-e <encoding> ]
 [ -v lexrank | clexrank | divrank ]
 [ -s <sent_limit> | -c <char_limit> | -i <imp_required> ]
Args:
 -f: plain text file to be summarized
 -e: input and output encoding (default: utf-8)
 -v: variant of LexRank (default is 'lexrank')
 -s: summary length (the number of sentences)
 -c: summary length (the number of charactors)
 -i: cumulative LexRank score [0.0-1.0]
'''.strip()

    options, args = getopt.getopt(sys.argv[1:], 'f:e:v:s:c:i:')
    options = dict(options)
    # Require at least an input file and one more option.
    if len(options) < 2:
        print _usage
        sys.exit(0)

    fname = options['-f']
    encoding = options['-e'] if '-e' in options else 'utf-8'
    variant = options['-v'] if '-v' in options else 'lexrank'
    sent_limit = int(options['-s']) if '-s' in options else None
    char_limit = int(options['-c']) if '-c' in options else None
    imp_require = float(options['-i']) if '-i' in options else None

    # Special file name 'stdin' reads the text from standard input.
    if fname == 'stdin':
        text = '\n'.join(
            line for line in sys.stdin.readlines()
        ).decode(encoding)
    else:
        text = codecs.open(fname, encoding=encoding).read()

    # Map the variant flag onto lexrank() keyword arguments.
    lexrank_params = {}
    if variant == 'clexrank':
        lexrank_params['continuous'] = True
    if variant == 'divrank':
        lexrank_params['use_divrank'] = True

    sentences, debug_info = summarize(
        text, sent_limit=sent_limit, char_limit=char_limit,
        imp_require=imp_require, **lexrank_params
    )
    for sent in sentences:
        print sent.strip().encode(encoding)
| mit |
manpen/hypergen | libs/NetworKit/scripts/DynamicBetweennessExperiments_fixed_batch.py | 3 | 4514 | from networkit import *
from networkit.dynamic import *
from networkit.centrality import *
import pandas as pd
import random
def isConnected(G):
    """Return True iff graph G forms a single connected component."""
    components = properties.ConnectedComponents(G)
    components.run()
    return components.numberOfComponents() == 1
def removeAndAddEdges(G, nEdges, tabu=None):
    """Sample nEdges random edges of G and build removal/addition streams.

    Args:
        G: networkit graph (edge weights are preserved in the add stream).
        nEdges: number of distinct edges to sample.
        tabu: optional graph whose edges must not be selected (e.g. a
            spanning forest, so removals keep G connected). None means no
            restriction.

    Returns:
        (removeStream, addStream): lists of GraphEvent covering the same
        edges in the same order.

    Raises:
        ValueError: if G does not have nEdges removable edges.
    """
    nTabu = tabu.numberOfEdges() if tabu is not None else 0
    if nEdges > G.numberOfEdges() - nTabu:
        # Fix: the original raised `Error(...)`, an undefined name that
        # itself crashed with NameError. Also, tabu=None used to crash on
        # tabu.numberOfEdges(); None is now treated as "no tabu edges".
        raise ValueError("G does not have enough edges")
    # select random edges for removal
    removed = set()
    while len(removed) < nEdges:
        (u, v) = G.randomEdge()
        # exclude all edges in the tabu graph and already-selected edges
        isTabu = tabu is not None and tabu.hasEdge(u, v)
        if not isTabu and not ((u,v) in removed or (v,u) in removed):
            removed.add((u, v))
    print (removed)
    # build event streams (same iteration order for both)
    removeStream = []
    for (u, v) in removed:
        removeStream.append(GraphEvent(GraphEvent.EDGE_REMOVAL, u, v, 0))
    addStream = []
    for (u, v) in removed:
        addStream.append(GraphEvent(GraphEvent.EDGE_ADDITION, u, v, G.weight(u, v)))
    return (removeStream, addStream)
def setRandomWeights(G, mu, sigma):
    """
    Assign every edge of G a weight drawn from a normal distribution
    with mean mu and standard deviation sigma; returns G for chaining.
    """
    for edge in G.edges():
        u, v = edge
        G.setWeight(u, v, random.normalvariate(mu, sigma))
    return G
def test(G, nEdges, batchSize, epsilon, delta, size):
    """Benchmark static vs. dynamic (exact and approximate) betweenness.

    Removes nEdges edges from G (never touching a spanning forest, so G
    stays connected), runs all four betweenness algorithms, then re-adds
    the edges in batches of batchSize, timing each algorithm's update.

    Returns:
        (df1, df2): pandas DataFrames with per-batch timings and with
        normalized exact / approximate scores per experiment.
    """
    # find a set of nEdges to remove from G
    T = graph.SpanningForest(G).generate()
    (removeStream, addStream) = removeAndAddEdges(G, nEdges, tabu=T)
    # remove the edges from G
    updater = dynamic.GraphUpdater(G)
    updater.update(removeStream)
    # run the algorithms on the inital graph
    print("--- IS G CONNECTED? ")
    print(isConnected(G))
    bc = Betweenness(G)
    print("Running bc")
    bc.run()
    dynBc = DynBetweenness(G, True)
    print("Running dyn bc with predecessors")
    dynBc.run()
    apprBc = ApproxBetweenness(G, epsilon, delta)
    print("Running approx bc")
    apprBc.run()
    dynApprBc = DynApproxBetweenness(G, epsilon, delta, True)
    print("Running dyn approx bc with predecessors")
    dynApprBc.run()
    # apply the batches
    nExperiments = nEdges // batchSize
    timesBc = []
    timesDynBc = []
    timesApprBc = []
    timesDynApprBc = []
    scoresBc = []
    scoresApprBc = []
    for i in range(nExperiments):
        batch = addStream[i*batchSize : (i+1)*batchSize]
        # add the edges of batch to the graph
        print("GRAPH SIZE")
        print(size)
        totalTime = 0.0
        for j in range(0, batchSize):
            updater.update([batch[j]])
            # update the betweenness with the dynamic exact algorithm
            # (skipped on graphs larger than 2^15 nodes; -1 marks "not run")
            if size <= 2**15:
                t = stopwatch.Timer()
                dynBc.update(batch[j])
                totalTime += t.stop()
            else:
                totalTime = -1
        timesDynBc.append(totalTime)
        # update the betweenness with the static exact algorithm
        t = stopwatch.Timer()
        bc.run()
        x = t.stop()
        timesBc.append(x)
        print("Exact BC")
        print(x)
        print("Speedup Dyn BC (with preds)")
        print(x/totalTime)
        # update the betweenness with the static approximated algorithm
        t = stopwatch.Timer()
        apprBc.run()
        x = t.stop()
        timesApprBc.append(x)
        print("ApprBC")
        print(x)
        # update the betweenness with the dynamic approximated algorithm
        t = stopwatch.Timer()
        dynApprBc.update(batch)
        y = t.stop()
        timesDynApprBc.append(y)
        print("Speedup DynApprBC (with preds)")
        print(x/y)
        # normalize exact scores by the number of node pairs
        bcNormalized = [ k/(size*(size-1)) for k in bc.scores()]
        scoresBc.append(bcNormalized)
        scoresApprBc.append(dynApprBc.scores())
    # assemble timing table and per-experiment score table
    a = pd.Series(timesBc)
    b = pd.Series(timesDynBc)
    c = pd.Series(timesApprBc)
    d = pd.Series(timesDynApprBc)
    df1 = pd.DataFrame({"Static exact bc": a, "Dynamic exact bc" : b, "Static approx bc" : c, "Dynamic approx bc" : d})
    dic2 = {}
    for experiment in range(nExperiments):
        a = pd.Series(scoresBc[experiment])
        b = pd.Series(scoresApprBc[experiment])
        dic2["Exact scores (exp. "+str(experiment)+")"] = a
        dic2["Approx scores (exp. "+str(experiment)+")"] = b
    df2 = pd.DataFrame(dic2)
    return df1, df2
if __name__ == "__main__":
    # Run the benchmark on Dorogovtsev-Mendes graphs of increasing size
    # (2^10 .. 2^20 nodes), single-threaded for reproducible timings.
    setNumberOfThreads(1)
    # setLogLevel("INFO")
    batchSize = 128
    for i in range(10,21):
        size = 2**i
        G = generators.DorogovtsevMendesGenerator(size).generate()
        # Copy into a weighted, undirected graph with unit weights, then
        # perturb the weights with N(1, 0.1) noise.
        G1 = Graph(G.numberOfNodes(), True, False)
        for e in G.edges():
            G1.addEdge(e[0], e[1], 1.0)
        G1 = setRandomWeights(G1, 1, 0.1)
        if (isConnected(G1)) :
            nEdges = batchSize * 5
            epsilon = 0.05
            delta = 0.1
            (df1, df2) = test(G1, nEdges, batchSize, epsilon, delta, size)
            # assumes results_fixed_batch/ already exists -- TODO confirm
            df1.to_csv("results_fixed_batch/times_weighted_size_"+str(size)+"_batch_"+str(batchSize)+".csv")
            df2.to_csv("results_fixed_batch/scores_weighted_size_"+str(size)+"_batch_"+str(batchSize)+".csv")
        else:
            print("The generated graph is not connected.")
| gpl-3.0 |
rahlk/RAAT | src/tools/Discretize.py | 2 | 4964 | """
An instance filter that discretizes a range of numeric attributes in the dataset into nominal attributes. Discretization is by Fayyad & Irani's MDL method (the default).
For more information, see:
Usama M. Fayyad, Keki B. Irani: Multi-interval discretization of continuous valued attributes for classification learning. In: Thirteenth International Joint Conference on Artificial Intelligence, 1022-1027, 1993.
Igor Kononenko: On Biases in Estimating Multi-Valued Attributes. In: 14th International Joint Conference on Articial Intelligence, 1034-1040, 1995.
Dougherty, James, Ron Kohavi, and Mehran Sahami. "Supervised and unsupervised discretization of continuous features." Machine learning: proceedings of the twelfth international conference. Vol. 12. 1995.
"""
from __future__ import division, print_function
import pandas as pd
from collections import Counter
from misc import *
import numpy as np
from pdb import set_trace
from sklearn.tree import DecisionTreeClassifier as CART
def fWeight(tbl):
    """
    Rank the feature columns of `tbl` by entropy-based importance,
    most important first. The last column is treated as the class label.
    """
    label_column = tbl[tbl.columns[-1]]
    feature_columns = tbl.columns[:-1]
    tree = CART(criterion='entropy')
    tree.fit(tbl[feature_columns], label_column)
    importance = tree.feature_importances_
    order = np.argsort(importance)[::-1]
    return [tbl.columns[idx] for idx in order]
def discretize(feature, klass, atleast=-1, discrete=False):
    """
    Recursive Minimal Entropy Discretization
    ````````````````````````````````````````
    Inputs:
      feature: A list or a numpy array of continuous attributes
      klass: A list, or a numpy array of discrete class labels.
      atleast: minimum recursion depth before the MDL stop rule may halt.
      discrete: if True, measure impurity with standard deviation instead
                of entropy and report split positions (indices) rather
                than split values.
    Outputs:
      splits: A list containing suggested split locations
    """
    def measure(x):
        # Impurity of a label sequence: Shannon entropy (natural log) for
        # classification, standard deviation for the `discrete` variant.
        def ent(x):
            C = Counter(x)
            N = len(x)
            return sum([-C[n] / N * np.log(C[n] / N) for n in C.keys()])

        def stdev(x):
            # np.var([]) is NaN; treat an empty slice as zero spread.
            if np.isnan(np.var(x) ** 0.5):
                return 0
            return np.var(x) ** 0.5

        if not discrete:
            return ent(x)
        else:
            return stdev(x)

    # Sort the feature values and keep klass aligned with them.
    feature, klass = sorted(feature), [k for (f, k) in sorted(zip(feature, klass))]
    splits = []

    def redo(feature, klass, lvl):
        if len(feature) > 0:
            E = measure(klass)
            N = len(klass)
            T = []  # weighted impurity of each candidate boundary
            # Fix: `xrange` is Python-2-only; `range` is behaviorally
            # identical here and works on Python 3.
            for k in range(len(feature)):
                west, east = feature[:k], feature[k:]
                k_w, k_e = klass[:k], klass[k:]
                N_w, N_e = len(west), len(east)
                T += [N_w / N * measure(k_w) + N_e / N * measure(k_e)]
            T_min = np.argmin(T)
            left, right = feature[:T_min], feature[T_min:]
            k_l, k_r = klass[:T_min], klass[T_min:]

            def stop(k, k_l, k_r):
                # Fayyad & Irani MDL stopping criterion.
                gain = E - T[T_min]

                def count(lst): return len(Counter(lst).keys())
                delta = np.log2(float(3 ** count(k) - 2)) - (
                    count(k) * measure(k) - count(k_l) * measure(k_l) - count(k_r) * measure(k_r))
                return gain < (np.log2(N - 1) + delta) / N or T_min == 0

            if stop(klass, k_l, k_r) and lvl >= atleast:
                if discrete:
                    splits.append(T_min)
                else:
                    splits.append(feature[T_min])
            else:
                redo(feature=left, klass=k_l, lvl=lvl + 1)
                redo(feature=right, klass=k_r, lvl=lvl + 1)

    # ------ main ------
    redo(feature, klass, lvl=0)
    return splits
def _test0():
    "A Test Function"
    # Smoke test: gaussian feature with |N(0,1)| integer labels, then drop
    # into pdb so the suggested splits can be inspected interactively.
    test = np.random.normal(0,10,1000).tolist()
    klass = [int(abs(i)) for i in np.random.normal(0,1,1000)]
    splits = discretize(feature=test, klass=klass)
    set_trace()
def _test1():
    # Loads the 'ant' defect dataset through the project helpers
    # (explore/csv2DF from misc) and discretizes it; ends in pdb.
    tbl_loc = explore(name='ant')[0]
    tbl = csv2DF(tbl_loc)
    new = discreteTbl(tbl)
    set_trace()
def discreteTbl(tbl, B=0.33, Prune=True):
    """
    Discretize a table
    ``````````````````
    Columns 1 to N-1 represent the independent attributes, column N the dependent.
    Parameters:
      tbl - A Pandas Data Frame
      B - Cutoff for Pruning Columns (float between 0,1)
      Prune - Prune (True/False)
    Returns:
      Pandas Data Frame: Discretized table (each cell holds its (lo, hi) bin)
    """
    dtable = []
    fweight = fWeight(tbl)
    for i, name in enumerate(tbl.columns[:-1]):
        new = []
        feature = tbl[name].values
        klass = tbl[tbl.columns[-1]].values
        splits = discretize(feature, klass)
        LO, HI = min(feature), max(feature)

        def pairs(lst):
            # Yield consecutive (lo, hi) pairs from a sorted cutoff list.
            while len(lst) > 1:
                yield (lst.pop(0), lst[0])

        # Bin boundaries: the MDL split points plus the feature extremes.
        # (The original computed this list once, discarded it, and rebuilt
        # it -- the dead first assignment has been removed.)
        cutoffs = [t for t in pairs(sorted(list(set(splits + [LO, HI]))))]
        for f in feature:
            for n in cutoffs:
                if n[0] <= f < n[1]:
                    new.append(n)
                elif f == n[1] == HI:
                    # Close the last interval on the right so the maximum
                    # value still falls into a bin.
                    new.append((n[0], HI))
        dtable.append(new)
    dtable.append(klass.tolist())
    dtable = pd.DataFrame(dtable).T
    dtable.columns = tbl.columns
    ranks = fWeight(tbl)
    if Prune:
        # Keep only the top B fraction of columns by entropy rank.
        return dtable[ranks[:int(len(ranks) * B)] + [tbl.columns[-1]]]
    else:
        return dtable[ranks + [tbl.columns[-1]]]
if __name__=='__main__':
_test0()
pass | mit |
theoryno3/scikit-learn | sklearn/feature_selection/__init__.py | 244 | 1088 | """
The :mod:`sklearn.feature_selection` module implements feature selection
algorithms. It currently includes univariate filter selection methods and the
recursive feature elimination algorithm.
"""
from .univariate_selection import chi2
from .univariate_selection import f_classif
from .univariate_selection import f_oneway
from .univariate_selection import f_regression
from .univariate_selection import SelectPercentile
from .univariate_selection import SelectKBest
from .univariate_selection import SelectFpr
from .univariate_selection import SelectFdr
from .univariate_selection import SelectFwe
from .univariate_selection import GenericUnivariateSelect
from .variance_threshold import VarianceThreshold
from .rfe import RFE
from .rfe import RFECV
# Public API of sklearn.feature_selection: the univariate selectors and
# scoring functions plus the RFE variants re-exported above.
__all__ = ['GenericUnivariateSelect',
           'RFE',
           'RFECV',
           'SelectFdr',
           'SelectFpr',
           'SelectFwe',
           'SelectKBest',
           'SelectPercentile',
           'VarianceThreshold',
           'chi2',
           'f_classif',
           'f_oneway',
           'f_regression']
| bsd-3-clause |
hadmack/pyoscope | tests/display_rigol.py | 1 | 1116 | #!/usr/bin/env python
#
# PyUSBtmc
# display_channel.py
#
# Copyright (c) 2011 Mike Hadmack
# Copyright (c) 2010 Matt Mets
# This code is distributed under the MIT license
#
# This script is just to test rigolscope functionality as a module
#
import numpy
from matplotlib import pyplot
import sys
import os
sys.path.append(os.path.expanduser('~/Source'))
sys.path.append(os.path.expanduser('~/src'))
sys.path.append('/var/local/src')
from pyoscope import RigolScope
# Initialize our scope
scope = RigolScope("/dev/usbtmc-rigol")
scope.grabData()
data1 = scope.getScaledWaveform(1)
data2 = scope.getScaledWaveform(2)
# Now, generate a time axis.
time = scope.getTimeAxis()
# See if we should use a different time axis
if (time[599] < 1e-3):
time = time * 1e6
tUnit = "uS"
elif (time[599] < 1):
time = time * 1e3
tUnit = "mS"
else:
tUnit = "S"
# close interface
scope.close()
# Plot the data
pyplot.plot(time,data1)
pyplot.plot(time,data2)
pyplot.title("Oscilloscope Data")
pyplot.ylabel("Voltage (V)")
pyplot.xlabel("Time (" + tUnit + ")")
pyplot.xlim(time[0], time[599])
pyplot.show()
| mit |
dorianprill/CBIRjpg | plot.py | 1 | 5140 | #!/usr/bin/python3
import pickle
import matplotlib
import argparse
from itertools import product
matplotlib.use("Agg")
import matplotlib.pyplot as plt
matplotlib.style.use("ggplot")
matplotlib.rcParams.update({"font.size" : 10})
def getAvailableValues(parameter):
    """Sorted distinct values taken by `parameter` across all results."""
    distinct = {record[parameter] for record in results}
    return sorted(distinct)
def getAvailableScoreTypes():
    """Sorted union of score-type names present in any result."""
    names = set()
    for record in results:
        names.update(record["scores"].keys())
    return sorted(names)
def getCompressionRatios(parameters):
    """Sorted distinct compression ratios among results matching the
    given (parameter, value) pairs."""
    matching = [record for record in results
                if all(record[param] == value for param, value in parameters)]
    return sorted({record["compressionRatio"] for record in matching})
def getScore(parameters, scoreType):
    """Score of the unique result matching `parameters`, or None if there
    is no match; raises if the match is ambiguous."""
    matching = [record for record in results
                if all(record[param] == value for param, value in parameters)]
    if not matching:
        return None
    if len(matching) == 1:
        return matching[0]["scores"][scoreType]
    raise Exception("multiple results for given parameters")
def makePlot(curves, xticks, yLimits, xLabel, yLabel, title, outFile):
    """Render a set of named curves to `outFile`.

    Args:
        curves: iterable of (label, values); a None value marks a missing
            data point and is skipped (line drawn through the gap).
        xticks: labels for the integer x positions 0..len(xticks)-1.
        yLimits: (ymin, ymax) for the y axis.
        xLabel, yLabel, title: axis/figure annotations.
        outFile: path passed to savefig.
    """
    plt.gcf().clear()
    for lineName, lineValues in curves:
        # Plot only the defined points; x is the position index.
        xValues = [i for i in range(len(lineValues)) if lineValues[i] is not None]
        yValues = [v for v in lineValues if v is not None]
        line = plt.plot(xValues, yValues, label = lineName, marker = "o", markersize = 8)
        plt.setp(line, linewidth = 2)
    # Small margins around the first/last tick positions.
    plt.xlim([-0.1, len(xticks) - 0.9])
    plt.ylim(yLimits)
    plt.gca().grid(linewidth = 1.5)
    plt.gca().set_xticks(range(len(xticks)))
    # Map integer tick positions back to their textual labels.
    xtickFormatter = matplotlib.ticker.FuncFormatter(lambda x, pos: str(xticks[pos]))
    plt.gca().get_xaxis().set_major_formatter(xtickFormatter)
    plt.gca().get_xaxis().set_minor_locator(matplotlib.ticker.NullLocator())
    leg = plt.legend(loc = "upper left", fancybox = True)
    leg.get_frame().set_alpha(0.5)
    plt.xlabel(xLabel)
    plt.ylabel(yLabel)
    plt.title(title)
    plt.tight_layout()
    plt.savefig(outFile)
# Driver: load pickled benchmark results and emit one PNG per
# (dataset, score, scenario, compression-or-descriptor) combination.
parser = argparse.ArgumentParser()
parser.add_argument("resultsFile")
args = parser.parse_args()

results = pickle.load(open(args.resultsFile, 'rb'))
print("loaded {} results".format(len(results)))

# NOTE(review): plotLimits.py is expected to define yLimits (a dict of
# score type -> (ymin, ymax)) in this scope -- confirm before running.
exec(open("plotLimits.py").read())

plotCounter = 0
# compressionType - specific plots: one curve per descriptor.
for dataset, cType, scenario, scoreType in product(getAvailableValues("dataset"),
                                                   getAvailableValues("compressionType"),
                                                   getAvailableValues("retrievalScenario"),
                                                   getAvailableScoreTypes()):
    parameters = [("dataset", dataset), ("compressionType", cType),
                  ("retrievalScenario", scenario)]
    outFile = "results/c_{}_{}_{}_{}.png".format(dataset, scoreType, scenario, cType)
    xticks = getCompressionRatios(parameters)
    curves = []
    for descriptor in getAvailableValues("descriptor"):
        values = []
        for cRatio in xticks:
            fullParameters = parameters + [("descriptor", descriptor), ("compressionRatio", cRatio)]
            values.append(getScore(fullParameters, scoreType))
        curves.append((descriptor, values))
    xlabel = "Compression ratio (query only)" if scenario == "tuqc" else \
             "Compression ratio (training + query)"
    ylabel = scoreType + " score"
    title = "compression: " + cType
    makePlot(curves, xticks, yLimits[scoreType], xlabel, ylabel, title, outFile)
    plotCounter += 1
print("generated {} compression-specific plots".format(plotCounter))

plotCounter = 0
# descriptor - specific plots: one curve per compression type.
for dataset, descriptor, scenario, scoreType in product(getAvailableValues("dataset"),
                                                        getAvailableValues("descriptor"),
                                                        getAvailableValues("retrievalScenario"),
                                                        getAvailableScoreTypes()):
    parameters = [("dataset", dataset), ("descriptor", descriptor),
                  ("retrievalScenario", scenario)]
    outFile = "results/d_{}_{}_{}_{}.png".format(dataset, scoreType, scenario, descriptor)
    xticks = getCompressionRatios(parameters)
    curves = []
    for cType in getAvailableValues("compressionType"):
        values = []
        for cRatio in xticks:
            fullParameters = parameters + [("compressionType", cType), ("compressionRatio", cRatio)]
            values.append(getScore(fullParameters, scoreType))
        curves.append((cType, values))
    xlabel = "Compression ratio (query only)" if scenario == "tuqc" else \
             "Compression ratio (training + query)"
    ylabel = scoreType + " score"
    title = "descriptor: " + descriptor
    makePlot(curves, xticks, yLimits[scoreType], xlabel, ylabel, title, outFile)
    plotCounter += 1
print("generated {} descriptor-specific plots".format(plotCounter))
| gpl-3.0 |
jasonfrowe/Kepler | example/transitfit5.py | 1 | 11384 | import numpy as np
from numpy import zeros
from numpy import ones
import tfit5
import fittransitmodel as ftf
import matplotlib #ploting
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import math #used for floor command
def readtt(*files):
    """Read per-planet transit-timing (TTV) files.

    Each argument is a path (or the literal string 'null' for a planet
    with no TTV data). File rows are: timestamp, O-C, uncertainty; only
    rows with a positive uncertainty are kept.

    Returns (ntt, tobs, omc): per-planet valid-row counts (int32 array)
    and (n_planets, nmax) arrays of timestamps and O-C values.
    """
    # First pass: longest file determines the second array dimension.
    nmax = 0
    for fname in files:
        if fname != 'null':
            with open(fname, 'r') as fh:
                nlines = sum(1 for _ in fh)
            nmax = max(nlines, nmax)

    n_planets = len(files)
    ntt = zeros(n_planets, dtype="int32")   # valid TTV count per planet
    tobs = zeros(shape=(n_planets, nmax))   # TTV timestamps (days)
    omc = zeros(shape=(n_planets, nmax))    # O-C values (days)

    # Second pass: keep only rows whose uncertainty (column 3) is > 0.
    for idx, fname in enumerate(files):
        if fname == 'null':
            ntt[idx] = 0
            continue
        nvalid = 0
        with open(fname, 'r') as fh:
            for line in fh:
                cols = line.strip().split()
                if float(cols[2]) > 0.0:
                    tobs[idx, nvalid] = float(cols[0])
                    omc[idx, nvalid] = float(cols[1])
                    nvalid += 1
        ntt[idx] = nvalid
    return ntt, tobs, omc
def transitplot(time,flux,sol,nplanetplot=1, itime=-1, ntt=0, tobs=0, omc=0, dtype=0):
    """Phase-fold and plot the transit of one planet from a multi-planet fit.

    time/flux: photometric time series; sol: model parameter vector
    (8 global + 10 per planet); nplanetplot: 1-based planet to display.
    itime/ntt/tobs/omc/dtype follow the transitmodel() conventions.
    """
    nplanet=int((len(sol)-8)/10) #number of planets
    #deal with input vars and translating to FORTRAN friendly.
    if type(itime) is int :
        if itime < 0 :
            itime=ones(len(time))*0.020434
        else:
            itime=ones(len(time))*float(itime)
    if type(ntt) is int :
        nttin= zeros(nplanet, dtype="int32") #number of TTVs measured
        tobsin= zeros(shape=(nplanet,len(time))) #time stamps of TTV measurements (days)
        omcin= zeros(shape=(nplanet,len(time))) #TTV measurements (O-C) (days)
    else:
        nttin=ntt
        tobsin=tobs
        omcin=omc
    # NOTE(review): if dtype is not an int, dtypein is never assigned and
    # the tfit5 calls below raise NameError -- confirm intended usage.
    if type(dtype) is int :
        dtypein=ones(len(time), dtype="int32")*int(dtype) #contains data type, 0-photometry,1=RV data
    #remove other planets for plotting
    sol2=np.copy(sol)
    for i in range(1,nplanet+1):
        if i!=nplanetplot:
            nc=8+10*(i-1)
            sol2[nc+3]=0.0 #rdrs
    tmodel= zeros(len(time)) #contains the transit model
    tfit5.transitmodel(nplanet,sol2,time,itime,nttin,tobsin,omcin,tmodel,dtypein)
    #make a model with only the other transits to subtract
    nc=8+10*(nplanetplot-1)
    sol2=np.copy(sol)
    sol2[nc+3]=0.0 #rdrs
    tmodel2= zeros(len(time)) #contains the transit model
    tfit5.transitmodel(nplanet,sol2,time,itime,nttin,tobsin,omcin,tmodel2,dtypein)
    epo=sol[nc+0] #time of center of transit
    per=sol[nc+1] #orbital period
    zpt=sol[7] #photometric zero-point (currently unused below)
    tdur=tfit5.transitdur(sol,1)/3600.0 #transit duration in hours
    ph1=epo/per-math.floor(epo/per) #calculate phases
    phase=[]
    tcor=tfit5.lininterp(tobsin,omcin,nplanetplot,nttin,epo)
    #print(tcor,nttin,tobsin[1,1],omcin[1,1])
    # Fold each timestamp on the orbital period, correcting for measured
    # TTVs when available; phases are expressed in hours from mid-transit.
    for x in time:
        if nttin[nplanetplot-1] > 0:
            tcor=tfit5.lininterp(tobsin,omcin,nplanetplot,nttin,x)
        else:
            tcor=0.0
        t=x-tcor
        ph=(t/per-math.floor(t/per)-ph1)*per*24.0 #phase in hours offset to zero.
        phase.append(ph)
    phase = np.array(phase) #convert from list to array
    phasesort=np.copy(phase)
    fluxsort=np.copy(tmodel)
    p=ones(len(phase), dtype="int32") #allocate array for output. FORTRAN needs int32
    tfit5.rqsort(phase,p)
    # Sort model by phase (rqsort returns 1-based indices) and record the
    # index range that falls inside +/- one transit duration for y-scaling.
    i1=0
    i2=len(phase)-1
    for i in range(0,len(phase)):
        phasesort[i]=phase[p[i]-1]
        fluxsort[i]=tmodel[p[i]-1]
        if phasesort[i] < -tdur:
            i1=i
        if phasesort[i] < tdur:
            i2=i
    # Data with all other planets' transits divided out of the flux.
    fplot=flux-tmodel2+1.0
    plt.figure(figsize=(12,10)) #adjust size of figure
    matplotlib.rcParams.update({'font.size': 22}) #adjust font
    plt.scatter(phase, fplot, c="blue", s=100.0, alpha=0.35, edgecolors="none") #scatter plot
    plt.plot(phasesort, fluxsort, c="red", lw=3.0)
    plt.xlabel('Phase (hours)') #x-label
    plt.ylabel('Relative Flux') #y-label
    x1,x2,y1,y2 = plt.axis() #get range of plot
    ymin=min(fplot[i1:i2])
    ymax=max(fplot[i1:i2])
    y1=ymin-0.1*(ymax-ymin)
    y2=ymax+0.1*(ymax-ymin)
    plt.axis((-tdur,tdur,y1,y2)) #readjust range
    plt.show() #show the plot
    return;
def transitmodel (sol,time, itime=-1, ntt=0, tobs=0, omc=0, dtype=0 ):
    """Evaluate the tfit5 transit model for a parameter vector `sol`.

    Args:
        sol: parameter vector (8 global + 10 per-planet entries).
        time: array of timestamps.
        itime: integration time; scalar int < 0 means the Kepler long
            cadence default (0.020434 d), otherwise a scalar or array.
        ntt/tobs/omc: TTV data (pass ints to use zero TTVs).
        dtype: 0 for photometry, 1 for RV, per point or scalar.

    Returns:
        numpy array with the model evaluated at each timestamp.
    """
    nplanet=int((len(sol)-8)/10) #number of planets
    if type(itime) is int :
        if itime < 0 :
            itime=ones(len(time))*0.020434
        else:
            itime=ones(len(time))*float(itime)
    if type(ntt) is int :
        nttin= zeros(nplanet, dtype="int32") #number of TTVs measured
        tobsin= zeros(shape=(nplanet,len(time))) #time stamps of TTV measurements (days)
        omcin= zeros(shape=(nplanet,len(time))) #TTV measurements (O-C) (days)
    else:
        nttin=ntt
        tobsin=tobs
        omcin=omc
    if type(dtype) is int :
        dtypein=ones(len(time), dtype="int32")*int(dtype) #contains data type, 0-photometry,1=RV data
    else:
        # Fix: the original left dtypein unbound when a per-point array was
        # passed, raising NameError below. Pass the array through instead.
        dtypein=dtype
    tmodel= zeros(len(time)) #contains the transit model
    tfit5.transitmodel(nplanet,sol,time,itime,nttin,tobsin,omcin,tmodel,dtypein)
    return tmodel;
def readphotometry(filename):
    """Read a Kepler photometry file.

    Each row must hold three whitespace-separated columns: time stamp,
    relative flux and flux uncertainty.  Times are shifted to BJD-2454900
    (file zero-point correction) and the flux is offset by +1.0.

    Returns (time, flux, ferr) as numpy arrays.
    """
    times = []
    fluxes = []
    errors = []
    handle = open(filename, 'r')
    for row in handle:
        fields = row.strip().split()
        times.append(float(fields[0]) - 54900.0 + 0.5)  # file zero-point -> BJD-2454900
        fluxes.append(float(fields[1]) + 1.0)           # photometry
        errors.append(float(fields[2]))                 # photometric uncertainty
    handle.close()
    return np.array(times), np.array(fluxes), np.array(errors)
def readsol(filename):
    """Read a transit-model solution file into parameter and error arrays.

    Each line starts with a parameter tag followed by whitespace-separated
    columns; column 1 is the value and column 3 its uncertainty.  Global
    parameters (RHO, NL1-NL4, DIL, VOF, ZPT) occupy the first 8 slots of
    the vector; planet tags (EP, PE, BB, RD, EC, ES, KR, TE, EL, AL) carry
    the planet number as their third character and map into 10-element
    blocks after the globals.  Unknown tags are ignored, as before.

    Returns (sol, serr) trimmed to the number of planets actually found.
    """
    nplanetmax = 9  # maximum number of planets that an n0.dat file can handle
    # Global (non-planet) parameters: slot in the first 8 entries.
    scalar_slots = {'RHO': 0, 'NL1': 1, 'NL2': 2, 'NL3': 3, 'NL4': 4,
                    'DIL': 5, 'VOF': 6, 'ZPT': 7}
    # Per-planet parameters: offset inside each planet's 10-element block.
    planet_offsets = {'EP': 0, 'PE': 1, 'BB': 2, 'RD': 3, 'EC': 4,
                      'ES': 5, 'KR': 6, 'TE': 7, 'EL': 8, 'AL': 9}
    nplanet = 0  # count number of planets found in the solution
    solin = zeros(nplanetmax * 10 + 8)   # parameter values, init to zero
    serrin = zeros(nplanetmax * 10 + 8)  # parameter uncertainties
    with open(filename, 'r') as f:
        for line in f:
            columns = line.strip().split()  # drop the \n, break into columns
            tag = columns[0]
            if tag[0:3] in scalar_slots:
                j = scalar_slots[tag[0:3]]
            elif tag[0:2] in planet_offsets:
                # third character of the tag is the planet number (1-9);
                # renamed from 'np' to avoid shadowing numpy's usual alias
                iplanet = float(tag[2])
                if iplanet > nplanet:
                    nplanet = iplanet
                j = int(10 * (iplanet - 1) + 8 + planet_offsets[tag[0:2]])
            else:
                continue  # unknown tags are silently ignored (as before)
            solin[j] = columns[1]   # numpy converts the string to float
            serrin[j] = columns[3]
    sol = solin[0:int(nplanet * 10 + 8)]
    serr = serrin[0:int(nplanet * 10 + 8)]
    return sol, serr
def fittrmodel(time, flux, ferr, sol, serr, itime=-1, ntt=0, tobs=0, omc=0, dtype=0):
    """Fit the transit model to photometry using the Fortran fitter.

    time, flux, ferr : data arrays (time stamps, flux, flux uncertainty).
    sol, serr : starting solution and per-parameter errors (see readsol).
    itime, ntt, tobs, omc, dtype : same conventions as transitmodel().

    Returns the fitted solution vector, same length as sol.
    """
    nfit = 108  # size of the parameter vector expected by the Fortran routine
    npt = len(time)
    solin = zeros(nfit)
    solin[0:len(sol)] = sol
    serrin = zeros(shape=(nfit, 2))
    serrin[0:len(serr), 1] = serr
    nplanet = int((len(sol) - 8) / 10)  # number of planets
    if type(itime) is int:
        if itime < 0:
            # default Kepler long-cadence integration time (days)
            itime = ones(len(time)) * 0.020434
        else:
            itime = ones(len(time)) * float(itime)
    if type(ntt) is int:
        nttin = zeros(nplanet, dtype="int32")        # number of TTVs measured
        tobsin = zeros(shape=(nplanet, len(time)))   # time stamps of TTV measurements (days)
        omcin = zeros(shape=(nplanet, len(time)))    # TTV measurements (O-C) (days)
    else:
        nttin = ntt
        tobsin = tobs
        omcin = omc
    if type(dtype) is int:
        # contains data type, 0-photometry, 1=RV data
        dtypein = ones(len(time), dtype="int32") * int(dtype)
    else:
        # bug fix: dtypein was previously left undefined when an array was
        # supplied, raising NameError in the call below
        dtypein = dtype
    nfrho = 1   # disable mean stellar density prior
    rhoi = 0.0  # np.float() was removed in NumPy 1.20; a plain float is identical
    rhoierr = zeros(9)  # stellar density prior (currently disabled)
    # fittransitmodel3(nfit,sol,serr,nplanet,npt,at,am,ae,ait,dtype,ntt,tobs,omc,nfrho,rhoi,rhoierr)
    ftf.fittransitmodel3(nfit, solin, serrin, nplanet, npt, time, flux, ferr, itime,
                         dtypein, nttin, tobsin, omcin, nfrho, rhoi, rhoierr)
    # the Fortran routine updates solin in place; return the fitted prefix
    return solin[0:len(sol)]
nwjs/chromium.src | tools/perf/core/external_modules.py | 10 | 1614 | # Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Allow importing external modules which may be missing in some platforms.
These modules are normally provided by the vpython environment manager. But
some platforms, e.g. CromeOs, do not have access to this facility.
To be safe, instead of e.g.:
import pandas
clients should do:
from core.external_modules import pandas
Tests that require pandas to work can be skipped as follows:
from core.external_modules import pandas
@unittest.skipIf(pandas is None, 'pandas not available')
class TestsForMyModule(unittest.TestCase):
def testSomeBehavior(self):
# test some behavior that requires pandas module.
Finally, scripts that to work properly require any of these external
dependencies should call:
from core import external_modules
if __name__ == '__main__':
external_modules.RequireModules()
# the rest of your script here.
to exit early with a suitable error message if the dependencies are not
satisfied.
"""
import sys
try:
import numpy # pylint: disable=import-error
except ImportError:
numpy = None
try:
import pandas # pylint: disable=import-error
except ImportError:
pandas = None
def RequireModules():
  """Exit the process with an error unless all external modules imported."""
  if any(module is None for module in (numpy, pandas)):
    sys.exit(
        'ERROR: Some required python modules are not available.\n\n'
        'Make sure to run this script using vpython or ensure that '
        'module dependencies listed in src/.vpython are satisfied.')
| bsd-3-clause |
mbkumar/pymatgen | dev_scripts/chemenv/strategies/multi_weights_strategy_parameters.py | 5 | 15844 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Script to visualize the model coordination environments
"""
__author__ = "David Waroquiers"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "2.0"
__maintainer__ = "David Waroquiers"
__email__ = "david.waroquiers@gmail.com"
__date__ = "Feb 20, 2016"
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometries import AllCoordinationGeometries
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometry_finder import LocalGeometryFinder
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometry_finder import AbstractGeometry
from pymatgen.analysis.chemenv.coordination_environments.chemenv_strategies import NormalizedAngleDistanceNbSetWeight
from pymatgen.analysis.chemenv.coordination_environments.chemenv_strategies import SelfCSMNbSetWeight
from pymatgen.analysis.chemenv.coordination_environments.chemenv_strategies import DeltaCSMNbSetWeight
from pymatgen.analysis.chemenv.coordination_environments.chemenv_strategies import CNBiasNbSetWeight
from pymatgen.analysis.chemenv.coordination_environments.chemenv_strategies import DistanceAngleAreaNbSetWeight
from pymatgen.analysis.chemenv.coordination_environments.chemenv_strategies import AngleNbSetWeight
from pymatgen.analysis.chemenv.coordination_environments.chemenv_strategies import MultiWeightsChemenvStrategy
import numpy as np
import matplotlib.pyplot as plt
import copy
import json
# Shared registry of model coordination geometries; queried by mp symbol in
# CoordinationEnvironmentMorphing.__init__.
allcg = AllCoordinationGeometries()
class CoordinationEnvironmentMorphing(object):
    """Morph a model coordination environment towards another one by
    continuously displacing selected neighbors, to probe how the ChemEnv
    multi-weights strategy transitions between the two environments."""

    def __init__(self, initial_environment_symbol, expected_final_environment_symbol, morphing_description):
        """
        Args:
            initial_environment_symbol: mp symbol of the starting environment.
            expected_final_environment_symbol: mp symbol the morphing should
                converge to.
            morphing_description: list of dicts with keys 'ineighbor',
                'site_type' and 'expansion_origin' describing which neighbors
                move and from where (see get_structure).
        """
        self.initial_environment_symbol = initial_environment_symbol
        self.expected_final_environment_symbol = expected_final_environment_symbol
        self.morphing_description = morphing_description
        self.coordination_geometry = allcg.get_geometry_from_mp_symbol(initial_environment_symbol)
        self.abstract_geometry = AbstractGeometry.from_cg(self.coordination_geometry)

    @classmethod
    def simple_expansion(cls, initial_environment_symbol, expected_final_environment_symbol, neighbors_indices):
        """Alternate constructor: move the given neighbors radially away
        from the central site."""
        morphing_description = [{'ineighbor': i_nb,
                                 'site_type': 'neighbor',
                                 'expansion_origin': 'central_site'} for i_nb in neighbors_indices]
        return cls(initial_environment_symbol=initial_environment_symbol,
                   expected_final_environment_symbol=expected_final_environment_symbol,
                   morphing_description=morphing_description)

    def figure_fractions(self, weights_options, morphing_factors=None):
        """Plot the fractions of the initial and final environments as a
        function of the morphing factor.

        NOTE(review): weights_options is forwarded to get_weights, which
        currently ignores it -- confirm intended behavior.
        """
        if morphing_factors is None:
            morphing_factors = np.linspace(1.0, 2.0, 21)
        # Set up the local geometry finder
        lgf = LocalGeometryFinder()
        lgf.setup_parameters(structure_refinement=lgf.STRUCTURE_REFINEMENT_NONE)
        # Set up the weights for the MultiWeights strategy
        weights = self.get_weights(weights_options)
        # Set up the strategy
        strat = MultiWeightsChemenvStrategy(dist_ang_area_weight=weights['DistAngArea'],
                                            self_csm_weight=weights['SelfCSM'],
                                            delta_csm_weight=weights['DeltaCSM'],
                                            cn_bias_weight=weights['CNBias'],
                                            angle_weight=weights['Angle'],
                                            normalized_angle_distance_weight=weights['NormalizedAngDist'])
        # Fake valences: central site +1, all neighbors -1.
        fake_valences = [-1] * (self.coordination_geometry.coordination_number + 1)
        fake_valences[0] = 1
        fractions_initial_environment = np.zeros_like(morphing_factors)
        fractions_final_environment = np.zeros_like(morphing_factors)
        for ii, morphing_factor in enumerate(morphing_factors):
            print(ii)
            struct = self.get_structure(morphing_factor=morphing_factor)
            print(struct)
            # Get the StructureEnvironments
            lgf.setup_structure(structure=struct)
            se = lgf.compute_structure_environments(only_indices=[0], valences=fake_valences)
            strat.set_structure_environments(structure_environments=se)
            result = strat.get_site_coordination_environments_fractions(site=se.structure[0], isite=0,
                                                                        return_strategy_dict_info=True,
                                                                        return_all=True)
            # Collect the fractions of the two environments of interest only.
            for res in result:
                if res['ce_symbol'] == self.initial_environment_symbol:
                    fractions_initial_environment[ii] = res['ce_fraction']
                elif res['ce_symbol'] == self.expected_final_environment_symbol:
                    fractions_final_environment[ii] = res['ce_fraction']
        fig_width_cm = 8.25
        fig_height_cm = 7.0
        fig_width = fig_width_cm / 2.54
        fig_height = fig_height_cm / 2.54
        fig = plt.figure(num=1, figsize=(fig_width, fig_height))
        subplot = fig.add_subplot(111)
        subplot.plot(morphing_factors, fractions_initial_environment, 'b-',
                     label='{}'.format(self.initial_environment_symbol),
                     linewidth=1.5)
        subplot.plot(morphing_factors, fractions_final_environment, 'g--',
                     label='{}'.format(self.expected_final_environment_symbol), linewidth=1.5)
        plt.legend(fontsize=8.0, loc=7)
        plt.show()

    def get_structure(self, morphing_factor):
        """Build a cubic test cell (Cu central site, O neighbors) with the
        neighbors listed in morphing_description displaced away from their
        expansion origin by (morphing_factor - 1) times the original vector."""
        lattice = Lattice.cubic(5.0)
        myspecies = ["O"] * (self.coordination_geometry.coordination_number + 1)
        myspecies[0] = "Cu"
        coords = copy.deepcopy(self.abstract_geometry.points_wcs_ctwcc())
        bare_points = self.abstract_geometry.bare_points_with_centre
        for morphing in self.morphing_description:
            if morphing['site_type'] == 'neighbor':
                # +1 because index 0 is the central site
                isite = morphing['ineighbor'] + 1
                if morphing['expansion_origin'] == 'central_site':
                    origin = bare_points[0]
                    vector = bare_points[isite] - origin
                    coords[isite] += vector * (morphing_factor - 1.0)
            else:
                raise ValueError('Key "site_type" is {} while it can only be neighbor'.format(morphing['site_type']))
        structure = Structure(lattice=lattice, species=myspecies, coords=coords, coords_are_cartesian=True)
        return structure

    def estimate_parameters(self, dist_factor_min, dist_factor_max, symmetry_measure_type='csm_wcs_ctwcc'):
        """Estimate the CSM parameters of the morphing.

        Computes the CSM of the initial environment at the two given
        distance factors, checking at both that the final environment is
        perfect (CSM ~ 0).  Returns a dict with keys 'delta_csm_min' (CSM of
        the initial environment at dist_factor_min) and 'self_weight_max_csm'
        (CSM of the initial environment at dist_factor_max).

        NOTE(review): the error messages below always mention the initial
        symbol, even where the final environment's CSM is being checked.
        """
        only_symbols = [self.initial_environment_symbol, self.expected_final_environment_symbol]
        # Set up the local geometry finder
        lgf = LocalGeometryFinder()
        lgf.setup_parameters(structure_refinement=lgf.STRUCTURE_REFINEMENT_NONE)
        # Get the StructureEnvironments
        fake_valences = [-1] * (self.coordination_geometry.coordination_number + 1)
        fake_valences[0] = 1
        # Get the StructureEnvironments for the structure with dist_factor_min
        struct = self.get_structure(morphing_factor=dist_factor_min)
        lgf.setup_structure(structure=struct)
        se = lgf.compute_structure_environments(only_indices=[0], valences=fake_valences,
                                                only_symbols=only_symbols)
        csm_info = se.get_csms(isite=0,
                               mp_symbol=self.initial_environment_symbol)
        if len(csm_info) == 0:
            raise ValueError('No csm found for {}'.format(self.initial_environment_symbol))
        # keep the best (smallest) symmetry measure
        csm_info.sort(key=lambda x: x['other_symmetry_measures'][symmetry_measure_type])
        csm_initial_min_dist = csm_info[0]['other_symmetry_measures'][symmetry_measure_type]
        csm_info = se.get_csms(isite=0,
                               mp_symbol=self.expected_final_environment_symbol)
        if len(csm_info) == 0:
            raise ValueError('No csm found for {}'.format(self.initial_environment_symbol))
        csm_info.sort(key=lambda x: x['other_symmetry_measures'][symmetry_measure_type])
        csm_final = csm_info[0]['other_symmetry_measures'][symmetry_measure_type]
        if not np.isclose(csm_final, 0.0, rtol=0.0, atol=1e-10):
            raise ValueError('Final coordination is not perfect !')
        # Get the StructureEnvironments for the structure with dist_factor_max
        struct = self.get_structure(morphing_factor=dist_factor_max)
        lgf.setup_structure(structure=struct)
        se = lgf.compute_structure_environments(only_indices=[0], valences=fake_valences,
                                                only_symbols=only_symbols)
        csm_info = se.get_csms(isite=0,
                               mp_symbol=self.initial_environment_symbol)
        if len(csm_info) == 0:
            raise ValueError('No csm found for {}'.format(self.initial_environment_symbol))
        csm_info.sort(key=lambda x: x['other_symmetry_measures'][symmetry_measure_type])
        csm_initial_max_dist = csm_info[0]['other_symmetry_measures'][symmetry_measure_type]
        csm_info = se.get_csms(isite=0,
                               mp_symbol=self.expected_final_environment_symbol)
        if len(csm_info) == 0:
            raise ValueError('No csm found for {}'.format(self.initial_environment_symbol))
        csm_info.sort(key=lambda x: x['other_symmetry_measures'][symmetry_measure_type])
        csm_final = csm_info[0]['other_symmetry_measures'][symmetry_measure_type]
        if not np.isclose(csm_final, 0.0, rtol=0.0, atol=1e-10):
            raise ValueError('Final coordination is not perfect !')
        return {'delta_csm_min': csm_initial_min_dist, 'self_weight_max_csm': csm_initial_max_dist}

    def get_weights(self, weights_options):
        """Build the set of neighbor-set weights used by the MultiWeights
        strategy.

        NOTE(review): weights_options is currently unused -- every weight is
        built from the hard-coded parameters below; confirm intent.
        """
        effective_csm_estimator = {'function': 'power2_inverse_decreasing',
                                   'options': {'max_csm': 8.0}}
        self_weight_estimator = {'function': 'power2_decreasing_exp',
                                 'options': {'max_csm': 5.4230949041608305,
                                             'alpha': 1.0}}
        self_csm_weight = SelfCSMNbSetWeight(effective_csm_estimator=effective_csm_estimator,
                                             weight_estimator=self_weight_estimator)
        surface_definition = {'type': 'standard_elliptic',
                              'distance_bounds': {'lower': 1.05, 'upper': 2.0},
                              'angle_bounds': {'lower': 0.05, 'upper': 0.95}}
        da_area_weight = DistanceAngleAreaNbSetWeight(weight_type='has_intersection',
                                                      surface_definition=surface_definition,
                                                      nb_sets_from_hints='fallback_to_source',
                                                      other_nb_sets='0_weight',
                                                      additional_condition=DistanceAngleAreaNbSetWeight.AC.ONLY_ACB)
        weight_estimator = {'function': 'smootherstep',
                            'options': {'delta_csm_min': 0.5,
                                        'delta_csm_max': 3.0}}
        symmetry_measure_type = 'csm_wcs_ctwcc'
        delta_csm_weight = DeltaCSMNbSetWeight(effective_csm_estimator=effective_csm_estimator,
                                               weight_estimator=weight_estimator,
                                               symmetry_measure_type=symmetry_measure_type)
        bias_weight = CNBiasNbSetWeight.linearly_equidistant(weight_cn1=1.0, weight_cn13=4.0)
        angle_weight = AngleNbSetWeight()
        nad_weight = NormalizedAngleDistanceNbSetWeight(average_type='geometric', aa=1, bb=1)
        weights = {'DistAngArea': da_area_weight,
                   'SelfCSM': self_csm_weight,
                   'DeltaCSM': delta_csm_weight,
                   'CNBias': bias_weight,
                   'Angle': angle_weight,
                   'NormalizedAngDist': nad_weight}
        return weights
if __name__ == '__main__':
    print('+-------------------------------------------------------------+\n'
          '| Development script of the ChemEnv utility of pymatgen |\n'
          '| Definition of parameters for the MultiWeightChemenvStrategy |\n'
          '+-------------------------------------------------------------+\n')
    # Each entry in ce_pairs.json describes one morphing between two
    # coordination environments (symbols, neighbor indices, distance bounds).
    with open('ce_pairs.json', 'r') as f:
        ce_pairs = json.load(f)
    # Estimated parameters, grouped per coordination-number pair "cn2_cn1"
    # and per initial coordination number.
    self_weight_max_csms = {}
    self_weight_max_csms_per_cn = {}
    allselfmaxcsms = []
    delta_csm_mins = {}
    alldeltacsmmins = []
    all_cn_pairs = []
    # Pre-create the containers for every ordered CN pair (1..13).
    for ii in range(1, 14):
        self_weight_max_csms_per_cn[str(ii)] = list()
        for jj in range(ii + 1, 14):
            cn_pair = '{:d}_{:d}'.format(ii, jj)
            self_weight_max_csms[cn_pair] = list()
            delta_csm_mins[cn_pair] = list()
            all_cn_pairs.append(cn_pair)
    # Estimate the CSM parameters for every morphing pair.
    for ce_pair_dict in ce_pairs:
        ce1 = ce_pair_dict['initial_environment_symbol']
        ce2 = ce_pair_dict['expected_final_environment_symbol']
        # CN pair key built as "final-CN_initial-CN" (symbol format "X:CN")
        cn_pair = '{}_{}'.format(ce2.split(':')[1], ce1.split(':')[1])
        nb_indices = ce_pair_dict['neighbors_indices']
        mindist = ce_pair_dict['dist_factor_min']
        maxdist = ce_pair_dict['dist_factor_max']
        morph = CoordinationEnvironmentMorphing.simple_expansion(initial_environment_symbol=ce1,
                                                                 expected_final_environment_symbol=ce2,
                                                                 neighbors_indices=nb_indices)
        params = morph.estimate_parameters(dist_factor_min=mindist, dist_factor_max=maxdist)
        print('For pair {} to {}, parameters are : '.format(ce1, ce2))
        print(params)
        self_weight_max_csms[cn_pair].append(params['self_weight_max_csm'])
        delta_csm_mins[cn_pair].append(params['delta_csm_min'])
        allselfmaxcsms.append(params['self_weight_max_csm'])
        alldeltacsmmins.append(params['delta_csm_min'])
        self_weight_max_csms_per_cn[ce1.split(':')[1]].append(params['self_weight_max_csm'])
    # Figure 1: both parameter families per CN pair.
    fig = plt.figure(1)
    subplot = fig.add_subplot(111)
    for ipair, cn_pair in enumerate(all_cn_pairs):
        if len(self_weight_max_csms[cn_pair]) == 0:
            continue
        subplot.plot(ipair * np.ones_like(self_weight_max_csms[cn_pair]), self_weight_max_csms[cn_pair], 'rx')
        subplot.plot(ipair * np.ones_like(delta_csm_mins[cn_pair]), delta_csm_mins[cn_pair], 'b+')
    subplot.set_xticks(range(len(all_cn_pairs)))
    subplot.set_xticklabels(all_cn_pairs, rotation='vertical')
    fig.savefig('self_delta_params.pdf')
    # Figure 2: self-weight max CSM per initial coordination number.
    fig2 = plt.figure(2)
    subplot2 = fig2.add_subplot(111)
    for cn in range(1, 14):
        subplot2.plot(cn * np.ones_like(self_weight_max_csms_per_cn[str(cn)]), self_weight_max_csms_per_cn[str(cn)],
                      'rx')
    subplot2.set_xticks(range(1, 14))
    fig2.savefig('self_params_per_cn.pdf')
    print(np.mean(allselfmaxcsms))
    print(np.mean(alldeltacsmmins))
    # Figure 3: delta-CSM minima per CN pair.
    fig3 = plt.figure(3, figsize=(24, 12))
    subplot3 = fig3.add_subplot(111)
    for ipair, cn_pair in enumerate(all_cn_pairs):
        if len(delta_csm_mins[cn_pair]) == 0:
            continue
        subplot3.plot(ipair * np.ones_like(delta_csm_mins[cn_pair]), delta_csm_mins[cn_pair], 'b+')
    subplot3.set_xticks(range(len(all_cn_pairs)))
    subplot3.set_xticklabels(all_cn_pairs, rotation='vertical')
    fig3.savefig('delta_params_per_cn_pair.pdf')
    plt.show()
| mit |
nojero/pod | src/neg/units.py | 1 | 7695 | #!/usr/bin/python
from net import *
from z3 import *
from time import time
from matrix import *
import pandas as pd
def z3Pair(x, y):
    """Z3 integer variable encoding the relation between two places."""
    assert isinstance(x, Place) and isinstance(y, Place)
    return Int("%s-%s" % (repr(x), repr(y)))
def z3Int(x):
    """Z3 integer variable associated with a single place."""
    assert isinstance(x, Place)
    return Int(repr(x))
def parseMatrix(name):
    """Read a *.excl exclusion matrix file into a nested dictionary.

    The matrix entries are single-character STRINGS.  Row i of the file
    holds the leading columns of a triangular matrix; rows shorter than the
    full width are padded with '.'.  Returns a dict mapping
    d["p<i>"]["p<j>"] to the character for the pair of places (i, j).
    """
    with open(name) as f:
        lines = f.readlines()
    # Width of the square matrix: length of the last row minus its newline.
    # (Bug fix: the old code called str.decode() here, which only exists in
    # Python 2 and was a no-op for this ASCII input.)
    rank = len(lines[-1]) - 1
    d = {}
    for it, line in enumerate(lines):
        d["p" + str(it)] = {}
        n = len(line)
        # Drop the trailing character (newline) and pad with '.' to width.
        newLine = line[:n - 1] + "." * (rank - n + 1)
        for it2, pt in enumerate(newLine):
            d["p" + str(it)]["p" + str(it2)] = pt
    return d
def getHalfspace(p, n):
    """Symbolic marking of place p in net n: initial tokens plus the net
    effect of the firing-count variables of its input/output transitions."""
    assert isinstance(p, Place) and isinstance(n, Net)
    assert p in n.places
    expr = n.m0[p]
    for t_in in p.pre:
        expr = expr + Int(repr(t_in))
    for t_out in p.post:
        expr = expr - Int(repr(t_out))
    return expr
def getMkEq(n):
    """Marking equation: every place of the net keeps a non-negative count."""
    assert isinstance(n, Net)
    return And([getHalfspace(p, n) >= 0 for p in n.places])
def findUnits(inputfile, matrix, bound, op=False, timeout=100, verbose=False):
    """Assign every place of a Petri net to a unit via an SMT encoding.

    inputfile -- path to the net in PNML format (*.pnml)
    matrix    -- path to the exclusion matrix (*.excl), see parseMatrix
    bound     -- maximum number of units (None = one unit per place)
    op        -- use an optimizing solver (Optimize) with soft constraints
    timeout   -- solver timeout, passed to z3's "timeout" option
    verbose   -- print progress and timing information

    Returns a string with one line per unit, listing the names of the
    places in that unit terminated by "-1".

    Bug fixes with respect to the previous version:
      * 'Z3Int' (capital Z, an undefined name) replaced by z3Int everywhere;
      * the soft constraints for matrix entries '.' and '1' contained the
        syntax errors 'z3Int(p1)p2 == 1' / 'z3p1p2 == 2'; the intended terms
        are pair relations 1 and 2 on (p1, p2).
    """
    assert inputfile.endswith('.pnml')
    assert matrix.endswith('.excl')
    n = Net()
    n.read(inputfile)
    if bound is None:
        bound = len(n.places)
    if op:
        s = Optimize()
        s.minimize(Int("Bound"))
    else:
        s = Solver()  # ; s.add(Int("Bound") <= bound)
    s.set("timeout", timeout)
    excMatrix = parseMatrix(matrix)
    # Prepares the encoding for exclusive places: transitions fire a
    # non-negative number of times under the marking equation.
    z3Trans = [Int(repr(t)) for t in n.trans]
    pre = And([Int(repr(t)) >= 0 for t in n.trans])
    pre = And(pre, getMkEq(n))
    # This breaks the symmetry of the encoding based on the algorithm from
    # "Exploiting symmetry in SMT problems" (CASE'11)
    if verbose:
        print("Starting encoding to break symmetry")
    iTime = time()
    T = set(Int(repr(p)) for p in n.places)
    ctsUnits = set(range(1, bound + 1))
    cts = set()
    while T != set() and len(cts) <= len(ctsUnits):
        t = T.pop()
        if ctsUnits.difference(cts) != set():
            c = ctsUnits.difference(cts).pop()
            cts.add(c)
        if cts != ctsUnits:
            s.add(Or([t == c for c in cts]))
    eTime = time()
    if verbose:
        print("Done! (%s)" % (str(eTime - iTime)))
    # Places in the preset (and postset) of a transition should belong to
    # different units (unless the transition is dead, which we assume is not).
    if verbose:
        print("Starting encoding for preset and postset")
    iTime = time()
    for t in n.trans:
        for it, p1 in enumerate(list(t.pre)):
            for p2 in list(t.pre)[it + 1:]:
                # Both places feed the same transition, so they are
                # unrelated (code 3) and must live in different units.
                s.add(z3Pair(p1, p2) == 3)
                s.add(z3Int(p1) != z3Int(p2))
            for p2 in t.post:
                # Soft preferences (optimizing mode only) on the relation
                # between an input and an output place of the transition.
                if op and len(t.pre) == 1 and len(t.post) == 1:
                    s.add_soft(z3Pair(p1, p2) == 0)
                elif op and len(t.pre) == 1 and len(t.post) > 1:
                    s.add_soft(z3Pair(p1, p2) == 1)
                elif op and len(t.pre) > 1 and len(t.post) == 1:
                    s.add_soft(z3Pair(p1, p2) == 1)
                elif op and len(t.pre) > 1 and len(t.post) > 1:
                    s.add_soft(z3Pair(p1, p2) == 0)
                else:
                    continue
        for it, p1 in enumerate(list(t.post)):
            for p2 in list(t.post)[it + 1:]:
                s.add(z3Pair(p1, p2) == 3)
                s.add(z3Int(p1) != z3Int(p2))
    eTime = time()
    if verbose:
        print("Done! (%s)" % (str(eTime - iTime)))
    # Places in the initial marking belong to different units.
    if verbose:
        print("Starting encoding for initial marking")
    iTime = time()
    for it, p1 in enumerate(n.initialMk()):
        for p2 in n.initialMk()[it + 1:]:
            # each initially marked place goes to its own unit
            s.add(z3Pair(p1, p2) == 3)
    eTime = time()
    if verbose:
        print("Done! (%s)" % (str(eTime - iTime)))
    if verbose:
        print("Starting encoding for marking equation and exclusive places")
    iTime = time()
    for it, p1 in enumerate(n.places):
        hs1 = getHalfspace(p1, n)
        mp1 = str(repr(p1))
        # Domain of the variable relating places to units
        s.add(z3Int(p1) > 0)
        s.add(z3Int(p1) <= Int("Bound"))
        for p2 in n.places[it + 1:]:
            hs2 = getHalfspace(p2, n)
            mp2 = str(repr(p2))
            # Domain of the variable relating the two places
            s.add(z3Pair(p1, p2) >= 0)
            s.add(z3Pair(p1, p2) <= 3)
            # If two places belong to the same unit, they should be exclusive
            s.add(Implies(z3Pair(p1, p2) == 0, ForAll(z3Trans, (Implies(And(pre, hs1 > 0), hs2 == 0)))))
            # We use the information given by the exclusion matrix
            if excMatrix[mp1][mp2] == "=":
                s.add(z3Pair(p1, p2) == 0)
                s.add(z3Int(p1) == z3Int(p2))
            elif excMatrix[mp1][mp2] == "<":
                s.add(z3Pair(p1, p2) == 1)
                s.add(z3Int(p1) != z3Int(p2))
            elif excMatrix[mp1][mp2] == ">":
                s.add(z3Pair(p1, p2) == 2)
                s.add(z3Int(p1) != z3Int(p2))
            elif excMatrix[mp1][mp2] == ".":
                if op:
                    s.add_soft(Or(z3Pair(p1, p2) == 0, z3Pair(p1, p2) == 1, z3Pair(p1, p2) == 2))
            elif excMatrix[mp1][mp2] == "0":
                s.add(z3Pair(p1, p2) == 3)
                s.add(z3Int(p1) != z3Int(p2))
            elif excMatrix[mp1][mp2] == "1":
                if op:
                    s.add_soft(Or(z3Pair(p1, p2) == 0, z3Pair(p1, p2) == 1, z3Pair(p1, p2) == 2))
            elif excMatrix[mp1][mp2] == "~":
                if op:
                    s.add_soft(z3Pair(p1, p2) == 0)
            elif excMatrix[mp1][mp2] == "[":
                s.add(z3Pair(p1, p2) == 1)
                s.add(z3Int(p1) != z3Int(p2))
            elif excMatrix[mp1][mp2] == "]":
                s.add(z3Pair(p1, p2) == 2)
                s.add(z3Int(p1) != z3Int(p2))
            # If the pair variable says they belong to the same unit, we
            # impose so; any other relation forces different units.
            s.add(Implies(z3Pair(p1, p2) == 0, z3Int(p1) == z3Int(p2)))
            s.add(Implies(z3Pair(p1, p2) != 0, z3Int(p1) != z3Int(p2)))
    eTime = time()
    if verbose:
        print("Done! (%s)" % (str(eTime - iTime)))
    if verbose:
        print("Launching the solver")
    iTime = time()
    lastModel = None
    s.add(Int("Bound") <= bound)
    sol = s.check()
    # Iteratively tighten the bound until the problem becomes unsatisfiable;
    # the last satisfiable model gives the minimal number of units found.
    while sol == sat:
        lastModel = s.model()
        bound = lastModel[Int("Bound")]
        s.add(Int("Bound") < bound)
        sol = s.check()
    eTime = time()
    if verbose:
        print("Done! (%s)" % (str(eTime - iTime)))
    for p in n.places:
        if lastModel is not None:
            p.setUnit(int(str(lastModel[z3Int(p)])))
    units = ""
    for u in range(1, int(str(bound)) + 1):
        placesForUnit = [str(p.name) for p in n.places if p.unit == u]
        placesForUnit.append("-1")
        units += "%s\n" % (" ".join(placesForUnit))
    return units
| gpl-3.0 |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/pandas/tests/series/test_replace.py | 8 | 7896 | # coding=utf-8
# pylint: disable-msg=E1101,W0612
import numpy as np
import pandas as pd
import pandas.lib as lib
import pandas.util.testing as tm
from .common import TestData
class TestSeriesReplace(TestData, tm.TestCase):
    """Tests for Series.replace: scalar/list/dict to-replace arguments,
    inplace variants, dtype upcasting, and bool/object corner cases."""

    _multiprocess_can_split_ = True

    def test_replace(self):
        # Core replace behavior on float and object Series.
        N = 100
        ser = pd.Series(np.random.randn(N))
        ser[0:4] = np.nan
        ser[6:10] = 0

        # replace list with a single value
        ser.replace([np.nan], -1, inplace=True)

        exp = ser.fillna(-1)
        tm.assert_series_equal(ser, exp)

        rs = ser.replace(0., np.nan)
        ser[ser == 0.] = np.nan
        tm.assert_series_equal(rs, ser)

        ser = pd.Series(np.fabs(np.random.randn(N)), tm.makeDateIndex(N),
                        dtype=object)
        ser[:5] = np.nan
        ser[6:10] = 'foo'
        ser[20:30] = 'bar'

        # replace list with a single value
        rs = ser.replace([np.nan, 'foo', 'bar'], -1)

        self.assertTrue((rs[:5] == -1).all())
        self.assertTrue((rs[6:10] == -1).all())
        self.assertTrue((rs[20:30] == -1).all())
        self.assertTrue((pd.isnull(ser[:5])).all())

        # replace with different values
        rs = ser.replace({np.nan: -1, 'foo': -2, 'bar': -3})

        self.assertTrue((rs[:5] == -1).all())
        self.assertTrue((rs[6:10] == -2).all())
        self.assertTrue((rs[20:30] == -3).all())
        self.assertTrue((pd.isnull(ser[:5])).all())

        # replace with different values with 2 lists
        rs2 = ser.replace([np.nan, 'foo', 'bar'], [-1, -2, -3])
        tm.assert_series_equal(rs, rs2)

        # replace inplace
        ser.replace([np.nan, 'foo', 'bar'], -1, inplace=True)

        self.assertTrue((ser[:5] == -1).all())
        self.assertTrue((ser[6:10] == -1).all())
        self.assertTrue((ser[20:30] == -1).all())

        ser = pd.Series([np.nan, 0, np.inf])
        tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))

        ser = pd.Series([np.nan, 0, 'foo', 'bar', np.inf, None, lib.NaT])
        tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
        filled = ser.copy()
        filled[4] = 0
        tm.assert_series_equal(ser.replace(np.inf, 0), filled)

        ser = pd.Series(self.ts.index)
        tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))

        # malformed
        self.assertRaises(ValueError, ser.replace, [1, 2, 3], [np.nan, 0])

        # make sure that we aren't just masking a TypeError because bools don't
        # implement indexing
        with tm.assertRaisesRegexp(TypeError, 'Cannot compare types .+'):
            ser.replace([1, 2], [np.nan, 0])

        ser = pd.Series([0, 1, 2, 3, 4])
        result = ser.replace([0, 1, 2, 3, 4], [4, 3, 2, 1, 0])
        tm.assert_series_equal(result, pd.Series([4, 3, 2, 1, 0]))

    def test_replace_gh5319(self):
        # replace with no "value" argument fills forward (ffill semantics).
        # API change from 0.12?
        # GH 5319
        ser = pd.Series([0, np.nan, 2, 3, 4])
        expected = ser.ffill()
        result = ser.replace([np.nan])
        tm.assert_series_equal(result, expected)

        ser = pd.Series([0, np.nan, 2, 3, 4])
        expected = ser.ffill()
        result = ser.replace(np.nan)
        tm.assert_series_equal(result, expected)
        # GH 5797
        ser = pd.Series(pd.date_range('20130101', periods=5))
        expected = ser.copy()
        expected.loc[2] = pd.Timestamp('20120101')
        result = ser.replace({pd.Timestamp('20130103'):
                              pd.Timestamp('20120101')})
        tm.assert_series_equal(result, expected)
        result = ser.replace(pd.Timestamp('20130103'),
                             pd.Timestamp('20120101'))
        tm.assert_series_equal(result, expected)

    def test_replace_with_single_list(self):
        ser = pd.Series([0, 1, 2, 3, 4])
        result = ser.replace([1, 2, 3])
        tm.assert_series_equal(result, pd.Series([0, 0, 0, 0, 4]))

        s = ser.copy()
        s.replace([1, 2, 3], inplace=True)
        tm.assert_series_equal(s, pd.Series([0, 0, 0, 0, 4]))

        # make sure things don't get corrupted when fillna call fails
        s = ser.copy()
        with tm.assertRaises(ValueError):
            s.replace([1, 2, 3], inplace=True, method='crash_cymbal')
        tm.assert_series_equal(s, ser)

    def test_replace_mixed_types(self):
        # dtype upcasting rules when the replacement value needs a wider type.
        s = pd.Series(np.arange(5), dtype='int64')

        def check_replace(to_rep, val, expected):
            # verify both the out-of-place and inplace paths agree
            sc = s.copy()
            r = s.replace(to_rep, val)
            sc.replace(to_rep, val, inplace=True)
            tm.assert_series_equal(expected, r)
            tm.assert_series_equal(expected, sc)

        # should NOT upcast to float
        e = pd.Series([0, 1, 2, 3, 4])
        tr, v = [3], [3.0]
        check_replace(tr, v, e)

        # MUST upcast to float
        e = pd.Series([0, 1, 2, 3.5, 4])
        tr, v = [3], [3.5]
        check_replace(tr, v, e)

        # casts to object
        e = pd.Series([0, 1, 2, 3.5, 'a'])
        tr, v = [3, 4], [3.5, 'a']
        check_replace(tr, v, e)

        # again casts to object
        e = pd.Series([0, 1, 2, 3.5, pd.Timestamp('20130101')])
        tr, v = [3, 4], [3.5, pd.Timestamp('20130101')]
        check_replace(tr, v, e)

        # casts to float
        e = pd.Series([0, 1, 2, 3.5, 1])
        tr, v = [3, 4], [3.5, True]
        check_replace(tr, v, e)

        # test an object with dates + floats + integers + strings
        dr = pd.date_range('1/1/2001', '1/10/2001',
                           freq='D').to_series().reset_index(drop=True)
        result = dr.astype(object).replace(
            [dr[0], dr[1], dr[2]], [1.0, 2, 'a'])
        expected = pd.Series([1.0, 2, 'a'] + dr[3:].tolist(), dtype=object)
        tm.assert_series_equal(result, expected)

    def test_replace_bool_with_string_no_op(self):
        s = pd.Series([True, False, True])
        result = s.replace('fun', 'in-the-sun')
        tm.assert_series_equal(s, result)

    def test_replace_bool_with_string(self):
        # nonexistent elements
        s = pd.Series([True, False, True])
        result = s.replace(True, '2u')
        expected = pd.Series(['2u', False, '2u'])
        tm.assert_series_equal(expected, result)

    def test_replace_bool_with_bool(self):
        s = pd.Series([True, False, True])
        result = s.replace(True, False)
        expected = pd.Series([False] * len(s))
        tm.assert_series_equal(expected, result)

    def test_replace_with_dict_with_bool_keys(self):
        s = pd.Series([True, False, True])
        with tm.assertRaisesRegexp(TypeError, 'Cannot compare types .+'):
            s.replace({'asdf': 'asdb', True: 'yes'})

    def test_replace2(self):
        # object-dtype variant of the core replace scenarios above.
        N = 100
        ser = pd.Series(np.fabs(np.random.randn(N)), tm.makeDateIndex(N),
                        dtype=object)
        ser[:5] = np.nan
        ser[6:10] = 'foo'
        ser[20:30] = 'bar'

        # replace list with a single value
        rs = ser.replace([np.nan, 'foo', 'bar'], -1)

        self.assertTrue((rs[:5] == -1).all())
        self.assertTrue((rs[6:10] == -1).all())
        self.assertTrue((rs[20:30] == -1).all())
        self.assertTrue((pd.isnull(ser[:5])).all())

        # replace with different values
        rs = ser.replace({np.nan: -1, 'foo': -2, 'bar': -3})

        self.assertTrue((rs[:5] == -1).all())
        self.assertTrue((rs[6:10] == -2).all())
        self.assertTrue((rs[20:30] == -3).all())
        self.assertTrue((pd.isnull(ser[:5])).all())

        # replace with different values with 2 lists
        rs2 = ser.replace([np.nan, 'foo', 'bar'], [-1, -2, -3])
        tm.assert_series_equal(rs, rs2)

        # replace inplace
        ser.replace([np.nan, 'foo', 'bar'], -1, inplace=True)
        self.assertTrue((ser[:5] == -1).all())
        self.assertTrue((ser[6:10] == -1).all())
        self.assertTrue((ser[20:30] == -1).all())
| gpl-3.0 |
castelao/CoTeDe | tests/qctests/test_qc_gradient.py | 1 | 1641 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
"""
import numpy as np
from numpy import ma
from cotede.qctests.gradient import curvature, _curvature_pandas
from cotede.qctests import Gradient
from ..data import DummyData
from .compare import compare_feature_input_types, compare_input_types
def test_curvature():
    """Basic test on feature curvature"""
    values = [1, -1, 2, 2, 3, 2, 4]
    expected = [np.nan, -2.5, 1.5, -0.5, 1.0, -1.5, np.nan]
    result = curvature(values)
    assert isinstance(result, np.ndarray)
    assert np.allclose(result, expected, equal_nan=True)
def test_feature_input_types():
    """Curvature must accept the usual array-like input types."""
    data = np.array([1, -1, 2, 2, 3, 2, 4])
    compare_feature_input_types(curvature, data)
def test_standard_dataset():
    """Gradient on the standard dummy profile yields known features/flags."""
    expected_features = {
        "gradient": np.array(
            [
                np.nan,
                0.01,
                0.015,
                0.145,
                0.605,
                0.04,
                1.145,
                -0.67,
                0.875,
                -0.08,
                -2.575,
                1.61,
                -0.045,
                np.nan,
                np.nan,
            ]
        )
    }
    expected_flags = {
        "gradient": [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 4, 4, 1, 0, 9]
    }

    qc = Gradient(DummyData(), "TEMP", {"threshold": 1.5})

    for name, values in expected_features.items():
        assert np.allclose(qc.features[name], values, equal_nan=True)
    for name, values in expected_flags.items():
        assert np.allclose(qc.flags[name], values, equal_nan=True)
def test_input_types():
    """Gradient accepts all input types covered by the comparison helper."""
    compare_input_types(Gradient, {"threshold": 4})
| bsd-3-clause |
import nltk
# nltk.download_shell()
import pandas as pd
import string
from nltk.corpus import stopwords  # filler words such as "the", "me", "our"
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import classification_report

# linux:
# read and load data
'''
messages = [line.rstrip() for line in open(
    'smsspamcollection/SMSSpamCollection')]
messages = pd.read_csv('smsspamcollection/SMSSpamCollection', sep='\t',
                       names=["label", "message"])
'''

# windows:
# read and load data
# Fix: the original also read the whole file line-by-line into a misspelled,
# never-used variable ("messgaes"), i.e. a dead second read of the corpus;
# that redundant read has been removed.
messages = pd.read_csv('D:/Github_python_ML/MachineLearning/NaturalLanguageProcessing/smsspamcollection/SMSSpamCollection', sep='\t',
                       names=["label", "message"])

# Add the raw message length as an extra feature column.
messages['length'] = messages['message'].apply(len)
def text_process(mess):
    """
    Clean one raw message for bag-of-words vectorization.

    1. Remove all punctuation
    2. Remove all stopwords
    3. Returns a list of the cleaned text

    Args:
        mess (str): a raw SMS message.

    Returns:
        list[str]: the remaining words, punctuation- and stopword-free.
    """
    # Strip punctuation in a single pass.
    nopunc = ''.join(char for char in mess if char not in string.punctuation)

    # Perf fix: build the stopword set once per call. The original called
    # stopwords.words('english') inside the comprehension, re-fetching and
    # linearly scanning the corpus list for EVERY word.
    stop_words = set(stopwords.words('english'))

    return [word for word in nopunc.split() if word.lower() not in stop_words]
# apply clean function to all
# NOTE(review): only the first 5 rows are processed and the result is
# discarded -- this line looks like a leftover demonstration.
messages['message'].head(5).apply(text_process)

# Vectorize the messages: learn a vocabulary, then count, per message, how
# many times each vocabulary word occurs (bag-of-words sparse matrix).
bow_transformer = CountVectorizer(
    analyzer=text_process).fit(messages['message'])
messages_bow = bow_transformer.transform(messages['message'])

# print some status
print('Shape of Sparse Matrix: ', messages_bow.shape)
print('Amount of Non-Zero occurences: ', messages_bow.nnz)
sparsity = (100.0 * messages_bow.nnz /
            (messages_bow.shape[0] * messages_bow.shape[1]))
print('sparsity: {}'.format(sparsity))

# tfidf = term frequency-inverse document frequency
tfidf_transformer = TfidfTransformer().fit(messages_bow)
messages_tfidf = tfidf_transformer.transform(messages_bow)

# Naive Bayes classifier, trained on the TF-IDF weighted counts.
# NOTE(review): the model is trained and evaluated on the same data, so the
# classification report below overstates real-world performance.
spam_detect_model = MultinomialNB().fit(messages_tfidf, messages['label'])

# predict on the training set and report precision/recall/F1 per class
all_predictions = spam_detect_model.predict(messages_tfidf)
print(classification_report(messages['label'], all_predictions))
| mit |
fabioticconi/scikit-learn | examples/linear_model/plot_sparse_recovery.py | 70 | 7486 | """
============================================================
Sparse recovery: feature selection for sparse linear models
============================================================
Given a small number of observations, we want to recover which features
of X are relevant to explain y. For this :ref:`sparse linear models
<l1_feature_selection>` can outperform standard statistical tests if the
true model is sparse, i.e. if a small fraction of the features are
relevant.
As detailed in :ref:`the compressive sensing notes
<compressive_sensing>`, the ability of L1-based approach to identify the
relevant variables depends on the sparsity of the ground truth, the
number of samples, the number of features, the conditioning of the
design matrix on the signal subspace, the amount of noise, and the
absolute value of the smallest non-zero coefficient [Wainwright2006]
(http://statistics.berkeley.edu/sites/default/files/tech-reports/709.pdf).
Here we keep all parameters constant and vary the conditioning of the
design matrix. For a well-conditioned design matrix (small mutual
incoherence) we are exactly in compressive sensing conditions (i.i.d
Gaussian sensing matrix), and L1-recovery with the Lasso performs very
well. For an ill-conditioned matrix (high mutual incoherence),
regressors are very correlated, and the Lasso randomly selects one.
However, randomized-Lasso can recover the ground truth well.
In each situation, we first vary the alpha parameter setting the sparsity
of the estimated model and look at the stability scores of the randomized
Lasso. This analysis, knowing the ground truth, shows an optimal regime
in which relevant features stand out from the irrelevant ones. If alpha
is chosen too small, non-relevant variables enter the model. On the
opposite, if alpha is selected too large, the Lasso is equivalent to
stepwise regression, and thus brings no advantage over a univariate
F-test.
Next, we set alpha and compare the performance of different
feature selection methods, using the area under curve (AUC) of the
precision-recall.
"""
print(__doc__)
# Author: Alexandre Gramfort and Gael Varoquaux
# License: BSD 3 clause
import warnings
import matplotlib.pyplot as plt
import numpy as np
from scipy import linalg
from sklearn.linear_model import (RandomizedLasso, lasso_stability_path,
LassoLarsCV)
from sklearn.feature_selection import f_regression
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import auc, precision_recall_curve
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.utils.extmath import pinvh
from sklearn.exceptions import ConvergenceWarning
def mutual_incoherence(X_relevant, X_irelevant):
    """Mutual incoherence, as defined by formula (26a) of [Wainwright2006].
    """
    # Project the irrelevant columns onto the span of the relevant ones:
    # (X_irr^T X_rel) (X_rel^T X_rel)^+
    gram_pinv = pinvh(np.dot(X_relevant.T, X_relevant))
    cross = np.dot(X_irelevant.T, X_relevant)
    projector = np.dot(cross, gram_pinv)
    # Largest L1 row norm of the projector.
    return np.abs(projector).sum(axis=1).max()
# Run the whole experiment twice: once with a well-conditioned design and
# once with a badly-conditioned (highly correlated) one.
for conditioning in (1, 1e-4):
    ###########################################################################
    # Simulate regression data with a correlated design
    n_features = 501
    n_relevant_features = 3
    noise_level = .2
    coef_min = .2
    # The Donoho-Tanner phase transition is around n_samples=25: below we
    # will completely fail to recover in the well-conditioned case
    n_samples = 25
    block_size = n_relevant_features

    rng = np.random.RandomState(42)

    # The coefficients of our model: only the first 3 features are relevant
    coef = np.zeros(n_features)
    coef[:n_relevant_features] = coef_min + rng.rand(n_relevant_features)

    # The correlation of our design: variables correlated by blocs of 3
    corr = np.zeros((n_features, n_features))
    for i in range(0, n_features, block_size):
        corr[i:i + block_size, i:i + block_size] = 1 - conditioning
    corr.flat[::n_features + 1] = 1
    corr = linalg.cholesky(corr)

    # Our design: i.i.d. Gaussian, then colored by the Cholesky factor
    X = rng.normal(size=(n_samples, n_features))
    X = np.dot(X, corr)
    # Keep [Wainwright2006] (26c) constant
    X[:n_relevant_features] /= np.abs(
        linalg.svdvals(X[:n_relevant_features])).max()
    X = StandardScaler().fit_transform(X.copy())

    # The output variable
    y = np.dot(X, coef)
    y /= np.std(y)
    # We scale the added noise as a function of the average correlation
    # between the design and the output variable
    y += noise_level * rng.normal(size=n_samples)
    mi = mutual_incoherence(X[:, :n_relevant_features],
                            X[:, n_relevant_features:])

    ###########################################################################
    # Plot stability selection path, using a high eps for early stopping
    # of the path, to save computation time
    alpha_grid, scores_path = lasso_stability_path(X, y, random_state=42,
                                                   eps=0.05)

    plt.figure()
    # We plot the path as a function of alpha/alpha_max to the power 1/3: the
    # power 1/3 scales the path less brutally than the log, and enables to
    # see the progression along the path
    hg = plt.plot(alpha_grid[1:] ** .333, scores_path[coef != 0].T[1:], 'r')
    hb = plt.plot(alpha_grid[1:] ** .333, scores_path[coef == 0].T[1:], 'k')
    # NOTE(review): ymin/ymax are captured but never used afterwards.
    ymin, ymax = plt.ylim()
    plt.xlabel(r'$(\alpha / \alpha_{max})^{1/3}$')
    plt.ylabel('Stability score: proportion of times selected')
    plt.title('Stability Scores Path - Mutual incoherence: %.1f' % mi)
    plt.axis('tight')
    plt.legend((hg[0], hb[0]), ('relevant features', 'irrelevant features'),
               loc='best')

    ###########################################################################
    # Plot the estimated stability scores for a given alpha

    # Use 6-fold cross-validation rather than the default 3-fold: it leads to
    # a better choice of alpha:
    # Stop the user warnings outputs- they are not necessary for the example
    # as it is specifically set up to be challenging.
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', UserWarning)
        warnings.simplefilter('ignore', ConvergenceWarning)
        lars_cv = LassoLarsCV(cv=6).fit(X, y)

    # Run the RandomizedLasso: we use a paths going down to .1*alpha_max
    # to avoid exploring the regime in which very noisy variables enter
    # the model
    alphas = np.linspace(lars_cv.alphas_[0], .1 * lars_cv.alphas_[0], 6)
    clf = RandomizedLasso(alpha=alphas, random_state=42).fit(X, y)
    trees = ExtraTreesRegressor(100).fit(X, y)
    # Compare with F-score
    F, _ = f_regression(X, y)

    plt.figure()
    for name, score in [('F-test', F),
                        ('Stability selection', clf.scores_),
                        ('Lasso coefs', np.abs(lars_cv.coef_)),
                        ('Trees', trees.feature_importances_),
                        ]:
        precision, recall, thresholds = precision_recall_curve(coef != 0,
                                                               score)
        plt.semilogy(np.maximum(score / np.max(score), 1e-4),
                     label="%s. AUC: %.3f" % (name, auc(recall, precision)))
    plt.plot(np.where(coef != 0)[0], [2e-4] * n_relevant_features, 'mo',
             label="Ground truth")
    plt.xlabel("Features")
    plt.ylabel("Score")
    # Plot only the 100 first coefficients
    plt.xlim(0, 100)
    plt.legend(loc='best')
    plt.title('Feature selection scores - Mutual incoherence: %.1f'
              % mi)

plt.show()
| bsd-3-clause |
ssamot/ce888 | labs/lab2/salaries.py | 1 | 1518 | import matplotlib
matplotlib.use('Agg')
import pandas as pd
import random
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
# def permutation(statistic, error):
def mad(arr):
    """Median Absolute Deviation of *arr*.

    A "robust" alternative to the standard deviation: the median of the
    absolute deviations from the sample median.
    https://en.wikipedia.org/wiki/Median_absolute_deviation
    """
    # Drop masked entries, if the input happens to be a masked array.
    values = np.ma.array(arr).compressed()
    center = np.median(values)
    return np.median(np.abs(values - center))
if __name__ == "__main__":
    # Load the customer sales data; column 0 is plotted against column 1.
    df = pd.read_csv('./customers.csv')

    print((df.columns))

    # Scatter plot of the first two columns (no regression line).
    # NOTE(review): positional column arguments to lmplot are removed in
    # newer seaborn releases -- confirm the pinned seaborn version.
    sns_plot = sns.lmplot(df.columns[0], df.columns[1], data=df, fit_reg=False)

    sns_plot.axes[0,0].set_ylim(0,)
    sns_plot.axes[0,0].set_xlim(0,)

    sns_plot.savefig("scaterplot.png",bbox_inches='tight')
    sns_plot.savefig("scaterplot.pdf",bbox_inches='tight')

    # Summary statistics of the second column.
    data = df.values.T[1]

    print((("Mean: %f")%(np.mean(data))))
    print((("Median: %f")%(np.median(data))))
    print((("Var: %f")%(np.var(data))))
    print((("std: %f")%(np.std(data))))
    print((("MAD: %f")%(mad(data))))

    plt.clf()
    # Histogram of the same data, with a rug marking individual observations.
    sns_plot2 = sns.distplot(data, bins=20, kde=False, rug=True).get_figure()

    axes = plt.gca()
    # NOTE(review): "Millons" and "scaterplot" are typos in runtime strings;
    # left byte-identical here since they are program output, not comments.
    axes.set_xlabel('Millons of pounds in sales')
    axes.set_ylabel('Sales count')

    sns_plot2.savefig("histogram.png",bbox_inches='tight')
    sns_plot2.savefig("histogram.pdf",bbox_inches='tight')
| gpl-3.0 |
dancingdan/tensorflow | tensorflow/examples/get_started/regression/imports85.py | 41 | 6589 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A dataset loader for imports85.data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
import tensorflow as tf
try:
import pandas as pd # pylint: disable=g-import-not-at-top
except ImportError:
pass
URL = "https://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data"
# Order is important for the csv-readers, so we use an OrderedDict here.
defaults = collections.OrderedDict([
("symboling", [0]),
("normalized-losses", [0.0]),
("make", [""]),
("fuel-type", [""]),
("aspiration", [""]),
("num-of-doors", [""]),
("body-style", [""]),
("drive-wheels", [""]),
("engine-location", [""]),
("wheel-base", [0.0]),
("length", [0.0]),
("width", [0.0]),
("height", [0.0]),
("curb-weight", [0.0]),
("engine-type", [""]),
("num-of-cylinders", [""]),
("engine-size", [0.0]),
("fuel-system", [""]),
("bore", [0.0]),
("stroke", [0.0]),
("compression-ratio", [0.0]),
("horsepower", [0.0]),
("peak-rpm", [0.0]),
("city-mpg", [0.0]),
("highway-mpg", [0.0]),
("price", [0.0])
]) # pyformat: disable
types = collections.OrderedDict((key, type(value[0]))
for key, value in defaults.items())
def _get_imports85():
  """Download the imports85 data file (or reuse the Keras cache).

  Returns:
    str: local filesystem path of the cached data file.
  """
  path = tf.contrib.keras.utils.get_file(URL.split("/")[-1], URL)
  return path
def dataset(y_name="price", train_fraction=0.7):
  """Load the imports85 data as a (train,test) pair of `Dataset`.

  Each dataset generates (features_dict, label) pairs.

  Args:
    y_name: The name of the column to use as the label.
    train_fraction: A float, the fraction of data to use for training. The
        remainder will be used for evaluation.
  Returns:
    A (train,test) pair of `Datasets`
  """
  # Download and cache the data
  path = _get_imports85()

  # Define how the lines of the file should be parsed
  def decode_line(line):
    """Convert a csv line into a (features_dict,label) pair."""
    # Decode the line to a tuple of items based on the types of
    # csv_header.values().
    items = tf.decode_csv(line, list(defaults.values()))

    # Convert the keys and items to a dict.
    pairs = zip(defaults.keys(), items)
    features_dict = dict(pairs)

    # Remove the label from the features_dict
    label = features_dict.pop(y_name)

    return features_dict, label

  def has_no_question_marks(line):
    """Returns True if the line of text has no question marks."""
    # split the line into an array of characters
    chars = tf.string_split(line[tf.newaxis], "").values
    # for each character check if it is a question mark
    is_question = tf.equal(chars, "?")
    any_question = tf.reduce_any(is_question)
    no_question = ~any_question

    return no_question

  def in_training_set(line):
    """Returns a boolean tensor, true if the line is in the training set."""
    # If you randomly split the dataset you won't get the same split in both
    # sessions if you stop and restart training later. Also a simple
    # random split won't work with a dataset that's too big to `.cache()` as
    # we are doing here.
    num_buckets = 1000000
    bucket_id = tf.string_to_hash_bucket_fast(line, num_buckets)
    # Use the hash bucket id as a random number that's deterministic per example
    return bucket_id < int(train_fraction * num_buckets)

  def in_test_set(line):
    """Returns a boolean tensor, true if the line is in the test set."""
    # Items not in the training set are in the test set.
    # This line must use `~` instead of `not` because `not` only works on python
    # booleans but we are dealing with symbolic tensors.
    return ~in_training_set(line)

  base_dataset = (
      tf.data
      # Get the lines from the file.
      .TextLineDataset(path)
      # drop lines with question marks.
      .filter(has_no_question_marks))

  train = (base_dataset
           # Take only the training-set lines.
           .filter(in_training_set)
           # Decode each line into a (features_dict, label) pair.
           .map(decode_line)
           # Cache data so you only decode the file once.
           .cache())

  # Do the same for the test-set.
  test = (base_dataset.filter(in_test_set).cache().map(decode_line))

  return train, test
def raw_dataframe():
  """Load the imports85 data as a pd.DataFrame.

  Returns:
    pd.DataFrame: the raw table; "?" entries become NaN.
  """
  # Download and cache the data
  path = _get_imports85()

  # Load it into a pandas dataframe; "?" marks missing values in this dataset.
  df = pd.read_csv(path, names=types.keys(), dtype=types, na_values="?")

  return df
def load_data(y_name="price", train_fraction=0.7, seed=None):
  """Get the imports85 data set.

  A description of the data is available at:
    https://archive.ics.uci.edu/ml/datasets/automobile

  The data itself can be found at:
    https://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data

  Args:
    y_name: the column to return as the label.
    train_fraction: the fraction of the dataset to use for training.
    seed: The random seed to use when shuffling the data. `None` generates a
      unique shuffle every run.
  Returns:
    a pair of pairs where the first pair is the training data, and the second
    is the test data:
    `(x_train, y_train), (x_test, y_test) = get_imports85_dataset(...)`
    `x` contains a pandas DataFrame of features, while `y` contains the label
    array.
  """
  # Load the raw data columns.
  data = raw_dataframe()

  # Delete rows with unknowns
  data = data.dropna()

  # Shuffle the data
  # NOTE(review): this seeds numpy's global RNG, but the split below passes
  # `random_state=seed` to `DataFrame.sample` directly, so the global seed
  # appears vestigial -- confirm before removing.
  np.random.seed(seed)

  # Split the data into train/test subsets.
  x_train = data.sample(frac=train_fraction, random_state=seed)
  x_test = data.drop(x_train.index)

  # Extract the label from the features dataframe.
  y_train = x_train.pop(y_name)
  y_test = x_test.pop(y_name)

  return (x_train, y_train), (x_test, y_test)
| apache-2.0 |
huzq/scikit-learn | examples/model_selection/plot_nested_cross_validation_iris.py | 23 | 4413 | """
=========================================
Nested versus non-nested cross-validation
=========================================
This example compares non-nested and nested cross-validation strategies on a
classifier of the iris data set. Nested cross-validation (CV) is often used to
train a model in which hyperparameters also need to be optimized. Nested CV
estimates the generalization error of the underlying model and its
(hyper)parameter search. Choosing the parameters that maximize non-nested CV
biases the model to the dataset, yielding an overly-optimistic score.
Model selection without nested CV uses the same data to tune model parameters
and evaluate model performance. Information may thus "leak" into the model
and overfit the data. The magnitude of this effect is primarily dependent on
the size of the dataset and the stability of the model. See Cawley and Talbot
[1]_ for an analysis of these issues.
To avoid this problem, nested CV effectively uses a series of
train/validation/test set splits. In the inner loop (here executed by
:class:`GridSearchCV <sklearn.model_selection.GridSearchCV>`), the score is
approximately maximized by fitting a model to each training set, and then
directly maximized in selecting (hyper)parameters over the validation set. In
the outer loop (here in :func:`cross_val_score
<sklearn.model_selection.cross_val_score>`), generalization error is estimated
by averaging test set scores over several dataset splits.
The example below uses a support vector classifier with a non-linear kernel to
build a model with optimized hyperparameters by grid search. We compare the
performance of non-nested and nested CV strategies by taking the difference
between their scores.
.. topic:: See Also:
- :ref:`cross_validation`
- :ref:`grid_search`
.. topic:: References:
.. [1] `Cawley, G.C.; Talbot, N.L.C. On over-fitting in model selection and
subsequent selection bias in performance evaluation.
J. Mach. Learn. Res 2010,11, 2079-2107.
<http://jmlr.csail.mit.edu/papers/volume11/cawley10a/cawley10a.pdf>`_
"""
from sklearn.datasets import load_iris
from matplotlib import pyplot as plt
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV, cross_val_score, KFold
import numpy as np
print(__doc__)
# Number of random trials
NUM_TRIALS = 30

# Load the dataset
iris = load_iris()
X_iris = iris.data
y_iris = iris.target

# Set up possible values of parameters to optimize over
p_grid = {"C": [1, 10, 100],
          "gamma": [.01, .1]}

# We will use a Support Vector Classifier with "rbf" kernel
svm = SVC(kernel="rbf")

# Arrays to store scores, one entry per random trial
non_nested_scores = np.zeros(NUM_TRIALS)
nested_scores = np.zeros(NUM_TRIALS)

# Loop for each trial
for i in range(NUM_TRIALS):

    # Choose cross-validation techniques for the inner and outer loops,
    # independently of the dataset.
    # E.g "GroupKFold", "LeaveOneOut", "LeaveOneGroupOut", etc.
    # The trial index seeds both splitters so each trial is reproducible.
    inner_cv = KFold(n_splits=4, shuffle=True, random_state=i)
    outer_cv = KFold(n_splits=4, shuffle=True, random_state=i)

    # Non_nested parameter search and scoring: the best inner-CV score is
    # taken directly, which is optimistically biased.
    clf = GridSearchCV(estimator=svm, param_grid=p_grid, cv=inner_cv)
    clf.fit(X_iris, y_iris)
    non_nested_scores[i] = clf.best_score_

    # Nested CV with parameter optimization: the grid search is re-run
    # inside every outer fold, so the outer score is unbiased.
    nested_score = cross_val_score(clf, X=X_iris, y=y_iris, cv=outer_cv)
    nested_scores[i] = nested_score.mean()

score_difference = non_nested_scores - nested_scores

print("Average difference of {:6f} with std. dev. of {:6f}."
      .format(score_difference.mean(), score_difference.std()))

# Plot scores on each trial for nested and non-nested CV
plt.figure()
plt.subplot(211)
non_nested_scores_line, = plt.plot(non_nested_scores, color='r')
nested_line, = plt.plot(nested_scores, color='b')
plt.ylabel("score", fontsize="14")
plt.legend([non_nested_scores_line, nested_line],
           ["Non-Nested CV", "Nested CV"],
           bbox_to_anchor=(0, .4, .5, 0))
plt.title("Non-Nested and Nested Cross Validation on Iris Dataset",
          x=.5, y=1.1, fontsize="15")

# Plot bar chart of the difference.
plt.subplot(212)
difference_plot = plt.bar(range(NUM_TRIALS), score_difference)
plt.xlabel("Individual Trial #")
plt.legend([difference_plot],
           ["Non-Nested CV - Nested CV Score"],
           bbox_to_anchor=(0, 1, .8, 0))
plt.ylabel("score difference", fontsize="14")

plt.show()
| bsd-3-clause |
xhqu1981/pymatgen | pymatgen/analysis/eos.py | 5 | 17394 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import unicode_literals, division, print_function
"""
This module implements various equation of states.
Note: Most of the code were initially adapted from ASE and deltafactor by
@gmatteo but has since undergone major refactoring.
"""
from copy import deepcopy
import six
from abc import ABCMeta, abstractmethod
import logging
import warnings
import numpy as np
from scipy.optimize import leastsq, minimize
from pymatgen.core.units import FloatWithUnit
from pymatgen.util.plotting import pretty_plot
__author__ = "Kiran Mathew, Guido Matteo"
__credits__ = "Cormac Toher"
logger = logging.getLogger(__file__)
class EOSBase(six.with_metaclass(ABCMeta)):
    """
    Abstract class that must be subclassed by all equation of state
    implementations.

    Subclasses provide the analytical EOS form in ``_func``; this base class
    supplies the least-squares fitting, parameter accessors and plotting.
    """

    def __init__(self, volumes, energies):
        """
        Args:
            volumes (list/numpy.array): volumes in Ang^3
            energies (list/numpy.array): energies in eV
        """
        self.volumes = np.array(volumes)
        self.energies = np.array(energies)
        # Fitted parameters: minimum energy (e0), bulk modulus (b0),
        # derivative of bulk modulus wrt pressure (b1), minimum volume (v0).
        self._params = None
        # The eos function parameters. Same as _params except for equations
        # of state that use polynomial fits (deltafactor and numerical_eos).
        self.eos_params = None

    def _initial_guess(self):
        """
        Quadratic fit to get an initial guess for the parameters.

        Returns:
            tuple: (e0, b0, b1, v0)

        Raises:
            EOSError: if the parabola's minimum volume lies outside the
                range of the input volumes.
        """
        a, b, c = np.polyfit(self.volumes, self.energies, 2)
        self.eos_params = [a, b, c]

        v0 = -b / (2 * a)
        e0 = a * (v0 ** 2) + b * v0 + c
        b0 = 2 * a * v0
        b1 = 4  # b1 is usually a small number like 4

        vmin, vmax = min(self.volumes), max(self.volumes)
        # Bug fix: the original condition `not vmin < v0 and v0 < vmax`
        # parsed as `(not vmin < v0) and (v0 < vmax)`, so a parabola minimum
        # *above* vmax was silently accepted. The chained comparison rejects
        # a minimum outside (vmin, vmax) on either side.
        if not vmin < v0 < vmax:
            raise EOSError('The minimum volume of a fitted parabola is '
                           'not in the input volumes\n.')

        return e0, b0, b1, v0

    def fit(self):
        """
        Do the fitting. Does least square fitting. If you want to use custom
        fitting, must override this.

        Raises:
            EOSError: if scipy's leastsq does not report convergence.
        """
        # Residuals minimized by scipy's least-squares routine.
        def objective_func(pars, x, y):
            return y - self._func(x, pars)

        self._params = self._initial_guess()
        self.eos_params, ierr = leastsq(
            objective_func, self._params, args=(self.volumes, self.energies))
        # e0, b0, b1, v0
        self._params = self.eos_params
        # leastsq signals success via ier codes 1-4.
        if ierr not in [1, 2, 3, 4]:
            raise EOSError("Optimal parameters not found")

    @abstractmethod
    def _func(self, volume, params):
        """
        The equation of state function. This must be implemented by all
        classes that derive from this abstract class.

        Args:
            volume (float/numpy.array)
            params (list/tuple): values for the parameters other than the
                volume used by the eos.
        """
        pass

    def func(self, volume):
        """
        The equation of state function with the parameters other than volume
        set to the ones obtained from fitting.

        Args:
            volume (list/numpy.array)

        Returns:
            numpy.array
        """
        return self._func(np.array(volume), self.eos_params)

    def __call__(self, volume):
        return self.func(volume)

    @property
    def e0(self):
        """
        Returns the min energy in eV.
        """
        return self._params[0]

    @property
    def b0(self):
        """
        Returns the bulk modulus.
        Note: the units for the bulk modulus: unit of energy/unit of volume^3.
        """
        return self._params[1]

    @property
    def b0_GPa(self):
        """
        Returns the bulk modulus in GPa.
        Note: This assumes that the energy and volumes are in eV and Ang^3
            respectively.
        """
        return FloatWithUnit(self.b0, "eV ang^-3").to("GPa")

    @property
    def b1(self):
        """
        Returns the derivative of bulk modulus wrt pressure (dimensionless).
        """
        return self._params[2]

    @property
    def v0(self):
        """
        Returns the minimum or the reference volume in Ang^3.
        """
        return self._params[3]

    @property
    def results(self):
        """
        Returns a summary dict with keys e0, b0, b1 and v0.
        """
        return dict(e0=self.e0, b0=self.b0, b1=self.b1, v0=self.v0)

    def plot(self, width=8, height=None, plt=None, dpi=None, **kwargs):
        """
        Plot the equation of state.

        Args:
            width (float): Width of plot in inches. Defaults to 8in.
            height (float): Height of plot in inches. Defaults to width *
                golden ratio.
            plt (matplotlib.pyplot): If plt is supplied, changes will be made
                to an existing plot. Otherwise, a new plot will be created.
            dpi: dots per inch of the figure.
            kwargs (dict): additional args fed to pyplot.plot.
                supported keys: style, color, text, label

        Returns:
            Matplotlib plot object.
        """
        plt = pretty_plot(width=width, height=height, plt=plt, dpi=dpi)

        color = kwargs.get("color", "r")
        label = kwargs.get("label", "{} fit".format(self.__class__.__name__))
        lines = ["Equation of State: %s" % self.__class__.__name__,
                 "Minimum energy = %1.2f eV" % self.e0,
                 "Minimum or reference volume = %1.2f Ang^3" % self.v0,
                 "Bulk modulus = %1.2f eV/Ang^3 = %1.2f GPa" %
                 (self.b0, self.b0_GPa),
                 "Derivative of bulk modulus wrt pressure = %1.2f" % self.b1]
        text = "\n".join(lines)
        text = kwargs.get("text", text)

        # Plot input data.
        plt.plot(self.volumes, self.energies, linestyle="None", marker="o",
                 color=color)

        # Plot eos fit over a slightly padded volume range.
        vmin, vmax = min(self.volumes), max(self.volumes)
        vmin, vmax = (vmin - 0.01 * abs(vmin), vmax + 0.01 * abs(vmax))
        vfit = np.linspace(vmin, vmax, 100)

        plt.plot(vfit, self.func(vfit), linestyle="dashed", color=color,
                 label=label)

        plt.grid(True)
        plt.xlabel("Volume $\\AA^3$")
        plt.ylabel("Energy (eV)")
        plt.legend(loc="best", shadow=True)
        # Add text with fit parameters.
        plt.text(0.4, 0.5, text, transform=plt.gca().transAxes)

        return plt
class Murnaghan(EOSBase):
    """Murnaghan equation of state."""

    def _func(self, volume, params):
        """
        From PRB 28,5480 (1983)

        Args:
            volume (float/numpy.array): volume(s) in Ang^3
            params (list/tuple): (e0, b0, b1, v0)
        """
        e0, b0, b1, v0 = tuple(params)
        return (e0 +
                b0 * volume / b1 * (((v0 / volume)**b1) / (b1 - 1.0) + 1.0) -
                v0 * b0 / (b1 - 1.0))
class Birch(EOSBase):
    """Birch equation of state (the n=0 case)."""

    def _func(self, volume, params):
        """
        From Intermetallic compounds: Principles and Practice, Vol. I:
        Principles Chapter 9 pages 195-210 by M. Mehl. B. Klein,
        D. Papaconstantopoulos.
        case where n=0

        Args:
            volume (float/numpy.array): volume(s) in Ang^3
            params (list/tuple): (e0, b0, b1, v0)
        """
        e0, b0, b1, v0 = tuple(params)
        return (e0
                + 9.0 / 8.0 * b0 * v0 * ((v0 / volume)**(2.0/3.0) - 1.0) ** 2
                + 9.0 / 16.0 * b0 * v0 * (b1 - 4.) *
                ((v0 / volume)**(2.0/3.0) - 1.0) ** 3)
class BirchMurnaghan(EOSBase):
    """Birch-Murnaghan equation of state."""

    def _func(self, volume, params):
        """
        BirchMurnaghan equation from PRB 70, 224107

        Args:
            volume (float/numpy.array): volume(s) in Ang^3
            params (list/tuple): (e0, b0, b1, v0)
        """
        e0, b0, b1, v0 = tuple(params)
        # eta is the linear compression ratio (V/V0)^(1/3)
        eta = (volume / v0) ** (1. / 3.)
        return (e0 +
                9. * b0 * v0 / 16. * (eta ** 2 - 1)**2 *
                (6 + b1 * (eta ** 2 - 1.) - 4. * eta ** 2))
class PourierTarantola(EOSBase):
    """Poirier-Tarantola (logarithmic) equation of state.

    NOTE(review): the class name keeps the original (mis)spelling of
    "Poirier"; renaming it would break existing callers.
    """

    def _func(self, volume, params):
        """
        Pourier-Tarantola equation from PRB 70, 224107

        Args:
            volume (float/numpy.array): volume(s) in Ang^3
            params (list/tuple): (e0, b0, b1, v0)
        """
        e0, b0, b1, v0 = tuple(params)
        eta = (volume / v0) ** (1. / 3.)
        # squiggle = -3 ln(eta) is the logarithmic strain measure
        squiggle = -3.*np.log(eta)
        return e0 + b0 * v0 * squiggle ** 2 / 6. * (3. + squiggle * (b1 - 2))
class Vinet(EOSBase):
    """Vinet equation of state."""

    def _func(self, volume, params):
        """
        Vinet equation from PRB 70, 224107

        Args:
            volume (float/numpy.array): volume(s) in Ang^3
            params (list/tuple): (e0, b0, b1, v0)
        """
        e0, b0, b1, v0 = tuple(params)
        eta = (volume / v0) ** (1. / 3.)
        return (e0 + 2. * b0 * v0 / (b1 - 1.) ** 2
                * (2. - (5. + 3. * b1 * (eta - 1.) - 3. * eta)
                   * np.exp(-3. * (b1 - 1.) * (eta - 1.) / 2.)))
class PolynomialEOS(EOSBase):
    """
    Derives from EOSBase. Polynomial based equations of states must subclass
    this.
    """

    def _func(self, volume, params):
        # Evaluate the fitted polynomial at `volume`.
        return np.poly1d(list(params))(volume)

    def fit(self, order):
        """
        Do polynomial fitting and set the parameters. Uses numpy polyfit.

        NOTE(review): unlike ``EOSBase.fit``, this override requires the
        polynomial ``order`` argument.

        Args:
            order (int): order of the fit polynomial
        """
        self.eos_params = np.polyfit(self.volumes, self.energies, order)
        self._set_params()

    def _set_params(self):
        """
        Use the fit polynomial to compute the parameter e0, b0, b1 and v0
        and set to the _params attribute.
        """
        fit_poly = np.poly1d(self.eos_params)
        # the volume at min energy, used as the initial guess for the
        # optimization wrt volume.
        v_e_min = self.volumes[np.argmin(self.energies)]
        # evaluate e0, v0, b0 and b1
        min_wrt_v = minimize(fit_poly, v_e_min)
        e0, v0 = min_wrt_v.fun, min_wrt_v.x[0]
        pderiv2 = np.polyder(fit_poly, 2)
        pderiv3 = np.polyder(fit_poly, 3)
        # bulk modulus: B = V * d2E/dV2 evaluated at the minimum volume
        b0 = v0 * np.poly1d(pderiv2)(v0)
        db0dv = np.poly1d(pderiv2)(v0) + v0 * np.poly1d(pderiv3)(v0)
        # db/dp
        b1 = - v0 * db0dv / b0
        self._params = [e0, b0, b1, v0]
class DeltaFactor(PolynomialEOS):
    """Polynomial fit in x = V^(-2/3), as used by the delta-factor test."""

    def _func(self, volume, params):
        # The polynomial is a function of x = V^(-2/3), not of V itself.
        x = volume**(-2. / 3.)
        return np.poly1d(list(params))(x)

    def fit(self, order=3):
        """
        Overriden since this eos works with volume**(2/3) instead of volume.
        """
        x = self.volumes**(-2./3.)
        self.eos_params = np.polyfit(x, self.energies, order)
        self._set_params()

    def _set_params(self):
        """
        Overriden to account for the fact the fit with volume**(2/3) instead
        of volume.
        """
        deriv0 = np.poly1d(self.eos_params)
        deriv1 = np.polyder(deriv0, 1)
        deriv2 = np.polyder(deriv1, 1)
        deriv3 = np.polyder(deriv2, 1)

        # Find the first positive root of dE/dx that is a minimum
        # (positive second derivative); the for/else raises if none exists.
        for x in np.roots(deriv1):
            if x > 0 and deriv2(x) > 0:
                v0 = x**(-3./2.)
                break
        else:
            raise EOSError("No minimum could be found")

        # NOTE: `x` below is the root left bound by the loop (x = v0^(-2/3));
        # the derivatives wrt V are obtained from the x-polynomial by the
        # chain rule.
        derivV2 = 4./9. * x**5. * deriv2(x)
        derivV3 = (-20./9. * x**(13./2.) * deriv2(x) - 8./27. *
                   x**(15./2.) * deriv3(x))
        b0 = derivV2 / x**(3./2.)
        b1 = -1 - x**(-3./2.) * derivV3 / derivV2

        # e0, b0, b1, v0
        self._params = [deriv0(v0**(-2./3.)), b0, b1, v0]
class NumericalEOS(PolynomialEOS):
    # "Numerical" EOS: many polynomial fits of varying order and data window
    # are combined into a single weighted-average polynomial.

    def fit(self, min_ndata_factor=3, max_poly_order_factor=5, min_poly_order=2):
        """
        Fit the input data to the 'numerical eos', the equation of state employed
        in the quasiharmonic Debye model described in the paper:
        10.1103/PhysRevB.90.174107.

        credits: Cormac Toher

        Args:
            min_ndata_factor (int): parameter that controls the minimum number
                of data points that will be used for fitting.
                minimum number of data points =
                    total data points-2*min_ndata_factor
            max_poly_order_factor (int): parameter that limits the max order
                of the polynomial used for fitting.
                max_poly_order = number of data points used for fitting -
                                 max_poly_order_factor
            min_poly_order (int): minimum order of the polynomial to be
                considered for fitting.
        """
        warnings.simplefilter('ignore', np.RankWarning)
        # root-mean-square deviation between two equal-length sequences
        get_rms = lambda x, y: np.sqrt(np.sum((np.array(x)-np.array(y))**2)/len(x))
        # list of (energy, volume) tuples
        e_v = [(i, j) for i, j in zip(self.energies, self.volumes)]
        ndata = len(e_v)
        # minimum number of data points used for fitting
        ndata_min = max(ndata - 2 * min_ndata_factor, min_poly_order + 1)
        rms_min = np.inf
        # number of data points available for fit in each iteration
        ndata_fit = ndata
        # store the fit polynomial coefficients and the rms in a dict,
        # where the key=(polynomial order, number of data points used for
        # fitting)
        all_coeffs = {}
        # sort by energy
        e_v = sorted(e_v, key=lambda x: x[0])
        # minimum energy tuple
        e_min = e_v[0]
        # sort by volume
        e_v = sorted(e_v, key=lambda x: x[1])
        # index of minimum energy tuple in the volume sorted list
        emin_idx = e_v.index(e_min)
        # the volume lower than the volume corresponding to minimum energy
        v_before = e_v[emin_idx - 1][1]
        # the volume higher than the volume corresponding to minimum energy
        v_after = e_v[emin_idx + 1][1]
        e_v_work = deepcopy(e_v)
        # loop over the data points: shrink the window from both ends while
        # it still contains the minimum-energy point and enough points.
        while (ndata_fit >= ndata_min) and (e_min in e_v_work):
            max_poly_order = ndata_fit - max_poly_order_factor
            e = [ei[0] for ei in e_v_work]
            v = [ei[1] for ei in e_v_work]
            # loop over polynomial order
            for i in range(min_poly_order, max_poly_order + 1):
                coeffs = np.polyfit(v, e, i)
                pder = np.polyder(coeffs)
                a = np.poly1d(pder)(v_before)
                b = np.poly1d(pder)(v_after)
                # keep the fit only if E'(V) changes sign across the
                # minimum, i.e. the fit really has a minimum in the bracket
                if a * b < 0:
                    rms = get_rms(e, np.poly1d(coeffs)(v))
                    rms_min = min(rms_min, rms * i / ndata_fit)
                    all_coeffs[(i, ndata_fit)] = [coeffs.tolist(), rms]
                    # store the fit coefficients small to large,
                    # i.e a0, a1, .. an
                    all_coeffs[(i, ndata_fit)][0].reverse()
            # remove 1 data point from each end.
            e_v_work.pop()
            e_v_work.pop(0)
            ndata_fit = len(e_v_work)
        logger.info("total number of polynomials: {}".format(len(all_coeffs)))
        norm = 0.
        fit_poly_order = ndata
        # weight average polynomial coefficients.
        weighted_avg_coeffs = np.zeros((fit_poly_order,))
        # combine all the filtered polynomial candidates to get the final fit.
        for k, v in all_coeffs.items():
            # weighted rms = rms * polynomial order / rms_min / ndata_fit
            weighted_rms = v[1] * k[0] / rms_min / k[1]
            weight = np.exp(-(weighted_rms ** 2))
            norm += weight
            coeffs = np.array(v[0])
            # pad the coefficient array with zeros so all candidates have
            # the same length before averaging
            coeffs = np.lib.pad(coeffs,
                                (0, max(fit_poly_order-len(coeffs), 0)),
                                'constant')
            weighted_avg_coeffs += weight * coeffs
        # normalization
        weighted_avg_coeffs /= norm
        weighted_avg_coeffs = weighted_avg_coeffs.tolist()
        # large to small(an, an-1, ..., a1, a0) as expected by np.poly1d
        weighted_avg_coeffs.reverse()
        self.eos_params = weighted_avg_coeffs
        self._set_params()
class EOS(object):
    """
    Convenient wrapper. Retained in its original state to ensure backward
    compatibility.

    Fit an equation of state for bulk systems.

    The following equations are supported::

        murnaghan: PRB 28, 5480 (1983)
        birch: Intermetallic compounds: Principles and Practice, Vol I:
            Principles. pages 195-210
        birch_murnaghan: PRB 70, 224107
        pourier_tarantola: PRB 70, 224107
        vinet: PRB 70, 224107
        deltafactor
        numerical_eos: 10.1103/PhysRevB.90.174107.

    Usage::

        eos = EOS(eos_name='murnaghan')
        eos_fit = eos.fit(volumes, energies)
        eos_fit.plot()
    """

    # Registry mapping the supported model names to their classes.
    MODELS = {
        "murnaghan": Murnaghan,
        "birch": Birch,
        "birch_murnaghan": BirchMurnaghan,
        "pourier_tarantola": PourierTarantola,
        "vinet": Vinet,
        "deltafactor": DeltaFactor,
        "numerical_eos": NumericalEOS
    }

    def __init__(self, eos_name='murnaghan'):
        """
        Args:
            eos_name (str): one of the keys of ``MODELS``.

        Raises:
            EOSError: if ``eos_name`` is not a supported model name.
        """
        model_cls = self.MODELS.get(eos_name)
        if model_cls is None:
            raise EOSError("The equation of state '{}' is not supported. "
                           "Please choose one from the following list: {}".
                           format(eos_name, list(self.MODELS.keys())))
        self._eos_name = eos_name
        self.model = model_cls

    def fit(self, volumes, energies):
        """
        Fit energies as function of volumes.

        Args:
            volumes (list/np.array)
            energies (list/np.array)

        Returns:
            EOSBase: the fitted EOS model object
        """
        fitted = self.model(np.array(volumes), np.array(energies))
        fitted.fit()
        return fitted
class EOSError(Exception): pass
| mit |
huongttlan/statsmodels | statsmodels/examples/ex_kde_confint.py | 34 | 1973 | # -*- coding: utf-8 -*-
"""
Created on Mon Dec 16 11:02:59 2013
Author: Josef Perktold
"""
from __future__ import print_function
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import statsmodels.nonparametric.api as npar
from statsmodels.sandbox.nonparametric import kernels
from statsmodels.distributions.mixture_rvs import mixture_rvs
# example from test_kde.py mixture of two normal distributions
np.random.seed(12345)
x = mixture_rvs([.25,.75], size=200, dist=[stats.norm, stats.norm],
kwargs = (dict(loc=-1, scale=.5),dict(loc=1, scale=.5)))
x.sort() # not needed
kde = npar.KDEUnivariate(x)
kde.fit('gau')
ci = kde.kernel.density_confint(kde.density, len(x))
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.hist(x, bins=15, normed=True, alpha=0.25)
ax.plot(kde.support, kde.density, lw=2, color='red')
ax.fill_between(kde.support, ci[:,0], ci[:,1],
color='grey', alpha='0.7')
ax.set_title('Kernel Density Gaussian (bw = %4.2f)' % kde.bw)
# use all kernels directly
x_grid = np.linspace(np.min(x), np.max(x), 51)
x_grid = np.linspace(-3, 3, 51)
kernel_names = ['Biweight', 'Cosine', 'Epanechnikov', 'Gaussian',
'Triangular', 'Triweight', #'Uniform',
]
fig = plt.figure()
for ii, kn in enumerate(kernel_names):
ax = fig.add_subplot(2, 3, ii+1) # without uniform
ax.hist(x, bins=10, normed=True, alpha=0.25)
#reduce bandwidth for Gaussian and Uniform which are to large in example
if kn in ['Gaussian', 'Uniform']:
args = (0.5,)
else:
args = ()
kernel = getattr(kernels, kn)(*args)
kde_grid = [kernel.density(x, xi) for xi in x_grid]
confint_grid = kernel.density_confint(kde_grid, len(x))
ax.plot(x_grid, kde_grid, lw=2, color='red', label=kn)
ax.fill_between(x_grid, confint_grid[:,0], confint_grid[:,1],
color='grey', alpha='0.7')
ax.legend(loc='upper left')
plt.show()
| bsd-3-clause |
davidam/python-examples | scikit/lda.py | 2 | 4035 | #!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright (C) 2018 David Arroyo Menéndez

# Author: David Arroyo Menéndez <davidam@gnu.org>
# Maintainer: David Arroyo Menéndez <davidam@gnu.org>

# This file is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.

# This file is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.

# You should have received a copy of the GNU General Public License
# along with GNU Emacs; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301 USA,

# Linear Discriminant Analysis computed "by hand" on the Wine dataset,
# then visualized along with the 1-NN Voronoi regions of the class means
# in the projected 2-D space.

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import style
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier

# BUG FIX: matplotlib.pyplot was imported twice; the duplicate import has
# been removed and the imports grouped conventionally.
style.use('fivethirtyeight')

# 0. Load in the data and split the descriptive and the target feature
df = pd.read_csv('data/Wine.txt', sep=',',
                 names=['target', 'Alcohol', 'Malic_acid', 'Ash',
                        'Akcakinity', 'Magnesium', 'Total_pheonols',
                        'Flavanoids', 'Nonflavanoids', 'Proanthocyanins',
                        'Color_intensity', 'Hue', 'OD280', 'Proline'])
X = df.iloc[:, 1:].copy()
target = df['target'].copy()
X_train, X_test, y_train, y_test = train_test_split(X, target,
                                                    test_size=0.3,
                                                    random_state=0)

# 1. Standardize the data
for col in X_train.columns:
    X_train[col] = StandardScaler().fit_transform(X_train[col].values.reshape(-1, 1))

# 2. Compute the mean vector mu and the mean vector per class mu_k
# Since the data has been standardized, the overall means are zero.
mu = np.mean(X_train, axis=0).values.reshape(13, 1)
mu_k = []
for i, orchid in enumerate(np.unique(df['target'])):
    # .where() aligns on the index, so this picks the training rows of
    # the given class.
    mu_k.append(np.mean(X_train.where(df['target'] == orchid), axis=0))
mu_k = np.array(mu_k).T

# 3. Compute the Scatter within and Scatter between matrices
data_SW = []
Nc = []
for i, orchid in enumerate(np.unique(df['target'])):
    a = np.array(X_train.where(df['target'] == orchid).dropna().values -
                 mu_k[:, i].reshape(1, 13))
    data_SW.append(np.dot(a.T, a))
    Nc.append(np.sum(df['target'] == orchid))
SW = np.sum(data_SW, axis=0)
SB = np.dot(Nc * np.array(mu_k - mu), np.array(mu_k - mu).T)

# 4. Compute the Eigenvalues and Eigenvectors of SW^-1 SB
eigval, eigvec = np.linalg.eig(np.dot(np.linalg.inv(SW), SB))

# 5. Select the two largest eigenvalues
eigen_pairs = [[np.abs(eigval[i]), eigvec[:, i]] for i in range(len(eigval))]
eigen_pairs = sorted(eigen_pairs, key=lambda k: k[0], reverse=True)
# Stack the two leading eigenvectors into the projection matrix.
w = np.hstack((eigen_pairs[0][1][:, np.newaxis].real,
               eigen_pairs[1][1][:, np.newaxis].real))

# 6. Transform the data with Y=X*w
Y = X_train.dot(w)

# Plot the data
fig = plt.figure(figsize=(10, 10))
ax0 = fig.add_subplot(111)
ax0.set_xlim(-3, 3)
ax0.set_ylim(-4, 3)

for l, c, m in zip(np.unique(y_train), ['r', 'g', 'b'], ['s', 'x', 'o']):
    ax0.scatter(Y[0][y_train == l],
                Y[1][y_train == l],
                c=c, marker=m, label=l, edgecolors='black')
ax0.legend(loc='upper right')

# Plot the Voronoi spaces of the class means.
# BUG FIX: the loop variable used to shadow the module-level `target`
# Series; renamed to `label`.
means = []
for m, label in zip(['s', 'x', 'o'], np.unique(y_train)):
    means.append(np.mean(Y[y_train == label], axis=0))
    ax0.scatter(np.mean(Y[y_train == label], axis=0)[0],
                np.mean(Y[y_train == label], axis=0)[1],
                marker=m, c='black', s=100)

# BUG FIX: removed the unused local `date` that was built but never read.
mesh_x, mesh_y = np.meshgrid(np.linspace(-3, 3), np.linspace(-4, 3))
mesh = []
for i in range(len(mesh_x)):
    for j in range(len(mesh_x[0])):
        mesh.append((mesh_x[i][j], mesh_y[i][j]))

# Color every mesh point by its nearest class mean (1-NN).
NN = KNeighborsClassifier(n_neighbors=1)
NN.fit(means, ['r', 'g', 'b'])
predictions = NN.predict(np.array(mesh))
ax0.scatter(np.array(mesh)[:, 0], np.array(mesh)[:, 1],
            color=predictions, alpha=0.3)
plt.show()
| gpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.