repo_name stringlengths 6 67 | path stringlengths 5 185 | copies stringlengths 1 3 | size stringlengths 4 6 | content stringlengths 1.02k 962k | license stringclasses 15 values |
|---|---|---|---|---|---|
amitgroup/parts-net | scripts/script_parts.py | 1 | 9041 | from __future__ import division, print_function, absolute_import
import matplotlib as mpl
mpl.rc('font', size=8)
from vzlog import default as vz
import numpy as np
import amitgroup as ag
import itertools as itr
import sys
import os
from amitgroup.plot import ImageGrid
import pnet
import time
def test(ims, labels, net):
yhat = net.classify(ims)
return yhat == labels
# Main driver: train a (possibly multi-layer) parts network on MNIST-style
# data and save the trained model.  Runs only in the parallel main process.
if pnet.parallel.main(__name__):
    ag.set_verbose(True)
    import argparse
    # --- command-line interface -------------------------------------------
    parser = argparse.ArgumentParser()
    #parser.add_argument('seed', metavar='<seed>', type=int, help='Random seed')
    #parser.add_argument('param', metavar='<param>', type=string)
    parser.add_argument('size', metavar='<part size>', type=int)
    parser.add_argument('orientations', metavar='<num orientations>', type=int)
    parser.add_argument('num_parts', metavar='<number of parts>', type=int, help='number of parts')
    parser.add_argument('data',metavar='<mnist data file>',type=argparse.FileType('rb'), help='Filename of data file')
    parser.add_argument('seed', type=int, default=1)
    parser.add_argument('saveFile', metavar='<output file>', type=argparse.FileType('wb'),help='Filename of savable model file')
    parser.add_argument('--p1', type=int, default=10)
    parser.add_argument('--p2', type=int, default=4)
    parser.add_argument('--p3', type=int, default=3)
    args = parser.parse_args()
    part_size = args.size
    num_orientations = args.orientations
    num_parts = args.num_parts
    param = args.data
    saveFile = args.saveFile
    seed = args.seed
    # NOTE(review): p1/p2/p3 are parsed but never used below -- confirm
    # whether they were meant to feed the layer settings.
    param1 = args.p1
    param2 = args.p2
    param3 = args.p3
    data = ag.io.load(param)
    # bookkeeping lists (only filled in longer experiment variants)
    unsup_training_times = []
    sup_training_times = []
    testing_times = []
    error_rates = []
    all_num_parts = []
    # SHORT toggles the quick single-layer experiment vs. the full pipeline
    SHORT = True#False
    for training_seed in [seed]:
        if 1:
            # settings for the first (Gaussian parts) layer
            settings=dict(n_iter=10,
                          seed=0,
                          n_init=5,
                          standardize=True,
                          samples_per_image=100,
                          max_samples=10000,
                          uniform_weights=True,
                          max_covariance_samples=None,
                          covariance_type='diag',
                          min_covariance=0.0025,
                          logratio_thresh=-np.inf,
                          std_thresh=0.05,
                          std_thresh_frame=0,
                          #rotation_spreading_radius=0,
                          )
            layers = [
                pnet.OrientedGaussianPartsLayer(n_parts=8, n_orientations=1,
                                                part_shape=(3, 3),
                                                settings=settings),
            ]
        # Dead-code switches below select one of several experimental
        # layer stacks; with SHORT=True the `elif 1` branch wins.
        if not SHORT:
            layers += [
                pnet.PoolingLayer(shape=(1, 1), strides=(1, 1)),
                pnet.OrientedPartsLayer(n_parts=num_parts,
                                        n_orientations=num_orientations,
                                        part_shape=(part_size, part_size),
                                        settings=dict(outer_frame=1,
                                                      seed=training_seed,
                                                      threshold=2,
                                                      samples_per_image=20,
                                                      max_samples=30000,
                                                      #max_samples=30000,
                                                      #train_limit=10000,
                                                      min_prob=0.00005,)),
            ]
        elif 1:
            # single oriented-Gaussian-parts layer built from CLI parameters
            layers = [
                pnet.OrientedGaussianPartsLayer(num_parts,
                                                num_orientations,
                                                (part_size, part_size),
                                                settings=dict(n_iter=10,
                                                              n_init=1,
                                                              samples_per_image=50,
                                                              max_samples=50000,
                                                              max_covariance_samples=None,
                                                              standardize=False,
                                                              covariance_type='diag',
                                                              min_covariance=0.0025,
                                                              logratio_thresh=-np.inf,
                                                              std_thresh=-0.05,
                                                              std_thresh_frame=1,
                                                              #rotation_spreading_radius=0,
                                                              ),
                                                ),
                #pnet.PoolingLayer(shape=(4, 4), strides=(4, 4)),
            ]
        elif 0:
            # binary-edge based oriented parts layer (disabled)
            layers = [
                pnet.OrientedPartsLayer(num_parts,
                                        num_orientations,
                                        (part_size, part_size),
                                        settings=dict(outer_frame=2,
                                                      em_seed=training_seed,
                                                      n_iter=5,
                                                      n_init=1,
                                                      threshold=2,
                                                      #samples_per_image=20,
                                                      samples_per_image=50,
                                                      max_samples=80000,
                                                      #max_samples=5000,
                                                      #max_samples=100000,
                                                      #max_samples=2000,
                                                      rotation_spreading_radius=0,
                                                      min_prob=0.0005,
                                                      bedges=dict(
                                                          k=5,
                                                          minimum_contrast=0.05,
                                                          spread='orthogonal',
                                                          #spread='box',
                                                          radius=1,
                                                          #pre_blurring=1,
                                                          contrast_insensitive=False,
                                                      ),
                                                      )),
            ]
        else:
            # explicit edge layer followed by unoriented parts layer
            layers = [
                pnet.EdgeLayer(
                    k=5,
                    minimum_contrast=0.08,
                    spread='orthogonal',
                    #spread='box',
                    radius=1,
                    #pre_blurring=0,
                    contrast_insensitive=False,
                ),
                pnet.PartsLayer(num_parts, (6, 6), settings=dict(outer_frame=1,
                                                                 seed=training_seed,
                                                                 threshold=2,
                                                                 samples_per_image=40,
                                                                 max_samples=100000,
                                                                 #max_samples=30000,
                                                                 train_limit=10000,
                                                                 min_prob=0.00005,
                                                                 )),
            ]
        net = pnet.PartsNet(layers)
        print('Extracting subsets...')
        # train on the first 10k images only
        ims10k = data[:10000]
        start0 = time.time()
        print('Training unsupervised...')
        net.train(lambda x: x, ims10k)
        print('Done.')
        end0 = time.time()
        net.save(saveFile)
        if 1:
            # quick visual sanity check: extract layer-0 features for a few
            # images and save image grids to the vzlog report
            N = 10
            data = ims10k
            feat = net.extract(lambda x: x, data[:10], layer=0)
            pooling = pnet.PoolingLayer(strides=(1, 1), shape=(1, 1))
            Y = pooling.extract(lambda x: x, feat)
            grid4 = ImageGrid(Y.transpose((0, 3, 1, 2)))
            grid4.save(vz.impath(), scale=2)
            #import pdb; pdb.set_trace()
            grid5 = ImageGrid(data[:10])
            grid5.save(vz.impath(), scale=2)
            vz.output(net)
        if SHORT:
            # stop after the first seed in the quick experiment
            import sys; sys.exit(1)
| bsd-3-clause |
hlin117/scikit-learn | examples/linear_model/plot_omp.py | 385 | 2263 | """
===========================
Orthogonal Matching Pursuit
===========================
Using orthogonal matching pursuit for recovering a sparse signal from a noisy
measurement encoded with a dictionary
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import OrthogonalMatchingPursuit
from sklearn.linear_model import OrthogonalMatchingPursuitCV
from sklearn.datasets import make_sparse_coded_signal
# dictionary with 512 atoms in a 100-dimensional space; the true signal
# is a combination of exactly 17 atoms
n_components, n_features = 512, 100
n_nonzero_coefs = 17
# generate the data
###################
# y = Xw
# |x|_0 = n_nonzero_coefs
y, X, w = make_sparse_coded_signal(n_samples=1,
                                   n_components=n_components,
                                   n_features=n_features,
                                   n_nonzero_coefs=n_nonzero_coefs,
                                   random_state=0)
idx, = w.nonzero()
# distort the clean signal
##########################
# NOTE: the noise is intentionally unseeded, so the noisy plots vary run to run
y_noisy = y + 0.05 * np.random.randn(len(y))
# plot the sparse signal
########################
plt.figure(figsize=(7, 7))
plt.subplot(4, 1, 1)
plt.xlim(0, 512)
plt.title("Sparse signal")
plt.stem(idx, w[idx])
# plot the noise-free reconstruction
####################################
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 2)
plt.xlim(0, 512)
plt.title("Recovered signal from noise-free measurements")
plt.stem(idx_r, coef[idx_r])
# plot the noisy reconstruction
###############################
omp.fit(X, y_noisy)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 3)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements")
plt.stem(idx_r, coef[idx_r])
# plot the noisy reconstruction with number of non-zeros set by CV
##################################################################
# OrthogonalMatchingPursuitCV chooses n_nonzero_coefs by cross-validation
omp_cv = OrthogonalMatchingPursuitCV()
omp_cv.fit(X, y_noisy)
coef = omp_cv.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 4)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements with CV")
plt.stem(idx_r, coef[idx_r])
plt.subplots_adjust(0.06, 0.04, 0.94, 0.90, 0.20, 0.38)
plt.suptitle('Sparse signal recovery with Orthogonal Matching Pursuit',
             fontsize=16)
plt.show()
| bsd-3-clause |
harpolea/pyro2 | compressible/problems/logo.py | 2 | 2297 | from __future__ import print_function
import sys
import mesh.patch as patch
import numpy as np
from util import msg
import matplotlib.pyplot as plt
def init_data(my_data, rp):
    """Initialize the pyro "logo" problem.

    Renders the word "pyro" with matplotlib at a resolution matched to the
    grid, converts the rendered canvas into a density field, and sets up an
    ambient pressure-equilibrium medium with point "explosions" in the
    energy field at the four grid corners.

    Parameters
    ----------
    my_data : patch.CellCenterData2d
        Grid data object holding density, momenta, and energy.
    rp : runtime parameters object
        Used to look up ``eos.gamma``.
    """
    msg.bold("initializing the logo problem...")

    # make sure that we are passed a valid patch object
    if not isinstance(my_data, patch.CellCenterData2d):
        print("ERROR: patch invalid in logo.py")
        print(my_data.__class__)
        sys.exit()

    # create the logo: render text offscreen at dpi matched to grid nx
    myg = my_data.grid
    fig = plt.figure(2, (0.64, 0.64), dpi=100*myg.nx/64)
    fig.add_subplot(111)
    fig.text(0.5, 0.5, "pyro", transform=fig.transFigure, fontsize="16",
             horizontalalignment="center", verticalalignment="center")
    plt.axis("off")
    fig.canvas.draw()

    # decode the raw RGB canvas bytes; np.frombuffer replaces the
    # deprecated np.fromstring(..., sep='') with identical behavior
    data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
    data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
    # invert the green channel, normalize to [0, 1], rotate into grid
    # orientation (three 90-degree rotations == rot90 with k=3)
    logo = np.rot90((256 - data[:, :, 1]) / 255.0, 3)

    # get the density, momenta, and energy as separate variables
    dens = my_data.get_var("density")
    xmom = my_data.get_var("x-momentum")
    ymom = my_data.get_var("y-momentum")
    ener = my_data.get_var("energy")

    # initialize the components, remember, that ener here is rho*eint
    # + 0.5*rho*v**2, where eint is the specific internal energy
    # (erg/g)
    dens[:, :] = 1.0
    xmom[:, :] = 0.0
    ymom[:, :] = 0.0

    # set the density in the logo zones to be really large
    # NOTE(review): zones where the rendered logo is blank get zero density
    # in the valid region -- confirm this is intended.
    logo_dens = 50.0
    dens.v()[:, :] = logo[:, :] * logo_dens

    # pressure equilibrium
    gamma = rp.get_param("eos.gamma")
    p_ambient = 1.e-5
    ener[:, :] = p_ambient/(gamma - 1.0)

    # explosions at the four corners of the valid region
    ener[myg.ilo, myg.jlo] = 1.0
    ener[myg.ilo, myg.jhi] = 1.0
    ener[myg.ihi, myg.jlo] = 1.0
    ener[myg.ihi, myg.jhi] = 1.0
def finalize():
    """ print out any information to the user at the end of the run """
    # use a name other than ``msg`` so we do not shadow the util.msg
    # module imported at the top of the file
    note = """
The script analysis/sedov_compare.py can be used to analyze these
results. That will perform an average at constant radius and
compare the radial profiles to the exact solution. Sample exact
data is provided as analysis/cylindrical-sedov.out
"""
    print(note)
| bsd-3-clause |
bxlab/HiFive_Paper | Scripts/HiCLib/mirnylab-hiclib-460c3fbc0f72/src/hiclib/binnedData.py | 2 | 64389 | #(c) 2012 Massachusetts Institute of Technology. All Rights Reserved
# Code written by: Maksim Imakaev (imakaev@mit.edu)
#TODO:(MIU) Write tests for this module!
"""
Binned data - analysis of HiC, binned to resolution.
Concepts
--------
class Binned Data allows low-level manipulation of multiple HiC datasets,
binned to the same resolution from the same genome.
When working with multiple datasets, all the filters will be synchronized,
so only bins present in all datasets will be considered for the analysis.
Removal of bins from one dataset will remove them from the others.
E.g. removing 1% of bins with lowest # of count might remove more than 1% of
total bins, when working with 2 or more datasets.
Class has significant knowledge about filters that have been applied.
If an essential filter was not applied, it will throw an exception;
if advised filter is not applied, it will throw a warning.
However, it does not guarantee dependencies, and you have to think yourself.
Most of the methods have an optional "force" argument that will
ignore dependencies.
We provide example scripts that show ideal protocols for certain types of
the analysis, but they don't cover the realm of all possible manipulations
that can be performed with this class.
Input data
----------
method :py:func:`SimpleLoad <binnedData.simpleLoad>` may be used to load
the data. It automatically checks for possible genome length mismatch.
This method works best with h5dict files, created by fragmentHiC.
In this case you just need to supply the filename.
It can also accept any dictionary-like object with the following keys,
where all but "heatmap" is optional.
* ["heatmap"] : all-by-all heatmap
* ["singles"] : vector of SS reads, optional
* ["frags"] : number of rsites per bin, optional
* ["resolution"] : resolution
All information about the genome, including GC content and restriction sites,
can be obtained from the Genome class.
Genomic tracks can be loaded using an automated parser that accepts bigWig
files and fixed step wiggle files.
See documentation for :py:func:`experimentalBinnedData.loadWigFile` that
describes exactly how the data is averaged and parsed.
Variables
---------
self.dataDict - dictionary with heatmaps; keys are provided when loading
the data.
self.singlesDict - dictionary with SS read vectors. Keys are the same.
self.fragsDict - dictionary with fragment density data
self.trackDict - dictionary with genomic tracks, such as GC content.
Custom tracks should be added here.
self.biasDict - dictionary with biases as calculated by
iterative correction (incomplete)
self.PCDict - dictionary with principal components of each datasets.
Keys as in dataDict
self.EigEict - dictionary with eigenvectors for each dataset.
Keys as in datadict.
Hierarchy of filters
--------------------
This hierarchy attempts to connect all logical dependencies between
filters into one diagram.
This includes both biological dependencies and programming dependencies.
As a result, it's incomplete and might be not 100% accurate.
Generally filters from the next group should be applied after filters
from previous groups, if any.
Examples of the logic are below:
* First, apply filters that don't depend on counts,
i.e. remove diagonal and low-coverage bins.
* Second, remove regions with poor coverage;
do this before chaining heatmaps with other filters.
* Fake translocations before truncating trans, as translocations are very
high-count regions, and truncTrans will truncate them, not actuall trans reads
* Faking reads currently requires zeros to be removed.
This will be changed later
* Fake cis counts after truncating trans, so that they don't get faked with
extremely high-count outliers in a trans-map
* Perform iterative correction after all the filters are applied
* Preform PCA after IC of trans data, and with zeros removed
1. Remove Diagonal, removeBySequencedCount
2. RemovePoorRegions, RemoveStandalone (this two filters are not transitive)
3. fakeTranslocations
4. truncTrans
5. fakeCis
6. iterative correction (does not require removeZeros)
7. removeZeros
8. PCA (Requires removeZeros)
9. RestoreZeros
Besides that, filter dependencies are:
* Faking reads requires: removeZeros
* PCA requires: removeZeros, fakeCis
* IC with SS requires: no previous iterative corrections, no removed cis reads
* IC recommends removal of poor regions
Other filter dependencies, including advised but not required filters, will be
issued as warnings during runtime of a program.
-------------------------------------------------------------------------------
API documentation
-----------------
"""
import os
from mirnylib import numutils
import warnings
from mirnylib.numutils import PCA, EIG, correct, \
ultracorrectSymmetricWithVector, isInteger, \
observedOverExpected, ultracorrect, adaptiveSmoothing, \
removeDiagonals, fillDiagonal
from mirnylib.genome import Genome
import numpy as np
from math import exp
from mirnylib.h5dict import h5dict
from scipy.stats.stats import spearmanr
from mirnylib.numutils import fakeCisImpl
class binnedData(object):
"""Base class to work with binned data, the most documented and
robust part of the code. Further classes for other analysis
are inherited from this class.
"""
def __init__(self, resolution, genome, readChrms=["#", "X"]):
    """
    self.__init__ - initializes an empty dataset.
    This method sets up a Genome object and resolution.
    Genome object specifies genome version and inclusion/exclusion
    of sex chromosomes.
    Parameters
    ----------
    resolution : int
        Resolution of all datasets
    genome : genome Folder or Genome object
    """
    # accept either a path to a genome folder or a ready Genome object
    if type(genome) == str:
        self.genome = Genome(genomePath=genome, readChrms=readChrms)
    else:
        self.genome = genome
    assert hasattr(self.genome, "chrmCount")
    # NOTE(review): if resolution is None, self.resolution is never set and
    # setResolution below will fail -- confirm callers always pass an int
    if resolution is not None:
        self.resolution = resolution
    self.chromosomes = self.genome.chrmLens
    self.genome.setResolution(self.resolution)
    self._initChromosomes()
    # per-dataset storage; keys are the dataset names supplied at load time
    self.dataDict = {}       # heatmaps
    self.biasDict = {}       # iterative-correction biases
    self.trackDict = {}      # genomic tracks (GC content etc.)
    self.singlesDict = {}    # single-sided read vectors
    self.fragsDict = {}      # restriction-fragment densities
    self.PCDict = {}         # principal components
    self.EigDict = {}        # eigenvectors
    self.eigEigenvalueDict = {}
    self.PCAEigenvalueDict = {}
    # grouped so filters can be applied to all per-bin vectors at once
    self.dicts = [self.trackDict, self.biasDict, self.singlesDict,
                  self.fragsDict]
    self.eigDicts = [self.PCDict, self.EigDict]
    self._loadGC()
    # record of filters applied so far, used for dependency checking
    self.appliedOperations = {}
def _initChromosomes(self):
    "internal: loads mappings from the genome class based on resolution"
    # all vectors below are per-bin, in genome-wide ("continuous") bins
    self.chromosomeStarts = self.genome.chrmStartsBinCont
    self.centromerePositions = self.genome.cntrMidsBinCont
    self.chromosomeEnds = self.genome.chrmEndsBinCont
    self.trackLength = self.genome.numBins
    self.chromosomeCount = self.genome.chrmCount
    self.chromosomeIndex = self.genome.chrmIdxBinCont
    self.positionIndex = self.genome.posBinCont
    # arm index: 2 * chromosome + 1 if the bin lies past the centromere
    self.armIndex = self.chromosomeIndex * 2 + \
        np.array(self.positionIndex > self.genome.cntrMids
                 [self.chromosomeIndex], int)
def _giveMask(self):
    """Returns a boolean mask of bins with non-zero read counts.

    A bin is kept only if it has non-zero coverage in EVERY dataset.
    """
    # np.bool_ replaces the old np.bool alias (a deprecated alias for the
    # builtin bool, removed in numpy 1.24); the dtype is identical.
    self.mask = np.ones(len(self.dataDict.values()[0]), np.bool_)
    for data in self.dataDict.values():
        datasum = np.sum(data, axis=0)
        datamask = datasum > 0
        self.mask *= datamask
    return self.mask
def _giveMask2D(self):
    """Returns outer product of _giveMask with itself,
    i.e. bins with possibly non-zero counts"""
    self._giveMask()
    # outer product of the boolean mask with itself
    self.mask2D = np.outer(self.mask, self.mask)
    return self.mask2D
def _loadGC(self):
    "loads GC content at given resolution"
    # concatenate per-chromosome GC bins into one genome-wide track
    gc_track = np.concatenate(self.genome.GCBin)
    self.trackDict["GC"] = gc_track
def _checkItertiveCorrectionError(self):
    """internal method for checking if iterative correction
    might be bad to apply"""
    # NOTE: the misspelled name ("Itertive") is kept -- callers use it
    for value in self.dataDict.values():
        if isInteger(value) == True:
            # integer data: column sums are raw read counts; relative
            # error of correction scales like 1/sqrt(min column sum)
            s = np.sum(value, axis=0)
            sums = np.sort(s[s != 0])
            if sums[0] < 100:
                error = int(100. / np.sqrt(sums[0]))
                message1 = "Lowest 5 sums of an array rows are: " + \
                    str(sums[:5])
                warnings.warn("\n%s\nIterative correction will lead to \
about %d %% relative error for certain columns" %
                              (message1, error))
                if sums[0] < 5:
                    raise StandardError("Iterative correction is \
very dangerous. Use force=true to override.")
        else:
            # floating-point data (likely already corrected): use the
            # number of non-zero entries per column as a proxy instead
            s = np.sum(value > 0, axis=0)
            sums = np.sort(s[s != 0])
            if sums[0] < min(100, len(value) / 2):
                error = int(100. / np.sqrt(sums[0]))
                print "Got floating-point array for correction. Rows with \
5 least entrees are:", sums[:5]
                warnings.warn("\nIterative correction might lead to about\
%d %% relative error for certain columns" % error)
                if sums[0] < 4:
                    raise StandardError("Iterative correction is \
very dangerous. Use force=true to override.")
def _checkAppliedOperations(self, neededKeys=[],
advicedKeys=[],
excludedKeys=[]):
"Internal method to check if all needed operations were applied"
if (True in [i in self.appliedOperations for i in excludedKeys]):
print "Operations that are not allowed:", excludedKeys
print "applied operations: ", self.appliedOperations
print "use 'force = True' to override this message"
raise StandardError("Prohibited filter was applied")
if (False in [i in self.appliedOperations for i in neededKeys]):
print "needed operations:", neededKeys
print "applied operations:", self.appliedOperations
print "use 'force = True' to override this message"
raise StandardError("Critical filter not applied")
if (False in [i in self.appliedOperations for i in advicedKeys]):
print "Adviced operations:", advicedKeys
print "Applied operations:", self.appliedOperations
warnings.warn("\nNot all adviced filters applied")
def _recoverOriginalReads(self, key):
    """Attempts to recover original read counts from the data
    If data is integer, returns data.
    If not, attepts to revert iterative correction
    and return original copy.
    This method does not modify the dataset!
    Returns the recovered array, or None if recovery is impossible.
    """
    data = self.dataDict[key]
    if "Corrected" not in self.appliedOperations:
        # no correction applied: integer data IS the raw reads
        if isInteger(data):
            return data
        else:
            warnings.warn("Data was not corrected, but is not integer")
            return None
    else:
        # correction applied: undo it by multiplying the biases back in
        if key not in self.biasDict:
            warnings.warn("Correction was applied, "
                          "but bias information is missing!")
            return None
        bias = self.biasDict[key]
        data1 = data * bias[:, None]
        data1 *= bias[None, :]
        # the product should be (close to) integer if recovery worked
        if isInteger(data1):
            return data1
        else:
            warnings.warn("Attempted recovery of reads, but "
                          "data is not integer")
            return None
def simpleLoad(self, in_data, name, chromosomeOrder=None):
    """Loads data from h5dict file or dict-like object
    Parameters
    ----------
    in_data : str or dict-like
        h5dict filename or dictionary-like object with input data,
        stored under the key "heatmap", and a vector of SS reads,
        stored under the key "singles".
    name : str
        Key under which to store dataset in self.dataDict
    chromosomeOrder : None or list
        If file to load is a byChromosome map, use this to define chromosome order
    """
    # accept a filename or an already-open dict-like object
    if type(in_data) == str:
        path = os.path.abspath(os.path.expanduser(in_data))
        if os.path.exists(path) == False:
            raise IOError("HDF5 dict do not exist, %s" % path)
        alldata = h5dict(path, mode="r")
    else:
        alldata = in_data
    if type(alldata) == h5dict:
        # a by-chromosome map stores blocks under "i j" keys; assemble
        # them into one genome-wide heatmap
        if ("0 0" in alldata.keys()) and ("heatmap" not in alldata.keys()):
            if chromosomeOrder != None:
                chromosomes = chromosomeOrder
            else:
                chromosomes = xrange(self.chromosomeCount)
            datas = []
            for i in chromosomes:
                datas.append(np.concatenate([alldata["{0} {1}".format(i, j)] for j in chromosomes], axis=1))
            newdata = {"heatmap": np.concatenate(datas)}
            for i in alldata.keys():
                newdata[i] = alldata[i]
            alldata = newdata
    self.dataDict[name] = np.asarray(alldata["heatmap"], dtype=np.double)
    # optional auxiliary vectors; missing keys are tolerated
    try:
        self.singlesDict[name] = alldata["singles"]
    except:
        print "No SS reads found"
    try:
        if len(alldata["frags"]) == self.genome.numBins:
            self.fragsDict[name] = alldata["frags"]
        else:
            print "Different bin number in frag dict"
    except:
        pass
    # sanity checks: resolution and genome size must match this object
    if "resolution" in alldata:
        if self.resolution != alldata["resolution"]:
            print "resolution mismatch!!!"
            print "--------------> Bye <-------------"
            raise StandardError("Resolution mismatch! ")
    if self.genome.numBins != len(alldata["heatmap"]):
        print "Genome length mismatch!!!"
        print "source genome", len(alldata["heatmap"])
        print "our genome", self.genome.numBins
        print "Check for readChrms parameter when you identify the genome"
        raise StandardError("Genome size mismatch! ")
def export(self, name, outFilename, byChromosome=False, **kwargs):
    """
    Exports current heatmaps and SS files to an h5dict.
    Parameters
    ----------
    name : str
        Key for the dataset to export
    outFilename : str
        Where to export
    byChromosome : bool or "cis" or "all"
        save by chromosome heatmaps.
        Ignore SS reads.
        True means "all"
    """
    # guard against the old keyword spelling
    if "out_filename" in kwargs.keys():
        raise ValueError("out_filename replaced with outFilename!")
    if name not in self.dataDict:
        raise ValueError("No data {name}".format(name=name))
    toexport = {}
    if byChromosome is False:
        # single genome-wide heatmap plus optional auxiliary vectors
        toexport["heatmap"] = self.dataDict[name]
        if name in self.singlesDict:
            toexport["singles"] = self.singlesDict[name]
        if name in self.fragsDict:
            toexport["frags"] = self.fragsDict[name]
    else:
        # store one block per chromosome pair under "i j" keys;
        # byChromosome == "cis" keeps only diagonal (i == j) blocks
        hm = self.dataDict[name]
        for i in xrange(self.genome.chrmCount):
            for j in xrange(self.genome.chrmCount):
                if (byChromosome == "cis") and (i != j):
                    continue
                st1 = self.chromosomeStarts[i]
                end1 = self.chromosomeEnds[i]
                st2 = self.chromosomeStarts[j]
                end2 = self.chromosomeEnds[j]
                toexport["{0} {1}".format(i, j)] = hm[st1:end1,
                                                      st2:end2]
    # metadata describing the binning
    toexport["resolution"] = self.resolution
    toexport["genome"] = self.genome.folderName
    toexport["binNumber"] = len(self.chromosomeIndex)
    toexport["genomeIdxToLabel"] = self.genome.idx2label
    toexport["chromosomeStarts"] = self.chromosomeStarts
    toexport["chromosomeIndex"] = self.chromosomeIndex
    toexport["positionIndex"] = self.positionIndex
    myh5dict = h5dict(outFilename, mode="w")
    myh5dict.update(toexport)
def removeDiagonal(self, m=1):
    """Removes all bins on a diagonal, and bins that are up to m away
    from the diagonal, including m.
    By default, removes all bins touching the diagonal.
    Parameters
    ----------
    m : int, optional
        Number of bins to remove
    """
    for key in self.dataDict.keys():
        # ensure a C-contiguous double array before zeroing in place
        heatmap = np.asarray(self.dataDict[key], dtype=np.double,
                             order="C")
        self.dataDict[key] = heatmap
        removeDiagonals(heatmap, m)
    self.appliedOperations["RemovedDiagonal"] = True
    # remembered so that later filters (e.g. smoothing) can honor it
    self.removedDiagonalValue = m
def removeStandalone(self, offset=3):
    """removes standalone groups of bins
    (groups of less-than-offset bins)
    Parameters
    ----------
    offset : int
        Maximum length of group of bins to be removed
    """
    # find runs of non-zero bins by diffing the padded boolean mask
    diffs = np.diff(np.array(np.r_[False, self._giveMask(), False], int))
    begins = np.nonzero(diffs == 1)[0]
    ends = np.nonzero(diffs == -1)[0]
    # keep only the short runs (length <= offset) for removal
    beginsmask = (ends - begins) <= offset
    newbegins = begins[beginsmask]
    newends = ends[beginsmask]
    print "removing %d standalone bins" % np.sum(newends - newbegins)
    mask = self._giveMask()
    for i in xrange(len(newbegins)):
        mask[newbegins[i]:newends[i]] = False
    # zero out both rows and columns of all removed bins
    mask2D = mask[:, None] * mask[None, :]
    antimask = np.nonzero(mask2D.flat == False)[0]
    for i in self.dataDict.values():
        i.flat[antimask] = 0
    self.appliedOperations["RemovedStandalone"] = True
def removeBySequencedCount(self, sequencedFraction=0.5):
"""
Removes bins that have less than sequencedFraction*resolution
sequenced counts.
This filters bins by percent of sequenced counts,
and also removes the last bin if it's very short.
.. note:: this is not equivalent to mapability
Parameters
----------
sequencedFraction: float, optional, 0<x<1
Fraction of the bin that needs to be sequenced in order
to keep the bin
"""
self._checkAppliedOperations(excludedKeys="RemovedZeros")
binCutoff = int(self.resolution * sequencedFraction)
sequenced = np.concatenate(self.genome.mappedBasesBin)
mask = sequenced < binCutoff
nzmask = np.zeros(
len(mask), bool) # mask of regions with non-zero counts
for i in self.dataDict.values():
sumData = np.sum(i[mask], axis=1) > 0
nzmask[mask] = nzmask[mask] + sumData
i[mask, :] = 0
i[:, mask] = 0
print "Removing %d bins with <%lf %% coverage by sequenced reads" % \
((nzmask > 0).sum(), 100 * sequencedFraction)
self.appliedOperations["RemovedUnsequenced"] = True
pass
def removePoorRegions(self, names=None, cutoff=2, coverage=False, trans=False):
"""Removes "cutoff" percent of bins with least counts
Parameters
----------
names : list of str
List of datasets to perform the filter. All by default.
cutoff : int, 0<cutoff<100
Percent of lowest-counts bins to be removed
"""
statmask = np.zeros(len(self.dataDict.values()[0]), np.bool)
mask = np.ones(len(self.dataDict.values()[0]), np.bool)
if names is None:
names = self.dataDict.keys()
for i in names:
data = self.dataDict[i]
if trans:
data = data.copy()
data[self.chromosomeIndex[:, None] == self.chromosomeIndex[None, :]] = 0
datasum = np.sum(data, axis=0)
datamask = datasum > 0
mask *= datamask
if coverage == False:
countsum = np.sum(data, axis=0)
elif coverage == True:
countsum = np.sum(data > 0, axis=0)
else:
raise ValueError("coverage is true or false!")
newmask = countsum >= np.percentile(countsum[datamask], cutoff)
mask *= newmask
statmask[(newmask == False) * (datamask == True)] = True
print "removed {0} poor bins".format(statmask.sum())
inds = np.nonzero(mask == False)
for i in self.dataDict.values():
i[inds, :] = 0
i[:, inds] = 0
self.appliedOperations["RemovedPoor"] = True
def truncTrans(self, high=0.0005):
"""Truncates trans contacts to remove blowouts
Parameters
----------
high : float, 0<high<1, optional
Fraction of top trans interactions to be removed
"""
for i in self.dataDict.keys():
data = self.dataDict[i]
transmask = self.chromosomeIndex[:,
None] != self.chromosomeIndex[None, :]
lim = np.percentile(data[transmask], 100. * (1 - high))
print "dataset %s truncated at %lf" % (i, lim)
tdata = data[transmask]
tdata[tdata > lim] = lim
self.dataDict[i][transmask] = tdata
self.appliedOperations["TruncedTrans"] = True
def removeCis(self):
    "sets to zero all cis contacts"
    # cis bins: pairs whose two loci share a chromosome
    cismask = self.chromosomeIndex[:, None] == \
        self.chromosomeIndex[None, :]
    for heatmap in self.dataDict.values():
        heatmap[cismask] = 0
    self.appliedOperations["RemovedCis"] = True
    print("All cis counts set to zero")
def fakeCisOnce(self, mask="CisCounts", silent=False):
    """Used to fake cis counts or any other region
    with random trans counts.
    If extra mask is supplied, it is used instead of cis counts.
    This method draws fake contact once.
    Use fakeCis() for iterative self-consistent faking of cis.
    Parameters
    ----------
    mask : NxN boolean array or "CisCounts"
        Mask of elements to be faked.
        If set to "CisCounts", cis counts will be faked
        When mask is used, cis elements are NOT faked.
    silent : bool
        Do not print anything
    """
    if silent == False:
        print("All cis counts are substituted with matching trans count")
    # BUGFIX (addresses the old TODO): test the type explicitly.  The
    # previous check `mask == "CisCounts"` compared a numpy array with a
    # string when a mask array was passed; depending on the numpy version
    # that comparison is elementwise or warns, so make it unambiguous.
    useCisCounts = isinstance(mask, str) and mask == "CisCounts"
    for key in self.dataDict.keys():
        data = np.asarray(self.dataDict[key], order="C", dtype=float)
        if useCisCounts:
            # mask value 1 marks cis elements to be faked
            _mask = np.array(self.chromosomeIndex[:, None] ==
                             self.chromosomeIndex[None, :], int, order="C")
        else:
            assert mask.shape == self.dataDict.values()[0].shape
            _mask = np.array(mask, dtype=int, order="C")
            # value 2 marks cis elements: never faked, never used as source
            _mask[self.chromosomeIndex[:, None] ==
                  self.chromosomeIndex[None, :]] = 2
        # also exclude empty (all-zero) rows/columns from the drawing
        s = np.abs(np.sum(data, axis=0)) <= 1e-10
        _mask[:, s] = 2
        _mask[s, :] = 2
        _mask = np.asarray(_mask, dtype=np.int64)
        fakeCisImpl(data, _mask)
        self.dataDict[key] = data
    self.appliedOperations["RemovedCis"] = True
    self.appliedOperations["FakedCis"] = True
def fakeCis(self, force=False, mask="CisCounts"):
    """This method fakes cis contacts in an interative way
    It is done to achieve faking cis contacts that is
    independent of normalization of the data.
    Parameters
    ----------
    Force : bool (optional)
        Set this to avoid checks for iterative correction
    mask : see fakeCisOnce
    """
    # drop cis counts, correct once, then alternate faking and
    # correction twice so faked counts become self-consistent with
    # the normalization
    self.removeCis()
    self.iterativeCorrectWithoutSS(force=force)
    for _ in (0, 1):
        self.fakeCisOnce(silent=True, mask=mask)
        self.iterativeCorrectWithoutSS(force=force)
    print("All cis counts are substituted with faked counts")
    print("Data is iteratively corrected as a part of faking cis counts")
def fakeTranslocations(self, translocationRegions):
    """
    This method fakes reads corresponding to a translocation.
    Parameters
    ----------
    translocationRegions: list of tuples
        List of tuples (chr1,start1,end1,chr2,start2,end2),
        masking a high-count region around visible translocation.
        If end1/end2 is None, it is treated as length of chromosome.
        So, use (chr1,0,None,chr2,0,None) to remove inter-chromosomal
        interaction entirely.
    """
    # BUGFIX: excludedKeys must be a list; a bare string was iterated
    # character-by-character, so the RemovedZeros guard never fired.
    self._checkAppliedOperations(excludedKeys=["RemovedZeros"])
    mask = np.zeros((self.genome.numBins, self.genome.numBins), int)
    resolution = self.genome.resolution
    for i in translocationRegions:
        st1 = self.genome.chrmStartsBinCont[i[0]]
        st2 = self.genome.chrmStartsBinCont[i[3]]
        # NOTE: Python 2 integer division floors positions to whole bins
        beg1 = st1 + i[1] / resolution
        if i[2] is not None:
            end1 = st1 + i[2] / resolution + 1
        else:
            end1 = self.genome.chrmEndsBinCont[i[0]]
        beg2 = st2 + i[4] / resolution
        if i[5] is not None:
            end2 = st2 + i[5] / resolution + 1
        else:
            end2 = self.genome.chrmEndsBinCont[i[3]]
        # mask the translocation region symmetrically
        mask[beg1:end1, beg2:end2] = 1
        mask[beg2:end2, beg1:end1] = 1
    self.fakeCisOnce(mask=mask)
def correct(self, names=None):
    """performs single correction without SS
    (iterative correction limited to one iteration)
    Parameters
    ----------
    names : list of str or None
        Keys of datasets to be corrected. If none, all are corrected.
    """
    self.iterativeCorrectWithoutSS(names=names, M=1)
def iterativeCorrectWithoutSS(self, names=None, M=None, force=False,
                              tolerance=1e-5):
    """performs iterative correction without SS
    Parameters
    ----------
    names : list of str or None, optional
        Keys of datasets to be corrected. By default, all are corrected.
    M : int, optional
        Number of iterations to perform.
    force : bool, optional
        Ignore warnings and pre-requisite filters
    tolerance : float, optional
        Convergence tolerance passed to the correction routine.
    """
    if force == False:
        # warn/raise if correction is statistically dangerous or
        # advised filters were not applied first
        self._checkItertiveCorrectionError()
        self._checkAppliedOperations(advicedKeys=[
            "RemovedDiagonal", "RemovedPoor"])
    if names is None:
        names = self.dataDict.keys()
    for i in names:
        data, dummy, bias = ultracorrectSymmetricWithVector(
            self.dataDict[i], M=M, tolerance=tolerance)
        self.dataDict[i] = data
        # biases are stored so raw reads can be recovered later
        self.biasDict[i] = bias
        if i in self.singlesDict:
            # keep SS vectors consistent with the corrected heatmap
            self.singlesDict[i] = self.singlesDict[i] / bias.astype(float)
    self.appliedOperations["Corrected"] = True
def adaptiveSmoothing(self, smoothness, useOriginalReads="try",
                      names=None, rawReadDict=None):
    """
    Performs adaptive smoothing of Hi-C datasets.
    Adaptive smoothing attempts to smooth low-count, "sparce" part
    of a Hi-C matrix, while keeping the contrast in a high-count
    "diagonal" part of the matrix.
    It does it by blurring each bin pair value into a gaussian, which
    should encoumpass at least **smoothness** raw reads. However, only
    half of reads from each bin pair is counted into this gaussian, while
    full reads from neighboring bin pairs are counted.
    To summarize:
    If a bin pair contains #>2*smoothness reads, it is kept intact.
    If a bin pair contains #<2*smoothness reads, reads around bin pair
    are counted, and a bin pair is smoothed to a circle (gaussian),
    containing smoothness - (#/2) reads.
    A standalone read in a sparce part of a matrix is smoothed to a
    circle (gaussian) that encoumpasses smoothness reads.
    .. note::
        This algorithm can smooth any heatmap, e.g. corrected one.
        However, ideally it needs to know raw reads to correctly leverage
        the contribution from different bins.
        By default, it attempts to recover raw reads. However, it
        can do so only after single iterative correction.
        If used after fakeCis method, it won't use raw reads, unless
        provided externally.
    .. warning::
        Note that if you provide raw reads externally, you would need
        to make a copy of dataDict prior to filtering the data,
        not just a reference to it. Like
        >>>for i in keys: dataCopy[i] = self.dataDict[i].copy()
    Parameters
    ----------
    smoothness : float, positive. Often >1.
        Parameter of smoothness as described above
    useOriginalReads : bool or "try"
        If True, requires to recover original reads for smoothness
        If False, treats heatmap data as reads
        If "try", attempts to recover original reads;
        otherwise proceeds with heatmap data.
    rawReadDict : dict
        A copy of self.dataDict with raw reads
    """
    if names is None:
        names = self.dataDict.keys()
    mask2D = self._giveMask2D()
    #If diagonal was removed, we should remember about it!
    if hasattr(self, "removedDiagonalValue"):
        removeDiagonals(mask2D, self.removedDiagonalValue)
    for name in names:
        data = self.dataDict[name]
        if useOriginalReads is not False:
            if rawReadDict is not None:
                #raw reads provided externally
                reads = rawReadDict[name]
            else:
                #recovering raw reads
                reads = self._recoverOriginalReads(name)
            if reads is None:
                #failed to recover reads
                if useOriginalReads == True:
                    raise RuntimeError("Cannot recover original reads!")
        else:
            #raw reads were not requested
            reads = None
        if reads is None:
            reads = data  # Feed this to adaptive smoothing
        smoothed = np.zeros_like(data, dtype=float)
        N = self.chromosomeCount
        # smooth each chromosome-pair block independently so the blur
        # never crosses chromosome boundaries
        for i in xrange(N):
            for j in xrange(N):
                st1 = self.chromosomeStarts[i]
                st2 = self.chromosomeStarts[j]
                end1 = self.chromosomeEnds[i]
                end2 = self.chromosomeEnds[j]
                cur = data[st1:end1, st2:end2]
                curReads = reads[st1:end1, st2:end2]
                curMask = mask2D[st1:end1, st2:end2]
                # calls the module-level adaptiveSmoothing imported from
                # mirnylib.numutils (not this method)
                s = adaptiveSmoothing(matrix=cur,
                                      cutoff=smoothness,
                                      alpha=0.5,
                                      mask=curMask,
                                      originalCounts=curReads)
                smoothed[st1:end1, st2:end2] = s
        self.dataDict[name] = smoothed
    self.appliedOperations["Smoothed"] = True
def removeChromosome(self, chromNum):
    """Removes a chromosome from all tracks and heatmaps by setting
    all of its values to zero.

    Zeroes the chromosome's rows and columns in every heatmap of
    self.dataDict, its bins in every track of self.dicts, and its
    bins (columns) in every eigenvector matrix of self.eigDicts.

    Parameters
    ----------
    chromNum : int
        Number of chromosome to be removed
    """
    beg = self.genome.chrmStartsBinCont[chromNum]
    end = self.genome.chrmEndsBinCont[chromNum]
    for i in self.dataDict.values():
        i[beg:end] = 0
        i[:, beg:end] = 0
    for mydict in self.dicts:
        for value in mydict.values():
            value[beg:end] = 0
    for mydict in self.eigDicts:
        for value in mydict.values():
            # Eigenvector arrays are (numPCs, numBins); bins are
            # columns (see removeZeros/restoreZeros).  The old code
            # zeroed value[beg:end] (rows, i.e. whole eigenvectors)
            # instead of the chromosome's bins.
            value[:, beg:end] = 0
def removeZeros(self, zerosMask=None):
    """Removes bins with zero counts from all data, keeping
    chromosome starts, ends, centromere positions, etc. consistent
    with the new (shorter) indexing.

    Parameters
    ----------
    zerosMask : length N array or None, optional
        If provided, this boolean mask defines the bins to KEEP.
        By default, bins that are zero in the 2D mask or have zero
        column sum in any heatmap are removed.

    Returns
    -------
    The boolean mask of kept bins.  It is also stored as
    self.removeZerosMask for a later restoreZeros() call.
    """
    if zerosMask is not None:
        s = zerosMask
    else:
        s = np.sum(self._giveMask2D(), axis=0) > 0
        for i in self.dataDict.values():
            # drop bins that are empty in ANY dataset
            s *= (np.sum(i, axis=0) > 0)
    s = np.asarray(s, dtype=bool)
    # indices[i] = new position of old bin i (= number of kept bins
    # strictly before i).  One extra entry is appended so that old
    # chromosome ends equal to N can be translated as well.
    indices = np.cumsum(s) - s
    indices = np.r_[indices, indices[-1] + 1]
    N = len(self.positionIndex)
    for i in self.dataDict.keys():
        a = self.dataDict[i]
        if len(a) != N:
            # was "%i" applied to a (string) dict key -> TypeError
            raise ValueError("Wrong dimensions of data %s: "
                             "%d instead of %d" % (i, len(a), N))
        self.dataDict[i] = a[:, s][s, :]
    for mydict in self.dicts:
        for key in mydict.keys():
            if len(mydict[key]) != N:
                raise ValueError("Wrong dimensions of data {0}: {1} instead of {2}".format(key, len(mydict[key]), N))
            mydict[key] = mydict[key][s]
    for mydict in self.eigDicts:
        for key in mydict.keys():
            # Check dimensions BEFORE slicing.  The old code sliced
            # first and then compared the already-shortened width to
            # N, raising spuriously whenever any bin was removed.
            if len(mydict[key][0]) != N:
                raise ValueError("Wrong dimensions of data %s: "
                                 "%d instead of %d"
                                 % (key, len(mydict[key][0]), N))
            mydict[key] = mydict[key][:, s]
    self.chromosomeIndex = self.chromosomeIndex[s]
    self.positionIndex = self.positionIndex[s]
    self.armIndex = self.armIndex[s]
    # translate old bin positions to new ones
    self.chromosomeEnds = indices[self.chromosomeEnds]
    self.chromosomeStarts = indices[self.chromosomeStarts]
    self.centromerePositions = indices[self.centromerePositions]
    self.removeZerosMask = s
    if self.appliedOperations.get("RemovedZeros", False) == True:
        warnings.warn("\nYou're removing zeros twice. \
        You can't restore zeros now!")
    self.appliedOperations["RemovedZeros"] = True
    self.genome.setResolution(-1)
    return s
def restoreZeros(self, value=np.nan):
    """Restores zeros that were removed by the removeZeros command.

    .. warning:: You can restore zeros only if you used removeZeros once.

    Parameters
    ----------
    value : number-like, optional.
        Value to fill in missing regions. By default, NaN.
    """
    if not hasattr(self, "removeZerosMask"):
        # was py2-only StandardError; ValueError is a compatible subclass
        raise ValueError("Zeros have not been removed!")
    s = self.removeZerosMask
    N = len(s)
    # NOTE: the old code used np.zeros(...) * value, which yields 0,
    # not value, for every finite fill value (it only worked for NaN).
    # Adding instead of multiplying fills correctly (0 + value).
    for i in self.dataDict.keys():
        a = self.dataDict[i]
        self.dataDict[i] = np.zeros((N, N), dtype=a.dtype) + value
        # expand columns first, then rows
        tmp = np.zeros((N, len(a)), dtype=a.dtype) + value
        tmp[s, :] = a
        self.dataDict[i][:, s] = tmp
    for mydict in self.dicts:
        for key in mydict.keys():
            a = mydict[key]
            mydict[key] = np.zeros(N, dtype=a.dtype) + value
            mydict[key][s] = a
    for mydict in self.eigDicts:
        for key in mydict.keys():
            a = mydict[key]
            # eigenvector matrices are (numPCs, numBins)
            mydict[key] = np.zeros((len(a), N), dtype=a.dtype) + value
            mydict[key][:, s] = a
    self.genome.setResolution(self.resolution)
    self._initChromosomes()
    self.appliedOperations["RemovedZeros"] = False
def doPCA(self, force=False):
    """Performs PCA on every dataset in self.dataDict.

    Each principal component is flipped, if necessary, so that it
    correlates positively with the GC track.  Components are stored
    in self.PCDict and eigenvalues in self.PCAEigenvalueDict.
    Last column of a PC matrix is the first PC, second to last -
    second, etc.

    Returns
    -------
    Dictionary of principal component matrices for different datasets
    """
    if force == False:
        # refuse to run on data that was not properly pre-processed
        self._checkAppliedOperations(
            ["RemovedZeros", "Corrected", "FakedCis"],
            ["TruncedTrans", "RemovedPoor"])
    gcTrack = self.trackDict["GC"]
    for name in self.dataDict.keys():
        components, eigenvalues = PCA(self.dataDict[name])
        self.PCAEigenvalueDict[name] = eigenvalues
        # orient every component to correlate positively with GC
        for num, component in enumerate(components):
            if spearmanr(component, gcTrack)[0] < 0:
                components[num] = -component
        self.PCDict[name] = components
    return self.PCDict
def doEig(self, numPCs=3, force=False):
    """Performs eigenvector expansion on every dataset in
    self.dataDict.

    Each eigenvector is flipped, if necessary, so that it correlates
    positively with the GC track.  Vectors are stored in self.EigDict
    (last row of the matrix is the largest eigenvector, etc.) and
    eigenvalues in self.eigEigenvalueDict.

    Returns
    -------
    Dictionary of eigenvector matrices for different datasets
    """
    if force == False:
        # refuse to run on data that was not properly pre-processed
        self._checkAppliedOperations(
            ["RemovedZeros", "Corrected", "FakedCis"],
            ["TruncedTrans", "RemovedPoor"])
    gcTrack = self.trackDict["GC"]
    for name in self.dataDict.keys():
        vectors, eigenvalues = EIG(self.dataDict[name], numPCs=numPCs)
        self.eigEigenvalueDict[name] = eigenvalues
        # orient every eigenvector to correlate positively with GC
        for num, vector in enumerate(vectors):
            if spearmanr(vector, gcTrack)[0] < 0:
                vectors[num] = -vector
        self.EigDict[name] = vectors
    return self.EigDict
def doCisPCADomains(
        self, numPCs=3, swapFirstTwoPCs=False, useArms=True,
        corrFunction=lambda x, y: spearmanr(x, y)[0],
        domainFunction="default"):
    """Calculates A-B compartments based on cis data.

    All PCs are oriented to have positive correlation with GC.
    Writes the main result (PCs) into the self.PCDict dictionary.
    Additionally, returns correlation coefficients with GC, by
    chromosome (or arm).

    Parameters
    ----------
    numPCs : int, optional
        Number of PCs to compute
    swapFirstTwoPCs : bool, by default False
        Swap first and second PC if second has higher correlation with GC
    useArms : bool, by default True
        Use individual arms, not chromosomes
    corrFunction : function, default: spearmanr
        Function to compute correlation with GC.
        Accepts two arrays, returns correlation
    domainFunction : function or str, optional
        Function to calculate principal components of a square matrix.
        Accepts: N by N matrix; returns: numPCs by N matrix.
        Default does iterative correction, then observed over expected,
        then IC again, then PCA of the correlation matrix.
        Other string options: "lieberman"/"erez" (no second IC),
        "metaphasepaper"/"geoff" (like in Naumova, Science 2013),
        "lieberman-"/"erez-" (closest to Lieberman 2009: no IC).

    .. note:: Main output of this function is written to self.PCDict

    Returns
    -------
    corrdict, lengthdict
        Dictionaries with keys for each dataset.
        Values of corrdict contain, per chromosome (or arm) of
        non-zero length, a numPCs-tuple of correlation coefficients
        with GC.  Values of lengthdict contain lengths of
        chromosomes/arms, usable as averaging weights.
    """
    corr = corrFunction
    if isinstance(domainFunction, str):
        fname = domainFunction.lower()
        if fname in ["metaphasepaper", "default", "lieberman",
                     "erez", "geoff", "lieberman+", "erez+"]:
            def domainFunction(chrom):
                M = len(chrom.flat)
                # clip extreme outliers, at most the top 0.1 percent
                toclip = 100 * min(0.999, (M - 10.) / M)
                removeDiagonals(chrom, 1)
                chrom = ultracorrect(chrom)
                chrom = observedOverExpected(chrom)
                chrom = np.clip(chrom, -1e10, np.percentile(chrom, toclip))
                for i in [-1, 0, 1]:
                    fillDiagonal(chrom, 1, i)
                if fname in ["default", "lieberman+", "erez+"]:
                    # upgrade of (Lieberman 2009):
                    # IC, then OoE, then IC, then corrcoef, then PCA
                    chrom = ultracorrect(chrom)
                    return PCA(np.corrcoef(chrom), numPCs)[0]
                elif fname in ["lieberman", "erez"]:
                    # slight upgrade of (Lieberman 2009):
                    # IC, then OoE, then corrcoef, then PCA
                    return PCA(np.corrcoef(chrom), numPCs)[0]
                else:
                    # "metaphasepaper" / "geoff"
                    chrom = ultracorrect(chrom)
                    return EIG(chrom, numPCs)[0]
        elif fname in ["lieberman-", "erez-"]:
            # simplest function presented in (Lieberman 2009)
            def domainFunction(chrom):
                removeDiagonals(chrom, 1)
                chrom = observedOverExpected(chrom)
                return PCA(np.corrcoef(chrom), numPCs)[0]
        else:
            # was a silent fall-through that crashed later
            raise ValueError("Unknown domain function: %s" % fname)
    corrdict, lengthdict = {}, {}
    # dicts of per-chromosome (or per-arm) correlation coefficients
    for key in self.dataDict.keys():
        corrdict[key] = []
        lengthdict[key] = []
        dataset = self.dataDict[key]
        N = len(dataset)
        # was hard-coded np.zeros((3, N)), which broke for numPCs != 3
        PCArray = np.zeros((numPCs, N))
        for chrom in range(len(self.chromosomeStarts)):
            if useArms == False:
                begs = (self.chromosomeStarts[chrom],)
                ends = (self.chromosomeEnds[chrom],)
            else:
                begs = (self.chromosomeStarts[chrom],
                        self.centromerePositions[chrom])
                ends = (self.centromerePositions[chrom],
                        self.chromosomeEnds[chrom])
            # map(None, ...) is Python-2-only; zip is equivalent here
            for end, beg in zip(ends, begs):
                if end - beg < 5:
                    continue  # skip tiny arms
                chromMatrix = dataset[beg:end, beg:end]
                GC = self.trackDict["GC"][beg:end]
                PCs = domainFunction(chromMatrix)
                # orient every PC to correlate positively with GC
                for PC in PCs:
                    if corr(PC, GC) < 0:
                        PC *= -1
                if swapFirstTwoPCs == True:
                    if corr(PCs[0], GC) < corr(PCs[1], GC):
                        p0, p1 = PCs[0].copy(), PCs[1].copy()
                        PCs[0], PCs[1] = p1, p0
                corrdict[key].append(tuple([corr(i, GC) for i in PCs]))
                lengthdict[key].append(end - beg)
                PCArray[:, beg:end] = PCs
        self.PCDict[key] = PCArray
    return corrdict, lengthdict
def cisToTrans(self, mode="All", filename="GM-all"):
    """
    Calculates the per-bin cis-to-trans ratio of a dataset.

    Parameters
    ----------
    mode : str, optional
        "All" (default) - treat single-sided (SS) reads as trans reads
        "Dummy" - fake SS reads proportional to cis reads with the same
        total sum
        "Matrix" - use heatmap only
    filename : str, optional
        Key of the dataset in self.dataDict

    Returns
    -------
    Array of per-bin cis / (cis + trans) ratios.

    Raises
    ------
    ValueError
        If mode is not one of "All", "Dummy", "Matrix".
    """
    data = self.dataDict[filename]
    # True where both bins belong to the same chromosome
    cismap = self.chromosomeIndex[:, None] == self.chromosomeIndex[None, :]
    cissums = np.sum(cismap * data, axis=0)
    allsums = np.sum(data, axis=0)
    mode = mode.lower()
    if mode == "all":
        cissums += self.singlesDict[filename]
        allsums += self.singlesDict[filename]
    elif mode == "dummy":
        sm = np.mean(self.singlesDict[filename])
        fakesm = cissums * sm / np.mean(cissums)
        cissums += fakesm
        allsums += fakesm
    elif mode == "matrix":
        pass
    else:
        # was a bare `raise`, which is itself an error when no
        # exception is active; raise something meaningful instead
        raise ValueError("Unknown mode: %s" % mode)
    return cissums / allsums
class binnedDataAnalysis(binnedData):
    """
    Class containing experimental features and data analysis scripts
    (scaling plots, average trans maps, per-arm correlations, etc.).
    """

    def plotScaling(self, name, label="BLA", color=None, plotUnit=1000000):
        "plots scaling of a heatmap,treating arms separately"
        import matplotlib.pyplot as plt
        data = self.dataDict[name]
        # log-spaced genomic-distance bins, up to the longest chromosome arm
        bins = numutils.logbins(
            2, self.genome.maxChrmArm / self.resolution, 1.17)
        s = np.sum(data, axis=0) > 0
        mask = s[:, None] * s[None, :]
        chroms = []
        masks = []
        for i in xrange(self.chromosomeCount):
            # p-arm submatrix
            beg = self.chromosomeStarts[i]
            end = self.centromerePositions[i]
            chroms.append(data[beg:end, beg:end])
            masks.append(mask[beg:end, beg:end])
            # q-arm submatrix
            beg = self.centromerePositions[i]
            end = self.chromosomeEnds[i]
            chroms.append(data[beg:end, beg:end])
            masks.append(mask[beg:end, beg:end])
        observed = []
        expected = []
        for i in xrange(len(bins) - 1):
            low = bins[i]
            high = bins[i + 1]
            obs = 0
            exp = 0
            for j in xrange(len(chroms)):
                if low > len(chroms[j]):
                    continue
                high2 = min(high, len(chroms[j]))
                # sum reads (observed) and valid bin pairs (expected)
                # over all diagonals in this distance bin
                for k in xrange(low, high2):
                    obs += np.sum(np.diag(chroms[j], k))
                    exp += np.sum(np.diag(masks[j], k))
            observed.append(obs)
            expected.append(exp)
        observed = np.array(observed, float)
        expected = np.array(expected, float)
        values = observed / expected
        bins = np.array(bins, float)
        # bin centers for plotting
        bins2 = 0.5 * (bins[:-1] + bins[1:])
        # normalize so the curve integrates to one in plotUnit units
        norm = np.sum(values * (bins[1:] - bins[:-1]) * (
            self.resolution / float(plotUnit)))
        args = [self.resolution * bins2 / plotUnit, values / (1. * norm)]
        if color is not None:
            args.append(color)
        plt.plot(*args, label=label, linewidth=2)

    def averageTransMap(self, name, **kwargs):
        "plots and returns average inter-chromosomal inter-arm map"
        import matplotlib.pyplot as plt
        from mirnylib.plotting import removeBorder
        data = self.dataDict[name]
        # accumulators for arm-by-arm maps rescaled to a common 80x80 shape
        avarms = np.zeros((80, 80))
        avmasks = np.zeros((80, 80))
        discardCutoff = 10
        for i in xrange(self.chromosomeCount):
            print i
            for j in xrange(self.chromosomeCount):
                # k and l select the p- or q-arm of chromosomes i and j
                for k in [-1, 1]:
                    for l in [-1, 1]:
                        if i == j:
                            continue  # trans (inter-chromosomal) only
                        cenbeg1 = self.chromosomeStarts[i] + \
                            self.genome.cntrStarts[i] / self.resolution
                        cenbeg2 = self.chromosomeStarts[j] + \
                            self.genome.cntrStarts[j] / self.resolution
                        cenend1 = self.chromosomeStarts[i] + \
                            self.genome.cntrEnds[i] / self.resolution
                        cenend2 = self.chromosomeStarts[j] + \
                            self.genome.cntrEnds[j] / self.resolution
                        beg1 = self.chromosomeStarts[i]
                        beg2 = self.chromosomeStarts[j]
                        end1 = self.chromosomeEnds[i]
                        end2 = self.chromosomeEnds[j]
                        # each arm is traversed away from the centromere
                        if k == 1:
                            bx = cenbeg1
                            ex = beg1 - 1
                            dx = -1
                        else:
                            bx = cenend1
                            ex = end1
                            dx = 1
                        if l == 1:
                            by = cenbeg2
                            ey = beg2 - 1
                            dy = -1
                        else:
                            by = cenend2
                            ey = end2
                            dy = 1
                        if abs(bx - ex) < discardCutoff:
                            continue  # arm too short
                        # negative stop indices would wrap around; use None
                        if bx < 0:
                            bx = None
                        if ex < 0:
                            ex = None
                        if abs(by - ey) < discardCutoff:
                            continue
                        if by < 0:
                            by = None
                        if ey < 0:
                            ey = None
                        arms = data[bx:ex:dx, by:ey:dy]
                        assert max(arms.shape) <= self.genome.maxChrmArm / \
                            self.genome.resolution + 2
                        mx = np.sum(arms, axis=0)
                        my = np.sum(arms, axis=1)
                        maskx = mx == 0
                        masky = my == 0
                        # valid bin pairs: both row and column are non-empty
                        mask = (maskx[None, :] + masky[:, None]) == False
                        maskf = np.array(mask, float)
                        mlenx = (np.abs(np.sum(mask, axis=0)) > 1e-20).sum()
                        mleny = (np.abs(np.sum(mask, axis=1)) > 1e-20).sum()
                        if min(mlenx, mleny) < discardCutoff:
                            continue  # too few non-empty bins
                        # rescale the arm-arm map to the common 80x80 shape;
                        # zoomOut should approximately conserve the total sum
                        add = numutils.zoomOut(arms, avarms.shape)
                        assert np.abs((arms.sum() - add.sum(
                            )) / arms.sum()) < 0.02
                        addmask = numutils.zoomOut(maskf, avarms.shape)
                        avarms += add
                        avmasks += addmask
        avarms /= np.mean(avarms)
        data = avarms / avmasks
        data /= np.mean(data)
        plt.imshow(np.log(numutils.trunc(
            data)), cmap="jet", interpolation="nearest", **kwargs)
        removeBorder()
        return np.log(numutils.trunc(data))

    def perArmCorrelation(self, data1, data2, doByArms=[]):
        """does inter-chromosomal spearman correlation
        of two vectors for each chromosomes separately.
        Averages over chromosomes with weight of chromosomal length
        For chromosomes in "doByArms" treats arms as separatre chromosomes
        returns average Spearman r correlation
        """
        cr = 0  # length-weighted sum of |spearman r|
        ln = 0  # total length used as normalization
        for i in xrange(self.chromosomeCount):
            if i in doByArms:
                # p-arm
                beg = self.chromosomeStarts[i]
                end = self.centromerePositions[i]
                if end > beg:
                    cr += (abs(spearmanr(data1[beg:end], data2[beg:end]
                                         )[0])) * (end - beg)
                    ln += (end - beg)
                    print spearmanr(data1[beg:end], data2[beg:end])[0]
                # q-arm
                beg = self.centromerePositions[i]
                end = self.chromosomeEnds[i]
                if end > beg:
                    cr += (abs(spearmanr(data1[beg:end], data2[beg:end]
                                         )[0])) * (end - beg)
                    ln += (end - beg)
                    print spearmanr(data1[beg:end], data2[beg:end])[0]
            else:
                # whole chromosome at once
                beg = self.chromosomeStarts[i]
                end = self.chromosomeEnds[i]
                if end > beg:
                    cr += (abs(spearmanr(data1[beg:end], data2[beg:end]
                                         )[0])) * (end - beg)
                    ln += (end - beg)
        return cr / ln

    def divideOutAveragesPerChromosome(self):
        "divides each interchromosomal map by it's mean value"
        mask2D = self._giveMask2D()
        for chrom1 in xrange(self.chromosomeCount):
            for chrom2 in xrange(self.chromosomeCount):
                for i in self.dataDict.keys():
                    value = self.dataDict[i]
                    submatrix = value[self.chromosomeStarts[chrom1]:
                                      self.chromosomeEnds[chrom1],
                                      self.chromosomeStarts[chrom2]:
                                      self.chromosomeEnds[chrom2]]
                    masksum = np.sum(
                        mask2D[self.chromosomeStarts[chrom1]:
                               self.chromosomeEnds[chrom1],
                               self.chromosomeStarts[chrom2]:
                               self.chromosomeEnds[chrom2]])
                    valuesum = np.sum(submatrix)
                    # mean over valid (non-masked) bins only
                    mean = valuesum / masksum
                    # in-place division updates self.dataDict[i] as well
                    submatrix /= mean

    def interchromosomalValues(self, filename="GM-all", returnAll=False):
        """returns average inter-chromosome-interaction values,
        ordered always the same way"""
        # unique integer label for each (chrom1, chrom2) pair
        values = self.chromosomeIndex[:, None] + \
            self.chromosomeCount * self.chromosomeIndex[None, :]
        # relabel cis pairs with one sentinel so they can be excluded
        values[self.chromosomeIndex[:, None] == self.chromosomeIndex[None,
               :]] = self.chromosomeCount * self.chromosomeCount - 1
        #mat_img(values)
        # NOTE(review): [1:-1] drops the largest label (the cis
        # sentinel) and also the smallest one -- confirm the latter
        # is intended.
        uv = np.sort(np.unique(values))[1:-1]
        # per-label sums of interaction values and pair counts
        probs = np.bincount(
            values.ravel(), weights=self.dataDict[filename].ravel())
        counts = np.bincount(values.ravel())
        if returnAll == False:
            return probs[uv] / counts[uv]
        else:
            probs[self.chromosomeCount * self.chromosomeCount - 1] = 0
            values = probs / counts
            values[counts == 0] = 0
            #mat_img(values.reshape((22,22)))
            return values.reshape((self.chromosomeCount, self.chromosomeCount))
class experimentalBinnedData(binnedData):
    "Contains some poorly-implemented new features"

    def projectOnEigenvalues(self, eigenvectors=[0]):
        """
        Calculates projection of the data on a set of eigenvectors.
        This is used to calculate heatmaps, reconstructed from eigenvectors.

        Parameters
        ----------
        eigenvectors : list of non-negative ints, optional
            Zero-based indices of eigenvectors, to project onto
            By default projects on the first eigenvector

        Returns
        -------
        Puts resulting data in dataDict under DATANAME_projected key
        """
        for name in self.dataDict.keys():
            if name not in self.EigDict:
                raise RuntimeError("Calculate eigenvectors first!")
            PCs = self.EigDict[name]
            if max(eigenvectors) >= len(PCs):
                raise RuntimeError("Not enough eigenvectors."
                                   "Increase numPCs in doEig()")
            PCs = PCs[eigenvectors]
            eigenvalues = self.eigEigenvalueDict[name][eigenvectors]
            # sum of eigenvalue_i * v_i v_i^T over selected eigenvectors
            proj = reduce(lambda x, y: x + y,
                          [PCs[i][:, None] * PCs[i][None, :] * \
                           eigenvalues[i] for i in xrange(len(PCs))])
            mask = PCs[0] != 0
            mask = mask[:, None] * mask[None, :]  # mask of non-zero elements
            data = self.dataDict[name]
            datamean = np.mean(data[mask])
            # shift the projection by the mean of the observed data
            proj[mask] += datamean
            self.dataDict[name + "_projected"] = proj

    def emulateCis(self):
        """if you want to have fun creating syntetic data,
        this emulates cis contacts. adjust cis/trans ratio in the C code"""
        from scipy import weave
        transmap = self.chromosomeIndex[:,
                   None] == self.chromosomeIndex[None, :]
        # bare reference: the variable is only used by the inlined C code
        len(transmap)
        for i in self.dataDict.keys():
            data = self.dataDict[i] * 1.
            N = len(data)
            # bare reference silences "unused variable" warnings; weave
            # passes N to the C code below
            N
            code = r"""
            #line 1427 "binnedData.py"
            using namespace std;
            for (int i = 0; i < N; i++)
            {
                for (int j = 0; j<N; j++)
                {
                    if (transmap[N * i + j] == 1)
                    {
                        data[N * i + j] = data[N * i +j] * 300 /(abs(i-j) + \
                        0.5);
                    }
                }
            }
            """
            support = """
            #include <math.h>
            """
            weave.inline(code, ['transmap', 'data', "N"],
                         extra_compile_args=['-march=native -malign-double'],
                         support_code=support)
            self.dataDict[i] = data
        self.removedCis = False
        self.fakedCis = False

    def fakeMissing(self):
        """fakes megabases that have no reads. For cis reads fakes with cis
        reads at the same distance. For trans fakes with random trans read
        at the same diagonal.
        """
        from scipy import weave
        for i in self.dataDict.keys():
            data = self.dataDict[i] * 1.
            sm = np.sum(data, axis=0) > 0
            mask = sm[:, None] * sm[None, :]
            transmask = np.array(self.chromosomeIndex[:, None]
                                 == self.chromosomeIndex[None, :], int)
            #mat_img(transmask)
            N = len(data)
            N, transmask, mask  # to remove warning
            # For each empty bin pair (i, j), try up to 401 random bin
            # pairs at the same diagonal offset; prefer pairs with the
            # same cis/trans status (relaxed after 200 attempts).
            code = r"""
            #line 1467 "binnedData.py"
            using namespace std;
            for (int i = 0; i < N; i++)
            {
                for (int j = i; j<N; j++)
                {
                    if ((MASK2(i,j) == 0) )
                    {
                        for (int ss = 0; ss < 401; ss++)
                        {
                            int k = 0;
                            int s = rand() % (N - (j-i));
                            if ((mask[s * N + s + j - i] == 1) &&\
                            ((transmask[s * N + s + j - i] ==\
                            transmask[i * N + j]) || (ss > 200)) )
                            {
                                data[i * N + j] = data[s * N + s + j - i];
                                data[j * N + i] = data[s * N + s + j - i];
                                break;
                            }
                            if (ss == 400) {printf("Cannot fake one point... \
                            skipping %d %d \n",i,j);}
                        }
                    }
                }
            }
            """
            support = """
            #include <math.h>
            """
            # alternate faking and correction a few times to converge
            for _ in xrange(5):
                weave.inline(code, ['transmask', 'mask', 'data', "N"],
                             extra_compile_args=['-march=native'
                                                 ' -malign-double -O3'],
                             support_code=support)
                data = correct(data)
            self.dataDict[i] = data
            #mat_img(self.dataDict[i]>0)

    def iterativeCorrectByTrans(self, names=None):
        """performs iterative correction by trans data only, corrects cis also

        Parameters
        ----------
        names : list of str or None, optional
            Keys of datasets to be corrected. By default, all are corrected.
        """
        self.appliedOperations["Corrected"] = True
        if names is None:
            names = self.dataDict.keys()
        self.transmap = self.chromosomeIndex[:,
                        None] != self.chromosomeIndex[None, :]
        #mat_img(self.transmap)
        for i in names:
            data = self.dataDict[i]
            self.dataDict[i], self.biasDict[i] = \
                numutils.ultracorrectSymmetricByMask(data, self.transmap, M=None)
            # NOTE(review): bare except silently ignores datasets with
            # no singles data -- confirm this best-effort behavior
            try:
                self.singlesDict[i] /= self.biasDict[i]
            except:
                print "bla"

    def loadWigFile(self, filenames, label, control=None,
                    wigFileType="Auto", functionToAverage=np.log, internalResolution=1000):
        # Parses wig file(s) per chromosome and stores the concatenated
        # genome-wide track under the given label.
        byChromosome = self.genome.parseAnyWigFile(filenames=filenames,
                                                   control=control,
                                                   wigFileType=wigFileType,
                                                   functionToAverage=functionToAverage,
                                                   internalResolution=internalResolution)
        self.trackDict[label] = np.concatenate(byChromosome)

    def loadErezEigenvector1MB(self, erezFolder):
        "Loads Erez chromatin domain eigenvector for HindIII"
        if self.resolution != 1000000:
            raise StandardError("Erez eigenvector is only at 1MB resolution")
        if self.genome.folderName != "hg18":
            raise StandardError("Erez eigenvector is for hg18 only!")
        folder = os.path.join(erezFolder, "GM-combined.ctgDATA1.ctgDATA1."
                              "1000000bp.hm.eigenvector.tab")
        folder2 = os.path.join(erezFolder, "GM-combined.ctgDATA1.ctgDATA1."
                               "1000000bp.hm.eigenvector2.tab")
        eigenvector = np.zeros(self.genome.numBins, float)
        for chrom in range(1, 24):
            filename = folder.replace("DATA1", str(chrom))
            # chromosomes 4 and 5 use the second eigenvector file
            if chrom in [4, 5]:
                filename = folder2.replace("DATA1", str(chrom))
            # each row: (index, bin, value)
            mydata = np.array([[float(j) for j in i.split(
                )] for i in open(filename).readlines()])
            eigenvector[self.genome.chrmStartsBinCont[chrom -
                        1] + np.array(mydata[:, 1], int)] = mydata[:, 2]
        self.trackDict["Erez"] = eigenvector

    def loadTanayDomains(self):
        "domains, extracted from Tanay paper image"
        if self.genome.folderName != "hg18":
            raise StandardError("Tanay domains work only with hg18")
        # One line per chromosome; ";" separates arms; each entry is
        # "domain - length" where domain is 0/1/2 or "x" for unknown.
        data = """0 - 17, 1 - 13.5, 2 - 6.5, 0 - 2, 2 - 2; x - 6.5, 0 - 6,\
        1 - 13.5, 0 - 1.5, 1 - 14.5
        1 - 8.5, 0 - 2.5, 1 - 14, 2 - 6; 0 - 1.5, 2 - 11.5, 1 - 35
        1 - 14, 0-6, 2 - 11; 2 - 4.5, 1 - 5, 0 - 4, 1 -20.5, 0 - 2
        0 - 3, 2 - 14; 2 - 5, 1 - 42
        2 - 16; 2 - 7, 0 - 3, 1 - 18.5, 0 - 1, 1 - 13, 0 - 2.5
        0 - 2, 1 - 6.5, 0 - 7.5, 2 - 4; 2 - 6, 1 - 31
        0 - 2, 1 - 11, 2 - 7; 2 - 7.5, 1 - 5, 0 - 3, 1 - 19
        2 - 9.5, 0 - 1, 2 - 5; 2 - 4, 1 - 27.5, 0 - 2.5
        2 - 11.5, 0 - 2.5, x - 2.5; x - 5, 2 - 8, 0 - 3.5, 1 - 9, 0 - 6
        2 - 13.5; 2 - 9, 0 - 3, 1 - 6, 0 - 3.5, 1 - 10.5
        0 - 3.5, 2 - 15; 2 - 1, 0 - 7.5, 1 - 13, 0 - 1.5, 1 - 4
        0 - 4, 2 - 8; 2 - 2, 0 - 5, 2 - 2.5, 1 - 13, 0 - 6.5, 1 - 3.5
        x - 5.5; 2 - 8.5, 0 - 1, 2 - 7, 1 - 16
        x - 5.5; 2 - 14.5, 0 - 6, 2 - 3, 1 - 2.5, 2 - 1, 0 - 3
        x - 5.5; 2 - 6, 0 - 3.5, 2 - 1.5, 0 - 11.5, 2 - 5.5
        0 - 11, 2 - 1; x - 2.5, 2 - 6.5, 0 - 3, 2 - 2, 0 - 3.5
        0 - 4, 2 - 1.5, 0 - 1.5; 0 - 19
        2 - 5; 2 - 20
        0 - 9.5, x - 1.5; x - 1, 2 - 2, 0 - 8.5
        0 - 2, 2 - 7; 0 - 8, 2 - 2, 0 - 1
        x - 0.5; 2 - 8.5, 0 - 3
        x - 4; 0 -12
        x - 1.5, 1 - 13, 2 - 5.5; 2 - 2, 1 - 29"""
        chroms = [i.split(";") for i in data.split("\n")]
        result = []
        for chrom in chroms:
            result.append([])
            cur = result[-1]
            for arm in chrom:
                for enrty in arm.split(","):
                    spentry = enrty.split("-")
                    if "x" in spentry[0]:
                        value = -1  # unknown domain
                    else:
                        value = int(spentry[0])
                    # two half-unit entries per unit of length
                    cur += ([value] * int(2 * float(spentry[1])))
                # unknown gap at the centromere / chromosome end
                cur += [-1] * 2
        #lenses = [len(i) for i in result]
        domains = np.zeros(self.genome.numBins, int)
        for i in xrange(self.genome.chrmCount):
            # stretch the per-chromosome domain list over its bins
            for j in xrange((self.genome.chrmLens[i] / self.resolution)):
                domains[self.genome.chrmStartsBinCont[i] + j] = \
                    result[i][(j * len(result[i]) / ((self.genome.chrmLens[i] /
                               self.resolution)))]
        self.trackDict['TanayDomains'] = domains
| bsd-3-clause |
tequa/ammisoft | ammimain/WinPython-64bit-2.7.13.1Zero/python-2.7.13.amd64/Lib/site-packages/mpl_toolkits/axes_grid1/colorbar.py | 10 | 27874 | '''
Colorbar toolkit with two classes and a function:
:class:`ColorbarBase`
the base class with full colorbar drawing functionality.
It can be used as-is to make a colorbar for a given colormap;
a mappable object (e.g., image) is not needed.
:class:`Colorbar`
the derived class for use with images or contour plots.
:func:`make_axes`
a function for resizing an axes and adding a second axes
suitable for a colorbar
The :meth:`~matplotlib.figure.Figure.colorbar` method uses :func:`make_axes`
and :class:`Colorbar`; the :func:`~matplotlib.pyplot.colorbar` function
is a thin wrapper over :meth:`~matplotlib.figure.Figure.colorbar`.
'''
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange, zip
import numpy as np
import matplotlib as mpl
import matplotlib.colors as colors
import matplotlib.cm as cm
from matplotlib import docstring
import matplotlib.ticker as ticker
import matplotlib.cbook as cbook
import matplotlib.collections as collections
import matplotlib.contour as contour
from matplotlib.path import Path
from matplotlib.patches import PathPatch
from matplotlib.transforms import Bbox
make_axes_kw_doc = '''
============= ====================================================
Property Description
============= ====================================================
*orientation* vertical or horizontal
*fraction* 0.15; fraction of original axes to use for colorbar
*pad* 0.05 if vertical, 0.15 if horizontal; fraction
of original axes between colorbar and new image axes
*shrink* 1.0; fraction by which to shrink the colorbar
*aspect* 20; ratio of long to short dimensions
============= ====================================================
'''
colormap_kw_doc = '''
=========== ====================================================
Property Description
=========== ====================================================
*extend* [ 'neither' | 'both' | 'min' | 'max' ]
If not 'neither', make pointed end(s) for out-of-
range values. These are set for a given colormap
using the colormap set_under and set_over methods.
*spacing* [ 'uniform' | 'proportional' ]
Uniform spacing gives each discrete color the same
space; proportional makes the space proportional to
the data interval.
*ticks* [ None | list of ticks | Locator object ]
If None, ticks are determined automatically from the
input.
*format* [ None | format string | Formatter object ]
If None, the
:class:`~matplotlib.ticker.ScalarFormatter` is used.
If a format string is given, e.g., '%.3f', that is
used. An alternative
:class:`~matplotlib.ticker.Formatter` object may be
given instead.
*drawedges* [ False | True ] If true, draw lines at color
boundaries.
=========== ====================================================
The following will probably be useful only in the context of
indexed colors (that is, when the mappable has norm=NoNorm()),
or other unusual circumstances.
============ ===================================================
Property Description
============ ===================================================
*boundaries* None or a sequence
*values* None or a sequence which must be of length 1 less
than the sequence of *boundaries*. For each region
delimited by adjacent entries in *boundaries*, the
color mapped to the corresponding value in values
will be used.
============ ===================================================
'''
colorbar_doc = '''
Add a colorbar to a plot.
Function signatures for the :mod:`~matplotlib.pyplot` interface; all
but the first are also method signatures for the
:meth:`~matplotlib.figure.Figure.colorbar` method::
colorbar(**kwargs)
colorbar(mappable, **kwargs)
colorbar(mappable, cax=cax, **kwargs)
colorbar(mappable, ax=ax, **kwargs)
arguments:
*mappable*
the :class:`~matplotlib.image.Image`,
:class:`~matplotlib.contour.ContourSet`, etc. to
which the colorbar applies; this argument is mandatory for the
:meth:`~matplotlib.figure.Figure.colorbar` method but optional for the
:func:`~matplotlib.pyplot.colorbar` function, which sets the
default to the current image.
keyword arguments:
*cax*
None | axes object into which the colorbar will be drawn
*ax*
None | parent axes object from which space for a new
colorbar axes will be stolen
Additional keyword arguments are of two kinds:
axes properties:
%s
colorbar properties:
%s
If *mappable* is a :class:`~matplotlib.contours.ContourSet`, its *extend*
kwarg is included automatically.
Note that the *shrink* kwarg provides a simple way to keep a vertical
colorbar, for example, from being taller than the axes of the mappable
to which the colorbar is attached; but it is a manual method requiring
some trial and error. If the colorbar is too tall (or a horizontal
colorbar is too wide) use a smaller value of *shrink*.
For more precise control, you can manually specify the positions of
the axes objects in which the mappable and the colorbar are drawn. In
this case, do not use any of the axes properties kwargs.
It is known that some vector graphics viewer (svg and pdf) renders white gaps
between segments of the colorbar. This is due to bugs in the viewers not
matplotlib. As a workaround the colorbar can be rendered with overlapping
segments::
cbar = colorbar()
cbar.solids.set_edgecolor("face")
draw()
However this has negative consequences in other circumstances. Particularly with
semi transparent images (alpha < 1) and colorbar extensions and is not enabled
by default see (issue #1188).
returns:
:class:`~matplotlib.colorbar.Colorbar` instance; see also its base class,
:class:`~matplotlib.colorbar.ColorbarBase`. Call the
:meth:`~matplotlib.colorbar.ColorbarBase.set_label` method
to label the colorbar.
The transData of the *cax* is adjusted so that the limits in the
longest axis actually corresponds to the limits in colorbar range. On
the other hand, the shortest axis has a data limits of [1,2], whose
unconventional value is to prevent underflow when log scale is used.
''' % (make_axes_kw_doc, colormap_kw_doc)
docstring.interpd.update(colorbar_doc=colorbar_doc)
class CbarAxesLocator(object):
    """
    CbarAxesLocator is a axes_locator for colorbar axes. It adjust the
    position of the axes to make a room for extended ends, i.e., the
    extended ends are located outside the axes area.
    """

    def __init__(self, locator=None, extend="neither", orientation="vertical"):
        """
        *locator* : the bbox returned from the locator is used as a
            initial axes location. If None, axes.bbox is used.

        *extend* : same as in ColorbarBase
        *orientation* : same as in ColorbarBase
        """
        self._locator = locator
        # fraction of the axes length devoted to each pointed end
        # (attribute name keeps the historical misspelling)
        self.extesion_fraction = 0.05
        self.extend = extend
        self.orientation = orientation

    def get_original_position(self, axes, renderer):
        """
        get the original position of the axes.
        """
        if self._locator is None:
            bbox = axes.get_position(original=True)
        else:
            bbox = self._locator(axes, renderer)
        return bbox

    def get_end_vertices(self):
        """
        return a tuple of two vertices for the colorbar extended ends.
        The first vertices is for the minimum end, and the second is for
        the maximum end.
        """
        # Note that concatenating two vertices needs to make a
        # vertices for the frame.
        extesion_fraction = self.extesion_fraction
        # cory compensates the height lost to the two pointed ends
        corx = extesion_fraction*2.
        cory = 1./(1. - corx)
        # work in unit-square coordinates; swapped later if horizontal
        x1, y1, w, h = 0, 0, 1, 1
        x2, y2 = x1 + w, y1 + h
        dw, dh = w*extesion_fraction, h*extesion_fraction*cory
        if self.extend in ["min", "both"]:
            # triangular point below the bottom edge
            bottom = [(x1, y1),
                      (x1+w/2., y1-dh),
                      (x2, y1)]
        else:
            bottom = [(x1, y1),
                      (x2, y1)]
        if self.extend in ["max", "both"]:
            # triangular point above the top edge
            top = [(x2, y2),
                   (x1+w/2., y2+dh),
                   (x1, y2)]
        else:
            top = [(x2, y2),
                   (x1, y2)]
        if self.orientation == "horizontal":
            # swap x and y for a horizontal colorbar
            bottom = [(y,x) for (x,y) in bottom]
            top = [(y,x) for (x,y) in top]
        return bottom, top

    def get_path_patch(self):
        """
        get the path for axes patch
        """
        end1, end2 = self.get_end_vertices()
        # close the outline by repeating the first vertex
        verts = [] + end1 + end2 + end1[:1]
        return Path(verts)

    def get_path_ends(self):
        """
        get the paths for extended ends
        """
        end1, end2 = self.get_end_vertices()
        return Path(end1), Path(end2)

    def __call__(self, axes, renderer):
        """
        Return the adjusted position of the axes
        """
        bbox0 = self.get_original_position(axes, renderer)
        bbox = bbox0
        x1, y1, w, h = bbox.bounds
        extesion_fraction = self.extesion_fraction
        dw, dh = w*extesion_fraction, h*extesion_fraction
        # shrink the axes so the pointed ends fit inside the
        # originally allotted bbox
        if self.extend in ["min", "both"]:
            if self.orientation == "horizontal":
                x1 = x1 + dw
            else:
                y1 = y1+dh
        if self.extend in ["max", "both"]:
            if self.orientation == "horizontal":
                w = w-2*dw
            else:
                h = h-2*dh
        return Bbox.from_bounds(x1, y1, w, h)
class ColorbarBase(cm.ScalarMappable):
'''
Draw a colorbar in an existing axes.
This is a base class for the :class:`Colorbar` class, which is the
basis for the :func:`~matplotlib.pyplot.colorbar` method and pylab
function.
It is also useful by itself for showing a colormap. If the *cmap*
kwarg is given but *boundaries* and *values* are left as None,
then the colormap will be displayed on a 0-1 scale. To show the
under- and over-value colors, specify the *norm* as::
colors.Normalize(clip=False)
To show the colors versus index instead of on the 0-1 scale,
use::
norm=colors.NoNorm.
Useful attributes:
:attr:`ax`
the Axes instance in which the colorbar is drawn
:attr:`lines`
a LineCollection if lines were drawn, otherwise None
:attr:`dividers`
a LineCollection if *drawedges* is True, otherwise None
Useful public methods are :meth:`set_label` and :meth:`add_lines`.
'''
def __init__(self, ax, cmap=None,
norm=None,
alpha=1.0,
values=None,
boundaries=None,
orientation='vertical',
extend='neither',
spacing='uniform', # uniform or proportional
ticks=None,
format=None,
drawedges=False,
filled=True,
):
self.ax = ax
if cmap is None: cmap = cm.get_cmap()
if norm is None: norm = colors.Normalize()
self.alpha = alpha
cm.ScalarMappable.__init__(self, cmap=cmap, norm=norm)
self.values = values
self.boundaries = boundaries
self.extend = extend
self.spacing = spacing
self.orientation = orientation
self.drawedges = drawedges
self.filled = filled
# artists
self.solids = None
self.lines = None
self.dividers = None
self.extension_patch1 = None
self.extension_patch2 = None
if orientation == "vertical":
self.cbar_axis = self.ax.yaxis
else:
self.cbar_axis = self.ax.xaxis
if format is None:
if isinstance(self.norm, colors.LogNorm):
# change both axis for proper aspect
self.ax.set_xscale("log")
self.ax.set_yscale("log")
self.cbar_axis.set_minor_locator(ticker.NullLocator())
formatter = ticker.LogFormatter()
else:
formatter = None
elif cbook.is_string_like(format):
formatter = ticker.FormatStrFormatter(format)
else:
formatter = format # Assume it is a Formatter
if formatter is None:
formatter = self.cbar_axis.get_major_formatter()
else:
self.cbar_axis.set_major_formatter(formatter)
if cbook.iterable(ticks):
self.cbar_axis.set_ticks(ticks)
elif ticks is not None:
self.cbar_axis.set_major_locator(ticks)
else:
self._select_locator(formatter)
self._config_axes()
self.update_artists()
self.set_label_text('')
def _get_colorbar_limits(self):
"""
initial limits for colorbar range. The returned min, max values
will be used to create colorbar solid(?) and etc.
"""
if self.boundaries is not None:
C = self.boundaries
if self.extend in ["min", "both"]:
C = C[1:]
if self.extend in ["max", "both"]:
C = C[:-1]
return min(C), max(C)
else:
return self.get_clim()
def _config_axes(self):
'''
Adjust the properties of the axes to be adequate for colorbar display.
'''
ax = self.ax
axes_locator = CbarAxesLocator(ax.get_axes_locator(),
extend=self.extend,
orientation=self.orientation)
ax.set_axes_locator(axes_locator)
# override the get_data_ratio for the aspect works.
def _f():
return 1.
ax.get_data_ratio = _f
ax.get_data_ratio_log = _f
ax.set_frame_on(True)
ax.set_navigate(False)
self.ax.set_autoscalex_on(False)
self.ax.set_autoscaley_on(False)
if self.orientation == 'horizontal':
ax.xaxis.set_label_position('bottom')
ax.set_yticks([])
else:
ax.set_xticks([])
ax.yaxis.set_label_position('right')
ax.yaxis.set_ticks_position('right')
def update_artists(self):
"""
Update the colorbar associated artists, *filled* and
*ends*. Note that *lines* are not updated. This needs to be
called whenever clim of associated image changes.
"""
self._process_values()
self._add_ends()
X, Y = self._mesh()
if self.filled:
C = self._values[:,np.newaxis]
self._add_solids(X, Y, C)
ax = self.ax
vmin, vmax = self._get_colorbar_limits()
if self.orientation == 'horizontal':
ax.set_ylim(1, 2)
ax.set_xlim(vmin, vmax)
else:
ax.set_xlim(1, 2)
ax.set_ylim(vmin, vmax)
def _add_ends(self):
"""
Create patches from extended ends and add them to the axes.
"""
del self.extension_patch1
del self.extension_patch2
path1, path2 = self.ax.get_axes_locator().get_path_ends()
fc=mpl.rcParams['axes.facecolor']
ec=mpl.rcParams['axes.edgecolor']
linewidths=0.5*mpl.rcParams['axes.linewidth']
self.extension_patch1 = PathPatch(path1,
fc=fc, ec=ec, lw=linewidths,
zorder=2.,
transform=self.ax.transAxes,
clip_on=False)
self.extension_patch2 = PathPatch(path2,
fc=fc, ec=ec, lw=linewidths,
zorder=2.,
transform=self.ax.transAxes,
clip_on=False)
self.ax.add_artist(self.extension_patch1)
self.ax.add_artist(self.extension_patch2)
def _set_label_text(self):
"""
set label.
"""
self.cbar_axis.set_label_text(self._label, **self._labelkw)
def set_label_text(self, label, **kw):
'''
Label the long axis of the colorbar
'''
self._label = label
self._labelkw = kw
self._set_label_text()
def _edges(self, X, Y):
'''
Return the separator line segments; helper for _add_solids.
'''
N = X.shape[0]
# Using the non-array form of these line segments is much
# simpler than making them into arrays.
if self.orientation == 'vertical':
return [list(zip(X[i], Y[i])) for i in xrange(1, N-1)]
else:
return [list(zip(Y[i], X[i])) for i in xrange(1, N-1)]
def _add_solids(self, X, Y, C):
'''
Draw the colors using :meth:`~matplotlib.axes.Axes.pcolormesh`;
optionally add separators.
'''
## Change to pcolorfast after fixing bugs in some backends...
if self.extend in ["min", "both"]:
cc = self.to_rgba([C[0][0]])
self.extension_patch1.set_fc(cc[0])
X, Y, C = X[1:], Y[1:], C[1:]
if self.extend in ["max", "both"]:
cc = self.to_rgba([C[-1][0]])
self.extension_patch2.set_fc(cc[0])
X, Y, C = X[:-1], Y[:-1], C[:-1]
if self.orientation == 'vertical':
args = (X, Y, C)
else:
args = (np.transpose(Y), np.transpose(X), np.transpose(C))
kw = {'cmap':self.cmap, 'norm':self.norm,
'shading':'flat', 'alpha':self.alpha,
}
del self.solids
del self.dividers
col = self.ax.pcolormesh(*args, **kw)
self.solids = col
if self.drawedges:
self.dividers = collections.LineCollection(self._edges(X,Y),
colors=(mpl.rcParams['axes.edgecolor'],),
linewidths=(0.5*mpl.rcParams['axes.linewidth'],),
)
self.ax.add_collection(self.dividers)
else:
self.dividers = None
def add_lines(self, levels, colors, linewidths):
'''
Draw lines on the colorbar. It deletes preexisting lines.
'''
del self.lines
N = len(levels)
x = np.array([1.0, 2.0])
X, Y = np.meshgrid(x,levels)
if self.orientation == 'vertical':
xy = [list(zip(X[i], Y[i])) for i in xrange(N)]
else:
xy = [list(zip(Y[i], X[i])) for i in xrange(N)]
col = collections.LineCollection(xy, linewidths=linewidths,
)
self.lines = col
col.set_color(colors)
self.ax.add_collection(col)
def _select_locator(self, formatter):
'''
select a suitable locator
'''
if self.boundaries is None:
if isinstance(self.norm, colors.NoNorm):
nv = len(self._values)
base = 1 + int(nv/10)
locator = ticker.IndexLocator(base=base, offset=0)
elif isinstance(self.norm, colors.BoundaryNorm):
b = self.norm.boundaries
locator = ticker.FixedLocator(b, nbins=10)
elif isinstance(self.norm, colors.LogNorm):
locator = ticker.LogLocator()
else:
locator = ticker.MaxNLocator(nbins=5)
else:
b = self._boundaries[self._inside]
locator = ticker.FixedLocator(b) #, nbins=10)
self.cbar_axis.set_major_locator(locator)
def _process_values(self, b=None):
'''
Set the :attr:`_boundaries` and :attr:`_values` attributes
based on the input boundaries and values. Input boundaries
can be *self.boundaries* or the argument *b*.
'''
if b is None:
b = self.boundaries
if b is not None:
self._boundaries = np.asarray(b, dtype=float)
if self.values is None:
self._values = 0.5*(self._boundaries[:-1]
+ self._boundaries[1:])
if isinstance(self.norm, colors.NoNorm):
self._values = (self._values + 0.00001).astype(np.int16)
return
self._values = np.array(self.values)
return
if self.values is not None:
self._values = np.array(self.values)
if self.boundaries is None:
b = np.zeros(len(self.values)+1, 'd')
b[1:-1] = 0.5*(self._values[:-1] - self._values[1:])
b[0] = 2.0*b[1] - b[2]
b[-1] = 2.0*b[-2] - b[-3]
self._boundaries = b
return
self._boundaries = np.array(self.boundaries)
return
# Neither boundaries nor values are specified;
# make reasonable ones based on cmap and norm.
if isinstance(self.norm, colors.NoNorm):
b = self._uniform_y(self.cmap.N+1) * self.cmap.N - 0.5
v = np.zeros((len(b)-1,), dtype=np.int16)
v = np.arange(self.cmap.N, dtype=np.int16)
self._boundaries = b
self._values = v
return
elif isinstance(self.norm, colors.BoundaryNorm):
b = np.array(self.norm.boundaries)
v = np.zeros((len(b)-1,), dtype=float)
bi = self.norm.boundaries
v = 0.5*(bi[:-1] + bi[1:])
self._boundaries = b
self._values = v
return
else:
b = self._uniform_y(self.cmap.N+1)
self._process_values(b)
def _uniform_y(self, N):
'''
Return colorbar data coordinates for *N* uniformly
spaced boundaries.
'''
vmin, vmax = self._get_colorbar_limits()
if isinstance(self.norm, colors.LogNorm):
y = np.logspace(np.log10(vmin), np.log10(vmax), N)
else:
y = np.linspace(vmin, vmax, N)
return y
def _mesh(self):
'''
Return X,Y, the coordinate arrays for the colorbar pcolormesh.
These are suitable for a vertical colorbar; swapping and
transposition for a horizontal colorbar are done outside
this function.
'''
x = np.array([1.0, 2.0])
if self.spacing == 'uniform':
y = self._uniform_y(len(self._boundaries))
else:
y = self._boundaries
self._y = y
X, Y = np.meshgrid(x,y)
return X, Y
def set_alpha(self, alpha):
"""
set alpha value.
"""
self.alpha = alpha
class Colorbar(ColorbarBase):
def __init__(self, ax, mappable, **kw):
mappable.autoscale_None() # Ensure mappable.norm.vmin, vmax
# are set when colorbar is called,
# even if mappable.draw has not yet
# been called. This will not change
# vmin, vmax if they are already set.
self.mappable = mappable
kw['cmap'] = mappable.cmap
kw['norm'] = mappable.norm
kw['alpha'] = mappable.get_alpha()
if isinstance(mappable, contour.ContourSet):
CS = mappable
kw['boundaries'] = CS._levels
kw['values'] = CS.cvalues
kw['extend'] = CS.extend
#kw['ticks'] = CS._levels
kw.setdefault('ticks', ticker.FixedLocator(CS.levels, nbins=10))
kw['filled'] = CS.filled
ColorbarBase.__init__(self, ax, **kw)
if not CS.filled:
self.add_lines(CS)
else:
ColorbarBase.__init__(self, ax, **kw)
def add_lines(self, CS):
'''
Add the lines from a non-filled
:class:`~matplotlib.contour.ContourSet` to the colorbar.
'''
if not isinstance(CS, contour.ContourSet) or CS.filled:
raise ValueError('add_lines is only for a ContourSet of lines')
tcolors = [c[0] for c in CS.tcolors]
tlinewidths = [t[0] for t in CS.tlinewidths]
# The following was an attempt to get the colorbar lines
# to follow subsequent changes in the contour lines,
# but more work is needed: specifically, a careful
# look at event sequences, and at how
# to make one object track another automatically.
#tcolors = [col.get_colors()[0] for col in CS.collections]
#tlinewidths = [col.get_linewidth()[0] for lw in CS.collections]
#print 'tlinewidths:', tlinewidths
ColorbarBase.add_lines(self, CS.levels, tcolors, tlinewidths)
def update_bruteforce(self, mappable):
"""
Update the colorbar artists to reflect the change of the
associated mappable.
"""
self.update_artists()
if isinstance(mappable, contour.ContourSet):
if not mappable.filled:
self.add_lines(mappable)
@docstring.Substitution(make_axes_kw_doc)
def make_axes(parent, **kw):
'''
Resize and reposition a parent axes, and return a child
axes suitable for a colorbar::
cax, kw = make_axes(parent, **kw)
Keyword arguments may include the following (with defaults):
*orientation*
'vertical' or 'horizontal'
%s
All but the first of these are stripped from the input kw set.
Returns (cax, kw), the child axes and the reduced kw dictionary.
'''
orientation = kw.setdefault('orientation', 'vertical')
fraction = kw.pop('fraction', 0.15)
shrink = kw.pop('shrink', 1.0)
aspect = kw.pop('aspect', 20)
#pb = transforms.PBox(parent.get_position())
pb = parent.get_position(original=True).frozen()
if orientation == 'vertical':
pad = kw.pop('pad', 0.05)
x1 = 1.0-fraction
pb1, pbx, pbcb = pb.splitx(x1-pad, x1)
pbcb = pbcb.shrunk(1.0, shrink).anchored('C', pbcb)
anchor = (0.0, 0.5)
panchor = (1.0, 0.5)
else:
pad = kw.pop('pad', 0.15)
pbcb, pbx, pb1 = pb.splity(fraction, fraction+pad)
pbcb = pbcb.shrunk(shrink, 1.0).anchored('C', pbcb)
aspect = 1.0/aspect
anchor = (0.5, 1.0)
panchor = (0.5, 0.0)
parent.set_position(pb1)
parent.set_anchor(panchor)
fig = parent.get_figure()
cax = fig.add_axes(pbcb)
cax.set_aspect(aspect, anchor=anchor, adjustable='box')
return cax, kw
def colorbar(mappable, cax=None, ax=None, **kw):
"""
Create a colorbar for a ScalarMappable instance.
Documentation for the pylab thin wrapper:
%(colorbar_doc)s
"""
import matplotlib.pyplot as plt
if ax is None:
ax = plt.gca()
if cax is None:
cax, kw = make_axes(ax, **kw)
cax._hold = True
cb = Colorbar(cax, mappable, **kw)
def on_changed(m):
cb.set_cmap(m.get_cmap())
cb.set_clim(m.get_clim())
cb.update_bruteforce(m)
cbid = mappable.callbacksSM.connect('changed', on_changed)
mappable.colorbar = cb
ax.figure.sca(ax)
return cb
| bsd-3-clause |
acmaheri/sms-tools | lectures/6-Harmonic-model/plots-code/f0-TWM-errors-1.py | 2 | 3586 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, triang, blackman
import math
import sys, os, functools, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import dftModel as DFT
import utilFunctions as UF
def TWM (pfreq, pmag, maxnpeaks, f0c):
# Two-way mismatch algorithm for f0 detection (by Beauchamp&Maher)
# pfreq, pmag: peak frequencies in Hz and magnitudes, maxnpeaks: maximum number of peaks used
# f0cand: frequencies of f0 candidates
# returns f0: fundamental frequency detected
p = 0.5 # weighting by frequency value
q = 1.4 # weighting related to magnitude of peaks
r = 0.5 # scaling related to magnitude of peaks
rho = 0.33 # weighting of MP error
Amax = max(pmag) # maximum peak magnitude
harmonic = np.matrix(f0c)
ErrorPM = np.zeros(harmonic.size) # initialize PM errors
MaxNPM = min(maxnpeaks, pfreq.size)
for i in range(0, MaxNPM) : # predicted to measured mismatch error
difmatrixPM = harmonic.T * np.ones(pfreq.size)
difmatrixPM = abs(difmatrixPM - np.ones((harmonic.size, 1))*pfreq)
FreqDistance = np.amin(difmatrixPM, axis=1) # minimum along rows
peakloc = np.argmin(difmatrixPM, axis=1)
Ponddif = np.array(FreqDistance) * (np.array(harmonic.T)**(-p))
PeakMag = pmag[peakloc]
MagFactor = 10**((PeakMag-Amax)/20)
ErrorPM = ErrorPM + (Ponddif + MagFactor*(q*Ponddif-r)).T
harmonic = harmonic+f0c
ErrorMP = np.zeros(harmonic.size) # initialize MP errors
MaxNMP = min(10, pfreq.size)
for i in range(0, f0c.size) : # measured to predicted mismatch error
nharm = np.round(pfreq[:MaxNMP]/f0c[i])
nharm = (nharm>=1)*nharm + (nharm<1)
FreqDistance = abs(pfreq[:MaxNMP] - nharm*f0c[i])
Ponddif = FreqDistance * (pfreq[:MaxNMP]**(-p))
PeakMag = pmag[:MaxNMP]
MagFactor = 10**((PeakMag-Amax)/20)
ErrorMP[i] = sum(MagFactor * (Ponddif + MagFactor*(q*Ponddif-r)))
Error = (ErrorPM[0]/MaxNPM) + (rho*ErrorMP/MaxNMP) # total error
f0index = np.argmin(Error) # get the smallest error
f0 = f0c[f0index] # f0 with the smallest error
return f0, ErrorPM, ErrorMP, Error
(fs, x) = UF.wavread('../../../sounds/oboe-A4.wav')
N = 1024
hN = N/2
M = 801
t = -40
start = .8*fs
minf0 = 100
maxf0 = 1500
w = blackman (M)
x1 = x[start:start+M]
mX, pX = DFT.dftAnal(x1, w, N)
ploc = UF.peakDetection(mX, hN, t)
iploc, ipmag, ipphase = UF.peakInterp(mX, pX, ploc)
ipfreq = fs * iploc/N
f0cand = np.arange(minf0, maxf0, 1.0)
maxnpeaks = 10
f0, ErrorPM, ErrorMP, Error = TWM (ipfreq, ipmag, maxnpeaks, f0cand)
freqaxis = fs*np.arange(N/2)/float(N)
plt.figure(1, figsize=(9, 7))
plt.subplot (2,1,1)
plt.plot(freqaxis,mX,'r', lw=1.5)
plt.axis([100,5100,-80,max(mX)+1])
plt.plot(fs * iploc / N, ipmag, marker='x', color='b', linestyle='', markeredgewidth=1.5)
plt.title('mX + peaks (oboe-A4.wav)')
plt.subplot (2,1,2)
plt.plot(f0cand,ErrorPM[0], 'b', label = 'ErrorPM', lw=1.2)
plt.plot(f0cand,ErrorMP, 'g', label = 'ErrorMP', lw=1.2)
plt.plot(f0cand,Error, color='black', label = 'Error Total', lw=1.5)
plt.axis([minf0,maxf0,min(Error),130])
plt.legend()
plt.title('TWM Errors')
plt.tight_layout()
plt.savefig('f0-TWM-errors-1.png')
plt.show()
| agpl-3.0 |
tansey/vsmrfs | vsmrfs/test_learning.py | 2 | 6974 | import matplotlib
matplotlib.use('Agg')
from matplotlib import cm, colors
import matplotlib.pyplot as plt
import numpy as np
import scipy.sparse as sp
import argparse
import csv
import sys
from node_learning import *
from exponential_families import *
from utils import *
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Runs the maximum likelihood estimation (MLE) algorithm for a single node-conditional.')
# Generic settings
parser.add_argument('data_file', help='The file containing the sufficient statistics.')
#parser.add_argument('neighbors_file', help='The file containing the neighbors partition.')
parser.add_argument('--verbose', type=int, default=2, help='Print detailed progress information to the console. 0=none, 1=high-level only, 2=all details.')
parser.add_argument('--distribution', default='b', help='The distribution of the target node-conditional.')
parser.add_argument('--target', type=int, help='The target node.')
#parser.add_argument('--postprocess', dest='generate_data', action='store_true', help='Re-runs the MLE to find the right weight values once nonzeros are detected. TODO: p >> n, so how do we do this? adaptive lasso?')
parser.add_argument('--true_weights', help='The true weights file.')
# Data saving settings
parser.add_argument('--save_weights', help='The file where the resulting weights will be saved.')
parser.add_argument('--save_edges', help='The file where the resulting edges will be saved.')
parser.add_argument('--save_metrics', help='The file where the resulting metrics such as BIC, DoF, etc. will be saved.')
# Plotting settings
parser.add_argument('--plot_results', help='The file to which the results will be plotted.')
parser.add_argument('--plot_edges', help='The file to which the estimated signal distribution will be plotted.')
parser.add_argument('--plot_path', help='The file to which the solution path of the penalty (lambda) will be plotted.')
parser.add_argument('--plot_final', help='The file to which the results of the final solution will be plotted.')
# Solution path and lambda settings
parser.add_argument('--solution_path', dest='solution_path', action='store_true', help='Use the solution path of the generalized lasso to find a good value for the penalty weight (lambda).')
parser.add_argument('--min_lambda1', type=float, default=0.2, help='The minimum amount the lambda1 penalty can take in the solution path.')
parser.add_argument('--max_lambda1', type=float, default=1.5, help='The maximum amount the lambda1 penalty can take in the solution path.')
parser.add_argument('--min_lambda2', type=float, default=0.2, help='The minimum amount the lambda2 penalty can take in the solution path.')
parser.add_argument('--max_lambda2', type=float, default=1.5, help='The maximum amount the lambda2 penalty can take in the solution path.')
parser.add_argument('--penalty_bins', type=int, default=30, help='The number of lambda penalty values in the solution path.')
parser.add_argument('--dof_tolerance', type=float, default=1e-4, help='The threshold for calculating the degrees of freedom.')
parser.add_argument('--lambda1', type=float, default=0.3, help='The lambda1 penalty that controls the sparsity of edges (only used if --solution_path is not specified).')
parser.add_argument('--lambda2', type=float, default=0.3, help='The lambda2 penalty that controls the sparsity of individual weights (only used if --solution_path is not specified).')
# Convergence settings
parser.add_argument('--rel_tol', type=float, default=1e-6, help='The convergence threshold for the main optimization loop.')
parser.add_argument('--edge_tol', type=float, default=1e-3, help='The convergence threshold for the edge definition criteria.')
parser.add_argument('--max_steps', type=int, default=300, help='The maximum number of steps for the main optimization loop.')
parser.add_argument('--newton_rel_tol', type=float, default=1e-6, help='The convergence threshold for the inner loop of Newton\'s method.')
parser.add_argument('--newton_max_steps', type=int, default=100, help='The maximum number of steps for the inner loop of Newton\'s method.')
# ADMM settings
parser.add_argument('--admm_alpha', type=float, default=1.8, help='The step size value for the ADMM solver.')
parser.add_argument('--admm_inflate', type=float, default=2., help='The inflation/deflation rate for the ADMM step size.')
parser.set_defaults()
# Get the arguments from the command line
args = parser.parse_args()
# Load the data
#data = sp.lil_matrix(np.loadtxt(args.data_file, delimiter=','))
header = get_numeric_header(args.data_file)
data = np.loadtxt(args.data_file, delimiter=',', skiprows=1)
# Rearrange the data so that sufficient statistics of this node come first
target_cols = np.where(header == args.target)[0]
neighbors_partition = np.hstack([[args.target], np.delete(header, target_cols)])
c = np.hstack([target_cols, np.delete(np.arange(data.shape[1]), target_cols)])
print 'c: {0} target: {1}'.format(c,target_cols)
data = data[:, c]
true_weights = np.loadtxt(args.true_weights, delimiter=',')
'''Bernoulli testing'''
# Count
# counts = np.zeros((2,2,2))
# for row in data.astype(int):
# counts[row[0],row[1],row[2]] += 1
# print 'X=0 Counts'
# print pretty_str(counts[0])
# print 'X=1 Counts'
# print pretty_str(counts[1])
# probs = counts[1] / (counts[0] + counts[1])
# # normalize
# print ''
# print 'Empirical:'
# print 'p(X0=1 | X1=0, X2=0) = {0}'.format(probs[0,0])
# print 'p(X0=1 | X1=1, X2=0) = {0}'.format(probs[1,0])
# print 'p(X0=1 | X1=0, X2=1) = {0}'.format(probs[0,1])
# print 'p(X0=1 | X1=1, X2=1) = {0}'.format(probs[1,1])
# print ''
# print 'Truth: '
# eta1 = true_weights.dot(np.array([1, 0, 0]))
# print 'p(X0=1 | X1=0, X2=0) = {0}'.format(np.exp(eta1) / (1 + np.exp(eta1)))
# eta1 = true_weights.dot(np.array([1, 1, 0]))
# print 'p(X0=1 | X1=1, X2=0) = {0}'.format(np.exp(eta1) / (1 + np.exp(eta1)))
# eta1 = true_weights.dot(np.array([1, 0, 1]))
# print 'p(X0=1 | X1=0, X2=1) = {0}'.format(np.exp(eta1) / (1 + np.exp(eta1)))
# eta1 = true_weights.dot(np.array([1, 1, 1]))
# print 'p(X0=1 | X1=1, X2=1) = {0}'.format(np.exp(eta1) / (1 + np.exp(eta1)))
'''Dirichlet testing'''
means = data.mean(axis=0)
print 'Averages: {0}'.format(means.reshape(len(means)/3, 3))
print 'Truth: {0}'.format(true_weights[:,0] / true_weights[:,0].sum())
# print ''
# five_cols = np.where(header == 5)[0]
# for i in xrange(3):
# for j in five_cols:
# print '{0}->{1}: {2}'.format(i, j, np.correlate(data[:,i], data[:,j]))
print ''
print np.cov(data.T)[0:3][:, np.where(header == 5)]
| mit |
zutshi/S3CAMX | create_example_from_template.py | 1 | 6354 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import fileOps as f
import sys
import err
plant_str = '''
# Must satisfy the signature
# [t,X,D,P] = sim_function(T,X0,D0,P0,I0);
import numpy as np
from scipy.integrate import ode
import matplotlib.pyplot as PLT
class SIM(object):
def __init__(self, plt, pvt_init_data):
pass
def sim(self, TT, X0, D, P, U, I, property_checker, property_violated_flag):
# atol = 1e-10
rtol = 1e-5
num_dim_x = len(X0)
plot_data = [np.empty(0, dtype=float), np.empty((0, num_dim_x), dtype=float)]
# tt,YY,dummy_D,dummy_P
solver = ode(dyn).set_integrator('dopri5', rtol=rtol)
Ti = TT[0]
Tf = TT[1]
T = Tf - Ti
if property_checker:
violating_state = [()]
solver.set_solout(solout_fun(property_checker, violating_state, plot_data)) # (2)
solver.set_initial_value(X0, t=0.0)
solver.set_f_params(U)
X_ = solver.integrate(T)
if property_checker is not None:
if property_checker(Tf, X_):
property_violated_flag[0] = True
dummy_D = np.zeros(D.shape)
dummy_P = np.zeros(P.shape)
ret_t = Tf
ret_X = X_
ret_D = dummy_D
ret_P = dummy_P
# TODO: plotting needs to be fixed
#PLT.plot(plot_data[0] + Ti, plot_data[1][:, 0])
#PLT.plot(plot_data[0] + Ti, plot_data[1][:, 1])
#PLT.plot(plot_data[1][:, 0], plot_data[1][:, 1])
##PLT.plot(plot_data[0] + Ti, np.tile(U, plot_data[0].shape))
return (ret_t, ret_X, ret_D, ret_P)
# State Space Modeling Template
# dx/dt = Ax + Bu
# y = Cx + Du
def dyn(t, x, u):
#u = np.matrix([u[0], 0.0]).T
#x = np.matrix(x).T
#X_ = A*x + B*u
#return np.array(X_.T)
pass
def solout_fun(property_checker, violating_state, plot_data):
def solout(t, Y):
plot_data[0] = np.concatenate((plot_data[0], np.array([t])))
plot_data[1] = np.concatenate((plot_data[1], np.array([Y])))
return 0
return solout
'''
tst_str = '''
# sampling time
delta_t = <0.0>
# pvt simulator state required for initializing the simulator
plant_pvt_init_data = None
#############################
# P1: Property Description
#############################
# Time Horizon
T = <0.0>
# Rectangular bounds on initial plant states X0[0, :] <= X <= X0[1, :]
initial_set = <[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]>
# Unsafe Boxed Region
error_set = <[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]>
# rectangular bounds on exogenous inputs to the contorller. Such as, controller
# disturbance.
ci = <[[0.0], [0.0]]>
############################
# Results Scratchpad:
# vio = _/100k took _ mins [plotting, logging?]
# SS = falsified in _ [plotting, logging?]
# grid_eps = <[0.0, 0.0]>
# num_samples = <2>
# SS + symex: falsified in _ [plotting, logging?]
########################
# Abstraction Params
########################
# initial abstraction grid size
grid_eps = <[0.0, 0.0, 0.0]>
# initial abstraction pi grid size
pi_grid_eps = <[0.0, 0.0]>
# number of samples at every scatter step
num_samples = <0>
# maximum iteration before SS iter outs.
MAX_ITER = <0>
min_smt_sample_dist = <0.5>
########################
# initial controller states which are C ints
initial_controller_integer_state = <[0.0]>
# initial controller states which are C doubles
initial_controller_float_state = <[0.0]>
# number of control inputs to the plant
num_control_inputs = <0>
################################
# Unimplemented
################################
# Initial plant discrete state: List all states
initial_discrete_state = <[0]>
# Rectangularly bounded exogenous inputs to the plant (plant noise).
pi = [[], []]
# Initial pvt simulator state, associated with with an execution trace.
initial_pvt_states = []
################################
################
# Simulators
################
## Plant ##
# is the plant simulator implemented in Python(python) or Matlab(matlab)?
plant_description = <'python'/'matlab'>
# relative/absolute path for the simulator file containing sim()
plant_path = <'*.py'/'*.m'>
## Controller ##
# relative/absolute path for the controller .so
controller_path = <'*.so'>
# relative path for the directory containing smt2 files for each path
controller_path_dir_path = './paths'
###############
################
# DO NOT MODIFY
################
CONVERSION_FACTOR = 1.0
refinement_factor = 2.0
'''
compile_script_str = '''
#!/usr/bin/env bash
set -o verbose
soname=<>.so
SOURCE=<>.c
# gcc -c -Wall ./$SOURCE
# Create SO
gcc -shared -Wl,-soname,$soname -o ./$soname -fPIC ./$SOURCE
#llvm-gcc -emit-llvm -c -g <>.c
'''
controller_h_str = '''
typedef struct{
int* int_state_arr;
double* float_state_arr;
double* output_arr;
}RETURN_VAL;
typedef struct{
double* input_arr;
int* int_state_arr;
double* float_state_arr;
double* x_arr;
}INPUT_VAL;
void* controller(INPUT_VAL* input, RETURN_VAL* ret_val);
void controller_init();
'''
controller_c_str = '''
#include "controller.h"
void controller_init(){
}
void* controller(INPUT_VAL* input, RETURN_VAL* ret_val)
{
_ = input->x_arr[];
_ = input->float_state_arr[];
_ = input->int_state_arr[];
_ = input->input_arr[];
ret_val->output_arr[] = _;
ret_val->float_state_arr[] = _;
ret_val->int_state_arr[] = _;
return (void*)0;
}
'''
PC_DIR_PATH = './paths/pathcrawler'
KLEE_DIR_PATH = './paths/klee'
TST_FILE = '{}.tst'
PLANT_FILE = '{}_plant.py'
COMPILATION_SCRIPT = 'compile.sh'
CONTROLLER_H = 'controller.h'
CONTROLLER_C = '{}_controller.c'
def main():
dir_path = f.sanitize_path(sys.argv[1])
if f.file_exists(dir_path):
print 'Failed: requested directory exists: {}'.format(dir_path)
return
if dir_path == '':
raise err.Fatal('dir path empty!')
dir_name = f.get_file_name_from_path(dir_path)
f.make_n_change_dir(dir_path)
f.write_data(TST_FILE.format(dir_name), tst_str)
f.write_data(PLANT_FILE.format(dir_name), plant_str)
f.write_data(COMPILATION_SCRIPT, compile_script_str)
f.write_data(CONTROLLER_H, controller_h_str)
f.write_data(CONTROLLER_C.format(dir_name), controller_c_str)
f.make_dir(PC_DIR_PATH)
# make compilation script executable
f.make_exec(COMPILATION_SCRIPT)
if __name__ == '__main__':
main()
| bsd-2-clause |
hainm/scikit-learn | examples/cluster/plot_lena_compress.py | 271 | 2229 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Vector Quantization Example
=========================================================
The classic image processing example, Lena, an 8-bit grayscale
bit-depth, 512 x 512 sized image, is used here to illustrate
how `k`-means is used for vector quantization.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn import cluster
n_clusters = 5
np.random.seed(0)
try:
lena = sp.lena()
except AttributeError:
# Newer versions of scipy have lena in misc
from scipy import misc
lena = misc.lena()
X = lena.reshape((-1, 1)) # We need an (n_sample, n_feature) array
k_means = cluster.KMeans(n_clusters=n_clusters, n_init=4)
k_means.fit(X)
values = k_means.cluster_centers_.squeeze()
labels = k_means.labels_
# create an array from labels and values
lena_compressed = np.choose(labels, values)
lena_compressed.shape = lena.shape
vmin = lena.min()
vmax = lena.max()
# original lena
plt.figure(1, figsize=(3, 2.2))
plt.imshow(lena, cmap=plt.cm.gray, vmin=vmin, vmax=256)
# compressed lena
plt.figure(2, figsize=(3, 2.2))
plt.imshow(lena_compressed, cmap=plt.cm.gray, vmin=vmin, vmax=vmax)
# equal bins lena
regular_values = np.linspace(0, 256, n_clusters + 1)
regular_labels = np.searchsorted(regular_values, lena) - 1
regular_values = .5 * (regular_values[1:] + regular_values[:-1]) # mean
regular_lena = np.choose(regular_labels.ravel(), regular_values)
regular_lena.shape = lena.shape
plt.figure(3, figsize=(3, 2.2))
plt.imshow(regular_lena, cmap=plt.cm.gray, vmin=vmin, vmax=vmax)
# histogram
plt.figure(4, figsize=(3, 2.2))
plt.clf()
plt.axes([.01, .01, .98, .98])
plt.hist(X, bins=256, color='.5', edgecolor='.5')
plt.yticks(())
plt.xticks(regular_values)
values = np.sort(values)
for center_1, center_2 in zip(values[:-1], values[1:]):
plt.axvline(.5 * (center_1 + center_2), color='b')
for center_1, center_2 in zip(regular_values[:-1], regular_values[1:]):
plt.axvline(.5 * (center_1 + center_2), color='b', linestyle='--')
plt.show()
| bsd-3-clause |
zingale/pyro2 | compressible_react/simulation.py | 1 | 3310 | from __future__ import print_function
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import compressible
import compressible.eos as eos
import util.plot_tools as plot_tools
class Simulation(compressible.Simulation):
    """Compressible hydro solver extended with (currently stubbed) reaction
    and diffusion operators, advanced with Strang splitting around the
    hydrodynamics update."""

    def initialize(self):
        """
        For the reacting compressible solver, our initialization of
        the data is the same as the compressible solver, but we
        supply additional variables.
        """
        super().initialize(extra_vars=["fuel", "ash"])

    def burn(self, dt):
        """ react fuel to ash (stub -- not yet implemented) """
        # compute T
        # compute energy generation rate
        # update energy due to reaction
        pass

    def diffuse(self, dt):
        """ diffuse for dt (stub -- not yet implemented) """
        # compute T
        # compute div kappa grad T
        # update energy due to diffusion
        pass

    def evolve(self):
        """
        Evolve the equations of compressible hydrodynamics through a
        timestep dt, Strang-splitting the reaction/diffusion operators
        (half-steps) around the hydrodynamic update.
        """

        # we want to do Strang-splitting here
        self.burn(self.dt/2)

        self.diffuse(self.dt/2)

        if self.particles is not None:
            self.particles.update_particles(self.dt/2)

        # note: this will do the time increment and n increment
        super().evolve()

        if self.particles is not None:
            self.particles.update_particles(self.dt/2)

        self.diffuse(self.dt/2)
        self.burn(self.dt/2)

    def dovis(self):
        """
        Do runtime visualization: plot density, velocity magnitude,
        pressure, specific internal energy and the fuel mass fraction.
        """

        plt.clf()

        plt.rc("font", size=10)

        # we do this even though ivars is in self, so this works when
        # we are plotting from a file
        ivars = compressible.Variables(self.cc_data)

        # access gamma from the cc_data object so we can use dovis
        # outside of a running simulation.
        gamma = self.cc_data.get_aux("gamma")

        q = compressible.cons_to_prim(self.cc_data.data, gamma, ivars, self.cc_data.grid)

        rho = q[:, :, ivars.irho]
        u = q[:, :, ivars.iu]
        v = q[:, :, ivars.iv]
        p = q[:, :, ivars.ip]
        e = eos.rhoe(gamma, p)/rho
        X = q[:, :, ivars.ix]

        magvel = np.sqrt(u**2 + v**2)

        myg = self.cc_data.grid

        fields = [rho, magvel, p, e, X]
        field_names = [r"$\rho$", r"U", "p", "e", r"$X_\mathrm{fuel}$"]

        f, axes, cbar_title = plot_tools.setup_axes(myg, len(fields))

        for n, ax in enumerate(axes):
            v = fields[n]

            img = ax.imshow(np.transpose(v.v()),
                            interpolation="nearest", origin="lower",
                            extent=[myg.xmin, myg.xmax, myg.ymin, myg.ymax],
                            cmap=self.cm)

            ax.set_xlabel("x")
            ax.set_ylabel("y")

            # needed for PDF rendering
            cb = axes.cbar_axes[n].colorbar(img)
            # empty format string suppresses the colorbar tick labels
            cb.formatter = matplotlib.ticker.FormatStrFormatter("")
            cb.solids.set_rasterized(True)
            cb.solids.set_edgecolor("face")

            if cbar_title:
                cb.ax.set_title(field_names[n])
            else:
                ax.set_title(field_names[n])

        plt.figtext(0.05, 0.0125, "t = {:10.5g}".format(self.cc_data.t))

        plt.pause(0.001)
        plt.draw()
| bsd-3-clause |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/pandas/tests/test_panelnd.py | 7 | 3952 | # -*- coding: utf-8 -*-
import nose
from pandas.core import panelnd
from pandas.core.panel import Panel
from pandas.util.testing import assert_panel_equal
import pandas.util.testing as tm
class TestPanelnd(tm.TestCase):
    """Tests for the deprecated N-dimensional panel factory
    (``panelnd.create_nd_panel_factory``), including the GH13564
    deprecation warnings."""

    def setUp(self):
        pass

    def test_4d_construction(self):
        # building a 4D panel class (sliced by the Panel class) must warn
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):

            # create a 4D
            Panel4D = panelnd.create_nd_panel_factory(
                klass_name='Panel4D',
                orders=['labels', 'items', 'major_axis', 'minor_axis'],
                slices={'items': 'items', 'major_axis': 'major_axis',
                        'minor_axis': 'minor_axis'},
                slicer=Panel,
                aliases={'major': 'major_axis', 'minor': 'minor_axis'},
                stat_axis=2)

            p4d = Panel4D(dict(L1=tm.makePanel(), L2=tm.makePanel()))  # noqa

    def test_4d_construction_alt(self):
        # same as above, but passing the slicer by name (string) instead
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):

            # create a 4D
            Panel4D = panelnd.create_nd_panel_factory(
                klass_name='Panel4D',
                orders=['labels', 'items', 'major_axis', 'minor_axis'],
                slices={'items': 'items', 'major_axis': 'major_axis',
                        'minor_axis': 'minor_axis'},
                slicer='Panel',
                aliases={'major': 'major_axis', 'minor': 'minor_axis'},
                stat_axis=2)

            p4d = Panel4D(dict(L1=tm.makePanel(), L2=tm.makePanel()))  # noqa

    def test_4d_construction_error(self):
        # an unknown slicer name must raise

        # create a 4D
        self.assertRaises(Exception,
                          panelnd.create_nd_panel_factory,
                          klass_name='Panel4D',
                          orders=['labels', 'items', 'major_axis',
                                  'minor_axis'],
                          slices={'items': 'items',
                                  'major_axis': 'major_axis',
                                  'minor_axis': 'minor_axis'},
                          slicer='foo',
                          aliases={'major': 'major_axis',
                                   'minor': 'minor_axis'},
                          stat_axis=2)

    def test_5d_construction(self):
        # a 5D panel built on top of a generated 4D panel class
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):

            # create a 4D
            Panel4D = panelnd.create_nd_panel_factory(
                klass_name='Panel4D',
                orders=['labels1', 'items', 'major_axis', 'minor_axis'],
                slices={'items': 'items', 'major_axis': 'major_axis',
                        'minor_axis': 'minor_axis'},
                slicer=Panel,
                aliases={'major': 'major_axis', 'minor': 'minor_axis'},
                stat_axis=2)

            # deprecation GH13564
            p4d = Panel4D(dict(L1=tm.makePanel(), L2=tm.makePanel()))

            # create a 5D
            Panel5D = panelnd.create_nd_panel_factory(
                klass_name='Panel5D',
                orders=['cool1', 'labels1', 'items', 'major_axis',
                        'minor_axis'],
                slices={'labels1': 'labels1', 'items': 'items',
                        'major_axis': 'major_axis',
                        'minor_axis': 'minor_axis'},
                slicer=Panel4D,
                aliases={'major': 'major_axis', 'minor': 'minor_axis'},
                stat_axis=2)

            # deprecation GH13564
            p5d = Panel5D(dict(C1=p4d))

            # slice back to 4d
            results = p5d.ix['C1', :, :, 0:3, :]
            expected = p4d.ix[:, :, 0:3, :]
            assert_panel_equal(results['L1'], expected['L1'])

            # test a transpose
            # results = p5d.transpose(1,2,3,4,0)
            # expected =
if __name__ == '__main__':
    # run this module's tests directly under nose
    nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
                   exit=False)
| gpl-3.0 |
Lawrence-Liu/scikit-learn | sklearn/datasets/tests/test_svmlight_format.py | 228 | 11221 | from bz2 import BZ2File
import gzip
from io import BytesIO
import numpy as np
import os
import shutil
from tempfile import NamedTemporaryFile
from sklearn.externals.six import b
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_in
import sklearn
from sklearn.datasets import (load_svmlight_file, load_svmlight_files,
dump_svmlight_file)
# Paths to the bundled svmlight fixture files used throughout these tests.
currdir = os.path.dirname(os.path.abspath(__file__))
datafile = os.path.join(currdir, "data", "svmlight_classification.txt")
multifile = os.path.join(currdir, "data", "svmlight_multilabel.txt")
invalidfile = os.path.join(currdir, "data", "svmlight_invalid.txt")
invalidfile2 = os.path.join(currdir, "data", "svmlight_invalid_order.txt")
def test_load_svmlight_file():
    # basic parse of the classification fixture: shapes, values and labels
    X, y = load_svmlight_file(datafile)

    # test X's shape
    assert_equal(X.indptr.shape[0], 7)
    assert_equal(X.shape[0], 6)
    assert_equal(X.shape[1], 21)
    assert_equal(y.shape[0], 6)

    # test X's non-zero values
    for i, j, val in ((0, 2, 2.5), (0, 10, -5.2), (0, 15, 1.5),
                      (1, 5, 1.0), (1, 12, -3),
                      (2, 20, 27)):
        assert_equal(X[i, j], val)

    # tests X's zero values
    assert_equal(X[0, 3], 0)
    assert_equal(X[0, 5], 0)
    assert_equal(X[1, 8], 0)
    assert_equal(X[1, 16], 0)
    assert_equal(X[2, 18], 0)

    # test can change X's values
    X[0, 2] *= 2
    assert_equal(X[0, 2], 5)

    # test y
    assert_array_equal(y, [1, 2, 3, 4, 1, 2])
def test_load_svmlight_file_fd():
    # test loading from file descriptor
    X1, y1 = load_svmlight_file(datafile)

    fd = os.open(datafile, os.O_RDONLY)
    try:
        X2, y2 = load_svmlight_file(fd)
        assert_array_equal(X1.data, X2.data)
        assert_array_equal(y1, y2)
    finally:
        # the loader does not take ownership of the descriptor
        os.close(fd)
def test_load_svmlight_file_multilabel():
    # multilabel fixture: y is a list of (possibly empty) label tuples
    X, y = load_svmlight_file(multifile, multilabel=True)
    assert_equal(y, [(0, 1), (2,), (), (1, 2)])
def test_load_svmlight_files():
    # loading the same file twice must give identical data and honor dtype
    X_train, y_train, X_test, y_test = load_svmlight_files([datafile] * 2,
                                                           dtype=np.float32)
    assert_array_equal(X_train.toarray(), X_test.toarray())
    assert_array_equal(y_train, y_test)
    assert_equal(X_train.dtype, np.float32)
    assert_equal(X_test.dtype, np.float32)

    X1, y1, X2, y2, X3, y3 = load_svmlight_files([datafile] * 3,
                                                 dtype=np.float64)
    assert_equal(X1.dtype, X2.dtype)
    assert_equal(X2.dtype, X3.dtype)
    assert_equal(X3.dtype, np.float64)
def test_load_svmlight_file_n_features():
    # an explicit n_features may widen the matrix but never narrow it
    X, y = load_svmlight_file(datafile, n_features=22)

    # test X'shape
    assert_equal(X.indptr.shape[0], 7)
    assert_equal(X.shape[0], 6)
    assert_equal(X.shape[1], 22)

    # test X's non-zero values
    for i, j, val in ((0, 2, 2.5), (0, 10, -5.2),
                      (1, 5, 1.0), (1, 12, -3)):
        assert_equal(X[i, j], val)

    # 21 features in file
    assert_raises(ValueError, load_svmlight_file, datafile, n_features=20)
def test_load_compressed():
    # gzip/bzip2 copies of the fixture must parse identically to the plain file
    X, y = load_svmlight_file(datafile)

    with NamedTemporaryFile(prefix="sklearn-test", suffix=".gz") as tmp:
        tmp.close()  # necessary under windows
        with open(datafile, "rb") as f:
            shutil.copyfileobj(f, gzip.open(tmp.name, "wb"))
        Xgz, ygz = load_svmlight_file(tmp.name)
        # because we "close" it manually and write to it,
        # we need to remove it manually.
        os.remove(tmp.name)
    assert_array_equal(X.toarray(), Xgz.toarray())
    assert_array_equal(y, ygz)

    with NamedTemporaryFile(prefix="sklearn-test", suffix=".bz2") as tmp:
        tmp.close()  # necessary under windows
        with open(datafile, "rb") as f:
            shutil.copyfileobj(f, BZ2File(tmp.name, "wb"))
        Xbz, ybz = load_svmlight_file(tmp.name)
        # because we "close" it manually and write to it,
        # we need to remove it manually.
        os.remove(tmp.name)
    assert_array_equal(X.toarray(), Xbz.toarray())
    assert_array_equal(y, ybz)
@raises(ValueError)
def test_load_invalid_file():
    # a malformed fixture must raise ValueError
    load_svmlight_file(invalidfile)
@raises(ValueError)
def test_load_invalid_order_file():
    # feature indices out of order must raise ValueError
    load_svmlight_file(invalidfile2)
@raises(ValueError)
def test_load_zero_based():
    # a zero feature id is invalid when zero_based=False (one-based data)
    f = BytesIO(b("-1 4:1.\n1 0:1\n"))
    load_svmlight_file(f, zero_based=False)
def test_load_zero_based_auto():
    # zero_based="auto" infers the indexing convention; when loading
    # several files together the inference must be shared so shapes agree
    data1 = b("-1 1:1 2:2 3:3\n")
    data2 = b("-1 0:0 1:1\n")

    f1 = BytesIO(data1)
    X, y = load_svmlight_file(f1, zero_based="auto")
    assert_equal(X.shape, (1, 3))

    f1 = BytesIO(data1)
    f2 = BytesIO(data2)
    X1, y1, X2, y2 = load_svmlight_files([f1, f2], zero_based="auto")
    assert_equal(X1.shape, (1, 4))
    assert_equal(X2.shape, (1, 4))
def test_load_with_qid():
    # load svmfile with qid attribute; qid is skipped or returned
    # depending on the query_id flag
    data = b("""
    3 qid:1 1:0.53 2:0.12
    2 qid:1 1:0.13 2:0.1
    7 qid:2 1:0.87 2:0.12""")
    X, y = load_svmlight_file(BytesIO(data), query_id=False)
    assert_array_equal(y, [3, 2, 7])
    assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
    res1 = load_svmlight_files([BytesIO(data)], query_id=True)
    res2 = load_svmlight_file(BytesIO(data), query_id=True)
    for X, y, qid in (res1, res2):
        assert_array_equal(y, [3, 2, 7])
        assert_array_equal(qid, [1, 1, 2])
        assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
@raises(ValueError)
def test_load_invalid_file2():
    # one bad file in a multi-file load must fail the whole call
    load_svmlight_files([datafile, invalidfile, datafile])
@raises(TypeError)
def test_not_a_filename():
    # in python 3 integers are valid file opening arguments (taken as unix
    # file descriptors)
    load_svmlight_file(.42)
@raises(IOError)
def test_invalid_filename():
    # a nonexistent path must raise IOError
    load_svmlight_file("trou pic nic douille")
def test_dump():
    # round-trip: dump under every dtype/zero_based combination and reload
    Xs, y = load_svmlight_file(datafile)
    Xd = Xs.toarray()

    # slicing a csr_matrix can unsort its .indices, so test that we sort
    # those correctly
    Xsliced = Xs[np.arange(Xs.shape[0])]

    for X in (Xs, Xd, Xsliced):
        for zero_based in (True, False):
            for dtype in [np.float32, np.float64, np.int32]:
                f = BytesIO()
                # we need to pass a comment to get the version info in;
                # LibSVM doesn't grok comments so they're not put in by
                # default anymore.
                dump_svmlight_file(X.astype(dtype), y, f, comment="test",
                                   zero_based=zero_based)
                f.seek(0)

                comment = f.readline()
                try:
                    comment = str(comment, "utf-8")
                except TypeError:  # fails in Python 2.x
                    pass
                assert_in("scikit-learn %s" % sklearn.__version__, comment)

                comment = f.readline()
                try:
                    comment = str(comment, "utf-8")
                except TypeError:  # fails in Python 2.x
                    pass
                assert_in(["one", "zero"][zero_based] + "-based", comment)

                X2, y2 = load_svmlight_file(f, dtype=dtype,
                                            zero_based=zero_based)
                assert_equal(X2.dtype, dtype)
                assert_array_equal(X2.sorted_indices().indices, X2.indices)

                if dtype == np.float32:
                    assert_array_almost_equal(
                        # allow a rounding error at the last decimal place
                        Xd.astype(dtype), X2.toarray(), 4)
                else:
                    assert_array_almost_equal(
                        # allow a rounding error at the last decimal place
                        Xd.astype(dtype), X2.toarray(), 15)
                assert_array_equal(y, y2)
def test_dump_multilabel():
    # label sets are written as a comma-separated prefix on each line
    X = [[1, 0, 3, 0, 5],
         [0, 0, 0, 0, 0],
         [0, 5, 0, 1, 0]]
    y = [[0, 1, 0], [1, 0, 1], [1, 1, 0]]
    f = BytesIO()
    dump_svmlight_file(X, y, f, multilabel=True)
    f.seek(0)
    # make sure it dumps multilabel correctly
    assert_equal(f.readline(), b("1 0:1 2:3 4:5\n"))
    assert_equal(f.readline(), b("0,2 \n"))
    assert_equal(f.readline(), b("0,1 1:5 3:1\n"))
def test_dump_concise():
    # values must be serialized with the shortest exact representation
    one = 1
    two = 2.1
    three = 3.01
    exact = 1.000000000000001
    # loses the last decimal place
    almost = 1.0000000000000001
    X = [[one, two, three, exact, almost],
         [1e9, 2e18, 3e27, 0, 0],
         [0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0]]
    y = [one, two, three, exact, almost]
    f = BytesIO()
    dump_svmlight_file(X, y, f)
    f.seek(0)
    # make sure it's using the most concise format possible
    assert_equal(f.readline(),
                 b("1 0:1 1:2.1 2:3.01 3:1.000000000000001 4:1\n"))
    assert_equal(f.readline(), b("2.1 0:1000000000 1:2e+18 2:3e+27\n"))
    assert_equal(f.readline(), b("3.01 \n"))
    assert_equal(f.readline(), b("1.000000000000001 \n"))
    assert_equal(f.readline(), b("1 \n"))
    f.seek(0)
    # make sure it's correct too :)
    X2, y2 = load_svmlight_file(f)
    assert_array_almost_equal(X, X2.toarray())
    assert_array_equal(y, y2)
def test_dump_comment():
    # comments round-trip as unicode; raw utf-8 bytes and NUL are rejected
    X, y = load_svmlight_file(datafile)
    X = X.toarray()

    f = BytesIO()
    ascii_comment = "This is a comment\nspanning multiple lines."
    dump_svmlight_file(X, y, f, comment=ascii_comment, zero_based=False)
    f.seek(0)

    X2, y2 = load_svmlight_file(f, zero_based=False)
    assert_array_almost_equal(X, X2.toarray())
    assert_array_equal(y, y2)

    # XXX we have to update this to support Python 3.x
    utf8_comment = b("It is true that\n\xc2\xbd\xc2\xb2 = \xc2\xbc")
    f = BytesIO()
    assert_raises(UnicodeDecodeError,
                  dump_svmlight_file, X, y, f, comment=utf8_comment)

    unicode_comment = utf8_comment.decode("utf-8")
    f = BytesIO()
    dump_svmlight_file(X, y, f, comment=unicode_comment, zero_based=False)
    f.seek(0)

    X2, y2 = load_svmlight_file(f, zero_based=False)
    assert_array_almost_equal(X, X2.toarray())
    assert_array_equal(y, y2)

    f = BytesIO()
    assert_raises(ValueError,
                  dump_svmlight_file, X, y, f, comment="I've got a \0.")
def test_dump_invalid():
    # y must be one-dimensional and aligned with X's rows
    X, y = load_svmlight_file(datafile)

    f = BytesIO()
    y2d = [y]
    assert_raises(ValueError, dump_svmlight_file, X, y2d, f)

    f = BytesIO()
    assert_raises(ValueError, dump_svmlight_file, X, y[:-1], f)
def test_dump_query_id():
    # test dumping a file with query_id
    X, y = load_svmlight_file(datafile)
    X = X.toarray()
    query_id = np.arange(X.shape[0]) // 2
    f = BytesIO()
    dump_svmlight_file(X, y, f, query_id=query_id, zero_based=True)

    f.seek(0)
    X1, y1, query_id1 = load_svmlight_file(f, query_id=True, zero_based=True)
    assert_array_almost_equal(X, X1.toarray())
    assert_array_almost_equal(y, y1)
    assert_array_almost_equal(query_id, query_id1)
| bsd-3-clause |
conversationai/wikidetox | antidox/perspective.py | 1 | 9999 | """ inputs comments to perspective and dlp apis and detects
toxicity and personal information> has support for csv files,
bigquery tables, and wikipedia talk pages"""
#TODO(tamajongnc): configure pipeline to distribute work to multiple machines
# pylint: disable=fixme, import-error
# pylint: disable=fixme, unused-import
import argparse
import json
import sys
from googleapiclient import discovery
from googleapiclient import errors as google_api_errors
import pandas as pd
import requests
import apache_beam as beam
from apache_beam.io.gcp.internal.clients import bigquery
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
from apache_beam.options.pipeline_options import GoogleCloudOptions
from apache_beam.options.pipeline_options import StandardOptions
from apache_beam.options.pipeline_options import WorkerOptions
from apache_beam import window
from antidox import clean
def get_client():
  """Build the Perspective and DLP API clients from the local key file.

  Returns:
    Tuple ``(apikey_data, perspective, dlp)`` where ``apikey_data`` is the
    parsed contents of ``api_key.json``.
  """
  with open("api_key.json") as key_file:
    key_data = json.load(key_file)
  toxicity_key = key_data['perspective_key']
  # Discovery builds each client object dynamically from service name/version.
  perspective_client = discovery.build(
      'commentanalyzer', 'v1alpha1', developerKey=toxicity_key)
  dlp_client = discovery.build('dlp', 'v2', developerKey=toxicity_key)
  return (key_data, perspective_client, dlp_client)
def perspective_request(perspective, comment):
  """Score *comment* with the Perspective API.

  Requests the TOXICITY, THREAT and INSULT attributes and returns the
  parsed analyze response.
  """
  body = {
      'comment': {'text': comment},
      'requestedAttributes': {'TOXICITY': {}, 'THREAT': {}, 'INSULT': {}},
  }
  return perspective.comments().analyze(body=body).execute()
def dlp_request(dlp, apikey_data, comment):
  """Inspect *comment* for personal information with the Cloud DLP API.

  Args:
    dlp: DLP API client (from get_client()).
    apikey_data: parsed api_key.json; supplies 'project_number'.
    comment: text to inspect.

  Returns:
    The parsed DLP content.inspect response.
  """
  # Request every listed infoType plus ALL_BASIC, keep findings rated at
  # least POSSIBLE, return all of them (maxFindingsPerItem=0) with quotes.
  request_dlp = {
      "item":{
          "value":comment
      },
      "inspectConfig":{
          "infoTypes":[
              {
                  "name":"PHONE_NUMBER"
              },
              {
                  "name":"US_TOLLFREE_PHONE_NUMBER"
              },
              {
                  "name":"DATE_OF_BIRTH"
              },
              {
                  "name":"EMAIL_ADDRESS"
              },
              {
                  "name":"CREDIT_CARD_NUMBER"
              },
              {
                  "name":"IP_ADDRESS"
              },
              {
                  "name":"LOCATION"
              },
              {
                  "name":"PASSPORT"
              },
              {
                  "name":"PERSON_NAME"
              },
              {
                  "name":"ALL_BASIC"
              }
          ],
          "minLikelihood":"POSSIBLE",
          "limits":{
              "maxFindingsPerItem":0
          },
          "includeQuote":True
      }
  }
  dlp_response = (dlp.projects().content().inspect(body=request_dlp,
                                                   parent='projects/'+
                                                   apikey_data['project_number']
                                                   ).execute())
  return dlp_response
def contains_pii(dlp_response):
  """Check a DLP inspect response for likely personal information.

  Args:
    dlp_response: parsed response from the Cloud DLP content.inspect call.

  Returns:
    ``(True, info_type_name)`` for the first finding rated LIKELY or
    VERY_LIKELY, otherwise ``(False, None)``.
  """
  # No findings key at all means DLP saw nothing worth reporting.
  if 'findings' not in dlp_response['result']:
    return (False, None)
  for finding in dlp_response['result']['findings']:
    if finding['likelihood'] in ('LIKELY', 'VERY_LIKELY'):
      return (True, finding['infoType']['name'])
  return (False, None)
def contains_toxicity(perspective_response):
  """Return True if the Perspective TOXICITY summary score is at least 0.5.

  Args:
    perspective_response: parsed Perspective analyze response containing
      attributeScores.TOXICITY.summaryScore.value.
  """
  score = (perspective_response['attributeScores']['TOXICITY']
           ['summaryScore']['value'])
  return score >= .5
def get_wikipage(pagename):
  """ Gets all content from a wikipedia page and turns it into plain text. """
  # pylint: disable=fixme, line-too-long
  # Fetch the latest revision's raw wikitext via the MediaWiki query API.
  page = ("https://en.wikipedia.org/w/api.php?action=query&prop=revisions&rvprop=content&format=json&formatversion=2&titles="+(pagename))
  get_page = requests.get(page)
  response = json.loads(get_page.content)
  # formatversion=2 returns pages as a list; take the first page's newest revision
  text_response = response['query']['pages'][0]['revisions'][0]['content']
  return text_response
def wiki_clean(get_wikipage):
  """cleans the comments from wikipedia pages"""
  # NOTE(review): the parameter name shadows the get_wikipage() function
  # above; it actually receives that function's *result* (raw wikitext),
  # not the function itself -- consider renaming.
  text = clean.content_clean(get_wikipage)
  print(text)
  return text
def use_query(content, sql_query, big_q):
  """Run *sql_query* on a BigQuery client and collect one column.

  Args:
    content: name of the field to extract from each result row.
    sql_query: SQL string to execute.
    big_q: BigQuery client exposing ``query()``.

  Returns:
    List with the ``content`` field of every row in the result.
  """
  query_job = big_q.query(sql_query)
  return [row[content] for row in query_job.result()]
def set_pipeline_options(options):
  """Apply hard-coded GCP project/bucket and runner settings to *options*."""
  # NOTE(review): project id and buckets are hard-coded; consider flags.
  gcloud_options = options.view_as(GoogleCloudOptions)
  gcloud_options.project = 'google.com:new-project-242016'
  gcloud_options.staging_location = 'gs://tj_cloud_bucket/stage'
  gcloud_options.temp_location = 'gs://tj_cloud_bucket/tmp'
  options.view_as(StandardOptions).runner = 'direct'
  options.view_as(WorkerOptions).num_workers = 69
  # Keep the main session pickled so DoFns see module-level imports on workers.
  options.view_as(SetupOptions).save_main_session = True
# pylint: disable=fixme, too-many-locals
# pylint: disable=fixme, too-many-statements
def main(argv):
  """Run Perspective (toxicity) and DLP (PII) over the selected content.

  Content comes from one of --wiki_pagename, --csv_file or --sql_query;
  flagged comments are written to GCS as plain text and as NDJSON.
  """
  parser = argparse.ArgumentParser(description='Process some integers.')
  parser.add_argument('--input_file', help='Location of file to process')
  parser.add_argument('--api_key', help='Location of perspective api key')
  # pylint: disable=fixme, line-too-long
  parser.add_argument('--sql_query', help='choose specifications for query search')
  parser.add_argument('--csv_file', help='choose CSV file to process')
  parser.add_argument('--wiki_pagename', help='insert the talk page name')
  parser.add_argument('--content', help='specify a column in dataset to retreive data from')
  parser.add_argument('--output', help='path for output file in cloud bucket')
  parser.add_argument('--nd_output', help='gcs path to store ndjson results')
  parser.add_argument('--project', help='project id for bigquery table', \
                      default='wikidetox-viz')
  parser.add_argument('--gproject', help='gcp project id')
  parser.add_argument('--temp_location', help='cloud storage path for temp files \
                      must begin with gs://')
  args, pipe_args = parser.parse_known_args(argv)
  options = PipelineOptions(pipe_args)
  set_pipeline_options(options)
  with beam.Pipeline(options=options) as pipeline:
    if args.wiki_pagename:
      wiki_response = get_wikipage(args.wiki_pagename)
      wikitext = wiki_clean(wiki_response)
      text = wikitext.split("\n")
      comments = pipeline | beam.Create(text)
    if args.csv_file:
      comments = pipeline | 'ReadMyFile' >> beam.io.ReadFromText(pd.read_csv(args.csv_file))
    if args.sql_query:
      comments = (
          pipeline
          | 'QueryTable' >> beam.io.Read(beam.io.BigQuerySource(
              query=args.sql_query,
              use_standard_sql=True))
          | beam.Map(lambda elem: elem[args.content]))

    # pylint: disable=fixme, too-few-public-methods
    class NDjson(beam.DoFn):
      """class for NDJson"""
      # pylint: disable=fixme, no-self-use
      # pylint: disable=fixme, inconsistent-return-statements
      def process(self, element):
        """Takes toxicity and dlp results and converts them to NDjson"""
        try:
          dlp_response = dlp_request(dlp, apikey_data, element)
          perspective_response = perspective_request(perspective, element)
          has_pii_bool, pii_type = contains_pii(dlp_response)
          if contains_toxicity(perspective_response) or has_pii_bool:
            data = {'comment': element,
                    'Toxicity': str(perspective_response['attributeScores']
                                    ['TOXICITY']['summaryScore']['value']),
                    'pii_detected': str(pii_type)}
            return [json.dumps(data) + '\n']
        except google_api_errors.HttpError as err:
          # best effort: skip elements the APIs reject
          print('error', err)

    # pylint: disable=fixme, too-few-public-methods
    class GetToxicity(beam.DoFn):
      """The DoFn to perform on each element in the input PCollection"""
      # pylint: disable=fixme, no-self-use
      # pylint: disable=fixme, inconsistent-return-statements
      def process(self, element):
        """Runs every element of collection through perspective and dlp"""
        print(repr(element))
        print('==============================================\n')
        if not element:
          return None
        try:
          dlp_response = dlp_request(dlp, apikey_data, element)
          perspective_response = perspective_request(perspective, element)
          has_pii_bool, pii_type = contains_pii(dlp_response)
          # BUG FIX: the original tested the *function object*
          # (`if contains_toxicity or ...`), which is always truthy, so
          # every non-empty comment was flagged.  Call it on the response.
          if contains_toxicity(perspective_response) or has_pii_bool:
            pii = [element+"\n"+'contains pii?'+"Yes"+"\n"+str(pii_type)+"\n" \
                   +"\n" +"contains TOXICITY?:"+"Yes"
                   +"\n"+str(perspective_response['attributeScores']
                             ['TOXICITY']['summaryScore']['value'])+"\n"
                   +"=========================================="+"\n"]
            return pii
        except google_api_errors.HttpError as err:
          print('error', err)

    apikey_data, perspective, dlp = get_client()
    results = comments \
        | beam.ParDo(GetToxicity())
    json_results = comments \
        | beam.ParDo(NDjson())
    # pylint: disable=fixme, expression-not-assigned
    results | 'WriteToText' >> beam.io.WriteToText(
        'gs://tj_cloud_bucket/beam.txt', num_shards=1)
    json_results | 'WriteToText2' >> beam.io.WriteToText(
        'gs://tj_cloud_bucket/results.json', num_shards=1)
ScazLab/snap_circuits | board_perception/part_classifier.py | 1 | 12583 | import os
import time
import json
from copy import deepcopy
import numpy as np
import cv2
from sklearn.svm import SVC
from sklearn.metrics import confusion_matrix
from sklearn.externals import joblib
# Location of board training data
# Location of board training data
DATA = os.path.join(os.path.dirname(__file__), '../data/')
BOARD_DATA = os.path.join(DATA, 'boards')
TRAIN_DATA_FILE = os.path.join(DATA, 'parts_training.npz')
CLASSIFIERS_FILE = os.path.join(DATA, 'classifiers.pkl')

# Board grid size, in pegs
N_ROWS = 7
N_COLUMNS = 10

# Board dimensions, in mm
H_MARGIN = 13.5  # Margins from first peg to detected board border
W_MARGIN = 14.
H_CELL = 168. / (N_ROWS - 1)  # Cell dimensions from board dimensions
W_CELL = 251.5 / (N_COLUMNS - 1)
H = H_CELL * (N_ROWS - 1) + 2 * H_MARGIN  # Total board dimensions
W = W_CELL * (N_COLUMNS - 1) + 2 * W_MARGIN

# Orientation codes: NORTH=0, EAST=1, SOUTH=2, WEST=3
ORIENTATION_NAMES = ['NORTH', 'EAST', 'SOUTH', 'WEST']
ORIENTATIONS = {name: i for i, name in enumerate(ORIENTATION_NAMES)}
globals().update(ORIENTATIONS)  # Define NORTH, EAST, ... as global variables
INVERSE_ORIENTATIONS = {'NORTH': 'SOUTH', 'EAST': 'WEST',
                        'SOUTH': 'NORTH', 'WEST': 'EAST'}
# 2x2 rotation matrices, one per orientation, given as row-major entries
ROTATION = [(0, -1, 1, 0), (1, 0, 0, 1), (0, 1, -1, 0), (-1, 0, 0, -1)]
ROTATION = [np.array(r).reshape((2, 2)) for r in ROTATION]

# Location of the recognizable tag on parts.
# Expressed as shift from reference peg (generally top left in upright
# position, that is EAST orientation).
PART_TAG_LOCATION = {
    '2': np.array([0, 0.5]),
    '3': np.array([0, 0.5]),
    '4': np.array([0, 1.5]),
    '5': np.array([0, 1.5]),
    '6': np.array([0, 2.5]),
    'B1': np.array([0, 1.5]),
    'C2': np.array([0, 1.5]),
    'C4': np.array([0, 1.5]),
    'D1': np.array([0, 1.5]),
    'D8': np.array([0, 1.5]),
    'D6': np.array([0, 1.5]),
    'L1': np.array([0, 1.5]),
    'M1': np.array([0, 1.5]),
    'Q1': np.array([0.5, 1]),
    'Q2': np.array([0.5, 1]),
    'Q4': np.array([0, 1.5]),
    'R1': np.array([0, 1.5]),
    'R3': np.array([0, 1.5]),
    'R5': np.array([0, 1.5]),
    'RP': np.array([0, 0.5]),
    'RV': np.array([0, 0.5]),
    'S1': np.array([0, 1.5]),
    'S2': np.array([0, 1.5]),
    'U1': np.array([0.5, 1]),
    'U2': np.array([0.5, 1]),
    'U3': np.array([0.5, 1]),
    'U22': np.array([1, 0.5]),
    'U23': np.array([0.5, 1]),
    'U24': np.array([0.5, 0]),
    'WC': np.array([0, 1.5]),
    'X1': np.array([0, 1.5]),
    }

# Note: cells with no identified label are considered labelled as None with
# 0 as an orientation. This convention has to be enforced so that only one
# label is associated with empty cells (this is because the final label of
# a cell is the tuple (label of the part, orientation).
def cell_coordinate(row, column):
    """Return the (vertical, horizontal) position of a peg in mm."""
    y_mm = H_MARGIN + H_CELL * row
    x_mm = W_MARGIN + W_CELL * column
    return (y_mm, x_mm)
def rotate_tag_location(loc, orientation):
    """Rotate a tag offset *loc* by the rotation matrix of *orientation*."""
    rotation_matrix = ROTATION[orientation]
    return rotation_matrix.dot(np.asarray(loc))
def tag_location_from_part(label, location, orientation):
    """Board position of a part's tag: reference peg + rotated tag offset."""
    diff = rotate_tag_location(PART_TAG_LOCATION[label], orientation)
    return location + diff
def inverse_orientation(orientation):
    """Return *orientation* turned half a turn (NORTH<->SOUTH, EAST<->WEST)."""
    half_turn = 2
    return (orientation + half_turn) % 4
def part_reference_from_tag_location(label, loc, orientation):
    # Inverse of tag_location_from_part: applying the offset rotated by the
    # opposite orientation moves from the tag back to the reference peg.
    return tag_location_from_part(label, loc, inverse_orientation(orientation))
def tag_from_part_coordinate(loc, label):
    """Returns the location of the part from its tag and label.

    NOTE(review): this function looks unfinished -- it returns nothing, and
    it unpacks three values from PART_TAG_LOCATION entries, which are
    2-element arrays above; confirm intent before using it anywhere.
    """
    row, col, ori = loc
    drow, dcol, segment = PART_TAG_LOCATION[label]
def reverse_cell_location_triplet(loc):
    """Return the (row, column, orientation) triplet rotated by pi in the board."""
    row, col, orient = loc
    return [(N_ROWS - 1) - row,
            (N_COLUMNS - 1) - col,
            INVERSE_ORIENTATIONS[orient.upper()]]
def reverse_board_state(board):
    """Rotate parts location by pi in the board."""
    rev = deepcopy(board)  # never mutate the caller's board description
    for p in rev["parts"]:
        p["location"] = reverse_cell_location_triplet(p["location"])
    return rev
class CellExtractor:
    """Extracts sub-images centered on board cells from a full-board image."""

    def set_image(self, img):
        # cache the image and the half-window size (a quarter cell) in pixels
        self.img = img
        steps_real = (H_CELL / 4., W_CELL / 4.)
        self.dh, self.dw = self.image_coordinate(steps_real)

    @property
    def width(self):
        # NOTE(review): named "width" but returns shape[0] (image rows);
        # the naming is swapped yet used consistently with H/W below.
        return self.img.shape[0]

    @property
    def height(self):
        return self.img.shape[1]

    def image_coordinate(self, coordinates):
        # Converts coordinates in mm to pixels
        i, j = coordinates
        return (int(i * self.width / H), int(j * self.height / W))

    def cell_image(self, row, column):
        # window of +/- a quarter cell around the cell center
        i, j = self.image_coordinate(cell_coordinate(row, column))
        return self.img[i - self.dh:i + self.dh, j - self.dw:j + self.dw, :]

    def all_peg_indices(self, last_row=True, last_column=True):
        # yield (row, column) peg indices, optionally dropping the last line
        for row in range(N_ROWS - (not last_row)):
            for column in range(N_COLUMNS - (not last_column)):
                yield (row, column)

    def all_horizontal_cells(self):
        # cells between two horizontally adjacent pegs (half-integer column)
        for row, column in self.all_peg_indices(last_column=False):
            yield (row, column + .5, self.cell_image(row, column + .5))

    def all_vertical_cells(self):
        # cells between two vertically adjacent pegs (half-integer row)
        for row, column in self.all_peg_indices(last_row=False):
            yield (row + .5, column, self.cell_image(row + .5, column))
class LabeledCellExtractor:
    """Extracts cells with labels for training.
    """

    def __init__(self, img, board_state):
        self.cell_extr = CellExtractor()
        self.cell_extr.set_image(img)
        self.labels = {}
        self.set_labels(board_state)

    def set_labels(self, board_state):
        """Populates self.labels from board state.

        self.labels[(row, column)] = (label, orientation)
        """
        for part in board_state['parts']:
            label = part['label']
            part_loc = part['location']
            orientation = ORIENTATIONS[part_loc[2].upper()]
            # index by the *tag* cell, which is what the classifier sees
            tag_loc = tag_location_from_part(label, part_loc[:2], orientation)
            self.labels[(tag_loc[0], tag_loc[1])] = (label, orientation)

    def _to_label_and_cell(self, row, column, cell):
        # unlabelled cells use the (None, 0) convention (see module note)
        if (row, column) in self.labels:
            return (self.labels[(row, column)], cell)
        else:
            return ((None, 0), cell)

    def _to_labels_and_cells(self, cells):
        return [self._to_label_and_cell(row, col, cell)
                for (row, col, cell) in cells]

    def labeled_cells(self):
        """Returns lists of horizontal and vertical cells with labels.

        Labels are (label, orientation) and (None, 0) for non-tag cells.
        """
        horizontal = self._to_labels_and_cells(
            self.cell_extr.all_horizontal_cells())
        vertical = self._to_labels_and_cells(
            self.cell_extr.all_vertical_cells())
        return vertical, horizontal
class PartDetector:
    """Detects parts on a board image with the pre-trained cell classifiers.

    Loads one SVM for vertical cells and one for horizontal cells from
    *path* and exposes analyse_board() to turn a board image into a board
    state dictionary.
    """

    def __init__(self, path=CLASSIFIERS_FILE):
        self.extr = CellExtractor()
        self.v_classifier, self.h_classifier = load_classifier(path=path)

    def train_and_save(self):
        # BUG FIX: ``raise NotImplemented`` raises the NotImplemented
        # *singleton*, which is a TypeError in Python 3; the exception
        # class NotImplementedError is the correct thing to raise.
        raise NotImplementedError

    def analyse_board(self, board_image):
        """Return a board-state dict {'parts': [...]} for *board_image*."""
        self.extr.set_image(board_image)
        found = self._find_labels(self.extr.all_vertical_cells(), 'v')
        found += self._find_labels(self.extr.all_horizontal_cells(), 'h')
        # Compute part location from tag location
        parts = [(label,
                  part_reference_from_tag_location(label, loc, orientation),
                  orientation)
                 for (label, loc, orientation) in found
                 if label not in [None, 'None']
                 # Apparently None is converted to string on classifier saving
                 ]
        return {'parts': [
            {'id': i, 'label': label,
             'location': [int(r), int(c), ORIENTATION_NAMES[o]]}
            for i, (label, (r, c), o) in enumerate(parts)
            ]}

    def _find_labels(self, cells, orientation):
        """Classify *cells* ('v' or 'h'); return (label, (row, col), o)."""
        cells = list(cells)
        array = np.vstack([img.flatten() for r, c, img in cells])
        if orientation == 'v':
            classif = self.v_classifier
        elif orientation == 'h':
            classif = self.h_classifier
        else:
            raise ValueError()
        labels = classif.predict(array)
        return [(label, (r, c), orientation)
                for (r, c, img), (label, orientation) in zip(cells, labels)
                if label is not None
                ]
def _get_training_data_from(name, reverse=False, ext='png'):
    """Load one labelled board (json + image), optionally rotated by pi.

    Returns (vertical_cells, horizontal_cells) of (label, image) pairs.
    """
    with open(os.path.join(BOARD_DATA, name + '.json')) as b:
        board = json.load(b)
    if reverse:
        board = reverse_board_state(board)
        name += 'R'  # reversed boards are stored with an 'R' filename suffix
    img_path = os.path.join(BOARD_DATA, name + '.' + ext)
    img = cv2.imread(img_path)
    if img is None:
        raise IOError('Could not find file: {}.'.format(img_path))
    # Create alpha channel
    extr = LabeledCellExtractor(img, board)
    return extr.labeled_cells()
def _reverse_example(label_orientation, cell):
    """Return the training example rotated by pi (flip image and orientation)."""
    (label, orientation) = label_orientation
    if label is not None:
        orientation = inverse_orientation(orientation)
    return ((label, orientation), cell[::-1, ::-1, :])
def save_training_data():
    """Collect labelled cells from all training boards and cache them as .npz."""
    v_examples = []
    h_examples = []
    for name in ['board_{}'.format(1 + i) for i in range(8)]:
        for r in [False, True]:
            v, h = _get_training_data_from(name, reverse=r)
            v_examples.extend(v)
            h_examples.extend(h)
    # Also use reverse image as training data
    v_examples += [_reverse_example(l_o, c) for (l_o, c) in v_examples]
    h_examples += [_reverse_example(l_o, c) for (l_o, c) in h_examples]
    # Stack data into array
    v_array = np.vstack([c.flatten() for (_, c) in v_examples])
    h_array = np.vstack([c.flatten() for (_, c) in h_examples])
    # labels are structured arrays of (part label, orientation) pairs
    v_label = np.array([l_o for (l_o, _) in v_examples],
                       dtype=[('label', 'S8'), ('orientation', 'i1')])
    h_label = np.array([l_o for (l_o, _) in h_examples],
                       dtype=[('label', 'S8'), ('orientation', 'i1')])
    np.savez(TRAIN_DATA_FILE,
             v_array=v_array, h_array=h_array,
             v_label=v_label, h_label=h_label)
def check_training_data():
    """Return True if the cached training-data archive exists."""
    return os.path.isfile(TRAIN_DATA_FILE)
def load_training_data():
    """Load (v_array, v_label, h_array, h_label) from the .npz archive."""
    d = np.load(TRAIN_DATA_FILE)
    return (d['v_array'], d['v_label'], d['h_array'], d['h_label'])
def train_classifier():
    """Fit linear SVMs for horizontal and vertical cells and persist them.

    Trains one classifier per orientation on the cached training data,
    dumps the (vertical, horizontal) pair to CLASSIFIERS_FILE, and prints
    timing plus training-set confusion matrices.
    """
    v_array, v_label, h_array, h_label = load_training_data()

    print("Training horizontal classifier")
    start = time.time()
    h_classifier = SVC(kernel='linear')
    h_classifier.fit(h_array, h_label)
    print("Training time: {}s".format(time.time() - start))

    print("Training vertical classifier")
    start = time.time()
    v_classifier = SVC(kernel='linear')
    v_classifier.fit(v_array, v_label)
    print("Training time: {}s".format(time.time() - start))

    joblib.dump((v_classifier, h_classifier), CLASSIFIERS_FILE)

    # Report training error (predictions on the data the models were fit to)
    start = time.time()
    h_predicted = h_classifier.predict(h_array)
    v_predicted = v_classifier.predict(v_array)
    print("Prediction time: {}s".format(time.time() - start))
    for truth, predicted in ((h_label, h_predicted), (v_label, v_predicted)):
        print("Confusion matrix:\n%s" % confusion_matrix(
            [str(t) for t in truth],
            [str(t) for t in predicted]))
def evaluate_classifier():
    """Score the persisted classifiers against the held-out evaluation board.

    Loads the evaluation board, augments it with 180-degree-rotated copies,
    and prints a confusion matrix plus the sorted label set for each
    orientation (vertical first, then horizontal).
    """
    v_classif, h_classif = load_classifier()
    v_cells, h_cells = _get_training_data_from('board_evaluation')

    # Also use reverse image as evaluation data
    for cells in (v_cells, h_cells):
        cells.extend([_reverse_example(lo, cell) for (lo, cell) in cells])

    label_dtype = [('label', 'S8'), ('orientation', 'i1')]
    print("Confusion matrices:")
    for classif, cells in ((v_classif, v_cells), (h_classif, h_cells)):
        array = np.vstack([cell.flatten() for (_, cell) in cells])
        labels = np.array([lo for (lo, _) in cells], dtype=label_dtype)
        predictions = classif.predict(array)
        truth = [str(t) for t in list(labels)]
        guesses = [str(t) for t in list(predictions)]
        print(confusion_matrix(truth, guesses))
        print(sorted(set(truth + guesses)))
def load_classifier(path=CLASSIFIERS_FILE):
    """Deserialize and return the (v_classifier, h_classifier) pair saved by train_classifier."""
    return joblib.load(path)
"""
This file was written to test matplotlib's autolegend placement
algorithm, but shows lots of different ways to create legends so is
useful as a general example
Thanks to John Gill and Phil ?? for help at the matplotlib sprint at
pycon 2005 where the auto-legend support was written.
"""
from pylab import *
import sys
rcParams['legend.loc'] = 'best'  # exercise the automatic legend placement under test

N = 100
x = arange(N)  # shared x data reused by most example figures
def fig_1():
    """High-frequency sine curve; legend made semi-transparent via framealpha."""
    figure(1)
    t = arange(0, 40.0 * pi, 0.1)
    l, = plot(t, 100*sin(t), 'r', label='sine')
    legend(framealpha=0.5)


def fig_2():
    """Single rising scatter (y = x)."""
    figure(2)
    plot(x, 'o', label='x=y')
    legend()


def fig_3():
    """Single falling scatter (y = -x)."""
    figure(3)
    plot(x, -x, 'o', label='x= -y')
    legend()


def fig_4():
    """Two horizontal lines at y=1 and y=-1."""
    figure(4)
    plot(x, ones(len(x)), 'o', label='y=1')
    plot(x, -ones(len(x)), 'o', label='y=-1')
    legend()


def fig_5():
    """Normalized histogram of Gaussian samples with the fitted density overlaid.

    Legend mixes a Line2D handle and a histogram patch handle.
    """
    figure(5)
    n, bins, patches = hist(randn(1000), 40, normed=1)
    l, = plot(bins, normpdf(bins, 0.0, 1.0), 'r--', label='fit', linewidth=3)
    legend([l, patches[0]], ['fit', 'hist'])


def fig_6():
    """Two crossing diagonal scatters."""
    figure(6)
    plot(x, 50-x, 'o', label='y=1')
    plot(x, x-50, 'o', label='y=-1')
    legend()


def fig_7():
    """Parabola plus two lines through the origin, with TeX-formatted labels."""
    figure(7)
    xx = x - (N/2.0)
    plot(xx, (xx*xx)-1225, 'bo', label='$y=x^2$')
    plot(xx, 25*xx, 'go', label='$y=25x$')
    plot(xx, -25*xx, 'mo', label='$y=-25x$')
    legend()


def fig_8():
    """Two overlapping bar series; legend built from individual bar patches."""
    figure(8)
    b1 = bar(x, x, color='m')
    b2 = bar(x, x[::-1], color='g')
    legend([b1[0], b2[0]], ['up', 'down'])


def fig_9():
    """Same as fig_8 but with negative heights."""
    figure(9)
    b1 = bar(x, -x)
    b2 = bar(x, -x[::-1], color='r')
    legend([b1[0], b2[0]], ['down', 'up'])


def fig_10():
    """Four bar series offset above and below the axis; four-entry legend."""
    figure(10)
    b1 = bar(x, x, bottom=-100, color='m')
    b2 = bar(x, x[::-1], bottom=-100, color='g')
    b3 = bar(x, -x, bottom=100)
    b4 = bar(x, -x[::-1], bottom=100, color='r')
    legend([b1[0], b2[0], b3[0], b4[0]], ['bottom right', 'bottom left',
                                          'top left', 'top right'])
if __name__ == '__main__':
    nfigs = 10
    # Collect the figure numbers requested on the command line, silently
    # ignoring arguments that are not integers; default to all figures.
    requested = []
    for arg in sys.argv[1:]:
        try:
            requested.append(int(arg))
        except ValueError:
            pass
    if len(requested) == 0:
        requested = range(1, nfigs + 1)

    for num in requested:
        globals()["fig_%d" % num]()

    show()
#! /usr/bin/env python
"""
This script performs Differential Expression Analysis
using DESeq2 or Scran + DESeq2 on ST datasets.
The script can take one or several datasets with the following format:
GeneA GeneB GeneC
1x1
1x2
...
Ideally, each dataset (matrix) would correspond to a region
of interest (Selection) to be compared.
The script also needs the list of comparisons to make (1 vs 2, etc..)
Each comparison will be performed between datasets and the input should be:
DATASET-DATASET DATASET-DATASET ...
The script will output the list of up-regulated and down-regulated genes
for each possible DEA comparison (between tables) as well as a set of volcano plots.
NOTE: soon Monocle and edgeR will be added
@Author Jose Fernandez Navarro <jose.fernandez.navarro@scilifelab.se>
"""
import argparse
import sys
import os
import numpy as np
import pandas as pd
from stanalysis.normalization import RimportLibrary
from stanalysis.preprocessing import compute_size_factors, aggregate_datatasets, remove_noise
from stanalysis.visualization import volcano
from stanalysis.analysis import deaDESeq2, deaScranDESeq2
import matplotlib.pyplot as plt
def main(counts_table_files, conditions, comparisons, outdir, fdr,
         normalization, num_exp_spots, num_exp_genes, min_gene_expression):
    """Run a differential expression analysis over one or more ST matrices.

    :param counts_table_files: paths to count matrices (spots as rows, genes as columns)
    :param conditions: "DATASET:CONDITION" assignments, dataset indices starting at 0
    :param comparisons: "COND-COND" pairs to test against each other
    :param outdir: output directory (falls back to the CWD when missing/invalid)
    :param fdr: adjusted p-value cut-off used for filtering and the volcano plots
    :param normalization: "DESeq2" or "Scran" (selects the DEA backend)
    :param num_exp_spots: percentage noise filter on spots per gene
    :param num_exp_genes: percentage noise filter on genes per spot
    :param min_gene_expression: minimum count for a gene to count as expressed

    Writes the merged matrix, per-comparison DEA tables (full and
    FDR-filtered) and a volcano plot per comparison into *outdir*.
    Exits with status 1 on invalid input or a DEA failure.
    """
    if len(counts_table_files) == 0 or \
            any(not os.path.isfile(f) for f in counts_table_files):
        sys.stderr.write("Error, input file/s not present or invalid format\n")
        sys.exit(1)

    if not outdir or not os.path.isdir(outdir):
        outdir = os.getcwd()
    print("Output folder {}".format(outdir))

    # Merge input datasets (spots are rows and genes are columns)
    counts = aggregate_datatasets(counts_table_files)

    # Remove noisy spots and genes
    counts = remove_noise(counts, num_exp_genes / 100.0, num_exp_spots / 100.0,
                          min_expression=min_gene_expression)

    # Get the comparisons as (condition_a, condition_b) pairs
    comparisons = [c.split("-") for c in comparisons]

    # Map dataset index -> condition label
    conds_repl = dict()
    for cond in conditions:
        d, c = cond.split(":")
        conds_repl[d] = c
    conds = list()
    for spot in counts.index:
        index = spot.split("_")[0]
        try:
            conds.append(conds_repl[index])
        except KeyError:
            # Spot belongs to a dataset with no condition assigned: drop it
            counts.drop(spot, axis=0, inplace=True)
            continue

    # Write the conditions to a file (NOTE: written to the CWD, not outdir,
    # to preserve the historical behavior scripts may rely on)
    with open("conditions.txt", "w") as filehandler:
        for cond in conds:
            filehandler.write("{}\n".format(cond))

    # Keep only comparisons whose conditions are actually present
    comparisons = [c for c in comparisons if c[0] in conds and c[1] in conds]
    if len(comparisons) == 0:
        sys.stderr.write("Error, the vector of comparisons is invalid\n")
        sys.exit(1)

    print("Doing DEA for the comparisons {} with {} spots and {} genes".format(
        comparisons, len(counts.index), len(counts.columns)))
    counts.to_csv(os.path.join(outdir, "merged_matrix.tsv"), sep="\t")

    # The DEA backends expect genes as rows / spots as columns
    counts = counts.transpose()
    try:
        # BUG FIX: was `normalization in "DESeq2"`, a substring test that
        # happened to work only because argparse restricts the choices;
        # use a proper equality comparison.
        if normalization == "DESeq2":
            dea_results = deaDESeq2(counts, conds, comparisons, alpha=fdr, size_factors=None)
        else:
            dea_results = deaScranDESeq2(counts, conds, comparisons, alpha=fdr, scran_clusters=False)
    except Exception as e:
        sys.stderr.write("Error while performing DEA " + str(e) + "\n")
        sys.exit(1)

    assert(len(comparisons) == len(dea_results))
    for dea_result, comp in zip(dea_results, comparisons):
        # Drop genes with undefined adjusted p-values and sort by significance
        dea_result = dea_result.loc[pd.notnull(dea_result["padj"])]
        dea_result = dea_result.sort_values(by=["padj"], ascending=True, axis=0)
        print("Writing DE genes to output using a FDR cut-off of {}".format(fdr))
        dea_result.to_csv(os.path.join(outdir,
                                       "dea_results_{}_vs_{}.tsv"
                                       .format(comp[0], comp[1])), sep="\t")
        # BUG FIX: `.ix` was deprecated and later removed from pandas;
        # `.loc` with a boolean mask is the supported equivalent.
        dea_result.loc[dea_result["padj"] <= fdr].to_csv(
            os.path.join(outdir,
                         "filtered_dea_results_{}_vs_{}.tsv"
                         .format(comp[0], comp[1])), sep="\t")
        # Volcano plot
        print("Writing volcano plot to output")
        outfile = os.path.join(outdir, "volcano_{}_vs_{}.pdf".format(comp[0], comp[1]))
        volcano(dea_result, fdr, outfile)
if __name__ == '__main__':
    # Command-line entry point: declare all options (order here defines the
    # --help output) and delegate to main().
    parser = argparse.ArgumentParser(description=__doc__,
                                     formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument("--counts-table-files", required=True, nargs='+', type=str,
                        help="One or more matrices with gene counts per feature/spot (genes as columns)")
    parser.add_argument("--normalization", default="DESeq2", metavar="[STR]",
                        type=str,
                        choices=["DESeq2", "Scran"],
                        help="Normalize the counts using:\n" \
                        "DESeq2 = DESeq2::estimateSizeFactors(counts)\n" \
                        "Scran = Deconvolution Sum Factors (Marioni et al)\n" \
                        "(default: %(default)s)")
    parser.add_argument("--conditions", required=True, nargs='+', type=str,
                        help="One of more tuples that represent what conditions to give to each dataset.\n" \
                        "The notation is simple: DATASET:CONDITION DATASET:CONDITION ...\n" \
                        "For example 0:A 1:A 2:B 3:C. Note that datasets numbers start by 0.")
    parser.add_argument("--comparisons", required=True, nargs='+', type=str,
                        help="One of more tuples that represent what comparisons to make in the DEA.\n" \
                        "The notation is simple: CONDITION-CONDITION CONDITION-CONDITION ...\n" \
                        "For example A-B A-C. Note that the conditions must be the same as in the parameter --conditions.")
    parser.add_argument("--num-exp-genes", default=10, metavar="[INT]", type=int, choices=range(0, 100),
                        help="The percentage of number of expressed genes (>= --min-gene-expression) a spot\n" \
                        "must have to be kept from the distribution of all expressed genes (default: %(default)s)")
    parser.add_argument("--num-exp-spots", default=1, metavar="[INT]", type=int, choices=range(0, 100),
                        help="The percentage of number of expressed spots a gene " \
                        "must have to be kept from the total number of spots (default: %(default)s)")
    parser.add_argument("--min-gene-expression", default=1, type=int, choices=range(1, 50),
                        help="The minimum count (number of reads) a gene must have in a spot to be "
                        "considered expressed (default: %(default)s)")
    parser.add_argument("--fdr", type=float, default=0.01,
                        help="The FDR minimum confidence threshold (default: %(default)s)")
    parser.add_argument("--outdir", help="Path to output dir")
    args = parser.parse_args()
    main(args.counts_table_files, args.conditions, args.comparisons, args.outdir,
         args.fdr, args.normalization, args.num_exp_spots, args.num_exp_genes,
         args.min_gene_expression)
"""
Testing for the bagging ensemble module (sklearn.ensemble.bagging).
"""
# Author: Gilles Louppe
# License: BSD 3 clause
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.dummy import DummyClassifier, DummyRegressor
from sklearn.grid_search import GridSearchCV, ParameterGrid
from sklearn.ensemble import BaggingClassifier, BaggingRegressor
from sklearn.linear_model import Perceptron, LogisticRegression
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.svm import SVC, SVR
from sklearn.pipeline import make_pipeline
from sklearn.feature_selection import SelectKBest
from sklearn.cross_validation import train_test_split
from sklearn.datasets import load_boston, load_iris, make_hastie_10_2
from sklearn.utils import check_random_state
from scipy.sparse import csc_matrix, csr_matrix
# Shared module-level fixtures: a seeded RNG plus shuffled copies of the
# iris (classification) and boston (regression) datasets.  NOTE: tests that
# mutate these fixtures (e.g. test_gridsearch relabels iris) see each
# other's changes.
rng = check_random_state(0)

# also load the iris dataset
# and randomly permute it
iris = load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]

# also load the boston dataset
# and randomly permute it
boston = load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
def test_classification():
    """Smoke-test BaggingClassifier across a parameter grid and base estimators.

    The same `rng` object is threaded through every fit, so statement order
    determines the random stream; no assertions, just "does not raise".
    """
    # Check classification for various parameter settings.
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(iris.data,
                                                        iris.target,
                                                        random_state=rng)
    grid = ParameterGrid({"max_samples": [0.5, 1.0],
                          "max_features": [1, 2, 4],
                          "bootstrap": [True, False],
                          "bootstrap_features": [True, False]})

    for base_estimator in [None,
                           DummyClassifier(),
                           Perceptron(),
                           DecisionTreeClassifier(),
                           KNeighborsClassifier(),
                           SVC()]:
        for params in grid:
            BaggingClassifier(base_estimator=base_estimator,
                              random_state=rng,
                              **params).fit(X_train, y_train).predict(X_test)
def test_sparse_classification():
    """Sparse (CSC/CSR) training must give the same predictions as dense.

    A recording SVC subclass verifies the base estimators actually received
    sparse input.
    """
    # Check classification for various parameter settings on sparse input.

    class CustomSVC(SVC):
        """SVC variant that records the nature of the training set"""

        def fit(self, X, y):
            super(CustomSVC, self).fit(X, y)
            # Remember what container type the estimator was trained on.
            self.data_type_ = type(X)
            return self

    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(iris.data,
                                                        iris.target,
                                                        random_state=rng)
    parameter_sets = [
        {"max_samples": 0.5,
         "max_features": 2,
         "bootstrap": True,
         "bootstrap_features": True},
        {"max_samples": 1.0,
         "max_features": 4,
         "bootstrap": True,
         "bootstrap_features": True},
        {"max_features": 2,
         "bootstrap": False,
         "bootstrap_features": True},
        {"max_samples": 0.5,
         "bootstrap": True,
         "bootstrap_features": False},
    ]

    for sparse_format in [csc_matrix, csr_matrix]:
        X_train_sparse = sparse_format(X_train)
        X_test_sparse = sparse_format(X_test)
        for params in parameter_sets:
            for f in ['predict', 'predict_proba', 'predict_log_proba', 'decision_function']:
                # Trained on sparse format
                sparse_classifier = BaggingClassifier(
                    base_estimator=CustomSVC(),
                    random_state=1,
                    **params
                ).fit(X_train_sparse, y_train)
                sparse_results = getattr(sparse_classifier, f)(X_test_sparse)

                # Trained on dense format
                dense_classifier = BaggingClassifier(
                    base_estimator=CustomSVC(),
                    random_state=1,
                    **params
                ).fit(X_train, y_train)
                dense_results = getattr(dense_classifier, f)(X_test)
                assert_array_equal(sparse_results, dense_results)

                sparse_type = type(X_train_sparse)
                types = [i.data_type_ for i in sparse_classifier.estimators_]

                assert all([t == sparse_type for t in types])
def test_regression():
    """Smoke-test BaggingRegressor across a parameter grid and base estimators."""
    # Check regression for various parameter settings.
    rng = check_random_state(0)
    # Only the first 50 samples are used to keep the grid affordable.
    X_train, X_test, y_train, y_test = train_test_split(boston.data[:50],
                                                        boston.target[:50],
                                                        random_state=rng)
    grid = ParameterGrid({"max_samples": [0.5, 1.0],
                          "max_features": [0.5, 1.0],
                          "bootstrap": [True, False],
                          "bootstrap_features": [True, False]})

    for base_estimator in [None,
                           DummyRegressor(),
                           DecisionTreeRegressor(),
                           KNeighborsRegressor(),
                           SVR()]:
        for params in grid:
            BaggingRegressor(base_estimator=base_estimator,
                             random_state=rng,
                             **params).fit(X_train, y_train).predict(X_test)
def test_sparse_regression():
    """Sparse (CSC/CSR) training must give the same predictions as dense.

    A recording SVR subclass verifies the base estimators actually received
    sparse input.
    """
    # Check regression for various parameter settings on sparse input.
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(boston.data[:50],
                                                        boston.target[:50],
                                                        random_state=rng)

    class CustomSVR(SVR):
        """SVR variant that records the nature of the training set"""
        # (docstring fixed: this subclasses SVR, not SVC)

        def fit(self, X, y):
            super(CustomSVR, self).fit(X, y)
            # Remember what container type the estimator was trained on.
            self.data_type_ = type(X)
            return self

    parameter_sets = [
        {"max_samples": 0.5,
         "max_features": 2,
         "bootstrap": True,
         "bootstrap_features": True},
        {"max_samples": 1.0,
         "max_features": 4,
         "bootstrap": True,
         "bootstrap_features": True},
        {"max_features": 2,
         "bootstrap": False,
         "bootstrap_features": True},
        {"max_samples": 0.5,
         "bootstrap": True,
         "bootstrap_features": False},
    ]

    for sparse_format in [csc_matrix, csr_matrix]:
        X_train_sparse = sparse_format(X_train)
        X_test_sparse = sparse_format(X_test)
        for params in parameter_sets:
            # Trained on sparse format
            sparse_classifier = BaggingRegressor(
                base_estimator=CustomSVR(),
                random_state=1,
                **params
            ).fit(X_train_sparse, y_train)
            sparse_results = sparse_classifier.predict(X_test_sparse)

            # Trained on dense format
            dense_results = BaggingRegressor(
                base_estimator=CustomSVR(),
                random_state=1,
                **params
            ).fit(X_train, y_train).predict(X_test)

            sparse_type = type(X_train_sparse)
            types = [i.data_type_ for i in sparse_classifier.estimators_]

            # BUG FIX: the dense/sparse equality assertion was duplicated;
            # assert each condition exactly once.
            assert_array_equal(sparse_results, dense_results)
            assert all([t == sparse_type for t in types])
def test_bootstrap_samples():
    """Bootstrapped sample draws should make the base trees imperfect on train data."""
    # Test that bootstraping samples generate non-perfect base estimators.
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(boston.data,
                                                        boston.target,
                                                        random_state=rng)

    base_estimator = DecisionTreeRegressor().fit(X_train, y_train)

    # without bootstrap, all trees are perfect on the training set
    ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
                                max_samples=1.0,
                                bootstrap=False,
                                random_state=rng).fit(X_train, y_train)
    assert_equal(base_estimator.score(X_train, y_train),
                 ensemble.score(X_train, y_train))

    # with bootstrap, trees are no longer perfect on the training set
    ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
                                max_samples=1.0,
                                bootstrap=True,
                                random_state=rng).fit(X_train, y_train)
    assert_greater(base_estimator.score(X_train, y_train),
                   ensemble.score(X_train, y_train))


def test_bootstrap_features():
    """Feature bootstrapping should (only) introduce duplicate feature indices."""
    # Test that bootstraping features may generate dupplicate features.
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(boston.data,
                                                        boston.target,
                                                        random_state=rng)

    # without replacement every feature appears exactly once
    ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
                                max_features=1.0,
                                bootstrap_features=False,
                                random_state=rng).fit(X_train, y_train)

    for features in ensemble.estimators_features_:
        assert_equal(boston.data.shape[1], np.unique(features).shape[0])

    # with replacement some features are drawn more than once
    ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
                                max_features=1.0,
                                bootstrap_features=True,
                                random_state=rng).fit(X_train, y_train)

    for features in ensemble.estimators_features_:
        assert_greater(boston.data.shape[1], np.unique(features).shape[0])
def test_probability():
    """predict_proba rows must sum to 1 and agree with exp(predict_log_proba)."""
    # Predict probabilities.
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(iris.data,
                                                        iris.target,
                                                        random_state=rng)

    # divide/invalid warnings are expected when some classes are missing
    with np.errstate(divide="ignore", invalid="ignore"):
        # Normal case
        ensemble = BaggingClassifier(base_estimator=DecisionTreeClassifier(),
                                     random_state=rng).fit(X_train, y_train)

        assert_array_almost_equal(np.sum(ensemble.predict_proba(X_test),
                                         axis=1),
                                  np.ones(len(X_test)))

        assert_array_almost_equal(ensemble.predict_proba(X_test),
                                  np.exp(ensemble.predict_log_proba(X_test)))

        # Degenerate case, where some classes are missing
        ensemble = BaggingClassifier(base_estimator=LogisticRegression(),
                                     random_state=rng,
                                     max_samples=5).fit(X_train, y_train)

        assert_array_almost_equal(np.sum(ensemble.predict_proba(X_test),
                                         axis=1),
                                  np.ones(len(X_test)))

        assert_array_almost_equal(ensemble.predict_proba(X_test),
                                  np.exp(ensemble.predict_log_proba(X_test)))


def test_oob_score_classification():
    """The OOB score should approximate held-out accuracy within 0.1."""
    # Check that oob prediction is a good estimation of the generalization
    # error.
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(iris.data,
                                                        iris.target,
                                                        random_state=rng)

    for base_estimator in [DecisionTreeClassifier(), SVC()]:
        clf = BaggingClassifier(base_estimator=base_estimator,
                                n_estimators=100,
                                bootstrap=True,
                                oob_score=True,
                                random_state=rng).fit(X_train, y_train)

        test_score = clf.score(X_test, y_test)

        assert_less(abs(test_score - clf.oob_score_), 0.1)

        # Test with few estimators: too few OOB samples triggers a warning
        assert_warns(UserWarning,
                     BaggingClassifier(base_estimator=base_estimator,
                                       n_estimators=1,
                                       bootstrap=True,
                                       oob_score=True,
                                       random_state=rng).fit,
                     X_train,
                     y_train)
def test_oob_score_regression():
    """The OOB score should approximate held-out R^2 within 0.1 (regression)."""
    # Check that oob prediction is a good estimation of the generalization
    # error.
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(boston.data,
                                                        boston.target,
                                                        random_state=rng)

    clf = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
                           n_estimators=50,
                           bootstrap=True,
                           oob_score=True,
                           random_state=rng).fit(X_train, y_train)

    test_score = clf.score(X_test, y_test)

    assert_less(abs(test_score - clf.oob_score_), 0.1)

    # Test with few estimators: too few OOB samples triggers a warning
    assert_warns(UserWarning,
                 BaggingRegressor(base_estimator=DecisionTreeRegressor(),
                                  n_estimators=1,
                                  bootstrap=True,
                                  oob_score=True,
                                  random_state=rng).fit,
                 X_train,
                 y_train)


def test_single_estimator():
    """A 1-estimator bag without resampling must equal the bare base estimator."""
    # Check singleton ensembles.
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(boston.data,
                                                        boston.target,
                                                        random_state=rng)

    clf1 = BaggingRegressor(base_estimator=KNeighborsRegressor(),
                            n_estimators=1,
                            bootstrap=False,
                            bootstrap_features=False,
                            random_state=rng).fit(X_train, y_train)

    clf2 = KNeighborsRegressor().fit(X_train, y_train)

    assert_array_equal(clf1.predict(X_test), clf2.predict(X_test))
def test_error():
    """Invalid max_samples/max_features values must raise ValueError at fit time."""
    # Test that it gives proper exception on deficient input.
    X, y = iris.data, iris.target
    base = DecisionTreeClassifier()

    # Test max_samples
    assert_raises(ValueError,
                  BaggingClassifier(base, max_samples=-1).fit, X, y)
    assert_raises(ValueError,
                  BaggingClassifier(base, max_samples=0.0).fit, X, y)
    assert_raises(ValueError,
                  BaggingClassifier(base, max_samples=2.0).fit, X, y)
    assert_raises(ValueError,
                  BaggingClassifier(base, max_samples=1000).fit, X, y)
    assert_raises(ValueError,
                  BaggingClassifier(base, max_samples="foobar").fit, X, y)

    # Test max_features
    assert_raises(ValueError,
                  BaggingClassifier(base, max_features=-1).fit, X, y)
    assert_raises(ValueError,
                  BaggingClassifier(base, max_features=0.0).fit, X, y)
    assert_raises(ValueError,
                  BaggingClassifier(base, max_features=2.0).fit, X, y)
    assert_raises(ValueError,
                  BaggingClassifier(base, max_features=5).fit, X, y)
    assert_raises(ValueError,
                  BaggingClassifier(base, max_features="foobar").fit, X, y)

    # Test support of decision_function: absent when the base lacks it
    assert_false(hasattr(BaggingClassifier(base).fit(X, y), 'decision_function'))
def test_parallel_classification():
    """Predictions must be identical regardless of n_jobs (1, 2 or 3)."""
    # Check parallel classification.
    rng = check_random_state(0)

    # Classification
    X_train, X_test, y_train, y_test = train_test_split(iris.data,
                                                        iris.target,
                                                        random_state=rng)

    ensemble = BaggingClassifier(DecisionTreeClassifier(),
                                 n_jobs=3,
                                 random_state=0).fit(X_train, y_train)

    # predict_proba: changing n_jobs after fit must not change the result
    ensemble.set_params(n_jobs=1)
    y1 = ensemble.predict_proba(X_test)
    ensemble.set_params(n_jobs=2)
    y2 = ensemble.predict_proba(X_test)
    assert_array_almost_equal(y1, y2)

    ensemble = BaggingClassifier(DecisionTreeClassifier(),
                                 n_jobs=1,
                                 random_state=0).fit(X_train, y_train)

    y3 = ensemble.predict_proba(X_test)
    assert_array_almost_equal(y1, y3)

    # decision_function: same invariance for margin-based output
    ensemble = BaggingClassifier(SVC(),
                                 n_jobs=3,
                                 random_state=0).fit(X_train, y_train)

    ensemble.set_params(n_jobs=1)
    decisions1 = ensemble.decision_function(X_test)
    ensemble.set_params(n_jobs=2)
    decisions2 = ensemble.decision_function(X_test)
    assert_array_almost_equal(decisions1, decisions2)

    ensemble = BaggingClassifier(SVC(),
                                 n_jobs=1,
                                 random_state=0).fit(X_train, y_train)

    decisions3 = ensemble.decision_function(X_test)
    assert_array_almost_equal(decisions1, decisions3)


def test_parallel_regression():
    """Regressor predictions must be identical regardless of n_jobs."""
    # Check parallel regression.
    rng = check_random_state(0)

    X_train, X_test, y_train, y_test = train_test_split(boston.data,
                                                        boston.target,
                                                        random_state=rng)

    ensemble = BaggingRegressor(DecisionTreeRegressor(),
                                n_jobs=3,
                                random_state=0).fit(X_train, y_train)

    ensemble.set_params(n_jobs=1)
    y1 = ensemble.predict(X_test)
    ensemble.set_params(n_jobs=2)
    y2 = ensemble.predict(X_test)
    assert_array_almost_equal(y1, y2)

    ensemble = BaggingRegressor(DecisionTreeRegressor(),
                                n_jobs=1,
                                random_state=0).fit(X_train, y_train)

    y3 = ensemble.predict(X_test)
    assert_array_almost_equal(y1, y3)
def test_gridsearch():
    """Bagging hyper-parameters (incl. nested base_estimator__*) are searchable."""
    # Check that bagging ensembles can be grid-searched.
    # Transform iris into a binary classification task
    # NOTE: this mutates the module-level iris fixture in place.
    X, y = iris.data, iris.target
    y[y == 2] = 1

    # Grid search with scoring based on decision_function
    parameters = {'n_estimators': (1, 2),
                  'base_estimator__C': (1, 2)}

    GridSearchCV(BaggingClassifier(SVC()),
                 parameters,
                 scoring="roc_auc").fit(X, y)
def test_base_estimator():
    """base_estimator=None must default to a decision tree; explicit ones are kept."""
    # Check base_estimator and its default values.
    rng = check_random_state(0)

    # Classification
    X_train, X_test, y_train, y_test = train_test_split(iris.data,
                                                        iris.target,
                                                        random_state=rng)

    ensemble = BaggingClassifier(None,
                                 n_jobs=3,
                                 random_state=0).fit(X_train, y_train)

    assert_true(isinstance(ensemble.base_estimator_, DecisionTreeClassifier))

    ensemble = BaggingClassifier(DecisionTreeClassifier(),
                                 n_jobs=3,
                                 random_state=0).fit(X_train, y_train)

    assert_true(isinstance(ensemble.base_estimator_, DecisionTreeClassifier))

    ensemble = BaggingClassifier(Perceptron(),
                                 n_jobs=3,
                                 random_state=0).fit(X_train, y_train)

    assert_true(isinstance(ensemble.base_estimator_, Perceptron))

    # Regression
    X_train, X_test, y_train, y_test = train_test_split(boston.data,
                                                        boston.target,
                                                        random_state=rng)

    ensemble = BaggingRegressor(None,
                                n_jobs=3,
                                random_state=0).fit(X_train, y_train)

    assert_true(isinstance(ensemble.base_estimator_, DecisionTreeRegressor))

    ensemble = BaggingRegressor(DecisionTreeRegressor(),
                                n_jobs=3,
                                random_state=0).fit(X_train, y_train)

    assert_true(isinstance(ensemble.base_estimator_, DecisionTreeRegressor))

    ensemble = BaggingRegressor(SVR(),
                                n_jobs=3,
                                random_state=0).fit(X_train, y_train)
    assert_true(isinstance(ensemble.base_estimator_, SVR))


def test_bagging_with_pipeline():
    """A Pipeline base estimator must work with column-subsampled input."""
    estimator = BaggingClassifier(make_pipeline(SelectKBest(k=1),
                                                DecisionTreeClassifier()),
                                  max_features=2)
    estimator.fit(iris.data, iris.target)
class DummyZeroEstimator(BaseEstimator):
    """Degenerate estimator that always predicts the first class seen in fit."""

    def fit(self, X, y):
        self.classes_ = np.unique(y)
        return self

    def predict(self, X):
        # Index 0 for every sample -> constant prediction of classes_[0]
        return self.classes_[np.zeros(X.shape[0], dtype=int)]
def test_bagging_sample_weight_unsupported_but_passed():
    """Passing sample_weight to a base estimator that ignores it must raise."""
    estimator = BaggingClassifier(DummyZeroEstimator())
    rng = check_random_state(0)

    estimator.fit(iris.data, iris.target).predict(iris.data)
    assert_raises(ValueError, estimator.fit, iris.data, iris.target,
                  sample_weight=rng.randint(10, size=(iris.data.shape[0])))


def test_warm_start(random_state=42):
    """Incremental warm-start fits must grow the ensemble to the requested size."""
    # Test if fitting incrementally with warm start gives a forest of the
    # right size and the same results as a normal fit.
    X, y = make_hastie_10_2(n_samples=20, random_state=1)

    clf_ws = None
    for n_estimators in [5, 10]:
        if clf_ws is None:
            clf_ws = BaggingClassifier(n_estimators=n_estimators,
                                       random_state=random_state,
                                       warm_start=True)
        else:
            clf_ws.set_params(n_estimators=n_estimators)
        clf_ws.fit(X, y)
        assert_equal(len(clf_ws), n_estimators)

    clf_no_ws = BaggingClassifier(n_estimators=10, random_state=random_state,
                                  warm_start=False)
    clf_no_ws.fit(X, y)

    # Same member seeds => equivalent ensembles
    assert_equal(set([tree.random_state for tree in clf_ws]),
                 set([tree.random_state for tree in clf_no_ws]))
def test_warm_start_smaller_n_estimators():
    """Shrinking n_estimators on a warm-started model must raise."""
    # Test if warm start'ed second fit with smaller n_estimators raises error.
    X, y = make_hastie_10_2(n_samples=20, random_state=1)
    clf = BaggingClassifier(n_estimators=5, warm_start=True)
    clf.fit(X, y)
    clf.set_params(n_estimators=4)
    assert_raises(ValueError, clf.fit, X, y)


def test_warm_start_equal_n_estimators():
    """Refitting with unchanged n_estimators must warn and change nothing."""
    # Test that nothing happens when fitting without increasing n_estimators
    X, y = make_hastie_10_2(n_samples=20, random_state=1)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43)

    clf = BaggingClassifier(n_estimators=5, warm_start=True, random_state=83)
    clf.fit(X_train, y_train)

    y_pred = clf.predict(X_test)
    # modify X to nonsense values, this should not change anything
    X_train += 1.

    assert_warns_message(UserWarning,
                         "Warm-start fitting without increasing n_estimators does not",
                         clf.fit, X_train, y_train)
    assert_array_equal(y_pred, clf.predict(X_test))


def test_warm_start_equivalence():
    """A 5+5 warm-started ensemble must predict like a single 10-estimator fit."""
    # warm started classifier with 5+5 estimators should be equivalent to
    # one classifier with 10 estimators
    X, y = make_hastie_10_2(n_samples=20, random_state=1)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43)

    clf_ws = BaggingClassifier(n_estimators=5, warm_start=True,
                               random_state=3141)
    clf_ws.fit(X_train, y_train)
    clf_ws.set_params(n_estimators=10)
    clf_ws.fit(X_train, y_train)
    y1 = clf_ws.predict(X_test)

    clf = BaggingClassifier(n_estimators=10, warm_start=False,
                            random_state=3141)
    clf.fit(X_train, y_train)
    y2 = clf.predict(X_test)

    assert_array_almost_equal(y1, y2)
def test_warm_start_with_oob_score_fails():
    """warm_start=True together with oob_score=True is unsupported and must raise."""
    # Check using oob_score and warm_start simultaneously fails
    X, y = make_hastie_10_2(n_samples=20, random_state=1)
    clf = BaggingClassifier(n_estimators=5, warm_start=True, oob_score=True)
    assert_raises(ValueError, clf.fit, X, y)


def test_oob_score_removed_on_warm_start():
    """Disabling oob_score on a warm-started refit must drop the oob_score_ attr."""
    X, y = make_hastie_10_2(n_samples=2000, random_state=1)

    clf = BaggingClassifier(n_estimators=50, oob_score=True)
    clf.fit(X, y)

    clf.set_params(warm_start=True, oob_score=False, n_estimators=100)
    clf.fit(X, y)

    assert_raises(AttributeError, getattr, clf, "oob_score_")
#!/usr/bin/env python
"""
image_diversityNEW.py
Rewritten by Gus Hahn-Powell on March 7 2014
based on ~2010 code by Jeff Berry
purpose:
This script measures the distance from average for each image in the
input set, and copies the specified number of highest scoring images
to a new folder called 'diverse'. If ROI_config.txt is present in the
same folder as the input images, the ROI in that file will be used to
do the measurement. If not present, it will use a hard-coded default ROI.
usage:
python image_diversity.py
"""
import cv
import re
import shutil
import os, sys
import operator
from numpy import *
from collections import defaultdict
import subprocess as sp
import multiprocessing as mp
import matplotlib.pyplot as plot
import gtk
import gtk.glade
# Debug log path, written into the current working directory.
log_file = os.path.join(os.getcwd(), "tmp_log")
# Matches filenames ending in .png or .jpg, case-insensitively.
image_extension_pattern = re.compile("(\.(png|jpg)$)", re.IGNORECASE)
'''
#change this to make use of multiprocessing.pool?
class CopyThread(multiprocessing.Process):
def run(self):
flag = 'ok'
while (flag != 'stop'):
cmd = CopyQueue.get()
if cmd == None:
flag = 'stop'
else:
#print ' '.join(cmd)
p = sp.Popen(cmd)
p.wait()
FinishQueue.put(cmd)
#print "CopyThread stopped"
'''
class ImageWindow:
"""
"""
    def __init__(self):
        """Build the GTK window from its Glade description and wire up signals."""
        gladefile = "ImageDiversity.glade"
        self.wTree = gtk.glade.XML(gladefile, "window1")
        self.win = self.wTree.get_widget("window1")
        self.win.set_title("Image Diversity")

        # Map Glade signal names to handler callbacks.
        dic = {"on_window1_destroy": gtk.main_quit,
               "on_open1_clicked": self.openImages,
               "on_open2_clicked": self.openDest,
               "on_ok_clicked": self.onOK}
        self.wTree.signal_autoconnect(dic)

        self.srcfileentry = self.wTree.get_widget("srcfileentry")
        self.dstfileentry = self.wTree.get_widget("dstfileentry")
        # initialized to None until the user picks a destination folder
        self.destpath = None

        # Numeric entry widgets controlling how many images to select.
        self.train_most = self.wTree.get_widget("train_most")    # Select N images
        self.train_least = self.wTree.get_widget("train_least")  # Select n test?
        self.test_most = self.wTree.get_widget("test_most")
        self.test_least = self.wTree.get_widget("test_least")
        self.remaining = self.wTree.get_widget("remaining")
        self.batches = self.wTree.get_widget("batches")
        # assign 0 if not coercible to type int...
        self.safe_set_all()

        # Keep the "remaining" counter in sync with the user's numbers.
        self.train_most.connect("changed", self.update_remaining)
        self.train_least.connect("changed", self.update_remaining)
        self.test_most.connect("changed", self.update_remaining)
        self.test_least.connect("changed", self.update_remaining)
        self.batches.connect("changed", self.update_remaining)

        self.images = []
        self.traces = []
        self.images_dir = None
        self.traces_dir = None
        self.n = len(self.images)
        self.remaining.set_text(str(self.n))
        self.update_remaining()
        self.log_file = ""
def logger(self, message, log_file=log_file):
"""
logger
"""
with open(log_file, 'a') as lg:
lg.write("{0}\n".format(message))
    def get_roi(self):
        """Determine the region of interest (ROI) for the loaded images.

        Reads ROI_config.txt from the image directory when present; otherwise
        falls back to hard-coded bounds (defaults for the Sonosite Titan).
        Sets self.csize, self.img, self.top/bottom/left/right and self.roisize.
        """
        # get an image and open it to see the size
        img = cv.LoadImageM(self.images[0], iscolor=False)
        self.csize = shape(img)
        self.img = asarray(img)

        # open up the ROI_config.txt and parse
        self.logger("images_dir: {0}".format(self.images_dir))
        # see if the ROI_config.txt file exists at the specified directory...should we instead launch SelectROI.py?
        self.config = os.path.join(self.images_dir,'ROI_config.txt') if os.path.exists(os.path.join(self.images_dir,'ROI_config.txt')) else None
        self.logger("self.config: {0}".format(self.config))
        if self.config:
            self.logger("Found ROI_config.txt")
            # Config layout (assumed): tab-separated "name<TAB>value" lines,
            # with top/bottom/left/right on lines 1-4 -- TODO confirm against
            # the tool that writes ROI_config.txt (SelectROI.py?).
            c = open(self.config, 'r').readlines()
            self.top = int(c[1][:-1].split('\t')[1])
            self.bottom = int(c[2][:-1].split('\t')[1])
            self.left = int(c[3][:-1].split('\t')[1])
            self.right = int(c[4][:-1].split('\t')[1])
            self.logger("using ROI: [%d:%d, %d:%d]" % (self.top, self.bottom, self.left, self.right))
        else:
            self.logger("ROI_config.txt not found")
            self.top = 140 #default settings for the Sonosite Titan
            self.bottom = 320
            self.left = 250
            self.right = 580
            self.logger("using ROI: [%d:%d, %d:%d]" % (self.top, self.bottom, self.left, self.right))
        roi = img[self.top:self.bottom, self.left:self.right]
        self.roisize = shape(roi)
def safe_set(self, entry, value=""):
"""
Make sure entered text is coercible to type int
"""
try:
int(entry.get_text())
except:
entry.set_text(value)
def safe_set_all(self, value=""):
"""
"""
entries = [self.train_most, self.train_least, self.test_most, self.test_least, self.remaining, self.batches]
for entry in entries:
try:
int(entry.get_text())
except:
entry.set_text(value)
def safe_get(self, entry):
"""
Safely return an int (default is 0)
from a specified entry
"""
try:
return int(entry.get_text())
except:
return 0
def openImages(self, event):
"""
Allows user to select multiple images (jpg or png)
"""
fc = gtk.FileChooserDialog(title='Select Image Files', parent=None,
action=gtk.FILE_CHOOSER_ACTION_OPEN,
buttons=(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_OPEN, gtk.RESPONSE_OK))
g_directory = fc.get_current_folder() if fc.get_current_folder() else os.path.expanduser("~")
fc.set_current_folder(g_directory)
fc.set_default_response(gtk.RESPONSE_OK)
fc.set_select_multiple(True)
ffilter = gtk.FileFilter()
ffilter.set_name('Image Files')
ffilter.add_pattern('*.jpg')
ffilter.add_pattern('*.png')
fc.add_filter(ffilter)
response = fc.run()
if response == gtk.RESPONSE_OK:
self.images_dir = fc.get_current_folder() #set this to an attribute?
self.images = [os.path.join(self.images_dir, f) for f in fc.get_filenames() if re.search(image_extension_pattern, f)]
self.logger("{0} images found".format(len(self.images)))
self.logger("images: {0}".format("\n".join(self.images)))
self.n = len(self.images)
self.update_remaining()
self.srcfileentry.set_text(self.images_dir)
fc.destroy()
self.get_roi()
self.openTraces()
def openTraces(self):
"""
Allows user to select multiple trace files (traced.txt)
"""
fc = gtk.FileChooserDialog(title='Select Trace Files', parent=None,
action=gtk.FILE_CHOOSER_ACTION_OPEN,
buttons=(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_OPEN, gtk.RESPONSE_OK))
g_directory = fc.get_current_folder() if fc.get_current_folder() else self.images_dir
fc.set_current_folder(g_directory)
fc.set_default_response(gtk.RESPONSE_OK)
fc.set_select_multiple(True)
ffilter = gtk.FileFilter()
ffilter.set_name('Trace Files')
ffilter.add_pattern('*.traced.txt')
fc.add_filter(ffilter)
response = fc.run()
if response == gtk.RESPONSE_OK:
self.traces_dir = fc.get_current_folder() #set this to an attribute?
#should probably filter traces here (make sure images and traces match)
self.traces = [os.path.join(self.images_dir, f) for f in fc.get_filenames() if "traced.txt" in f]
self.logger("{0} traces found".format(len(self.traces)))
self.logger("traces: {0}".format("\n".join(self.traces)))
fc.destroy()
self.get_tracenames()
def openDest(self, event):
"""
"""
fc = gtk.FileChooserDialog(title='Select Save Destination', parent=None,
action=gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER,
buttons=(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_SAVE, gtk.RESPONSE_OK))
g_directory = fc.get_current_folder() if fc.get_current_folder() else os.path.expanduser("~")
fc.set_current_folder(g_directory)
fc.set_default_response(gtk.RESPONSE_OK)
response = fc.run()
if response == gtk.RESPONSE_OK:
self.destpath = fc.get_current_folder()
self.dstfileentry.set_text(self.destpath)
self.log_file = os.path.join(self.destpath, "diversity_log")
fc.destroy()
def makeDest(self):
"""
"""
#TODO: add this into openDest?
diverse_dir = os.path.join(self.destpath, "diverse")
self.logger("images will be saved in " + diverse_dir)
if not os.path.isdir(diverse_dir):
os.mkdir(diverse_dir)
self.logger("created directory" + diverse_dir)
def get_tracenames(self):
"""
This method will look for existing trace files and create a dictionary to corresponding
image files. It will only work if all image files are in the same directory
"""
#3/8/2014 (Gus): Changed to support multiple corresponding traces...
self.tracenames = defaultdict(list)
for image in self.images:
#get image name...
image_name = os.path.basename(image)
for trace in self.traces:
#get trace name...
trace_name = os.path.basename(trace)
if image_name in trace_name:
self.logger("image: {0}\ttrace: {1}".format(image_name, trace_name))
self.tracenames[image].append(trace)
def update_remaining(self, *args):
"""
update the number of images available
for training and test sets, given user's
input
"""
self.safe_set_all()
#need to safely get a value or assign zero if nothing
self.check_remaining()
#print "remaining: {0}".format(self.remaining.get_text())
self.remaining.set_text(str(self.n - self.safe_get(self.train_most) - self.safe_get(self.train_least)))
#make sure we don't have more batches than remaining...
if self.safe_get(self.batches) > self.safe_get(self.remaining):
self.batches.set_text(str(self.remaining))
def check_remaining(self):
"""
"""
#test values come out of training numbers, not overall pool
#rest test_most if value exceeds possible
self.safe_set_all()
if self.safe_get(self.test_most) > self.safe_get(self.train_most):
self.test_most.set_text("")
if self.safe_get(self.test_least) > self.safe_get(self.train_least):
self.test_least.set_text("")
#did we try to pick too many items?
if self.safe_get(self.train_most) + self.safe_get(self.train_least) > self.n:
self.train_most.set_text("")
self.train_least.set_text("")
def get_average_image(self):
"""
creates an average image from
a set of images and a corresponding RoI
"""
files = self.images
ave_img = zeros(self.roisize)
for i in range(len(files)):
img = cv.LoadImageM(files[i], iscolor=False)
roi = img[self.top:self.bottom, self.left:self.right]
roi = asarray(roi)/255.
ave_img += roi
ave_img /= len(files)
return ave_img, files
def make_train_test(self, images, training_n, testing_n=None):
"""
takes a list of images and test and training sizes
returns two lists of non-overlapping images (training, testing)
"""
images_array = array(images)
images_indices = arange(len(images_array))
random.shuffle(images_indices)
traininds = images_indices[:training_n]
trainfiles = images_array[traininds]
testfiles = []
#make sure we have a test set
if testing_n:
testinds = images_indices[training_n:training_n+testing_n]
testfiles = images_array[testinds]
#return training, testing
return list(trainfiles), list(testfiles)
def move_files(self, images, destination, image_class="??"):
"""
"""
#move our test files...
self.logger("Moving {0} {1} files...".format(len(images), image_class))
for image in images:
image_name = os.path.basename(image)
dest = os.path.join(destination, image_name)
shutil.copy(image, dest)
if image in self.tracenames:
#should I average the traces instead?
for trace in self.tracenames[image]:
trace_name = os.path.basename(trace)
dest = os.path.join(destination, trace_name)
self.logger("image: {0}".format(image))
self.logger("trace source: {0}".format(trace))
self.logger("trace dest: {0}\n".format(dest))
shutil.copy(trace, dest)
def plot_diversity(self, sorted_results):
"""
"""
#show rank vs. energy plot
count = 0
for (i,j) in sorted_results:
count += 1
plot.plot(count, j, 'b.')
#add confirmation dialog that prompts for save location when ok is clicked
#plot.savefig(os.path.join(self.destpath, 'rankVenergy.png'))
plot.title("rank vs. energy plot for {0} images".format(count))
plot.ylabel('Diversity score')
plot.xlabel('Rank')
#remove x axis ticks
#plot.tick_params(axis='x', which='both', bottom='off', top='off', labelbottom='off')
plot.show()
def get_diverse(self):
"""
get specified diversity set and
then copy relevant files to specified location
"""
batches = self.safe_get(self.batches)
if os.path.isdir(self.destpath):
self.logger("calculating average image...")
ave_img, files = self.get_average_image()
self.logger("measuring distances from average...")
results = {}
for i in range(len(self.images)):
img = cv.LoadImageM(self.images[i], iscolor=False)
roi = img[self.top:self.bottom, self.left:self.right]
roi = asarray(roi)/255.
dif_img = abs(roi - ave_img)
results[self.images[i]] = sum(sum(dif_img))
sorted_results = sorted(results.iteritems(), key=operator.itemgetter(1), reverse=True)
#plot rank vs diversity
self.plot_diversity(sorted_results)
most_diverse_n = self.safe_get(self.train_most)
least_diverse_n = self.safe_get(self.train_least)
test_most_diverse_n = self.safe_get(self.test_most)
test_least_diverse_n = self.safe_get(self.test_least)
training_most_diverse_n = most_diverse_n - test_most_diverse_n
training_least_diverse_n = least_diverse_n - test_least_diverse_n
test_size = test_most_diverse_n + test_least_diverse_n
self.logger("test size: {0}".format(test_size))
#remove test size from training size...
train_size = most_diverse_n + least_diverse_n - test_size
self.logger("training size: {0}".format(train_size))
all_images = [image for (image, _) in sorted_results]
most_diverse_images = []
least_diverse_images = []
#get n most diverse...
if most_diverse_n > 0:
self.logger("Selecting {0} most diverse images...".format(most_diverse_n))
for (image, score) in sorted_results[:most_diverse_n]:
self.logger("file: {0}\ndiversity score: {1}\n".format(image, score))
most_diverse_images.append(image)
#get most diverse for testing and training...
training_most_diverse, testing_most_diverse = self.make_train_test(most_diverse_images, training_n=training_most_diverse_n, testing_n=test_most_diverse_n)
else:
training_most_diverse = []
testing_most_diverse = []
#get n least diverse...
if least_diverse_n > 0:
self.logger("Selecting {0} least diverse images...".format(least_diverse_n))
#take the specified n least diverse...
for (image, score) in sorted_results[-1*least_diverse_n:]:
self.logger("file: {0}\ndiversity score: {1}\n".format(image, score))
least_diverse_images.append(image)
#get least diverse for testing and training...
training_least_diverse, testing_least_diverse = self.make_train_test(least_diverse_images, training_n=training_least_diverse_n, testing_n=test_least_diverse_n)
else:
training_least_diverse = []
testing_least_diverse = []
#make test, training, and batch file sets...
trainfiles = training_most_diverse + training_least_diverse
testfiles = testing_most_diverse + testing_least_diverse
#find remaining...
selected = set(trainfiles + testfiles)
remainingfiles = [image for image in all_images if image not in selected]
#prepare directory for training files...
self.traindir = os.path.join(self.destpath, "train")
if not os.path.isdir(self.traindir):
os.mkdir(self.traindir)
#move training files (edit this)...
self.move_files(trainfiles, destination=self.traindir, image_class="training")
#are we generating a test set?
if test_size > 0:
#prepare directory for test files...
self.testdir = os.path.join(self.destpath, "test")
if not os.path.isdir(self.testdir):
os.mkdir(self.testdir)
#move our test files...
self.move_files(testfiles, destination=self.testdir, image_class="test")
#get remaining and make n batches...
if batches > 0:
b_num = 1
#numpy trick works here...
for batch_files in array_split(array(remainingfiles), batches):
#pad batch folder name with some zeros
batch_name = "batch%03d" % (b_num)
self.logger("files in {0}: {1}".format(batch_name, len(batch_files)))
batch_dir = os.path.join(self.destpath, batch_name)
if not os.path.isdir(batch_dir):
os.mkdir(batch_dir)
#move batch files
self.move_files(batch_files, destination=batch_dir, image_class=batch_name)
#increment batch...
b_num+=1
# write sorted_results to a .txt file for future reference
# added Mar 10 2011 by Jeff Berry
o = open(os.path.join(self.destpath, 'SortedResults.txt'), 'w')
for (i,j) in sorted_results:
o.write("%s\t%.4f\n" %(i, j))
o.close()
#move ROI file...
roifile = os.path.join(self.images_dir, "ROI_config.txt")
if os.path.isfile(roifile):
self.logger("moving ROI_config.txt to {0}".format(roifile))
shutil.copy(self.destpath, "ROI_config.txt")
def onOK(self, event):
"""
"""
if not self.destpath or not self.images or self.safe_get(self.train_most) == 0:
#run error dialog and return...
error_dialog = gtk.MessageDialog(parent=None, type=gtk.MESSAGE_ERROR, buttons=gtk.BUTTONS_CLOSE, message_format="Some of your settings are missing...")
error_dialog.run()
error_dialog.destroy()
return
self.get_roi()
self.get_diverse()
gtk.main_quit()
self.logger("exiting...")
shutil.move(log_file, self.log_file)
# Launch the Image Diversity window and enter the GTK main loop.
if __name__ == "__main__":
    ImageWindow()
    gtk.main()
| mit |
iychoi/syndicate | planetlab/graphs/scatter.py | 2 | 4707 | """
Copyright 2013 The Trustees of Princeton University
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import matplotlib.pyplot as plt
import numpy as np
import common
def default_styles( length ):
    """Return *length* matplotlib marker codes, cycling through a fixed palette."""
    markers = ['o', '+', '*', '^', '.', 'x', 'v', '<', '>', '|', 'D', 'H', 'h', '_', 'p', '8']
    return [markers[index % len(markers)] for index in range(length)]
def make_lines( aggregate, yerror_aggregate=None, series_order=None, x_labels=None, x_ticks=None, series_labels=False, point_yoffs=None, legend_labels=None, styles=None, title="Title", xlabel="X", ylabel="Y", x_range=None, y_range=None, y_res=None, legend_pos=None, x_log=False, y_log=False ):
    """Plot one matplotlib line per series position found in *aggregate*.

    common.graph_series() yields one list of y-values (and errors) per x
    position; this function transposes those lists into per-series point
    sequences and plots each with its own marker style.
    NOTE(review): when given, x_ticks is assumed to align index-wise with
    the entries of aggregate -- confirm against common.graph_series().
    """
    if series_order == None:
        series_order = common.graph_default_order( aggregate )
    y_data_series, yerror_series = common.graph_series( aggregate, yerror_aggregate, series_order )
    x_data_series = None
    if x_ticks != None:
        x_data_series = x_ticks
    else:
        # Default x positions: 0..len-1.
        x_data_series = range(0, len(y_data_series))
    # Transpose: data_series[j] collects the j-th value of every x position.
    data_series = []
    yerror = []
    for i in xrange(0, len(y_data_series[0])):
        data_series.append( [] )
        yerror.append( [] )
    for i in xrange(0, len(y_data_series)):
        xs = [i] * len(y_data_series[i])
        pts = zip(xs, y_data_series[i])
        k = 0
        for j in xrange(0,len(data_series)):
            data_series[j].append( pts[k] )
            yerror[j].append( yerror_series[j][k] )
            k += 1
    fig = plt.figure()
    ax = fig.add_subplot( 111 )
    lines = []
    if styles == None:
        styles = default_styles( len(data_series) )
    # Draw each series, falling back to black ('k') when no style is given.
    for i in xrange(0,len(data_series)):
        x_series = [x for (x,y) in data_series[i]]
        y_series = [y for (x,y) in data_series[i]]
        style = 'k'
        if styles != None:
            if styles[i] != None:
                style = styles[i]
        ll, = ax.plot( x_series, y_series, style, markersize=10 )
        lines.append(ll)
        if yerror != None:
            if yerror[i] != None:
                ax.errorbar( x_series, y_series, yerr=yerror[i] )
    # apply labels
    ax.set_xlabel( xlabel )
    ax.set_ylabel( ylabel )
    ax.set_title( title )
    if legend_labels == None:
        legend_labels = common.graph_legend_labels( series_order )
    if legend_labels != None:
        kwargs={}
        if legend_pos != None:
            kwargs['loc'] = legend_pos
        ax.legend( lines, legend_labels, **kwargs )
    # Tick positions/labels default to the x data values.
    if x_ticks == None:
        x_ticks = x_data_series
    if x_labels == None:
        x_labels = [str(x) for x in x_ticks]
    if x_labels != None and x_ticks != None:
        ax.set_xticks( x_ticks )
        ax.set_xticklabels( x_labels )
    ax.autoscale()
    if y_res != None and y_range != None:
        ax.set_yticks( np.arange(y_range[0], y_range[1], y_res) )
    # Explicit axis ranges disable autoscaling on that axis.
    if x_range != None:
        ax.set_autoscalex_on(False)
        ax.set_xlim( [x_range[0], x_range[1]] )
    if y_range != None:
        ax.set_autoscaley_on(False)
        ax.set_ylim( [y_range[0], y_range[1]] )
    # label points with their numeric value, optionally offset per point
    if series_labels:
        j=0
        for ll in lines:
            x_series = ll.get_xdata()
            y_series = ll.get_ydata()
            i = 0
            for (x,y) in zip(x_series, y_series):
                yoff = y
                if point_yoffs:
                    yoff = y + point_yoffs[j][i]
                i+=1
                ax.text( x, yoff, '%3.2f' % float(y), ha = 'left', va = 'bottom' )
            j+=1
    if x_log:
        ax.set_xscale('log')
    if y_log:
        ax.set_yscale('log', nonposy="clip")
if __name__ == "__main__":
    # Smoke test: build a mock experiment and draw one figure from it.
    data, error = common.mock_experiment( ".scatter_experiment", 3, 3 )

    # Every (step, key) combination, ordered by sorted step then sorted key.
    steps = sorted(set(data.keys()))
    keys = sorted({key for step_data in data.values() for key in step_data.keys()})
    series_order = [(step, key) for step in steps for key in keys]

    make_lines( data, yerror_aggregate = error, series_order = series_order,
                y_range = [0, 15], series_labels = True,
                legend_labels=["Average", "Median", "90th Percentile", "99th Percentile"] )
    plt.show()
| apache-2.0 |
mvpossum/deep-learning | tp3/base.py | 1 | 9188 | from __future__ import print_function
import numpy as np
np.random.seed(1337) # for reproducibility
import os
from keras.utils import np_utils
from keras import backend as K
from scipy import misc
from time import time, strftime, localtime
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from keras.preprocessing.image import ImageDataGenerator
from shutil import rmtree
from keras.callbacks import ModelCheckpoint
import h5py
# Number of output classes in the dataset.
nb_classes = 91
# input image dimensions
img_rows, img_cols = 32, 32
# Channels-first for Theano ('th') ordering, channels-last otherwise.
input_shape = (1, img_rows, img_cols) if K.image_dim_ordering() == 'th' else (img_rows, img_cols, 1)
''' Esta clase permite agregarle nuevos procesamientos al DataGenerator
Ejemplo de generador que invierte los colores:
datagen = ExtensibleImageDataGenerator(
rescale=1/255
).add(lambda x: 1-x)
Nota: Los add se pueden 'apilar'
'''
class ExtensibleImageDataGenerator(ImageDataGenerator):
    """ImageDataGenerator whose standardize() output can be post-processed.

    Extra steps are registered with add() and applied, in registration
    order, after the base-class standardization. add() returns self, so
    calls can be chained (e.g. ``gen.add(f).add(g)``).
    """
    def __init__(self, **kwargs):
        super(ExtensibleImageDataGenerator, self).__init__(**kwargs)
        # Identity until add() installs something.
        self.custom_processings = lambda x: x

    def standardize(self, x):
        standardized = super(ExtensibleImageDataGenerator, self).standardize(x)
        return self.custom_processings(standardized)

    def add(self, g):
        previous = self.custom_processings
        self.custom_processings = lambda x: g(previous(x))
        return self
# Default input pipeline: every augmentation disabled; pixels are
# rescaled to [0, 1] and then inverted (x -> 1 - x).
default_datagen = ExtensibleImageDataGenerator(
    featurewise_center=False,
    samplewise_center=False,
    featurewise_std_normalization=False,
    samplewise_std_normalization=False,
    zca_whitening=False,
    rotation_range=0.,
    width_shift_range=0.,
    height_shift_range=0.,
    shear_range=0.,
    zoom_range=0.,
    channel_shift_range=0.,
    fill_mode='nearest',
    cval=0.,
    horizontal_flip=False,
    vertical_flip=False,
    rescale=1/255.,
    dim_ordering=K.image_dim_ordering(),
    ).add(lambda x: 1-x)
def to_categorical(y):
    """Return the index of the first non-zero entry of a one-hot vector.

    Despite the name, this is the *inverse* of keras' to_categorical:
    e.g. to_categorical([0, 0, 1, 0, 0]) == 2.
    """
    hot_indices = np.nonzero(y)[0]
    return hot_indices[0]
class BaseDataset(object):
    """Common behaviour for named datasets: evaluation and image previews."""

    def __init__(self, name):
        # Human-readable dataset name, used in printed metrics and logs.
        self.name = name

    def evaluate(self, model, val_samples=50000, **kwargs):
        """Run model.evaluate_generator on this dataset and print each metric."""
        gen = self.get_gen()
        if gen is not None:
            score = model.evaluate_generator(gen, val_samples=val_samples, **kwargs)
            for i in range(len(model.metrics_names)):
                print('{} {}: {:2f}'.format(self.name, model.metrics_names[i], score[i]))
        else:
            print("No hay data!")

    def preview(self, directory="preview"):
        """Save one batch of processed images into *directory* (wiping it first).

        Files are named '<label>-<index>.png' with the label zero-padded.
        """
        def prepare_show(face):
            # Rescale to [0, 1] so imsave renders the full dynamic range.
            m, M = face.min(), face.max()
            return (face-m)/(M-m)
        if os.path.exists(directory):
            rmtree(directory)
        os.makedirs(directory)
        X_batch, y_batch = self.get_gen().next()
        for i,(img, y) in enumerate(zip(X_batch, y_batch)):
            misc.imsave(os.path.join(directory, str(to_categorical(y)).zfill(3)+'-'+str(i)+'.png'), prepare_show(img.reshape(img_rows, img_cols)))
class LazyDataset(BaseDataset):
    """Dataset backed by an image directory, loaded lazily via flow_from_directory."""

    def __init__(self, directory, name=None, datagen=None, batch_size=128, **kwargs):
        if datagen is None: datagen=default_datagen
        super(LazyDataset, self).__init__(name if name else directory)
        # Deferred factory: the (print, flow)[1] tuple trick logs once, then
        # returns the keras generator; nothing is loaded until first use.
        self.gen_gen = lambda: (print('Cargando {}...'.format(self.name)), datagen.flow_from_directory(directory=directory, target_size=(img_rows, img_cols), color_mode='grayscale', batch_size=batch_size, **kwargs))[1]
        self.gen = None

    def get_XY(self):
        # One batch of (inputs, one-hot labels).
        return self.get_gen().next()

    def get_gen(self):
        # Create the keras generator on first access, then reuse it.
        if self.gen is None: self.gen = self.gen_gen()
        return self.gen
class H5Dataset(BaseDataset):
    """Dataset fully materialized in memory from an HDF5 file with 'X'/'Y' arrays."""

    def __init__(self, h5file, name=None, datagen=None, batch_size=128, **kwargs):
        super(H5Dataset, self).__init__(name if name else h5file)
        self.h5file = h5file
        self.batch_size = batch_size
        self.X = self.Y = None
        self.datagen = datagen
        self.load_data()

    def load_data(self):
        """Read X/Y into memory and fit the datagen statistics if one is set."""
        print("Cargando {}...".format(self.name))
        with h5py.File(self.h5file,'r') as hf:
            self.X = np.array(hf.get('X'))
            self.Y = np.array(hf.get('Y'))
        print("Found {} images belonging to {} classes.".format(self.Y.shape[0], self.Y.shape[1]))
        if self.datagen is not None:
            # Required for featurewise normalization / whitening options.
            self.datagen.fit(self.X)

    def get_XY(self):
        # Reload from disk if the arrays are not in memory.
        if self.X is None: self.load_data()
        return self.X, self.Y

    def filter(self, f):
        """Keep only the (x, y) pairs for which f(x, y) is truthy."""
        X, Y = self.get_XY()
        self.X = []
        self.Y = []
        for x,y in zip(X,Y):
            if f(x, y):
                self.X.append(x)
                self.Y.append(y)
        self.X = np.array(self.X)
        self.Y = np.array(self.Y)
        print("Se filtraron %d imagenes." % (self.X.shape[0]))

    def get_gen(self):
        # Wrap the in-memory arrays in a (possibly augmenting) batch generator.
        X, Y = self.get_XY()
        datagen = self.datagen if self.datagen is not None else ImageDataGenerator()
        return datagen.flow(X, Y, batch_size=self.batch_size)
def dataset(source, name=None, datagen=None, batch_size=128, **kwargs):
    """Factory: an H5Dataset for '.h5' files, a LazyDataset for directories."""
    _, extension = os.path.splitext(source)
    dataset_cls = H5Dataset if extension == '.h5' else LazyDataset
    return dataset_cls(source, name, datagen, batch_size, **kwargs)
class NameGen(object):
    """Build a run name '<base>--<timestamp>' and derive output paths from it."""

    def __init__(self, base_name):
        stamp = strftime("%d-%b-%Y--%H-%M", localtime())
        self.name = base_name + '--' + stamp

    def get_name(self):
        """Return the full run name."""
        return self.name

    def get_file(self, dire, suffix):
        """Return '<dire>/<name>--<suffix>', creating *dire* if needed."""
        if not os.path.exists(dire):
            os.makedirs(dire)
        return os.path.join(dire, self.name + '--' + suffix)

    def get_model_file(self, suffix):
        # Model artifacts live under ./models.
        return self.get_file("models", suffix)

    def get_history_file(self, suffix):
        # Training-history plots live under ./histories.
        return self.get_file("histories", suffix)
class Trainer(object):
    """Drive training of a keras model: fit with checkpointing, evaluate on
    the three splits, and persist the model plus accuracy/loss plots."""

    def __init__(self, name, train_data, valid_data, test_data):
        self.namegen = NameGen(name)
        self.train_data, self.valid_data, self.test_data = train_data, valid_data, test_data

    def save_model_struct(self, model):
        # Architecture only (JSON); weights are saved separately.
        with open(self.namegen.get_model_file('model-struct.json'), "w") as text_file:
            text_file.write(model.to_json())

    def train(self, model, samples_per_epoch=269018, nb_epoch=12, verbose=1, nb_val_samples=25000, **kwargs):
        """Fit *model* on the training generator, checkpointing best-val_acc weights."""
        print("Entrenando red: "+self.namegen.get_name())
        self.save_model_struct(model)
        # Keep only the weights of the epoch with the best validation accuracy.
        checkpointer = ModelCheckpoint(filepath=self.namegen.get_model_file('model-train-weights.h5'), save_weights_only=True, monitor='val_acc', verbose=1, save_best_only=True)
        self.history = model.fit_generator(self.train_data.get_gen(), samples_per_epoch=samples_per_epoch, nb_epoch=nb_epoch,
            verbose=verbose, validation_data=self.valid_data.get_gen(), nb_val_samples=nb_val_samples, callbacks=[checkpointer], **kwargs)
        self.save_model(model)
        self.save_last_train_history()

    def evaluate(self, model):
        """Print the model's metrics on the train/valid/test splits."""
        print("Evaluando modelo...")
        self.train_data.evaluate(model)
        self.valid_data.evaluate(model)
        self.test_data.evaluate(model)

    def save_model(self, model):
        """Save structure and weights, temporarily detaching the 'top3' metric.

        NOTE(review): the custom 'top3' metric's name/function/tensor are
        popped from the compiled model before model.save() and re-inserted
        afterwards -- presumably because it cannot be serialized; confirm
        against the keras version in use.
        """
        restore=False
        if 'top3' in model.model.metrics_names:
            idx=model.model.metrics_names.index('top3')
            metric_name = model.model.metrics_names[idx]
            del model.model.metrics_names[idx]
            # metrics/metrics_tensors exclude the implicit loss entry that
            # metrics_names includes, hence the idx-1 offset.
            metric = model.model.metrics[idx-1]
            del model.model.metrics[idx-1]
            metric_tensor = model.model.metrics_tensors[idx-1]
            del model.model.metrics_tensors[idx-1]
            restore = True
        file_name = self.namegen.get_model_file('model.h5')
        print("Guardando pesos en "+file_name+"...")
        self.save_model_struct(model)
        model.save(file_name)
        if restore:
            # Re-attach the metric exactly where it was removed.
            model.model.metrics_names.insert(idx, metric_name)
            model.model.metrics.insert(idx-1, metric)
            model.model.metrics_tensors.insert(idx-1, metric_tensor)

    def save_last_train_history(self):
        """Write accuracy and loss curves (train vs. validation) as PNGs."""
        print("Guardando historial...")
        # summarize history for accuracy
        plt.plot(self.history.history['acc'], 'bo-')
        plt.plot(self.history.history['val_acc'], 'go-')
        plt.title('')
        plt.ylabel('Accuracy')
        plt.xlabel('Epoch')
        plt.grid(True)
        plt.legend(['Conj. Entrenamiento', 'Conj. Validacion'], loc='lower right')
        plt.savefig(self.namegen.get_history_file('acc.png'), bbox_inches='tight', dpi = 300)
        plt.clf()
        # summarize history for loss
        plt.plot(self.history.history['loss'], 'bo-')
        plt.plot(self.history.history['val_loss'], 'go-')
        plt.title('')
        plt.ylabel('Loss')
        plt.xlabel('Epoch')
        plt.grid(True)
        plt.legend(['Conj. Entrenamiento', 'Conj. Validacion'], loc='upper right')
        plt.savefig(self.namegen.get_history_file('loss.png'), bbox_inches='tight', dpi = 300)
        plt.clf()
| mit |
MechCoder/scikit-learn | examples/model_selection/plot_learning_curve.py | 76 | 4509 | """
========================
Plotting Learning Curves
========================
On the left side the learning curve of a naive Bayes classifier is shown for
the digits dataset. Note that the training score and the cross-validation score
are both not very good at the end. However, the shape of the curve can be found
in more complex datasets very often: the training score is very high at the
beginning and decreases and the cross-validation score is very low at the
beginning and increases. On the right side we see the learning curve of an SVM
with RBF kernel. We can see clearly that the training score is still around
the maximum and the validation score could be increased with more training
samples.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.datasets import load_digits
from sklearn.model_selection import learning_curve
from sklearn.model_selection import ShuffleSplit
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,
                        n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):
    """
    Generate a simple plot of the test and training learning curve.

    Parameters
    ----------
    estimator : object type that implements the "fit" and "predict" methods
        An object of that type which is cloned for each validation.

    title : string
        Title for the chart.

    X : array-like, shape (n_samples, n_features)
        Training vector, where n_samples is the number of samples and
        n_features is the number of features.

    y : array-like, shape (n_samples) or (n_samples, n_features), optional
        Target relative to X for classification or regression;
        None for unsupervised learning.

    ylim : tuple, shape (ymin, ymax), optional
        Defines minimum and maximum yvalues plotted.

    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
          - None, to use the default 3-fold cross-validation,
          - integer, to specify the number of folds.
          - An object to be used as a cross-validation generator.
          - An iterable yielding train/test splits.

        For integer/None inputs, if ``y`` is binary or multiclass,
        :class:`StratifiedKFold` used. If the estimator is not a classifier
        or if ``y`` is neither binary nor multiclass, :class:`KFold` is used.

        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validators that can be used here.

    n_jobs : integer, optional
        Number of jobs to run in parallel (default 1).
    """
    plt.figure()
    plt.title(title)
    if ylim is not None:
        plt.ylim(*ylim)
    plt.xlabel("Training examples")
    plt.ylabel("Score")
    # One row of scores per training-set size, one column per CV fold.
    train_sizes, train_scores, test_scores = learning_curve(
        estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
    # Mean/std across CV folds at each training-set size.
    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)
    plt.grid()

    # Shade +/- one standard deviation around each mean curve.
    plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
                     train_scores_mean + train_scores_std, alpha=0.1,
                     color="r")
    plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
                     test_scores_mean + test_scores_std, alpha=0.1, color="g")
    plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
             label="Training score")
    plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
             label="Cross-validation score")

    plt.legend(loc="best")
    return plt
digits = load_digits()
X, y = digits.data, digits.target


title = "Learning Curves (Naive Bayes)"
# Cross validation with 100 iterations to get smoother mean test and train
# score curves, each time with 20% data randomly selected as a validation set.
cv = ShuffleSplit(n_splits=100, test_size=0.2, random_state=0)

estimator = GaussianNB()
plot_learning_curve(estimator, title, X, y, ylim=(0.7, 1.01), cv=cv, n_jobs=4)

# BUGFIX: raw string -- "\g" is not a valid escape sequence and triggers a
# DeprecationWarning (a SyntaxError in future Python versions).
title = r"Learning Curves (SVM, RBF kernel, $\gamma=0.001$)"
# SVC is more expensive so we do a lower number of CV iterations:
cv = ShuffleSplit(n_splits=10, test_size=0.2, random_state=0)
estimator = SVC(gamma=0.001)
plot_learning_curve(estimator, title, X, y, (0.7, 1.01), cv=cv, n_jobs=4)

plt.show()
| bsd-3-clause |
RomainBrault/scikit-learn | examples/mixture/plot_gmm.py | 122 | 3265 | """
=================================
Gaussian Mixture Model Ellipsoids
=================================
Plot the confidence ellipsoids of a mixture of two Gaussians
obtained with Expectation Maximisation (``GaussianMixture`` class) and
Variational Inference (``BayesianGaussianMixture`` class models with
a Dirichlet process prior).
Both models have access to five components with which to fit the data. Note
that the Expectation Maximisation model will necessarily use all five
components while the Variational Inference model will effectively only use as
many as are needed for a good fit. Here we can see that the Expectation
Maximisation model splits some components arbitrarily, because it is trying to
fit too many components, while the Dirichlet Process model adapts it number of
state automatically.
This example doesn't show it, as we're in a low-dimensional space, but
another advantage of the Dirichlet process model is that it can fit
full covariance matrices effectively even when there are less examples
per cluster than there are dimensions in the data, due to
regularization properties of the inference algorithm.
"""
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
# Endless cycle of plot colors, one per mixture component.
color_iter = itertools.cycle(['navy', 'c', 'cornflowerblue', 'gold',
                              'darkorange'])
def plot_results(X, Y_, means, covariances, index, title):
    """Scatter the clustered points and draw one covariance ellipse per component.

    X: (n_samples, 2) data; Y_: per-sample component assignment;
    index: 0-based row of the 2x1 subplot grid to draw into.
    """
    splot = plt.subplot(2, 1, 1 + index)
    for i, (mean, covar, color) in enumerate(zip(
            means, covariances, color_iter)):
        v, w = linalg.eigh(covar)
        # Ellipse axis lengths (2 standard deviations) from the eigenvalues.
        v = 2. * np.sqrt(2.) * np.sqrt(v)
        # Direction of the principal axis.
        u = w[0] / linalg.norm(w[0])
        # as the DP will not use every component it has access to
        # unless it needs it, we shouldn't plot the redundant
        # components.
        if not np.any(Y_ == i):
            continue
        plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)

        # Plot an ellipse to show the Gaussian component
        angle = np.arctan(u[1] / u[0])
        angle = 180. * angle / np.pi  # convert to degrees
        ell = mpl.patches.Ellipse(mean, v[0], v[1], 180. + angle, color=color)
        ell.set_clip_box(splot.bbox)
        ell.set_alpha(0.5)
        splot.add_artist(ell)

    plt.xlim(-9., 5.)
    plt.ylim(-3., 6.)
    plt.xticks(())
    plt.yticks(())
    plt.title(title)
# Number of samples per component
n_samples = 500

# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
# First component: correlated via C; second: isotropic, shifted to (-6, 3).
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
          .7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]

# Fit a Gaussian mixture with EM using five components
gmm = mixture.GaussianMixture(n_components=5, covariance_type='full').fit(X)
plot_results(X, gmm.predict(X), gmm.means_, gmm.covariances_, 0,
             'Gaussian Mixture')

# Fit a Dirichlet process Gaussian mixture using five components
dpgmm = mixture.BayesianGaussianMixture(n_components=5,
                                        covariance_type='full').fit(X)
plot_results(X, dpgmm.predict(X), dpgmm.means_, dpgmm.covariances_, 1,
             'Bayesian Gaussian Mixture with a Dirichlet process prior')

plt.show()
| bsd-3-clause |
alexsavio/scikit-learn | sklearn/linear_model/tests/test_sparse_coordinate_descent.py | 94 | 10801 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model.coordinate_descent import (Lasso, ElasticNet,
LassoCV, ElasticNetCV)
def test_sparse_coef():
    """``sparse_coef_`` must expose ``coef_`` as a sparse matrix view."""
    model = ElasticNet()
    model.coef_ = [1, 2, 3]
    sparse_view = model.sparse_coef_
    assert_true(sp.isspmatrix(sparse_view))
    assert_equal(sparse_view.toarray().tolist()[0], model.coef_)
def test_normalize_option():
    """Check that ``normalize=True`` yields the same coefficients whether
    X is passed as a dense array or a sparse matrix.

    Note: the dense estimator must be fit *before* X is converted to CSC;
    building X sparse up front would make both fits take the sparse code
    path and the comparison vacuous.
    """
    X = np.array([[-1.], [0.], [1.]])
    y = [-1, 0, 1]
    clf_dense = ElasticNet(fit_intercept=True, normalize=True)
    clf_sparse = ElasticNet(fit_intercept=True, normalize=True)
    clf_dense.fit(X, y)
    X = sp.csc_matrix(X)
    clf_sparse.fit(X, y)
    assert_almost_equal(clf_dense.dual_gap_, 0)
    assert_array_almost_equal(clf_dense.coef_, clf_sparse.coef_)
def test_lasso_zero():
    """An all-zero sparse design matrix must fit the null model, not crash."""
    X_zero = sp.csc_matrix((3, 1))  # 3 samples, 1 feature, no stored values
    y_zero = [0, 0, 0]
    model = Lasso().fit(X_zero, y_zero)
    T = np.array([[1], [2], [3]])   # arbitrary test inputs
    predictions = model.predict(T)
    assert_array_almost_equal(model.coef_, [0])
    assert_array_almost_equal(predictions, [0, 0, 0])
    assert_almost_equal(model.dual_gap_, 0)
def test_enet_toy_list_input():
    """Check ElasticNet on a toy problem for several (alpha, l1_ratio)
    settings when the targets ``Y`` are given as a plain Python list.
    """
    # Test ElasticNet for various values of alpha and l1_ratio with list X
    X = np.array([[-1], [0], [1]])
    X = sp.csc_matrix(X)
    Y = [-1, 0, 1]  # just a straight line (identity), passed as a list
    T = np.array([[2], [3], [4]])  # test sample
    # alpha=0 disables regularization:
    # this should be the same as unregularized least squares
    clf = ElasticNet(alpha=0, l1_ratio=1.0)
    # catch warning about alpha=0.
    # this is discouraged but should work.
    ignore_warnings(clf.fit)(X, Y)
    pred = clf.predict(T)
    assert_array_almost_equal(clf.coef_, [1])
    assert_array_almost_equal(pred, [2, 3, 4])
    assert_almost_equal(clf.dual_gap_, 0)
    # moderate L1/L2 mix shrinks the coefficient below 1
    clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
    clf.fit(X, Y)
    pred = clf.predict(T)
    assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
    assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
    assert_almost_equal(clf.dual_gap_, 0)
    # heavier L1 weight shrinks the coefficient further
    clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
    clf.fit(X, Y)
    pred = clf.predict(T)
    assert_array_almost_equal(clf.coef_, [0.45454], 3)
    assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
    assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy_explicit_sparse_input():
    """Same toy checks as ``test_enet_toy_list_input``, but with X and T
    constructed explicitly as sparse (LIL) matrices.
    """
    # Test ElasticNet for various values of alpha and l1_ratio with sparse X
    f = ignore_warnings
    # training samples: column vector (-1, 0, 1); the zero entry is left
    # unstored on purpose to exercise implicit sparsity
    X = sp.lil_matrix((3, 1))
    X[0, 0] = -1
    # X[1, 0] = 0
    X[2, 0] = 1
    Y = [-1, 0, 1]  # just a straight line (the identity function)
    # test samples
    T = sp.lil_matrix((3, 1))
    T[0, 0] = 2
    T[1, 0] = 3
    T[2, 0] = 4
    # alpha=0: this should be the same as lasso without regularization,
    # i.e. plain least squares (coef == 1)
    clf = ElasticNet(alpha=0, l1_ratio=1.0)
    f(clf.fit)(X, Y)
    pred = clf.predict(T)
    assert_array_almost_equal(clf.coef_, [1])
    assert_array_almost_equal(pred, [2, 3, 4])
    assert_almost_equal(clf.dual_gap_, 0)
    # regularized fits: expected values match the list-input test above
    clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
    clf.fit(X, Y)
    pred = clf.predict(T)
    assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
    assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
    assert_almost_equal(clf.dual_gap_, 0)
    clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
    clf.fit(X, Y)
    pred = clf.predict(T)
    assert_array_almost_equal(clf.coef_, [0.45454], 3)
    assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
    assert_almost_equal(clf.dual_gap_, 0)
def make_sparse_data(n_samples=100, n_features=100, n_informative=10, seed=42,
                     positive=False, n_targets=1):
    """Build a reproducible, ill-posed sparse linear-regression problem.

    Only the first ``n_informative`` features carry signal; roughly half of
    the design-matrix entries are zeroed before conversion to CSC format.
    Returns ``(X, y)`` where ``y`` is 1-D iff ``n_targets == 1``.
    """
    rng = np.random.RandomState(seed)
    # Draw the ground-truth weights first so the RNG stream (and therefore
    # the generated data) matches historical runs of this helper.
    true_coef = rng.randn(n_features, n_targets)
    true_coef[n_informative:] = 0.0  # only the top features impact the model
    if positive:
        true_coef = np.abs(true_coef)
    dense_X = rng.randn(n_samples, n_features)
    zero_mask = rng.uniform(size=(n_samples, n_features)) > 0.5
    dense_X[zero_mask] = 0.0  # ~50% of zeros in the input signal
    # Noise-free targets generated from the ground-truth model
    targets = np.dot(dense_X, true_coef)
    if n_targets == 1:
        targets = np.ravel(targets)
    return sp.csc_matrix(dense_X), targets
def _test_sparse_enet_not_as_toy_dataset(alpha, fit_intercept, positive):
    """Helper: fit ElasticNet on a realistic sparse problem and check that
    (a) both sparse and dense fits converge (dual gap ~ 0, score > 0.85),
    (b) the sparse and dense solutions agree, and
    (c) the learned coefficients are actually sparse.
    """
    n_samples, n_features, max_iter = 100, 100, 1000
    n_informative = 10
    X, y = make_sparse_data(n_samples, n_features, n_informative,
                            positive=positive)
    # First half of the samples is the test fold, second half the train fold
    X_train, X_test = X[n_samples // 2:], X[:n_samples // 2]
    y_train, y_test = y[n_samples // 2:], y[:n_samples // 2]
    s_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept,
                       max_iter=max_iter, tol=1e-7, positive=positive,
                       warm_start=True)
    s_clf.fit(X_train, y_train)
    assert_almost_equal(s_clf.dual_gap_, 0, 4)
    assert_greater(s_clf.score(X_test, y_test), 0.85)
    # check the convergence is the same as the dense version
    d_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept,
                       max_iter=max_iter, tol=1e-7, positive=positive,
                       warm_start=True)
    d_clf.fit(X_train.toarray(), y_train)
    assert_almost_equal(d_clf.dual_gap_, 0, 4)
    assert_greater(d_clf.score(X_test, y_test), 0.85)
    assert_almost_equal(s_clf.coef_, d_clf.coef_, 5)
    assert_almost_equal(s_clf.intercept_, d_clf.intercept_, 5)
    # check that the coefs are sparse
    assert_less(np.sum(s_clf.coef_ != 0.0), 2 * n_informative)
def test_sparse_enet_not_as_toy_dataset():
    """Exercise sparse ElasticNet over the supported parameter grid:
    (alpha=0.1, unconstrained) and (alpha=1e-3, positive), each with and
    without an intercept.
    """
    for alpha, positive in ((0.1, False), (1e-3, True)):
        for fit_intercept in (False, True):
            _test_sparse_enet_not_as_toy_dataset(alpha=alpha,
                                                 fit_intercept=fit_intercept,
                                                 positive=positive)
def test_sparse_lasso_not_as_toy_dataset():
    """Fit Lasso on a realistic sparse problem: sparse and dense fits must
    both converge, agree with each other, and recover exactly the
    informative features.
    """
    n_samples = 100
    max_iter = 1000
    n_informative = 10
    X, y = make_sparse_data(n_samples=n_samples, n_informative=n_informative)
    # First half of the samples is the test fold, second half the train fold
    X_train, X_test = X[n_samples // 2:], X[:n_samples // 2]
    y_train, y_test = y[n_samples // 2:], y[:n_samples // 2]
    s_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7)
    s_clf.fit(X_train, y_train)
    assert_almost_equal(s_clf.dual_gap_, 0, 4)
    assert_greater(s_clf.score(X_test, y_test), 0.85)
    # check the convergence is the same as the dense version
    d_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7)
    d_clf.fit(X_train.toarray(), y_train)
    assert_almost_equal(d_clf.dual_gap_, 0, 4)
    assert_greater(d_clf.score(X_test, y_test), 0.85)
    # check that the coefs are sparse: exactly the informative ones survive
    assert_equal(np.sum(s_clf.coef_ != 0.0), n_informative)
def test_enet_multitarget():
    """A single multi-target fit must match the per-column single-target fits."""
    n_targets = 3
    X, y = make_sparse_data(n_targets=n_targets)
    # XXX: There is a bug when precompute is not None!
    model = ElasticNet(alpha=0.01, fit_intercept=True, precompute=None)
    model.fit(X, y)
    # Keep references to the multi-target solution before refitting
    coef, intercept, dual_gap = (model.coef_,
                                 model.intercept_,
                                 model.dual_gap_)
    for target_idx in range(n_targets):
        model.fit(X, y[:, target_idx])
        assert_array_almost_equal(coef[target_idx, :], model.coef_)
        assert_array_almost_equal(intercept[target_idx], model.intercept_)
        assert_array_almost_equal(dual_gap[target_idx], model.dual_gap_)
def test_path_parameters():
    """Check that ElasticNetCV honors its path parameters (l1_ratio,
    n_alphas) and that the CV error path matches between sparse and dense X.
    """
    X, y = make_sparse_data()
    max_iter = 50
    n_alphas = 10
    clf = ElasticNetCV(n_alphas=n_alphas, eps=1e-3, max_iter=max_iter,
                       l1_ratio=0.5, fit_intercept=False)
    ignore_warnings(clf.fit)(X, y)  # new params
    assert_almost_equal(0.5, clf.l1_ratio)
    assert_equal(n_alphas, clf.n_alphas)
    assert_equal(n_alphas, len(clf.alphas_))
    sparse_mse_path = clf.mse_path_
    ignore_warnings(clf.fit)(X.toarray(), y)  # compare with dense data
    assert_almost_equal(clf.mse_path_, sparse_mse_path)
def test_same_output_sparse_dense_lasso_and_enet_cv():
    """ElasticNetCV and LassoCV must select the same model whether X is
    given sparse or dense, with and without normalization.
    """
    X, y = make_sparse_data(n_samples=40, n_features=10)

    def _check_sparse_dense_agreement(make_estimator):
        # Fit one estimator on sparse X and a fresh one on its dense copy,
        # then compare the selected alpha and the full CV path.
        sparse_cv = make_estimator()
        ignore_warnings(sparse_cv.fit)(X, y)
        dense_cv = make_estimator()
        ignore_warnings(dense_cv.fit)(X.toarray(), y)
        assert_almost_equal(sparse_cv.alpha_, dense_cv.alpha_, 7)
        assert_almost_equal(sparse_cv.intercept_, dense_cv.intercept_, 7)
        assert_array_almost_equal(sparse_cv.mse_path_, dense_cv.mse_path_)
        assert_array_almost_equal(sparse_cv.alphas_, dense_cv.alphas_)

    for normalize in [True, False]:
        _check_sparse_dense_agreement(
            lambda: ElasticNetCV(max_iter=100, cv=5, normalize=normalize))
        _check_sparse_dense_agreement(
            lambda: LassoCV(max_iter=100, cv=4, normalize=normalize))
def test_same_multiple_output_sparse_dense():
    """Multi-output ElasticNet predictions must agree between dense and
    sparse representations of the same inputs.
    """
    # Fixed multi-output fixture (loop-invariant, so built once)
    X = [[0, 1, 2, 3, 4],
         [0, 2, 5, 8, 11],
         [9, 10, 11, 12, 13],
         [10, 11, 12, 13, 14]]
    y = [[1, 2, 3, 4, 5],
         [1, 3, 6, 9, 12],
         [10, 11, 12, 13, 14],
         [11, 12, 13, 14, 15]]
    sample = np.array([1, 2, 3, 4, 5]).reshape(1, -1)
    for normalize in [True, False]:
        dense_model = ElasticNet(normalize=normalize)
        ignore_warnings(dense_model.fit)(X, y)
        predict_dense = dense_model.predict(sample)
        sparse_model = ElasticNet(normalize=normalize)
        ignore_warnings(sparse_model.fit)(sp.coo_matrix(X), y)
        predict_sparse = sparse_model.predict(sp.coo_matrix(sample))
        assert_array_almost_equal(predict_sparse, predict_dense)
| bsd-3-clause |
henryroe/NIHTS-xcam | setup.py | 2 | 5385 | from setuptools import setup, find_packages # Always prefer setuptools over distutils
from codecs import open # To use a consistent encoding
import os
# Resolve paths relative to this file so the build works from any CWD.
base_dir = os.path.abspath(os.path.dirname(__file__))
# Load package metadata (__title__, __version__, ...) without importing the
# package itself, which may have unmet runtime dependencies at install time.
about = {}
with open(os.path.join(base_dir, "nihts_xcam", "__about__.py")) as f:
    exec(f.read(), about)
# Get the long description from the relevant file, converting from md to rst if possible
with open(os.path.join(base_dir, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()
try:
    from pypandoc import convert
    long_description = convert('README.md', 'rst', format='md')
except (ImportError, OSError):
    # ImportError: pypandoc is not installed.  OSError: pypandoc imports but
    # the pandoc executable itself is missing.  Either way, fall back to the
    # raw Markdown text rather than failing the install.
    print("warning: pypandoc module not found, could not convert Markdown to RST")
# All package metadata below is single-sourced from nihts_xcam/__about__.py,
# loaded into `about` earlier in this file.
setup(
    name=about["__title__"],
    # Versions should comply with PEP440. For a discussion on single-sourcing
    # the version across setup.py and the project code, see
    # https://packaging.python.org/en/latest/single_source_version.html
    version=about["__version__"],
    description=about["__summary__"],
    long_description=long_description,
    # The project's main homepage.
    url=about["__uri__"],
    # Author details
    author=about["__author__"],
    author_email=about["__email__"],
    # Choose your license
    license=about["__license__"],
    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        # How mature is this project? Common values are
        # Development Status :: 1 - Planning
        # Development Status :: 2 - Pre-Alpha
        # Development Status :: 3 - Alpha
        # Development Status :: 4 - Beta
        # Development Status :: 5 - Production/Stable
        # Development Status :: 6 - Mature
        # Development Status :: 7 - Inactive
        'Development Status :: 4 - Beta',
        # Indicate who your project is intended for
        'Intended Audience :: Science/Research',
        'Topic :: Scientific/Engineering :: Astronomy',
        # Pick your license as you wish (should match "license" above)
        'License :: OSI Approved :: MIT License',
        # Specify the Python versions you support here. In particular, ensure
        # that you indicate whether you support Python 2, Python 3 or both.
        'Programming Language :: Python :: 2',
        # 'Programming Language :: Python :: 2.6', # no reason to believe won't work on 2.6, but am not testing
        'Programming Language :: Python :: 2.7',
        # Intention is to write to v3 compatibility and eventually test, but am not doing that as of now (2015-03-06)
        # 'Programming Language :: Python :: 3',
        # 'Programming Language :: Python :: 3.2',
        # 'Programming Language :: Python :: 3.3',
        # 'Programming Language :: Python :: 3.4',
    ],
    # What does your project relate to?
    keywords='astronomy image viewer fits',
    # You can just specify the packages manually here if your project is
    # simple. Or you can use find_packages().
    packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
    # List run-time dependencies here. These will be installed by pip when your
    # project is installed. For an analysis of "install_requires" vs pip's
    # requirements files see:
    # https://packaging.python.org/en/latest/requirements.html
    # Note: stompy is needed for ActiveMQ integration, but can be safely ignored unless needed
    # Note: astropy will require numpy, so no need to specify here (had been: 'numpy>=1.8.1')
    # Note: matplotlib version requirement could maybe be even earlier as we're not doing anything bleeding edge.
    # Note: scipy not required, but highly recommended and some functionality may be lost without it.
    # (at time of writing, v0.1.0, you lost some of the analysis in the phot_panel.py)
    install_requires=['astropy_helpers>=1.0.0', 'astropy>=1.0.0'],
    # List additional groups of dependencies here (e.g. development dependencies).
    # You can install these using the following syntax, for example:
    # $ pip install -e .[dev,test]
    # 2015-03-06: haven't considered if we need/want any of these
    # extras_require = {
    # 'dev': ['check-manifest'],
    # 'test': ['coverage'],
    # },
    # If there are data files included in your packages that need to be
    # installed, specify them here. If using Python 2.6 or less, then these
    # have to be included in MANIFEST.in as well.
    # 2015-03-06: haven't considered if we need/want any of these
    # package_data={
    # 'sample': ['package_data.dat'],
    # },
    # Although 'package_data' is the preferred approach, in some case you may
    # need to place data files outside of your packages.
    # see http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files
    # In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
    # 2015-03-06: haven't considered if we need/want any of these
    # data_files=[('my_data', ['data/data_file'])],
    # To provide executable scripts, use entry points in preference to the
    # "scripts" keyword. Entry points provide cross-platform support and allow
    # pip to create the appropriate form of executable for the target platform.
    # 2015-03-06: haven't considered if we need/want any of these
    # entry_points={
    # 'console_scripts': [
    # 'sample=sample:main',
    # ],
    # },
)
| mit |
ruohoruotsi/librosa | librosa/segment.py | 1 | 21884 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Temporal segmentation
=====================
Recurrence and self-similarity
------------------------------
.. autosummary::
:toctree: generated/
recurrence_matrix
recurrence_to_lag
lag_to_recurrence
timelag_filter
Temporal clustering
-------------------
.. autosummary::
:toctree: generated/
agglomerative
subsegment
"""
from decorator import decorator
import numpy as np
import scipy
import scipy.signal
import sklearn
import sklearn.cluster
import sklearn.feature_extraction
import sklearn.neighbors
from . import cache
from . import util
from .util.exceptions import ParameterError
__all__ = ['recurrence_matrix',
'recurrence_to_lag',
'lag_to_recurrence',
'timelag_filter',
'agglomerative',
'subsegment']
@cache(level=30)
def recurrence_matrix(data, k=None, width=1, metric='euclidean',
                      sym=False, sparse=False, mode='connectivity',
                      bandwidth=None, axis=-1):
    '''Compute a recurrence matrix from a data matrix.
    `rec[i, j]` is non-zero if (`data[:, i]`, `data[:, j]`) are
    k-nearest-neighbors and `|i - j| >= width`
    Parameters
    ----------
    data : np.ndarray
        A feature matrix
    k : int > 0 [scalar] or None
        the number of nearest-neighbors for each sample
        Default: `k = 2 * ceil(sqrt(t - 2 * width + 1))`,
        or `k = 2` if `t <= 2 * width + 1`
    width : int >= 1 [scalar]
        only link neighbors `(data[:, i], data[:, j])`
        if `|i - j| >= width`
    metric : str
        Distance metric to use for nearest-neighbor calculation.
        See `sklearn.neighbors.NearestNeighbors` for details.
    sym : bool [scalar]
        set `sym=True` to only link mutual nearest-neighbors
    sparse : bool [scalar]
        if False, returns a dense type (ndarray)
        if True, returns a sparse type (scipy.sparse.csr_matrix)
    mode : str, {'connectivity', 'distance', 'affinity'}
        If 'connectivity', a binary connectivity matrix is produced.
        If 'distance', then a non-zero entry contains the distance between
        points.
        If 'affinity', then non-zero entries are mapped to
        `exp( - distance(i, j) / bandwidth)` where `bandwidth` is
        as specified below.
    bandwidth : None or float > 0
        If using ``mode='affinity'``, this can be used to set the
        bandwidth on the affinity kernel.
        If no value is provided, it is set automatically to the median
        distance between furthest nearest neighbors.
    axis : int
        The axis along which to compute recurrence.
        By default, the last index (-1) is taken.
    Returns
    -------
    rec : np.ndarray or scipy.sparse.csr_matrix, [shape=(t, t)]
        Recurrence matrix
    See Also
    --------
    sklearn.neighbors.NearestNeighbors
    scipy.spatial.distance.cdist
    librosa.feature.stack_memory
    recurrence_to_lag
    Notes
    -----
    This function caches at level 30.
    Examples
    --------
    Find nearest neighbors in MFCC space
    >>> y, sr = librosa.load(librosa.util.example_audio_file())
    >>> mfcc = librosa.feature.mfcc(y=y, sr=sr)
    >>> R = librosa.segment.recurrence_matrix(mfcc)
    Or fix the number of nearest neighbors to 5
    >>> R = librosa.segment.recurrence_matrix(mfcc, k=5)
    Suppress neighbors within +- 7 samples
    >>> R = librosa.segment.recurrence_matrix(mfcc, width=7)
    Use cosine similarity instead of Euclidean distance
    >>> R = librosa.segment.recurrence_matrix(mfcc, metric='cosine')
    Require mutual nearest neighbors
    >>> R = librosa.segment.recurrence_matrix(mfcc, sym=True)
    Use an affinity matrix instead of binary connectivity
    >>> R_aff = librosa.segment.recurrence_matrix(mfcc, mode='affinity')
    Plot the feature and recurrence matrices
    >>> import matplotlib.pyplot as plt
    >>> plt.figure(figsize=(8, 4))
    >>> plt.subplot(1, 2, 1)
    >>> librosa.display.specshow(R, x_axis='time', y_axis='time')
    >>> plt.title('Binary recurrence (symmetric)')
    >>> plt.subplot(1, 2, 2)
    >>> librosa.display.specshow(R_aff, x_axis='time', y_axis='time',
    ...                          cmap='magma_r')
    >>> plt.title('Affinity recurrence')
    >>> plt.tight_layout()
    '''
    data = np.atleast_2d(data)
    # Swap observations to the first dimension and flatten the rest
    data = np.swapaxes(data, axis, 0)
    t = data.shape[0]
    data = data.reshape((t, -1))
    if width < 1:
        raise ParameterError('width must be at least 1')
    if mode not in ['connectivity', 'distance', 'affinity']:
        raise ParameterError(("Invalid mode='{}'. Must be one of "
                              "['connectivity', 'distance', "
                              "'affinity']").format(mode))
    if k is None:
        if t > 2 * width + 1:
            k = 2 * np.ceil(np.sqrt(t - 2 * width + 1))
        else:
            k = 2
    if bandwidth is not None:
        if bandwidth <= 0:
            raise ParameterError('Invalid bandwidth={}. '
                                 'Must be strictly positive.'.format(bandwidth))
    k = int(k)
    # Build the neighbor search object
    try:
        knn = sklearn.neighbors.NearestNeighbors(n_neighbors=min(t-1, k + 2 * width),
                                                 metric=metric,
                                                 algorithm='auto')
    except ValueError:
        # Some metrics are only supported by the brute-force backend
        knn = sklearn.neighbors.NearestNeighbors(n_neighbors=min(t-1, k + 2 * width),
                                                 metric=metric,
                                                 algorithm='brute')
    knn.fit(data)
    # Get the knn graph
    if mode == 'affinity':
        kng_mode = 'distance'
    else:
        kng_mode = mode
    rec = knn.kneighbors_graph(mode=kng_mode).tolil()
    # Remove connections within width
    for diag in range(-width + 1, width):
        rec.setdiag(0, diag)
    # Retain only the top-k links per point
    for i in range(t):
        # Get the links from point i
        links = rec[i].nonzero()[1]
        # Order them ascending
        idx = links[np.argsort(rec[i, links].toarray())][0]
        # Everything past the kth closest gets squashed
        rec[i, idx[k:]] = 0
    # symmetrize
    if sym:
        rec = rec.minimum(rec.T)
    rec = rec.tocsr()
    rec.eliminate_zeros()
    if mode == 'connectivity':
        # Use the builtin `bool`: the `np.bool` alias was deprecated in
        # NumPy 1.20 and removed in 1.24.
        rec = rec.astype(bool)
    elif mode == 'affinity':
        if bandwidth is None:
            bandwidth = np.median(rec.max(axis=1).data)
        rec.data[:] = np.exp(- rec.data / bandwidth)
    if not sparse:
        rec = rec.toarray()
    return rec
def recurrence_to_lag(rec, pad=True, axis=-1):
    '''Convert a recurrence matrix into a lag matrix.
    `lag[i, j] == rec[i+j, j]`
    Parameters
    ----------
    rec : np.ndarray, or scipy.sparse.spmatrix [shape=(n, n)]
        A (binary) recurrence matrix, as returned by `recurrence_matrix`
    pad : bool
        If False, `lag` matrix is square, which is equivalent to
        assuming that the signal repeats itself indefinitely.
        If True, `lag` is padded with `n` zeros, which eliminates
        the assumption of repetition.
    axis : int
        The axis to keep as the `time` axis.
        The alternate axis will be converted to lag coordinates.
    Returns
    -------
    lag : np.ndarray
        The recurrence matrix in (lag, time) (if `axis=1`)
        or (time, lag) (if `axis=0`) coordinates
    Raises
    ------
    ParameterError : if `rec` is non-square
    See Also
    --------
    recurrence_matrix
    lag_to_recurrence
    Examples
    --------
    >>> y, sr = librosa.load(librosa.util.example_audio_file())
    >>> mfccs = librosa.feature.mfcc(y=y, sr=sr)
    >>> recurrence = librosa.segment.recurrence_matrix(mfccs)
    >>> lag_pad = librosa.segment.recurrence_to_lag(recurrence, pad=True)
    >>> lag_nopad = librosa.segment.recurrence_to_lag(recurrence, pad=False)
    >>> import matplotlib.pyplot as plt
    >>> plt.figure(figsize=(8, 4))
    >>> plt.subplot(1, 2, 1)
    >>> librosa.display.specshow(lag_pad, x_axis='time', y_axis='lag')
    >>> plt.title('Lag (zero-padded)')
    >>> plt.subplot(1, 2, 2)
    >>> librosa.display.specshow(lag_nopad, x_axis='time')
    >>> plt.title('Lag (no padding)')
    >>> plt.tight_layout()
    '''
    axis = np.abs(axis)  # map axis=-1 onto axis=1
    if rec.ndim != 2 or rec.shape[0] != rec.shape[1]:
        raise ParameterError('non-square recurrence matrix shape: '
                             '{}'.format(rec.shape))
    sparse = scipy.sparse.issparse(rec)
    roll_ax = None
    if sparse:
        # Sparse inputs are rolled along the lag axis; remember the input
        # format so the result can be converted back before returning.
        roll_ax = 1 - axis
        lag_format = rec.format
        # Use the compressed format whose fast (indexing) axis matches `axis`
        if axis == 0:
            rec = rec.tocsc()
        elif axis in (-1, 1):
            rec = rec.tocsr()
    t = rec.shape[axis]
    if sparse:
        if pad:
            # kron with [1, 0] (oriented along the lag axis) appends a zero
            # block after `rec`, doubling the lag dimension
            kron = np.asarray([[1, 0]]).swapaxes(axis, 0)
            lag = scipy.sparse.kron(kron.astype(rec.dtype), rec, format='lil')
        else:
            lag = scipy.sparse.lil_matrix(rec)
    else:
        if pad:
            # Zero-pad the lag dimension (the axis opposite to `axis`)
            padding = [(0, 0), (0, 0)]
            padding[(1-axis)] = (0, t)
            lag = np.pad(rec, padding, mode='constant')
        else:
            lag = rec.copy()
    # Shear the matrix: slice i along `axis` is rolled by -i, which
    # realizes lag[i, j] == rec[i + j, j]
    idx_slice = [slice(None)] * lag.ndim
    for i in range(1, t):
        idx_slice[axis] = i
        lag[tuple(idx_slice)] = util.roll_sparse(lag[tuple(idx_slice)], -i, axis=roll_ax)
    if sparse:
        return lag.asformat(lag_format)
    else:
        # Restore memory contiguity after the shearing operation
        return np.ascontiguousarray(lag.T).T
def lag_to_recurrence(lag, axis=-1):
    '''Convert a lag matrix into a recurrence matrix.
    Parameters
    ----------
    lag : np.ndarray or scipy.sparse.spmatrix
        A lag matrix, as produced by `recurrence_to_lag`
    axis : int
        The axis corresponding to the time dimension.
        The alternate axis will be interpreted in lag coordinates.
    Returns
    -------
    rec : np.ndarray or scipy.sparse.spmatrix [shape=(n, n)]
        A recurrence matrix in (time, time) coordinates
        For sparse matrices, format will match that of `lag`.
    Raises
    ------
    ParameterError : if `lag` does not have the correct shape
    See Also
    --------
    recurrence_to_lag
    Examples
    --------
    >>> y, sr = librosa.load(librosa.util.example_audio_file())
    >>> mfccs = librosa.feature.mfcc(y=y, sr=sr)
    >>> recurrence = librosa.segment.recurrence_matrix(mfccs)
    >>> lag_pad = librosa.segment.recurrence_to_lag(recurrence, pad=True)
    >>> lag_nopad = librosa.segment.recurrence_to_lag(recurrence, pad=False)
    >>> rec_pad = librosa.segment.lag_to_recurrence(lag_pad)
    >>> rec_nopad = librosa.segment.lag_to_recurrence(lag_nopad)
    >>> import matplotlib.pyplot as plt
    >>> plt.figure(figsize=(8, 4))
    >>> plt.subplot(2, 2, 1)
    >>> librosa.display.specshow(lag_pad, x_axis='time', y_axis='lag')
    >>> plt.title('Lag (zero-padded)')
    >>> plt.subplot(2, 2, 2)
    >>> librosa.display.specshow(lag_nopad, x_axis='time', y_axis='time')
    >>> plt.title('Lag (no padding)')
    >>> plt.subplot(2, 2, 3)
    >>> librosa.display.specshow(rec_pad, x_axis='time', y_axis='time')
    >>> plt.title('Recurrence (with padding)')
    >>> plt.subplot(2, 2, 4)
    >>> librosa.display.specshow(rec_nopad, x_axis='time', y_axis='time')
    >>> plt.title('Recurrence (without padding)')
    >>> plt.tight_layout()
    '''
    if axis not in [0, 1, -1]:
        raise ParameterError('Invalid target axis: {}'.format(axis))
    axis = np.abs(axis)
    # Accept either a square (no padding) or doubled (padded) lag dimension
    if lag.ndim != 2 or (lag.shape[0] != lag.shape[1] and
                         lag.shape[1 - axis] != 2 * lag.shape[axis]):
        raise ParameterError('Invalid lag matrix shape: {}'.format(lag.shape))
    # Since lag must be 2-dimensional, abs(axis) = axis
    t = lag.shape[axis]
    sparse = scipy.sparse.issparse(lag)
    if sparse:
        # LIL supports the per-slice assignment performed below
        rec = scipy.sparse.lil_matrix(lag)
        roll_ax = 1 - axis
    else:
        rec = lag.copy()
        roll_ax = None
    # Undo the shear applied by `recurrence_to_lag`: slice i is rolled by +i
    idx_slice = [slice(None)] * lag.ndim
    for i in range(1, t):
        idx_slice[axis] = i
        rec[tuple(idx_slice)] = util.roll_sparse(lag[tuple(idx_slice)], i, axis=roll_ax)
    # Trim any zero-padding back down to a square (t, t) matrix
    sub_slice = [slice(None)] * rec.ndim
    sub_slice[1 - axis] = slice(t)
    rec = rec[tuple(sub_slice)]
    if sparse:
        return rec.asformat(lag.format)
    else:
        # Restore memory contiguity after the shearing operation
        return np.ascontiguousarray(rec.T).T
def timelag_filter(function, pad=True, index=0):
    '''Filtering in the time-lag domain.
    This is primarily useful for adapting image filters to operate on
    `recurrence_to_lag` output.
    Using `timelag_filter` is equivalent to the following sequence of
    operations:
    >>> data_tl = librosa.segment.recurrence_to_lag(data)
    >>> data_filtered_tl = function(data_tl)
    >>> data_filtered = librosa.segment.lag_to_recurrence(data_filtered_tl)
    Parameters
    ----------
    function : callable
        The filtering function to wrap, e.g., `scipy.ndimage.median_filter`
    pad : bool
        Whether to zero-pad the structure feature matrix
    index : int >= 0
        If `function` accepts input data as a positional argument, it should be
        indexed by `index`
    Returns
    -------
    wrapped_function : callable
        A new filter function which applies in time-lag space rather than
        time-time space.
    Examples
    --------
    Apply a 5-bin median filter to the diagonal of a recurrence matrix
    >>> y, sr = librosa.load(librosa.util.example_audio_file())
    >>> chroma = librosa.feature.chroma_cqt(y=y, sr=sr)
    >>> rec = librosa.segment.recurrence_matrix(chroma)
    >>> from scipy.ndimage import median_filter
    >>> diagonal_median = librosa.segment.timelag_filter(median_filter)
    >>> rec_filtered = diagonal_median(rec, size=(1, 3), mode='mirror')
    Or with affinity weights
    >>> rec_aff = librosa.segment.recurrence_matrix(chroma, mode='affinity')
    >>> rec_aff_fil = diagonal_median(rec_aff, size=(1, 3), mode='mirror')
    >>> import matplotlib.pyplot as plt
    >>> plt.figure(figsize=(8,8))
    >>> plt.subplot(2, 2, 1)
    >>> librosa.display.specshow(rec, y_axis='time')
    >>> plt.title('Raw recurrence matrix')
    >>> plt.subplot(2, 2, 2)
    >>> librosa.display.specshow(rec_filtered)
    >>> plt.title('Filtered recurrence matrix')
    >>> plt.subplot(2, 2, 3)
    >>> librosa.display.specshow(rec_aff, x_axis='time', y_axis='time',
    ...                          cmap='magma_r')
    >>> plt.title('Raw affinity matrix')
    >>> plt.subplot(2, 2, 4)
    >>> librosa.display.specshow(rec_aff_fil, x_axis='time',
    ...                          cmap='magma_r')
    >>> plt.title('Filtered affinity matrix')
    >>> plt.tight_layout()
    '''
    def __my_filter(wrapped_f, *args, **kwargs):
        '''Decorator body: convert the `index`-th positional argument to
        time-lag space, apply the wrapped filter, and convert back.'''
        # Map the input data into time-lag space
        args = list(args)
        args[index] = recurrence_to_lag(args[index], pad=pad)
        # Apply the filtering function
        result = wrapped_f(*args, **kwargs)
        # Map back into time-time and return
        return lag_to_recurrence(result)
    # Bind `function` as the wrapped callable via the `decorator` package
    return decorator(__my_filter, function)
@cache(level=30)
def subsegment(data, frames, n_segments=4, axis=-1):
    '''Sub-divide a segmentation by feature clustering.
    Given a set of frame boundaries (`frames`), and a data matrix (`data`),
    each successive interval defined by `frames` is partitioned into
    `n_segments` by constrained agglomerative clustering.
    .. note::
        If an interval spans fewer than `n_segments` frames, then each
        frame becomes a sub-segment.
    Parameters
    ----------
    data : np.ndarray
        Data matrix to use in clustering
    frames : np.ndarray [shape=(n_boundaries,)], dtype=int, non-negative]
        Array of beat or segment boundaries, as provided by
        `librosa.beat.beat_track`,
        `librosa.onset.onset_detect`,
        or `agglomerative`.
    n_segments : int > 0
        Maximum number of frames to sub-divide each interval.
    axis : int
        Axis along which to apply the segmentation.
        By default, the last index (-1) is taken.
    Returns
    -------
    boundaries : np.ndarray [shape=(n_subboundaries,)]
        List of sub-divided segment boundaries
    See Also
    --------
    agglomerative : Temporal segmentation
    librosa.onset.onset_detect : Onset detection
    librosa.beat.beat_track : Beat tracking
    Notes
    -----
    This function caches at level 30.
    Examples
    --------
    Load audio, detect beat frames, and subdivide in twos by CQT
    >>> y, sr = librosa.load(librosa.util.example_audio_file(), duration=8)
    >>> tempo, beats = librosa.beat.beat_track(y=y, sr=sr, hop_length=512)
    >>> beat_times = librosa.frames_to_time(beats, sr=sr, hop_length=512)
    >>> cqt = np.abs(librosa.cqt(y, sr=sr, hop_length=512))
    >>> subseg = librosa.segment.subsegment(cqt, beats, n_segments=2)
    >>> subseg_t = librosa.frames_to_time(subseg, sr=sr, hop_length=512)
    >>> subseg
    array([  0,   2,   4,  21,  23,  26,  43,  55,  63,  72,  83,
            97, 102, 111, 122, 137, 142, 153, 162, 180, 182, 185,
           202, 210, 221, 231, 241, 256, 261, 271, 281, 296, 301,
           310, 320, 339, 341, 344, 361, 368, 382, 389, 401, 416,
           420, 430, 436, 451, 456, 465, 476, 489, 496, 503, 515,
           527, 535, 544, 553, 558, 571, 578, 590, 607, 609, 638])
    >>> import matplotlib.pyplot as plt
    >>> plt.figure()
    >>> librosa.display.specshow(librosa.amplitude_to_db(cqt,
    ...                                                  ref=np.max),
    ...                          y_axis='cqt_hz', x_axis='time')
    >>> lims = plt.gca().get_ylim()
    >>> plt.vlines(beat_times, lims[0], lims[1], color='lime', alpha=0.9,
    ...            linewidth=2, label='Beats')
    >>> plt.vlines(subseg_t, lims[0], lims[1], color='linen', linestyle='--',
    ...            linewidth=1.5, alpha=0.5, label='Sub-beats')
    >>> plt.legend(frameon=True, shadow=True)
    >>> plt.title('CQT + Beat and sub-beat markers')
    >>> plt.tight_layout()
    '''
    frames = util.fix_frames(frames, x_min=0, x_max=data.shape[axis], pad=True)
    if n_segments < 1:
        raise ParameterError('n_segments must be a positive integer')
    boundaries = []
    idx_slices = [slice(None)] * data.ndim
    for seg_start, seg_end in zip(frames[:-1], frames[1:]):
        idx_slices[axis] = slice(seg_start, seg_end)
        # Index with a tuple of slices: indexing an ndarray with a *list*
        # of slices is deprecated in NumPy and an error in recent releases.
        boundaries.extend(seg_start + agglomerative(data[tuple(idx_slices)],
                                                    min(seg_end - seg_start, n_segments),
                                                    axis=axis))
    return np.ascontiguousarray(boundaries)
def agglomerative(data, k, clusterer=None, axis=-1):
    """Bottom-up temporal segmentation.
    Partition `data` into `k` temporally contiguous segments using a
    connectivity-constrained agglomerative clustering.
    Parameters
    ----------
    data : np.ndarray
        data to cluster
    k : int > 0 [scalar]
        number of segments to produce
    clusterer : sklearn.cluster.AgglomerativeClustering, optional
        An optional AgglomerativeClustering object.
        If `None`, a constrained Ward object is instantiated.
    axis : int
        axis along which to cluster.
        By default, the last axis (-1) is chosen.
    Returns
    -------
    boundaries : np.ndarray [shape=(k,)]
        left-boundaries (frame numbers) of detected segments. This
        will always include `0` as the first left-boundary.
    See Also
    --------
    sklearn.cluster.AgglomerativeClustering
    """
    # Coerce to at least 2 dimensions, put the clustering axis first,
    # and flatten all remaining axes into feature vectors
    observations = np.swapaxes(np.atleast_2d(data), axis, 0)
    n_frames = observations.shape[0]
    observations = observations.reshape((n_frames, -1))
    if clusterer is None:
        # A 1-D grid graph restricts merges to temporally adjacent frames
        connectivity = sklearn.feature_extraction.image.grid_to_graph(n_x=n_frames,
                                                                      n_y=1, n_z=1)
        clusterer = sklearn.cluster.AgglomerativeClustering(n_clusters=k,
                                                            connectivity=connectivity,
                                                            memory=cache)
    clusterer.fit(observations)
    # A segment boundary sits wherever consecutive frame labels differ,
    # plus the implicit boundary at frame 0
    change_points = 1 + np.flatnonzero(np.diff(clusterer.labels_))
    return np.concatenate(([0], change_points.astype(int)))
| isc |
allinpaybusiness/ACS | allinpay projects/creditscorelogisticvaropt/classlogistic.py | 1 | 18588 | # -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import sys;
import os;
# Make the project's package directory importable when run as a script.
sys.path.append("allinpay projects")
from imp import reload
import creditscore
# Force a re-import so live edits to creditscore are picked up during
# interactive (Spyder) development sessions.
reload(creditscore)
from creditscore import CreditScore
import pandas as pd
import numpy as np
import time
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LogisticRegressionCV
from sklearn.model_selection import KFold
from sklearn.feature_selection import VarianceThreshold
from sklearn.feature_selection import RFECV
from sklearn.feature_selection import SelectFromModel
from sklearn.feature_selection import SelectKBest
class CreditScoreLogistic(CreditScore):
    """Logistic-regression credit scoring built on top of ``CreditScore``.

    Each ``*_trainandtest*`` method coarse-bins the raw variables, applies a
    WoE (weight-of-evidence) transformation, optionally runs feature
    selection, fits a logistic regression, and returns a DataFrame with
    columns ``target`` (true label) and ``probability`` (predicted default
    probability).  The ``loop*`` methods sweep the bin/cluster count from
    3 to 100 and dump evaluation metrics to CSV files under ``d:/ACS_CSVS``.

    NOTE(review): ``self.data.ix`` is used throughout; ``.ix`` was removed
    in pandas 1.0 — confirm the pinned pandas version or migrate to
    ``.loc``/``.iloc``.
    """

    def logistic_trainandtest(self, testsize, cv, feature_sel, varthreshold, nclusters, cmethod, resmethod):
        """Single hold-out evaluation.

        Splits the data once (fraction ``testsize`` held out), bins/WoE-
        transforms, selects features per ``feature_sel``, rebalances the
        training set per ``resmethod``, fits LogisticRegression and returns
        a DataFrame['target', 'probability'] for the held-out rows.
        """
        # Split the dataset into training and test sets.
        data_feature = self.data.ix[:, self.data.columns != 'default']
        data_target = self.data['default']
        X_train, X_test, y_train, y_test = train_test_split(data_feature, data_target, test_size=testsize, random_state=0)

        # Coarse-bin the training variables and WoE-transform them; the
        # binning fitted on train is applied to the test set as well.
        X_train, X_test = self.binandwoe_traintest(X_train, y_train, X_test, nclusters, cmethod)

        # Feature selection on the training set (sklearn.feature_selection).
        # Each branch keeps only the selected columns, preserving names.
        if feature_sel == "VarianceThreshold":
            selector = VarianceThreshold(threshold = varthreshold)
            X_train1 = pd.DataFrame(selector.fit_transform(X_train))
            X_train1.columns = X_train.columns[selector.get_support(True)]
            X_test1 = X_test[X_train1.columns]
        elif feature_sel == "RFECV":
            estimator = LogisticRegression()
            selector = RFECV(estimator, step=1, cv=cv)
            X_train1 = pd.DataFrame(selector.fit_transform(X_train, y_train))
            X_train1.columns = X_train.columns[selector.get_support(True)]
            X_test1 = X_test[X_train1.columns]
        elif feature_sel == "SelectFromModel":
            estimator = LogisticRegression()
            selector = SelectFromModel(estimator)
            X_train1 = pd.DataFrame(selector.fit_transform(X_train, y_train))
            X_train1.columns = X_train.columns[selector.get_support(True)]
            X_test1 = X_test[X_train1.columns]
        elif feature_sel == "SelectKBest":
            # NOTE(review): SelectKBest() with default k=10 — confirm intended.
            selector = SelectKBest()
            X_train1 = pd.DataFrame(selector.fit_transform(X_train, y_train))
            X_train1.columns = X_train.columns[selector.get_support(True)]
            X_test1 = X_test[X_train1.columns]
        else:
            X_train1, X_test1 = X_train, X_test

        # Resample the training set to mitigate class imbalance.
        X_train1, y_train = self.imbalanceddata (X_train1, y_train, resmethod)

        # Fit the model and predict the test set.
        classifier = LogisticRegression()  # all hyper-parameters left at defaults
        classifier.fit(X_train1, y_train)
        #predicted = classifier.predict(X_test)
        probability = classifier.predict_proba(X_test1)

        predresult = pd.DataFrame({'target' : y_test, 'probability' : probability[:,1]})

        return predresult

    def logistic_trainandtest_kfold(self, nsplit, cv, feature_sel, varthreshold, nclusters, cmethod, resmethod):
        """k-fold evaluation; concatenates per-fold predictions.

        Same pipeline as :meth:`logistic_trainandtest` but each of the
        ``nsplit`` folds serves once as the test set.
        """
        data_feature = self.data.ix[:, self.data.columns != 'default']
        data_target = self.data['default']

        # Split the data into k folds; each fold is the test set once, with
        # the remaining data as the training set.
        kf = KFold(n_splits=nsplit, shuffle=True)

        predresult = pd.DataFrame()
        for train_index, test_index in kf.split(data_feature):
            X_train, X_test = data_feature.iloc[train_index, ], data_feature.iloc[test_index, ]
            y_train, y_test = data_target.iloc[train_index, ], data_target.iloc[test_index, ]

            # Skip this fold if random sampling left only one class in
            # either train or test (metrics would be undefined).
            if (len(y_train.unique()) == 1) or (len(y_test.unique()) == 1):
                continue

            # Coarse-bin + WoE-transform train, apply the same to test.
            X_train, X_test = self.binandwoe_traintest(X_train, y_train, X_test, nclusters, cmethod)

            # Feature selection on the training set (sklearn.feature_selection).
            if feature_sel == "VarianceThreshold":
                selector = VarianceThreshold(threshold = varthreshold)
                X_train1 = pd.DataFrame(selector.fit_transform(X_train))
                X_train1.columns = X_train.columns[selector.get_support(True)]
                X_test1 = X_test[X_train1.columns]
            elif feature_sel == "RFECV":
                estimator = LogisticRegression()
                selector = RFECV(estimator, step=1, cv=cv)
                X_train1 = pd.DataFrame(selector.fit_transform(X_train, y_train))
                X_train1.columns = X_train.columns[selector.get_support(True)]
                X_test1 = X_test[X_train1.columns]
            elif feature_sel == "SelectFromModel":
                estimator = LogisticRegression()
                selector = SelectFromModel(estimator)
                X_train1 = pd.DataFrame(selector.fit_transform(X_train, y_train))
                X_train1.columns = X_train.columns[selector.get_support(True)]
                X_test1 = X_test[X_train1.columns]
            elif feature_sel == "SelectKBest":
                selector = SelectKBest()
                X_train1 = pd.DataFrame(selector.fit_transform(X_train, y_train))
                X_train1.columns = X_train.columns[selector.get_support(True)]
                X_test1 = X_test[X_train1.columns]
            else:
                X_train1, X_test1 = X_train, X_test

            # Resample the training set to mitigate class imbalance.
            X_train1, y_train = self.imbalanceddata (X_train1, y_train, resmethod)

            # Fit the model and predict this fold's test set.
            classifier = LogisticRegression()  # all hyper-parameters left at defaults
            classifier.fit(X_train1, y_train)
            #predicted = classifier.predict(X_test)
            probability = classifier.predict_proba(X_test1)

            temp = pd.DataFrame({'target' : y_test, 'probability' : probability[:,1]})
            predresult = pd.concat([predresult, temp], ignore_index = True)

        return predresult

    def logistic_trainandtest_kfold_LRCV(self, nsplit, cv, feature_sel=None, varthreshold=0, op='liblinear', nclusters=10, cmethod=None):
        """k-fold evaluation using LogisticRegressionCV (built-in C search).

        Unlike the plain k-fold method there is no resampling step and only
        the VarianceThreshold / RFECV selection branches are supported.
        """
        data_feature = self.data.ix[:, self.data.columns != 'default']
        data_target = self.data['default']

        # Split the data into k folds; each fold is the test set once, with
        # the remaining data as the training set.
        kf = KFold(n_splits=nsplit, shuffle=True)

        predresult = pd.DataFrame()
        for train_index, test_index in kf.split(data_feature):
            X_train, X_test = data_feature.iloc[train_index, ], data_feature.iloc[test_index, ]
            y_train, y_test = data_target.iloc[train_index, ], data_target.iloc[test_index, ]

            # Skip this fold if random sampling left only one class in
            # either train or test.
            if (len(y_train.unique()) == 1) or (len(y_test.unique()) == 1):
                continue

            # Coarse-bin + WoE-transform train, apply the same to test.
            X_train, X_test = self.binandwoe_traintest(X_train, y_train, X_test, nclusters, cmethod)

            # Feature selection on the training set (sklearn.feature_selection).
            if feature_sel == "VarianceThreshold":
                selector = VarianceThreshold(threshold = varthreshold)
                X_train1 = pd.DataFrame(selector.fit_transform(X_train))
                X_train1.columns = X_train.columns[selector.get_support(True)]
                X_test1 = X_test[X_train1.columns]
            elif feature_sel == "RFECV":
                estimator = LogisticRegression()
                selector = RFECV(estimator, step=1, cv=cv)
                X_train1 = pd.DataFrame(selector.fit_transform(X_train, y_train))
                X_train1.columns = X_train.columns[selector.get_support(True)]
                X_test1 = X_test[X_train1.columns]
            else:
                X_train1, X_test1 = X_train, X_test

            # Fit (with internal CV over C) and predict this fold.
            classifier = LogisticRegressionCV(cv=nsplit,solver=op)  # other parameters left at defaults
            classifier.fit(X_train1, y_train)
            #predicted = classifier.predict(X_test)
            probability = classifier.predict_proba(X_test1)

            temp = pd.DataFrame({'target' : y_test, 'probability' : probability[:,1]})
            predresult = pd.concat([predresult, temp], ignore_index = True)

        return predresult

    def looplogistic_trainandtest(self, testsize, cv, feature_sel=None, varthreshold=0,cmethod=None ):
        """Sweep the bin count 3..100 on a hold-out split; save metrics CSV."""
        df = pd.DataFrame()
        for i in range (3 , 101):  # loop over the number of bins / clusters
            # Hold-out evaluation for this bin count.
            # FIXME(review): argument mismatch — logistic_trainandtest's
            # signature is (testsize, cv, feature_sel, varthreshold,
            # nclusters, cmethod, resmethod).  The call below shifts every
            # positional argument by one (i lands in testsize, varthreshold
            # lands in nclusters) and then also passes nclusters=i, which
            # raises TypeError (duplicate 'nclusters'); 'resmethod' is never
            # supplied.  This method cannot currently run as written.
            predresult = self.logistic_trainandtest(i, testsize, cv, feature_sel, varthreshold,nclusters=i,cmethod=cmethod)
            # Evaluate and accumulate the metrics for this bin count.
            auc, ks, metrics_p = self.loopmodelmetrics_scores(predresult)
            temp = pd.DataFrame({'bin' : i, 'auc_value' : auc ,'ks_value' :ks ,'p0=0.5' :metrics_p['accuracy'][5]} ,index=[0])
            # NOTE(review): ignore_index=False here but True in the k-fold
            # variant — confirm which is intended.
            df = pd.concat([df, temp], ignore_index = False)
            print('num %s complete' %i)
        # Timestamped output file under d:/ACS_CSVS (created on demand).
        time0 = time.strftime('%Y%m%d%H%M%S',time.localtime(time.time()))

        exist = os.path.exists('d:/ACS_CSVS')
        if exist:
            df.to_csv('d:/ACS_CSVS/'+time0+'.csv',index=False,sep=',')
        else:
            os.makedirs('d:/ACS_CSVS/')
            df.to_csv('d:/ACS_CSVS/'+time0+'.csv',index=False,sep=',')

    def looplogistic_trainandtest_kfold(self, nsplit, cv, feature_sel=None, varthreshold=0,cmethod=None):
        """Sweep the bin count 3..100 with k-fold CV; save metrics CSV."""
        df = pd.DataFrame()
        for i in range (3 , 101):  # loop over the number of bins / clusters
            # Cross-validated evaluation for this bin count.
            # FIXME(review): same argument mismatch as in
            # looplogistic_trainandtest — the positional args are shifted
            # into the wrong parameters and nclusters=i duplicates a
            # positional, raising TypeError; 'resmethod' is missing.
            predresult = self.logistic_trainandtest_kfold(i, nsplit, cv, feature_sel, varthreshold,nclusters =i,cmethod=cmethod)
            # Evaluate and accumulate the metrics for this bin count.
            auc, ks, metrics_p = self.loopmodelmetrics_scores(predresult)
            temp = pd.DataFrame({'bin' : i, 'auc_value' : auc ,'ks_value' :ks,'p0=0.5,accuracy' :metrics_p['accuracy'][5]} ,index=[0])
            df = pd.concat([df, temp], ignore_index = True)
            print(' num %s complete' %i)
        # Timestamped output file under d:/ACS_CSVS (created on demand);
        # the cluster method name is embedded in the filename when given.
        time0 = time.strftime('%Y%m%d%H%M%S',time.localtime(time.time()))

        exist = os.path.exists('d:/ACS_CSVS')
        if exist:
            if cmethod != None:
                df.to_csv('d:/ACS_CSVS/'+time0+'-kfold-'+'-'+self.dataname+'-'+cmethod+'.csv',index=False,sep=',')
            else:
                df.to_csv('d:/ACS_CSVS/'+time0+'-kfold-'+'-'+self.dataname+'.csv',index=False,sep=',')
        else:
            os.makedirs('d:/ACS_CSVS/')
            if cmethod != None:
                df.to_csv('d:/ACS_CSVS/'+time0+'-kfold-'+'-'+self.dataname+'-'+cmethod+'.csv',index=False,sep=',')
            else:
                df.to_csv('d:/ACS_CSVS/'+time0+'-kfold-'+'-'+self.dataname+'.csv',index=False,sep=',')

    def looplogistic_trainandtest_kfold_LRCV(self, nsplit, cv, feature_sel=None, varthreshold=0,op='liblinear',cmethod=None):
        """Sweep the bin count 3..100 with k-fold + LogisticRegressionCV."""
        df = pd.DataFrame()
        for i in range (3 , 101):  # loop over the number of bins
            # Cross-validated evaluation for this bin count.
            # NOTE(review): 'cmethod' is accepted but never forwarded to
            # logistic_trainandtest_kfold_LRCV (its default None is used) —
            # confirm whether it should be passed through.
            predresult = self.logistic_trainandtest_kfold_LRCV(nsplit, cv, feature_sel, varthreshold ,op=op,nclusters=i)
            # Evaluate and accumulate the metrics for this bin count.
            auc, ks, metrics_p = self.loopmodelmetrics_scores(predresult)
            temp = pd.DataFrame({'bin' : i, 'auc_value' : auc ,'ks_value' :ks,'p0=0.5,accuracy' :metrics_p['accuracy'][5]} ,index=[0])
            df = pd.concat([df, temp], ignore_index = True)
            print(' num %s complete' %i)
        # Timestamped output file under d:/ACS_CSVS (created on demand).
        time0 = time.strftime('%Y%m%d%H%M%S',time.localtime(time.time()))

        exist = os.path.exists('d:/ACS_CSVS')
        if exist:
            df.to_csv('d:/ACS_CSVS/'+time0+'-kfold_LRCV-'+op+'-'+self.dataname+'.csv',index=False,sep=',')
        else:
            os.makedirs('d:/ACS_CSVS/')
            df.to_csv('d:/ACS_CSVS/'+time0+'-kfold_LRCV-'+op+'-'+self.dataname+'.csv',index=False,sep=',')

    def filterlogistic_trainandtest(self, testsize, cv, feature_sel, varthreshold, nclusters, cmethod, resmethod, n_exclude):
        """Hold-out evaluation with greedy backward variable elimination.

        Fits the full model, then ``n_exclude`` times removes the variable
        whose removal degrades the -2*log-likelihood the least, re-predicting
        the test set after each removal.  Returns the stacked predictions of
        the full model and every reduced model.
        """
        # Split the dataset into training and test sets.
        data_feature = self.data.ix[:, self.data.columns != 'default']
        data_target = self.data['default']
        X_train, X_test, y_train, y_test = train_test_split(data_feature, data_target, test_size=testsize, random_state=0)

        # Coarse-bin the training variables and WoE-transform them; the
        # binning fitted on train is applied to the test set as well.
        X_train, X_test = self.binandwoe_traintest(X_train, y_train, X_test, nclusters, cmethod)

        # Feature selection on the training set (sklearn.feature_selection).
        if feature_sel == "VarianceThreshold":
            selector = VarianceThreshold(threshold = varthreshold)
            X_train1 = pd.DataFrame(selector.fit_transform(X_train))
            X_train1.columns = X_train.columns[selector.get_support(True)]
            X_test1 = X_test[X_train1.columns]
        elif feature_sel == "RFECV":
            estimator = LogisticRegression()
            selector = RFECV(estimator, step=1, cv=cv)
            X_train1 = pd.DataFrame(selector.fit_transform(X_train, y_train))
            X_train1.columns = X_train.columns[selector.get_support(True)]
            X_test1 = X_test[X_train1.columns]
        elif feature_sel == "SelectFromModel":
            estimator = LogisticRegression()
            selector = SelectFromModel(estimator)
            X_train1 = pd.DataFrame(selector.fit_transform(X_train, y_train))
            X_train1.columns = X_train.columns[selector.get_support(True)]
            X_test1 = X_test[X_train1.columns]
        elif feature_sel == "SelectKBest":
            selector = SelectKBest()
            X_train1 = pd.DataFrame(selector.fit_transform(X_train, y_train))
            X_train1.columns = X_train.columns[selector.get_support(True)]
            X_test1 = X_test[X_train1.columns]
        else:
            X_train1, X_test1 = X_train, X_test

        # Resample the training set to mitigate class imbalance.
        X_train1, y_train = self.imbalanceddata (X_train1, y_train, resmethod)

        # Fit the full model and predict the test set.
        classifier = LogisticRegression()  # all hyper-parameters left at defaults
        classifier.fit(X_train1, y_train)
        #predicted = classifier.predict(X_test)
        probability = classifier.predict_proba(X_test1)

        # Deviance (-2 * log-likelihood) of the full training fit.
        # Log likelihood ratio statistic
        logproba = classifier.predict_log_proba(X_train1)
        LL = 0
        for i in range (len(y_train)):
            if y_train.iloc[i] == 0:
                LL = LL + logproba[i][0]
            else:
                LL = LL + logproba[i][1]
        LL = -2 * LL

        # Dump the fitted coefficients for inspection.
        for i in range(classifier.coef_.shape[1]):
            print(classifier.coef_[0][i])
        print(classifier.intercept_)
        print ('Coefficients above\n')

        predresult = pd.DataFrame({'target' : y_test, 'probability' : probability[:,1]})

        # Backward elimination: drop each remaining variable in turn, refit,
        # and compute the new deviance; remove the variable whose removal
        # increases the deviance the least, re-predict the test set, and
        # repeat n_exclude times.
        X_train1_temp = X_train1.copy()
        X_test1_temp = X_test1.copy()
        j_removed = []
        for k in range(n_exclude):
            MinImprovement = np.inf
            # MaxImprovement = -np.inf
            j_exclude = 0
            for j in range (X_train1_temp.shape[1]):
                X_train2_temp = X_train1_temp.drop(X_train1_temp.columns[j], axis=1)
                classifier.fit(X_train2_temp, y_train)
                # Log likelihood ratio statistic
                logproba = classifier.predict_log_proba(X_train2_temp)
                LLp = 0
                for i in range (len(y_train)):
                    if y_train.iloc[i] == 0:
                        LLp = LLp + logproba[i][0]
                    else:
                        LLp = LLp + logproba[i][1]
                LLp = -2 * LLp
                Improvement = LLp - LL
                if Improvement < MinImprovement:
                    MinImprovement = Improvement
                    # FIXME(review): 'classifierp = classifier' stores a
                    # reference, not a snapshot — classifier is refit on the
                    # next j, so after this loop classifierp always holds the
                    # LAST candidate fit, not the best one.  Use
                    # sklearn.base.clone + refit (or refit on the chosen
                    # column set after the loop) to get the intended model.
                    classifierp = classifier
                    j_exclude = j
                # just for fun
                # if Improvement > MaxImprovement:
                #     MaxImprovement = Improvement
                #     classifierp = classifier
                #     j_exclude = j
            # Drop the chosen column from both train and test working copies.
            X_train1_temp = X_train1_temp.drop(X_train1_temp.columns[j_exclude], axis=1)
            X_test1_temp = X_test1_temp.drop(X_test1_temp.columns[j_exclude], axis=1)
            # Map the position within the shrunken frame back to the index in
            # the original X_train1 (accounting for previously removed cols).
            j_exclude_real = j_exclude + [l<=j_exclude for l in j_removed].count(True)
            while [j_exclude_real == l for l in j_removed].count(True) != 0:
                j_exclude_real = j_exclude_real + 1
            j_removed.append(j_exclude_real)
            print('Total LL improved ', MinImprovement)
            print('The ', j_exclude_real, ' th character removed')
            probabilityp = classifierp.predict_proba(X_test1_temp)
            predresultp = pd.DataFrame({'target' : y_test, 'probability' : probabilityp[:,1]})
            # NOTE(review): DataFrame.append was removed in pandas 2.0 —
            # pd.concat is the forward-compatible equivalent.
            predresult = predresult.append(predresultp)
            # predresultp = pd.DataFrame({'target' : y_test, 'probability' : probabilityp[:,1]})
        return predresult
| apache-2.0 |
marcocaccin/scikit-learn | sklearn/datasets/tests/test_20news.py | 280 | 3045 | """Test the 20news downloader, if the data is available."""
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
from sklearn import datasets
def test_20news():
    """Check the reduced 20newsgroups loader against the cached full set."""
    try:
        full = datasets.fetch_20newsgroups(
            subset='all', download_if_missing=False, shuffle=False)
    except IOError:
        raise SkipTest("Download 20 newsgroups to run this test")

    # Reduced dataset restricted to the last two categories of the full set.
    reduced = datasets.fetch_20newsgroups(
        subset='all', categories=full.target_names[-1:-3:-1], shuffle=False)

    # Category ordering must match the ordering in the full dataset.
    assert_equal(reduced.target_names, full.target_names[-2:])

    # With exactly two categories the labels must be exactly {0, 1}.
    assert_equal(np.unique(reduced.target).tolist(), [0, 1])

    # filenames, target and data must stay aligned.
    assert_equal(len(reduced.filenames), len(reduced.target))
    assert_equal(len(reduced.filenames), len(reduced.data))

    # The first document of the reduced set must equal the first document
    # of the corresponding category in the full dataset.
    first_doc = reduced.data[0]
    category = reduced.target_names[reduced.target[0]]
    label = full.target_names.index(category)
    expected = full.data[np.where(full.target == label)[0][0]]
    assert_equal(first_doc, expected)
def test_20news_length_consistency():
    """Checks the length consistencies within the bunch

    This is a non-regression test for a bug present in 0.16.1.
    """
    try:
        datasets.fetch_20newsgroups(
            subset='all', download_if_missing=False, shuffle=False)
    except IOError:
        raise SkipTest("Download 20 newsgroups to run this test")

    # Extract the full dataset
    bunch = datasets.fetch_20newsgroups(subset='all')
    # Item access and attribute access must expose equally long sequences.
    for key in ('data', 'target', 'filenames'):
        assert_equal(len(bunch[key]), len(getattr(bunch, key)))
def test_20news_vectorized():
    # This test is slow.
    raise SkipTest("Test too slow.")

    # Each subset must come back as a CSR float64 matrix with the known
    # document counts and the shared vocabulary size.
    for subset, n_docs in (("train", 11314),
                           ("test", 7532),
                           ("all", 11314 + 7532)):
        bunch = datasets.fetch_20newsgroups_vectorized(subset=subset)
        assert_true(sp.isspmatrix_csr(bunch.data))
        assert_equal(bunch.data.shape, (n_docs, 107428))
        assert_equal(bunch.target.shape[0], n_docs)
        assert_equal(bunch.data.dtype, np.float64)
| bsd-3-clause |
ryandougherty/mwa-capstone | MWA_Tools/build/matplotlib/lib/mpl_toolkits/axes_grid1/anchored_artists.py | 8 | 5410 |
from matplotlib.patches import Rectangle, Ellipse
import numpy as np
from matplotlib.offsetbox import AnchoredOffsetbox, AuxTransformBox, VPacker,\
TextArea, AnchoredText, DrawingArea, AnnotationBbox
class AnchoredDrawingArea(AnchoredOffsetbox):
    """
    AnchoredOffsetbox with DrawingArea
    """

    def __init__(self, width, height, xdescent, ydescent,
                 loc, pad=0.4, borderpad=0.5, prop=None, frameon=True,
                 **kwargs):
        """
        *width*, *height*, *xdescent*, *ydescent* : the dimensions of the
        DrawingArea, in pixels.

        *prop* : font property. This is only used for scaling the paddings.
        """
        # The drawing area children are clipped to the area's extent.
        self.da = DrawingArea(width, height, xdescent, ydescent, clip=True)
        # Alias kept for API symmetry with the other Anchored* classes.
        self.drawing_area = self.da

        # NOTE(review): 'prop' is accepted but deliberately NOT forwarded
        # (prop=None below), so it never affects padding scaling here; this
        # matches upstream matplotlib — confirm before "fixing".
        super(AnchoredDrawingArea, self).__init__(loc, pad=pad, borderpad=borderpad,
                                                  child=self.da,
                                                  prop=None,
                                                  frameon=frameon,
                                                  **kwargs)
class AnchoredAuxTransformBox(AnchoredOffsetbox):
    """An anchored container whose children are drawn in *transform* space."""

    def __init__(self, transform, loc,
                 pad=0.4, borderpad=0.5, prop=None, frameon=True, **kwargs):
        # Everything added to this box is positioned via *transform*.
        box = AuxTransformBox(transform)
        self.drawing_area = box
        super(AnchoredAuxTransformBox, self).__init__(
            loc, pad=pad, borderpad=borderpad, child=box,
            prop=prop, frameon=frameon, **kwargs)
class AnchoredEllipse(AnchoredOffsetbox):
    def __init__(self, transform, width, height, angle, loc,
                 pad=0.1, borderpad=0.1, prop=None, frameon=True, **kwargs):
        """
        Draw an anchored ellipse whose size is given in the data
        coordinates of the axes described by *transform*.

        *pad* and *borderpad* are in fractions of the legend font size
        (or *prop*).
        """
        # Build the ellipse inside a box that applies *transform*.
        container = AuxTransformBox(transform)
        patch = Ellipse((0, 0), width, height, angle)
        container.add_artist(patch)

        self._box = container
        self.ellipse = patch

        super(AnchoredEllipse, self).__init__(
            loc, pad=pad, borderpad=borderpad, child=container,
            prop=prop, frameon=frameon, **kwargs)
class AnchoredSizeBar(AnchoredOffsetbox):
    def __init__(self, transform, size, label, loc,
                 pad=0.1, borderpad=0.1, sep=2, prop=None, frameon=True,
                 **kwargs):
        """
        Draw an anchored horizontal bar whose length is *size* in the data
        coordinates of *transform*, with *label* centred underneath.

        *pad* and *borderpad* are in fractions of the legend font size
        (or *prop*); *sep* is in points.
        """
        # The bar itself: an unfilled zero-height rectangle in data coords.
        bar = AuxTransformBox(transform)
        bar.add_artist(Rectangle((0, 0), size, 0, fc="none"))
        self.size_bar = bar

        self.txt_label = TextArea(label, minimumdescent=False)

        # Stack the bar above its label, centred, separated by *sep* points.
        self._box = VPacker(children=[bar, self.txt_label],
                            align="center",
                            pad=0, sep=sep)

        super(AnchoredSizeBar, self).__init__(
            loc, pad=pad, borderpad=borderpad, child=self._box,
            prop=prop, frameon=frameon, **kwargs)
if __name__ == "__main__":
import matplotlib.pyplot as plt
fig = plt.gcf()
fig.clf()
ax = plt.subplot(111)
offsetbox = AnchoredText("Test", loc=6, pad=0.3,
borderpad=0.3, prop=None)
xy = (0.5, 0.5)
ax.plot([0.5], [0.5], "xk")
ab = AnnotationBbox(offsetbox, xy,
xybox=(1., .5),
xycoords='data',
boxcoords=("axes fraction", "data"),
arrowprops=dict(arrowstyle="->"))
#arrowprops=None)
ax.add_artist(ab)
from matplotlib.patches import Circle
ada = AnchoredDrawingArea(20, 20, 0, 0,
loc=6, pad=0.1, borderpad=0.3, frameon=True)
p = Circle((10, 10), 10)
ada.da.add_artist(p)
ab = AnnotationBbox(ada, (0.3, 0.4),
xybox=(1., 0.4),
xycoords='data',
boxcoords=("axes fraction", "data"),
arrowprops=dict(arrowstyle="->"))
#arrowprops=None)
ax.add_artist(ab)
arr = np.arange(100).reshape((10,10))
im = AnchoredImage(arr,
loc=4,
pad=0.5, borderpad=0.2, prop=None, frameon=True,
zoom=1,
cmap = None,
norm = None,
interpolation=None,
origin=None,
extent=None,
filternorm=1,
filterrad=4.0,
resample = False,
)
ab = AnnotationBbox(im, (0.5, 0.5),
xybox=(-10., 10.),
xycoords='data',
boxcoords="offset points",
arrowprops=dict(arrowstyle="->"))
#arrowprops=None)
ax.add_artist(ab)
ax.set_xlim(0, 1)
ax.set_ylim(0, 1)
plt.draw()
plt.show()
| gpl-2.0 |
codeforfrankfurt/PolBotCheck | polbotcheck/plots/front_back_link.py | 1 | 1108 | import sys
from os import path
sys.path.append( path.dirname( path.dirname( path.abspath(__file__) ) ) )
import db
import seaborn as sns
import pandas as pd
import numpy as np
# takes data (that should come from the backend) and creates the output we would like to have
# on the front end.
def follower_botness(username):
    """Histogram the botness scores of *username*'s followers.

    Saves the distribution plot to ``testfig.png`` and returns the
    fraction of followers whose score exceeds the cutoff (0.7), or
    ``None`` when no follower scores are available.
    """
    cutoff = 0.7

    # Collect the botness score of every follower; the follower's username
    # is the second component of the edge's '_from' handle.
    scores = [
        db.getUser(edge['_from'].split('/')[1])['botness']['score']
        for edge in db.getFollowers(toName=username)
    ]

    if not scores:
        return None

    series = pd.Series(scores, name='probability of follower bot')
    figure = sns.distplot(series).get_figure()
    figure.savefig('testfig.png')

    # Share of followers above the bot-probability cutoff.
    botpercent = sum(np.array(scores) > cutoff) / len(scores)
    return botpercent
| mit |
spallavolu/scikit-learn | sklearn/linear_model/least_angle.py | 61 | 54324 | """
Least Angle Regression algorithm. See the documentation on the
Generalized Linear Model for a complete discussion.
"""
from __future__ import print_function
# Author: Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux
#
# License: BSD 3 clause
from math import log
import sys
import warnings
from distutils.version import LooseVersion
import numpy as np
from scipy import linalg, interpolate
from scipy.linalg.lapack import get_lapack_funcs
from .base import LinearModel
from ..base import RegressorMixin
from ..utils import arrayfuncs, as_float_array, check_X_y
from ..cross_validation import check_cv
from ..utils import ConvergenceWarning
from ..externals.joblib import Parallel, delayed
from ..externals.six.moves import xrange
import scipy
# Extra kwargs for linalg.solve_triangular: scipy >= 0.12 supports
# check_finite=False, which skips the (costly) finiteness validation.
solve_triangular_args = {}
if LooseVersion(scipy.__version__) >= LooseVersion('0.12'):
    solve_triangular_args = {'check_finite': False}
def lars_path(X, y, Xy=None, Gram=None, max_iter=500,
              alpha_min=0, method='lar', copy_X=True,
              eps=np.finfo(np.float).eps,
              copy_Gram=True, verbose=0, return_path=True,
              return_n_iter=False, positive=False):
    """Compute Least Angle Regression or Lasso path using LARS algorithm [1]

    The optimization objective for the case method='lasso' is::

    (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1

    in the case of method='lars', the objective function is only known in
    the form of an implicit equation (see discussion in [1])

    Read more in the :ref:`User Guide <least_angle_regression>`.

    Parameters
    -----------
    X : array, shape: (n_samples, n_features)
        Input data.

    y : array, shape: (n_samples)
        Input targets.

    Xy : array-like, shape (n_features,), optional
        Precomputed ``np.dot(X.T, y)``; computed from X and y when None.

    positive : boolean (default=False)
        Restrict coefficients to be >= 0.
        When using this option together with method 'lasso' the model
        coefficients will not converge to the ordinary-least-squares solution
        for small values of alpha (neither will they when using method 'lar'
        ..). Only coeffiencts up to the smallest alpha value (``alphas_[alphas_ >
        0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
        algorithm are typically in congruence with the solution of the
        coordinate descent lasso_path function.

    max_iter : integer, optional (default=500)
        Maximum number of iterations to perform, set to infinity for no limit.

    Gram : None, 'auto', array, shape: (n_features, n_features), optional
        Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram
        matrix is precomputed from the given X, if there are more samples
        than features.

    alpha_min : float, optional (default=0)
        Minimum correlation along the path. It corresponds to the
        regularization parameter alpha parameter in the Lasso.

    method : {'lar', 'lasso'}, optional (default='lar')
        Specifies the returned model. Select ``'lar'`` for Least Angle
        Regression, ``'lasso'`` for the Lasso.

    eps : float, optional (default=``np.finfo(np.float).eps``)
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems.

    copy_X : bool, optional (default=True)
        If ``False``, ``X`` is overwritten.

    copy_Gram : bool, optional (default=True)
        If ``False``, ``Gram`` is overwritten.

    verbose : int (default=0)
        Controls output verbosity.

    return_path : bool, optional (default=True)
        If ``return_path==True`` returns the entire path, else returns only the
        last point of the path.

    return_n_iter : bool, optional (default=False)
        Whether to return the number of iterations.

    Returns
    --------
    alphas : array, shape: [n_alphas + 1]
        Maximum of covariances (in absolute value) at each iteration.
        ``n_alphas`` is either ``max_iter``, ``n_features`` or the
        number of nodes in the path with ``alpha >= alpha_min``, whichever
        is smaller.

    active : array, shape [n_alphas]
        Indices of active variables at the end of the path.

    coefs : array, shape (n_features, n_alphas + 1)
        Coefficients along the path

    n_iter : int
        Number of iterations run. Returned only if return_n_iter is set
        to True.

    See also
    --------
    lasso_path
    LassoLars
    Lars
    LassoLarsCV
    LarsCV
    sklearn.decomposition.sparse_encode

    References
    ----------
    .. [1] "Least Angle Regression", Effron et al.
           http://www-stat.stanford.edu/~tibs/ftp/lars.pdf

    .. [2] `Wikipedia entry on the Least-angle regression
           <http://en.wikipedia.org/wiki/Least-angle_regression>`_

    .. [3] `Wikipedia entry on the Lasso
           <http://en.wikipedia.org/wiki/Lasso_(statistics)#Lasso_method>`_

    """
    # NOTE(review): np.float / np.finfo(np.float) is a deprecated alias
    # removed in NumPy 1.24 — fine for the scipy/numpy versions this file
    # targets, but confirm before upgrading.
    n_features = X.shape[1]
    n_samples = y.size
    max_features = min(max_iter, n_features)

    if return_path:
        coefs = np.zeros((max_features + 1, n_features))
        alphas = np.zeros(max_features + 1)
    else:
        coef, prev_coef = np.zeros(n_features), np.zeros(n_features)
        alpha, prev_alpha = np.array([0.]), np.array([0.])  # better ideas?

    n_iter, n_active = 0, 0
    # 'active' holds original column indices of the active set; 'indices'
    # tracks the column permutation applied to X / Gram below.
    active, indices = list(), np.arange(n_features)
    # holds the sign of covariance
    sign_active = np.empty(max_features, dtype=np.int8)
    drop = False

    # will hold the cholesky factorization. Only lower part is
    # referenced.
    # We are initializing this to "zeros" and not empty, because
    # it is passed to scipy linalg functions and thus if it has NaNs,
    # even if they are in the upper part that it not used, we
    # get errors raised.
    # Once we support only scipy > 0.12 we can use check_finite=False and
    # go back to "empty"
    L = np.zeros((max_features, max_features), dtype=X.dtype)
    swap, nrm2 = linalg.get_blas_funcs(('swap', 'nrm2'), (X,))
    solve_cholesky, = get_lapack_funcs(('potrs',), (X,))

    if Gram is None:
        if copy_X:
            # force copy. setting the array to be fortran-ordered
            # speeds up the calculation of the (partial) Gram matrix
            # and allows to easily swap columns
            X = X.copy('F')
    elif Gram == 'auto':
        Gram = None
        if X.shape[0] > X.shape[1]:
            Gram = np.dot(X.T, X)
    elif copy_Gram:
        Gram = Gram.copy()

    if Xy is None:
        Cov = np.dot(X.T, y)
    else:
        Cov = Xy.copy()

    if verbose:
        if verbose > 1:
            print("Step\t\tAdded\t\tDropped\t\tActive set size\t\tC")
        else:
            sys.stdout.write('.')
            sys.stdout.flush()

    tiny = np.finfo(np.float).tiny  # to avoid division by 0 warning
    tiny32 = np.finfo(np.float32).tiny  # to avoid division by 0 warning
    equality_tolerance = np.finfo(np.float32).eps

    while True:
        if Cov.size:
            # C is the largest (absolute) correlation of any inactive
            # variable with the current residual.
            if positive:
                C_idx = np.argmax(Cov)
            else:
                C_idx = np.argmax(np.abs(Cov))

            C_ = Cov[C_idx]

            if positive:
                C = C_
            else:
                C = np.fabs(C_)
        else:
            C = 0.

        if return_path:
            alpha = alphas[n_iter, np.newaxis]
            coef = coefs[n_iter]
            prev_alpha = alphas[n_iter - 1, np.newaxis]
            prev_coef = coefs[n_iter - 1]

        alpha[0] = C / n_samples
        if alpha[0] <= alpha_min + equality_tolerance:  # early stopping
            if abs(alpha[0] - alpha_min) > equality_tolerance:
                # interpolation factor 0 <= ss < 1
                if n_iter > 0:
                    # In the first iteration, all alphas are zero, the formula
                    # below would make ss a NaN
                    ss = ((prev_alpha[0] - alpha_min) /
                          (prev_alpha[0] - alpha[0]))
                    coef[:] = prev_coef + ss * (coef - prev_coef)
                alpha[0] = alpha_min
            if return_path:
                coefs[n_iter] = coef
            break

        if n_iter >= max_iter or n_active >= n_features:
            break

        if not drop:

            ##########################################################
            # Append x_j to the Cholesky factorization of (Xa * Xa') #
            #                                                        #
            #            ( L   0 )                                   #
            #     L  ->  (       )  , where L * w = Xa' x_j          #
            #            ( w   z )    and z = ||x_j||                #
            #                                                        #
            ##########################################################

            if positive:
                sign_active[n_active] = np.ones_like(C_)
            else:
                sign_active[n_active] = np.sign(C_)
            m, n = n_active, C_idx + n_active

            Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
            indices[n], indices[m] = indices[m], indices[n]
            Cov_not_shortened = Cov
            Cov = Cov[1:]  # remove Cov[0]

            if Gram is None:
                X.T[n], X.T[m] = swap(X.T[n], X.T[m])
                c = nrm2(X.T[n_active]) ** 2
                L[n_active, :n_active] = \
                    np.dot(X.T[n_active], X.T[:n_active].T)
            else:
                # swap does only work inplace if matrix is fortran
                # contiguous ...
                Gram[m], Gram[n] = swap(Gram[m], Gram[n])
                Gram[:, m], Gram[:, n] = swap(Gram[:, m], Gram[:, n])
                c = Gram[n_active, n_active]
                L[n_active, :n_active] = Gram[n_active, :n_active]

            # Update the cholesky decomposition for the Gram matrix
            if n_active:
                linalg.solve_triangular(L[:n_active, :n_active],
                                        L[n_active, :n_active],
                                        trans=0, lower=1,
                                        overwrite_b=True,
                                        **solve_triangular_args)

            v = np.dot(L[n_active, :n_active], L[n_active, :n_active])
            diag = max(np.sqrt(np.abs(c - v)), eps)
            L[n_active, n_active] = diag

            if diag < 1e-7:
                # The system is becoming too ill-conditioned.
                # We have degenerate vectors in our active set.
                # We'll 'drop for good' the last regressor added.

                # Note: this case is very rare. It is no longer triggered by
                # the test suite. The `equality_tolerance` margin added in
                # 0.16.0 to get early stopping to work consistently on all
                # versions of Python including 32 bit Python under Windows
                # seems to make it very difficult to trigger the 'drop for
                # good' strategy.
                warnings.warn('Regressors in active set degenerate. '
                              'Dropping a regressor, after %i iterations, '
                              'i.e. alpha=%.3e, '
                              'with an active set of %i regressors, and '
                              'the smallest cholesky pivot element being %.3e'
                              % (n_iter, alpha, n_active, diag),
                              ConvergenceWarning)

                # XXX: need to figure a 'drop for good' way
                Cov = Cov_not_shortened
                Cov[0] = 0
                Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
                continue

            active.append(indices[n_active])
            n_active += 1

            if verbose > 1:
                print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, active[-1], '',
                                                      n_active, C))

        if method == 'lasso' and n_iter > 0 and prev_alpha[0] < alpha[0]:
            # alpha is increasing. This is because the updates of Cov are
            # bringing in too much numerical error that is greater than
            # than the remaining correlation with the
            # regressors. Time to bail out
            warnings.warn('Early stopping the lars path, as the residues '
                          'are small and the current value of alpha is no '
                          'longer well controlled. %i iterations, alpha=%.3e, '
                          'previous alpha=%.3e, with an active set of %i '
                          'regressors.'
                          % (n_iter, alpha, prev_alpha, n_active),
                          ConvergenceWarning)
            break

        # least squares solution
        least_squares, info = solve_cholesky(L[:n_active, :n_active],
                                             sign_active[:n_active],
                                             lower=True)

        if least_squares.size == 1 and least_squares == 0:
            # This happens because sign_active[:n_active] = 0
            least_squares[...] = 1
            AA = 1.
        else:
            # is this really needed ?
            AA = 1. / np.sqrt(np.sum(least_squares * sign_active[:n_active]))

            if not np.isfinite(AA):
                # L is too ill-conditioned
                # progressively regularize the diagonal until the solve
                # produces a finite normalization constant
                i = 0
                L_ = L[:n_active, :n_active].copy()
                while not np.isfinite(AA):
                    L_.flat[::n_active + 1] += (2 ** i) * eps
                    least_squares, info = solve_cholesky(
                        L_, sign_active[:n_active], lower=True)
                    tmp = max(np.sum(least_squares * sign_active[:n_active]),
                              eps)
                    AA = 1. / np.sqrt(tmp)
                    i += 1
            least_squares *= AA

        if Gram is None:
            # equiangular direction of variables in the active set
            eq_dir = np.dot(X.T[:n_active].T, least_squares)
            # correlation between each unactive variables and
            # eqiangular vector
            corr_eq_dir = np.dot(X.T[n_active:], eq_dir)
        else:
            # if huge number of features, this takes 50% of time, I
            # think could be avoided if we just update it using an
            # orthogonal (QR) decomposition of X
            corr_eq_dir = np.dot(Gram[:n_active, n_active:].T,
                                 least_squares)

        # gamma_ is the step length along the equiangular direction until
        # the next variable would enter the active set.
        g1 = arrayfuncs.min_pos((C - Cov) / (AA - corr_eq_dir + tiny))
        if positive:
            gamma_ = min(g1, C / AA)
        else:
            g2 = arrayfuncs.min_pos((C + Cov) / (AA + corr_eq_dir + tiny))
            gamma_ = min(g1, g2, C / AA)

        # TODO: better names for these variables: z
        drop = False
        z = -coef[active] / (least_squares + tiny32)
        z_pos = arrayfuncs.min_pos(z)
        if z_pos < gamma_:
            # some coefficients have changed sign
            idx = np.where(z == z_pos)[0][::-1]

            # update the sign, important for LAR
            sign_active[idx] = -sign_active[idx]

            if method == 'lasso':
                gamma_ = z_pos
            drop = True

        n_iter += 1

        if return_path:
            if n_iter >= coefs.shape[0]:
                del coef, alpha, prev_alpha, prev_coef
                # resize the coefs and alphas array
                add_features = 2 * max(1, (max_features - n_active))
                coefs = np.resize(coefs, (n_iter + add_features, n_features))
                alphas = np.resize(alphas, n_iter + add_features)
            coef = coefs[n_iter]
            prev_coef = coefs[n_iter - 1]
            alpha = alphas[n_iter, np.newaxis]
            prev_alpha = alphas[n_iter - 1, np.newaxis]
        else:
            # mimic the effect of incrementing n_iter on the array references
            prev_coef = coef
            prev_alpha[0] = alpha[0]
            coef = np.zeros_like(coef)

        coef[active] = prev_coef[active] + gamma_ * least_squares

        # update correlations
        Cov -= gamma_ * corr_eq_dir

        # See if any coefficient has changed sign
        if drop and method == 'lasso':

            # handle the case when idx is not length of 1
            [arrayfuncs.cholesky_delete(L[:n_active, :n_active], ii) for ii in
                idx]

            n_active -= 1
            m, n = idx, n_active
            # handle the case when idx is not length of 1
            drop_idx = [active.pop(ii) for ii in idx]

            if Gram is None:
                # propagate dropped variable
                for ii in idx:
                    for i in range(ii, n_active):
                        X.T[i], X.T[i + 1] = swap(X.T[i], X.T[i + 1])
                        # yeah this is stupid
                        indices[i], indices[i + 1] = indices[i + 1], indices[i]

                # TODO: this could be updated
                residual = y - np.dot(X[:, :n_active], coef[active])
                temp = np.dot(X.T[n_active], residual)

                Cov = np.r_[temp, Cov]
            else:
                for ii in idx:
                    for i in range(ii, n_active):
                        indices[i], indices[i + 1] = indices[i + 1], indices[i]
                        Gram[i], Gram[i + 1] = swap(Gram[i], Gram[i + 1])
                        Gram[:, i], Gram[:, i + 1] = swap(Gram[:, i],
                                                          Gram[:, i + 1])

                # Cov_n = Cov_j + x_j * X + increment(betas) TODO:
                # will this still work with multiple drops ?

                # recompute covariance. Probably could be done better
                # wrong as Xy is not swapped with the rest of variables

                # TODO: this could be updated
                residual = y - np.dot(X, coef)
                temp = np.dot(X.T[drop_idx], residual)
                Cov = np.r_[temp, Cov]

            sign_active = np.delete(sign_active, idx)
            sign_active = np.append(sign_active, 0.)  # just to maintain size
            if verbose > 1:
                print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, '', drop_idx,
                                                      n_active, abs(temp)))

    if return_path:
        # resize coefs in case of early stop
        alphas = alphas[:n_iter + 1]
        coefs = coefs[:n_iter + 1]

        if return_n_iter:
            return alphas, active, coefs.T, n_iter
        else:
            return alphas, active, coefs.T
    else:
        if return_n_iter:
            return alpha, active, coef, n_iter
        else:
            return alpha, active, coef
###############################################################################
# Estimator classes
class Lars(LinearModel, RegressorMixin):
    """Least Angle Regression model a.k.a. LAR

    Read more in the :ref:`User Guide <least_angle_regression>`.

    Parameters
    ----------
    n_nonzero_coefs : int, optional
        Target number of non-zero coefficients. Use ``np.inf`` for no limit.

    fit_intercept : boolean
        Whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    positive : boolean (default=False)
        Restrict coefficients to be >= 0. Be aware that you might want to
        remove fit_intercept which is set True by default.

    verbose : boolean or integer, optional
        Sets the verbosity amount

    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.

    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    eps : float, optional
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems. Unlike the ``tol`` parameter in some iterative
        optimization-based algorithms, this parameter does not control
        the tolerance of the optimization.

    fit_path : boolean
        If True the full path is stored in the ``coef_path_`` attribute.
        If you compute the solution for a large problem or many targets,
        setting ``fit_path`` to ``False`` will lead to a speedup, especially
        with a small alpha.

    Attributes
    ----------
    alphas_ : array, shape (n_alphas + 1,) | list of n_targets such arrays
        Maximum of covariances (in absolute value) at each iteration. \
        ``n_alphas`` is either ``n_nonzero_coefs`` or ``n_features``, \
        whichever is smaller.

    active_ : list, length = n_alphas | list of n_targets such lists
        Indices of active variables at the end of the path.

    coef_path_ : array, shape (n_features, n_alphas + 1) \
        | list of n_targets such arrays
        The varying values of the coefficients along the path. It is not
        present if the ``fit_path`` parameter is ``False``.

    coef_ : array, shape (n_features,) or (n_targets, n_features)
        Parameter vector (w in the formulation formula).

    intercept_ : float | array, shape (n_targets,)
        Independent term in decision function.

    n_iter_ : array-like or int
        The number of iterations taken by lars_path to find the
        grid of alphas for each target.

    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.Lars(n_nonzero_coefs=1)
    >>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111])
    ... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    Lars(copy_X=True, eps=..., fit_intercept=True, fit_path=True,
       n_nonzero_coefs=1, normalize=True, positive=False, precompute='auto',
       verbose=False)
    >>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    [ 0. -1.11...]

    See also
    --------
    lars_path, LarsCV
    sklearn.decomposition.sparse_encode
    """
    def __init__(self, fit_intercept=True, verbose=False, normalize=True,
                 precompute='auto', n_nonzero_coefs=500,
                 eps=np.finfo(float).eps, copy_X=True, fit_path=True,
                 positive=False):
        # ``np.finfo(float).eps`` replaces the removed NumPy alias
        # ``np.float`` (deprecated in 1.20, removed in 1.24); same value.
        self.fit_intercept = fit_intercept
        self.verbose = verbose
        self.normalize = normalize
        self.method = 'lar'
        self.precompute = precompute
        self.n_nonzero_coefs = n_nonzero_coefs
        self.positive = positive
        self.eps = eps
        self.copy_X = copy_X
        self.fit_path = fit_path

    def _get_gram(self):
        # precompute if n_samples > n_features
        precompute = self.precompute
        if hasattr(precompute, '__array__'):
            Gram = precompute
        elif precompute == 'auto':
            Gram = 'auto'
        else:
            Gram = None
        return Gram

    def fit(self, X, y, Xy=None):
        """Fit the model using X, y as training data.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.

        y : array-like, shape (n_samples,) or (n_samples, n_targets)
            Target values.

        Xy : array-like, shape (n_samples,) or (n_samples, n_targets), \
                optional
            Xy = np.dot(X.T, y) that can be precomputed. It is useful
            only when the Gram matrix is precomputed.

        Returns
        -------
        self : object
            returns an instance of self.
        """
        X, y = check_X_y(X, y, y_numeric=True, multi_output=True)

        n_features = X.shape[1]

        X, y, X_mean, y_mean, X_std = self._center_data(X, y,
                                                        self.fit_intercept,
                                                        self.normalize,
                                                        self.copy_X)

        if y.ndim == 1:
            y = y[:, np.newaxis]

        n_targets = y.shape[1]

        alpha = getattr(self, 'alpha', 0.)
        if hasattr(self, 'n_nonzero_coefs'):
            alpha = 0.  # n_nonzero_coefs parametrization takes priority
            max_iter = self.n_nonzero_coefs
        else:
            max_iter = self.max_iter

        precompute = self.precompute
        if not hasattr(precompute, '__array__') and (
                precompute is True or
                (precompute == 'auto' and X.shape[0] > X.shape[1]) or
                (precompute == 'auto' and y.shape[1] > 1)):
            Gram = np.dot(X.T, X)
        else:
            Gram = self._get_gram()

        self.alphas_ = []
        self.n_iter_ = []

        if self.fit_path:
            self.coef_ = []
            self.active_ = []
            self.coef_path_ = []
            # ``range`` replaces py2-era ``xrange``; iteration is identical.
            for k in range(n_targets):
                this_Xy = None if Xy is None else Xy[:, k]
                alphas, active, coef_path, n_iter_ = lars_path(
                    X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X,
                    copy_Gram=True, alpha_min=alpha, method=self.method,
                    verbose=max(0, self.verbose - 1), max_iter=max_iter,
                    eps=self.eps, return_path=True,
                    return_n_iter=True, positive=self.positive)
                self.alphas_.append(alphas)
                self.active_.append(active)
                self.n_iter_.append(n_iter_)
                self.coef_path_.append(coef_path)
                self.coef_.append(coef_path[:, -1])

            # For a single target, unwrap the per-target lists so the public
            # attributes are plain arrays/lists, not length-1 containers.
            if n_targets == 1:
                self.alphas_, self.active_, self.coef_path_, self.coef_ = [
                    a[0] for a in (self.alphas_, self.active_, self.coef_path_,
                                   self.coef_)]
                self.n_iter_ = self.n_iter_[0]
        else:
            self.coef_ = np.empty((n_targets, n_features))
            for k in range(n_targets):
                this_Xy = None if Xy is None else Xy[:, k]
                alphas, _, self.coef_[k], n_iter_ = lars_path(
                    X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X,
                    copy_Gram=True, alpha_min=alpha, method=self.method,
                    verbose=max(0, self.verbose - 1), max_iter=max_iter,
                    eps=self.eps, return_path=False, return_n_iter=True,
                    positive=self.positive)
                self.alphas_.append(alphas)
                self.n_iter_.append(n_iter_)
            if n_targets == 1:
                self.alphas_ = self.alphas_[0]
                self.n_iter_ = self.n_iter_[0]

        self._set_intercept(X_mean, y_mean, X_std)
        return self
class LassoLars(Lars):
    """Lasso model fit with Least Angle Regression a.k.a. Lars

    It is a Linear Model trained with an L1 prior as regularizer.

    The optimization objective for Lasso is::

    (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1

    Read more in the :ref:`User Guide <least_angle_regression>`.

    Parameters
    ----------
    alpha : float
        Constant that multiplies the penalty term. Defaults to 1.0.
        ``alpha = 0`` is equivalent to an ordinary least square, solved
        by :class:`LinearRegression`. For numerical reasons, using
        ``alpha = 0`` with the LassoLars object is not advised and you
        should prefer the LinearRegression object.

    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    positive : boolean (default=False)
        Restrict coefficients to be >= 0. Be aware that you might want to
        remove fit_intercept which is set True by default.
        Under the positive restriction the model coefficients will not converge
        to the ordinary-least-squares solution for small values of alpha.
        Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
        0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
        algorithm are typically in congruence with the solution of the
        coordinate descent Lasso estimator.

    verbose : boolean or integer, optional
        Sets the verbosity amount

    normalize : boolean, optional, default False
        If True, the regressors X will be normalized before regression.

    copy_X : boolean, optional, default True
        If True, X will be copied; else, it may be overwritten.

    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.

    max_iter : integer, optional
        Maximum number of iterations to perform.

    eps : float, optional
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems. Unlike the ``tol`` parameter in some iterative
        optimization-based algorithms, this parameter does not control
        the tolerance of the optimization.

    fit_path : boolean
        If ``True`` the full path is stored in the ``coef_path_`` attribute.
        If you compute the solution for a large problem or many targets,
        setting ``fit_path`` to ``False`` will lead to a speedup, especially
        with a small alpha.

    Attributes
    ----------
    alphas_ : array, shape (n_alphas + 1,) | list of n_targets such arrays
        Maximum of covariances (in absolute value) at each iteration. \
        ``n_alphas`` is either ``max_iter``, ``n_features``, or the number of \
        nodes in the path with correlation greater than ``alpha``, whichever \
        is smaller.

    active_ : list, length = n_alphas | list of n_targets such lists
        Indices of active variables at the end of the path.

    coef_path_ : array, shape (n_features, n_alphas + 1) or list
        If a list is passed it's expected to be one of n_targets such arrays.
        The varying values of the coefficients along the path. It is not
        present if the ``fit_path`` parameter is ``False``.

    coef_ : array, shape (n_features,) or (n_targets, n_features)
        Parameter vector (w in the formulation formula).

    intercept_ : float | array, shape (n_targets,)
        Independent term in decision function.

    n_iter_ : array-like or int.
        The number of iterations taken by lars_path to find the
        grid of alphas for each target.

    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.LassoLars(alpha=0.01)
    >>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1, 0, -1])
    ... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    LassoLars(alpha=0.01, copy_X=True, eps=..., fit_intercept=True,
         fit_path=True, max_iter=500, normalize=True, positive=False,
         precompute='auto', verbose=False)
    >>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    [ 0.         -0.963257...]

    See also
    --------
    lars_path
    lasso_path
    Lasso
    LassoCV
    LassoLarsCV
    sklearn.decomposition.sparse_encode
    """
    def __init__(self, alpha=1.0, fit_intercept=True, verbose=False,
                 normalize=True, precompute='auto', max_iter=500,
                 eps=np.finfo(float).eps, copy_X=True, fit_path=True,
                 positive=False):
        # ``np.finfo(float).eps`` replaces the removed NumPy alias
        # ``np.float`` (deprecated in 1.20, removed in 1.24); same value.
        self.alpha = alpha
        self.fit_intercept = fit_intercept
        self.max_iter = max_iter
        self.verbose = verbose
        self.normalize = normalize
        self.method = 'lasso'
        self.positive = positive
        self.precompute = precompute
        self.copy_X = copy_X
        self.eps = eps
        self.fit_path = fit_path
###############################################################################
# Cross-validated estimator classes
def _check_copy_and_writeable(array, copy=False):
if copy or not array.flags.writeable:
return array.copy()
return array
def _lars_path_residues(X_train, y_train, X_test, y_test, Gram=None,
                        copy=True, method='lars', verbose=False,
                        fit_intercept=True, normalize=True, max_iter=500,
                        eps=np.finfo(float).eps, positive=False):
    """Compute the residues on left-out data for a full LARS path

    Parameters
    ----------
    X_train : array, shape (n_samples, n_features)
        The data to fit the LARS on

    y_train : array, shape (n_samples)
        The target variable to fit LARS on

    X_test : array, shape (n_samples, n_features)
        The data to compute the residues on

    y_test : array, shape (n_samples)
        The target variable to compute the residues on

    Gram : None, 'auto', array, shape: (n_features, n_features), optional
        Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram
        matrix is precomputed from the given X, if there are more samples
        than features

    copy : boolean, optional
        Whether X_train, X_test, y_train and y_test should be copied;
        if False, they may be overwritten.

    method : 'lar' | 'lasso'
        Specifies the returned model. Select ``'lar'`` for Least Angle
        Regression, ``'lasso'`` for the Lasso.

    verbose : integer, optional
        Sets the amount of verbosity

    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    positive : boolean (default=False)
        Restrict coefficients to be >= 0. Be aware that you might want to
        remove fit_intercept which is set True by default.
        See reservations for using this option in combination with method
        'lasso' for expected small values of alpha in the doc of LassoLarsCV
        and LassoLarsIC.

    normalize : boolean, optional, default False
        If True, the regressors X will be normalized before regression.

    max_iter : integer, optional
        Maximum number of iterations to perform.

    eps : float, optional
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems. Unlike the ``tol`` parameter in some iterative
        optimization-based algorithms, this parameter does not control
        the tolerance of the optimization.

    Returns
    --------
    alphas : array, shape (n_alphas,)
        Maximum of covariances (in absolute value) at each iteration.
        ``n_alphas`` is either ``max_iter`` or ``n_features``, whichever
        is smaller.

    active : list
        Indices of active variables at the end of the path.

    coefs : array, shape (n_features, n_alphas)
        Coefficients along the path

    residues : array, shape (n_alphas, n_samples)
        Residues of the prediction on the test data
    """
    # Note: the default for ``eps`` uses ``np.finfo(float).eps`` because the
    # NumPy alias ``np.float`` was removed in NumPy 1.24; the value is
    # unchanged.
    X_train = _check_copy_and_writeable(X_train, copy)
    y_train = _check_copy_and_writeable(y_train, copy)
    X_test = _check_copy_and_writeable(X_test, copy)
    y_test = _check_copy_and_writeable(y_test, copy)

    if fit_intercept:
        # Center both folds with the *training* statistics only, so the test
        # residues reflect out-of-sample behaviour.
        X_mean = X_train.mean(axis=0)
        X_train -= X_mean
        X_test -= X_mean
        y_mean = y_train.mean(axis=0)
        y_train = as_float_array(y_train, copy=False)
        y_train -= y_mean
        y_test = as_float_array(y_test, copy=False)
        y_test -= y_mean

    if normalize:
        norms = np.sqrt(np.sum(X_train ** 2, axis=0))
        nonzeros = np.flatnonzero(norms)
        X_train[:, nonzeros] /= norms[nonzeros]

    alphas, active, coefs = lars_path(
        X_train, y_train, Gram=Gram, copy_X=False, copy_Gram=False,
        method=method, verbose=max(0, verbose - 1), max_iter=max_iter, eps=eps,
        positive=positive)
    if normalize:
        # Undo the column scaling so the coefficients apply to raw X_test.
        coefs[nonzeros] /= norms[nonzeros][:, np.newaxis]
    residues = np.dot(X_test, coefs) - y_test[:, np.newaxis]
    return alphas, active, coefs, residues.T
class LarsCV(Lars):
    """Cross-validated Least Angle Regression model

    Read more in the :ref:`User Guide <least_angle_regression>`.

    Parameters
    ----------
    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    positive : boolean (default=False)
        Restrict coefficients to be >= 0. Be aware that you might want to
        remove fit_intercept which is set True by default.

    verbose : boolean or integer, optional
        Sets the verbosity amount

    normalize : boolean, optional, default False
        If True, the regressors X will be normalized before regression.

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.

    max_iter: integer, optional
        Maximum number of iterations to perform.

    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:

        - None, to use the default 3-fold cross-validation,
        - integer, to specify the number of folds.
        - An object to be used as a cross-validation generator.
        - An iterable yielding train/test splits.

        For integer/None inputs, :class:`KFold` is used.

        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.

    max_n_alphas : integer, optional
        The maximum number of points on the path used to compute the
        residuals in the cross-validation

    n_jobs : integer, optional
        Number of CPUs to use during the cross validation. If ``-1``, use
        all the CPUs

    eps : float, optional
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems.

    Attributes
    ----------
    coef_ : array, shape (n_features,)
        parameter vector (w in the formulation formula)

    intercept_ : float
        independent term in decision function

    coef_path_ : array, shape (n_features, n_alphas)
        the varying values of the coefficients along the path

    alpha_ : float
        the estimated regularization parameter alpha

    alphas_ : array, shape (n_alphas,)
        the different values of alpha along the path

    cv_alphas_ : array, shape (n_cv_alphas,)
        all the values of alpha along the path for the different folds

    cv_mse_path_ : array, shape (n_folds, n_cv_alphas)
        the mean square error on left-out for each fold along the path
        (alpha values given by ``cv_alphas``)

    n_iter_ : array-like or int
        the number of iterations run by Lars with the optimal alpha.

    See also
    --------
    lars_path, LassoLars, LassoLarsCV
    """

    # LARS variant passed down to ``lars_path``; LassoLarsCV overrides it.
    method = 'lar'

    def __init__(self, fit_intercept=True, verbose=False, max_iter=500,
                 normalize=True, precompute='auto', cv=None,
                 max_n_alphas=1000, n_jobs=1, eps=np.finfo(float).eps,
                 copy_X=True, positive=False):
        # ``np.finfo(float).eps`` replaces the removed NumPy alias
        # ``np.float`` (deprecated in 1.20, removed in 1.24); same value.
        self.fit_intercept = fit_intercept
        self.positive = positive
        self.max_iter = max_iter
        self.verbose = verbose
        self.normalize = normalize
        self.precompute = precompute
        self.copy_X = copy_X
        self.cv = cv
        self.max_n_alphas = max_n_alphas
        self.n_jobs = n_jobs
        self.eps = eps

    def fit(self, X, y):
        """Fit the model using X, y as training data.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.

        y : array-like, shape (n_samples,)
            Target values.

        Returns
        -------
        self : object
            returns an instance of self.
        """
        self.fit_path = True
        X, y = check_X_y(X, y, y_numeric=True)

        # init cross-validation generator
        cv = check_cv(self.cv, X, y, classifier=False)

        Gram = 'auto' if self.precompute else None

        # One LARS path per CV fold, fitted in parallel; each entry is
        # (alphas, active, coefs, residues) from _lars_path_residues.
        cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
            delayed(_lars_path_residues)(
                X[train], y[train], X[test], y[test], Gram=Gram, copy=False,
                method=self.method, verbose=max(0, self.verbose - 1),
                normalize=self.normalize, fit_intercept=self.fit_intercept,
                max_iter=self.max_iter, eps=self.eps, positive=self.positive)
            for train, test in cv)
        all_alphas = np.concatenate(list(zip(*cv_paths))[0])
        # Unique also sorts
        all_alphas = np.unique(all_alphas)
        # Take at most max_n_alphas values
        stride = int(max(1, int(len(all_alphas) / float(self.max_n_alphas))))
        all_alphas = all_alphas[::stride]

        # Interpolate every fold's residues onto the common alpha grid so the
        # per-fold MSE curves can be averaged pointwise.
        mse_path = np.empty((len(all_alphas), len(cv_paths)))
        for index, (alphas, active, coefs, residues) in enumerate(cv_paths):
            alphas = alphas[::-1]
            residues = residues[::-1]
            if alphas[0] != 0:
                alphas = np.r_[0, alphas]
                residues = np.r_[residues[0, np.newaxis], residues]
            if alphas[-1] != all_alphas[-1]:
                alphas = np.r_[alphas, all_alphas[-1]]
                residues = np.r_[residues, residues[-1, np.newaxis]]
            this_residues = interpolate.interp1d(alphas,
                                                 residues,
                                                 axis=0)(all_alphas)
            this_residues **= 2
            mse_path[:, index] = np.mean(this_residues, axis=-1)

        # Drop grid points where any fold produced a non-finite MSE.
        mask = np.all(np.isfinite(mse_path), axis=-1)
        all_alphas = all_alphas[mask]
        mse_path = mse_path[mask]

        # Select the alpha that minimizes left-out error
        i_best_alpha = np.argmin(mse_path.mean(axis=-1))
        best_alpha = all_alphas[i_best_alpha]

        # Store our parameters
        self.alpha_ = best_alpha
        self.cv_alphas_ = all_alphas
        self.cv_mse_path_ = mse_path

        # Now compute the full model
        # it will call a lasso internally when self if LassoLarsCV
        # as self.method == 'lasso'
        Lars.fit(self, X, y)
        return self

    @property
    def alpha(self):
        # impedance matching for the above Lars.fit (should not be documented)
        return self.alpha_
class LassoLarsCV(LarsCV):
    """Cross-validated Lasso, using the LARS algorithm

    The optimization objective for Lasso is::

    (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1

    Read more in the :ref:`User Guide <least_angle_regression>`.

    Parameters
    ----------
    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    positive : boolean (default=False)
        Restrict coefficients to be >= 0. Be aware that you might want to
        remove fit_intercept which is set True by default.
        Under the positive restriction the model coefficients do not converge
        to the ordinary-least-squares solution for small values of alpha.
        Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
        0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
        algorithm are typically in congruence with the solution of the
        coordinate descent Lasso estimator.
        As a consequence using LassoLarsCV only makes sense for problems where
        a sparse solution is expected and/or reached.

    verbose : boolean or integer, optional
        Sets the verbosity amount

    normalize : boolean, optional, default False
        If True, the regressors X will be normalized before regression.

    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.

    max_iter : integer, optional
        Maximum number of iterations to perform.

    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:

        - None, to use the default 3-fold cross-validation,
        - integer, to specify the number of folds.
        - An object to be used as a cross-validation generator.
        - An iterable yielding train/test splits.

        For integer/None inputs, :class:`KFold` is used.

        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.

    max_n_alphas : integer, optional
        The maximum number of points on the path used to compute the
        residuals in the cross-validation

    n_jobs : integer, optional
        Number of CPUs to use during the cross validation. If ``-1``, use
        all the CPUs

    eps : float, optional
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems.

    copy_X : boolean, optional, default True
        If True, X will be copied; else, it may be overwritten.

    Attributes
    ----------
    coef_ : array, shape (n_features,)
        parameter vector (w in the formulation formula)

    intercept_ : float
        independent term in decision function.

    coef_path_ : array, shape (n_features, n_alphas)
        the varying values of the coefficients along the path

    alpha_ : float
        the estimated regularization parameter alpha

    alphas_ : array, shape (n_alphas,)
        the different values of alpha along the path

    cv_alphas_ : array, shape (n_cv_alphas,)
        all the values of alpha along the path for the different folds

    cv_mse_path_ : array, shape (n_folds, n_cv_alphas)
        the mean square error on left-out for each fold along the path
        (alpha values given by ``cv_alphas``)

    n_iter_ : array-like or int
        the number of iterations run by Lars with the optimal alpha.

    Notes
    -----
    The object solves the same problem as the LassoCV object. However,
    unlike the LassoCV, it finds the relevant alphas values by itself.
    In general, because of this property, it will be more stable.
    However, it is more fragile to heavily multicollinear datasets.

    It is more efficient than the LassoCV if only a small number of
    features are selected compared to the total number, for instance if
    there are very few samples compared to the number of features.

    See also
    --------
    lars_path, LassoLars, LarsCV, LassoCV
    """

    # Everything else is inherited from LarsCV; overriding ``method`` makes
    # the shared fitting code follow the Lasso variant of the LARS path.
    method = 'lasso'
class LassoLarsIC(LassoLars):
    """Lasso model fit with Lars using BIC or AIC for model selection

    The optimization objective for Lasso is::

    (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1

    AIC is the Akaike information criterion and BIC is the Bayes
    Information criterion. Such criteria are useful to select the value
    of the regularization parameter by making a trade-off between the
    goodness of fit and the complexity of the model. A good model should
    explain well the data while being simple.

    Read more in the :ref:`User Guide <least_angle_regression>`.

    Parameters
    ----------
    criterion : 'bic' | 'aic'
        The type of criterion to use.

    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    positive : boolean (default=False)
        Restrict coefficients to be >= 0. Be aware that you might want to
        remove fit_intercept which is set True by default.
        Under the positive restriction the model coefficients do not converge
        to the ordinary-least-squares solution for small values of alpha.
        Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
        0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
        algorithm are typically in congruence with the solution of the
        coordinate descent Lasso estimator.
        As a consequence using LassoLarsIC only makes sense for problems where
        a sparse solution is expected and/or reached.

    verbose : boolean or integer, optional
        Sets the verbosity amount

    normalize : boolean, optional, default False
        If True, the regressors X will be normalized before regression.

    copy_X : boolean, optional, default True
        If True, X will be copied; else, it may be overwritten.

    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.

    max_iter : integer, optional
        Maximum number of iterations to perform. Can be used for
        early stopping.

    eps : float, optional
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems. Unlike the ``tol`` parameter in some iterative
        optimization-based algorithms, this parameter does not control
        the tolerance of the optimization.

    Attributes
    ----------
    coef_ : array, shape (n_features,)
        parameter vector (w in the formulation formula)

    intercept_ : float
        independent term in decision function.

    alpha_ : float
        the alpha parameter chosen by the information criterion

    n_iter_ : int
        number of iterations run by lars_path to find the grid of
        alphas.

    criterion_ : array, shape (n_alphas,)
        The value of the information criteria ('aic', 'bic') across all
        alphas. The alpha which has the smallest information criteria
        is chosen.

    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.LassoLarsIC(criterion='bic')
    >>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111])
    ... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    LassoLarsIC(copy_X=True, criterion='bic', eps=..., fit_intercept=True,
          max_iter=500, normalize=True, positive=False, precompute='auto',
          verbose=False)
    >>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    [ 0.  -1.11...]

    Notes
    -----
    The estimation of the number of degrees of freedom is given by:

    "On the degrees of freedom of the lasso"
    Hui Zou, Trevor Hastie, and Robert Tibshirani
    Ann. Statist. Volume 35, Number 5 (2007), 2173-2192.

    http://en.wikipedia.org/wiki/Akaike_information_criterion
    http://en.wikipedia.org/wiki/Bayesian_information_criterion

    See also
    --------
    lars_path, LassoLars, LassoLarsCV
    """
    def __init__(self, criterion='aic', fit_intercept=True, verbose=False,
                 normalize=True, precompute='auto', max_iter=500,
                 eps=np.finfo(float).eps, copy_X=True, positive=False):
        # ``np.finfo(float).eps`` replaces the removed NumPy alias
        # ``np.float`` (deprecated in 1.20, removed in 1.24); same value.
        self.criterion = criterion
        self.fit_intercept = fit_intercept
        self.positive = positive
        self.max_iter = max_iter
        self.verbose = verbose
        self.normalize = normalize
        self.copy_X = copy_X
        self.precompute = precompute
        self.eps = eps

    def fit(self, X, y, copy_X=True):
        """Fit the model using X, y as training data.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            training data.

        y : array-like, shape (n_samples,)
            target values.

        copy_X : boolean, optional, default True
            If ``True``, X will be copied; else, it may be overwritten.

        Returns
        -------
        self : object
            returns an instance of self.
        """
        self.fit_path = True
        X, y = check_X_y(X, y, y_numeric=True)

        # NOTE(review): centering honours ``self.copy_X`` while ``lars_path``
        # below receives this method's ``copy_X`` argument -- the two can
        # disagree when the caller passes ``copy_X`` explicitly; confirm
        # which one should govern copying.
        X, y, Xmean, ymean, Xstd = LinearModel._center_data(
            X, y, self.fit_intercept, self.normalize, self.copy_X)
        max_iter = self.max_iter

        Gram = self._get_gram()

        alphas_, active_, coef_path_, self.n_iter_ = lars_path(
            X, y, Gram=Gram, copy_X=copy_X, copy_Gram=True, alpha_min=0.0,
            method='lasso', verbose=self.verbose, max_iter=max_iter,
            eps=self.eps, return_n_iter=True, positive=self.positive)

        n_samples = X.shape[0]

        if self.criterion == 'aic':
            K = 2  # AIC
        elif self.criterion == 'bic':
            K = log(n_samples)  # BIC
        else:
            raise ValueError('criterion should be either bic or aic')

        R = y[:, np.newaxis] - np.dot(X, coef_path_)  # residuals
        mean_squared_error = np.mean(R ** 2, axis=0)

        # ``int`` replaces the removed NumPy alias ``np.int`` (NumPy >= 1.24).
        df = np.zeros(coef_path_.shape[1], dtype=int)  # Degrees of freedom
        for k, coef in enumerate(coef_path_.T):
            mask = np.abs(coef) > np.finfo(coef.dtype).eps
            if not np.any(mask):
                continue
            # get the number of degrees of freedom equal to:
            # Xc = X[:, mask]
            # Trace(Xc * inv(Xc.T, Xc) * Xc.T) ie the number of non-zero coefs
            df[k] = np.sum(mask)

        self.alphas_ = alphas_
        with np.errstate(divide='ignore'):
            self.criterion_ = n_samples * np.log(mean_squared_error) + K * df
        n_best = np.argmin(self.criterion_)

        self.alpha_ = alphas_[n_best]
        self.coef_ = coef_path_[:, n_best]
        self._set_intercept(Xmean, ymean, Xstd)
        return self
| bsd-3-clause |
kagayakidan/scikit-learn | examples/linear_model/plot_ols_3d.py | 350 | 2040 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Sparsity Example: Fitting only features 1 and 2
=========================================================
Features 1 and 2 of the diabetes-dataset are fitted and
plotted below. It illustrates that although feature 2
has a strong coefficient on the full model, it does not
give us much regarding `y` when compared to just feature 1
"""
print(__doc__)

# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause

import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D

from sklearn import datasets, linear_model

# Keep only features 0 and 1 of the diabetes data and hold out the
# last 20 samples as a test set.
diabetes = datasets.load_diabetes()
indices = (0, 1)

X_train = diabetes.data[:-20, indices]
X_test = diabetes.data[-20:, indices]
y_train = diabetes.target[:-20]
y_test = diabetes.target[-20:]

# Ordinary least squares fit on the two selected features.
ols = linear_model.LinearRegression()
ols.fit(X_train, y_train)
###############################################################################
# Plot the figure
def plot_figs(fig_num, elev, azim, X_train, clf, y=None):
    """Draw one 3-D scatter of the training data plus the fitted OLS plane.

    Parameters
    ----------
    fig_num : int
        Matplotlib figure number to draw into.
    elev, azim : float
        Elevation and azimuth (in degrees) of the 3-D camera.
    X_train : array, shape (n_samples, 2)
        Training data for the two plotted features.
    clf : fitted estimator
        Regressor whose ``predict`` is evaluated on a small grid to draw
        the plane.
    y : array, shape (n_samples,), optional
        Target values for the scatter plot.  Defaults to the module-level
        ``y_train`` so the existing call sites keep working; previously the
        function read that global implicitly.
    """
    if y is None:
        y = y_train  # backward-compatible fallback to the module global

    fig = plt.figure(fig_num, figsize=(4, 3))
    plt.clf()
    ax = Axes3D(fig, elev=elev, azim=azim)

    ax.scatter(X_train[:, 0], X_train[:, 1], y, c='k', marker='+')
    # Predict on the four corners of a small grid spanning the plotted
    # feature range and draw the resulting plane.
    ax.plot_surface(np.array([[-.1, -.1], [.15, .15]]),
                    np.array([[-.1, .15], [-.1, .15]]),
                    clf.predict(np.array([[-.1, -.1, .15, .15],
                                          [-.1, .15, -.1, .15]]).T
                                ).reshape((2, 2)),
                    alpha=.5)
    ax.set_xlabel('X_1')
    ax.set_ylabel('X_2')
    ax.set_zlabel('Y')
    # NOTE(review): ``w_xaxis``/``w_yaxis``/``w_zaxis`` are deprecated in
    # modern matplotlib in favour of ``ax.xaxis`` etc. -- confirm the
    # targeted matplotlib version before changing.
    ax.w_xaxis.set_ticklabels([])
    ax.w_yaxis.set_ticklabels([])
    ax.w_zaxis.set_ticklabels([])
#Generate the three different figures from different views
elev = 43.5
azim = -110
plot_figs(1, elev, azim, X_train, ols)
elev = -.5
azim = 0
plot_figs(2, elev, azim, X_train, ols)
elev = -.5
azim = 90
plot_figs(3, elev, azim, X_train, ols)
plt.show()
| bsd-3-clause |
marmarko/ml101 | tensorflow/examples/tutorials/word2vec/word2vec_basic.py | 8 | 8995 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import os
import random
import zipfile
import numpy as np
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
# Step 1: Download the data.
url = 'http://mattmahoney.net/dc/'
def maybe_download(filename, expected_bytes):
    """Download a file if not present, and make sure it's the right size."""
    # Fetch from the corpus URL only when there is no local copy yet.
    if not os.path.exists(filename):
        filename, _ = urllib.request.urlretrieve(url + filename, filename)
    # Verify the byte count as a cheap integrity check on the download.
    actual_bytes = os.stat(filename).st_size
    if actual_bytes != expected_bytes:
        print(actual_bytes)
        raise Exception(
            'Failed to verify ' + filename + '. Can you get to it with a browser?')
    print('Found and verified', filename)
    return filename
filename = maybe_download('text8.zip', 31344016)
# Read the data into a list of strings.
def read_data(filename):
    """Extract the first file enclosed in a zip file as a list of words"""
    # The text8 archive contains a single member; read it in full, decode
    # bytes to str via tf.compat, and tokenize on whitespace.
    with zipfile.ZipFile(filename) as f:
        data = tf.compat.as_str(f.read(f.namelist()[0])).split()
    return data
words = read_data(filename)
print('Data size', len(words))
# Step 2: Build the dictionary and replace rare words with UNK token.
vocabulary_size = 50000
def build_dataset(words, vocabulary_size=50000):
    """Encode a corpus as integer ids, keeping only the most frequent words.

    Parameters
    ----------
    words : list of str
        The corpus as a flat list of tokens.
    vocabulary_size : int, optional
        Number of distinct entries to keep, including the 'UNK' bucket.
        Defaults to 50000, matching the module-level constant, so existing
        callers are unaffected; previously this was read from the global.

    Returns
    -------
    data : list of int
        ``words`` encoded as ids; out-of-vocabulary words map to 0 ('UNK').
    count : list
        [word, frequency] pairs; count[0] is ['UNK', n_unknown].
    dictionary : dict
        word -> id, ids assigned in decreasing-frequency order.
    reverse_dictionary : dict
        id -> word (inverse of ``dictionary``).
    """
    # 'UNK' bucket first, then the (vocabulary_size - 1) most common words.
    count = [['UNK', -1]]
    count.extend(collections.Counter(words).most_common(vocabulary_size - 1))
    dictionary = dict()
    for word, _ in count:
        dictionary[word] = len(dictionary)  # id = position in count
    data = list()
    unk_count = 0
    for word in words:
        if word in dictionary:
            index = dictionary[word]
        else:
            index = 0  # dictionary['UNK']
            unk_count += 1
        data.append(index)
    count[0][1] = unk_count  # backfill the true UNK frequency
    reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
    return data, count, dictionary, reverse_dictionary
data, count, dictionary, reverse_dictionary = build_dataset(words)
del words # Hint to reduce memory.
print('Most common words (+UNK)', count[:5])
print('Sample data', data[:10], [reverse_dictionary[i] for i in data[:10]])
data_index = 0
# Step 3: Function to generate a training batch for the skip-gram model.
def generate_batch(batch_size, num_skips, skip_window):
    """Generate one skip-gram training batch from the global ``data`` stream.

    batch_size: number of (center, context) pairs to emit; must be a
        multiple of num_skips.
    num_skips: how many context words to sample per center word.
    skip_window: context radius; words up to this distance from the
        center are candidate labels.

    Returns (batch, labels) int32 arrays of shapes (batch_size,) and
    (batch_size, 1).  Advances the module-level ``data_index`` cursor so
    successive calls walk through the corpus (wrapping at the end).
    """
    global data_index
    assert batch_size % num_skips == 0
    assert num_skips <= 2 * skip_window
    batch = np.ndarray(shape=(batch_size), dtype=np.int32)
    labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
    span = 2 * skip_window + 1  # [ skip_window target skip_window ]
    buffer = collections.deque(maxlen=span)
    # Prime the sliding window with the first `span` words.
    for _ in range(span):
        buffer.append(data[data_index])
        data_index = (data_index + 1) % len(data)
    for i in range(batch_size // num_skips):
        target = skip_window  # target label at the center of the buffer
        targets_to_avoid = [ skip_window ]
        for j in range(num_skips):
            # Sample distinct context positions, never the center itself.
            while target in targets_to_avoid:
                target = random.randint(0, span - 1)
            targets_to_avoid.append(target)
            batch[i * num_skips + j] = buffer[skip_window]
            labels[i * num_skips + j, 0] = buffer[target]
        # Slide the window one word right (deque drops the oldest entry).
        buffer.append(data[data_index])
        data_index = (data_index + 1) % len(data)
    return batch, labels
batch, labels = generate_batch(batch_size=8, num_skips=2, skip_window=1)
for i in range(8):
print(batch[i], reverse_dictionary[batch[i]],
'->', labels[i, 0], reverse_dictionary[labels[i, 0]])
# Step 4: Build and train a skip-gram model.
batch_size = 128
embedding_size = 128 # Dimension of the embedding vector.
skip_window = 1 # How many words to consider left and right.
num_skips = 2 # How many times to reuse an input to generate a label.
# We pick a random validation set to sample nearest neighbors. Here we limit the
# validation samples to the words that have a low numeric ID, which by
# construction are also the most frequent.
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100 # Only pick dev samples in the head of the distribution.
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
num_sampled = 64 # Number of negative examples to sample.
graph = tf.Graph()
with graph.as_default():
# Input data.
train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
# Ops and variables pinned to the CPU because of missing GPU implementation
with tf.device('/cpu:0'):
# Look up embeddings for inputs.
embeddings = tf.Variable(
tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
embed = tf.nn.embedding_lookup(embeddings, train_inputs)
# Construct the variables for the NCE loss
nce_weights = tf.Variable(
tf.truncated_normal([vocabulary_size, embedding_size],
stddev=1.0 / math.sqrt(embedding_size)))
nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
# Compute the average NCE loss for the batch.
# tf.nce_loss automatically draws a new sample of the negative labels each
# time we evaluate the loss.
loss = tf.reduce_mean(
tf.nn.nce_loss(nce_weights, nce_biases, embed, train_labels,
num_sampled, vocabulary_size))
# Construct the SGD optimizer using a learning rate of 1.0.
optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss)
# Compute the cosine similarity between minibatch examples and all embeddings.
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
normalized_embeddings = embeddings / norm
valid_embeddings = tf.nn.embedding_lookup(
normalized_embeddings, valid_dataset)
similarity = tf.matmul(
valid_embeddings, normalized_embeddings, transpose_b=True)
# Add variable initializer.
init = tf.initialize_all_variables()
# Step 5: Begin training.
num_steps = 100001
with tf.Session(graph=graph) as session:
# We must initialize all variables before we use them.
init.run()
print("Initialized")
average_loss = 0
for step in xrange(num_steps):
batch_inputs, batch_labels = generate_batch(
batch_size, num_skips, skip_window)
feed_dict = {train_inputs : batch_inputs, train_labels : batch_labels}
# We perform one update step by evaluating the optimizer op (including it
# in the list of returned values for session.run()
_, loss_val = session.run([optimizer, loss], feed_dict=feed_dict)
average_loss += loss_val
if step % 2000 == 0:
if step > 0:
average_loss /= 2000
# The average loss is an estimate of the loss over the last 2000 batches.
print("Average loss at step ", step, ": ", average_loss)
average_loss = 0
# Note that this is expensive (~20% slowdown if computed every 500 steps)
if step % 10000 == 0:
sim = similarity.eval()
for i in xrange(valid_size):
valid_word = reverse_dictionary[valid_examples[i]]
top_k = 8 # number of nearest neighbors
nearest = (-sim[i, :]).argsort()[1:top_k+1]
log_str = "Nearest to %s:" % valid_word
for k in xrange(top_k):
close_word = reverse_dictionary[nearest[k]]
log_str = "%s %s," % (log_str, close_word)
print(log_str)
final_embeddings = normalized_embeddings.eval()
# Step 6: Visualize the embeddings.
def plot_with_labels(low_dim_embs, labels, filename='tsne.png'):
    """Scatter-plot 2-D embeddings with one text annotation per point.

    low_dim_embs: array of shape (n, 2), e.g. t-SNE output.
    labels: sequence of at least n strings, one per embedding row.
    filename: path the figure is saved to.
    """
    assert low_dim_embs.shape[0] >= len(labels), "More labels than embeddings"
    plt.figure(figsize=(18, 18))  # in inches
    for i, label in enumerate(labels):
        x, y = low_dim_embs[i,:]
        plt.scatter(x, y)
        # Offset each annotation slightly so it does not cover its marker.
        plt.annotate(label,
                     xy=(x, y),
                     xytext=(5, 2),
                     textcoords='offset points',
                     ha='right',
                     va='bottom')
    plt.savefig(filename)
try:
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
plot_only = 500
low_dim_embs = tsne.fit_transform(final_embeddings[:plot_only,:])
labels = [reverse_dictionary[i] for i in xrange(plot_only)]
plot_with_labels(low_dim_embs, labels)
except ImportError:
print("Please install sklearn, matplotlib, and scipy to visualize embeddings.")
| bsd-2-clause |
pauljxtan/nbody | src/plot_realtime_N3.py | 1 | 1028 | #!/usr/bin/env python
import matplotlib.pyplot as plt
import sys
def main():
    """Read N-body positions from stdin and scatter-plot them in real time.

    Expects whitespace-separated floats per line; particle i's x/y
    coordinates sit at columns 6*i+1 and 6*i+2.  Plots every
    ``frame_skip``-th frame (frame_skip taken from argv[1]) until an
    empty line (end of simulation) is read.
    """
    # Set the frameskip (plot every n-th frame)
    frame_skip = int(sys.argv[1])
    # Enable interactive mode
    plt.ion()
    # Set up figure
    fig = plt.figure()
    sp = fig.add_subplot(111)
    frame_count = 0
    while True:
        line = sys.stdin.readline()
        # Check if end of simulation
        if line == '':
            break
        frame_count += 1
        # Check if frame should be skipped
        if frame_count % frame_skip != 0:
            continue
        # Process data.  BUGFIX: materialize the floats in a list --
        # under Python 3 a bare map() returns a lazy iterator, which
        # cannot be indexed by the data[...] lookups below.
        data = [float(v) for v in line.strip().split(" ")]
        # Update plot
        sp.set_title("t = %d" % frame_count)
        sp.scatter(data[1], data[2], s=1, color='b')
        sp.scatter(data[7], data[8], s=1, color='g')
        sp.scatter(data[13], data[14], s=1, color='r')
        # Save to file
        #plt.savefig("./frames/%07d.png" % framecount)
        plt.draw()
if __name__ == "__main__":
sys.exit(main())
| mit |
a301-teaching/pyman | Book/chap5/Supporting Materials/MultPlotDemo.py | 3 | 1270 | # Demonstrates the following:
# plotting logarithmic axes
# user-defined functions
# "where" function, NumPy array conditional
import numpy as np
import matplotlib.pyplot as plt
# Define the sinc function, with output for x=0 defined
# as a special case to avoid division by zero
def s(x):
    """Unnormalized sinc: sin(x)/x elementwise, with s(0) defined as 1.

    Parameters
    ----------
    x : array_like
        Input angles in radians.

    Returns
    -------
    ndarray
        sin(x)/x with entries at x == 0 replaced by the limit value 1.
    """
    # np.where evaluates BOTH branches, so sin(x)/x is still computed at
    # x == 0; the original emitted spurious divide-by-zero/invalid
    # RuntimeWarnings there.  Silence them locally -- the bogus values
    # are discarded by the where() selection anyway.
    with np.errstate(divide='ignore', invalid='ignore'):
        a = np.where(x==0., 1., np.sin(x)/x)
    return a
# create arrays for plotting
x = np.arange(0., 10., 0.1)
y = np.exp(x)
t = np.linspace(-10., 10., 100)
z = s(t)
# create a figure window
fig = plt.figure(1, figsize=(9,8))
# subplot: linear plot of exponential
ax1 = fig.add_subplot(2,2,1)
ax1.plot(x, y)
ax1.set_xlabel('time (ms)')
ax1.set_ylabel('distance (mm)')
ax1.set_title('exponential')
# subplot: semi-log plot of exponential
ax2 = fig.add_subplot(2,2,2)
ax2.plot(x, y)
ax2.set_yscale('log')
ax2.set_xlabel('time (ms)')
ax2.set_ylabel('distance (mm)')
ax2.set_title('exponential')
# subplot: wide subplot of sinc function
ax3 = fig.add_subplot(2,1,2)
ax3.plot(t, z, 'r')
ax3.axhline(color='gray')
ax3.axvline(color='gray')
ax3.set_xlabel('angle (deg)')
ax3.set_ylabel('electric field')
ax3.set_title('sinc function')
# Adjusts while space around plots to avoid collisions between subplots
fig.tight_layout()
plt.savefig("MultPlotDemo.pdf")
plt.show()
| cc0-1.0 |
chenyyx/scikit-learn-doc-zh | examples/zh/plot_johnson_lindenstrauss_bound.py | 39 | 7489 | r"""
=====================================================================
The Johnson-Lindenstrauss bound for embedding with random projections
=====================================================================
The `Johnson-Lindenstrauss lemma`_ states that any high dimensional
dataset can be randomly projected into a lower dimensional Euclidean
space while controlling the distortion in the pairwise distances.
.. _`Johnson-Lindenstrauss lemma`: https://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma
Theoretical bounds
==================
The distortion introduced by a random projection `p` is asserted by
the fact that `p` is defining an eps-embedding with good probability
as defined by:
.. math::
(1 - eps) \|u - v\|^2 < \|p(u) - p(v)\|^2 < (1 + eps) \|u - v\|^2
Where u and v are any rows taken from a dataset of shape [n_samples,
n_features] and p is a projection by a random Gaussian N(0, 1) matrix
with shape [n_components, n_features] (or a sparse Achlioptas matrix).
The minimum number of components to guarantees the eps-embedding is
given by:
.. math::
n\_components >= 4 log(n\_samples) / (eps^2 / 2 - eps^3 / 3)
The first plot shows that with an increasing number of samples ``n_samples``,
the minimal number of dimensions ``n_components`` increased logarithmically
in order to guarantee an ``eps``-embedding.
The second plot shows that an increase of the admissible
distortion ``eps`` allows to reduce drastically the minimal number of
dimensions ``n_components`` for a given number of samples ``n_samples``
Empirical validation
====================
We validate the above bounds on the digits dataset or on the 20 newsgroups
text document (TF-IDF word frequencies) dataset:
- for the digits dataset, some 8x8 gray level pixels data for 500
handwritten digits pictures are randomly projected to spaces for various
larger number of dimensions ``n_components``.
- for the 20 newsgroups dataset some 500 documents with 100k
features in total are projected using a sparse random matrix to smaller
euclidean spaces with various values for the target number of dimensions
``n_components``.
The default dataset is the digits dataset. To run the example on the twenty
newsgroups dataset, pass the --twenty-newsgroups command line argument to this
script.
For each value of ``n_components``, we plot:
- 2D distribution of sample pairs with pairwise distances in original
and projected spaces as x and y axis respectively.
- 1D histogram of the ratio of those distances (projected / original).
We can see that for low values of ``n_components`` the distribution is wide
with many distorted pairs and a skewed distribution (due to the hard
limit of zero ratio on the left as distances are always positives)
while for larger values of n_components the distortion is controlled
and the distances are well preserved by the random projection.
Remarks
=======
According to the JL lemma, projecting 500 samples without too much distortion
will require at least several thousands dimensions, irrespective of the
number of features of the original dataset.
Hence using random projections on the digits dataset which only has 64 features
in the input space does not make sense: it does not allow for dimensionality
reduction in this case.
On the twenty newsgroups on the other hand the dimensionality can be decreased
from 56436 down to 10000 while reasonably preserving pairwise distances.
"""
print(__doc__)
import sys
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import SparseRandomProjection
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.datasets import load_digits
from sklearn.metrics.pairwise import euclidean_distances
# Part 1: plot the theoretical dependency between n_components_min and
# n_samples
# range of admissible distortions
eps_range = np.linspace(0.1, 0.99, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(eps_range)))
# range of number of samples (observation) to embed
n_samples_range = np.logspace(1, 9, 9)
plt.figure()
for eps, color in zip(eps_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples_range, eps=eps)
plt.loglog(n_samples_range, min_n_components, color=color)
plt.legend(["eps = %0.1f" % eps for eps in eps_range], loc="lower right")
plt.xlabel("Number of observations to eps-embed")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_samples vs n_components")
# range of admissible distortions
eps_range = np.linspace(0.01, 0.99, 100)
# range of number of samples (observation) to embed
n_samples_range = np.logspace(2, 6, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(n_samples_range)))
plt.figure()
for n_samples, color in zip(n_samples_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples, eps=eps_range)
plt.semilogy(eps_range, min_n_components, color=color)
plt.legend(["n_samples = %d" % n for n in n_samples_range], loc="upper right")
plt.xlabel("Distortion eps")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_components vs eps")
# Part 2: perform sparse random projection of some digits images which are
# quite low dimensional and dense or documents of the 20 newsgroups dataset
# which is both high dimensional and sparse
if '--twenty-newsgroups' in sys.argv:
# Need an internet connection hence not enabled by default
data = fetch_20newsgroups_vectorized().data[:500]
else:
data = load_digits().data[:500]
n_samples, n_features = data.shape
print("Embedding %d samples with dim %d using various random projections"
% (n_samples, n_features))
n_components_range = np.array([300, 1000, 10000])
dists = euclidean_distances(data, squared=True).ravel()
# select only non-identical samples pairs
nonzero = dists != 0
dists = dists[nonzero]
for n_components in n_components_range:
t0 = time()
rp = SparseRandomProjection(n_components=n_components)
projected_data = rp.fit_transform(data)
print("Projected %d samples from %d to %d in %0.3fs"
% (n_samples, n_features, n_components, time() - t0))
if hasattr(rp, 'components_'):
n_bytes = rp.components_.data.nbytes
n_bytes += rp.components_.indices.nbytes
print("Random matrix with size: %0.3fMB" % (n_bytes / 1e6))
projected_dists = euclidean_distances(
projected_data, squared=True).ravel()[nonzero]
plt.figure()
plt.hexbin(dists, projected_dists, gridsize=100, cmap=plt.cm.PuBu)
plt.xlabel("Pairwise squared distances in original space")
plt.ylabel("Pairwise squared distances in projected space")
plt.title("Pairwise distances distribution for n_components=%d" %
n_components)
cb = plt.colorbar()
cb.set_label('Sample pairs counts')
rates = projected_dists / dists
print("Mean distances rate: %0.2f (%0.2f)"
% (np.mean(rates), np.std(rates)))
plt.figure()
plt.hist(rates, bins=50, normed=True, range=(0., 2.), edgecolor='k')
plt.xlabel("Squared distances rate: projected / original")
plt.ylabel("Distribution of samples pairs")
plt.title("Histogram of pairwise distance rates for n_components=%d" %
n_components)
# TODO: compute the expected value of eps and add them to the previous plot
# as vertical lines / region
plt.show()
| gpl-3.0 |
kdaily/altanalyze | misopy/sashimi_plot/Sashimi.py | 1 | 3716 | ##
## Class for representing figures
##
import os
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import rc
import misopy.sashimi_plot.plot_utils.plot_settings as plot_settings
import misopy.sashimi_plot.plot_utils.plotting as plotting
class Sashimi:
"""
Representation of a figure.
"""
def __init__(self, label, output_dir, dimensions=None, png=False,
output_filename=None, settings_filename=None,
event=None, chrom=None, no_posteriors=False):
"""
Initialize image settings.
"""
self.output_ext = ".pdf"
if png:
self.output_ext = ".png"
# Plot label, will be used in creating the plot
# output filename
self.label = label
# Set output directory
self.set_output_dir(output_dir)
# Plot settings
self.settings_filename = settings_filename
if self.settings_filename != None:
self.settings = plot_settings.parse_plot_settings(settings_filename,
event=event,
chrom=chrom,
no_posteriors=no_posteriors)
else:
# Load default settings if no settings filename was given
self.settings = plot_settings.get_default_settings()
if output_filename != None:
# If explicit output filename is given to us, use it
self.output_filename = output_filename
else:
# Otherwise, use the label and the output directory
self.set_output_filename()
if dimensions != None:
self.dimensions = dimensions
else:
fig_height = self.settings["fig_height"]
fig_width = self.settings["fig_width"]
#print "Reading dimensions from settings..."
#print " - Height: %.2f" %(float(fig_height))
#print " - Width: %.2f" %(float(fig_width))
self.dimensions = [fig_width, fig_height]
def set_output_dir(self, output_dir):
self.output_dir = os.path.abspath(os.path.expanduser(output_dir))
def set_output_filename(self):
plot_basename = "%s%s" %(self.label, self.output_ext)
self.output_filename = os.path.join(self.output_dir, plot_basename)
def setup_figure(self):
#print "Setting up plot using dimensions: ", self.dimensions
plt.figure(figsize=self.dimensions)
# If asked, use sans serif fonts
font_size = self.settings["font_size"]
if self.settings["sans_serif"]:
#print "Using sans serif fonts."
plotting.make_sans_serif(font_size=font_size)
def save_plot(self, plot_label=None):
"""
Save plot to the output directory. Determine
the file type.
"""
if self.output_filename == None:
raise Exception, "sashimi_plot does not know where to save the plot."
output_fname = None
if plot_label is not None:
# Use custom plot label if given
ext = self.output_filename.rsplit(".")[0]
dirname = os.path.dirname(self.output_filename)
output_fname = \
os.path.dirname(dirname, "%s.%s" %(plot_label, ext))
else:
output_fname = self.output_filename
print '.',
#print "Saving plot to: %s" %(output_fname)
#output_fname2=output_fname.replace(".pdf")
plt.savefig(output_fname)
plt.clf()
plt.close() ### May result in TK associated errors later on
| apache-2.0 |
tri-CSI/Bioinfo | lib/GTF.py | 1 | 2702 | #!/usr/bin/env python
"""
GTF.py
Kamil Slowikowski
December 24, 2013
Changed by: Tran Minh Tri
Org: CTRAD - CSI
ver: 0.0.1
Read GFF/GTF files. Works with gzip compressed files and pandas.
http://useast.ensembl.org/info/website/upload/gff.html
Source: https://gist.github.com/slowkow/8101481
"""
from collections import defaultdict
import gzip
import pandas as pd
import re
GTF_HEADER = ['seqname', 'source', 'feature', 'start', 'end', 'score',
'strand', 'frame']
R_SEMICOLON = re.compile(r'\s*;\s*')
R_COMMA = re.compile(r'\s*,\s*')
R_KEYVALUE = re.compile(r'(\s+|\s*=\s*)')
def dataframe(filename):
    """Return 2 dictionaries: gene_id -> gene_name and
    transcript_id -> (gene_id, gene_name), built from a GTF file.

    Only the first occurrence of each id is recorded; later records with
    the same id are ignored.
    """
    GeneIdDict = {}
    TranScIdDict = {}
    for line in lines(filename):
        for key in line:
            # BUGFIX: the original tested `not key in <dict>`, i.e. whether
            # the literal string "transcript_id"/"gene_id" was a dict key
            # (always true), so every record silently overwrote earlier
            # entries.  Test the id VALUE instead.
            if key == "transcript_id" and line[key] not in TranScIdDict:
                TranScIdDict[line[key]] = (line["gene_id"], line["gene_name"])
            if key == "gene_id" and line[key] not in GeneIdDict:
                GeneIdDict[line[key]] = line["gene_name"]
    return (GeneIdDict, TranScIdDict)
def lines(filename):
    """Open an optionally gzipped GTF file and generate a dict for each line.
    """
    # Choose the opener by extension so .gz and plain files share one path.
    fn_open = gzip.open if filename.endswith('.gz') else open
    with fn_open(filename) as fh:
        for line in fh:
            if line.startswith('#'):
                # Skip comment/header lines.
                continue
            else:
                yield parse(line)
def parse(line):
    """Parse a single GTF line and return a dict.
    """
    result = {}
    fields = line.rstrip().split('\t')
    # The first eight tab-separated columns are the fixed GTF fields.
    for i, col in enumerate(GTF_HEADER):
        result[col] = _get_value(fields[i])
    # INFO field consists of "key1=value;key2=value;...".
    infos = re.split(R_SEMICOLON, fields[8])
    for i, info in enumerate(infos, 1):
        # It should be key="value".
        try:
            key, _, value = re.split(R_KEYVALUE, info)
        # But sometimes it is just "value".
        except ValueError:
            key = 'INFO{}'.format(i)
            value = info
        # Ignore the field if there is no value.
        if value:
            result[key] = _get_value(value)
    return result
def _get_value(value):
if not value:
return None
# Strip double and single quotes.
value = value.strip('"\'')
# Return a list if the value has a comma.
if ',' in value:
value = re.split(R_COMMA, value)
# These values are equivalent to None.
elif value in ['', '.', 'NA']:
return None
return value | cc0-1.0 |
xiyuw123/Tax-Calculator | taxcalc/decorators.py | 1 | 11007 | import numpy as np
import pandas as pd
import inspect
from numba import jit, vectorize, guvectorize
from functools import wraps
from six import StringIO
import ast
import toolz
class GetReturnNode(ast.NodeVisitor):
    """
    AST visitor that extracts the names returned by a calc-style
    function: the element names of a returned tuple, or the single
    returned name wrapped in a one-element list.
    """
    def visit_Return(self, node):
        # A tuple return yields one name per element; any other return
        # value is assumed to be a single Name node.
        target = node.value
        if not isinstance(target, ast.Tuple):
            return [target.id]
        return [element.id for element in target.elts]
def dataframe_guvectorize(dtype_args, dtype_sig):
    """
    Extracts numpy arrays from caller arguments and passes them
    to guvectorized numba functions

    dtype_args: numba type signatures for guvectorize.
    dtype_sig: guvectorize layout string (e.g. '(n)->(n)').
    """
    def make_wrapper(func):
        # Compile the guvectorized kernel once, at decoration time.
        vecd_f = guvectorize(dtype_args, dtype_sig)(func)
        @wraps(func)
        def wrapper(*args, **kwargs):
            # np_arrays = [getattr(args[0], i).values for i in theargs]
            # Unwrap pandas objects to their underlying ndarrays.
            arrays = [arg.values for arg in args]
            ans = vecd_f(*arrays)
            return ans
        return wrapper
    return make_wrapper
def dataframe_vectorize(dtype_args):
    """
    Extracts numpy arrays from caller arguments and passes them
    to vectorized numba functions

    dtype_args: numba type signatures for vectorize.
    """
    def make_wrapper(func):
        # Compile the vectorized kernel once, at decoration time.
        vecd_f = vectorize(dtype_args)(func)
        @wraps(func)
        def wrapper(*args, **kwargs):
            # Unwrap pandas objects to their underlying ndarrays.
            arrays = [arg.values for arg in args]
            ans = vecd_f(*arrays)
            return ans
        return wrapper
    return make_wrapper
def dataframe_wrap_guvectorize(dtype_args, dtype_sig):
    """
    Extracts particular numpy arrays from caller argments and passes
    them to guvectorize. Goes one step further than dataframe_guvectorize
    by looking for the column names in the dataframe and just extracting those

    dtype_args: numba type signatures for guvectorize.
    dtype_sig: guvectorize layout string (e.g. '(n)->(n)').
    """
    def make_wrapper(func):
        theargs = inspect.getargspec(func).args
        # Compile the guvectorized kernel once, at decoration time.
        vecd_f = guvectorize(dtype_args, dtype_sig)(func)
        # CONSISTENCY FIX: preserve the wrapped function's metadata with
        # @wraps, matching the sibling dataframe_* decorators above.
        @wraps(func)
        def wrapper(*args, **kwargs):
            # Extract only the columns named in func's signature from the
            # DataFrame passed as the first positional argument.
            np_arrays = [getattr(args[0], i).values for i in theargs]
            ans = vecd_f(*np_arrays)
            return ans
        return wrapper
    return make_wrapper
def create_apply_function_string(sigout, sigin, parameters):
    """
    Create a string for a function of the form:
        def ap_fuc(x_0, x_1, x_2, ...):
            for i in range(len(x_0)):
                x_0[i], ... = jitted_f(x_j[i],....)
            return x_0[i], ...
    where the specific args to jitted_f and the number of
    values to return is destermined by sigout and signn
    Parameters
    ----------
    sigout: iterable of the out arguments
    sigin: iterable of the in arguments
    parameters: iterable of which of the args (from in_args) are parameter
                variables (as opposed to column records). This influences
                how we construct the '_apply' function
    Returns
    -------
    a String representing the function
    """
    s = StringIO()
    total_len = len(sigout) + len(sigin)
    # Positional placeholder names: outputs first, then inputs.
    out_args = ["x_" + str(i) for i in range(0, len(sigout))]
    in_args = ["x_" + str(i) for i in range(len(sigout), total_len)]
    s.write("def ap_func({0}):\n".format(",".join(out_args + in_args)))
    s.write(" for i in range(len(x_0)):\n")
    out_index = [x + "[i]" for x in out_args]
    in_index = []
    for arg, _var in zip(in_args, sigin):
        # Parameter args are scalars (no [i] subscript); record columns
        # are indexed element-by-element.
        in_index.append(arg + "[i]" if _var not in parameters else arg)
    s.write(" " + ",".join(out_index) + " = ")
    s.write("jitted_f(" + ",".join(in_index) + ")\n")
    s.write(" return " + ",".join(out_args) + "\n")
    return s.getvalue()
def create_toplevel_function_string(args_out, args_in, pm_or_pf,
                                    kwargs_for_func={}):
    """
    Create a string for a function of the form:
        def hl_func(x_0, x_1, x_2, ...):
            outputs = (...) = calc_func(...)
            header = [...]
            return DataFrame(data, columns=header)
    where the specific args to jitted_f and the number of
    values to return is destermined by sigout and signn
    Parameters
    ----------
    args_out: iterable of the out arguments
    args_in: iterable of the in arguments
    pm_or_pf: iterable of strings for object that holds each arg
    kwargs_for_func: dictionary of keyword args for the function
        NOTE(review): a mutable default argument; safe only as long as
        callers never mutate it -- confirm.
    Returns
    -------
    a String representing the function
    """
    s = StringIO()
    s.write("def hl_func(pm, pf")
    if kwargs_for_func:
        # Forward the keyword arguments into the generated signature.
        kwargs = ",".join(str(k) + "=" + str(v) for k, v in
                          kwargs_for_func.items())
        s.write(", " + kwargs + " ")
    s.write("):\n")
    s.write(" from pandas import DataFrame\n")
    s.write(" import numpy as np\n")
    s.write(" outputs = \\\n")
    outs = []
    # NOTE(review): this mutates the caller's args_in list in place.
    for arg in kwargs_for_func:
        args_in.remove(arg)
    for p, attr in zip(pm_or_pf, args_out + args_in):
        outs.append(p + "." + attr + ", ")
    # outs is rebuilt here; only the out args appear on the left-hand side.
    outs = [m_or_f + "." + arg for m_or_f, arg in zip(pm_or_pf, args_out)]
    s.write(" (" + ", ".join(outs) + ") = \\\n")
    s.write(" " + "applied_f(")
    for p, attr in zip(pm_or_pf, args_out + args_in):
        s.write(p + "." + attr + ", ")
    for arg in kwargs_for_func:
        s.write(arg + ", ")
    s.write(")\n")
    s.write(" header = [")
    col_headers = ["'" + out + "'" for out in args_out]
    s.write(", ".join(col_headers))
    s.write("]\n")
    # Single-column results need no stacking.
    if len(args_out) == 1:
        s.write(" return DataFrame(data=outputs,"
                "columns=header)")
    else:
        s.write(" return DataFrame(data=np.column_stack("
                "outputs),columns=header)")
    return s.getvalue()
def make_apply_function(func, out_args, in_args, parameters, do_jit=True,
                        **kwargs):
    """
    Takes a '_calc' function and creates the necessary Python code for an
    _apply style function. Will also jit the function if desired
    Parameters
    ----------
    func: the 'calc' style function
    out_args: list of out arguments for the apply function
    in_args: list of in arguments for the apply function
    parameters: iterable of which of the args (from in_args) are parameter
                variables (as opposed to column records). This influences
                how we construct the '_apply' function
    do_jit: Bool, if True, jit the resulting apply function
    Returns
    -------
    '_apply' style function
    """
    # JIT-compile the scalar calc function first.
    jitted_f = jit(**kwargs)(func)
    # Generate the row-loop wrapper source and exec it with jitted_f bound.
    apfunc = create_apply_function_string(out_args, in_args, parameters)
    func_code = compile(apfunc, "<string>", "exec")
    fakeglobals = {}
    eval(func_code, {"jitted_f": jitted_f}, fakeglobals)
    if do_jit:
        return jit(**kwargs)(fakeglobals['ap_func'])
    else:
        return fakeglobals['ap_func']
def apply_jit(dtype_sig_out, dtype_sig_in, parameters=None, **kwargs):
    """
    make a decorator that takes in a _calc-style function, handle
    the apply step

    dtype_sig_out/dtype_sig_in: names of the output/input columns.
    parameters: names that are scalar parameters rather than columns.
    Remaining kwargs are forwarded to numba.jit.
    """
    if not parameters:
        parameters = []
    def make_wrapper(func):
        theargs = inspect.getargspec(func).args
        jitted_f = jit(**kwargs)(func)
        jitted_apply = make_apply_function(func, dtype_sig_out,
                                           dtype_sig_in, parameters,
                                           **kwargs)
        def wrapper(*args, **kwargs):
            # Resolve each name against the first positional object,
            # falling back to the second (parameters vs records holder).
            in_arrays = []
            out_arrays = []
            for farg in theargs:
                if hasattr(args[0], farg):
                    in_arrays.append(getattr(args[0], farg))
                else:
                    in_arrays.append(getattr(args[1], farg))
            for farg in dtype_sig_out:
                if hasattr(args[0], farg):
                    out_arrays.append(getattr(args[0], farg))
                else:
                    out_arrays.append(getattr(args[1], farg))
            # Apply function expects outputs first, then inputs.
            final_array = out_arrays + in_arrays
            ans = jitted_apply(*final_array)
            return ans
        return wrapper
    return make_wrapper
def iterate_jit(parameters=None, **kwargs):
    """
    make a decorator that takes in a _calc-style function, create a
    function that handles the "high-level" function and the "_apply"
    style function

    parameters: names that are scalar parameters rather than columns.
    Remaining kwargs are split between numba.jit options and keyword
    arguments of the decorated function itself.
    """
    if not parameters:
        parameters = []
    def make_wrapper(func):
        # Step 1. Wrap this function in apply_jit
        # from apply_jit
        # Get the input arguments from the function
        in_args = inspect.getargspec(func).args
        try:
            jit_args = inspect.getargspec(jit).args + ['nopython']
        except TypeError:
            # numba is unavailable (e.g. on ReadTheDocs); fall back to
            # the undecorated function.
            print ("This should only be seen in RTD, if not install numba!")
            return func
        # Split kwargs: those naming func's own args vs jit options.
        kwargs_for_func = toolz.keyfilter(in_args.__contains__, kwargs)
        kwargs_for_jit = toolz.keyfilter(jit_args.__contains__, kwargs)
        src = inspect.getsourcelines(func)[0]
        # Discover the return arguments by walking
        # the AST of the function
        all_returned_vals = []
        gnr = GetReturnNode()
        all_out_args = None
        for node in ast.walk(ast.parse(''.join(src))):
            all_out_args = gnr.visit(node)
            if all_out_args:
                break
        if not all_out_args:
            raise ValueError("Can't find return statement in function!")
        # Now create the apply jitted function
        applied_jitted_f = make_apply_function(func,
                                               list(reversed(all_out_args)),
                                               in_args,
                                               parameters=parameters,
                                               do_jit=True,
                                               **kwargs_for_jit)
        def wrapper(*args, **kwargs):
            in_arrays = []
            out_arrays = []
            pm_or_pf = []
            # Record, for each name, whether it lives on the parameters
            # (pm) or records (pf) object -- needed for code generation.
            for farg in all_out_args + in_args:
                if hasattr(args[0], farg):
                    in_arrays.append(getattr(args[0], farg))
                    pm_or_pf.append("pm")
                elif hasattr(args[1], farg):
                    in_arrays.append(getattr(args[1], farg))
                    pm_or_pf.append("pf")
                elif not farg in kwargs_for_func:
                    raise ValueError("Unknown arg: " + farg)
            # Create the high level function
            high_level_func = create_toplevel_function_string(all_out_args,
                                                              list(in_args),
                                                              pm_or_pf,
                                                              kwargs_for_func)
            func_code = compile(high_level_func, "<string>", "exec")
            fakeglobals = {}
            eval(func_code, {"applied_f": applied_jitted_f}, fakeglobals)
            high_level_fn = fakeglobals['hl_func']
            ans = high_level_fn(*args, **kwargs)
            return ans
        return wrapper
    return make_wrapper
| mit |
elijah513/scikit-learn | sklearn/linear_model/tests/test_randomized_l1.py | 214 | 4690 | # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.linear_model.randomized_l1 import (lasso_stability_path,
RandomizedLasso,
RandomizedLogisticRegression)
from sklearn.datasets import load_diabetes, load_iris
from sklearn.feature_selection import f_regression, f_classif
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model.base import center_data
diabetes = load_diabetes()
X = diabetes.data
y = diabetes.target
X = StandardScaler().fit_transform(X)
X = X[:, [2, 3, 6, 7, 8]]
# test that the feature score of the best features
F, _ = f_regression(X, y)
def test_lasso_stability_path():
    # Check lasso stability path
    # Load diabetes data and add noisy features
    scaling = 0.3
    coef_grid, scores_path = lasso_stability_path(X, y, scaling=scaling,
                                                  random_state=42,
                                                  n_resampling=30)
    # The three best features by univariate F-score should also have the
    # highest cumulative stability scores along the path.
    assert_array_equal(np.argsort(F)[-3:],
                       np.argsort(np.sum(scores_path, axis=1))[-3:])
def test_randomized_lasso():
    """Behavioral checks for RandomizedLasso on the diabetes subset."""
    scaling = 0.3
    threshold = 0.5
    top_by_f = np.argsort(F)[-3:]

    # Single alpha: top-scored features must agree with f_regression.
    est = RandomizedLasso(verbose=False, alpha=1, random_state=42,
                          scaling=scaling,
                          selection_threshold=threshold)
    scores = est.fit(X, y).scores_
    assert_array_equal(top_by_f, np.argsort(scores)[-3:])

    # A list of alphas: one score column per alpha, same top features.
    est = RandomizedLasso(verbose=False, alpha=[1, 0.8], random_state=42,
                          scaling=scaling,
                          selection_threshold=threshold)
    scores = est.fit(X, y).scores_
    assert_equal(est.all_scores_.shape, (X.shape[1], 2))
    assert_array_equal(top_by_f, np.argsort(scores)[-3:])

    # transform keeps exactly the features scoring above the threshold;
    # inverse_transform restores the original shape.
    X_reduced = est.transform(X)
    X_restored = est.inverse_transform(X_reduced)
    assert_equal(X_reduced.shape[1], np.sum(scores > threshold))
    assert_equal(X_restored.shape, X.shape)

    # alpha='aic' keeps every feature on this easy problem.
    est = RandomizedLasso(verbose=False, alpha='aic', random_state=42,
                          scaling=scaling)
    scores = est.fit(X, y).scores_
    assert_array_equal(scores, X.shape[1] * [1.])

    # scaling outside (0, 1) must be rejected at fit time.
    assert_raises(ValueError,
                  RandomizedLasso(verbose=False, scaling=-0.1).fit, X, y)
    assert_raises(ValueError,
                  RandomizedLasso(verbose=False, scaling=1.1).fit, X, y)
def test_randomized_logistic():
    """Randomized sparse logistic regression ranks features like f_classif."""
    iris = load_iris()
    X_lr = iris.data[:, [0, 2]]
    y_lr = iris.target
    # Restrict to a binary problem (drop class 2).
    keep = y_lr != 2
    X_lr = X_lr[keep]
    y_lr = y_lr[keep]
    F_lr, _ = f_classif(X_lr, y_lr)

    est = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
                                       scaling=0.3, n_resampling=50,
                                       tol=1e-3)
    X_before = X_lr.copy()
    scores = est.fit(X_lr, y_lr).scores_
    assert_array_equal(X_lr, X_before)  # fit must leave X untouched
    assert_array_equal(np.argsort(F_lr), np.argsort(scores))

    # A list of C values must produce the same feature ranking.
    est = RandomizedLogisticRegression(verbose=False, C=[1., 0.5],
                                       random_state=42, scaling=0.3,
                                       n_resampling=50, tol=1e-3)
    scores = est.fit(X_lr, y_lr).scores_
    assert_array_equal(np.argsort(F_lr), np.argsort(scores))
def test_randomized_logistic_sparse():
    """Dense and sparse inputs must yield identical feature scores."""
    iris = load_iris()
    X_lr = iris.data[:, [0, 2]]
    y_lr = iris.target
    # Restrict to a binary problem (drop class 2).
    keep = y_lr != 2
    X_lr = X_lr[keep]
    y_lr = y_lr[keep]
    # Sparse matrices are usually not centered, so center the dense copy
    # first and build the sparse matrix from it.
    X_lr, y_lr, _, _, _ = center_data(X_lr, y_lr, True, True)
    X_sparse = sparse.csr_matrix(X_lr)
    _ = f_classif(X_lr, y_lr)

    params = dict(verbose=False, C=1., random_state=42,
                  scaling=0.3, n_resampling=50, tol=1e-3)
    dense_scores = RandomizedLogisticRegression(**params).fit(
        X_lr, y_lr).scores_
    sparse_scores = RandomizedLogisticRegression(**params).fit(
        X_sparse, y_lr).scores_
    assert_array_equal(dense_scores, sparse_scores)
| bsd-3-clause |
timcera/hspfbintoolbox | tests/test_catalog.py | 1 | 115314 | # -*- coding: utf-8 -*-
"""
catalog
----------------------------------
Tests for `hspfbintoolbox` module.
"""
import csv
import shlex
import subprocess
import sys
from unittest import TestCase
from pandas.testing import assert_frame_equal
try:
from cStringIO import StringIO
except:
from io import StringIO
import pandas as pd
from hspfbintoolbox import hspfbintoolbox
# Map aggregation-interval names to the numeric time-step codes used in
# HSPF binary output files (and in the expected-catalog fixtures below).
interval2codemap = {"yearly": 5, "monthly": 4, "daily": 3, "bivl": 2}
def capture(func, *args, **kwds):
    """Run ``func(*args, **kwds)`` and return everything it printed to stdout.

    The captured text is returned as ``bytes`` when it can be UTF-8
    encoded, otherwise unchanged (mirrors the original Python 2/3
    compatibility behavior of this helper).

    Bug fix: the original replaced ``sys.stdout`` with a StringIO and
    never restored it, so every call permanently silenced all later
    output.  Restore the previous stdout in a ``finally`` block.
    """
    old_stdout = sys.stdout
    sys.stdout = StringIO()  # capture output
    try:
        func(*args, **kwds)
        out = sys.stdout.getvalue()  # release output
    finally:
        sys.stdout = old_stdout
    try:
        out = bytes(out, "utf-8")
    except Exception:
        pass
    return out
def read_unicode_csv(
    filename,
    delimiter=",",
    quotechar='"',
    quoting=csv.QUOTE_MINIMAL,
    lineterminator="\n",
    encoding="utf-8",
):
    """Yield parsed CSV rows from *filename* (an iterable of text lines).

    On Python 3 rows come straight from :mod:`csv` (already unicode).
    On Python 2 the delimiter and quotechar are passed as byte strings
    and every cell is decoded to unicode using *encoding*.
    """
    if sys.version_info[0] >= 3:
        # Python 3: csv works on text directly; pass options through.
        # (See https://docs.python.org/3/library/csv.html)
        reader = csv.reader(
            filename,
            delimiter=delimiter,
            quotechar=quotechar,
            quoting=quoting,
            lineterminator=lineterminator,
        )
        # Rows are already decoded strings — hand them out unchanged.
        yield from reader
    else:
        # Python 2: csv wants bytestring delimiter/quotechar, and each
        # cell must be decoded into unicode afterwards.
        reader = csv.reader(
            filename,
            delimiter=delimiter.encode(encoding),
            quotechar=quotechar.encode(encoding),
            quoting=quoting,
            lineterminator=lineterminator,
        )
        for record in reader:
            yield [field.decode(encoding) for field in record]
class TestDescribe(TestCase):
def setUp(self):
self.catalog = b"""\
LUE , LC,GROUP ,VAR , TC,START ,END ,TC
IMPLND, 11,IWATER ,IMPEV, 5,1951 ,2001 ,yearly
IMPLND, 11,IWATER ,PET , 5,1951 ,2001 ,yearly
IMPLND, 11,IWATER ,RETS , 5,1951 ,2001 ,yearly
IMPLND, 11,IWATER ,SUPY , 5,1951 ,2001 ,yearly
IMPLND, 11,IWATER ,SURO , 5,1951 ,2001 ,yearly
IMPLND, 11,IWATER ,SURS , 5,1951 ,2001 ,yearly
IMPLND, 12,IWATER ,IMPEV, 5,1951 ,2001 ,yearly
IMPLND, 12,IWATER ,PET , 5,1951 ,2001 ,yearly
IMPLND, 12,IWATER ,RETS , 5,1951 ,2001 ,yearly
IMPLND, 12,IWATER ,SUPY , 5,1951 ,2001 ,yearly
IMPLND, 12,IWATER ,SURO , 5,1951 ,2001 ,yearly
IMPLND, 12,IWATER ,SURS , 5,1951 ,2001 ,yearly
IMPLND, 13,IWATER ,IMPEV, 5,1951 ,2001 ,yearly
IMPLND, 13,IWATER ,PET , 5,1951 ,2001 ,yearly
IMPLND, 13,IWATER ,RETS , 5,1951 ,2001 ,yearly
IMPLND, 13,IWATER ,SUPY , 5,1951 ,2001 ,yearly
IMPLND, 13,IWATER ,SURO , 5,1951 ,2001 ,yearly
IMPLND, 13,IWATER ,SURS , 5,1951 ,2001 ,yearly
IMPLND, 14,IWATER ,IMPEV, 5,1951 ,2001 ,yearly
IMPLND, 14,IWATER ,PET , 5,1951 ,2001 ,yearly
IMPLND, 14,IWATER ,RETS , 5,1951 ,2001 ,yearly
IMPLND, 14,IWATER ,SUPY , 5,1951 ,2001 ,yearly
IMPLND, 14,IWATER ,SURO , 5,1951 ,2001 ,yearly
IMPLND, 14,IWATER ,SURS , 5,1951 ,2001 ,yearly
IMPLND, 21,IWATER ,IMPEV, 5,1951 ,2001 ,yearly
IMPLND, 21,IWATER ,PET , 5,1951 ,2001 ,yearly
IMPLND, 21,IWATER ,RETS , 5,1951 ,2001 ,yearly
IMPLND, 21,IWATER ,SUPY , 5,1951 ,2001 ,yearly
IMPLND, 21,IWATER ,SURO , 5,1951 ,2001 ,yearly
IMPLND, 21,IWATER ,SURS , 5,1951 ,2001 ,yearly
IMPLND, 22,IWATER ,IMPEV, 5,1951 ,2001 ,yearly
IMPLND, 22,IWATER ,PET , 5,1951 ,2001 ,yearly
IMPLND, 22,IWATER ,RETS , 5,1951 ,2001 ,yearly
IMPLND, 22,IWATER ,SUPY , 5,1951 ,2001 ,yearly
IMPLND, 22,IWATER ,SURO , 5,1951 ,2001 ,yearly
IMPLND, 22,IWATER ,SURS , 5,1951 ,2001 ,yearly
IMPLND, 23,IWATER ,IMPEV, 5,1951 ,2001 ,yearly
IMPLND, 23,IWATER ,PET , 5,1951 ,2001 ,yearly
IMPLND, 23,IWATER ,RETS , 5,1951 ,2001 ,yearly
IMPLND, 23,IWATER ,SUPY , 5,1951 ,2001 ,yearly
IMPLND, 23,IWATER ,SURO , 5,1951 ,2001 ,yearly
IMPLND, 23,IWATER ,SURS , 5,1951 ,2001 ,yearly
IMPLND, 24,IWATER ,IMPEV, 5,1951 ,2001 ,yearly
IMPLND, 24,IWATER ,PET , 5,1951 ,2001 ,yearly
IMPLND, 24,IWATER ,RETS , 5,1951 ,2001 ,yearly
IMPLND, 24,IWATER ,SUPY , 5,1951 ,2001 ,yearly
IMPLND, 24,IWATER ,SURO , 5,1951 ,2001 ,yearly
IMPLND, 24,IWATER ,SURS , 5,1951 ,2001 ,yearly
IMPLND, 31,IWATER ,IMPEV, 5,1951 ,2001 ,yearly
IMPLND, 31,IWATER ,PET , 5,1951 ,2001 ,yearly
IMPLND, 31,IWATER ,RETS , 5,1951 ,2001 ,yearly
IMPLND, 31,IWATER ,SUPY , 5,1951 ,2001 ,yearly
IMPLND, 31,IWATER ,SURO , 5,1951 ,2001 ,yearly
IMPLND, 31,IWATER ,SURS , 5,1951 ,2001 ,yearly
IMPLND, 32,IWATER ,IMPEV, 5,1951 ,2001 ,yearly
IMPLND, 32,IWATER ,PET , 5,1951 ,2001 ,yearly
IMPLND, 32,IWATER ,RETS , 5,1951 ,2001 ,yearly
IMPLND, 32,IWATER ,SUPY , 5,1951 ,2001 ,yearly
IMPLND, 32,IWATER ,SURO , 5,1951 ,2001 ,yearly
IMPLND, 32,IWATER ,SURS , 5,1951 ,2001 ,yearly
IMPLND, 33,IWATER ,IMPEV, 5,1951 ,2001 ,yearly
IMPLND, 33,IWATER ,PET , 5,1951 ,2001 ,yearly
IMPLND, 33,IWATER ,RETS , 5,1951 ,2001 ,yearly
IMPLND, 33,IWATER ,SUPY , 5,1951 ,2001 ,yearly
IMPLND, 33,IWATER ,SURO , 5,1951 ,2001 ,yearly
IMPLND, 33,IWATER ,SURS , 5,1951 ,2001 ,yearly
IMPLND, 111,IWATER ,IMPEV, 5,1951 ,2001 ,yearly
IMPLND, 111,IWATER ,PET , 5,1951 ,2001 ,yearly
IMPLND, 111,IWATER ,RETS , 5,1951 ,2001 ,yearly
IMPLND, 111,IWATER ,SUPY , 5,1951 ,2001 ,yearly
IMPLND, 111,IWATER ,SURO , 5,1951 ,2001 ,yearly
IMPLND, 111,IWATER ,SURS , 5,1951 ,2001 ,yearly
IMPLND, 112,IWATER ,IMPEV, 5,1951 ,2001 ,yearly
IMPLND, 112,IWATER ,PET , 5,1951 ,2001 ,yearly
IMPLND, 112,IWATER ,RETS , 5,1951 ,2001 ,yearly
IMPLND, 112,IWATER ,SUPY , 5,1951 ,2001 ,yearly
IMPLND, 112,IWATER ,SURO , 5,1951 ,2001 ,yearly
IMPLND, 112,IWATER ,SURS , 5,1951 ,2001 ,yearly
IMPLND, 113,IWATER ,IMPEV, 5,1951 ,2001 ,yearly
IMPLND, 113,IWATER ,PET , 5,1951 ,2001 ,yearly
IMPLND, 113,IWATER ,RETS , 5,1951 ,2001 ,yearly
IMPLND, 113,IWATER ,SUPY , 5,1951 ,2001 ,yearly
IMPLND, 113,IWATER ,SURO , 5,1951 ,2001 ,yearly
IMPLND, 113,IWATER ,SURS , 5,1951 ,2001 ,yearly
IMPLND, 114,IWATER ,IMPEV, 5,1951 ,2001 ,yearly
IMPLND, 114,IWATER ,PET , 5,1951 ,2001 ,yearly
IMPLND, 114,IWATER ,RETS , 5,1951 ,2001 ,yearly
IMPLND, 114,IWATER ,SUPY , 5,1951 ,2001 ,yearly
IMPLND, 114,IWATER ,SURO , 5,1951 ,2001 ,yearly
IMPLND, 114,IWATER ,SURS , 5,1951 ,2001 ,yearly
IMPLND, 211,IWATER ,IMPEV, 5,1951 ,2001 ,yearly
IMPLND, 211,IWATER ,PET , 5,1951 ,2001 ,yearly
IMPLND, 211,IWATER ,RETS , 5,1951 ,2001 ,yearly
IMPLND, 211,IWATER ,SUPY , 5,1951 ,2001 ,yearly
IMPLND, 211,IWATER ,SURO , 5,1951 ,2001 ,yearly
IMPLND, 211,IWATER ,SURS , 5,1951 ,2001 ,yearly
IMPLND, 212,IWATER ,IMPEV, 5,1951 ,2001 ,yearly
IMPLND, 212,IWATER ,PET , 5,1951 ,2001 ,yearly
IMPLND, 212,IWATER ,RETS , 5,1951 ,2001 ,yearly
IMPLND, 212,IWATER ,SUPY , 5,1951 ,2001 ,yearly
IMPLND, 212,IWATER ,SURO , 5,1951 ,2001 ,yearly
IMPLND, 212,IWATER ,SURS , 5,1951 ,2001 ,yearly
IMPLND, 213,IWATER ,IMPEV, 5,1951 ,2001 ,yearly
IMPLND, 213,IWATER ,PET , 5,1951 ,2001 ,yearly
IMPLND, 213,IWATER ,RETS , 5,1951 ,2001 ,yearly
IMPLND, 213,IWATER ,SUPY , 5,1951 ,2001 ,yearly
IMPLND, 213,IWATER ,SURO , 5,1951 ,2001 ,yearly
IMPLND, 213,IWATER ,SURS , 5,1951 ,2001 ,yearly
IMPLND, 214,IWATER ,IMPEV, 5,1951 ,2001 ,yearly
IMPLND, 214,IWATER ,PET , 5,1951 ,2001 ,yearly
IMPLND, 214,IWATER ,RETS , 5,1951 ,2001 ,yearly
IMPLND, 214,IWATER ,SUPY , 5,1951 ,2001 ,yearly
IMPLND, 214,IWATER ,SURO , 5,1951 ,2001 ,yearly
IMPLND, 214,IWATER ,SURS , 5,1951 ,2001 ,yearly
IMPLND, 301,IWATER ,IMPEV, 5,1951 ,2001 ,yearly
IMPLND, 301,IWATER ,PET , 5,1951 ,2001 ,yearly
IMPLND, 301,IWATER ,RETS , 5,1951 ,2001 ,yearly
IMPLND, 301,IWATER ,SUPY , 5,1951 ,2001 ,yearly
IMPLND, 301,IWATER ,SURO , 5,1951 ,2001 ,yearly
IMPLND, 301,IWATER ,SURS , 5,1951 ,2001 ,yearly
IMPLND, 302,IWATER ,IMPEV, 5,1951 ,2001 ,yearly
IMPLND, 302,IWATER ,PET , 5,1951 ,2001 ,yearly
IMPLND, 302,IWATER ,RETS , 5,1951 ,2001 ,yearly
IMPLND, 302,IWATER ,SUPY , 5,1951 ,2001 ,yearly
IMPLND, 302,IWATER ,SURO , 5,1951 ,2001 ,yearly
IMPLND, 302,IWATER ,SURS , 5,1951 ,2001 ,yearly
IMPLND, 303,IWATER ,IMPEV, 5,1951 ,2001 ,yearly
IMPLND, 303,IWATER ,PET , 5,1951 ,2001 ,yearly
IMPLND, 303,IWATER ,RETS , 5,1951 ,2001 ,yearly
IMPLND, 303,IWATER ,SUPY , 5,1951 ,2001 ,yearly
IMPLND, 303,IWATER ,SURO , 5,1951 ,2001 ,yearly
IMPLND, 303,IWATER ,SURS , 5,1951 ,2001 ,yearly
IMPLND, 304,IWATER ,IMPEV, 5,1951 ,2001 ,yearly
IMPLND, 304,IWATER ,PET , 5,1951 ,2001 ,yearly
IMPLND, 304,IWATER ,RETS , 5,1951 ,2001 ,yearly
IMPLND, 304,IWATER ,SUPY , 5,1951 ,2001 ,yearly
IMPLND, 304,IWATER ,SURO , 5,1951 ,2001 ,yearly
IMPLND, 304,IWATER ,SURS , 5,1951 ,2001 ,yearly
IMPLND, 311,IWATER ,IMPEV, 5,1951 ,2001 ,yearly
IMPLND, 311,IWATER ,PET , 5,1951 ,2001 ,yearly
IMPLND, 311,IWATER ,RETS , 5,1951 ,2001 ,yearly
IMPLND, 311,IWATER ,SUPY , 5,1951 ,2001 ,yearly
IMPLND, 311,IWATER ,SURO , 5,1951 ,2001 ,yearly
IMPLND, 311,IWATER ,SURS , 5,1951 ,2001 ,yearly
IMPLND, 312,IWATER ,IMPEV, 5,1951 ,2001 ,yearly
IMPLND, 312,IWATER ,PET , 5,1951 ,2001 ,yearly
IMPLND, 312,IWATER ,RETS , 5,1951 ,2001 ,yearly
IMPLND, 312,IWATER ,SUPY , 5,1951 ,2001 ,yearly
IMPLND, 312,IWATER ,SURO , 5,1951 ,2001 ,yearly
IMPLND, 312,IWATER ,SURS , 5,1951 ,2001 ,yearly
IMPLND, 313,IWATER ,IMPEV, 5,1951 ,2001 ,yearly
IMPLND, 313,IWATER ,PET , 5,1951 ,2001 ,yearly
IMPLND, 313,IWATER ,RETS , 5,1951 ,2001 ,yearly
IMPLND, 313,IWATER ,SUPY , 5,1951 ,2001 ,yearly
IMPLND, 313,IWATER ,SURO , 5,1951 ,2001 ,yearly
IMPLND, 313,IWATER ,SURS , 5,1951 ,2001 ,yearly
IMPLND, 314,IWATER ,IMPEV, 5,1951 ,2001 ,yearly
IMPLND, 314,IWATER ,PET , 5,1951 ,2001 ,yearly
IMPLND, 314,IWATER ,RETS , 5,1951 ,2001 ,yearly
IMPLND, 314,IWATER ,SUPY , 5,1951 ,2001 ,yearly
IMPLND, 314,IWATER ,SURO , 5,1951 ,2001 ,yearly
IMPLND, 314,IWATER ,SURS , 5,1951 ,2001 ,yearly
IMPLND, 411,IWATER ,IMPEV, 5,1951 ,2001 ,yearly
IMPLND, 411,IWATER ,PET , 5,1951 ,2001 ,yearly
IMPLND, 411,IWATER ,RETS , 5,1951 ,2001 ,yearly
IMPLND, 411,IWATER ,SUPY , 5,1951 ,2001 ,yearly
IMPLND, 411,IWATER ,SURO , 5,1951 ,2001 ,yearly
IMPLND, 411,IWATER ,SURS , 5,1951 ,2001 ,yearly
IMPLND, 412,IWATER ,IMPEV, 5,1951 ,2001 ,yearly
IMPLND, 412,IWATER ,PET , 5,1951 ,2001 ,yearly
IMPLND, 412,IWATER ,RETS , 5,1951 ,2001 ,yearly
IMPLND, 412,IWATER ,SUPY , 5,1951 ,2001 ,yearly
IMPLND, 412,IWATER ,SURO , 5,1951 ,2001 ,yearly
IMPLND, 412,IWATER ,SURS , 5,1951 ,2001 ,yearly
IMPLND, 413,IWATER ,IMPEV, 5,1951 ,2001 ,yearly
IMPLND, 413,IWATER ,PET , 5,1951 ,2001 ,yearly
IMPLND, 413,IWATER ,RETS , 5,1951 ,2001 ,yearly
IMPLND, 413,IWATER ,SUPY , 5,1951 ,2001 ,yearly
IMPLND, 413,IWATER ,SURO , 5,1951 ,2001 ,yearly
IMPLND, 413,IWATER ,SURS , 5,1951 ,2001 ,yearly
IMPLND, 414,IWATER ,IMPEV, 5,1951 ,2001 ,yearly
IMPLND, 414,IWATER ,PET , 5,1951 ,2001 ,yearly
IMPLND, 414,IWATER ,RETS , 5,1951 ,2001 ,yearly
IMPLND, 414,IWATER ,SUPY , 5,1951 ,2001 ,yearly
IMPLND, 414,IWATER ,SURO , 5,1951 ,2001 ,yearly
IMPLND, 414,IWATER ,SURS , 5,1951 ,2001 ,yearly
IMPLND, 511,IWATER ,IMPEV, 5,1951 ,2001 ,yearly
IMPLND, 511,IWATER ,PET , 5,1951 ,2001 ,yearly
IMPLND, 511,IWATER ,RETS , 5,1951 ,2001 ,yearly
IMPLND, 511,IWATER ,SUPY , 5,1951 ,2001 ,yearly
IMPLND, 511,IWATER ,SURO , 5,1951 ,2001 ,yearly
IMPLND, 511,IWATER ,SURS , 5,1951 ,2001 ,yearly
IMPLND, 512,IWATER ,IMPEV, 5,1951 ,2001 ,yearly
IMPLND, 512,IWATER ,PET , 5,1951 ,2001 ,yearly
IMPLND, 512,IWATER ,RETS , 5,1951 ,2001 ,yearly
IMPLND, 512,IWATER ,SUPY , 5,1951 ,2001 ,yearly
IMPLND, 512,IWATER ,SURO , 5,1951 ,2001 ,yearly
IMPLND, 512,IWATER ,SURS , 5,1951 ,2001 ,yearly
IMPLND, 513,IWATER ,IMPEV, 5,1951 ,2001 ,yearly
IMPLND, 513,IWATER ,PET , 5,1951 ,2001 ,yearly
IMPLND, 513,IWATER ,RETS , 5,1951 ,2001 ,yearly
IMPLND, 513,IWATER ,SUPY , 5,1951 ,2001 ,yearly
IMPLND, 513,IWATER ,SURO , 5,1951 ,2001 ,yearly
IMPLND, 513,IWATER ,SURS , 5,1951 ,2001 ,yearly
IMPLND, 514,IWATER ,IMPEV, 5,1951 ,2001 ,yearly
IMPLND, 514,IWATER ,PET , 5,1951 ,2001 ,yearly
IMPLND, 514,IWATER ,RETS , 5,1951 ,2001 ,yearly
IMPLND, 514,IWATER ,SUPY , 5,1951 ,2001 ,yearly
IMPLND, 514,IWATER ,SURO , 5,1951 ,2001 ,yearly
IMPLND, 514,IWATER ,SURS , 5,1951 ,2001 ,yearly
IMPLND, 611,IWATER ,IMPEV, 5,1951 ,2001 ,yearly
IMPLND, 611,IWATER ,PET , 5,1951 ,2001 ,yearly
IMPLND, 611,IWATER ,RETS , 5,1951 ,2001 ,yearly
IMPLND, 611,IWATER ,SUPY , 5,1951 ,2001 ,yearly
IMPLND, 611,IWATER ,SURO , 5,1951 ,2001 ,yearly
IMPLND, 611,IWATER ,SURS , 5,1951 ,2001 ,yearly
IMPLND, 612,IWATER ,IMPEV, 5,1951 ,2001 ,yearly
IMPLND, 612,IWATER ,PET , 5,1951 ,2001 ,yearly
IMPLND, 612,IWATER ,RETS , 5,1951 ,2001 ,yearly
IMPLND, 612,IWATER ,SUPY , 5,1951 ,2001 ,yearly
IMPLND, 612,IWATER ,SURO , 5,1951 ,2001 ,yearly
IMPLND, 612,IWATER ,SURS , 5,1951 ,2001 ,yearly
IMPLND, 613,IWATER ,IMPEV, 5,1951 ,2001 ,yearly
IMPLND, 613,IWATER ,PET , 5,1951 ,2001 ,yearly
IMPLND, 613,IWATER ,RETS , 5,1951 ,2001 ,yearly
IMPLND, 613,IWATER ,SUPY , 5,1951 ,2001 ,yearly
IMPLND, 613,IWATER ,SURO , 5,1951 ,2001 ,yearly
IMPLND, 613,IWATER ,SURS , 5,1951 ,2001 ,yearly
IMPLND, 614,IWATER ,IMPEV, 5,1951 ,2001 ,yearly
IMPLND, 614,IWATER ,PET , 5,1951 ,2001 ,yearly
IMPLND, 614,IWATER ,RETS , 5,1951 ,2001 ,yearly
IMPLND, 614,IWATER ,SUPY , 5,1951 ,2001 ,yearly
IMPLND, 614,IWATER ,SURO , 5,1951 ,2001 ,yearly
IMPLND, 614,IWATER ,SURS , 5,1951 ,2001 ,yearly
IMPLND, 711,IWATER ,IMPEV, 5,1951 ,2001 ,yearly
IMPLND, 711,IWATER ,PET , 5,1951 ,2001 ,yearly
IMPLND, 711,IWATER ,RETS , 5,1951 ,2001 ,yearly
IMPLND, 711,IWATER ,SUPY , 5,1951 ,2001 ,yearly
IMPLND, 711,IWATER ,SURO , 5,1951 ,2001 ,yearly
IMPLND, 711,IWATER ,SURS , 5,1951 ,2001 ,yearly
IMPLND, 712,IWATER ,IMPEV, 5,1951 ,2001 ,yearly
IMPLND, 712,IWATER ,PET , 5,1951 ,2001 ,yearly
IMPLND, 712,IWATER ,RETS , 5,1951 ,2001 ,yearly
IMPLND, 712,IWATER ,SUPY , 5,1951 ,2001 ,yearly
IMPLND, 712,IWATER ,SURO , 5,1951 ,2001 ,yearly
IMPLND, 712,IWATER ,SURS , 5,1951 ,2001 ,yearly
IMPLND, 713,IWATER ,IMPEV, 5,1951 ,2001 ,yearly
IMPLND, 713,IWATER ,PET , 5,1951 ,2001 ,yearly
IMPLND, 713,IWATER ,RETS , 5,1951 ,2001 ,yearly
IMPLND, 713,IWATER ,SUPY , 5,1951 ,2001 ,yearly
IMPLND, 713,IWATER ,SURO , 5,1951 ,2001 ,yearly
IMPLND, 713,IWATER ,SURS , 5,1951 ,2001 ,yearly
IMPLND, 714,IWATER ,IMPEV, 5,1951 ,2001 ,yearly
IMPLND, 714,IWATER ,PET , 5,1951 ,2001 ,yearly
IMPLND, 714,IWATER ,RETS , 5,1951 ,2001 ,yearly
IMPLND, 714,IWATER ,SUPY , 5,1951 ,2001 ,yearly
IMPLND, 714,IWATER ,SURO , 5,1951 ,2001 ,yearly
IMPLND, 714,IWATER ,SURS , 5,1951 ,2001 ,yearly
IMPLND, 811,IWATER ,IMPEV, 5,1951 ,2001 ,yearly
IMPLND, 811,IWATER ,PET , 5,1951 ,2001 ,yearly
IMPLND, 811,IWATER ,RETS , 5,1951 ,2001 ,yearly
IMPLND, 811,IWATER ,SUPY , 5,1951 ,2001 ,yearly
IMPLND, 811,IWATER ,SURO , 5,1951 ,2001 ,yearly
IMPLND, 811,IWATER ,SURS , 5,1951 ,2001 ,yearly
IMPLND, 812,IWATER ,IMPEV, 5,1951 ,2001 ,yearly
IMPLND, 812,IWATER ,PET , 5,1951 ,2001 ,yearly
IMPLND, 812,IWATER ,RETS , 5,1951 ,2001 ,yearly
IMPLND, 812,IWATER ,SUPY , 5,1951 ,2001 ,yearly
IMPLND, 812,IWATER ,SURO , 5,1951 ,2001 ,yearly
IMPLND, 812,IWATER ,SURS , 5,1951 ,2001 ,yearly
IMPLND, 813,IWATER ,IMPEV, 5,1951 ,2001 ,yearly
IMPLND, 813,IWATER ,PET , 5,1951 ,2001 ,yearly
IMPLND, 813,IWATER ,RETS , 5,1951 ,2001 ,yearly
IMPLND, 813,IWATER ,SUPY , 5,1951 ,2001 ,yearly
IMPLND, 813,IWATER ,SURO , 5,1951 ,2001 ,yearly
IMPLND, 813,IWATER ,SURS , 5,1951 ,2001 ,yearly
IMPLND, 814,IWATER ,IMPEV, 5,1951 ,2001 ,yearly
IMPLND, 814,IWATER ,PET , 5,1951 ,2001 ,yearly
IMPLND, 814,IWATER ,RETS , 5,1951 ,2001 ,yearly
IMPLND, 814,IWATER ,SUPY , 5,1951 ,2001 ,yearly
IMPLND, 814,IWATER ,SURO , 5,1951 ,2001 ,yearly
IMPLND, 814,IWATER ,SURS , 5,1951 ,2001 ,yearly
IMPLND, 822,IWATER ,IMPEV, 5,1951 ,2001 ,yearly
IMPLND, 822,IWATER ,PET , 5,1951 ,2001 ,yearly
IMPLND, 822,IWATER ,RETS , 5,1951 ,2001 ,yearly
IMPLND, 822,IWATER ,SUPY , 5,1951 ,2001 ,yearly
IMPLND, 822,IWATER ,SURO , 5,1951 ,2001 ,yearly
IMPLND, 822,IWATER ,SURS , 5,1951 ,2001 ,yearly
IMPLND, 823,IWATER ,IMPEV, 5,1951 ,2001 ,yearly
IMPLND, 823,IWATER ,PET , 5,1951 ,2001 ,yearly
IMPLND, 823,IWATER ,RETS , 5,1951 ,2001 ,yearly
IMPLND, 823,IWATER ,SUPY , 5,1951 ,2001 ,yearly
IMPLND, 823,IWATER ,SURO , 5,1951 ,2001 ,yearly
IMPLND, 823,IWATER ,SURS , 5,1951 ,2001 ,yearly
IMPLND, 824,IWATER ,IMPEV, 5,1951 ,2001 ,yearly
IMPLND, 824,IWATER ,PET , 5,1951 ,2001 ,yearly
IMPLND, 824,IWATER ,RETS , 5,1951 ,2001 ,yearly
IMPLND, 824,IWATER ,SUPY , 5,1951 ,2001 ,yearly
IMPLND, 824,IWATER ,SURO , 5,1951 ,2001 ,yearly
IMPLND, 824,IWATER ,SURS , 5,1951 ,2001 ,yearly
IMPLND, 901,IWATER ,IMPEV, 5,1951 ,2001 ,yearly
IMPLND, 901,IWATER ,PET , 5,1951 ,2001 ,yearly
IMPLND, 901,IWATER ,RETS , 5,1951 ,2001 ,yearly
IMPLND, 901,IWATER ,SUPY , 5,1951 ,2001 ,yearly
IMPLND, 901,IWATER ,SURO , 5,1951 ,2001 ,yearly
IMPLND, 901,IWATER ,SURS , 5,1951 ,2001 ,yearly
IMPLND, 902,IWATER ,IMPEV, 5,1951 ,2001 ,yearly
IMPLND, 902,IWATER ,PET , 5,1951 ,2001 ,yearly
IMPLND, 902,IWATER ,RETS , 5,1951 ,2001 ,yearly
IMPLND, 902,IWATER ,SUPY , 5,1951 ,2001 ,yearly
IMPLND, 902,IWATER ,SURO , 5,1951 ,2001 ,yearly
IMPLND, 902,IWATER ,SURS , 5,1951 ,2001 ,yearly
IMPLND, 903,IWATER ,IMPEV, 5,1951 ,2001 ,yearly
IMPLND, 903,IWATER ,PET , 5,1951 ,2001 ,yearly
IMPLND, 903,IWATER ,RETS , 5,1951 ,2001 ,yearly
IMPLND, 903,IWATER ,SUPY , 5,1951 ,2001 ,yearly
IMPLND, 903,IWATER ,SURO , 5,1951 ,2001 ,yearly
IMPLND, 903,IWATER ,SURS , 5,1951 ,2001 ,yearly
IMPLND, 904,IWATER ,IMPEV, 5,1951 ,2001 ,yearly
IMPLND, 904,IWATER ,PET , 5,1951 ,2001 ,yearly
IMPLND, 904,IWATER ,RETS , 5,1951 ,2001 ,yearly
IMPLND, 904,IWATER ,SUPY , 5,1951 ,2001 ,yearly
IMPLND, 904,IWATER ,SURO , 5,1951 ,2001 ,yearly
IMPLND, 904,IWATER ,SURS , 5,1951 ,2001 ,yearly
PERLND, 11,PWATER ,AGWET, 5,1951 ,2001 ,yearly
PERLND, 11,PWATER ,AGWI , 5,1951 ,2001 ,yearly
PERLND, 11,PWATER ,AGWO , 5,1951 ,2001 ,yearly
PERLND, 11,PWATER ,AGWS , 5,1951 ,2001 ,yearly
PERLND, 11,PWATER ,BASET, 5,1951 ,2001 ,yearly
PERLND, 11,PWATER ,CEPE , 5,1951 ,2001 ,yearly
PERLND, 11,PWATER ,CEPS , 5,1951 ,2001 ,yearly
PERLND, 11,PWATER ,GWVS , 5,1951 ,2001 ,yearly
PERLND, 11,PWATER ,IFWI , 5,1951 ,2001 ,yearly
PERLND, 11,PWATER ,IFWO , 5,1951 ,2001 ,yearly
PERLND, 11,PWATER ,IFWS , 5,1951 ,2001 ,yearly
PERLND, 11,PWATER ,IGWI , 5,1951 ,2001 ,yearly
PERLND, 11,PWATER ,INFIL, 5,1951 ,2001 ,yearly
PERLND, 11,PWATER ,LZET , 5,1951 ,2001 ,yearly
PERLND, 11,PWATER ,LZI , 5,1951 ,2001 ,yearly
PERLND, 11,PWATER ,LZS , 5,1951 ,2001 ,yearly
PERLND, 11,PWATER ,PERC , 5,1951 ,2001 ,yearly
PERLND, 11,PWATER ,PERO , 5,1951 ,2001 ,yearly
PERLND, 11,PWATER ,PERS , 5,1951 ,2001 ,yearly
PERLND, 11,PWATER ,PET , 5,1951 ,2001 ,yearly
PERLND, 11,PWATER ,SUPY , 5,1951 ,2001 ,yearly
PERLND, 11,PWATER ,SURO , 5,1951 ,2001 ,yearly
PERLND, 11,PWATER ,SURS , 5,1951 ,2001 ,yearly
PERLND, 11,PWATER ,TAET , 5,1951 ,2001 ,yearly
PERLND, 11,PWATER ,UZET , 5,1951 ,2001 ,yearly
PERLND, 11,PWATER ,UZI , 5,1951 ,2001 ,yearly
PERLND, 11,PWATER ,UZS , 5,1951 ,2001 ,yearly
PERLND, 12,PWATER ,AGWET, 5,1951 ,2001 ,yearly
PERLND, 12,PWATER ,AGWI , 5,1951 ,2001 ,yearly
PERLND, 12,PWATER ,AGWO , 5,1951 ,2001 ,yearly
PERLND, 12,PWATER ,AGWS , 5,1951 ,2001 ,yearly
PERLND, 12,PWATER ,BASET, 5,1951 ,2001 ,yearly
PERLND, 12,PWATER ,CEPE , 5,1951 ,2001 ,yearly
PERLND, 12,PWATER ,CEPS , 5,1951 ,2001 ,yearly
PERLND, 12,PWATER ,GWVS , 5,1951 ,2001 ,yearly
PERLND, 12,PWATER ,IFWI , 5,1951 ,2001 ,yearly
PERLND, 12,PWATER ,IFWO , 5,1951 ,2001 ,yearly
PERLND, 12,PWATER ,IFWS , 5,1951 ,2001 ,yearly
PERLND, 12,PWATER ,IGWI , 5,1951 ,2001 ,yearly
PERLND, 12,PWATER ,INFIL, 5,1951 ,2001 ,yearly
PERLND, 12,PWATER ,LZET , 5,1951 ,2001 ,yearly
PERLND, 12,PWATER ,LZI , 5,1951 ,2001 ,yearly
PERLND, 12,PWATER ,LZS , 5,1951 ,2001 ,yearly
PERLND, 12,PWATER ,PERC , 5,1951 ,2001 ,yearly
PERLND, 12,PWATER ,PERO , 5,1951 ,2001 ,yearly
PERLND, 12,PWATER ,PERS , 5,1951 ,2001 ,yearly
PERLND, 12,PWATER ,PET , 5,1951 ,2001 ,yearly
PERLND, 12,PWATER ,SUPY , 5,1951 ,2001 ,yearly
PERLND, 12,PWATER ,SURO , 5,1951 ,2001 ,yearly
PERLND, 12,PWATER ,SURS , 5,1951 ,2001 ,yearly
PERLND, 12,PWATER ,TAET , 5,1951 ,2001 ,yearly
PERLND, 12,PWATER ,UZET , 5,1951 ,2001 ,yearly
PERLND, 12,PWATER ,UZI , 5,1951 ,2001 ,yearly
PERLND, 12,PWATER ,UZS , 5,1951 ,2001 ,yearly
PERLND, 13,PWATER ,AGWET, 5,1951 ,2001 ,yearly
PERLND, 13,PWATER ,AGWI , 5,1951 ,2001 ,yearly
PERLND, 13,PWATER ,AGWO , 5,1951 ,2001 ,yearly
PERLND, 13,PWATER ,AGWS , 5,1951 ,2001 ,yearly
PERLND, 13,PWATER ,BASET, 5,1951 ,2001 ,yearly
PERLND, 13,PWATER ,CEPE , 5,1951 ,2001 ,yearly
PERLND, 13,PWATER ,CEPS , 5,1951 ,2001 ,yearly
PERLND, 13,PWATER ,GWVS , 5,1951 ,2001 ,yearly
PERLND, 13,PWATER ,IFWI , 5,1951 ,2001 ,yearly
PERLND, 13,PWATER ,IFWO , 5,1951 ,2001 ,yearly
PERLND, 13,PWATER ,IFWS , 5,1951 ,2001 ,yearly
PERLND, 13,PWATER ,IGWI , 5,1951 ,2001 ,yearly
PERLND, 13,PWATER ,INFIL, 5,1951 ,2001 ,yearly
PERLND, 13,PWATER ,LZET , 5,1951 ,2001 ,yearly
PERLND, 13,PWATER ,LZI , 5,1951 ,2001 ,yearly
PERLND, 13,PWATER ,LZS , 5,1951 ,2001 ,yearly
PERLND, 13,PWATER ,PERC , 5,1951 ,2001 ,yearly
PERLND, 13,PWATER ,PERO , 5,1951 ,2001 ,yearly
PERLND, 13,PWATER ,PERS , 5,1951 ,2001 ,yearly
PERLND, 13,PWATER ,PET , 5,1951 ,2001 ,yearly
PERLND, 13,PWATER ,SUPY , 5,1951 ,2001 ,yearly
PERLND, 13,PWATER ,SURO , 5,1951 ,2001 ,yearly
PERLND, 13,PWATER ,SURS , 5,1951 ,2001 ,yearly
PERLND, 13,PWATER ,TAET , 5,1951 ,2001 ,yearly
PERLND, 13,PWATER ,UZET , 5,1951 ,2001 ,yearly
PERLND, 13,PWATER ,UZI , 5,1951 ,2001 ,yearly
PERLND, 13,PWATER ,UZS , 5,1951 ,2001 ,yearly
PERLND, 14,PWATER ,AGWET, 5,1951 ,2001 ,yearly
PERLND, 14,PWATER ,AGWI , 5,1951 ,2001 ,yearly
PERLND, 14,PWATER ,AGWO , 5,1951 ,2001 ,yearly
PERLND, 14,PWATER ,AGWS , 5,1951 ,2001 ,yearly
PERLND, 14,PWATER ,BASET, 5,1951 ,2001 ,yearly
PERLND, 14,PWATER ,CEPE , 5,1951 ,2001 ,yearly
PERLND, 14,PWATER ,CEPS , 5,1951 ,2001 ,yearly
PERLND, 14,PWATER ,GWVS , 5,1951 ,2001 ,yearly
PERLND, 14,PWATER ,IFWI , 5,1951 ,2001 ,yearly
PERLND, 14,PWATER ,IFWO , 5,1951 ,2001 ,yearly
PERLND, 14,PWATER ,IFWS , 5,1951 ,2001 ,yearly
PERLND, 14,PWATER ,IGWI , 5,1951 ,2001 ,yearly
PERLND, 14,PWATER ,INFIL, 5,1951 ,2001 ,yearly
PERLND, 14,PWATER ,LZET , 5,1951 ,2001 ,yearly
PERLND, 14,PWATER ,LZI , 5,1951 ,2001 ,yearly
PERLND, 14,PWATER ,LZS , 5,1951 ,2001 ,yearly
PERLND, 14,PWATER ,PERC , 5,1951 ,2001 ,yearly
PERLND, 14,PWATER ,PERO , 5,1951 ,2001 ,yearly
PERLND, 14,PWATER ,PERS , 5,1951 ,2001 ,yearly
PERLND, 14,PWATER ,PET , 5,1951 ,2001 ,yearly
PERLND, 14,PWATER ,SUPY , 5,1951 ,2001 ,yearly
PERLND, 14,PWATER ,SURO , 5,1951 ,2001 ,yearly
PERLND, 14,PWATER ,SURS , 5,1951 ,2001 ,yearly
PERLND, 14,PWATER ,TAET , 5,1951 ,2001 ,yearly
PERLND, 14,PWATER ,UZET , 5,1951 ,2001 ,yearly
PERLND, 14,PWATER ,UZI , 5,1951 ,2001 ,yearly
PERLND, 14,PWATER ,UZS , 5,1951 ,2001 ,yearly
PERLND, 15,PWATER ,AGWET, 5,1951 ,2001 ,yearly
PERLND, 15,PWATER ,AGWI , 5,1951 ,2001 ,yearly
PERLND, 15,PWATER ,AGWO , 5,1951 ,2001 ,yearly
PERLND, 15,PWATER ,AGWS , 5,1951 ,2001 ,yearly
PERLND, 15,PWATER ,BASET, 5,1951 ,2001 ,yearly
PERLND, 15,PWATER ,CEPE , 5,1951 ,2001 ,yearly
PERLND, 15,PWATER ,CEPS , 5,1951 ,2001 ,yearly
PERLND, 15,PWATER ,GWVS , 5,1951 ,2001 ,yearly
PERLND, 15,PWATER ,IFWI , 5,1951 ,2001 ,yearly
PERLND, 15,PWATER ,IFWO , 5,1951 ,2001 ,yearly
PERLND, 15,PWATER ,IFWS , 5,1951 ,2001 ,yearly
PERLND, 15,PWATER ,IGWI , 5,1951 ,2001 ,yearly
PERLND, 15,PWATER ,INFIL, 5,1951 ,2001 ,yearly
PERLND, 15,PWATER ,LZET , 5,1951 ,2001 ,yearly
PERLND, 15,PWATER ,LZI , 5,1951 ,2001 ,yearly
PERLND, 15,PWATER ,LZS , 5,1951 ,2001 ,yearly
PERLND, 15,PWATER ,PERC , 5,1951 ,2001 ,yearly
PERLND, 15,PWATER ,PERO , 5,1951 ,2001 ,yearly
PERLND, 15,PWATER ,PERS , 5,1951 ,2001 ,yearly
PERLND, 15,PWATER ,PET , 5,1951 ,2001 ,yearly
PERLND, 15,PWATER ,SUPY , 5,1951 ,2001 ,yearly
PERLND, 15,PWATER ,SURO , 5,1951 ,2001 ,yearly
PERLND, 15,PWATER ,SURS , 5,1951 ,2001 ,yearly
PERLND, 15,PWATER ,TAET , 5,1951 ,2001 ,yearly
PERLND, 15,PWATER ,UZET , 5,1951 ,2001 ,yearly
PERLND, 15,PWATER ,UZI , 5,1951 ,2001 ,yearly
PERLND, 15,PWATER ,UZS , 5,1951 ,2001 ,yearly
PERLND, 21,PWATER ,AGWET, 5,1951 ,2001 ,yearly
PERLND, 21,PWATER ,AGWI , 5,1951 ,2001 ,yearly
PERLND, 21,PWATER ,AGWO , 5,1951 ,2001 ,yearly
PERLND, 21,PWATER ,AGWS , 5,1951 ,2001 ,yearly
PERLND, 21,PWATER ,BASET, 5,1951 ,2001 ,yearly
PERLND, 21,PWATER ,CEPE , 5,1951 ,2001 ,yearly
PERLND, 21,PWATER ,CEPS , 5,1951 ,2001 ,yearly
PERLND, 21,PWATER ,GWVS , 5,1951 ,2001 ,yearly
PERLND, 21,PWATER ,IFWI , 5,1951 ,2001 ,yearly
PERLND, 21,PWATER ,IFWO , 5,1951 ,2001 ,yearly
PERLND, 21,PWATER ,IFWS , 5,1951 ,2001 ,yearly
PERLND, 21,PWATER ,IGWI , 5,1951 ,2001 ,yearly
PERLND, 21,PWATER ,INFIL, 5,1951 ,2001 ,yearly
PERLND, 21,PWATER ,LZET , 5,1951 ,2001 ,yearly
PERLND, 21,PWATER ,LZI , 5,1951 ,2001 ,yearly
PERLND, 21,PWATER ,LZS , 5,1951 ,2001 ,yearly
PERLND, 21,PWATER ,PERC , 5,1951 ,2001 ,yearly
PERLND, 21,PWATER ,PERO , 5,1951 ,2001 ,yearly
PERLND, 21,PWATER ,PERS , 5,1951 ,2001 ,yearly
PERLND, 21,PWATER ,PET , 5,1951 ,2001 ,yearly
PERLND, 21,PWATER ,SUPY , 5,1951 ,2001 ,yearly
PERLND, 21,PWATER ,SURO , 5,1951 ,2001 ,yearly
PERLND, 21,PWATER ,SURS , 5,1951 ,2001 ,yearly
PERLND, 21,PWATER ,TAET , 5,1951 ,2001 ,yearly
PERLND, 21,PWATER ,UZET , 5,1951 ,2001 ,yearly
PERLND, 21,PWATER ,UZI , 5,1951 ,2001 ,yearly
PERLND, 21,PWATER ,UZS , 5,1951 ,2001 ,yearly
PERLND, 22,PWATER ,AGWET, 5,1951 ,2001 ,yearly
PERLND, 22,PWATER ,AGWI , 5,1951 ,2001 ,yearly
PERLND, 22,PWATER ,AGWO , 5,1951 ,2001 ,yearly
PERLND, 22,PWATER ,AGWS , 5,1951 ,2001 ,yearly
PERLND, 22,PWATER ,BASET, 5,1951 ,2001 ,yearly
PERLND, 22,PWATER ,CEPE , 5,1951 ,2001 ,yearly
PERLND, 22,PWATER ,CEPS , 5,1951 ,2001 ,yearly
PERLND, 22,PWATER ,GWVS , 5,1951 ,2001 ,yearly
PERLND, 22,PWATER ,IFWI , 5,1951 ,2001 ,yearly
PERLND, 22,PWATER ,IFWO , 5,1951 ,2001 ,yearly
PERLND, 22,PWATER ,IFWS , 5,1951 ,2001 ,yearly
PERLND, 22,PWATER ,IGWI , 5,1951 ,2001 ,yearly
PERLND, 22,PWATER ,INFIL, 5,1951 ,2001 ,yearly
PERLND, 22,PWATER ,LZET , 5,1951 ,2001 ,yearly
PERLND, 22,PWATER ,LZI , 5,1951 ,2001 ,yearly
PERLND, 22,PWATER ,LZS , 5,1951 ,2001 ,yearly
PERLND, 22,PWATER ,PERC , 5,1951 ,2001 ,yearly
PERLND, 22,PWATER ,PERO , 5,1951 ,2001 ,yearly
PERLND, 22,PWATER ,PERS , 5,1951 ,2001 ,yearly
PERLND, 22,PWATER ,PET , 5,1951 ,2001 ,yearly
PERLND, 22,PWATER ,SUPY , 5,1951 ,2001 ,yearly
PERLND, 22,PWATER ,SURO , 5,1951 ,2001 ,yearly
PERLND, 22,PWATER ,SURS , 5,1951 ,2001 ,yearly
PERLND, 22,PWATER ,TAET , 5,1951 ,2001 ,yearly
PERLND, 22,PWATER ,UZET , 5,1951 ,2001 ,yearly
PERLND, 22,PWATER ,UZI , 5,1951 ,2001 ,yearly
PERLND, 22,PWATER ,UZS , 5,1951 ,2001 ,yearly
PERLND, 23,PWATER ,AGWET, 5,1951 ,2001 ,yearly
PERLND, 23,PWATER ,AGWI , 5,1951 ,2001 ,yearly
PERLND, 23,PWATER ,AGWO , 5,1951 ,2001 ,yearly
PERLND, 23,PWATER ,AGWS , 5,1951 ,2001 ,yearly
PERLND, 23,PWATER ,BASET, 5,1951 ,2001 ,yearly
PERLND, 23,PWATER ,CEPE , 5,1951 ,2001 ,yearly
PERLND, 23,PWATER ,CEPS , 5,1951 ,2001 ,yearly
PERLND, 23,PWATER ,GWVS , 5,1951 ,2001 ,yearly
PERLND, 23,PWATER ,IFWI , 5,1951 ,2001 ,yearly
PERLND, 23,PWATER ,IFWO , 5,1951 ,2001 ,yearly
PERLND, 23,PWATER ,IFWS , 5,1951 ,2001 ,yearly
PERLND, 23,PWATER ,IGWI , 5,1951 ,2001 ,yearly
PERLND, 23,PWATER ,INFIL, 5,1951 ,2001 ,yearly
PERLND, 23,PWATER ,LZET , 5,1951 ,2001 ,yearly
PERLND, 23,PWATER ,LZI , 5,1951 ,2001 ,yearly
PERLND, 23,PWATER ,LZS , 5,1951 ,2001 ,yearly
PERLND, 23,PWATER ,PERC , 5,1951 ,2001 ,yearly
PERLND, 23,PWATER ,PERO , 5,1951 ,2001 ,yearly
PERLND, 23,PWATER ,PERS , 5,1951 ,2001 ,yearly
PERLND, 23,PWATER ,PET , 5,1951 ,2001 ,yearly
PERLND, 23,PWATER ,SUPY , 5,1951 ,2001 ,yearly
PERLND, 23,PWATER ,SURO , 5,1951 ,2001 ,yearly
PERLND, 23,PWATER ,SURS , 5,1951 ,2001 ,yearly
PERLND, 23,PWATER ,TAET , 5,1951 ,2001 ,yearly
PERLND, 23,PWATER ,UZET , 5,1951 ,2001 ,yearly
PERLND, 23,PWATER ,UZI , 5,1951 ,2001 ,yearly
PERLND, 23,PWATER ,UZS , 5,1951 ,2001 ,yearly
PERLND, 24,PWATER ,AGWET, 5,1951 ,2001 ,yearly
PERLND, 24,PWATER ,AGWI , 5,1951 ,2001 ,yearly
PERLND, 24,PWATER ,AGWO , 5,1951 ,2001 ,yearly
PERLND, 24,PWATER ,AGWS , 5,1951 ,2001 ,yearly
PERLND, 24,PWATER ,BASET, 5,1951 ,2001 ,yearly
PERLND, 24,PWATER ,CEPE , 5,1951 ,2001 ,yearly
PERLND, 24,PWATER ,CEPS , 5,1951 ,2001 ,yearly
PERLND, 24,PWATER ,GWVS , 5,1951 ,2001 ,yearly
PERLND, 24,PWATER ,IFWI , 5,1951 ,2001 ,yearly
PERLND, 24,PWATER ,IFWO , 5,1951 ,2001 ,yearly
PERLND, 24,PWATER ,IFWS , 5,1951 ,2001 ,yearly
PERLND, 24,PWATER ,IGWI , 5,1951 ,2001 ,yearly
PERLND, 24,PWATER ,INFIL, 5,1951 ,2001 ,yearly
PERLND, 24,PWATER ,LZET , 5,1951 ,2001 ,yearly
PERLND, 24,PWATER ,LZI , 5,1951 ,2001 ,yearly
PERLND, 24,PWATER ,LZS , 5,1951 ,2001 ,yearly
PERLND, 24,PWATER ,PERC , 5,1951 ,2001 ,yearly
PERLND, 24,PWATER ,PERO , 5,1951 ,2001 ,yearly
PERLND, 24,PWATER ,PERS , 5,1951 ,2001 ,yearly
PERLND, 24,PWATER ,PET , 5,1951 ,2001 ,yearly
PERLND, 24,PWATER ,SUPY , 5,1951 ,2001 ,yearly
PERLND, 24,PWATER ,SURO , 5,1951 ,2001 ,yearly
PERLND, 24,PWATER ,SURS , 5,1951 ,2001 ,yearly
PERLND, 24,PWATER ,TAET , 5,1951 ,2001 ,yearly
PERLND, 24,PWATER ,UZET , 5,1951 ,2001 ,yearly
PERLND, 24,PWATER ,UZI , 5,1951 ,2001 ,yearly
PERLND, 24,PWATER ,UZS , 5,1951 ,2001 ,yearly
PERLND, 25,PWATER ,AGWET, 5,1951 ,2001 ,yearly
PERLND, 25,PWATER ,AGWI , 5,1951 ,2001 ,yearly
PERLND, 25,PWATER ,AGWO , 5,1951 ,2001 ,yearly
PERLND, 25,PWATER ,AGWS , 5,1951 ,2001 ,yearly
PERLND, 25,PWATER ,BASET, 5,1951 ,2001 ,yearly
PERLND, 25,PWATER ,CEPE , 5,1951 ,2001 ,yearly
PERLND, 25,PWATER ,CEPS , 5,1951 ,2001 ,yearly
PERLND, 25,PWATER ,GWVS , 5,1951 ,2001 ,yearly
PERLND, 25,PWATER ,IFWI , 5,1951 ,2001 ,yearly
PERLND, 25,PWATER ,IFWO , 5,1951 ,2001 ,yearly
PERLND, 25,PWATER ,IFWS , 5,1951 ,2001 ,yearly
PERLND, 25,PWATER ,IGWI , 5,1951 ,2001 ,yearly
PERLND, 25,PWATER ,INFIL, 5,1951 ,2001 ,yearly
PERLND, 25,PWATER ,LZET , 5,1951 ,2001 ,yearly
PERLND, 25,PWATER ,LZI , 5,1951 ,2001 ,yearly
PERLND, 25,PWATER ,LZS , 5,1951 ,2001 ,yearly
PERLND, 25,PWATER ,PERC , 5,1951 ,2001 ,yearly
PERLND, 25,PWATER ,PERO , 5,1951 ,2001 ,yearly
PERLND, 25,PWATER ,PERS , 5,1951 ,2001 ,yearly
PERLND, 25,PWATER ,PET , 5,1951 ,2001 ,yearly
PERLND, 25,PWATER ,SUPY , 5,1951 ,2001 ,yearly
PERLND, 25,PWATER ,SURO , 5,1951 ,2001 ,yearly
PERLND, 25,PWATER ,SURS , 5,1951 ,2001 ,yearly
PERLND, 25,PWATER ,TAET , 5,1951 ,2001 ,yearly
PERLND, 25,PWATER ,UZET , 5,1951 ,2001 ,yearly
PERLND, 25,PWATER ,UZI , 5,1951 ,2001 ,yearly
PERLND, 25,PWATER ,UZS , 5,1951 ,2001 ,yearly
PERLND, 31,PWATER ,AGWET, 5,1951 ,2001 ,yearly
PERLND, 31,PWATER ,AGWI , 5,1951 ,2001 ,yearly
PERLND, 31,PWATER ,AGWO , 5,1951 ,2001 ,yearly
PERLND, 31,PWATER ,AGWS , 5,1951 ,2001 ,yearly
PERLND, 31,PWATER ,BASET, 5,1951 ,2001 ,yearly
PERLND, 31,PWATER ,CEPE , 5,1951 ,2001 ,yearly
PERLND, 31,PWATER ,CEPS , 5,1951 ,2001 ,yearly
PERLND, 31,PWATER ,GWVS , 5,1951 ,2001 ,yearly
PERLND, 31,PWATER ,IFWI , 5,1951 ,2001 ,yearly
PERLND, 31,PWATER ,IFWO , 5,1951 ,2001 ,yearly
PERLND, 31,PWATER ,IFWS , 5,1951 ,2001 ,yearly
PERLND, 31,PWATER ,IGWI , 5,1951 ,2001 ,yearly
PERLND, 31,PWATER ,INFIL, 5,1951 ,2001 ,yearly
PERLND, 31,PWATER ,LZET , 5,1951 ,2001 ,yearly
PERLND, 31,PWATER ,LZI , 5,1951 ,2001 ,yearly
PERLND, 31,PWATER ,LZS , 5,1951 ,2001 ,yearly
PERLND, 31,PWATER ,PERC , 5,1951 ,2001 ,yearly
PERLND, 31,PWATER ,PERO , 5,1951 ,2001 ,yearly
PERLND, 31,PWATER ,PERS , 5,1951 ,2001 ,yearly
PERLND, 31,PWATER ,PET , 5,1951 ,2001 ,yearly
PERLND, 31,PWATER ,SUPY , 5,1951 ,2001 ,yearly
PERLND, 31,PWATER ,SURO , 5,1951 ,2001 ,yearly
PERLND, 31,PWATER ,SURS , 5,1951 ,2001 ,yearly
PERLND, 31,PWATER ,TAET , 5,1951 ,2001 ,yearly
PERLND, 31,PWATER ,UZET , 5,1951 ,2001 ,yearly
PERLND, 31,PWATER ,UZI , 5,1951 ,2001 ,yearly
PERLND, 31,PWATER ,UZS , 5,1951 ,2001 ,yearly
PERLND, 32,PWATER ,AGWET, 5,1951 ,2001 ,yearly
PERLND, 32,PWATER ,AGWI , 5,1951 ,2001 ,yearly
PERLND, 32,PWATER ,AGWO , 5,1951 ,2001 ,yearly
PERLND, 32,PWATER ,AGWS , 5,1951 ,2001 ,yearly
PERLND, 32,PWATER ,BASET, 5,1951 ,2001 ,yearly
PERLND, 32,PWATER ,CEPE , 5,1951 ,2001 ,yearly
PERLND, 32,PWATER ,CEPS , 5,1951 ,2001 ,yearly
PERLND, 32,PWATER ,GWVS , 5,1951 ,2001 ,yearly
PERLND, 32,PWATER ,IFWI , 5,1951 ,2001 ,yearly
PERLND, 32,PWATER ,IFWO , 5,1951 ,2001 ,yearly
PERLND, 32,PWATER ,IFWS , 5,1951 ,2001 ,yearly
PERLND, 32,PWATER ,IGWI , 5,1951 ,2001 ,yearly
PERLND, 32,PWATER ,INFIL, 5,1951 ,2001 ,yearly
PERLND, 32,PWATER ,LZET , 5,1951 ,2001 ,yearly
PERLND, 32,PWATER ,LZI , 5,1951 ,2001 ,yearly
PERLND, 32,PWATER ,LZS , 5,1951 ,2001 ,yearly
PERLND, 32,PWATER ,PERC , 5,1951 ,2001 ,yearly
PERLND, 32,PWATER ,PERO , 5,1951 ,2001 ,yearly
PERLND, 32,PWATER ,PERS , 5,1951 ,2001 ,yearly
PERLND, 32,PWATER ,PET , 5,1951 ,2001 ,yearly
PERLND, 32,PWATER ,SUPY , 5,1951 ,2001 ,yearly
PERLND, 32,PWATER ,SURO , 5,1951 ,2001 ,yearly
PERLND, 32,PWATER ,SURS , 5,1951 ,2001 ,yearly
PERLND, 32,PWATER ,TAET , 5,1951 ,2001 ,yearly
PERLND, 32,PWATER ,UZET , 5,1951 ,2001 ,yearly
PERLND, 32,PWATER ,UZI , 5,1951 ,2001 ,yearly
PERLND, 32,PWATER ,UZS , 5,1951 ,2001 ,yearly
PERLND, 33,PWATER ,AGWET, 5,1951 ,2001 ,yearly
PERLND, 33,PWATER ,AGWI , 5,1951 ,2001 ,yearly
PERLND, 33,PWATER ,AGWO , 5,1951 ,2001 ,yearly
PERLND, 33,PWATER ,AGWS , 5,1951 ,2001 ,yearly
PERLND, 33,PWATER ,BASET, 5,1951 ,2001 ,yearly
PERLND, 33,PWATER ,CEPE , 5,1951 ,2001 ,yearly
PERLND, 33,PWATER ,CEPS , 5,1951 ,2001 ,yearly
PERLND, 33,PWATER ,GWVS , 5,1951 ,2001 ,yearly
PERLND, 33,PWATER ,IFWI , 5,1951 ,2001 ,yearly
PERLND, 33,PWATER ,IFWO , 5,1951 ,2001 ,yearly
PERLND, 33,PWATER ,IFWS , 5,1951 ,2001 ,yearly
PERLND, 33,PWATER ,IGWI , 5,1951 ,2001 ,yearly
PERLND, 33,PWATER ,INFIL, 5,1951 ,2001 ,yearly
PERLND, 33,PWATER ,LZET , 5,1951 ,2001 ,yearly
PERLND, 33,PWATER ,LZI , 5,1951 ,2001 ,yearly
PERLND, 33,PWATER ,LZS , 5,1951 ,2001 ,yearly
PERLND, 33,PWATER ,PERC , 5,1951 ,2001 ,yearly
PERLND, 33,PWATER ,PERO , 5,1951 ,2001 ,yearly
PERLND, 33,PWATER ,PERS , 5,1951 ,2001 ,yearly
PERLND, 33,PWATER ,PET , 5,1951 ,2001 ,yearly
PERLND, 33,PWATER ,SUPY , 5,1951 ,2001 ,yearly
PERLND, 33,PWATER ,SURO , 5,1951 ,2001 ,yearly
PERLND, 33,PWATER ,SURS , 5,1951 ,2001 ,yearly
PERLND, 33,PWATER ,TAET , 5,1951 ,2001 ,yearly
PERLND, 33,PWATER ,UZET , 5,1951 ,2001 ,yearly
PERLND, 33,PWATER ,UZI , 5,1951 ,2001 ,yearly
PERLND, 33,PWATER ,UZS , 5,1951 ,2001 ,yearly
PERLND, 35,PWATER ,AGWET, 5,1951 ,2001 ,yearly
PERLND, 35,PWATER ,AGWI , 5,1951 ,2001 ,yearly
PERLND, 35,PWATER ,AGWO , 5,1951 ,2001 ,yearly
PERLND, 35,PWATER ,AGWS , 5,1951 ,2001 ,yearly
PERLND, 35,PWATER ,BASET, 5,1951 ,2001 ,yearly
PERLND, 35,PWATER ,CEPE , 5,1951 ,2001 ,yearly
PERLND, 35,PWATER ,CEPS , 5,1951 ,2001 ,yearly
PERLND, 35,PWATER ,GWVS , 5,1951 ,2001 ,yearly
PERLND, 35,PWATER ,IFWI , 5,1951 ,2001 ,yearly
PERLND, 35,PWATER ,IFWO , 5,1951 ,2001 ,yearly
PERLND, 35,PWATER ,IFWS , 5,1951 ,2001 ,yearly
PERLND, 35,PWATER ,IGWI , 5,1951 ,2001 ,yearly
PERLND, 35,PWATER ,INFIL, 5,1951 ,2001 ,yearly
PERLND, 35,PWATER ,LZET , 5,1951 ,2001 ,yearly
PERLND, 35,PWATER ,LZI , 5,1951 ,2001 ,yearly
PERLND, 35,PWATER ,LZS , 5,1951 ,2001 ,yearly
PERLND, 35,PWATER ,PERC , 5,1951 ,2001 ,yearly
PERLND, 35,PWATER ,PERO , 5,1951 ,2001 ,yearly
PERLND, 35,PWATER ,PERS , 5,1951 ,2001 ,yearly
PERLND, 35,PWATER ,PET , 5,1951 ,2001 ,yearly
PERLND, 35,PWATER ,SUPY , 5,1951 ,2001 ,yearly
PERLND, 35,PWATER ,SURO , 5,1951 ,2001 ,yearly
PERLND, 35,PWATER ,SURS , 5,1951 ,2001 ,yearly
PERLND, 35,PWATER ,TAET , 5,1951 ,2001 ,yearly
PERLND, 35,PWATER ,UZET , 5,1951 ,2001 ,yearly
PERLND, 35,PWATER ,UZI , 5,1951 ,2001 ,yearly
PERLND, 35,PWATER ,UZS , 5,1951 ,2001 ,yearly
PERLND, 111,PWATER ,AGWET, 5,1951 ,2001 ,yearly
PERLND, 111,PWATER ,AGWI , 5,1951 ,2001 ,yearly
PERLND, 111,PWATER ,AGWO , 5,1951 ,2001 ,yearly
PERLND, 111,PWATER ,AGWS , 5,1951 ,2001 ,yearly
PERLND, 111,PWATER ,BASET, 5,1951 ,2001 ,yearly
PERLND, 111,PWATER ,CEPE , 5,1951 ,2001 ,yearly
PERLND, 111,PWATER ,CEPS , 5,1951 ,2001 ,yearly
PERLND, 111,PWATER ,GWVS , 5,1951 ,2001 ,yearly
PERLND, 111,PWATER ,IFWI , 5,1951 ,2001 ,yearly
PERLND, 111,PWATER ,IFWO , 5,1951 ,2001 ,yearly
PERLND, 111,PWATER ,IFWS , 5,1951 ,2001 ,yearly
PERLND, 111,PWATER ,IGWI , 5,1951 ,2001 ,yearly
PERLND, 111,PWATER ,INFIL, 5,1951 ,2001 ,yearly
PERLND, 111,PWATER ,LZET , 5,1951 ,2001 ,yearly
PERLND, 111,PWATER ,LZI , 5,1951 ,2001 ,yearly
PERLND, 111,PWATER ,LZS , 5,1951 ,2001 ,yearly
PERLND, 111,PWATER ,PERC , 5,1951 ,2001 ,yearly
PERLND, 111,PWATER ,PERO , 5,1951 ,2001 ,yearly
PERLND, 111,PWATER ,PERS , 5,1951 ,2001 ,yearly
PERLND, 111,PWATER ,PET , 5,1951 ,2001 ,yearly
PERLND, 111,PWATER ,SUPY , 5,1951 ,2001 ,yearly
PERLND, 111,PWATER ,SURO , 5,1951 ,2001 ,yearly
PERLND, 111,PWATER ,SURS , 5,1951 ,2001 ,yearly
PERLND, 111,PWATER ,TAET , 5,1951 ,2001 ,yearly
PERLND, 111,PWATER ,UZET , 5,1951 ,2001 ,yearly
PERLND, 111,PWATER ,UZI , 5,1951 ,2001 ,yearly
PERLND, 111,PWATER ,UZS , 5,1951 ,2001 ,yearly
PERLND, 112,PWATER ,AGWET, 5,1951 ,2001 ,yearly
PERLND, 112,PWATER ,AGWI , 5,1951 ,2001 ,yearly
PERLND, 112,PWATER ,AGWO , 5,1951 ,2001 ,yearly
PERLND, 112,PWATER ,AGWS , 5,1951 ,2001 ,yearly
PERLND, 112,PWATER ,BASET, 5,1951 ,2001 ,yearly
PERLND, 112,PWATER ,CEPE , 5,1951 ,2001 ,yearly
PERLND, 112,PWATER ,CEPS , 5,1951 ,2001 ,yearly
PERLND, 112,PWATER ,GWVS , 5,1951 ,2001 ,yearly
PERLND, 112,PWATER ,IFWI , 5,1951 ,2001 ,yearly
PERLND, 112,PWATER ,IFWO , 5,1951 ,2001 ,yearly
PERLND, 112,PWATER ,IFWS , 5,1951 ,2001 ,yearly
PERLND, 112,PWATER ,IGWI , 5,1951 ,2001 ,yearly
PERLND, 112,PWATER ,INFIL, 5,1951 ,2001 ,yearly
PERLND, 112,PWATER ,LZET , 5,1951 ,2001 ,yearly
PERLND, 112,PWATER ,LZI , 5,1951 ,2001 ,yearly
PERLND, 112,PWATER ,LZS , 5,1951 ,2001 ,yearly
PERLND, 112,PWATER ,PERC , 5,1951 ,2001 ,yearly
PERLND, 112,PWATER ,PERO , 5,1951 ,2001 ,yearly
PERLND, 112,PWATER ,PERS , 5,1951 ,2001 ,yearly
PERLND, 112,PWATER ,PET , 5,1951 ,2001 ,yearly
PERLND, 112,PWATER ,SUPY , 5,1951 ,2001 ,yearly
PERLND, 112,PWATER ,SURO , 5,1951 ,2001 ,yearly
PERLND, 112,PWATER ,SURS , 5,1951 ,2001 ,yearly
PERLND, 112,PWATER ,TAET , 5,1951 ,2001 ,yearly
PERLND, 112,PWATER ,UZET , 5,1951 ,2001 ,yearly
PERLND, 112,PWATER ,UZI , 5,1951 ,2001 ,yearly
PERLND, 112,PWATER ,UZS , 5,1951 ,2001 ,yearly
PERLND, 113,PWATER ,AGWET, 5,1951 ,2001 ,yearly
PERLND, 113,PWATER ,AGWI , 5,1951 ,2001 ,yearly
PERLND, 113,PWATER ,AGWO , 5,1951 ,2001 ,yearly
PERLND, 113,PWATER ,AGWS , 5,1951 ,2001 ,yearly
PERLND, 113,PWATER ,BASET, 5,1951 ,2001 ,yearly
PERLND, 113,PWATER ,CEPE , 5,1951 ,2001 ,yearly
PERLND, 113,PWATER ,CEPS , 5,1951 ,2001 ,yearly
PERLND, 113,PWATER ,GWVS , 5,1951 ,2001 ,yearly
PERLND, 113,PWATER ,IFWI , 5,1951 ,2001 ,yearly
PERLND, 113,PWATER ,IFWO , 5,1951 ,2001 ,yearly
PERLND, 113,PWATER ,IFWS , 5,1951 ,2001 ,yearly
PERLND, 113,PWATER ,IGWI , 5,1951 ,2001 ,yearly
PERLND, 113,PWATER ,INFIL, 5,1951 ,2001 ,yearly
PERLND, 113,PWATER ,LZET , 5,1951 ,2001 ,yearly
PERLND, 113,PWATER ,LZI , 5,1951 ,2001 ,yearly
PERLND, 113,PWATER ,LZS , 5,1951 ,2001 ,yearly
PERLND, 113,PWATER ,PERC , 5,1951 ,2001 ,yearly
PERLND, 113,PWATER ,PERO , 5,1951 ,2001 ,yearly
PERLND, 113,PWATER ,PERS , 5,1951 ,2001 ,yearly
PERLND, 113,PWATER ,PET , 5,1951 ,2001 ,yearly
PERLND, 113,PWATER ,SUPY , 5,1951 ,2001 ,yearly
PERLND, 113,PWATER ,SURO , 5,1951 ,2001 ,yearly
PERLND, 113,PWATER ,SURS , 5,1951 ,2001 ,yearly
PERLND, 113,PWATER ,TAET , 5,1951 ,2001 ,yearly
PERLND, 113,PWATER ,UZET , 5,1951 ,2001 ,yearly
PERLND, 113,PWATER ,UZI , 5,1951 ,2001 ,yearly
PERLND, 113,PWATER ,UZS , 5,1951 ,2001 ,yearly
PERLND, 114,PWATER ,AGWET, 5,1951 ,2001 ,yearly
PERLND, 114,PWATER ,AGWI , 5,1951 ,2001 ,yearly
PERLND, 114,PWATER ,AGWO , 5,1951 ,2001 ,yearly
PERLND, 114,PWATER ,AGWS , 5,1951 ,2001 ,yearly
PERLND, 114,PWATER ,BASET, 5,1951 ,2001 ,yearly
PERLND, 114,PWATER ,CEPE , 5,1951 ,2001 ,yearly
PERLND, 114,PWATER ,CEPS , 5,1951 ,2001 ,yearly
PERLND, 114,PWATER ,GWVS , 5,1951 ,2001 ,yearly
PERLND, 114,PWATER ,IFWI , 5,1951 ,2001 ,yearly
PERLND, 114,PWATER ,IFWO , 5,1951 ,2001 ,yearly
PERLND, 114,PWATER ,IFWS , 5,1951 ,2001 ,yearly
PERLND, 114,PWATER ,IGWI , 5,1951 ,2001 ,yearly
PERLND, 114,PWATER ,INFIL, 5,1951 ,2001 ,yearly
PERLND, 114,PWATER ,LZET , 5,1951 ,2001 ,yearly
PERLND, 114,PWATER ,LZI , 5,1951 ,2001 ,yearly
PERLND, 114,PWATER ,LZS , 5,1951 ,2001 ,yearly
PERLND, 114,PWATER ,PERC , 5,1951 ,2001 ,yearly
PERLND, 114,PWATER ,PERO , 5,1951 ,2001 ,yearly
PERLND, 114,PWATER ,PERS , 5,1951 ,2001 ,yearly
PERLND, 114,PWATER ,PET , 5,1951 ,2001 ,yearly
PERLND, 114,PWATER ,SUPY , 5,1951 ,2001 ,yearly
PERLND, 114,PWATER ,SURO , 5,1951 ,2001 ,yearly
PERLND, 114,PWATER ,SURS , 5,1951 ,2001 ,yearly
PERLND, 114,PWATER ,TAET , 5,1951 ,2001 ,yearly
PERLND, 114,PWATER ,UZET , 5,1951 ,2001 ,yearly
PERLND, 114,PWATER ,UZI , 5,1951 ,2001 ,yearly
PERLND, 114,PWATER ,UZS , 5,1951 ,2001 ,yearly
PERLND, 115,PWATER ,AGWET, 5,1951 ,2001 ,yearly
PERLND, 115,PWATER ,AGWI , 5,1951 ,2001 ,yearly
PERLND, 115,PWATER ,AGWO , 5,1951 ,2001 ,yearly
PERLND, 115,PWATER ,AGWS , 5,1951 ,2001 ,yearly
PERLND, 115,PWATER ,BASET, 5,1951 ,2001 ,yearly
PERLND, 115,PWATER ,CEPE , 5,1951 ,2001 ,yearly
PERLND, 115,PWATER ,CEPS , 5,1951 ,2001 ,yearly
PERLND, 115,PWATER ,GWVS , 5,1951 ,2001 ,yearly
PERLND, 115,PWATER ,IFWI , 5,1951 ,2001 ,yearly
PERLND, 115,PWATER ,IFWO , 5,1951 ,2001 ,yearly
PERLND, 115,PWATER ,IFWS , 5,1951 ,2001 ,yearly
PERLND, 115,PWATER ,IGWI , 5,1951 ,2001 ,yearly
PERLND, 115,PWATER ,INFIL, 5,1951 ,2001 ,yearly
PERLND, 115,PWATER ,LZET , 5,1951 ,2001 ,yearly
PERLND, 115,PWATER ,LZI , 5,1951 ,2001 ,yearly
PERLND, 115,PWATER ,LZS , 5,1951 ,2001 ,yearly
PERLND, 115,PWATER ,PERC , 5,1951 ,2001 ,yearly
PERLND, 115,PWATER ,PERO , 5,1951 ,2001 ,yearly
PERLND, 115,PWATER ,PERS , 5,1951 ,2001 ,yearly
PERLND, 115,PWATER ,PET , 5,1951 ,2001 ,yearly
PERLND, 115,PWATER ,SUPY , 5,1951 ,2001 ,yearly
PERLND, 115,PWATER ,SURO , 5,1951 ,2001 ,yearly
PERLND, 115,PWATER ,SURS , 5,1951 ,2001 ,yearly
PERLND, 115,PWATER ,TAET , 5,1951 ,2001 ,yearly
PERLND, 115,PWATER ,UZET , 5,1951 ,2001 ,yearly
PERLND, 115,PWATER ,UZI , 5,1951 ,2001 ,yearly
PERLND, 115,PWATER ,UZS , 5,1951 ,2001 ,yearly
PERLND, 211,PWATER ,AGWET, 5,1951 ,2001 ,yearly
PERLND, 211,PWATER ,AGWI , 5,1951 ,2001 ,yearly
PERLND, 211,PWATER ,AGWO , 5,1951 ,2001 ,yearly
PERLND, 211,PWATER ,AGWS , 5,1951 ,2001 ,yearly
PERLND, 211,PWATER ,BASET, 5,1951 ,2001 ,yearly
PERLND, 211,PWATER ,CEPE , 5,1951 ,2001 ,yearly
PERLND, 211,PWATER ,CEPS , 5,1951 ,2001 ,yearly
PERLND, 211,PWATER ,GWVS , 5,1951 ,2001 ,yearly
PERLND, 211,PWATER ,IFWI , 5,1951 ,2001 ,yearly
PERLND, 211,PWATER ,IFWO , 5,1951 ,2001 ,yearly
PERLND, 211,PWATER ,IFWS , 5,1951 ,2001 ,yearly
PERLND, 211,PWATER ,IGWI , 5,1951 ,2001 ,yearly
PERLND, 211,PWATER ,INFIL, 5,1951 ,2001 ,yearly
PERLND, 211,PWATER ,LZET , 5,1951 ,2001 ,yearly
PERLND, 211,PWATER ,LZI , 5,1951 ,2001 ,yearly
PERLND, 211,PWATER ,LZS , 5,1951 ,2001 ,yearly
PERLND, 211,PWATER ,PERC , 5,1951 ,2001 ,yearly
PERLND, 211,PWATER ,PERO , 5,1951 ,2001 ,yearly
PERLND, 211,PWATER ,PERS , 5,1951 ,2001 ,yearly
PERLND, 211,PWATER ,PET , 5,1951 ,2001 ,yearly
PERLND, 211,PWATER ,SUPY , 5,1951 ,2001 ,yearly
PERLND, 211,PWATER ,SURO , 5,1951 ,2001 ,yearly
PERLND, 211,PWATER ,SURS , 5,1951 ,2001 ,yearly
PERLND, 211,PWATER ,TAET , 5,1951 ,2001 ,yearly
PERLND, 211,PWATER ,UZET , 5,1951 ,2001 ,yearly
PERLND, 211,PWATER ,UZI , 5,1951 ,2001 ,yearly
PERLND, 211,PWATER ,UZS , 5,1951 ,2001 ,yearly
PERLND, 212,PWATER ,AGWET, 5,1951 ,2001 ,yearly
PERLND, 212,PWATER ,AGWI , 5,1951 ,2001 ,yearly
PERLND, 212,PWATER ,AGWO , 5,1951 ,2001 ,yearly
PERLND, 212,PWATER ,AGWS , 5,1951 ,2001 ,yearly
PERLND, 212,PWATER ,BASET, 5,1951 ,2001 ,yearly
PERLND, 212,PWATER ,CEPE , 5,1951 ,2001 ,yearly
PERLND, 212,PWATER ,CEPS , 5,1951 ,2001 ,yearly
PERLND, 212,PWATER ,GWVS , 5,1951 ,2001 ,yearly
PERLND, 212,PWATER ,IFWI , 5,1951 ,2001 ,yearly
PERLND, 212,PWATER ,IFWO , 5,1951 ,2001 ,yearly
PERLND, 212,PWATER ,IFWS , 5,1951 ,2001 ,yearly
PERLND, 212,PWATER ,IGWI , 5,1951 ,2001 ,yearly
PERLND, 212,PWATER ,INFIL, 5,1951 ,2001 ,yearly
PERLND, 212,PWATER ,LZET , 5,1951 ,2001 ,yearly
PERLND, 212,PWATER ,LZI , 5,1951 ,2001 ,yearly
PERLND, 212,PWATER ,LZS , 5,1951 ,2001 ,yearly
PERLND, 212,PWATER ,PERC , 5,1951 ,2001 ,yearly
PERLND, 212,PWATER ,PERO , 5,1951 ,2001 ,yearly
PERLND, 212,PWATER ,PERS , 5,1951 ,2001 ,yearly
PERLND, 212,PWATER ,PET , 5,1951 ,2001 ,yearly
PERLND, 212,PWATER ,SUPY , 5,1951 ,2001 ,yearly
PERLND, 212,PWATER ,SURO , 5,1951 ,2001 ,yearly
PERLND, 212,PWATER ,SURS , 5,1951 ,2001 ,yearly
PERLND, 212,PWATER ,TAET , 5,1951 ,2001 ,yearly
PERLND, 212,PWATER ,UZET , 5,1951 ,2001 ,yearly
PERLND, 212,PWATER ,UZI , 5,1951 ,2001 ,yearly
PERLND, 212,PWATER ,UZS , 5,1951 ,2001 ,yearly
PERLND, 213,PWATER ,AGWET, 5,1951 ,2001 ,yearly
PERLND, 213,PWATER ,AGWI , 5,1951 ,2001 ,yearly
PERLND, 213,PWATER ,AGWO , 5,1951 ,2001 ,yearly
PERLND, 213,PWATER ,AGWS , 5,1951 ,2001 ,yearly
PERLND, 213,PWATER ,BASET, 5,1951 ,2001 ,yearly
PERLND, 213,PWATER ,CEPE , 5,1951 ,2001 ,yearly
PERLND, 213,PWATER ,CEPS , 5,1951 ,2001 ,yearly
PERLND, 213,PWATER ,GWVS , 5,1951 ,2001 ,yearly
PERLND, 213,PWATER ,IFWI , 5,1951 ,2001 ,yearly
PERLND, 213,PWATER ,IFWO , 5,1951 ,2001 ,yearly
PERLND, 213,PWATER ,IFWS , 5,1951 ,2001 ,yearly
PERLND, 213,PWATER ,IGWI , 5,1951 ,2001 ,yearly
PERLND, 213,PWATER ,INFIL, 5,1951 ,2001 ,yearly
PERLND, 213,PWATER ,LZET , 5,1951 ,2001 ,yearly
PERLND, 213,PWATER ,LZI , 5,1951 ,2001 ,yearly
PERLND, 213,PWATER ,LZS , 5,1951 ,2001 ,yearly
PERLND, 213,PWATER ,PERC , 5,1951 ,2001 ,yearly
PERLND, 213,PWATER ,PERO , 5,1951 ,2001 ,yearly
PERLND, 213,PWATER ,PERS , 5,1951 ,2001 ,yearly
PERLND, 213,PWATER ,PET , 5,1951 ,2001 ,yearly
PERLND, 213,PWATER ,SUPY , 5,1951 ,2001 ,yearly
PERLND, 213,PWATER ,SURO , 5,1951 ,2001 ,yearly
PERLND, 213,PWATER ,SURS , 5,1951 ,2001 ,yearly
PERLND, 213,PWATER ,TAET , 5,1951 ,2001 ,yearly
PERLND, 213,PWATER ,UZET , 5,1951 ,2001 ,yearly
PERLND, 213,PWATER ,UZI , 5,1951 ,2001 ,yearly
PERLND, 213,PWATER ,UZS , 5,1951 ,2001 ,yearly
PERLND, 214,PWATER ,AGWET, 5,1951 ,2001 ,yearly
PERLND, 214,PWATER ,AGWI , 5,1951 ,2001 ,yearly
PERLND, 214,PWATER ,AGWO , 5,1951 ,2001 ,yearly
PERLND, 214,PWATER ,AGWS , 5,1951 ,2001 ,yearly
PERLND, 214,PWATER ,BASET, 5,1951 ,2001 ,yearly
PERLND, 214,PWATER ,CEPE , 5,1951 ,2001 ,yearly
PERLND, 214,PWATER ,CEPS , 5,1951 ,2001 ,yearly
PERLND, 214,PWATER ,GWVS , 5,1951 ,2001 ,yearly
PERLND, 214,PWATER ,IFWI , 5,1951 ,2001 ,yearly
PERLND, 214,PWATER ,IFWO , 5,1951 ,2001 ,yearly
PERLND, 214,PWATER ,IFWS , 5,1951 ,2001 ,yearly
PERLND, 214,PWATER ,IGWI , 5,1951 ,2001 ,yearly
PERLND, 214,PWATER ,INFIL, 5,1951 ,2001 ,yearly
PERLND, 214,PWATER ,LZET , 5,1951 ,2001 ,yearly
PERLND, 214,PWATER ,LZI , 5,1951 ,2001 ,yearly
PERLND, 214,PWATER ,LZS , 5,1951 ,2001 ,yearly
PERLND, 214,PWATER ,PERC , 5,1951 ,2001 ,yearly
PERLND, 214,PWATER ,PERO , 5,1951 ,2001 ,yearly
PERLND, 214,PWATER ,PERS , 5,1951 ,2001 ,yearly
PERLND, 214,PWATER ,PET , 5,1951 ,2001 ,yearly
PERLND, 214,PWATER ,SUPY , 5,1951 ,2001 ,yearly
PERLND, 214,PWATER ,SURO , 5,1951 ,2001 ,yearly
PERLND, 214,PWATER ,SURS , 5,1951 ,2001 ,yearly
PERLND, 214,PWATER ,TAET , 5,1951 ,2001 ,yearly
PERLND, 214,PWATER ,UZET , 5,1951 ,2001 ,yearly
PERLND, 214,PWATER ,UZI , 5,1951 ,2001 ,yearly
PERLND, 214,PWATER ,UZS , 5,1951 ,2001 ,yearly
PERLND, 215,PWATER ,AGWET, 5,1951 ,2001 ,yearly
PERLND, 215,PWATER ,AGWI , 5,1951 ,2001 ,yearly
PERLND, 215,PWATER ,AGWO , 5,1951 ,2001 ,yearly
PERLND, 215,PWATER ,AGWS , 5,1951 ,2001 ,yearly
PERLND, 215,PWATER ,BASET, 5,1951 ,2001 ,yearly
PERLND, 215,PWATER ,CEPE , 5,1951 ,2001 ,yearly
PERLND, 215,PWATER ,CEPS , 5,1951 ,2001 ,yearly
PERLND, 215,PWATER ,GWVS , 5,1951 ,2001 ,yearly
PERLND, 215,PWATER ,IFWI , 5,1951 ,2001 ,yearly
PERLND, 215,PWATER ,IFWO , 5,1951 ,2001 ,yearly
PERLND, 215,PWATER ,IFWS , 5,1951 ,2001 ,yearly
PERLND, 215,PWATER ,IGWI , 5,1951 ,2001 ,yearly
PERLND, 215,PWATER ,INFIL, 5,1951 ,2001 ,yearly
PERLND, 215,PWATER ,LZET , 5,1951 ,2001 ,yearly
PERLND, 215,PWATER ,LZI , 5,1951 ,2001 ,yearly
PERLND, 215,PWATER ,LZS , 5,1951 ,2001 ,yearly
PERLND, 215,PWATER ,PERC , 5,1951 ,2001 ,yearly
PERLND, 215,PWATER ,PERO , 5,1951 ,2001 ,yearly
PERLND, 215,PWATER ,PERS , 5,1951 ,2001 ,yearly
PERLND, 215,PWATER ,PET , 5,1951 ,2001 ,yearly
PERLND, 215,PWATER ,SUPY , 5,1951 ,2001 ,yearly
PERLND, 215,PWATER ,SURO , 5,1951 ,2001 ,yearly
PERLND, 215,PWATER ,SURS , 5,1951 ,2001 ,yearly
PERLND, 215,PWATER ,TAET , 5,1951 ,2001 ,yearly
PERLND, 215,PWATER ,UZET , 5,1951 ,2001 ,yearly
PERLND, 215,PWATER ,UZI , 5,1951 ,2001 ,yearly
PERLND, 215,PWATER ,UZS , 5,1951 ,2001 ,yearly
PERLND, 301,PWATER ,AGWET, 5,1951 ,2001 ,yearly
PERLND, 301,PWATER ,AGWI , 5,1951 ,2001 ,yearly
PERLND, 301,PWATER ,AGWO , 5,1951 ,2001 ,yearly
PERLND, 301,PWATER ,AGWS , 5,1951 ,2001 ,yearly
PERLND, 301,PWATER ,BASET, 5,1951 ,2001 ,yearly
PERLND, 301,PWATER ,CEPE , 5,1951 ,2001 ,yearly
PERLND, 301,PWATER ,CEPS , 5,1951 ,2001 ,yearly
PERLND, 301,PWATER ,GWVS , 5,1951 ,2001 ,yearly
PERLND, 301,PWATER ,IFWI , 5,1951 ,2001 ,yearly
PERLND, 301,PWATER ,IFWO , 5,1951 ,2001 ,yearly
PERLND, 301,PWATER ,IFWS , 5,1951 ,2001 ,yearly
PERLND, 301,PWATER ,IGWI , 5,1951 ,2001 ,yearly
PERLND, 301,PWATER ,INFIL, 5,1951 ,2001 ,yearly
PERLND, 301,PWATER ,LZET , 5,1951 ,2001 ,yearly
PERLND, 301,PWATER ,LZI , 5,1951 ,2001 ,yearly
PERLND, 301,PWATER ,LZS , 5,1951 ,2001 ,yearly
PERLND, 301,PWATER ,PERC , 5,1951 ,2001 ,yearly
PERLND, 301,PWATER ,PERO , 5,1951 ,2001 ,yearly
PERLND, 301,PWATER ,PERS , 5,1951 ,2001 ,yearly
PERLND, 301,PWATER ,PET , 5,1951 ,2001 ,yearly
PERLND, 301,PWATER ,SUPY , 5,1951 ,2001 ,yearly
PERLND, 301,PWATER ,SURO , 5,1951 ,2001 ,yearly
PERLND, 301,PWATER ,SURS , 5,1951 ,2001 ,yearly
PERLND, 301,PWATER ,TAET , 5,1951 ,2001 ,yearly
PERLND, 301,PWATER ,UZET , 5,1951 ,2001 ,yearly
PERLND, 301,PWATER ,UZI , 5,1951 ,2001 ,yearly
PERLND, 301,PWATER ,UZS , 5,1951 ,2001 ,yearly
PERLND, 302,PWATER ,AGWET, 5,1951 ,2001 ,yearly
PERLND, 302,PWATER ,AGWI , 5,1951 ,2001 ,yearly
PERLND, 302,PWATER ,AGWO , 5,1951 ,2001 ,yearly
PERLND, 302,PWATER ,AGWS , 5,1951 ,2001 ,yearly
PERLND, 302,PWATER ,BASET, 5,1951 ,2001 ,yearly
PERLND, 302,PWATER ,CEPE , 5,1951 ,2001 ,yearly
PERLND, 302,PWATER ,CEPS , 5,1951 ,2001 ,yearly
PERLND, 302,PWATER ,GWVS , 5,1951 ,2001 ,yearly
PERLND, 302,PWATER ,IFWI , 5,1951 ,2001 ,yearly
PERLND, 302,PWATER ,IFWO , 5,1951 ,2001 ,yearly
PERLND, 302,PWATER ,IFWS , 5,1951 ,2001 ,yearly
PERLND, 302,PWATER ,IGWI , 5,1951 ,2001 ,yearly
PERLND, 302,PWATER ,INFIL, 5,1951 ,2001 ,yearly
PERLND, 302,PWATER ,LZET , 5,1951 ,2001 ,yearly
PERLND, 302,PWATER ,LZI , 5,1951 ,2001 ,yearly
PERLND, 302,PWATER ,LZS , 5,1951 ,2001 ,yearly
PERLND, 302,PWATER ,PERC , 5,1951 ,2001 ,yearly
PERLND, 302,PWATER ,PERO , 5,1951 ,2001 ,yearly
PERLND, 302,PWATER ,PERS , 5,1951 ,2001 ,yearly
PERLND, 302,PWATER ,PET , 5,1951 ,2001 ,yearly
PERLND, 302,PWATER ,SUPY , 5,1951 ,2001 ,yearly
PERLND, 302,PWATER ,SURO , 5,1951 ,2001 ,yearly
PERLND, 302,PWATER ,SURS , 5,1951 ,2001 ,yearly
PERLND, 302,PWATER ,TAET , 5,1951 ,2001 ,yearly
PERLND, 302,PWATER ,UZET , 5,1951 ,2001 ,yearly
PERLND, 302,PWATER ,UZI , 5,1951 ,2001 ,yearly
PERLND, 302,PWATER ,UZS , 5,1951 ,2001 ,yearly
PERLND, 303,PWATER ,AGWET, 5,1951 ,2001 ,yearly
PERLND, 303,PWATER ,AGWI , 5,1951 ,2001 ,yearly
PERLND, 303,PWATER ,AGWO , 5,1951 ,2001 ,yearly
PERLND, 303,PWATER ,AGWS , 5,1951 ,2001 ,yearly
PERLND, 303,PWATER ,BASET, 5,1951 ,2001 ,yearly
PERLND, 303,PWATER ,CEPE , 5,1951 ,2001 ,yearly
PERLND, 303,PWATER ,CEPS , 5,1951 ,2001 ,yearly
PERLND, 303,PWATER ,GWVS , 5,1951 ,2001 ,yearly
PERLND, 303,PWATER ,IFWI , 5,1951 ,2001 ,yearly
PERLND, 303,PWATER ,IFWO , 5,1951 ,2001 ,yearly
PERLND, 303,PWATER ,IFWS , 5,1951 ,2001 ,yearly
PERLND, 303,PWATER ,IGWI , 5,1951 ,2001 ,yearly
PERLND, 303,PWATER ,INFIL, 5,1951 ,2001 ,yearly
PERLND, 303,PWATER ,LZET , 5,1951 ,2001 ,yearly
PERLND, 303,PWATER ,LZI , 5,1951 ,2001 ,yearly
PERLND, 303,PWATER ,LZS , 5,1951 ,2001 ,yearly
PERLND, 303,PWATER ,PERC , 5,1951 ,2001 ,yearly
PERLND, 303,PWATER ,PERO , 5,1951 ,2001 ,yearly
PERLND, 303,PWATER ,PERS , 5,1951 ,2001 ,yearly
PERLND, 303,PWATER ,PET , 5,1951 ,2001 ,yearly
PERLND, 303,PWATER ,SUPY , 5,1951 ,2001 ,yearly
PERLND, 303,PWATER ,SURO , 5,1951 ,2001 ,yearly
PERLND, 303,PWATER ,SURS , 5,1951 ,2001 ,yearly
PERLND, 303,PWATER ,TAET , 5,1951 ,2001 ,yearly
PERLND, 303,PWATER ,UZET , 5,1951 ,2001 ,yearly
PERLND, 303,PWATER ,UZI , 5,1951 ,2001 ,yearly
PERLND, 303,PWATER ,UZS , 5,1951 ,2001 ,yearly
PERLND, 304,PWATER ,AGWET, 5,1951 ,2001 ,yearly
PERLND, 304,PWATER ,AGWI , 5,1951 ,2001 ,yearly
PERLND, 304,PWATER ,AGWO , 5,1951 ,2001 ,yearly
PERLND, 304,PWATER ,AGWS , 5,1951 ,2001 ,yearly
PERLND, 304,PWATER ,BASET, 5,1951 ,2001 ,yearly
PERLND, 304,PWATER ,CEPE , 5,1951 ,2001 ,yearly
PERLND, 304,PWATER ,CEPS , 5,1951 ,2001 ,yearly
PERLND, 304,PWATER ,GWVS , 5,1951 ,2001 ,yearly
PERLND, 304,PWATER ,IFWI , 5,1951 ,2001 ,yearly
PERLND, 304,PWATER ,IFWO , 5,1951 ,2001 ,yearly
PERLND, 304,PWATER ,IFWS , 5,1951 ,2001 ,yearly
PERLND, 304,PWATER ,IGWI , 5,1951 ,2001 ,yearly
PERLND, 304,PWATER ,INFIL, 5,1951 ,2001 ,yearly
PERLND, 304,PWATER ,LZET , 5,1951 ,2001 ,yearly
PERLND, 304,PWATER ,LZI , 5,1951 ,2001 ,yearly
PERLND, 304,PWATER ,LZS , 5,1951 ,2001 ,yearly
PERLND, 304,PWATER ,PERC , 5,1951 ,2001 ,yearly
PERLND, 304,PWATER ,PERO , 5,1951 ,2001 ,yearly
PERLND, 304,PWATER ,PERS , 5,1951 ,2001 ,yearly
PERLND, 304,PWATER ,PET , 5,1951 ,2001 ,yearly
PERLND, 304,PWATER ,SUPY , 5,1951 ,2001 ,yearly
PERLND, 304,PWATER ,SURO , 5,1951 ,2001 ,yearly
PERLND, 304,PWATER ,SURS , 5,1951 ,2001 ,yearly
PERLND, 304,PWATER ,TAET , 5,1951 ,2001 ,yearly
PERLND, 304,PWATER ,UZET , 5,1951 ,2001 ,yearly
PERLND, 304,PWATER ,UZI , 5,1951 ,2001 ,yearly
PERLND, 304,PWATER ,UZS , 5,1951 ,2001 ,yearly
PERLND, 305,PWATER ,AGWET, 5,1951 ,2001 ,yearly
PERLND, 305,PWATER ,AGWI , 5,1951 ,2001 ,yearly
PERLND, 305,PWATER ,AGWO , 5,1951 ,2001 ,yearly
PERLND, 305,PWATER ,AGWS , 5,1951 ,2001 ,yearly
PERLND, 305,PWATER ,BASET, 5,1951 ,2001 ,yearly
PERLND, 305,PWATER ,CEPE , 5,1951 ,2001 ,yearly
PERLND, 305,PWATER ,CEPS , 5,1951 ,2001 ,yearly
PERLND, 305,PWATER ,GWVS , 5,1951 ,2001 ,yearly
PERLND, 305,PWATER ,IFWI , 5,1951 ,2001 ,yearly
PERLND, 305,PWATER ,IFWO , 5,1951 ,2001 ,yearly
PERLND, 305,PWATER ,IFWS , 5,1951 ,2001 ,yearly
PERLND, 305,PWATER ,IGWI , 5,1951 ,2001 ,yearly
PERLND, 305,PWATER ,INFIL, 5,1951 ,2001 ,yearly
PERLND, 305,PWATER ,LZET , 5,1951 ,2001 ,yearly
PERLND, 305,PWATER ,LZI , 5,1951 ,2001 ,yearly
PERLND, 305,PWATER ,LZS , 5,1951 ,2001 ,yearly
PERLND, 305,PWATER ,PERC , 5,1951 ,2001 ,yearly
PERLND, 305,PWATER ,PERO , 5,1951 ,2001 ,yearly
PERLND, 305,PWATER ,PERS , 5,1951 ,2001 ,yearly
PERLND, 305,PWATER ,PET , 5,1951 ,2001 ,yearly
PERLND, 305,PWATER ,SUPY , 5,1951 ,2001 ,yearly
PERLND, 305,PWATER ,SURO , 5,1951 ,2001 ,yearly
PERLND, 305,PWATER ,SURS , 5,1951 ,2001 ,yearly
PERLND, 305,PWATER ,TAET , 5,1951 ,2001 ,yearly
PERLND, 305,PWATER ,UZET , 5,1951 ,2001 ,yearly
PERLND, 305,PWATER ,UZI , 5,1951 ,2001 ,yearly
PERLND, 305,PWATER ,UZS , 5,1951 ,2001 ,yearly
PERLND, 311,PWATER ,AGWET, 5,1951 ,2001 ,yearly
PERLND, 311,PWATER ,AGWI , 5,1951 ,2001 ,yearly
PERLND, 311,PWATER ,AGWO , 5,1951 ,2001 ,yearly
PERLND, 311,PWATER ,AGWS , 5,1951 ,2001 ,yearly
PERLND, 311,PWATER ,BASET, 5,1951 ,2001 ,yearly
PERLND, 311,PWATER ,CEPE , 5,1951 ,2001 ,yearly
PERLND, 311,PWATER ,CEPS , 5,1951 ,2001 ,yearly
PERLND, 311,PWATER ,GWVS , 5,1951 ,2001 ,yearly
PERLND, 311,PWATER ,IFWI , 5,1951 ,2001 ,yearly
PERLND, 311,PWATER ,IFWO , 5,1951 ,2001 ,yearly
PERLND, 311,PWATER ,IFWS , 5,1951 ,2001 ,yearly
PERLND, 311,PWATER ,IGWI , 5,1951 ,2001 ,yearly
PERLND, 311,PWATER ,INFIL, 5,1951 ,2001 ,yearly
PERLND, 311,PWATER ,LZET , 5,1951 ,2001 ,yearly
PERLND, 311,PWATER ,LZI , 5,1951 ,2001 ,yearly
PERLND, 311,PWATER ,LZS , 5,1951 ,2001 ,yearly
PERLND, 311,PWATER ,PERC , 5,1951 ,2001 ,yearly
PERLND, 311,PWATER ,PERO , 5,1951 ,2001 ,yearly
PERLND, 311,PWATER ,PERS , 5,1951 ,2001 ,yearly
PERLND, 311,PWATER ,PET , 5,1951 ,2001 ,yearly
PERLND, 311,PWATER ,SUPY , 5,1951 ,2001 ,yearly
PERLND, 311,PWATER ,SURO , 5,1951 ,2001 ,yearly
PERLND, 311,PWATER ,SURS , 5,1951 ,2001 ,yearly
PERLND, 311,PWATER ,TAET , 5,1951 ,2001 ,yearly
PERLND, 311,PWATER ,UZET , 5,1951 ,2001 ,yearly
PERLND, 311,PWATER ,UZI , 5,1951 ,2001 ,yearly
PERLND, 311,PWATER ,UZS , 5,1951 ,2001 ,yearly
PERLND, 312,PWATER ,AGWET, 5,1951 ,2001 ,yearly
PERLND, 312,PWATER ,AGWI , 5,1951 ,2001 ,yearly
PERLND, 312,PWATER ,AGWO , 5,1951 ,2001 ,yearly
PERLND, 312,PWATER ,AGWS , 5,1951 ,2001 ,yearly
PERLND, 312,PWATER ,BASET, 5,1951 ,2001 ,yearly
PERLND, 312,PWATER ,CEPE , 5,1951 ,2001 ,yearly
PERLND, 312,PWATER ,CEPS , 5,1951 ,2001 ,yearly
PERLND, 312,PWATER ,GWVS , 5,1951 ,2001 ,yearly
PERLND, 312,PWATER ,IFWI , 5,1951 ,2001 ,yearly
PERLND, 312,PWATER ,IFWO , 5,1951 ,2001 ,yearly
PERLND, 312,PWATER ,IFWS , 5,1951 ,2001 ,yearly
PERLND, 312,PWATER ,IGWI , 5,1951 ,2001 ,yearly
PERLND, 312,PWATER ,INFIL, 5,1951 ,2001 ,yearly
PERLND, 312,PWATER ,LZET , 5,1951 ,2001 ,yearly
PERLND, 312,PWATER ,LZI , 5,1951 ,2001 ,yearly
PERLND, 312,PWATER ,LZS , 5,1951 ,2001 ,yearly
PERLND, 312,PWATER ,PERC , 5,1951 ,2001 ,yearly
PERLND, 312,PWATER ,PERO , 5,1951 ,2001 ,yearly
PERLND, 312,PWATER ,PERS , 5,1951 ,2001 ,yearly
PERLND, 312,PWATER ,PET , 5,1951 ,2001 ,yearly
PERLND, 312,PWATER ,SUPY , 5,1951 ,2001 ,yearly
PERLND, 312,PWATER ,SURO , 5,1951 ,2001 ,yearly
PERLND, 312,PWATER ,SURS , 5,1951 ,2001 ,yearly
PERLND, 312,PWATER ,TAET , 5,1951 ,2001 ,yearly
PERLND, 312,PWATER ,UZET , 5,1951 ,2001 ,yearly
PERLND, 312,PWATER ,UZI , 5,1951 ,2001 ,yearly
PERLND, 312,PWATER ,UZS , 5,1951 ,2001 ,yearly
PERLND, 313,PWATER ,AGWET, 5,1951 ,2001 ,yearly
PERLND, 313,PWATER ,AGWI , 5,1951 ,2001 ,yearly
PERLND, 313,PWATER ,AGWO , 5,1951 ,2001 ,yearly
PERLND, 313,PWATER ,AGWS , 5,1951 ,2001 ,yearly
PERLND, 313,PWATER ,BASET, 5,1951 ,2001 ,yearly
PERLND, 313,PWATER ,CEPE , 5,1951 ,2001 ,yearly
PERLND, 313,PWATER ,CEPS , 5,1951 ,2001 ,yearly
PERLND, 313,PWATER ,GWVS , 5,1951 ,2001 ,yearly
PERLND, 313,PWATER ,IFWI , 5,1951 ,2001 ,yearly
PERLND, 313,PWATER ,IFWO , 5,1951 ,2001 ,yearly
PERLND, 313,PWATER ,IFWS , 5,1951 ,2001 ,yearly
PERLND, 313,PWATER ,IGWI , 5,1951 ,2001 ,yearly
PERLND, 313,PWATER ,INFIL, 5,1951 ,2001 ,yearly
PERLND, 313,PWATER ,LZET , 5,1951 ,2001 ,yearly
PERLND, 313,PWATER ,LZI , 5,1951 ,2001 ,yearly
PERLND, 313,PWATER ,LZS , 5,1951 ,2001 ,yearly
PERLND, 313,PWATER ,PERC , 5,1951 ,2001 ,yearly
PERLND, 313,PWATER ,PERO , 5,1951 ,2001 ,yearly
PERLND, 313,PWATER ,PERS , 5,1951 ,2001 ,yearly
PERLND, 313,PWATER ,PET , 5,1951 ,2001 ,yearly
PERLND, 313,PWATER ,SUPY , 5,1951 ,2001 ,yearly
PERLND, 313,PWATER ,SURO , 5,1951 ,2001 ,yearly
PERLND, 313,PWATER ,SURS , 5,1951 ,2001 ,yearly
PERLND, 313,PWATER ,TAET , 5,1951 ,2001 ,yearly
PERLND, 313,PWATER ,UZET , 5,1951 ,2001 ,yearly
PERLND, 313,PWATER ,UZI , 5,1951 ,2001 ,yearly
PERLND, 313,PWATER ,UZS , 5,1951 ,2001 ,yearly
PERLND, 314,PWATER ,AGWET, 5,1951 ,2001 ,yearly
PERLND, 314,PWATER ,AGWI , 5,1951 ,2001 ,yearly
PERLND, 314,PWATER ,AGWO , 5,1951 ,2001 ,yearly
PERLND, 314,PWATER ,AGWS , 5,1951 ,2001 ,yearly
PERLND, 314,PWATER ,BASET, 5,1951 ,2001 ,yearly
PERLND, 314,PWATER ,CEPE , 5,1951 ,2001 ,yearly
PERLND, 314,PWATER ,CEPS , 5,1951 ,2001 ,yearly
PERLND, 314,PWATER ,GWVS , 5,1951 ,2001 ,yearly
PERLND, 314,PWATER ,IFWI , 5,1951 ,2001 ,yearly
PERLND, 314,PWATER ,IFWO , 5,1951 ,2001 ,yearly
PERLND, 314,PWATER ,IFWS , 5,1951 ,2001 ,yearly
PERLND, 314,PWATER ,IGWI , 5,1951 ,2001 ,yearly
PERLND, 314,PWATER ,INFIL, 5,1951 ,2001 ,yearly
PERLND, 314,PWATER ,LZET , 5,1951 ,2001 ,yearly
PERLND, 314,PWATER ,LZI , 5,1951 ,2001 ,yearly
PERLND, 314,PWATER ,LZS , 5,1951 ,2001 ,yearly
PERLND, 314,PWATER ,PERC , 5,1951 ,2001 ,yearly
PERLND, 314,PWATER ,PERO , 5,1951 ,2001 ,yearly
PERLND, 314,PWATER ,PERS , 5,1951 ,2001 ,yearly
PERLND, 314,PWATER ,PET , 5,1951 ,2001 ,yearly
PERLND, 314,PWATER ,SUPY , 5,1951 ,2001 ,yearly
PERLND, 314,PWATER ,SURO , 5,1951 ,2001 ,yearly
PERLND, 314,PWATER ,SURS , 5,1951 ,2001 ,yearly
PERLND, 314,PWATER ,TAET , 5,1951 ,2001 ,yearly
PERLND, 314,PWATER ,UZET , 5,1951 ,2001 ,yearly
PERLND, 314,PWATER ,UZI , 5,1951 ,2001 ,yearly
PERLND, 314,PWATER ,UZS , 5,1951 ,2001 ,yearly
PERLND, 315,PWATER ,AGWET, 5,1951 ,2001 ,yearly
PERLND, 315,PWATER ,AGWI , 5,1951 ,2001 ,yearly
PERLND, 315,PWATER ,AGWO , 5,1951 ,2001 ,yearly
PERLND, 315,PWATER ,AGWS , 5,1951 ,2001 ,yearly
PERLND, 315,PWATER ,BASET, 5,1951 ,2001 ,yearly
PERLND, 315,PWATER ,CEPE , 5,1951 ,2001 ,yearly
PERLND, 315,PWATER ,CEPS , 5,1951 ,2001 ,yearly
PERLND, 315,PWATER ,GWVS , 5,1951 ,2001 ,yearly
PERLND, 315,PWATER ,IFWI , 5,1951 ,2001 ,yearly
PERLND, 315,PWATER ,IFWO , 5,1951 ,2001 ,yearly
PERLND, 315,PWATER ,IFWS , 5,1951 ,2001 ,yearly
PERLND, 315,PWATER ,IGWI , 5,1951 ,2001 ,yearly
PERLND, 315,PWATER ,INFIL, 5,1951 ,2001 ,yearly
PERLND, 315,PWATER ,LZET , 5,1951 ,2001 ,yearly
PERLND, 315,PWATER ,LZI , 5,1951 ,2001 ,yearly
PERLND, 315,PWATER ,LZS , 5,1951 ,2001 ,yearly
PERLND, 315,PWATER ,PERC , 5,1951 ,2001 ,yearly
PERLND, 315,PWATER ,PERO , 5,1951 ,2001 ,yearly
PERLND, 315,PWATER ,PERS , 5,1951 ,2001 ,yearly
PERLND, 315,PWATER ,PET , 5,1951 ,2001 ,yearly
PERLND, 315,PWATER ,SUPY , 5,1951 ,2001 ,yearly
PERLND, 315,PWATER ,SURO , 5,1951 ,2001 ,yearly
PERLND, 315,PWATER ,SURS , 5,1951 ,2001 ,yearly
PERLND, 315,PWATER ,TAET , 5,1951 ,2001 ,yearly
PERLND, 315,PWATER ,UZET , 5,1951 ,2001 ,yearly
PERLND, 315,PWATER ,UZI , 5,1951 ,2001 ,yearly
PERLND, 315,PWATER ,UZS , 5,1951 ,2001 ,yearly
PERLND, 411,PWATER ,AGWET, 5,1951 ,2001 ,yearly
PERLND, 411,PWATER ,AGWI , 5,1951 ,2001 ,yearly
PERLND, 411,PWATER ,AGWO , 5,1951 ,2001 ,yearly
PERLND, 411,PWATER ,AGWS , 5,1951 ,2001 ,yearly
PERLND, 411,PWATER ,BASET, 5,1951 ,2001 ,yearly
PERLND, 411,PWATER ,CEPE , 5,1951 ,2001 ,yearly
PERLND, 411,PWATER ,CEPS , 5,1951 ,2001 ,yearly
PERLND, 411,PWATER ,GWVS , 5,1951 ,2001 ,yearly
PERLND, 411,PWATER ,IFWI , 5,1951 ,2001 ,yearly
PERLND, 411,PWATER ,IFWO , 5,1951 ,2001 ,yearly
PERLND, 411,PWATER ,IFWS , 5,1951 ,2001 ,yearly
PERLND, 411,PWATER ,IGWI , 5,1951 ,2001 ,yearly
PERLND, 411,PWATER ,INFIL, 5,1951 ,2001 ,yearly
PERLND, 411,PWATER ,LZET , 5,1951 ,2001 ,yearly
PERLND, 411,PWATER ,LZI , 5,1951 ,2001 ,yearly
PERLND, 411,PWATER ,LZS , 5,1951 ,2001 ,yearly
PERLND, 411,PWATER ,PERC , 5,1951 ,2001 ,yearly
PERLND, 411,PWATER ,PERO , 5,1951 ,2001 ,yearly
PERLND, 411,PWATER ,PERS , 5,1951 ,2001 ,yearly
PERLND, 411,PWATER ,PET , 5,1951 ,2001 ,yearly
PERLND, 411,PWATER ,SUPY , 5,1951 ,2001 ,yearly
PERLND, 411,PWATER ,SURO , 5,1951 ,2001 ,yearly
PERLND, 411,PWATER ,SURS , 5,1951 ,2001 ,yearly
PERLND, 411,PWATER ,TAET , 5,1951 ,2001 ,yearly
PERLND, 411,PWATER ,UZET , 5,1951 ,2001 ,yearly
PERLND, 411,PWATER ,UZI , 5,1951 ,2001 ,yearly
PERLND, 411,PWATER ,UZS , 5,1951 ,2001 ,yearly
PERLND, 412,PWATER ,AGWET, 5,1951 ,2001 ,yearly
PERLND, 412,PWATER ,AGWI , 5,1951 ,2001 ,yearly
PERLND, 412,PWATER ,AGWO , 5,1951 ,2001 ,yearly
PERLND, 412,PWATER ,AGWS , 5,1951 ,2001 ,yearly
PERLND, 412,PWATER ,BASET, 5,1951 ,2001 ,yearly
PERLND, 412,PWATER ,CEPE , 5,1951 ,2001 ,yearly
PERLND, 412,PWATER ,CEPS , 5,1951 ,2001 ,yearly
PERLND, 412,PWATER ,GWVS , 5,1951 ,2001 ,yearly
PERLND, 412,PWATER ,IFWI , 5,1951 ,2001 ,yearly
PERLND, 412,PWATER ,IFWO , 5,1951 ,2001 ,yearly
PERLND, 412,PWATER ,IFWS , 5,1951 ,2001 ,yearly
PERLND, 412,PWATER ,IGWI , 5,1951 ,2001 ,yearly
PERLND, 412,PWATER ,INFIL, 5,1951 ,2001 ,yearly
PERLND, 412,PWATER ,LZET , 5,1951 ,2001 ,yearly
PERLND, 412,PWATER ,LZI , 5,1951 ,2001 ,yearly
PERLND, 412,PWATER ,LZS , 5,1951 ,2001 ,yearly
PERLND, 412,PWATER ,PERC , 5,1951 ,2001 ,yearly
PERLND, 412,PWATER ,PERO , 5,1951 ,2001 ,yearly
PERLND, 412,PWATER ,PERS , 5,1951 ,2001 ,yearly
PERLND, 412,PWATER ,PET , 5,1951 ,2001 ,yearly
PERLND, 412,PWATER ,SUPY , 5,1951 ,2001 ,yearly
PERLND, 412,PWATER ,SURO , 5,1951 ,2001 ,yearly
PERLND, 412,PWATER ,SURS , 5,1951 ,2001 ,yearly
PERLND, 412,PWATER ,TAET , 5,1951 ,2001 ,yearly
PERLND, 412,PWATER ,UZET , 5,1951 ,2001 ,yearly
PERLND, 412,PWATER ,UZI , 5,1951 ,2001 ,yearly
PERLND, 412,PWATER ,UZS , 5,1951 ,2001 ,yearly
PERLND, 413,PWATER ,AGWET, 5,1951 ,2001 ,yearly
PERLND, 413,PWATER ,AGWI , 5,1951 ,2001 ,yearly
PERLND, 413,PWATER ,AGWO , 5,1951 ,2001 ,yearly
PERLND, 413,PWATER ,AGWS , 5,1951 ,2001 ,yearly
PERLND, 413,PWATER ,BASET, 5,1951 ,2001 ,yearly
PERLND, 413,PWATER ,CEPE , 5,1951 ,2001 ,yearly
PERLND, 413,PWATER ,CEPS , 5,1951 ,2001 ,yearly
PERLND, 413,PWATER ,GWVS , 5,1951 ,2001 ,yearly
PERLND, 413,PWATER ,IFWI , 5,1951 ,2001 ,yearly
PERLND, 413,PWATER ,IFWO , 5,1951 ,2001 ,yearly
PERLND, 413,PWATER ,IFWS , 5,1951 ,2001 ,yearly
PERLND, 413,PWATER ,IGWI , 5,1951 ,2001 ,yearly
PERLND, 413,PWATER ,INFIL, 5,1951 ,2001 ,yearly
PERLND, 413,PWATER ,LZET , 5,1951 ,2001 ,yearly
PERLND, 413,PWATER ,LZI , 5,1951 ,2001 ,yearly
PERLND, 413,PWATER ,LZS , 5,1951 ,2001 ,yearly
PERLND, 413,PWATER ,PERC , 5,1951 ,2001 ,yearly
PERLND, 413,PWATER ,PERO , 5,1951 ,2001 ,yearly
PERLND, 413,PWATER ,PERS , 5,1951 ,2001 ,yearly
PERLND, 413,PWATER ,PET , 5,1951 ,2001 ,yearly
PERLND, 413,PWATER ,SUPY , 5,1951 ,2001 ,yearly
PERLND, 413,PWATER ,SURO , 5,1951 ,2001 ,yearly
PERLND, 413,PWATER ,SURS , 5,1951 ,2001 ,yearly
PERLND, 413,PWATER ,TAET , 5,1951 ,2001 ,yearly
PERLND, 413,PWATER ,UZET , 5,1951 ,2001 ,yearly
PERLND, 413,PWATER ,UZI , 5,1951 ,2001 ,yearly
PERLND, 413,PWATER ,UZS , 5,1951 ,2001 ,yearly
PERLND, 414,PWATER ,AGWET, 5,1951 ,2001 ,yearly
PERLND, 414,PWATER ,AGWI , 5,1951 ,2001 ,yearly
PERLND, 414,PWATER ,AGWO , 5,1951 ,2001 ,yearly
PERLND, 414,PWATER ,AGWS , 5,1951 ,2001 ,yearly
PERLND, 414,PWATER ,BASET, 5,1951 ,2001 ,yearly
PERLND, 414,PWATER ,CEPE , 5,1951 ,2001 ,yearly
PERLND, 414,PWATER ,CEPS , 5,1951 ,2001 ,yearly
PERLND, 414,PWATER ,GWVS , 5,1951 ,2001 ,yearly
PERLND, 414,PWATER ,IFWI , 5,1951 ,2001 ,yearly
PERLND, 414,PWATER ,IFWO , 5,1951 ,2001 ,yearly
PERLND, 414,PWATER ,IFWS , 5,1951 ,2001 ,yearly
PERLND, 414,PWATER ,IGWI , 5,1951 ,2001 ,yearly
PERLND, 414,PWATER ,INFIL, 5,1951 ,2001 ,yearly
PERLND, 414,PWATER ,LZET , 5,1951 ,2001 ,yearly
PERLND, 414,PWATER ,LZI , 5,1951 ,2001 ,yearly
PERLND, 414,PWATER ,LZS , 5,1951 ,2001 ,yearly
PERLND, 414,PWATER ,PERC , 5,1951 ,2001 ,yearly
PERLND, 414,PWATER ,PERO , 5,1951 ,2001 ,yearly
PERLND, 414,PWATER ,PERS , 5,1951 ,2001 ,yearly
PERLND, 414,PWATER ,PET , 5,1951 ,2001 ,yearly
PERLND, 414,PWATER ,SUPY , 5,1951 ,2001 ,yearly
PERLND, 414,PWATER ,SURO , 5,1951 ,2001 ,yearly
PERLND, 414,PWATER ,SURS , 5,1951 ,2001 ,yearly
PERLND, 414,PWATER ,TAET , 5,1951 ,2001 ,yearly
PERLND, 414,PWATER ,UZET , 5,1951 ,2001 ,yearly
PERLND, 414,PWATER ,UZI , 5,1951 ,2001 ,yearly
PERLND, 414,PWATER ,UZS , 5,1951 ,2001 ,yearly
PERLND, 415,PWATER ,AGWET, 5,1951 ,2001 ,yearly
PERLND, 415,PWATER ,AGWI , 5,1951 ,2001 ,yearly
PERLND, 415,PWATER ,AGWO , 5,1951 ,2001 ,yearly
PERLND, 415,PWATER ,AGWS , 5,1951 ,2001 ,yearly
PERLND, 415,PWATER ,BASET, 5,1951 ,2001 ,yearly
PERLND, 415,PWATER ,CEPE , 5,1951 ,2001 ,yearly
PERLND, 415,PWATER ,CEPS , 5,1951 ,2001 ,yearly
PERLND, 415,PWATER ,GWVS , 5,1951 ,2001 ,yearly
PERLND, 415,PWATER ,IFWI , 5,1951 ,2001 ,yearly
PERLND, 415,PWATER ,IFWO , 5,1951 ,2001 ,yearly
PERLND, 415,PWATER ,IFWS , 5,1951 ,2001 ,yearly
PERLND, 415,PWATER ,IGWI , 5,1951 ,2001 ,yearly
PERLND, 415,PWATER ,INFIL, 5,1951 ,2001 ,yearly
PERLND, 415,PWATER ,LZET , 5,1951 ,2001 ,yearly
PERLND, 415,PWATER ,LZI , 5,1951 ,2001 ,yearly
PERLND, 415,PWATER ,LZS , 5,1951 ,2001 ,yearly
PERLND, 415,PWATER ,PERC , 5,1951 ,2001 ,yearly
PERLND, 415,PWATER ,PERO , 5,1951 ,2001 ,yearly
PERLND, 415,PWATER ,PERS , 5,1951 ,2001 ,yearly
PERLND, 415,PWATER ,PET , 5,1951 ,2001 ,yearly
PERLND, 415,PWATER ,SUPY , 5,1951 ,2001 ,yearly
PERLND, 415,PWATER ,SURO , 5,1951 ,2001 ,yearly
PERLND, 415,PWATER ,SURS , 5,1951 ,2001 ,yearly
PERLND, 415,PWATER ,TAET , 5,1951 ,2001 ,yearly
PERLND, 415,PWATER ,UZET , 5,1951 ,2001 ,yearly
PERLND, 415,PWATER ,UZI , 5,1951 ,2001 ,yearly
PERLND, 415,PWATER ,UZS , 5,1951 ,2001 ,yearly
PERLND, 511,PWATER ,AGWET, 5,1951 ,2001 ,yearly
PERLND, 511,PWATER ,AGWI , 5,1951 ,2001 ,yearly
PERLND, 511,PWATER ,AGWO , 5,1951 ,2001 ,yearly
PERLND, 511,PWATER ,AGWS , 5,1951 ,2001 ,yearly
PERLND, 511,PWATER ,BASET, 5,1951 ,2001 ,yearly
PERLND, 511,PWATER ,CEPE , 5,1951 ,2001 ,yearly
PERLND, 511,PWATER ,CEPS , 5,1951 ,2001 ,yearly
PERLND, 511,PWATER ,GWVS , 5,1951 ,2001 ,yearly
PERLND, 511,PWATER ,IFWI , 5,1951 ,2001 ,yearly
PERLND, 511,PWATER ,IFWO , 5,1951 ,2001 ,yearly
PERLND, 511,PWATER ,IFWS , 5,1951 ,2001 ,yearly
PERLND, 511,PWATER ,IGWI , 5,1951 ,2001 ,yearly
PERLND, 511,PWATER ,INFIL, 5,1951 ,2001 ,yearly
PERLND, 511,PWATER ,LZET , 5,1951 ,2001 ,yearly
PERLND, 511,PWATER ,LZI , 5,1951 ,2001 ,yearly
PERLND, 511,PWATER ,LZS , 5,1951 ,2001 ,yearly
PERLND, 511,PWATER ,PERC , 5,1951 ,2001 ,yearly
PERLND, 511,PWATER ,PERO , 5,1951 ,2001 ,yearly
PERLND, 511,PWATER ,PERS , 5,1951 ,2001 ,yearly
PERLND, 511,PWATER ,PET , 5,1951 ,2001 ,yearly
PERLND, 511,PWATER ,SUPY , 5,1951 ,2001 ,yearly
PERLND, 511,PWATER ,SURO , 5,1951 ,2001 ,yearly
PERLND, 511,PWATER ,SURS , 5,1951 ,2001 ,yearly
PERLND, 511,PWATER ,TAET , 5,1951 ,2001 ,yearly
PERLND, 511,PWATER ,UZET , 5,1951 ,2001 ,yearly
PERLND, 511,PWATER ,UZI , 5,1951 ,2001 ,yearly
PERLND, 511,PWATER ,UZS , 5,1951 ,2001 ,yearly
PERLND, 512,PWATER ,AGWET, 5,1951 ,2001 ,yearly
PERLND, 512,PWATER ,AGWI , 5,1951 ,2001 ,yearly
PERLND, 512,PWATER ,AGWO , 5,1951 ,2001 ,yearly
PERLND, 512,PWATER ,AGWS , 5,1951 ,2001 ,yearly
PERLND, 512,PWATER ,BASET, 5,1951 ,2001 ,yearly
PERLND, 512,PWATER ,CEPE , 5,1951 ,2001 ,yearly
PERLND, 512,PWATER ,CEPS , 5,1951 ,2001 ,yearly
PERLND, 512,PWATER ,GWVS , 5,1951 ,2001 ,yearly
PERLND, 512,PWATER ,IFWI , 5,1951 ,2001 ,yearly
PERLND, 512,PWATER ,IFWO , 5,1951 ,2001 ,yearly
PERLND, 512,PWATER ,IFWS , 5,1951 ,2001 ,yearly
PERLND, 512,PWATER ,IGWI , 5,1951 ,2001 ,yearly
PERLND, 512,PWATER ,INFIL, 5,1951 ,2001 ,yearly
PERLND, 512,PWATER ,LZET , 5,1951 ,2001 ,yearly
PERLND, 512,PWATER ,LZI , 5,1951 ,2001 ,yearly
PERLND, 512,PWATER ,LZS , 5,1951 ,2001 ,yearly
PERLND, 512,PWATER ,PERC , 5,1951 ,2001 ,yearly
PERLND, 512,PWATER ,PERO , 5,1951 ,2001 ,yearly
PERLND, 512,PWATER ,PERS , 5,1951 ,2001 ,yearly
PERLND, 512,PWATER ,PET , 5,1951 ,2001 ,yearly
PERLND, 512,PWATER ,SUPY , 5,1951 ,2001 ,yearly
PERLND, 512,PWATER ,SURO , 5,1951 ,2001 ,yearly
PERLND, 512,PWATER ,SURS , 5,1951 ,2001 ,yearly
PERLND, 512,PWATER ,TAET , 5,1951 ,2001 ,yearly
PERLND, 512,PWATER ,UZET , 5,1951 ,2001 ,yearly
PERLND, 512,PWATER ,UZI , 5,1951 ,2001 ,yearly
PERLND, 512,PWATER ,UZS , 5,1951 ,2001 ,yearly
PERLND, 513,PWATER ,AGWET, 5,1951 ,2001 ,yearly
PERLND, 513,PWATER ,AGWI , 5,1951 ,2001 ,yearly
PERLND, 513,PWATER ,AGWO , 5,1951 ,2001 ,yearly
PERLND, 513,PWATER ,AGWS , 5,1951 ,2001 ,yearly
PERLND, 513,PWATER ,BASET, 5,1951 ,2001 ,yearly
PERLND, 513,PWATER ,CEPE , 5,1951 ,2001 ,yearly
PERLND, 513,PWATER ,CEPS , 5,1951 ,2001 ,yearly
PERLND, 513,PWATER ,GWVS , 5,1951 ,2001 ,yearly
PERLND, 513,PWATER ,IFWI , 5,1951 ,2001 ,yearly
PERLND, 513,PWATER ,IFWO , 5,1951 ,2001 ,yearly
PERLND, 513,PWATER ,IFWS , 5,1951 ,2001 ,yearly
PERLND, 513,PWATER ,IGWI , 5,1951 ,2001 ,yearly
PERLND, 513,PWATER ,INFIL, 5,1951 ,2001 ,yearly
PERLND, 513,PWATER ,LZET , 5,1951 ,2001 ,yearly
PERLND, 513,PWATER ,LZI , 5,1951 ,2001 ,yearly
PERLND, 513,PWATER ,LZS , 5,1951 ,2001 ,yearly
PERLND, 513,PWATER ,PERC , 5,1951 ,2001 ,yearly
PERLND, 513,PWATER ,PERO , 5,1951 ,2001 ,yearly
PERLND, 513,PWATER ,PERS , 5,1951 ,2001 ,yearly
PERLND, 513,PWATER ,PET , 5,1951 ,2001 ,yearly
PERLND, 513,PWATER ,SUPY , 5,1951 ,2001 ,yearly
PERLND, 513,PWATER ,SURO , 5,1951 ,2001 ,yearly
PERLND, 513,PWATER ,SURS , 5,1951 ,2001 ,yearly
PERLND, 513,PWATER ,TAET , 5,1951 ,2001 ,yearly
PERLND, 513,PWATER ,UZET , 5,1951 ,2001 ,yearly
PERLND, 513,PWATER ,UZI , 5,1951 ,2001 ,yearly
PERLND, 513,PWATER ,UZS , 5,1951 ,2001 ,yearly
PERLND, 514,PWATER ,AGWET, 5,1951 ,2001 ,yearly
PERLND, 514,PWATER ,AGWI , 5,1951 ,2001 ,yearly
PERLND, 514,PWATER ,AGWO , 5,1951 ,2001 ,yearly
PERLND, 514,PWATER ,AGWS , 5,1951 ,2001 ,yearly
PERLND, 514,PWATER ,BASET, 5,1951 ,2001 ,yearly
PERLND, 514,PWATER ,CEPE , 5,1951 ,2001 ,yearly
PERLND, 514,PWATER ,CEPS , 5,1951 ,2001 ,yearly
PERLND, 514,PWATER ,GWVS , 5,1951 ,2001 ,yearly
PERLND, 514,PWATER ,IFWI , 5,1951 ,2001 ,yearly
PERLND, 514,PWATER ,IFWO , 5,1951 ,2001 ,yearly
PERLND, 514,PWATER ,IFWS , 5,1951 ,2001 ,yearly
PERLND, 514,PWATER ,IGWI , 5,1951 ,2001 ,yearly
PERLND, 514,PWATER ,INFIL, 5,1951 ,2001 ,yearly
PERLND, 514,PWATER ,LZET , 5,1951 ,2001 ,yearly
PERLND, 514,PWATER ,LZI , 5,1951 ,2001 ,yearly
PERLND, 514,PWATER ,LZS , 5,1951 ,2001 ,yearly
PERLND, 514,PWATER ,PERC , 5,1951 ,2001 ,yearly
PERLND, 514,PWATER ,PERO , 5,1951 ,2001 ,yearly
PERLND, 514,PWATER ,PERS , 5,1951 ,2001 ,yearly
PERLND, 514,PWATER ,PET , 5,1951 ,2001 ,yearly
PERLND, 514,PWATER ,SUPY , 5,1951 ,2001 ,yearly
PERLND, 514,PWATER ,SURO , 5,1951 ,2001 ,yearly
PERLND, 514,PWATER ,SURS , 5,1951 ,2001 ,yearly
PERLND, 514,PWATER ,TAET , 5,1951 ,2001 ,yearly
PERLND, 514,PWATER ,UZET , 5,1951 ,2001 ,yearly
PERLND, 514,PWATER ,UZI , 5,1951 ,2001 ,yearly
PERLND, 514,PWATER ,UZS , 5,1951 ,2001 ,yearly
PERLND, 515,PWATER ,AGWET, 5,1951 ,2001 ,yearly
PERLND, 515,PWATER ,AGWI , 5,1951 ,2001 ,yearly
PERLND, 515,PWATER ,AGWO , 5,1951 ,2001 ,yearly
PERLND, 515,PWATER ,AGWS , 5,1951 ,2001 ,yearly
PERLND, 515,PWATER ,BASET, 5,1951 ,2001 ,yearly
PERLND, 515,PWATER ,CEPE , 5,1951 ,2001 ,yearly
PERLND, 515,PWATER ,CEPS , 5,1951 ,2001 ,yearly
PERLND, 515,PWATER ,GWVS , 5,1951 ,2001 ,yearly
PERLND, 515,PWATER ,IFWI , 5,1951 ,2001 ,yearly
PERLND, 515,PWATER ,IFWO , 5,1951 ,2001 ,yearly
PERLND, 515,PWATER ,IFWS , 5,1951 ,2001 ,yearly
PERLND, 515,PWATER ,IGWI , 5,1951 ,2001 ,yearly
PERLND, 515,PWATER ,INFIL, 5,1951 ,2001 ,yearly
PERLND, 515,PWATER ,LZET , 5,1951 ,2001 ,yearly
PERLND, 515,PWATER ,LZI , 5,1951 ,2001 ,yearly
PERLND, 515,PWATER ,LZS , 5,1951 ,2001 ,yearly
PERLND, 515,PWATER ,PERC , 5,1951 ,2001 ,yearly
PERLND, 515,PWATER ,PERO , 5,1951 ,2001 ,yearly
PERLND, 515,PWATER ,PERS , 5,1951 ,2001 ,yearly
PERLND, 515,PWATER ,PET , 5,1951 ,2001 ,yearly
PERLND, 515,PWATER ,SUPY , 5,1951 ,2001 ,yearly
PERLND, 515,PWATER ,SURO , 5,1951 ,2001 ,yearly
PERLND, 515,PWATER ,SURS , 5,1951 ,2001 ,yearly
PERLND, 515,PWATER ,TAET , 5,1951 ,2001 ,yearly
PERLND, 515,PWATER ,UZET , 5,1951 ,2001 ,yearly
PERLND, 515,PWATER ,UZI , 5,1951 ,2001 ,yearly
PERLND, 515,PWATER ,UZS , 5,1951 ,2001 ,yearly
PERLND, 611,PWATER ,AGWET, 5,1951 ,2001 ,yearly
PERLND, 611,PWATER ,AGWI , 5,1951 ,2001 ,yearly
PERLND, 611,PWATER ,AGWO , 5,1951 ,2001 ,yearly
PERLND, 611,PWATER ,AGWS , 5,1951 ,2001 ,yearly
PERLND, 611,PWATER ,BASET, 5,1951 ,2001 ,yearly
PERLND, 611,PWATER ,CEPE , 5,1951 ,2001 ,yearly
PERLND, 611,PWATER ,CEPS , 5,1951 ,2001 ,yearly
PERLND, 611,PWATER ,GWVS , 5,1951 ,2001 ,yearly
PERLND, 611,PWATER ,IFWI , 5,1951 ,2001 ,yearly
PERLND, 611,PWATER ,IFWO , 5,1951 ,2001 ,yearly
PERLND, 611,PWATER ,IFWS , 5,1951 ,2001 ,yearly
PERLND, 611,PWATER ,IGWI , 5,1951 ,2001 ,yearly
PERLND, 611,PWATER ,INFIL, 5,1951 ,2001 ,yearly
PERLND, 611,PWATER ,LZET , 5,1951 ,2001 ,yearly
PERLND, 611,PWATER ,LZI , 5,1951 ,2001 ,yearly
PERLND, 611,PWATER ,LZS , 5,1951 ,2001 ,yearly
PERLND, 611,PWATER ,PERC , 5,1951 ,2001 ,yearly
PERLND, 611,PWATER ,PERO , 5,1951 ,2001 ,yearly
PERLND, 611,PWATER ,PERS , 5,1951 ,2001 ,yearly
PERLND, 611,PWATER ,PET , 5,1951 ,2001 ,yearly
PERLND, 611,PWATER ,SUPY , 5,1951 ,2001 ,yearly
PERLND, 611,PWATER ,SURO , 5,1951 ,2001 ,yearly
PERLND, 611,PWATER ,SURS , 5,1951 ,2001 ,yearly
PERLND, 611,PWATER ,TAET , 5,1951 ,2001 ,yearly
PERLND, 611,PWATER ,UZET , 5,1951 ,2001 ,yearly
PERLND, 611,PWATER ,UZI , 5,1951 ,2001 ,yearly
PERLND, 611,PWATER ,UZS , 5,1951 ,2001 ,yearly
PERLND, 612,PWATER ,AGWET, 5,1951 ,2001 ,yearly
PERLND, 612,PWATER ,AGWI , 5,1951 ,2001 ,yearly
PERLND, 612,PWATER ,AGWO , 5,1951 ,2001 ,yearly
PERLND, 612,PWATER ,AGWS , 5,1951 ,2001 ,yearly
PERLND, 612,PWATER ,BASET, 5,1951 ,2001 ,yearly
PERLND, 612,PWATER ,CEPE , 5,1951 ,2001 ,yearly
PERLND, 612,PWATER ,CEPS , 5,1951 ,2001 ,yearly
PERLND, 612,PWATER ,GWVS , 5,1951 ,2001 ,yearly
PERLND, 612,PWATER ,IFWI , 5,1951 ,2001 ,yearly
PERLND, 612,PWATER ,IFWO , 5,1951 ,2001 ,yearly
PERLND, 612,PWATER ,IFWS , 5,1951 ,2001 ,yearly
PERLND, 612,PWATER ,IGWI , 5,1951 ,2001 ,yearly
PERLND, 612,PWATER ,INFIL, 5,1951 ,2001 ,yearly
PERLND, 612,PWATER ,LZET , 5,1951 ,2001 ,yearly
PERLND, 612,PWATER ,LZI , 5,1951 ,2001 ,yearly
PERLND, 612,PWATER ,LZS , 5,1951 ,2001 ,yearly
PERLND, 612,PWATER ,PERC , 5,1951 ,2001 ,yearly
PERLND, 612,PWATER ,PERO , 5,1951 ,2001 ,yearly
PERLND, 612,PWATER ,PERS , 5,1951 ,2001 ,yearly
PERLND, 612,PWATER ,PET , 5,1951 ,2001 ,yearly
PERLND, 612,PWATER ,SUPY , 5,1951 ,2001 ,yearly
PERLND, 612,PWATER ,SURO , 5,1951 ,2001 ,yearly
PERLND, 612,PWATER ,SURS , 5,1951 ,2001 ,yearly
PERLND, 612,PWATER ,TAET , 5,1951 ,2001 ,yearly
PERLND, 612,PWATER ,UZET , 5,1951 ,2001 ,yearly
PERLND, 612,PWATER ,UZI , 5,1951 ,2001 ,yearly
PERLND, 612,PWATER ,UZS , 5,1951 ,2001 ,yearly
PERLND, 613,PWATER ,AGWET, 5,1951 ,2001 ,yearly
PERLND, 613,PWATER ,AGWI , 5,1951 ,2001 ,yearly
PERLND, 613,PWATER ,AGWO , 5,1951 ,2001 ,yearly
PERLND, 613,PWATER ,AGWS , 5,1951 ,2001 ,yearly
PERLND, 613,PWATER ,BASET, 5,1951 ,2001 ,yearly
PERLND, 613,PWATER ,CEPE , 5,1951 ,2001 ,yearly
PERLND, 613,PWATER ,CEPS , 5,1951 ,2001 ,yearly
PERLND, 613,PWATER ,GWVS , 5,1951 ,2001 ,yearly
PERLND, 613,PWATER ,IFWI , 5,1951 ,2001 ,yearly
PERLND, 613,PWATER ,IFWO , 5,1951 ,2001 ,yearly
PERLND, 613,PWATER ,IFWS , 5,1951 ,2001 ,yearly
PERLND, 613,PWATER ,IGWI , 5,1951 ,2001 ,yearly
PERLND, 613,PWATER ,INFIL, 5,1951 ,2001 ,yearly
PERLND, 613,PWATER ,LZET , 5,1951 ,2001 ,yearly
PERLND, 613,PWATER ,LZI , 5,1951 ,2001 ,yearly
PERLND, 613,PWATER ,LZS , 5,1951 ,2001 ,yearly
PERLND, 613,PWATER ,PERC , 5,1951 ,2001 ,yearly
PERLND, 613,PWATER ,PERO , 5,1951 ,2001 ,yearly
PERLND, 613,PWATER ,PERS , 5,1951 ,2001 ,yearly
PERLND, 613,PWATER ,PET , 5,1951 ,2001 ,yearly
PERLND, 613,PWATER ,SUPY , 5,1951 ,2001 ,yearly
PERLND, 613,PWATER ,SURO , 5,1951 ,2001 ,yearly
PERLND, 613,PWATER ,SURS , 5,1951 ,2001 ,yearly
PERLND, 613,PWATER ,TAET , 5,1951 ,2001 ,yearly
PERLND, 613,PWATER ,UZET , 5,1951 ,2001 ,yearly
PERLND, 613,PWATER ,UZI , 5,1951 ,2001 ,yearly
PERLND, 613,PWATER ,UZS , 5,1951 ,2001 ,yearly
PERLND, 614,PWATER ,AGWET, 5,1951 ,2001 ,yearly
PERLND, 614,PWATER ,AGWI , 5,1951 ,2001 ,yearly
PERLND, 614,PWATER ,AGWO , 5,1951 ,2001 ,yearly
PERLND, 614,PWATER ,AGWS , 5,1951 ,2001 ,yearly
PERLND, 614,PWATER ,BASET, 5,1951 ,2001 ,yearly
PERLND, 614,PWATER ,CEPE , 5,1951 ,2001 ,yearly
PERLND, 614,PWATER ,CEPS , 5,1951 ,2001 ,yearly
PERLND, 614,PWATER ,GWVS , 5,1951 ,2001 ,yearly
PERLND, 614,PWATER ,IFWI , 5,1951 ,2001 ,yearly
PERLND, 614,PWATER ,IFWO , 5,1951 ,2001 ,yearly
PERLND, 614,PWATER ,IFWS , 5,1951 ,2001 ,yearly
PERLND, 614,PWATER ,IGWI , 5,1951 ,2001 ,yearly
PERLND, 614,PWATER ,INFIL, 5,1951 ,2001 ,yearly
PERLND, 614,PWATER ,LZET , 5,1951 ,2001 ,yearly
PERLND, 614,PWATER ,LZI , 5,1951 ,2001 ,yearly
PERLND, 614,PWATER ,LZS , 5,1951 ,2001 ,yearly
PERLND, 614,PWATER ,PERC , 5,1951 ,2001 ,yearly
PERLND, 614,PWATER ,PERO , 5,1951 ,2001 ,yearly
PERLND, 614,PWATER ,PERS , 5,1951 ,2001 ,yearly
PERLND, 614,PWATER ,PET , 5,1951 ,2001 ,yearly
PERLND, 614,PWATER ,SUPY , 5,1951 ,2001 ,yearly
PERLND, 614,PWATER ,SURO , 5,1951 ,2001 ,yearly
PERLND, 614,PWATER ,SURS , 5,1951 ,2001 ,yearly
PERLND, 614,PWATER ,TAET , 5,1951 ,2001 ,yearly
PERLND, 614,PWATER ,UZET , 5,1951 ,2001 ,yearly
PERLND, 614,PWATER ,UZI , 5,1951 ,2001 ,yearly
PERLND, 614,PWATER ,UZS , 5,1951 ,2001 ,yearly
PERLND, 615,PWATER ,AGWET, 5,1951 ,2001 ,yearly
PERLND, 615,PWATER ,AGWI , 5,1951 ,2001 ,yearly
PERLND, 615,PWATER ,AGWO , 5,1951 ,2001 ,yearly
PERLND, 615,PWATER ,AGWS , 5,1951 ,2001 ,yearly
PERLND, 615,PWATER ,BASET, 5,1951 ,2001 ,yearly
PERLND, 615,PWATER ,CEPE , 5,1951 ,2001 ,yearly
PERLND, 615,PWATER ,CEPS , 5,1951 ,2001 ,yearly
PERLND, 615,PWATER ,GWVS , 5,1951 ,2001 ,yearly
PERLND, 615,PWATER ,IFWI , 5,1951 ,2001 ,yearly
PERLND, 615,PWATER ,IFWO , 5,1951 ,2001 ,yearly
PERLND, 615,PWATER ,IFWS , 5,1951 ,2001 ,yearly
PERLND, 615,PWATER ,IGWI , 5,1951 ,2001 ,yearly
PERLND, 615,PWATER ,INFIL, 5,1951 ,2001 ,yearly
PERLND, 615,PWATER ,LZET , 5,1951 ,2001 ,yearly
PERLND, 615,PWATER ,LZI , 5,1951 ,2001 ,yearly
PERLND, 615,PWATER ,LZS , 5,1951 ,2001 ,yearly
PERLND, 615,PWATER ,PERC , 5,1951 ,2001 ,yearly
PERLND, 615,PWATER ,PERO , 5,1951 ,2001 ,yearly
PERLND, 615,PWATER ,PERS , 5,1951 ,2001 ,yearly
PERLND, 615,PWATER ,PET , 5,1951 ,2001 ,yearly
PERLND, 615,PWATER ,SUPY , 5,1951 ,2001 ,yearly
PERLND, 615,PWATER ,SURO , 5,1951 ,2001 ,yearly
PERLND, 615,PWATER ,SURS , 5,1951 ,2001 ,yearly
PERLND, 615,PWATER ,TAET , 5,1951 ,2001 ,yearly
PERLND, 615,PWATER ,UZET , 5,1951 ,2001 ,yearly
PERLND, 615,PWATER ,UZI , 5,1951 ,2001 ,yearly
PERLND, 615,PWATER ,UZS , 5,1951 ,2001 ,yearly
PERLND, 711,PWATER ,AGWET, 5,1951 ,2001 ,yearly
PERLND, 711,PWATER ,AGWI , 5,1951 ,2001 ,yearly
PERLND, 711,PWATER ,AGWO , 5,1951 ,2001 ,yearly
PERLND, 711,PWATER ,AGWS , 5,1951 ,2001 ,yearly
PERLND, 711,PWATER ,BASET, 5,1951 ,2001 ,yearly
PERLND, 711,PWATER ,CEPE , 5,1951 ,2001 ,yearly
PERLND, 711,PWATER ,CEPS , 5,1951 ,2001 ,yearly
PERLND, 711,PWATER ,GWVS , 5,1951 ,2001 ,yearly
PERLND, 711,PWATER ,IFWI , 5,1951 ,2001 ,yearly
PERLND, 711,PWATER ,IFWO , 5,1951 ,2001 ,yearly
PERLND, 711,PWATER ,IFWS , 5,1951 ,2001 ,yearly
PERLND, 711,PWATER ,IGWI , 5,1951 ,2001 ,yearly
PERLND, 711,PWATER ,INFIL, 5,1951 ,2001 ,yearly
PERLND, 711,PWATER ,LZET , 5,1951 ,2001 ,yearly
PERLND, 711,PWATER ,LZI , 5,1951 ,2001 ,yearly
PERLND, 711,PWATER ,LZS , 5,1951 ,2001 ,yearly
PERLND, 711,PWATER ,PERC , 5,1951 ,2001 ,yearly
PERLND, 711,PWATER ,PERO , 5,1951 ,2001 ,yearly
PERLND, 711,PWATER ,PERS , 5,1951 ,2001 ,yearly
PERLND, 711,PWATER ,PET , 5,1951 ,2001 ,yearly
PERLND, 711,PWATER ,SUPY , 5,1951 ,2001 ,yearly
PERLND, 711,PWATER ,SURO , 5,1951 ,2001 ,yearly
PERLND, 711,PWATER ,SURS , 5,1951 ,2001 ,yearly
PERLND, 711,PWATER ,TAET , 5,1951 ,2001 ,yearly
PERLND, 711,PWATER ,UZET , 5,1951 ,2001 ,yearly
PERLND, 711,PWATER ,UZI , 5,1951 ,2001 ,yearly
PERLND, 711,PWATER ,UZS , 5,1951 ,2001 ,yearly
PERLND, 712,PWATER ,AGWET, 5,1951 ,2001 ,yearly
PERLND, 712,PWATER ,AGWI , 5,1951 ,2001 ,yearly
PERLND, 712,PWATER ,AGWO , 5,1951 ,2001 ,yearly
PERLND, 712,PWATER ,AGWS , 5,1951 ,2001 ,yearly
PERLND, 712,PWATER ,BASET, 5,1951 ,2001 ,yearly
PERLND, 712,PWATER ,CEPE , 5,1951 ,2001 ,yearly
PERLND, 712,PWATER ,CEPS , 5,1951 ,2001 ,yearly
PERLND, 712,PWATER ,GWVS , 5,1951 ,2001 ,yearly
PERLND, 712,PWATER ,IFWI , 5,1951 ,2001 ,yearly
PERLND, 712,PWATER ,IFWO , 5,1951 ,2001 ,yearly
PERLND, 712,PWATER ,IFWS , 5,1951 ,2001 ,yearly
PERLND, 712,PWATER ,IGWI , 5,1951 ,2001 ,yearly
PERLND, 712,PWATER ,INFIL, 5,1951 ,2001 ,yearly
PERLND, 712,PWATER ,LZET , 5,1951 ,2001 ,yearly
PERLND, 712,PWATER ,LZI , 5,1951 ,2001 ,yearly
PERLND, 712,PWATER ,LZS , 5,1951 ,2001 ,yearly
PERLND, 712,PWATER ,PERC , 5,1951 ,2001 ,yearly
PERLND, 712,PWATER ,PERO , 5,1951 ,2001 ,yearly
PERLND, 712,PWATER ,PERS , 5,1951 ,2001 ,yearly
PERLND, 712,PWATER ,PET , 5,1951 ,2001 ,yearly
PERLND, 712,PWATER ,SUPY , 5,1951 ,2001 ,yearly
PERLND, 712,PWATER ,SURO , 5,1951 ,2001 ,yearly
PERLND, 712,PWATER ,SURS , 5,1951 ,2001 ,yearly
PERLND, 712,PWATER ,TAET , 5,1951 ,2001 ,yearly
PERLND, 712,PWATER ,UZET , 5,1951 ,2001 ,yearly
PERLND, 712,PWATER ,UZI , 5,1951 ,2001 ,yearly
PERLND, 712,PWATER ,UZS , 5,1951 ,2001 ,yearly
PERLND, 713,PWATER ,AGWET, 5,1951 ,2001 ,yearly
PERLND, 713,PWATER ,AGWI , 5,1951 ,2001 ,yearly
PERLND, 713,PWATER ,AGWO , 5,1951 ,2001 ,yearly
PERLND, 713,PWATER ,AGWS , 5,1951 ,2001 ,yearly
PERLND, 713,PWATER ,BASET, 5,1951 ,2001 ,yearly
PERLND, 713,PWATER ,CEPE , 5,1951 ,2001 ,yearly
PERLND, 713,PWATER ,CEPS , 5,1951 ,2001 ,yearly
PERLND, 713,PWATER ,GWVS , 5,1951 ,2001 ,yearly
PERLND, 713,PWATER ,IFWI , 5,1951 ,2001 ,yearly
PERLND, 713,PWATER ,IFWO , 5,1951 ,2001 ,yearly
PERLND, 713,PWATER ,IFWS , 5,1951 ,2001 ,yearly
PERLND, 713,PWATER ,IGWI , 5,1951 ,2001 ,yearly
PERLND, 713,PWATER ,INFIL, 5,1951 ,2001 ,yearly
PERLND, 713,PWATER ,LZET , 5,1951 ,2001 ,yearly
PERLND, 713,PWATER ,LZI , 5,1951 ,2001 ,yearly
PERLND, 713,PWATER ,LZS , 5,1951 ,2001 ,yearly
PERLND, 713,PWATER ,PERC , 5,1951 ,2001 ,yearly
PERLND, 713,PWATER ,PERO , 5,1951 ,2001 ,yearly
PERLND, 713,PWATER ,PERS , 5,1951 ,2001 ,yearly
PERLND, 713,PWATER ,PET , 5,1951 ,2001 ,yearly
PERLND, 713,PWATER ,SUPY , 5,1951 ,2001 ,yearly
PERLND, 713,PWATER ,SURO , 5,1951 ,2001 ,yearly
PERLND, 713,PWATER ,SURS , 5,1951 ,2001 ,yearly
PERLND, 713,PWATER ,TAET , 5,1951 ,2001 ,yearly
PERLND, 713,PWATER ,UZET , 5,1951 ,2001 ,yearly
PERLND, 713,PWATER ,UZI , 5,1951 ,2001 ,yearly
PERLND, 713,PWATER ,UZS , 5,1951 ,2001 ,yearly
PERLND, 714,PWATER ,AGWET, 5,1951 ,2001 ,yearly
PERLND, 714,PWATER ,AGWI , 5,1951 ,2001 ,yearly
PERLND, 714,PWATER ,AGWO , 5,1951 ,2001 ,yearly
PERLND, 714,PWATER ,AGWS , 5,1951 ,2001 ,yearly
PERLND, 714,PWATER ,BASET, 5,1951 ,2001 ,yearly
PERLND, 714,PWATER ,CEPE , 5,1951 ,2001 ,yearly
PERLND, 714,PWATER ,CEPS , 5,1951 ,2001 ,yearly
PERLND, 714,PWATER ,GWVS , 5,1951 ,2001 ,yearly
PERLND, 714,PWATER ,IFWI , 5,1951 ,2001 ,yearly
PERLND, 714,PWATER ,IFWO , 5,1951 ,2001 ,yearly
PERLND, 714,PWATER ,IFWS , 5,1951 ,2001 ,yearly
PERLND, 714,PWATER ,IGWI , 5,1951 ,2001 ,yearly
PERLND, 714,PWATER ,INFIL, 5,1951 ,2001 ,yearly
PERLND, 714,PWATER ,LZET , 5,1951 ,2001 ,yearly
PERLND, 714,PWATER ,LZI , 5,1951 ,2001 ,yearly
PERLND, 714,PWATER ,LZS , 5,1951 ,2001 ,yearly
PERLND, 714,PWATER ,PERC , 5,1951 ,2001 ,yearly
PERLND, 714,PWATER ,PERO , 5,1951 ,2001 ,yearly
PERLND, 714,PWATER ,PERS , 5,1951 ,2001 ,yearly
PERLND, 714,PWATER ,PET , 5,1951 ,2001 ,yearly
PERLND, 714,PWATER ,SUPY , 5,1951 ,2001 ,yearly
PERLND, 714,PWATER ,SURO , 5,1951 ,2001 ,yearly
PERLND, 714,PWATER ,SURS , 5,1951 ,2001 ,yearly
PERLND, 714,PWATER ,TAET , 5,1951 ,2001 ,yearly
PERLND, 714,PWATER ,UZET , 5,1951 ,2001 ,yearly
PERLND, 714,PWATER ,UZI , 5,1951 ,2001 ,yearly
PERLND, 714,PWATER ,UZS , 5,1951 ,2001 ,yearly
PERLND, 715,PWATER ,AGWET, 5,1951 ,2001 ,yearly
PERLND, 715,PWATER ,AGWI , 5,1951 ,2001 ,yearly
PERLND, 715,PWATER ,AGWO , 5,1951 ,2001 ,yearly
PERLND, 715,PWATER ,AGWS , 5,1951 ,2001 ,yearly
PERLND, 715,PWATER ,BASET, 5,1951 ,2001 ,yearly
PERLND, 715,PWATER ,CEPE , 5,1951 ,2001 ,yearly
PERLND, 715,PWATER ,CEPS , 5,1951 ,2001 ,yearly
PERLND, 715,PWATER ,GWVS , 5,1951 ,2001 ,yearly
PERLND, 715,PWATER ,IFWI , 5,1951 ,2001 ,yearly
PERLND, 715,PWATER ,IFWO , 5,1951 ,2001 ,yearly
PERLND, 715,PWATER ,IFWS , 5,1951 ,2001 ,yearly
PERLND, 715,PWATER ,IGWI , 5,1951 ,2001 ,yearly
PERLND, 715,PWATER ,INFIL, 5,1951 ,2001 ,yearly
PERLND, 715,PWATER ,LZET , 5,1951 ,2001 ,yearly
PERLND, 715,PWATER ,LZI , 5,1951 ,2001 ,yearly
PERLND, 715,PWATER ,LZS , 5,1951 ,2001 ,yearly
PERLND, 715,PWATER ,PERC , 5,1951 ,2001 ,yearly
PERLND, 715,PWATER ,PERO , 5,1951 ,2001 ,yearly
PERLND, 715,PWATER ,PERS , 5,1951 ,2001 ,yearly
PERLND, 715,PWATER ,PET , 5,1951 ,2001 ,yearly
PERLND, 715,PWATER ,SUPY , 5,1951 ,2001 ,yearly
PERLND, 715,PWATER ,SURO , 5,1951 ,2001 ,yearly
PERLND, 715,PWATER ,SURS , 5,1951 ,2001 ,yearly
PERLND, 715,PWATER ,TAET , 5,1951 ,2001 ,yearly
PERLND, 715,PWATER ,UZET , 5,1951 ,2001 ,yearly
PERLND, 715,PWATER ,UZI , 5,1951 ,2001 ,yearly
PERLND, 715,PWATER ,UZS , 5,1951 ,2001 ,yearly
PERLND, 811,PWATER ,AGWET, 5,1951 ,2001 ,yearly
PERLND, 811,PWATER ,AGWI , 5,1951 ,2001 ,yearly
PERLND, 811,PWATER ,AGWO , 5,1951 ,2001 ,yearly
PERLND, 811,PWATER ,AGWS , 5,1951 ,2001 ,yearly
PERLND, 811,PWATER ,BASET, 5,1951 ,2001 ,yearly
PERLND, 811,PWATER ,CEPE , 5,1951 ,2001 ,yearly
PERLND, 811,PWATER ,CEPS , 5,1951 ,2001 ,yearly
PERLND, 811,PWATER ,GWVS , 5,1951 ,2001 ,yearly
PERLND, 811,PWATER ,IFWI , 5,1951 ,2001 ,yearly
PERLND, 811,PWATER ,IFWO , 5,1951 ,2001 ,yearly
PERLND, 811,PWATER ,IFWS , 5,1951 ,2001 ,yearly
PERLND, 811,PWATER ,IGWI , 5,1951 ,2001 ,yearly
PERLND, 811,PWATER ,INFIL, 5,1951 ,2001 ,yearly
PERLND, 811,PWATER ,LZET , 5,1951 ,2001 ,yearly
PERLND, 811,PWATER ,LZI , 5,1951 ,2001 ,yearly
PERLND, 811,PWATER ,LZS , 5,1951 ,2001 ,yearly
PERLND, 811,PWATER ,PERC , 5,1951 ,2001 ,yearly
PERLND, 811,PWATER ,PERO , 5,1951 ,2001 ,yearly
PERLND, 811,PWATER ,PERS , 5,1951 ,2001 ,yearly
PERLND, 811,PWATER ,PET , 5,1951 ,2001 ,yearly
PERLND, 811,PWATER ,SUPY , 5,1951 ,2001 ,yearly
PERLND, 811,PWATER ,SURO , 5,1951 ,2001 ,yearly
PERLND, 811,PWATER ,SURS , 5,1951 ,2001 ,yearly
PERLND, 811,PWATER ,TAET , 5,1951 ,2001 ,yearly
PERLND, 811,PWATER ,UZET , 5,1951 ,2001 ,yearly
PERLND, 811,PWATER ,UZI , 5,1951 ,2001 ,yearly
PERLND, 811,PWATER ,UZS , 5,1951 ,2001 ,yearly
PERLND, 812,PWATER ,AGWET, 5,1951 ,2001 ,yearly
PERLND, 812,PWATER ,AGWI , 5,1951 ,2001 ,yearly
PERLND, 812,PWATER ,AGWO , 5,1951 ,2001 ,yearly
PERLND, 812,PWATER ,AGWS , 5,1951 ,2001 ,yearly
PERLND, 812,PWATER ,BASET, 5,1951 ,2001 ,yearly
PERLND, 812,PWATER ,CEPE , 5,1951 ,2001 ,yearly
PERLND, 812,PWATER ,CEPS , 5,1951 ,2001 ,yearly
PERLND, 812,PWATER ,GWVS , 5,1951 ,2001 ,yearly
PERLND, 812,PWATER ,IFWI , 5,1951 ,2001 ,yearly
PERLND, 812,PWATER ,IFWO , 5,1951 ,2001 ,yearly
PERLND, 812,PWATER ,IFWS , 5,1951 ,2001 ,yearly
PERLND, 812,PWATER ,IGWI , 5,1951 ,2001 ,yearly
PERLND, 812,PWATER ,INFIL, 5,1951 ,2001 ,yearly
PERLND, 812,PWATER ,LZET , 5,1951 ,2001 ,yearly
PERLND, 812,PWATER ,LZI , 5,1951 ,2001 ,yearly
PERLND, 812,PWATER ,LZS , 5,1951 ,2001 ,yearly
PERLND, 812,PWATER ,PERC , 5,1951 ,2001 ,yearly
PERLND, 812,PWATER ,PERO , 5,1951 ,2001 ,yearly
PERLND, 812,PWATER ,PERS , 5,1951 ,2001 ,yearly
PERLND, 812,PWATER ,PET , 5,1951 ,2001 ,yearly
PERLND, 812,PWATER ,SUPY , 5,1951 ,2001 ,yearly
PERLND, 812,PWATER ,SURO , 5,1951 ,2001 ,yearly
PERLND, 812,PWATER ,SURS , 5,1951 ,2001 ,yearly
PERLND, 812,PWATER ,TAET , 5,1951 ,2001 ,yearly
PERLND, 812,PWATER ,UZET , 5,1951 ,2001 ,yearly
PERLND, 812,PWATER ,UZI , 5,1951 ,2001 ,yearly
PERLND, 812,PWATER ,UZS , 5,1951 ,2001 ,yearly
PERLND, 813,PWATER ,AGWET, 5,1951 ,2001 ,yearly
PERLND, 813,PWATER ,AGWI , 5,1951 ,2001 ,yearly
PERLND, 813,PWATER ,AGWO , 5,1951 ,2001 ,yearly
PERLND, 813,PWATER ,AGWS , 5,1951 ,2001 ,yearly
PERLND, 813,PWATER ,BASET, 5,1951 ,2001 ,yearly
PERLND, 813,PWATER ,CEPE , 5,1951 ,2001 ,yearly
PERLND, 813,PWATER ,CEPS , 5,1951 ,2001 ,yearly
PERLND, 813,PWATER ,GWVS , 5,1951 ,2001 ,yearly
PERLND, 813,PWATER ,IFWI , 5,1951 ,2001 ,yearly
PERLND, 813,PWATER ,IFWO , 5,1951 ,2001 ,yearly
PERLND, 813,PWATER ,IFWS , 5,1951 ,2001 ,yearly
PERLND, 813,PWATER ,IGWI , 5,1951 ,2001 ,yearly
PERLND, 813,PWATER ,INFIL, 5,1951 ,2001 ,yearly
PERLND, 813,PWATER ,LZET , 5,1951 ,2001 ,yearly
PERLND, 813,PWATER ,LZI , 5,1951 ,2001 ,yearly
PERLND, 813,PWATER ,LZS , 5,1951 ,2001 ,yearly
PERLND, 813,PWATER ,PERC , 5,1951 ,2001 ,yearly
PERLND, 813,PWATER ,PERO , 5,1951 ,2001 ,yearly
PERLND, 813,PWATER ,PERS , 5,1951 ,2001 ,yearly
PERLND, 813,PWATER ,PET , 5,1951 ,2001 ,yearly
PERLND, 813,PWATER ,SUPY , 5,1951 ,2001 ,yearly
PERLND, 813,PWATER ,SURO , 5,1951 ,2001 ,yearly
PERLND, 813,PWATER ,SURS , 5,1951 ,2001 ,yearly
PERLND, 813,PWATER ,TAET , 5,1951 ,2001 ,yearly
PERLND, 813,PWATER ,UZET , 5,1951 ,2001 ,yearly
PERLND, 813,PWATER ,UZI , 5,1951 ,2001 ,yearly
PERLND, 813,PWATER ,UZS , 5,1951 ,2001 ,yearly
PERLND, 814,PWATER ,AGWET, 5,1951 ,2001 ,yearly
PERLND, 814,PWATER ,AGWI , 5,1951 ,2001 ,yearly
PERLND, 814,PWATER ,AGWO , 5,1951 ,2001 ,yearly
PERLND, 814,PWATER ,AGWS , 5,1951 ,2001 ,yearly
PERLND, 814,PWATER ,BASET, 5,1951 ,2001 ,yearly
PERLND, 814,PWATER ,CEPE , 5,1951 ,2001 ,yearly
PERLND, 814,PWATER ,CEPS , 5,1951 ,2001 ,yearly
PERLND, 814,PWATER ,GWVS , 5,1951 ,2001 ,yearly
PERLND, 814,PWATER ,IFWI , 5,1951 ,2001 ,yearly
PERLND, 814,PWATER ,IFWO , 5,1951 ,2001 ,yearly
PERLND, 814,PWATER ,IFWS , 5,1951 ,2001 ,yearly
PERLND, 814,PWATER ,IGWI , 5,1951 ,2001 ,yearly
PERLND, 814,PWATER ,INFIL, 5,1951 ,2001 ,yearly
PERLND, 814,PWATER ,LZET , 5,1951 ,2001 ,yearly
PERLND, 814,PWATER ,LZI , 5,1951 ,2001 ,yearly
PERLND, 814,PWATER ,LZS , 5,1951 ,2001 ,yearly
PERLND, 814,PWATER ,PERC , 5,1951 ,2001 ,yearly
PERLND, 814,PWATER ,PERO , 5,1951 ,2001 ,yearly
PERLND, 814,PWATER ,PERS , 5,1951 ,2001 ,yearly
PERLND, 814,PWATER ,PET , 5,1951 ,2001 ,yearly
PERLND, 814,PWATER ,SUPY , 5,1951 ,2001 ,yearly
PERLND, 814,PWATER ,SURO , 5,1951 ,2001 ,yearly
PERLND, 814,PWATER ,SURS , 5,1951 ,2001 ,yearly
PERLND, 814,PWATER ,TAET , 5,1951 ,2001 ,yearly
PERLND, 814,PWATER ,UZET , 5,1951 ,2001 ,yearly
PERLND, 814,PWATER ,UZI , 5,1951 ,2001 ,yearly
PERLND, 814,PWATER ,UZS , 5,1951 ,2001 ,yearly
PERLND, 815,PWATER ,AGWET, 5,1951 ,2001 ,yearly
PERLND, 815,PWATER ,AGWI , 5,1951 ,2001 ,yearly
PERLND, 815,PWATER ,AGWO , 5,1951 ,2001 ,yearly
PERLND, 815,PWATER ,AGWS , 5,1951 ,2001 ,yearly
PERLND, 815,PWATER ,BASET, 5,1951 ,2001 ,yearly
PERLND, 815,PWATER ,CEPE , 5,1951 ,2001 ,yearly
PERLND, 815,PWATER ,CEPS , 5,1951 ,2001 ,yearly
PERLND, 815,PWATER ,GWVS , 5,1951 ,2001 ,yearly
PERLND, 815,PWATER ,IFWI , 5,1951 ,2001 ,yearly
PERLND, 815,PWATER ,IFWO , 5,1951 ,2001 ,yearly
PERLND, 815,PWATER ,IFWS , 5,1951 ,2001 ,yearly
PERLND, 815,PWATER ,IGWI , 5,1951 ,2001 ,yearly
PERLND, 815,PWATER ,INFIL, 5,1951 ,2001 ,yearly
PERLND, 815,PWATER ,LZET , 5,1951 ,2001 ,yearly
PERLND, 815,PWATER ,LZI , 5,1951 ,2001 ,yearly
PERLND, 815,PWATER ,LZS , 5,1951 ,2001 ,yearly
PERLND, 815,PWATER ,PERC , 5,1951 ,2001 ,yearly
PERLND, 815,PWATER ,PERO , 5,1951 ,2001 ,yearly
PERLND, 815,PWATER ,PERS , 5,1951 ,2001 ,yearly
PERLND, 815,PWATER ,PET , 5,1951 ,2001 ,yearly
PERLND, 815,PWATER ,SUPY , 5,1951 ,2001 ,yearly
PERLND, 815,PWATER ,SURO , 5,1951 ,2001 ,yearly
PERLND, 815,PWATER ,SURS , 5,1951 ,2001 ,yearly
PERLND, 815,PWATER ,TAET , 5,1951 ,2001 ,yearly
PERLND, 815,PWATER ,UZET , 5,1951 ,2001 ,yearly
PERLND, 815,PWATER ,UZI , 5,1951 ,2001 ,yearly
PERLND, 815,PWATER ,UZS , 5,1951 ,2001 ,yearly
PERLND, 822,PWATER ,AGWET, 5,1951 ,2001 ,yearly
PERLND, 822,PWATER ,AGWI , 5,1951 ,2001 ,yearly
PERLND, 822,PWATER ,AGWO , 5,1951 ,2001 ,yearly
PERLND, 822,PWATER ,AGWS , 5,1951 ,2001 ,yearly
PERLND, 822,PWATER ,BASET, 5,1951 ,2001 ,yearly
PERLND, 822,PWATER ,CEPE , 5,1951 ,2001 ,yearly
PERLND, 822,PWATER ,CEPS , 5,1951 ,2001 ,yearly
PERLND, 822,PWATER ,GWVS , 5,1951 ,2001 ,yearly
PERLND, 822,PWATER ,IFWI , 5,1951 ,2001 ,yearly
PERLND, 822,PWATER ,IFWO , 5,1951 ,2001 ,yearly
PERLND, 822,PWATER ,IFWS , 5,1951 ,2001 ,yearly
PERLND, 822,PWATER ,IGWI , 5,1951 ,2001 ,yearly
PERLND, 822,PWATER ,INFIL, 5,1951 ,2001 ,yearly
PERLND, 822,PWATER ,LZET , 5,1951 ,2001 ,yearly
PERLND, 822,PWATER ,LZI , 5,1951 ,2001 ,yearly
PERLND, 822,PWATER ,LZS , 5,1951 ,2001 ,yearly
PERLND, 822,PWATER ,PERC , 5,1951 ,2001 ,yearly
PERLND, 822,PWATER ,PERO , 5,1951 ,2001 ,yearly
PERLND, 822,PWATER ,PERS , 5,1951 ,2001 ,yearly
PERLND, 822,PWATER ,PET , 5,1951 ,2001 ,yearly
PERLND, 822,PWATER ,SUPY , 5,1951 ,2001 ,yearly
PERLND, 822,PWATER ,SURO , 5,1951 ,2001 ,yearly
PERLND, 822,PWATER ,SURS , 5,1951 ,2001 ,yearly
PERLND, 822,PWATER ,TAET , 5,1951 ,2001 ,yearly
PERLND, 822,PWATER ,UZET , 5,1951 ,2001 ,yearly
PERLND, 822,PWATER ,UZI , 5,1951 ,2001 ,yearly
PERLND, 822,PWATER ,UZS , 5,1951 ,2001 ,yearly
PERLND, 823,PWATER ,AGWET, 5,1951 ,2001 ,yearly
PERLND, 823,PWATER ,AGWI , 5,1951 ,2001 ,yearly
PERLND, 823,PWATER ,AGWO , 5,1951 ,2001 ,yearly
PERLND, 823,PWATER ,AGWS , 5,1951 ,2001 ,yearly
PERLND, 823,PWATER ,BASET, 5,1951 ,2001 ,yearly
PERLND, 823,PWATER ,CEPE , 5,1951 ,2001 ,yearly
PERLND, 823,PWATER ,CEPS , 5,1951 ,2001 ,yearly
PERLND, 823,PWATER ,GWVS , 5,1951 ,2001 ,yearly
PERLND, 823,PWATER ,IFWI , 5,1951 ,2001 ,yearly
PERLND, 823,PWATER ,IFWO , 5,1951 ,2001 ,yearly
PERLND, 823,PWATER ,IFWS , 5,1951 ,2001 ,yearly
PERLND, 823,PWATER ,IGWI , 5,1951 ,2001 ,yearly
PERLND, 823,PWATER ,INFIL, 5,1951 ,2001 ,yearly
PERLND, 823,PWATER ,LZET , 5,1951 ,2001 ,yearly
PERLND, 823,PWATER ,LZI , 5,1951 ,2001 ,yearly
PERLND, 823,PWATER ,LZS , 5,1951 ,2001 ,yearly
PERLND, 823,PWATER ,PERC , 5,1951 ,2001 ,yearly
PERLND, 823,PWATER ,PERO , 5,1951 ,2001 ,yearly
PERLND, 823,PWATER ,PERS , 5,1951 ,2001 ,yearly
PERLND, 823,PWATER ,PET , 5,1951 ,2001 ,yearly
PERLND, 823,PWATER ,SUPY , 5,1951 ,2001 ,yearly
PERLND, 823,PWATER ,SURO , 5,1951 ,2001 ,yearly
PERLND, 823,PWATER ,SURS , 5,1951 ,2001 ,yearly
PERLND, 823,PWATER ,TAET , 5,1951 ,2001 ,yearly
PERLND, 823,PWATER ,UZET , 5,1951 ,2001 ,yearly
PERLND, 823,PWATER ,UZI , 5,1951 ,2001 ,yearly
PERLND, 823,PWATER ,UZS , 5,1951 ,2001 ,yearly
PERLND, 824,PWATER ,AGWET, 5,1951 ,2001 ,yearly
PERLND, 824,PWATER ,AGWI , 5,1951 ,2001 ,yearly
PERLND, 824,PWATER ,AGWO , 5,1951 ,2001 ,yearly
PERLND, 824,PWATER ,AGWS , 5,1951 ,2001 ,yearly
PERLND, 824,PWATER ,BASET, 5,1951 ,2001 ,yearly
PERLND, 824,PWATER ,CEPE , 5,1951 ,2001 ,yearly
PERLND, 824,PWATER ,CEPS , 5,1951 ,2001 ,yearly
PERLND, 824,PWATER ,GWVS , 5,1951 ,2001 ,yearly
PERLND, 824,PWATER ,IFWI , 5,1951 ,2001 ,yearly
PERLND, 824,PWATER ,IFWO , 5,1951 ,2001 ,yearly
PERLND, 824,PWATER ,IFWS , 5,1951 ,2001 ,yearly
PERLND, 824,PWATER ,IGWI , 5,1951 ,2001 ,yearly
PERLND, 824,PWATER ,INFIL, 5,1951 ,2001 ,yearly
PERLND, 824,PWATER ,LZET , 5,1951 ,2001 ,yearly
PERLND, 824,PWATER ,LZI , 5,1951 ,2001 ,yearly
PERLND, 824,PWATER ,LZS , 5,1951 ,2001 ,yearly
PERLND, 824,PWATER ,PERC , 5,1951 ,2001 ,yearly
PERLND, 824,PWATER ,PERO , 5,1951 ,2001 ,yearly
PERLND, 824,PWATER ,PERS , 5,1951 ,2001 ,yearly
PERLND, 824,PWATER ,PET , 5,1951 ,2001 ,yearly
PERLND, 824,PWATER ,SUPY , 5,1951 ,2001 ,yearly
PERLND, 824,PWATER ,SURO , 5,1951 ,2001 ,yearly
PERLND, 824,PWATER ,SURS , 5,1951 ,2001 ,yearly
PERLND, 824,PWATER ,TAET , 5,1951 ,2001 ,yearly
PERLND, 824,PWATER ,UZET , 5,1951 ,2001 ,yearly
PERLND, 824,PWATER ,UZI , 5,1951 ,2001 ,yearly
PERLND, 824,PWATER ,UZS , 5,1951 ,2001 ,yearly
PERLND, 825,PWATER ,AGWET, 5,1951 ,2001 ,yearly
PERLND, 825,PWATER ,AGWI , 5,1951 ,2001 ,yearly
PERLND, 825,PWATER ,AGWO , 5,1951 ,2001 ,yearly
PERLND, 825,PWATER ,AGWS , 5,1951 ,2001 ,yearly
PERLND, 825,PWATER ,BASET, 5,1951 ,2001 ,yearly
PERLND, 825,PWATER ,CEPE , 5,1951 ,2001 ,yearly
PERLND, 825,PWATER ,CEPS , 5,1951 ,2001 ,yearly
PERLND, 825,PWATER ,GWVS , 5,1951 ,2001 ,yearly
PERLND, 825,PWATER ,IFWI , 5,1951 ,2001 ,yearly
PERLND, 825,PWATER ,IFWO , 5,1951 ,2001 ,yearly
PERLND, 825,PWATER ,IFWS , 5,1951 ,2001 ,yearly
PERLND, 825,PWATER ,IGWI , 5,1951 ,2001 ,yearly
PERLND, 825,PWATER ,INFIL, 5,1951 ,2001 ,yearly
PERLND, 825,PWATER ,LZET , 5,1951 ,2001 ,yearly
PERLND, 825,PWATER ,LZI , 5,1951 ,2001 ,yearly
PERLND, 825,PWATER ,LZS , 5,1951 ,2001 ,yearly
PERLND, 825,PWATER ,PERC , 5,1951 ,2001 ,yearly
PERLND, 825,PWATER ,PERO , 5,1951 ,2001 ,yearly
PERLND, 825,PWATER ,PERS , 5,1951 ,2001 ,yearly
PERLND, 825,PWATER ,PET , 5,1951 ,2001 ,yearly
PERLND, 825,PWATER ,SUPY , 5,1951 ,2001 ,yearly
PERLND, 825,PWATER ,SURO , 5,1951 ,2001 ,yearly
PERLND, 825,PWATER ,SURS , 5,1951 ,2001 ,yearly
PERLND, 825,PWATER ,TAET , 5,1951 ,2001 ,yearly
PERLND, 825,PWATER ,UZET , 5,1951 ,2001 ,yearly
PERLND, 825,PWATER ,UZI , 5,1951 ,2001 ,yearly
PERLND, 825,PWATER ,UZS , 5,1951 ,2001 ,yearly
PERLND, 901,PWATER ,AGWET, 5,1951 ,2001 ,yearly
PERLND, 901,PWATER ,AGWI , 5,1951 ,2001 ,yearly
PERLND, 901,PWATER ,AGWO , 5,1951 ,2001 ,yearly
PERLND, 901,PWATER ,AGWS , 5,1951 ,2001 ,yearly
PERLND, 901,PWATER ,BASET, 5,1951 ,2001 ,yearly
PERLND, 901,PWATER ,CEPE , 5,1951 ,2001 ,yearly
PERLND, 901,PWATER ,CEPS , 5,1951 ,2001 ,yearly
PERLND, 901,PWATER ,GWVS , 5,1951 ,2001 ,yearly
PERLND, 901,PWATER ,IFWI , 5,1951 ,2001 ,yearly
PERLND, 901,PWATER ,IFWO , 5,1951 ,2001 ,yearly
PERLND, 901,PWATER ,IFWS , 5,1951 ,2001 ,yearly
PERLND, 901,PWATER ,IGWI , 5,1951 ,2001 ,yearly
PERLND, 901,PWATER ,INFIL, 5,1951 ,2001 ,yearly
PERLND, 901,PWATER ,LZET , 5,1951 ,2001 ,yearly
PERLND, 901,PWATER ,LZI , 5,1951 ,2001 ,yearly
PERLND, 901,PWATER ,LZS , 5,1951 ,2001 ,yearly
PERLND, 901,PWATER ,PERC , 5,1951 ,2001 ,yearly
PERLND, 901,PWATER ,PERO , 5,1951 ,2001 ,yearly
PERLND, 901,PWATER ,PERS , 5,1951 ,2001 ,yearly
PERLND, 901,PWATER ,PET , 5,1951 ,2001 ,yearly
PERLND, 901,PWATER ,SUPY , 5,1951 ,2001 ,yearly
PERLND, 901,PWATER ,SURO , 5,1951 ,2001 ,yearly
PERLND, 901,PWATER ,SURS , 5,1951 ,2001 ,yearly
PERLND, 901,PWATER ,TAET , 5,1951 ,2001 ,yearly
PERLND, 901,PWATER ,UZET , 5,1951 ,2001 ,yearly
PERLND, 901,PWATER ,UZI , 5,1951 ,2001 ,yearly
PERLND, 901,PWATER ,UZS , 5,1951 ,2001 ,yearly
PERLND, 902,PWATER ,AGWET, 5,1951 ,2001 ,yearly
PERLND, 902,PWATER ,AGWI , 5,1951 ,2001 ,yearly
PERLND, 902,PWATER ,AGWO , 5,1951 ,2001 ,yearly
PERLND, 902,PWATER ,AGWS , 5,1951 ,2001 ,yearly
PERLND, 902,PWATER ,BASET, 5,1951 ,2001 ,yearly
PERLND, 902,PWATER ,CEPE , 5,1951 ,2001 ,yearly
PERLND, 902,PWATER ,CEPS , 5,1951 ,2001 ,yearly
PERLND, 902,PWATER ,GWVS , 5,1951 ,2001 ,yearly
PERLND, 902,PWATER ,IFWI , 5,1951 ,2001 ,yearly
PERLND, 902,PWATER ,IFWO , 5,1951 ,2001 ,yearly
PERLND, 902,PWATER ,IFWS , 5,1951 ,2001 ,yearly
PERLND, 902,PWATER ,IGWI , 5,1951 ,2001 ,yearly
PERLND, 902,PWATER ,INFIL, 5,1951 ,2001 ,yearly
PERLND, 902,PWATER ,LZET , 5,1951 ,2001 ,yearly
PERLND, 902,PWATER ,LZI , 5,1951 ,2001 ,yearly
PERLND, 902,PWATER ,LZS , 5,1951 ,2001 ,yearly
PERLND, 902,PWATER ,PERC , 5,1951 ,2001 ,yearly
PERLND, 902,PWATER ,PERO , 5,1951 ,2001 ,yearly
PERLND, 902,PWATER ,PERS , 5,1951 ,2001 ,yearly
PERLND, 902,PWATER ,PET , 5,1951 ,2001 ,yearly
PERLND, 902,PWATER ,SUPY , 5,1951 ,2001 ,yearly
PERLND, 902,PWATER ,SURO , 5,1951 ,2001 ,yearly
PERLND, 902,PWATER ,SURS , 5,1951 ,2001 ,yearly
PERLND, 902,PWATER ,TAET , 5,1951 ,2001 ,yearly
PERLND, 902,PWATER ,UZET , 5,1951 ,2001 ,yearly
PERLND, 902,PWATER ,UZI , 5,1951 ,2001 ,yearly
PERLND, 902,PWATER ,UZS , 5,1951 ,2001 ,yearly
PERLND, 903,PWATER ,AGWET, 5,1951 ,2001 ,yearly
PERLND, 903,PWATER ,AGWI , 5,1951 ,2001 ,yearly
PERLND, 903,PWATER ,AGWO , 5,1951 ,2001 ,yearly
PERLND, 903,PWATER ,AGWS , 5,1951 ,2001 ,yearly
PERLND, 903,PWATER ,BASET, 5,1951 ,2001 ,yearly
PERLND, 903,PWATER ,CEPE , 5,1951 ,2001 ,yearly
PERLND, 903,PWATER ,CEPS , 5,1951 ,2001 ,yearly
PERLND, 903,PWATER ,GWVS , 5,1951 ,2001 ,yearly
PERLND, 903,PWATER ,IFWI , 5,1951 ,2001 ,yearly
PERLND, 903,PWATER ,IFWO , 5,1951 ,2001 ,yearly
PERLND, 903,PWATER ,IFWS , 5,1951 ,2001 ,yearly
PERLND, 903,PWATER ,IGWI , 5,1951 ,2001 ,yearly
PERLND, 903,PWATER ,INFIL, 5,1951 ,2001 ,yearly
PERLND, 903,PWATER ,LZET , 5,1951 ,2001 ,yearly
PERLND, 903,PWATER ,LZI , 5,1951 ,2001 ,yearly
PERLND, 903,PWATER ,LZS , 5,1951 ,2001 ,yearly
PERLND, 903,PWATER ,PERC , 5,1951 ,2001 ,yearly
PERLND, 903,PWATER ,PERO , 5,1951 ,2001 ,yearly
PERLND, 903,PWATER ,PERS , 5,1951 ,2001 ,yearly
PERLND, 903,PWATER ,PET , 5,1951 ,2001 ,yearly
PERLND, 903,PWATER ,SUPY , 5,1951 ,2001 ,yearly
PERLND, 903,PWATER ,SURO , 5,1951 ,2001 ,yearly
PERLND, 903,PWATER ,SURS , 5,1951 ,2001 ,yearly
PERLND, 903,PWATER ,TAET , 5,1951 ,2001 ,yearly
PERLND, 903,PWATER ,UZET , 5,1951 ,2001 ,yearly
PERLND, 903,PWATER ,UZI , 5,1951 ,2001 ,yearly
PERLND, 903,PWATER ,UZS , 5,1951 ,2001 ,yearly
PERLND, 904,PWATER ,AGWET, 5,1951 ,2001 ,yearly
PERLND, 904,PWATER ,AGWI , 5,1951 ,2001 ,yearly
PERLND, 904,PWATER ,AGWO , 5,1951 ,2001 ,yearly
PERLND, 904,PWATER ,AGWS , 5,1951 ,2001 ,yearly
PERLND, 904,PWATER ,BASET, 5,1951 ,2001 ,yearly
PERLND, 904,PWATER ,CEPE , 5,1951 ,2001 ,yearly
PERLND, 904,PWATER ,CEPS , 5,1951 ,2001 ,yearly
PERLND, 904,PWATER ,GWVS , 5,1951 ,2001 ,yearly
PERLND, 904,PWATER ,IFWI , 5,1951 ,2001 ,yearly
PERLND, 904,PWATER ,IFWO , 5,1951 ,2001 ,yearly
PERLND, 904,PWATER ,IFWS , 5,1951 ,2001 ,yearly
PERLND, 904,PWATER ,IGWI , 5,1951 ,2001 ,yearly
PERLND, 904,PWATER ,INFIL, 5,1951 ,2001 ,yearly
PERLND, 904,PWATER ,LZET , 5,1951 ,2001 ,yearly
PERLND, 904,PWATER ,LZI , 5,1951 ,2001 ,yearly
PERLND, 904,PWATER ,LZS , 5,1951 ,2001 ,yearly
PERLND, 904,PWATER ,PERC , 5,1951 ,2001 ,yearly
PERLND, 904,PWATER ,PERO , 5,1951 ,2001 ,yearly
PERLND, 904,PWATER ,PERS , 5,1951 ,2001 ,yearly
PERLND, 904,PWATER ,PET , 5,1951 ,2001 ,yearly
PERLND, 904,PWATER ,SUPY , 5,1951 ,2001 ,yearly
PERLND, 904,PWATER ,SURO , 5,1951 ,2001 ,yearly
PERLND, 904,PWATER ,SURS , 5,1951 ,2001 ,yearly
PERLND, 904,PWATER ,TAET , 5,1951 ,2001 ,yearly
PERLND, 904,PWATER ,UZET , 5,1951 ,2001 ,yearly
PERLND, 904,PWATER ,UZI , 5,1951 ,2001 ,yearly
PERLND, 904,PWATER ,UZS , 5,1951 ,2001 ,yearly
PERLND, 905,PWATER ,AGWET, 5,1951 ,2001 ,yearly
PERLND, 905,PWATER ,AGWI , 5,1951 ,2001 ,yearly
PERLND, 905,PWATER ,AGWO , 5,1951 ,2001 ,yearly
PERLND, 905,PWATER ,AGWS , 5,1951 ,2001 ,yearly
PERLND, 905,PWATER ,BASET, 5,1951 ,2001 ,yearly
PERLND, 905,PWATER ,CEPE , 5,1951 ,2001 ,yearly
PERLND, 905,PWATER ,CEPS , 5,1951 ,2001 ,yearly
PERLND, 905,PWATER ,GWVS , 5,1951 ,2001 ,yearly
PERLND, 905,PWATER ,IFWI , 5,1951 ,2001 ,yearly
PERLND, 905,PWATER ,IFWO , 5,1951 ,2001 ,yearly
PERLND, 905,PWATER ,IFWS , 5,1951 ,2001 ,yearly
PERLND, 905,PWATER ,IGWI , 5,1951 ,2001 ,yearly
PERLND, 905,PWATER ,INFIL, 5,1951 ,2001 ,yearly
PERLND, 905,PWATER ,LZET , 5,1951 ,2001 ,yearly
PERLND, 905,PWATER ,LZI , 5,1951 ,2001 ,yearly
PERLND, 905,PWATER ,LZS , 5,1951 ,2001 ,yearly
PERLND, 905,PWATER ,PERC , 5,1951 ,2001 ,yearly
PERLND, 905,PWATER ,PERO , 5,1951 ,2001 ,yearly
PERLND, 905,PWATER ,PERS , 5,1951 ,2001 ,yearly
PERLND, 905,PWATER ,PET , 5,1951 ,2001 ,yearly
PERLND, 905,PWATER ,SUPY , 5,1951 ,2001 ,yearly
PERLND, 905,PWATER ,SURO , 5,1951 ,2001 ,yearly
PERLND, 905,PWATER ,SURS , 5,1951 ,2001 ,yearly
PERLND, 905,PWATER ,TAET , 5,1951 ,2001 ,yearly
PERLND, 905,PWATER ,UZET , 5,1951 ,2001 ,yearly
PERLND, 905,PWATER ,UZI , 5,1951 ,2001 ,yearly
PERLND, 905,PWATER ,UZS , 5,1951 ,2001 ,yearly
"""
ndict = []
rd = read_unicode_csv(StringIO(self.catalog.decode()))
next(rd)
for row in rd:
if len(row) == 0:
continue
nrow = [i.strip() for i in row]
ndict.append(
(nrow[0], int(nrow[1]), nrow[2], nrow[3], interval2codemap[nrow[7]])
)
self.ncatalog = sorted(ndict)
def test_catalog_api(self):
    """The Python API catalog matches the reference catalog parsed in setUp."""
    catalog_rows = hspfbintoolbox.catalog("tests/6b_np1.hbn")
    # Keep only the first five fields of each entry for comparison.
    trimmed = [row[:5] for row in catalog_rows]
    self.assertEqual(trimmed, self.ncatalog)
def test_catalog_cli(self):
    """The command-line catalog output matches the raw reference catalog."""
    cli_args = shlex.split("hspfbintoolbox catalog --tablefmt csv tests/6b_np1.hbn")
    proc = subprocess.Popen(
        cli_args, stdout=subprocess.PIPE, stdin=subprocess.PIPE
    )
    captured = proc.communicate()[0]
    self.assertEqual(captured, self.catalog)
| bsd-3-clause |
dnolivieri/RFVextract | rfVextract/VsPredict05.py | 1 | 3766 | #!/usr/bin/env python
"""
dnolivieri: (updated: 23 oct 2014)
random forest, but using the idea of
intervals used in the MHC exons
"""
import numpy as np
import matplotlib.pyplot as plt
import time
import os, fnmatch
import sys
import itertools
from operator import itemgetter, attrgetter
import math
from Bio import SeqIO
from Bio.Seq import Seq
from Bio import Motif
from Bio.SeqRecord import SeqRecord
from Bio import SeqFeature
from scipy import *
import struct
import re
#from propy import PyPro
#from propy.GetProteinFromUniprot import GetProteinSequence
import json
import cPickle as pickle
# Amino-acid one-letter code -> index (20 standard residues), used to address
# rows/columns of the normalized AA matrix loaded in VregionsPredict.__init__.
rno = {'A':0,'R':1,'N':2,'D':3,'C':4,'Q':5,'E':6,'G':7,'H':8,'I':9,'L':10,'K':11,'M':12,'F':13,'P':14,'S':15,'T':16,'W':17,'Y':18,'V':19}
class VregionsPredict:
def __init__(self, S, desc_method, loci_classes, mammalList):
strand=1
self.S = S
self.desc_method= desc_method
self.loci_classes = loci_classes
self.mammalList = mammalList
self.Lexon=320
self.contigs = self.get_contigs(mammalList[0])
qp = open('normalizedAA_Matrix.pkl', 'rb')
self.normI = pickle.load(qp)
self.predicted_seqs = []
self.rfmodels = self.get_models()
#self.analyze_files(mammalList)
def get_contigs(self, mammal):
contigs=[]
fp = open(self.S[mammal]["contigs"], "r")
for lk in fp:
contigs.append(lk.strip())
fp.close()
print contigs
#raw_input("The contigs")
return contigs
def get_models(self):
print "Getting Models"
rfmodels = []
for loci in self.loci_classes:
matfile = "trainMat_" + loci + ".pkl"
fp = open(matfile, 'rb')
rfmodels.append( pickle.load(fp) )
return rfmodels
def analyze_files(self, mammalList):
for mammal in mammalList:
fbar= self.S[mammal]["WGS"]
#self.predict_seqs= self.get_VregionsRF(fbar)
self.get_Vexon_candidates(fbar)
def get_Vexon_candidates(self, inFile):
print "inside get exon Vregions"
seqbuffer=[]
seqs_positive=[]
seqpos_buffer=[]
start_time = timeit.default_timer()
for strand in [1,-1]:
scnt=2
for record in SeqIO.parse(inFile, "fasta"):
if ( record.id.split("|")[3] not in self.contigs):
continue
print record.id.split("|")[3]
#raw_input("check if in contig")
if strand == 1:
Sequence=record.seq
else:
Sequence=record.seq.reverse_complement()
ix = 0
while ix < len(seq):
sbar=seq[ix: ix+20000]
x=[i.start()+ix for i in re.finditer("CAC", str(sbar))]
y=[i.start()+ix for i in re.finditer("AG", str(sbar))]
s=[(i,j) for i,j in itertools.product(x,y) if j>i and ( np.abs(i-j)>265 and np.abs(i-j)<285) and ((np.abs(i-j)-2)%3==0) ]
elapsed = timeit.default_timer() - start_time
print "----------", ix, "---------(", 100.*( 1.- float(len(seq) - ix)/float(len(seq))), "% )---(T=", elapsed,")---"
print len(s)
## ---------------MAIN ----------------------------------
if __name__ == '__main__':
    # Load the per-species WGS configuration.
    config_path = 'WGS_Prediction.json'
    with open(config_path) as fh:
        S = json.load(fh)

    # Loci whose V exons we attempt to predict.
    classes = ['ighv', 'iglv', 'igkv', 'trav', 'trbv', 'trgv', 'trdv']

    # Single species name taken from the command line.
    mlist = [sys.argv[1]]

    V = VregionsPredict(S, desc_method='PDT', loci_classes=classes,
                        mammalList=mlist)
| bsd-3-clause |
mhdella/scikit-learn | sklearn/cluster/setup.py | 263 | 1449 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
    """Assemble the numpy.distutils configuration for sklearn.cluster.

    Declares the compiled extension modules of the cluster subpackage and
    wires in the CBLAS libraries discovered at build time.
    """
    from numpy.distutils.misc_util import Configuration

    cblas_libs, blas_info = get_blas_info()

    extra_libs = []
    # On POSIX platforms the C math library must be linked explicitly.
    if os.name == 'posix':
        cblas_libs.append('m')
        extra_libs.append('m')

    config = Configuration('cluster', parent_package, top_path)

    config.add_extension('_dbscan_inner',
                         sources=['_dbscan_inner.cpp'],
                         include_dirs=[numpy.get_include()],
                         language="c++")

    config.add_extension('_hierarchical',
                         sources=['_hierarchical.cpp'],
                         language="c++",
                         include_dirs=[numpy.get_include()],
                         libraries=extra_libs)

    # _k_means links against CBLAS; remaining blas_info entries are splatted.
    config.add_extension(
        '_k_means',
        libraries=cblas_libs,
        sources=['_k_means.c'],
        include_dirs=[join('..', 'src', 'cblas'),
                      numpy.get_include(),
                      blas_info.pop('include_dirs', [])],
        extra_compile_args=blas_info.pop('extra_compile_args', []),
        **blas_info
    )

    return config
if __name__ == '__main__':
    # Allow building this subpackage standalone with numpy.distutils.
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
| bsd-3-clause |
robertsj/libdetran | src/python/pydetranutils/quad_plot.py | 2 | 1931 | # This provides utilities for plotting things on a
# 1D or 2D mesh or a slice of a 3D mesh.
try :
import numpy as np
except ImportError :
print "Error importing Numpy"
try :
import mpl_toolkits.mplot3d.axes3d as p3
import matplotlib.pyplot as plt
except ImportError :
print "Error importing matplotlib"
global __detranuselatex__ = False
try :
import os
print "Checking for LaTeX for nicer plotting labels..."
if (os.system("latex")==0) :
from matplotlib import rc
rc('text', usetex=True)
rc('font', family='serif')
__detranuselatex__ = True
except ImportError :
print "Warning: LaTeX labels being skipped"
def plot_quadrature(quad) :
""" Plots a quadrature.
"""
try :
D = quad.dimension()
except :
print "Error getting quadrature dimension... maybe not a quadrature object?"
return
if D == 1 :
# Get the abscissa and weights
mu = np.asarray(quad.cosines(0))
wt = np.asarray(quad.weights())
# Plot
plt.plot(mu, wt, 'bo')
if __detranuselatex__ :
plt.xlabel('$\mu$')
else :
plt.xlabel('mu')
plt.ylabel('weight')
plt.show()
else :
# Get the abscissa and weights
mu = np.asarray(quad.cosines(0))
eta = np.asarray(quad.cosines(1))
xi = np.asarray(quad.cosines(2))
wt = np.asarray(quad.weights())
# Plot. Note, using my (old) version of matplotlib, the colors
# are not translated to the scatter plot. The sizes are, but
# it's not really enough. What's a good solution?
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(30, 60)
myplot = ax.scatter(mu,eta,xi,c=wt, s=100000*wt**2, marker='^')
labels = ['mu','eta','xi']
if __detranuselatex__ :
labels ['$\mu$', '$\eta$', '$\\xi$']
ax.set_xlabel(labels[0])
ax.set_ylabel(labels[1])
ax.set_zlabel(labels[2])
fig.colorbar(myplot)
plt.show()
| mit |
telefar/stockEye | coursera-compinvest1-master/coursera-compinvest1-master/Examples/Basic/movingavg-ex.py | 2 | 1060 | import QSTK.qstkutil.qsdateutil as du
import QSTK.qstkutil.tsutil as tsu
import QSTK.qstkutil.DataAccess as da
import datetime as dt
import matplotlib.pyplot as plt
import pandas
from pylab import *
#
# Prepare to read the data
#
# Symbols and date range for the moving-average demonstration.
symbols = ["AAPL","GLD","GOOG","$SPX","XOM"]
startday = dt.datetime(2008,1,1)
endday = dt.datetime(2009,12,31)
# NYSE close is 16:00; build one timestamp per trading day.
timeofday=dt.timedelta(hours=16)
timestamps = du.getNYSEdays(startday,endday,timeofday)
# Pull volume, adjusted close and actual close from the Norgate store.
dataobj = da.DataAccess('Norgate')
voldata = dataobj.get_data(timestamps, symbols, "volume")
adjcloses = dataobj.get_data(timestamps, symbols, "close")
actualclose = dataobj.get_data(timestamps, symbols, "actual_close")
# Fill missing prices: fillna() with no arguments -- presumably forward-fills
# in the pandas version QSTK targets (TODO confirm), then back-fill any
# leading gaps.
adjcloses = adjcloses.fillna()
adjcloses = adjcloses.fillna(method='backfill')
# 20-day rolling mean of the adjusted closes (legacy pandas API).
means = pandas.rolling_mean(adjcloses,20,min_periods=20)
# Plot the prices
plt.clf()
symtoplot = 'AAPL'
plot(adjcloses.index,adjcloses[symtoplot].values,label=symtoplot)
plot(adjcloses.index,means[symtoplot].values)
plt.legend([symtoplot,'Moving Avg.'])
plt.ylabel('Adjusted Close')
savefig("movingavg-ex.png", format='png')
| bsd-3-clause |
heli522/scikit-learn | examples/linear_model/plot_ridge_path.py | 254 | 1655 | """
===========================================================
Plot Ridge coefficients as a function of the regularization
===========================================================
Shows the effect of collinearity in the coefficients of an estimator.
.. currentmodule:: sklearn.linear_model
:class:`Ridge` Regression is the estimator used in this example.
Each color represents a different feature of the
coefficient vector, and this is displayed as a function of the
regularization parameter.
At the end of the path, as alpha tends toward zero
and the solution tends towards the ordinary least squares, coefficients
exhibit big oscillations.
"""
# Author: Fabian Pedregosa -- <fabian.pedregosa@inria.fr>
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# X is the 10x10 Hilbert matrix
X = 1. / (np.arange(1, 11) + np.arange(0, 10)[:, np.newaxis])
y = np.ones(10)

###############################################################################
# Compute paths

n_alphas = 200
alphas = np.logspace(-10, -2, n_alphas)
clf = linear_model.Ridge(fit_intercept=False)


def _coefs_for(alpha):
    """Fit the shared ridge estimator at *alpha*; return its coefficients."""
    clf.set_params(alpha=alpha)
    clf.fit(X, y)
    return clf.coef_

coefs = [_coefs_for(a) for a in alphas]

###############################################################################
# Display results

ax = plt.gca()
ax.set_color_cycle(['b', 'r', 'g', 'c', 'k', 'y', 'm'])

ax.plot(alphas, coefs)
ax.set_xscale('log')
ax.set_xlim(ax.get_xlim()[::-1])  # reverse axis: alpha decreases rightward
plt.xlabel('alpha')
plt.ylabel('weights')
plt.title('Ridge coefficients as a function of the regularization')
plt.axis('tight')
plt.show()
babsey/sumatra | test/system/test_ircr.py | 3 | 6521 | """
A run through of basic Sumatra functionality.
As our example code, we will use a Python program for analyzing scanning
electron microscope (SEM) images of glass samples. This example was taken from
an online SciPy tutorial at http://scipy-lectures.github.com/intro/summary-exercises/image-processing.html
Usage:
nosetests -v test_ircr.py
or:
python test_ircr.py
"""
from __future__ import print_function
from __future__ import unicode_literals
from builtins import input
# Requirements: numpy, scipy, matplotlib, mercurial, sarge
import os
from datetime import datetime
import utils
from utils import (setup, teardown, run_test, build_command, assert_file_exists, assert_in_output,
assert_config, assert_label_equal, assert_records, assert_return_code,
edit_parameters, expected_short_list, substitute_labels)
from functools import partial
repository = "https://bitbucket.org/apdavison/ircr2013"
#repository = "/Volumes/USERS/andrew/dev/ircr2013" # during development
#repository = "/Users/andrew/dev/ircr2013"
def modify_script(filename):
    """Return a zero-argument callable that edits *filename* in the test
    working directory, splitting the combined bubble-size print into two
    labelled print statements."""
    def wrapped():
        path = os.path.join(utils.working_dir, filename)
        with open(path, 'r') as fp:
            lines = fp.readlines()
        with open(path, 'w') as fp:
            for line in lines:
                if "print(mean_bubble_size, median_bubble_size)" in line:
                    fp.write('print("Mean:", mean_bubble_size)\n')
                    fp.write('print("Median:", median_bubble_size)\n')
                else:
                    fp.write(line)
    return wrapped
# Ordered walkthrough of Sumatra functionality.  Each entry is either a
# callable (executed directly for setup-style mutations) or a tuple of
# (description, shell command, optional assertion function/argument pairs)
# that test_all() turns into a run_test invocation.
test_steps = [
    ("Get the example code",
     "hg clone %s ." % repository,
     assert_in_output, "updating to branch default"),
    ("Run the computation without Sumatra",
     "python glass_sem_analysis.py default_parameters MV_HFV_012.jpg",
     assert_in_output, "2416.86315789 60.0",
     assert_file_exists, os.path.join("Data", datetime.now().strftime("%Y%m%d")), # Data subdirectory contains another subdirectory labelled with today's date)
     ), # assert(subdirectory contains three image files).
    ("Set up a Sumatra project",
     "smt init -d Data -i . ProjectGlass",
     assert_in_output, "Sumatra project successfully set up"),
    ("Run the ``glass_sem_analysis.py`` script with Sumatra",
     "smt run -e python -m glass_sem_analysis.py -r 'initial run' default_parameters MV_HFV_012.jpg",
     assert_in_output, ("2416.86315789 60.0", "histogram.png")),
    ("Comment on the outcome",
     "smt comment 'works fine'"),
    ("Set defaults",
     "smt configure -e python -m glass_sem_analysis.py"),
    ("Look at the current configuration of the project",
     "smt info",
     assert_config, {"project_name": "ProjectGlass", "executable": "Python", "main": "glass_sem_analysis.py",
                     "code_change": "error"}),
    edit_parameters("default_parameters", "no_filter", "filter_size", 1),
    ("Run with changed parameters and user-defined label",
     "smt run -l example_label -r 'No filtering' no_filter MV_HFV_012.jpg", # TODO: assert(results have changed)
     assert_in_output, "phases.png",
     assert_label_equal, "example_label"),
    ("Change parameters from the command line",
     "smt run -r 'Trying a different colourmap' default_parameters MV_HFV_012.jpg phases_colourmap=hot"), # assert(results have changed)
    ("Add another comment",
     "smt comment 'The default colourmap is nicer'"), #TODO add a comment to an older record (e.g. this colourmap is nicer than 'hot')")
    ("Add tags on the command line",
     build_command("smt tag mytag {0} {1}", "labels")),
    modify_script("glass_sem_analysis.py"),
    ("Run the modified code",
     "smt run -r 'Added labels to output' default_parameters MV_HFV_012.jpg",
     assert_return_code, 1,
     assert_in_output, "Code has changed, please commit your changes"),
    ("Commit changes...",
     "hg commit -m 'Added labels to output' -u testuser"),
    ("...then run again",
     "smt run -r 'Added labels to output' default_parameters MV_HFV_012.jpg"), # assert(output has changed as expected)
    #TODO: make another change to the Python script
    ("Change configuration to store diff",
     "smt configure --on-changed=store-diff"),
    ("Run with store diff",
     "smt run -r 'made a change' default_parameters MV_HFV_012.jpg"), # assert(code runs, stores diff)
    ("Review previous computations - get a list of labels",
     "smt list",
     assert_in_output, expected_short_list),
    ("Review previous computations in detail",
     "smt list -l",
     assert_records, substitute_labels([
         {'label': 0, 'executable_name': 'Python', 'outcome': 'works fine', 'reason': 'initial run',
          'version': '6038f9c500d1', 'vcs': 'Mercurial', 'script_arguments': '<parameters> MV_HFV_012.jpg',
          'main_file': 'glass_sem_analysis.py'}, # TODO: add checking of parameters
         {'label': 1, 'outcome': '', 'reason': 'No filtering'},
         {'label': 2, 'outcome': 'The default colourmap is nicer', 'reason': 'Trying a different colourmap'},
         {'label': 3, 'outcome': '', 'reason': 'Added labels to output'},
         {'label': 4, 'outcome': '', 'reason': 'made a change'}, # TODO: add checking of diff
     ])),
    ("Filter the output of ``smt list`` based on tag",
     "smt list mytag",
     #assert(list is correct)
     ),
    ("Export Sumatra records as JSON.",
     "smt export",
     assert_file_exists, ".smt/records_export.json"),
]
def test_all():
    """Yield one Nose test per entry in ``test_steps``.

    Callable entries run immediately (setup-style file edits); tuple
    entries become partially-applied ``run_test`` invocations whose Nose
    description is the step's first element.
    """
    for step in test_steps:
        if not callable(step):
            description = step[0]
            bound = partial(run_test, *step[1:])
            bound.description = description
            yield bound
        else:
            step()
# Still to test:
#
#.. LaTeX example
#.. note that not only Python is supported - separate test
#.. play with labels? uuid, etc.
#.. move recordstore
#.. migrate datastore
#.. repeats
#.. moving forwards and backwards in history
#.. upgrades (needs Docker)
if __name__ == '__main__':
    # Run the tests without using Nose.
    setup()
    for step in test_steps:
        if callable(step):
            step()
        else:
            print(step[0]) # description
            run_test(*step[1:])
    # Interactive cleanup: keep the sandbox only on explicit request.
    response = input("Do you want to delete the temporary directory (default: yes)? ")
    if response not in ["n", "N", "no", "No"]:
        teardown()
    else:
        print("Temporary directory %s not removed" % utils.temporary_dir)
| bsd-2-clause |
rmccoy7541/egillettii-rnaseq | scripts/model_A.synonymous.py | 1 | 3634 | #! /bin/env python
import sys
from optparse import OptionParser
import copy
import matplotlib
matplotlib.use('Agg')
import pylab
import scipy.optimize
import numpy
from numpy import array
import dadi
#import demographic models
import gillettii_models
def runModel(outFile, nuW_start, nuC_start, T_start):
    """Fit the bottleneck_split demographic model with dadi and append the
    starting parameters, optimized parameters and log-likelihood as one
    tab-separated line to *outFile*.

    nuW_start / nuC_start / T_start are the starting population sizes (WY, CO)
    and divergence time used to seed the optimizer.
    """
    # Parse the SNP file to generate the data dictionary
    dd = dadi.Misc.make_data_dict('/mnt/CDanalysis2/dadi_manuscript/input_data/gillettii_data_syn.AN24.dadi')
    # Extract the spectrum from the dictionary and project down to 12 alleles per population
    fs = dadi.Spectrum.from_data_dict(dd, pop_ids=['WY','CO'], projections=[12,12], polarized=False)
    #uncomment following line to perform conventional bootstrap
    #fs = fs.sample()
    ns = fs.sample_sizes
    print 'sample sizes:', ns
    # These are the grid point settings will use for extrapolation.
    pts_l = [20,30,40]
    # suggested that the smallest grid be slightly larger than the largest sample size. But this may take a long time.
    # bottleneck_split model
    func = gillettii_models.bottleneck_split
    params = array([nuW_start, nuC_start, T_start])
    upper_bound = [10, 10, 10]
    lower_bound = [1e-5, 1e-10, 0]
    # Make the extrapolating version of the demographic model function.
    func_ex = dadi.Numerics.make_extrap_func(func)
    # Calculate the model AFS
    model = func_ex(params, ns, pts_l)
    # Calculate likelihood of the data given the model AFS
    # Likelihood of the data given the model AFS.
    ll_model = dadi.Inference.ll_multinom(model, fs)
    print 'Model log-likelihood:', ll_model, "\n"
    # The optimal value of theta given the model.
    theta = dadi.Inference.optimal_sfs_scaling(model, fs)
    # Perturb the starting parameters before optimization (fold=1 -> up to
    # 2x perturbation), then optimize in log space with Nelder-Mead.
    p0 = dadi.Misc.perturb_params(params, fold=1, lower_bound=lower_bound, upper_bound=upper_bound)
    print 'perturbed parameters: ', p0, "\n"
    popt = dadi.Inference.optimize_log_fmin(p0, fs, func_ex, pts_l, upper_bound=upper_bound, lower_bound=lower_bound, maxiter=None, verbose=len(params))
    print 'Optimized parameters:', repr(popt), "\n"
    #use the optimized parameters in a new model to try to get the parameters to converge
    new_model = func_ex(popt, ns, pts_l)
    ll_opt = dadi.Inference.ll_multinom(new_model, fs)
    print 'Optimized log-likelihood:', ll_opt, "\n"
    # Write the parameters and log-likelihood to the outFile
    # (start params, optimized params, final log-likelihood, tab-separated).
    s = str(nuW_start) + '\t' + str(nuC_start) + '\t' + str(T_start) + '\t'
    for i in range(0, len(popt)):
        s += str(popt[i]) + '\t'
    s += str(ll_opt) + '\n'
    outFile.write(s)
#################
def mkOptionParser():
    """Build and return the optparse parser for this script's CLI."""
    usage = """%prog <outFN> <nuW_start> <nuC_start> <T_start>
    %prog performs demographic inference on gillettii RNA-seq data. """
    return OptionParser(usage)
def main():
    """Parse command-line arguments and launch the model run.

    See ``mkOptionParser`` for the expected four positional arguments.
    """
    parser = mkOptionParser()
    options, args = parser.parse_args()

    if len(args) != 4:
        parser.error("Incorrect number of arguments")

    outFN, nuW_raw, nuC_raw, T_raw = args
    nuW_start = float(nuW_raw)
    nuC_start = float(nuC_raw)
    T_start = float(T_raw)

    # '-' selects stdout; anything else appends to the named file.
    if outFN == '-':
        outFile = sys.stdout
    else:
        outFile = open(outFN, 'a')

    runModel(outFile, nuW_start, nuC_start, T_start)
# Script entry point: run main() only when executed directly.
if __name__ == '__main__':
    main()
| mit |
fabioticconi/scikit-learn | sklearn/datasets/svmlight_format.py | 19 | 16759 | """This module implements a loader and dumper for the svmlight format
This format is a text-based format, with one sample per line. It does
not store zero valued features hence is suitable for sparse dataset.
The first element of each line can be used to store a target variable to
predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
"""
# Authors: Mathieu Blondel <mathieu@mblondel.org>
# Lars Buitinck
# Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from contextlib import closing
import io
import os.path
import numpy as np
import scipy.sparse as sp
from ._svmlight_format import _load_svmlight_file
from .. import __version__
from ..externals import six
from ..externals.six import u, b
from ..externals.six.moves import range, zip
from ..utils import check_array
from ..utils.fixes import frombuffer_empty
def load_svmlight_file(f, n_features=None, dtype=np.float64,
                       multilabel=False, zero_based="auto", query_id=False):
    """Load a dataset in svmlight / libsvm format into a sparse CSR matrix.

    The svmlight format is text based, one sample per line, omitting
    zero-valued features; the first element of each line holds the target.
    It is the default format of the svmlight and libsvm command line
    programs.  This function is a convenience wrapper around
    ``load_svmlight_files`` for a single file.  Parsing is expensive, so
    consider caching the result (e.g. with joblib.Memory) when loading
    the same file repeatedly.

    Pairwise preference constraints ("qid" entries) are ignored unless
    ``query_id`` is set to True; they constrain which sample pairs may be
    combined by pairwise loss functions in learning-to-rank settings.

    Parameters
    ----------
    f : {str, file-like, int}
        (Path to) the file to load.  Paths ending in ".gz" or ".bz2" are
        decompressed on the fly; an integer is treated as a file
        descriptor.  File-likes must be opened in binary mode and are not
        closed by this function.

    n_features : int or None
        Number of feature columns; inferred from the data when None.
        Setting it explicitly keeps slices of a bigger dataset aligned
        even when a slice lacks examples of some features.

    dtype : numpy data type, default np.float64
        Dtype of the returned ``X`` and ``y`` arrays.

    multilabel : boolean, optional, default False
        Whether samples may carry several comma-separated labels each (see
        http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)

    zero_based : boolean or "auto", optional, default "auto"
        Whether column indices in f are zero-based (True) or one-based
        (False); one-based indices are shifted down to match Python/NumPy
        conventions.  "auto" applies a heuristic check on the file
        contents; "auto" or True is always safe.

    query_id : boolean, default False
        If True, also return the qid value of each sample.

    Returns
    -------
    X : scipy.sparse matrix of shape (n_samples, n_features)

    y : ndarray of shape (n_samples,), or, when ``multilabel`` is True, a
        list of label tuples of length n_samples.

    query_id : array of shape (n_samples,); only returned when
        ``query_id`` is set to True.

    See also
    --------
    load_svmlight_files : same loader for multiple files, enforcing a
        shared number of features/columns on all of them.
    """
    # Delegate to the multi-file loader with a single-element list and
    # flatten its flat result list into the documented tuple.
    loaded = load_svmlight_files([f], n_features, dtype, multilabel,
                                 zero_based, query_id)
    return tuple(loaded)
def _gen_open(f):
if isinstance(f, int): # file descriptor
return io.open(f, "rb", closefd=False)
elif not isinstance(f, six.string_types):
raise TypeError("expected {str, int, file-like}, got %s" % type(f))
_, ext = os.path.splitext(f)
if ext == ".gz":
import gzip
return gzip.open(f, "rb")
elif ext == ".bz2":
from bz2 import BZ2File
return BZ2File(f, "rb")
else:
return open(f, "rb")
def _open_and_load(f, dtype, multilabel, zero_based, query_id):
    """Dispatch *f* to the Cython loader, opening it first if needed.

    Returns the raw (data, indices, indptr, labels, query) arrays with
    dtypes fixed up; ``labels`` stays a list of tuples when *multilabel*
    is True.
    """
    if hasattr(f, "read"):
        actual_dtype, data, ind, indptr, labels, query = \
            _load_svmlight_file(f, dtype, multilabel, zero_based, query_id)
    # XXX remove closing when Python 2.7+/3.1+ required
    else:
        with closing(_gen_open(f)) as f:
            actual_dtype, data, ind, indptr, labels, query = \
                _load_svmlight_file(f, dtype, multilabel, zero_based, query_id)

    # convert from array.array, give data the right dtype
    if not multilabel:
        labels = frombuffer_empty(labels, np.float64)
    data = frombuffer_empty(data, actual_dtype)
    indices = frombuffer_empty(ind, np.intc)
    indptr = np.frombuffer(indptr, dtype=np.intc)   # never empty
    query = frombuffer_empty(query, np.intc)

    data = np.asarray(data, dtype=dtype)    # no-op for float{32,64}
    return data, indices, indptr, labels, query
def load_svmlight_files(files, n_features=None, dtype=np.float64,
                        multilabel=False, zero_based="auto", query_id=False):
    """Load dataset from multiple files in SVMlight format

    This function is equivalent to mapping load_svmlight_file over a list of
    files, except that the results are concatenated into a single, flat list
    and the samples vectors are constrained to all have the same number of
    features.

    In case the file contains a pairwise preference constraint (known
    as "qid" in the svmlight format) these are ignored unless the
    query_id parameter is set to True. These pairwise preference
    constraints can be used to constraint the combination of samples
    when using pairwise loss functions (as is the case in some
    learning to rank problems) so that only pairs with the same
    query_id value are considered.

    Parameters
    ----------
    files : iterable over {str, file-like, int}
        (Paths of) files to load. If a path ends in ".gz" or ".bz2", it will
        be uncompressed on the fly. If an integer is passed, it is assumed to
        be a file descriptor. File-likes and file descriptors will not be
        closed by this function. File-like objects must be opened in binary
        mode.

    n_features: int or None
        The number of features to use. If None, it will be inferred from the
        maximum column index occurring in any of the files.

        This can be set to a higher value than the actual number of features
        in any of the input files, but setting it to a lower value will cause
        an exception to be raised.

    multilabel: boolean, optional
        Samples may have several labels each (see
        http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)

    zero_based: boolean or "auto", optional
        Whether column indices in f are zero-based (True) or one-based
        (False). If column indices are one-based, they are transformed to
        zero-based to match Python/NumPy conventions.
        If set to "auto", a heuristic check is applied to determine this from
        the file contents. Both kinds of files occur "in the wild", but they
        are unfortunately not self-identifying. Using "auto" or True should
        always be safe.

    query_id: boolean, defaults to False
        If True, will return the query_id array for each file.

    dtype : numpy data type, default np.float64
        Data type of dataset to be loaded. This will be the data type of the
        output numpy arrays ``X`` and ``y``.

    Returns
    -------
    [X1, y1, ..., Xn, yn]
    where each (Xi, yi) pair is the result from load_svmlight_file(files[i]).

    If query_id is set to True, this will return instead [X1, y1, q1,
    ..., Xn, yn, qn] where (Xi, yi, qi) is the result from
    load_svmlight_file(files[i])

    Notes
    -----
    When fitting a model to a matrix X_train and evaluating it against a
    matrix X_test, it is essential that X_train and X_test have the same
    number of features (X_train.shape[1] == X_test.shape[1]). This may not
    be the case if you load the files individually with load_svmlight_file.

    See also
    --------
    load_svmlight_file
    """
    r = [_open_and_load(f, dtype, multilabel, bool(zero_based), bool(query_id))
         for f in files]

    # Heuristic for zero_based="auto": if every file's smallest column
    # index is > 0, the files are assumed to be one-based and all indices
    # are shifted down in place.
    # NOTE(review): np.min raises on an empty indices array, so a file
    # with no stored features would break "auto" mode -- confirm inputs.
    if (zero_based is False
            or zero_based == "auto" and all(np.min(tmp[1]) > 0 for tmp in r)):
        for ind in r:
            indices = ind[1]
            indices -= 1

    # Smallest column count that fits every file's largest index.
    n_f = max(ind[1].max() for ind in r) + 1

    if n_features is None:
        n_features = n_f
    elif n_features < n_f:
        raise ValueError("n_features was set to {},"
                         " but input file contains {} features"
                         .format(n_features, n_f))

    result = []
    for data, indices, indptr, y, query_values in r:
        shape = (indptr.shape[0] - 1, n_features)
        X = sp.csr_matrix((data, indices, indptr), shape)
        X.sort_indices()
        result += X, y
        if query_id:
            result.append(query_values)

    return result
def _dump_svmlight(X, y, f, multilabel, one_based, comment, query_id):
    """Write X/y (and optional query ids) to *f* in svmlight format.

    Assumes X and y have already been validated and index-sorted by the
    public ``dump_svmlight_file`` wrapper; *f* must be opened in binary
    mode.  Output is pure ASCII.
    """
    X_is_sp = int(hasattr(X, "tocsr"))
    y_is_sp = int(hasattr(y, "tocsr"))
    # %d for integer dtypes; %.16g preserves full float64 precision.
    if X.dtype.kind == 'i':
        value_pattern = u("%d:%d")
    else:
        value_pattern = u("%d:%.16g")

    if y.dtype.kind == 'i':
        label_pattern = u("%d")
    else:
        label_pattern = u("%.16g")

    # Line layout: "<label>[ qid:<id>] <col:val> <col:val> ...\n"
    line_pattern = u("%s")
    if query_id is not None:
        line_pattern += u(" qid:%d")
    line_pattern += u(" %s\n")

    if comment:
        f.write(b("# Generated by dump_svmlight_file from scikit-learn %s\n"
                  % __version__))
        f.write(b("# Column indices are %s-based\n"
                  % ["zero", "one"][one_based]))

        f.write(b("#\n"))
        f.writelines(b("# %s\n" % line) for line in comment.splitlines())

    for i in range(X.shape[0]):
        # Gather this row's nonzero (column, value) pairs.
        if X_is_sp:
            span = slice(X.indptr[i], X.indptr[i + 1])
            row = zip(X.indices[span], X.data[span])
        else:
            nz = X[i] != 0
            row = zip(np.where(nz)[0], X[i, nz])

        s = " ".join(value_pattern % (j + one_based, x) for j, x in row)

        if multilabel:
            # Multilabel targets are written as a comma-separated list
            # of the nonzero label indices.
            if y_is_sp:
                nz_labels = y[i].nonzero()[1]
            else:
                nz_labels = np.where(y[i] != 0)[0]
            labels_str = ",".join(label_pattern % j for j in nz_labels)
        else:
            if y_is_sp:
                labels_str = label_pattern % y.data[i]
            else:
                labels_str = label_pattern % y[i]

        if query_id is not None:
            feat = (labels_str, query_id[i], s)
        else:
            feat = (labels_str, s)

        f.write((line_pattern % feat).encode('ascii'))
def dump_svmlight_file(X, y, f, zero_based=True, comment=None, query_id=None,
                       multilabel=False):
    """Dump the dataset in svmlight / libsvm file format.

    This format is a text-based format, with one sample per line. It does
    not store zero valued features hence is suitable for sparse dataset.

    The first element of each line can be used to store a target variable
    to predict.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape = [n_samples, n_features]
        Training vectors, where n_samples is the number of samples and
        n_features is the number of features.

    y : {array-like, sparse matrix}, shape = [n_samples (, n_labels)]
        Target values. Class labels must be an
        integer or float, or array-like objects of integer or float for
        multilabel classifications.

    f : string or file-like in binary mode
        If string, specifies the path that will contain the data.
        If file-like, data will be written to f. f should be opened in binary
        mode.

    zero_based : boolean, optional
        Whether column indices should be written zero-based (True) or one-based
        (False).

    comment : string, optional
        Comment to insert at the top of the file. This should be either a
        Unicode string, which will be encoded as UTF-8, or an ASCII byte
        string.
        If a comment is given, then it will be preceded by one that identifies
        the file as having been dumped by scikit-learn. Note that not all
        tools grok comments in SVMlight files.

    query_id : array-like, shape = [n_samples]
        Array containing pairwise preference constraints (qid in svmlight
        format).

    multilabel: boolean, optional
        Samples may have several labels each (see
        http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)

        .. versionadded:: 0.17
           parameter *multilabel* to support multilabel datasets.
    """
    if comment is not None:
        # Convert comment string to list of lines in UTF-8.
        # If a byte string is passed, then check whether it's ASCII;
        # if a user wants to get fancy, they'll have to decode themselves.
        # Avoid mention of str and unicode types for Python 3.x compat.
        if isinstance(comment, bytes):
            comment.decode("ascii")     # just for the exception
        else:
            comment = comment.encode("utf-8")
        if six.b("\0") in comment:
            raise ValueError("comment string contains NUL byte")

    yval = check_array(y, accept_sparse='csr', ensure_2d=False)
    if sp.issparse(yval):
        if yval.shape[1] != 1 and not multilabel:
            raise ValueError("expected y of shape (n_samples, 1),"
                             " got %r" % (yval.shape,))
    else:
        if yval.ndim != 1 and not multilabel:
            raise ValueError("expected y of shape (n_samples,), got %r"
                             % (yval.shape,))

    Xval = check_array(X, accept_sparse='csr')
    if Xval.shape[0] != yval.shape[0]:
        raise ValueError("X.shape[0] and y.shape[0] should be the same, got"
                         " %r and %r instead." % (Xval.shape[0], yval.shape[0]))

    # We had some issues with CSR matrices with unsorted indices (e.g. #1501),
    # so sort them here, but first make sure we don't modify the user's X.
    # TODO We can do this cheaper; sorted_indices copies the whole matrix.
    if yval is y and hasattr(yval, "sorted_indices"):
        y = yval.sorted_indices()
    else:
        y = yval
        if hasattr(y, "sort_indices"):
            y.sort_indices()
    if Xval is X and hasattr(Xval, "sorted_indices"):
        X = Xval.sorted_indices()
    else:
        X = Xval
        if hasattr(X, "sort_indices"):
            X.sort_indices()

    if query_id is not None:
        # query_id must align one-to-one with the samples when provided.
        query_id = np.asarray(query_id)
        if query_id.shape[0] != y.shape[0]:
            raise ValueError("expected query_id of shape (n_samples,), got %r"
                             % (query_id.shape,))

    one_based = not zero_based

    if hasattr(f, "write"):
        _dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
    else:
        with open(f, "wb") as f:
            _dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
| bsd-3-clause |
nawawi/poedit | deps/boost/libs/metaparse/tools/benchmark/benchmark.py | 8 | 10484 | #!/usr/bin/python
"""Utility to benchmark the generated source files"""
# Copyright Abel Sinkovics (abel@sinkovics.hu) 2016.
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import argparse
import os
import subprocess
import json
import math
import platform
import matplotlib
import random
import re
import time
import psutil
import PIL
matplotlib.use('Agg')
import matplotlib.pyplot # pylint:disable=I0011,C0411,C0412,C0413
def benchmark_command(cmd, progress):
    """Benchmark one command execution

    Wraps *cmd* in GNU time to capture user CPU seconds (%U) and peak
    resident memory in KB (%M), runs it through /bin/sh and returns the
    two values as a (seconds, kilobytes) float pair.  *progress* is a
    0..1 fraction used only for the progress printout.  Raises when the
    time output cannot be parsed.

    NOTE: Python 2 syntax (print statement); requires GNU /usr/bin/time.
    """
    full_cmd = '/usr/bin/time --format="%U %M" {0}'.format(cmd)
    print '{0:6.2f}% Running {1}'.format(100.0 * progress, full_cmd)
    (_, err) = subprocess.Popen(
        ['/bin/sh', '-c', full_cmd],
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE
    ).communicate('')

    # GNU time writes "<user_seconds> <max_rss_kb>" as the last stderr line.
    values = err.strip().split(' ')
    if len(values) == 2:
        try:
            return (float(values[0]), float(values[1]))
        except: # pylint:disable=I0011,W0702
            pass # Handled by the code after the "if"

    print err
    raise Exception('Error during benchmarking')
def benchmark_file(
        filename, compiler, include_dirs, (progress_from, progress_to),
        iter_count, extra_flags = ''):
    """Benchmark one file

    Compiles *filename* *iter_count* times with *compiler* in C++11 mode
    and returns {"time": <avg user seconds>, "memory": <avg peak MB>}.
    The tuple parameter gives the progress-bar range covered by this
    file; the generated .o is removed after every run.

    NOTE: Python 2 only (tuple parameter unpacking, xrange).
    """
    time_sum = 0
    mem_sum = 0
    for nth_run in xrange(0, iter_count):
        (time_spent, mem_used) = benchmark_command(
            '{0} -std=c++11 {1} -c {2} {3}'.format(
                compiler,
                ' '.join('-I{0}'.format(i) for i in include_dirs),
                filename,
                extra_flags
            ),
            # Interpolate the progress fraction linearly across the runs.
            (
                progress_to * nth_run + progress_from * (iter_count - nth_run)
            ) / iter_count
        )
        os.remove(os.path.splitext(os.path.basename(filename))[0] + '.o')
        time_sum = time_sum + time_spent
        mem_sum = mem_sum + mem_used
    return {
        "time": time_sum / iter_count,
        # benchmark_command reports KB; convert the average to MB.
        "memory": mem_sum / (iter_count * 1024)
    }
def compiler_info(compiler):
    """Determine the name + version of the compiler

    Runs ``<compiler> -v`` and scans both stdout and stderr for a
    "gcc version X.Y[.Z]" or "clang version X.Y[.Z]" line, returning
    e.g. "gcc 5.4.0".  Falls back to the raw *compiler* string when no
    such line is found.
    """
    (out, err) = subprocess.Popen(
        ['/bin/sh', '-c', '{0} -v'.format(compiler)],
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE
    ).communicate('')

    # Compilers differ in which stream carries the banner, so search both.
    gcc_clang = re.compile('(gcc|clang) version ([0-9]+(\\.[0-9]+)*)')
    for line in (out + err).split('\n'):
        mtch = gcc_clang.search(line)
        if mtch:
            return mtch.group(1) + ' ' + mtch.group(2)
    return compiler
def string_char(char):
    """Map *char* to a character that is safe inside a filename.

    Characters with special meaning in paths or shells are replaced by
    an underscore; everything else passes through unchanged.
    """
    unsafe = (' ', '~', '(', ')', '/', '\\')
    return '_' if char in unsafe else char
def make_filename(string):
    """Turn *string* into a filename by sanitising every character."""
    safe_chars = [string_char(c) for c in string]
    return ''.join(safe_chars)
def files_in_dir(path, extension):
    """Yield the names of files in *path* ending in ``.<extension>``."""
    suffix = '.{0}'.format(extension)
    return (name for name in os.listdir(path) if name.endswith(suffix))
def format_time(seconds):
    """Format a duration given in seconds as a human readable string.

    Decomposes *seconds* into weeks, days, hours, minutes and seconds,
    e.g. ``format_time(3661)`` -> ``'1 hour 1 minute 1 second'``.
    Units with a zero count are omitted; a non-positive duration is
    reported as ``'0 seconds'``.
    """
    minute = 60
    hour = minute * 60
    day = hour * 24
    week = day * 7

    result = []
    for name, dur in [
            ('week', week), ('day', day), ('hour', hour),
            ('minute', minute), ('second', 1)
    ]:
        # ">=" rather than ">": exactly one full unit (e.g. 60s) must be
        # reported as that unit, and a trailing single second must not be
        # dropped (the old "seconds > dur" test lost both cases).
        if seconds >= dur:
            value = seconds // dur
            result.append(
                '{0} {1}{2}'.format(int(value), name, 's' if value > 1 else '')
            )
            seconds = seconds % dur
    return ' '.join(result) if result else '0 seconds'
def benchmark(src_dir, compiler, include_dirs, iter_count):
    """Do the benchmarking

    Compiles every .cpp file in *src_dir* (in random order) *iter_count*
    times and records the averaged time/memory per file.  Files whose
    name contains 'bmp' are additionally benchmarked with clang's
    -fstring-literal-templates flag and recorded under a 'slt' key,
    until the first failure disables that extra pass.  Prints progress
    and an ETA estimate; returns {filename: {"time": ..., "memory": ...}}.

    NOTE: Python 2 syntax (print statements).
    """
    files = list(files_in_dir(src_dir, 'cpp'))
    random.shuffle(files)

    has_string_templates = True
    string_template_file_cnt = sum(1 for file in files if 'bmp' in file)
    # Each 'bmp' file is compiled twice, so it counts double for progress.
    file_count = len(files) + string_template_file_cnt

    started_at = time.time()
    result = {}
    for filename in files:
        progress = len(result)
        result[filename] = benchmark_file(
            os.path.join(src_dir, filename),
            compiler,
            include_dirs,
            (float(progress) / file_count, float(progress + 1) / file_count),
            iter_count
        )
        if 'bmp' in filename and has_string_templates:
            try:
                temp_result = benchmark_file(
                    os.path.join(src_dir, filename),
                    compiler,
                    include_dirs,
                    (float(progress + 1) / file_count, float(progress + 2) / file_count),
                    iter_count,
                    '-Xclang -fstring-literal-templates'
                )
                result[filename.replace('bmp', 'slt')] = temp_result
            except:
                # The compiler rejected the flag: stop trying and shrink
                # the total used for progress/ETA accordingly.
                has_string_templates = False
                file_count -= string_template_file_cnt
                print 'Stopping the benchmarking of string literal templates'
        # Linear ETA extrapolation from the work completed so far.
        elapsed = time.time() - started_at
        total = float(file_count * elapsed) / len(result)
        print 'Elapsed time: {0}, Remaining time: {1}'.format(
            format_time(elapsed),
            format_time(total - elapsed)
        )

    return result
def plot(values, mode_names, title, (xlabel, ylabel), out_file):
    """Plot a diagram

    Draws one line per mode (values[mode] is a list of (x, y) pairs),
    labels the axes, adds a legend when more than one mode is shown and
    saves the figure to *out_file*.

    NOTE: Python 2 only (tuple parameter unpacking, dict.iteritems).
    """
    matplotlib.pyplot.clf()
    for mode, mode_name in mode_names.iteritems():
        vals = values[mode]
        matplotlib.pyplot.plot(
            [x for x, _ in vals],
            [y for _, y in vals],
            label=mode_name
        )
    matplotlib.pyplot.title(title)
    matplotlib.pyplot.xlabel(xlabel)
    matplotlib.pyplot.ylabel(ylabel)
    if len(mode_names) > 1:
        matplotlib.pyplot.legend()
    matplotlib.pyplot.savefig(out_file)
def mkdir_p(path):
    """mkdir -p path

    Create *path* including any missing parents.  Any OSError (typically
    "already exists", but also e.g. permission failures) is silently
    ignored, mirroring the forgiving behaviour of ``mkdir -p``.
    """
    try:
        os.makedirs(path)
    except OSError:
        pass
def configs_in(src_dir):
    """Enumerate all configs in src_dir

    Yields the parsed content of every ``*.json`` file directly inside
    *src_dir* (one decoded object per file, in ``os.listdir`` order).
    """
    for filename in files_in_dir(src_dir, 'json'):
        with open(os.path.join(src_dir, filename), 'rb') as in_f:
            yield json.load(in_f)
def byte_to_gb(byte):
    """Convert a byte count into (binary) gigabytes as a float."""
    bytes_per_gb = 1024.0 ** 3
    return byte / bytes_per_gb
def join_images(img_files, out_file):
    """Join the list of images into the out file

    Concatenates the images horizontally: the canvas is as wide as the
    sum of the widths and as tall as the tallest image, and each image
    is pasted top-aligned to the right of the previous one.
    """
    images = [PIL.Image.open(f) for f in img_files]
    joined = PIL.Image.new(
        'RGB',
        (sum(i.size[0] for i in images), max(i.size[1] for i in images))
    )
    left = 0
    for img in images:
        joined.paste(im=img, box=(left, 0))
        left = left + img.size[0]
    joined.save(out_file)
def plot_temp_diagrams(config, results, temp_dir):
    """Plot temporary diagrams

    Produces one PNG per measured quantity (time, memory) in *temp_dir*
    and returns the list of image paths.  When string-literal-template
    results ('slt') are present, an extra plotting mode is registered
    whose filenames are derived from the 'bmp' entries.

    NOTE: Python 2 only (``dict.values()[0]`` indexing).
    """
    display_name = {
        'time': 'Compilation time (s)',
        'memory': 'Compiler memory usage (MB)',
    }

    files = config['files']
    img_files = []
    if any('slt' in result for result in results) and 'bmp' in files.values()[0]:
        config['modes']['slt'] = 'Using BOOST_METAPARSE_STRING with string literal templates'
        for f in files.values():
            f['slt'] = f['bmp'].replace('bmp', 'slt')

    for measured in ['time', 'memory']:
        # The measurement points are the (integer) keys of the file map.
        mpts = sorted(int(k) for k in files.keys())
        img_files.append(os.path.join(temp_dir, '_{0}.png'.format(measured)))
        plot(
            {
                m: [(x, results[files[str(x)][m]][measured]) for x in mpts]
                for m in config['modes'].keys()
            },
            config['modes'],
            display_name[measured],
            (config['x_axis_label'], display_name[measured]),
            img_files[-1]
        )

    return img_files
def plot_diagram(config, results, images_dir, out_filename):
    """Render one joined benchmark diagram and delete its intermediates."""
    temp_images = plot_temp_diagrams(config, results, images_dir)
    join_images(temp_images, out_filename)
    for temp_image in temp_images:
        os.remove(temp_image)
def plot_diagrams(results, configs, compiler, out_dir):
    """Plot all diagrams specified by the configs

    For every config, renders the joined diagram into
    ``<out_dir>/images/<name>_<compiler>.png`` and writes a QuickBook
    snippet (``<name>_<compiler>.qbk``) describing the measurement host
    (platform, RAM in GB, compiler banner).
    """
    compiler_fn = make_filename(compiler)
    total = psutil.virtual_memory().total # pylint:disable=I0011,E1101
    memory = int(math.ceil(byte_to_gb(total)))

    images_dir = os.path.join(out_dir, 'images')

    for config in configs:
        out_prefix = '{0}_{1}'.format(config['name'], compiler_fn)

        plot_diagram(
            config,
            results,
            images_dir,
            os.path.join(images_dir, '{0}.png'.format(out_prefix))
        )

        with open(
            os.path.join(out_dir, '{0}.qbk'.format(out_prefix)),
            'wb'
        ) as out_f:
            qbk_content = """{0}
Measured on a {2} host with {3} GB memory. Compiler used: {4}.
[$images/metaparse/{1}.png [width 100%]]
""".format(config['desc'], out_prefix, platform.platform(), memory, compiler)
            out_f.write(qbk_content)
def main():
    """The main function of the script

    Parses the command line, benchmarks the generated sources with the
    requested compiler and writes the diagrams plus QuickBook output.
    """
    desc = 'Benchmark the files generated by generate.py'
    parser = argparse.ArgumentParser(description=desc)
    parser.add_argument(
        '--src',
        dest='src_dir',
        default='generated',
        help='The directory containing the sources to benchmark'
    )
    parser.add_argument(
        '--out',
        dest='out_dir',
        default='../../doc',
        help='The output directory'
    )
    parser.add_argument(
        '--include',
        dest='include',
        default='include',
        help='The directory containing the headeres for the benchmark'
    )
    parser.add_argument(
        '--boost_headers',
        dest='boost_headers',
        default='../../../..',
        help='The directory containing the Boost headers (the boost directory)'
    )
    parser.add_argument(
        '--compiler',
        dest='compiler',
        default='g++',
        help='The compiler to do the benchmark with'
    )
    parser.add_argument(
        '--repeat_count',
        dest='repeat_count',
        type=int,
        default=5,
        help='How many times a measurement should be repeated.'
    )

    args = parser.parse_args()

    # Human-readable compiler banner used in diagram filenames/captions.
    compiler = compiler_info(args.compiler)

    results = benchmark(
        args.src_dir,
        args.compiler,
        [args.include, args.boost_headers],
        args.repeat_count
    )

    plot_diagrams(results, configs_in(args.src_dir), compiler, args.out_dir)
# Invoke the benchmark driver only when executed as a script.
if __name__ == '__main__':
    main()
| mit |
rosswhitfield/mantid | scripts/MantidIPython/plot_functions.py | 3 | 4224 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
"""
Plotting functions for use in IPython notebooks that are generated by MantidPlot
"""
import matplotlib.pyplot as plt
# Import Mantid
from mantid.simpleapi import *
import mantid.api as mapi
def _plot_with_options(axes_option, workspace, options_list, plot_number):
    """
    Enable/disable legend, grid, limits according to
    options (ops) for the given axes (ax).
    Plot with or without errorbars.
    """
    # Convert histogram data to point data so x and y have equal length.
    ws_plot = ConvertToPointData(workspace)
    if options_list['errorbars']:
        axes_option.errorbar(ws_plot.readX(0), ws_plot.readY(0),
                             yerr=ws_plot.readE(0), label=workspace.name())
    else:
        axes_option.plot(ws_plot.readX(0),
                         ws_plot.readY(0),
                         label=workspace.name())
    axes_option.grid(options_list['grid'])
    axes_option.set_xscale(options_list['xScale'])
    axes_option.set_yscale(options_list['yScale'])
    # 'auto' leaves matplotlib's automatic limits untouched.
    if options_list['xLimits'] != 'auto':
        axes_option.set_xlim(options_list['xLimits'])
    if options_list['yLimits'] != 'auto':
        axes_option.set_ylim(options_list['yLimits'])
    # If a list of titles was given, use it to title each subplot
    # NOTE(review): hasattr(x, "__iter__") excludes strings only on
    # Python 2; on Python 3 strings are iterable -- confirm interpreter.
    if hasattr(options_list['title'], "__iter__"):
        axes_option.set_title(options_list['title'][plot_number])
    # Per-subplot legend locations (list) vs one shared location.
    if options_list['legend'] and hasattr(options_list['legendLocation'], "__iter__"):
        axes_option.legend(loc=options_list['legendLocation'][plot_number])
    elif options_list['legend']:
        axes_option.legend(loc=options_list['legendLocation'])
def plots(list_of_workspaces, *args, **kwargs):
    """
    Create a figure with a subplot for each workspace given.
    Workspaces within a group workspace are plotted together in the same subplot.

    Examples:
    plots(rr)
    plots(rr, 'TheGraphTitle')
    plots(rr, 'TheGraphTitle', grid=True, legend=True,
          xScale='linear', yScale='log', xLimits=[0.008, 0.16])
    plots(rr, sharedAxes = False, xLimits = [0, 0.1], yLimits = [1e-5, 2],
          Title='ASF070_07 I=1A T=3K dq/q=2%',
          legend=True, legendLocation=3, errorbars=False)
    """
    # Wrap a single workspace so the loop below can iterate uniformly.
    # NOTE(review): hasattr(x, "__iter__") excludes strings only on
    # Python 2; on Python 3 strings are iterable -- confirm interpreter.
    if not hasattr(list_of_workspaces, "__iter__"):
        list_of_workspaces = [list_of_workspaces]
    ops = _process_arguments(args, kwargs)

    # Create subplots for workspaces in the list
    fig, axes_handle = plt.subplots(1,
                                    len(list_of_workspaces),
                                    sharey=ops['sharedAxes'],
                                    figsize=(6 * len(list_of_workspaces), 4))
    # plt.subplots returns a bare Axes (not a sequence) for one subplot.
    if not hasattr(axes_handle, "__iter__"):
        axes_handle = [axes_handle]
    for plot_number, workspace in enumerate(list_of_workspaces):
        if isinstance(workspace, mapi.WorkspaceGroup):
            # Plot grouped workspaces on the same axes
            for sub_ws in workspace:
                _plot_with_options(axes_handle[plot_number], sub_ws, ops, plot_number)
        else:
            _plot_with_options(axes_handle[plot_number], workspace, ops, plot_number)
    # If a single title was given, use it to title the whole figure
    if not hasattr(ops['title'], "__iter__"):
        fig.suptitle(ops['title'])
    plt.show()
    return plt.gcf()
def _process_arguments(input_args, input_kwargs):
"""
Build a dictionary of plotting options
"""
key_list = ['title', 'grid', 'legend', 'legendLocation',
'xScale', 'yScale', 'xLimits', 'yLimits', 'sharedAxes', 'errorbars']
default_values = ['', True, True, 1, 'log', 'log', 'auto', 'auto', True, 'True']
# Fill ops with the default values
for i in range(len(input_args)): # copy in values provided in args
default_values[i] = input_args[i]
ops = dict(zip(key_list, default_values))
for k in ops.keys(): # copy in any key word given arguments
ops[k] = input_kwargs.get(k, ops[k])
return ops
| gpl-3.0 |
MaxHalford/StSICMR-Inference | simulations/plotting.py | 1 | 4200 | import matplotlib.pyplot as plt
import numpy as np
plt.style.use('fivethirtyeight')
def plotModel(model, times, lambdas=None, logScale=False,
              save=None, show=True):
    ''' Plot the model's inferred lambda function in red ('Obtained')
    and, when given, the reference lambda values in green ('Model'),
    titling the figure with the squared error between the two. '''
    plt.clf()
    plt.grid(color='white', linestyle='solid')
    # Evaluate the model at the given time steps
    lambda_s = [model.lambda_s(t) for t in times]
    # Plot the model in red
    plt.step(times, lambda_s, label='Obtained', linewidth=3,
             where='post', color='red', alpha=0.5)
    # Plot the lambda function in green
    if lambdas is not None:
        plt.step(times, lambdas, label='Model', linewidth=3,
                 where='post', color='green', alpha=0.5)
        # Compute the squared error
        # NOTE(review): np.sum over a generator relies on the builtin-sum
        # fallback; a list comprehension would be more conventional.
        squared_error = np.sum(((lambdas[i] - lambda_s[i]) ** 2
                                for i in range(len(lambdas))))
        plt.title('Least squares: ' + str(squared_error))
    # Add the migration rate time changes as vertical lines
    for t in model.T_list[1:]:
        plt.axvline(t, linestyle='--', color='gray', alpha=0.5)
    # Set x scale to logarithmic
    if logScale is True:
        plt.xscale('log')
    # Annotate the graph
    plt.suptitle('Structured model inference of lambda function',
                 fontsize=14, fontweight='bold')
    plt.legend(loc=4)
    plt.xlabel('Time going backwards')
    plt.ylabel('Lambda')
    # Add model information to the top-left corner
    information = '''
    n: {0}
    T: {1}
    M: {2}'''.format(model.n,
                     np.round(model.T_list, 2),
                     np.round(model.M_list, 2))
    plt.annotate(information, xy=(0, 1), xycoords='axes fraction',
                 fontsize=13, ha='left', va='top', xytext=(5, -5),
                 textcoords='offset points')
    # Save the figure
    if save is not None:
        figure = plt.gcf()
        figure.set_size_inches(20, 14)
        plt.savefig(save, dpi=100)
    if show is True:
        plt.show()
def plotInference(model, times, lambdas, true_n, true_T, true_M,
                  logScale=False, save=None, show=False):
    ''' Plot an inference of the parameters in a structured model.

    Compares the inferred model (red, 'Obtained') against the reference
    lambda values (green, 'Model'), overlaying the migration-rate change
    times of both and annotating the true vs inferred (n, T, M). '''
    plt.clf()
    # Evaluate the model at the given time steps
    lambda_s = [model.lambda_s(t) for t in times]
    # Compute the squared error
    squared_error = np.sum(((lambdas[i] - lambda_s[i]) ** 2
                            for i in range(len(lambdas))))
    # Plot the model in red
    plt.step(times, lambda_s, label='Obtained', linewidth=3,
             where='post', color='red', alpha=0.5)
    # Plot the lambda function in green
    plt.step(times, lambdas, label='Model', linewidth=3,
             where='post', color='green', alpha=0.5)
    # Add the migration rate time changes as vertical lines
    # (red for the inferred model, green for the true model).
    for t in zip(model.T_list[1:], true_T[1:]):
        plt.axvline(t[0], linestyle='--', color='red', alpha=0.7)
        plt.axvline(t[1], linestyle='--', color='green', alpha=0.7)
    # Set x scale to logarithmic time
    if logScale is True:
        plt.xscale('log')
    # Annotate the graph
    plt.suptitle('Genetic Algorithm VS Model',
                 fontsize=14, fontweight='bold')
    plt.title('Least squares: {0}'.format(squared_error))
    plt.legend(loc=4)
    plt.xlabel('Time going backwards')
    plt.ylabel('Lambda')
    # Add comparison legend in the top-left corner
    information = '''
    True n: {0}
    Obtained n: {1}
    True T: {2}
    Obtained T: {3}
    True M: {4}
    Obtained M: {5}'''.format(true_n,
                              model.n,
                              np.round(list(true_T), 2),
                              np.round(list(model.T_list), 2),
                              np.round(list(true_M), 2),
                              np.round(list(model.M_list), 2))
    plt.annotate(information, xy=(0, 1), xycoords='axes fraction',
                 fontsize=13, ha='left', va='top', xytext=(5, -5),
                 textcoords='offset points')
    # Save the figure
    if save is not None:
        figure = plt.gcf()
        figure.set_size_inches(20, 14)
        plt.savefig(save, dpi=100)
    if show is True:
        plt.show()
| mit |
ishank08/scikit-learn | sklearn/datasets/tests/test_mldata.py | 384 | 5221 | """Test functionality of mldata fetching utilities."""
import os
import shutil
import tempfile
import scipy as sp
from sklearn import datasets
from sklearn.datasets import mldata_filename, fetch_mldata
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.utils.testing import mock_mldata_urlopen
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import with_setup
from sklearn.utils.testing import assert_array_equal
tmpdir = None
def setup_tmpdata():
    """Nose fixture: create a scratch dir with an 'mldata' subdirectory.

    The path is stored in the module-level ``tmpdir`` so the paired
    ``teardown_tmpdata`` can remove it after each test.
    """
    # create temporary dir
    global tmpdir
    tmpdir = tempfile.mkdtemp()
    os.makedirs(os.path.join(tmpdir, 'mldata'))
def teardown_tmpdata():
    """Nose fixture: delete the scratch dir created by setup_tmpdata."""
    # remove temporary dir
    if tmpdir is not None:
        shutil.rmtree(tmpdir)
def test_mldata_filename():
    """Each human-readable dataset name must map to its canonical slug."""
    expected_slugs = {
        'datasets-UCI iris': 'datasets-uci-iris',
        'news20.binary': 'news20binary',
        'book-crossing-ratings-1.0': 'book-crossing-ratings-10',
        'Nile Water Level': 'nile-water-level',
        'MNIST (original)': 'mnist-original',
    }
    for raw_name, slug in expected_slugs.items():
        assert_equal(mldata_filename(raw_name), slug)
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_download():
    """Test that fetch_mldata is able to download and cache a data set."""
    # Swap the network call for a mock serving a fake 150x4 data set.
    _urlopen_ref = datasets.mldata.urlopen
    datasets.mldata.urlopen = mock_mldata_urlopen({
        'mock': {
            'label': sp.ones((150,)),
            'data': sp.ones((150, 4)),
        },
    })
    try:
        mock = fetch_mldata('mock', data_home=tmpdir)
        for n in ["COL_NAMES", "DESCR", "target", "data"]:
            assert_in(n, mock)

        assert_equal(mock.target.shape, (150,))
        assert_equal(mock.data.shape, (150, 4))

        # An unknown data set name must surface as an HTTPError.
        assert_raises(datasets.mldata.HTTPError,
                      fetch_mldata, 'not_existing_name')
    finally:
        # Always restore the real urlopen, even when assertions fail.
        datasets.mldata.urlopen = _urlopen_ref
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_fetch_one_column():
    """A dataset exposing a single array lands in .data with no target."""
    _urlopen_ref = datasets.mldata.urlopen
    try:
        dataname = 'onecol'
        # create fake data set in cache
        x = sp.arange(6).reshape(2, 3)
        datasets.mldata.urlopen = mock_mldata_urlopen({dataname: {'x': x}})

        dset = fetch_mldata(dataname, data_home=tmpdir)
        for n in ["COL_NAMES", "DESCR", "data"]:
            assert_in(n, dset)
        assert_not_in("target", dset)

        assert_equal(dset.data.shape, (2, 3))
        assert_array_equal(dset.data, x)

        # transposing the data array
        dset = fetch_mldata(dataname, transpose_data=False, data_home=tmpdir)
        assert_equal(dset.data.shape, (3, 2))
    finally:
        datasets.mldata.urlopen = _urlopen_ref
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_fetch_multiple_column():
    """Data/target columns can be resolved by default names, declaration
    order, numeric position, or explicit name."""
    _urlopen_ref = datasets.mldata.urlopen
    try:
        # create fake data set in cache
        x = sp.arange(6).reshape(2, 3)
        y = sp.array([1, -1])
        z = sp.arange(12).reshape(4, 3)

        # by default
        dataname = 'threecol-default'
        datasets.mldata.urlopen = mock_mldata_urlopen({
            dataname: (
                {
                    'label': y,
                    'data': x,
                    'z': z,
                },
                ['z', 'data', 'label'],
            ),
        })

        dset = fetch_mldata(dataname, data_home=tmpdir)
        for n in ["COL_NAMES", "DESCR", "target", "data", "z"]:
            assert_in(n, dset)
        assert_not_in("x", dset)
        assert_not_in("y", dset)

        assert_array_equal(dset.data, x)
        assert_array_equal(dset.target, y)
        # Extra columns are exposed transposed under their own name.
        assert_array_equal(dset.z, z.T)

        # by order
        dataname = 'threecol-order'
        datasets.mldata.urlopen = mock_mldata_urlopen({
            dataname: ({'y': y, 'x': x, 'z': z},
                       ['y', 'x', 'z']), })

        dset = fetch_mldata(dataname, data_home=tmpdir)
        for n in ["COL_NAMES", "DESCR", "target", "data", "z"]:
            assert_in(n, dset)
        assert_not_in("x", dset)
        assert_not_in("y", dset)

        assert_array_equal(dset.data, x)
        assert_array_equal(dset.target, y)
        assert_array_equal(dset.z, z.T)

        # by number
        dataname = 'threecol-number'
        datasets.mldata.urlopen = mock_mldata_urlopen({
            dataname: ({'y': y, 'x': x, 'z': z},
                       ['z', 'x', 'y']),
        })

        dset = fetch_mldata(dataname, target_name=2, data_name=0,
                            data_home=tmpdir)
        for n in ["COL_NAMES", "DESCR", "target", "data", "x"]:
            assert_in(n, dset)
        assert_not_in("y", dset)
        assert_not_in("z", dset)

        assert_array_equal(dset.data, z)
        assert_array_equal(dset.target, y)

        # by name
        dset = fetch_mldata(dataname, target_name='y', data_name='z',
                            data_home=tmpdir)
        for n in ["COL_NAMES", "DESCR", "target", "data", "x"]:
            assert_in(n, dset)
        assert_not_in("y", dset)
        assert_not_in("z", dset)

    finally:
        datasets.mldata.urlopen = _urlopen_ref
| bsd-3-clause |
rrohan/scikit-learn | sklearn/mixture/tests/test_gmm.py | 200 | 17427 | import unittest
import copy
import sys
from nose.tools import assert_true
import numpy as np
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_raises)
from scipy import stats
from sklearn import mixture
from sklearn.datasets.samples_generator import make_spd_matrix
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raise_message
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.externals.six.moves import cStringIO as StringIO
rng = np.random.RandomState(0)
def test_sample_gaussian():
    # Test sample generation from mixture.sample_gaussian where covariance
    # is diagonal, spherical and full
    n_features, n_samples = 2, 300
    axis = 1
    mu = rng.randint(10) * rng.rand(n_features)
    cv = (rng.rand(n_features) + 1.0) ** 2
    samples = mixture.sample_gaussian(
        mu, cv, covariance_type='diag', n_samples=n_samples)
    # Loose tolerances: sample moments of 300 draws are noisy.
    assert_true(np.allclose(samples.mean(axis), mu, atol=1.3))
    assert_true(np.allclose(samples.var(axis), cv, atol=1.5))
    # the same for spherical covariances
    cv = (rng.rand() + 1.0) ** 2
    samples = mixture.sample_gaussian(
        mu, cv, covariance_type='spherical', n_samples=n_samples)
    assert_true(np.allclose(samples.mean(axis), mu, atol=1.5))
    assert_true(np.allclose(
        samples.var(axis), np.repeat(cv, n_features), atol=1.5))
    # and for full covariances (A'A + I is symmetric positive definite)
    A = rng.randn(n_features, n_features)
    cv = np.dot(A.T, A) + np.eye(n_features)
    samples = mixture.sample_gaussian(
        mu, cv, covariance_type='full', n_samples=n_samples)
    assert_true(np.allclose(samples.mean(axis), mu, atol=1.3))
    assert_true(np.allclose(np.cov(samples), cv, atol=2.5))
    # Numerical stability check: in SciPy 0.12.0 at least, eigh may return
    # tiny negative values in its second return value.
    from sklearn.mixture import sample_gaussian
    x = sample_gaussian([0, 0], [[4, 3], [1, .1]],
                        covariance_type='full', random_state=42)
    print(x)
    assert_true(np.isfinite(x).all())
def _naive_lmvnpdf_diag(X, mu, cv):
# slow and naive implementation of lmvnpdf
ref = np.empty((len(X), len(mu)))
stds = np.sqrt(cv)
for i, (m, std) in enumerate(zip(mu, stds)):
ref[:, i] = np.log(stats.norm.pdf(X, m, std)).sum(axis=1)
return ref
def test_lmvnpdf_diag():
    # test a slow and naive implementation of lmvnpdf and
    # compare it to the vectorized version (mixture.lmvnpdf) to test
    # for correctness
    n_features, n_components, n_samples = 2, 3, 10
    mu = rng.randint(10) * rng.rand(n_components, n_features)
    cv = (rng.rand(n_components, n_features) + 1.0) ** 2
    X = rng.randint(10) * rng.rand(n_samples, n_features)
    ref = _naive_lmvnpdf_diag(X, mu, cv)
    lpr = mixture.log_multivariate_normal_density(X, mu, cv, 'diag')
    assert_array_almost_equal(lpr, ref)
def test_lmvnpdf_spherical():
    # Spherical covariances are a special case of diagonal ones: tile
    # the scalar variance across features and compare to the naive ref.
    n_features, n_components, n_samples = 2, 3, 10
    mu = rng.randint(10) * rng.rand(n_components, n_features)
    spherecv = rng.rand(n_components, 1) ** 2 + 1
    X = rng.randint(10) * rng.rand(n_samples, n_features)
    cv = np.tile(spherecv, (n_features, 1))
    reference = _naive_lmvnpdf_diag(X, mu, cv)
    lpr = mixture.log_multivariate_normal_density(X, mu, spherecv,
                                                  'spherical')
    assert_array_almost_equal(lpr, reference)
def test_lmvnpdf_full():
    # Full covariances built as diagonal matrices must match the
    # diagonal-covariance naive reference exactly.
    n_features, n_components, n_samples = 2, 3, 10
    mu = rng.randint(10) * rng.rand(n_components, n_features)
    cv = (rng.rand(n_components, n_features) + 1.0) ** 2
    X = rng.randint(10) * rng.rand(n_samples, n_features)
    fullcv = np.array([np.diag(x) for x in cv])
    reference = _naive_lmvnpdf_diag(X, mu, cv)
    lpr = mixture.log_multivariate_normal_density(X, mu, fullcv, 'full')
    assert_array_almost_equal(lpr, reference)
def test_lvmpdf_full_cv_non_positive_definite():
    # A non-positive-definite full covariance must raise a ValueError
    # with an explanatory message rather than fail in linear algebra.
    n_features, n_samples = 2, 10
    rng = np.random.RandomState(0)
    X = rng.randint(10) * rng.rand(n_samples, n_features)
    mu = np.mean(X, 0)
    cv = np.array([[[-1, 0], [0, 1]]])  # negative eigenvalue -> not PD
    expected_message = "'covars' must be symmetric, positive-definite"
    assert_raise_message(ValueError, expected_message,
                         mixture.log_multivariate_normal_density,
                         X, mu, cv, 'full')
def test_GMM_attributes():
    # GMM parameter attributes round-trip through their setters, and
    # invalid covariance shapes/types raise ValueError.
    n_components, n_features = 10, 4
    covariance_type = 'diag'
    g = mixture.GMM(n_components, covariance_type, random_state=rng)
    weights = rng.rand(n_components)
    weights = weights / weights.sum()
    means = rng.randint(-20, 20, (n_components, n_features))
    assert_true(g.n_components == n_components)
    assert_true(g.covariance_type == covariance_type)
    g.weights_ = weights
    assert_array_almost_equal(g.weights_, weights)
    g.means_ = means
    assert_array_almost_equal(g.means_, means)
    covars = (0.1 + 2 * rng.rand(n_components, n_features)) ** 2
    g.covars_ = covars
    assert_array_almost_equal(g.covars_, covars)
    # wrong shape / empty covariances are rejected
    assert_raises(ValueError, g._set_covars, [])
    assert_raises(ValueError, g._set_covars,
                  np.zeros((n_components - 2, n_features)))
    # unknown covariance_type string is rejected at construction
    assert_raises(ValueError, mixture.GMM, n_components=20,
                  covariance_type='badcovariance_type')
class GMMTester():
    """Mixin with the shared GMM test battery.

    Concrete subclasses (one per covariance type) set ``covariance_type``
    and ``model`` as class attributes and bind ``setUp = GMMTester._setUp``.
    """

    # DPGMM subclasses flip this off: they cannot have means/covars set
    # before fitting (see test_eval below).
    do_test_eval = True

    def _setUp(self):
        """Build a random reference mixture and covariances of every type."""
        self.n_components = 10
        self.n_features = 4
        self.weights = rng.rand(self.n_components)
        self.weights = self.weights / self.weights.sum()
        self.means = rng.randint(-20, 20, (self.n_components, self.n_features))
        # Admissible drop in log-likelihood between EM iterations; see
        # the comment in test_train for why this is not exactly 0.
        self.threshold = -0.5
        self.I = np.eye(self.n_features)
        self.covars = {
            'spherical': (0.1 + 2 * rng.rand(self.n_components,
                                             self.n_features)) ** 2,
            'tied': (make_spd_matrix(self.n_features, random_state=0)
                     + 5 * self.I),
            'diag': (0.1 + 2 * rng.rand(self.n_components,
                                        self.n_features)) ** 2,
            'full': np.array([make_spd_matrix(self.n_features, random_state=0)
                              + 5 * self.I for x in range(self.n_components)])}

    def test_eval(self):
        """score_samples on a hand-set model recovers the generating component."""
        if not self.do_test_eval:
            return  # DPGMM does not support setting the means and
        # covariances before fitting There is no way of fixing this
        # due to the variational parameters being more expressive than
        # covariance matrices
        g = self.model(n_components=self.n_components,
                       covariance_type=self.covariance_type, random_state=rng)
        # Make sure the means are far apart so responsibilities.argmax()
        # picks the actual component used to generate the observations.
        g.means_ = 20 * self.means
        g.covars_ = self.covars[self.covariance_type]
        g.weights_ = self.weights
        gaussidx = np.repeat(np.arange(self.n_components), 5)
        n_samples = len(gaussidx)
        X = rng.randn(n_samples, self.n_features) + g.means_[gaussidx]
        ll, responsibilities = g.score_samples(X)
        self.assertEqual(len(ll), n_samples)
        self.assertEqual(responsibilities.shape,
                         (n_samples, self.n_components))
        # rows of responsibilities are probability distributions
        assert_array_almost_equal(responsibilities.sum(axis=1),
                                  np.ones(n_samples))
        assert_array_equal(responsibilities.argmax(axis=1), gaussidx)

    def test_sample(self, n=100):
        """sample() returns an array of the requested shape."""
        g = self.model(n_components=self.n_components,
                       covariance_type=self.covariance_type, random_state=rng)
        # Make sure the means are far apart so responsibilities.argmax()
        # picks the actual component used to generate the observations.
        g.means_ = 20 * self.means
        g.covars_ = np.maximum(self.covars[self.covariance_type], 0.1)
        g.weights_ = self.weights
        samples = g.sample(n)
        self.assertEqual(samples.shape, (n, self.n_features))

    def test_train(self, params='wmc'):
        """EM training monotonically (almost) increases the log-likelihood."""
        g = mixture.GMM(n_components=self.n_components,
                        covariance_type=self.covariance_type)
        g.weights_ = self.weights
        g.means_ = self.means
        g.covars_ = 20 * self.covars[self.covariance_type]
        # Create a training set by sampling from the predefined distribution.
        X = g.sample(n_samples=100)
        g = self.model(n_components=self.n_components,
                       covariance_type=self.covariance_type,
                       random_state=rng, min_covar=1e-1,
                       n_iter=1, init_params=params)
        g.fit(X)
        # Do one training iteration at a time so we can keep track of
        # the log likelihood to make sure that it increases after each
        # iteration.
        trainll = []
        for _ in range(5):
            g.params = params
            g.init_params = ''
            g.fit(X)
            trainll.append(self.score(g, X))
        g.n_iter = 10
        g.init_params = ''
        g.params = params
        g.fit(X)  # finish fitting
        # Note that the log likelihood will sometimes decrease by a
        # very small amount after it has more or less converged due to
        # the addition of min_covar to the covariance (to prevent
        # underflow). This is why the threshold is set to -0.5
        # instead of 0.
        delta_min = np.diff(trainll).min()
        self.assertTrue(
            delta_min > self.threshold,
            "The min nll increase is %f which is lower than the admissible"
            " threshold of %f, for model %s. The likelihoods are %s."
            % (delta_min, self.threshold, self.covariance_type, trainll))

    def test_train_degenerate(self, params='wmc'):
        # Train on degenerate data with 0 in some dimensions
        # Create a training set by sampling from the predefined distribution.
        X = rng.randn(100, self.n_features)
        X.T[1:] = 0
        g = self.model(n_components=2, covariance_type=self.covariance_type,
                       random_state=rng, min_covar=1e-3, n_iter=5,
                       init_params=params)
        g.fit(X)
        trainll = g.score(X)
        # min_covar keeps the fit finite; just check it is not absurd
        self.assertTrue(np.sum(np.abs(trainll / 100 / X.shape[1])) < 5)

    def test_train_1d(self, params='wmc'):
        # Train on 1-D data
        # Create a training set by sampling from the predefined distribution.
        X = rng.randn(100, 1)
        # X.T[1:] = 0
        g = self.model(n_components=2, covariance_type=self.covariance_type,
                       random_state=rng, min_covar=1e-7, n_iter=5,
                       init_params=params)
        g.fit(X)
        trainll = g.score(X)
        # DPGMM's variational bound is looser, hence the larger tolerance
        if isinstance(g, mixture.DPGMM):
            self.assertTrue(np.sum(np.abs(trainll / 100)) < 5)
        else:
            self.assertTrue(np.sum(np.abs(trainll / 100)) < 2)

    def score(self, g, X):
        """Total per-sample log-likelihood of X under model g."""
        return g.score(X).sum()
class TestGMMWithSphericalCovars(unittest.TestCase, GMMTester):
    # Run the full GMMTester battery with spherical covariances.
    covariance_type = 'spherical'
    model = mixture.GMM
    setUp = GMMTester._setUp
class TestGMMWithDiagonalCovars(unittest.TestCase, GMMTester):
    # Run the full GMMTester battery with diagonal covariances.
    covariance_type = 'diag'
    model = mixture.GMM
    setUp = GMMTester._setUp
class TestGMMWithTiedCovars(unittest.TestCase, GMMTester):
    # Run the full GMMTester battery with a tied covariance matrix.
    covariance_type = 'tied'
    model = mixture.GMM
    setUp = GMMTester._setUp
class TestGMMWithFullCovars(unittest.TestCase, GMMTester):
    # Run the full GMMTester battery with full covariance matrices.
    covariance_type = 'full'
    model = mixture.GMM
    setUp = GMMTester._setUp
def test_multiple_init():
    # Test that multiple inits does not much worse than a single one
    X = rng.randn(30, 5)
    X[:10] += 2
    g = mixture.GMM(n_components=2, covariance_type='spherical',
                    random_state=rng, min_covar=1e-7, n_iter=5)
    train1 = g.fit(X).score(X).sum()
    g.n_init = 5
    train2 = g.fit(X).score(X).sum()
    # Best-of-5 restarts should never be materially worse than one run.
    assert_true(train2 >= train1 - 1.e-2)
def test_n_parameters():
    # Test that the right number of parameters is estimated
    n_samples, n_dim, n_components = 7, 5, 2
    X = rng.randn(n_samples, n_dim)
    # Expected free-parameter counts for each covariance structure
    # (weights + means + covariance parameters).
    n_params = {'spherical': 13, 'diag': 21, 'tied': 26, 'full': 41}
    for cv_type in ['full', 'tied', 'diag', 'spherical']:
        g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
                        random_state=rng, min_covar=1e-7, n_iter=1)
        g.fit(X)
        assert_true(g._n_parameters() == n_params[cv_type])
def test_1d_1component():
    # Test all of the covariance_types return the same BIC score for
    # 1-dimensional, 1 component fits.
    n_samples, n_dim, n_components = 100, 1, 1
    X = rng.randn(n_samples, n_dim)
    g_full = mixture.GMM(n_components=n_components, covariance_type='full',
                         random_state=rng, min_covar=1e-7, n_iter=1)
    g_full.fit(X)
    g_full_bic = g_full.bic(X)
    # In 1-d with a single component, every covariance structure is
    # the same single variance, so BIC must agree with the 'full' fit.
    for cv_type in ['tied', 'diag', 'spherical']:
        g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
                        random_state=rng, min_covar=1e-7, n_iter=1)
        g.fit(X)
        assert_array_almost_equal(g.bic(X), g_full_bic)
def assert_fit_predict_correct(model, X):
    """Check fit(X).predict(X) and fit_predict(X) give the same clustering."""
    twin = copy.deepcopy(model)
    labels_via_predict = model.fit(X).predict(X)
    labels_via_fit_predict = twin.fit_predict(X)
    # ARI == 1.0 iff the two partitions are identical up to label permutation.
    assert adjusted_rand_score(labels_via_predict,
                               labels_via_fit_predict) == 1.0
def test_fit_predict():
    """
    test that gmm.fit_predict is equivalent to gmm.fit + gmm.predict
    """
    lrng = np.random.RandomState(101)
    n_samples, n_dim, n_comps = 100, 2, 2
    mu = np.array([[8, 8]])
    # Two well-separated blobs so the clustering is unambiguous.
    component_0 = lrng.randn(n_samples, n_dim)
    component_1 = lrng.randn(n_samples, n_dim) + mu
    X = np.vstack((component_0, component_1))
    for m_constructor in (mixture.GMM, mixture.VBGMM, mixture.DPGMM):
        model = m_constructor(n_components=n_comps, covariance_type='full',
                              min_covar=1e-7, n_iter=5,
                              random_state=np.random.RandomState(0))
        assert_fit_predict_correct(model, X)
    # With n_iter=0 the model never updates: every sample stays in
    # the initial (zeroth) component.
    model = mixture.GMM(n_components=n_comps, n_iter=0)
    z = model.fit_predict(X)
    assert np.all(z == 0), "Quick Initialization Failed!"
def test_aic():
    # Test the aic and bic criteria
    n_samples, n_dim, n_components = 50, 3, 2
    X = rng.randn(n_samples, n_dim)
    SGH = 0.5 * (X.var() + np.log(2 * np.pi))  # standard gaussian entropy
    for cv_type in ['full', 'tied', 'diag', 'spherical']:
        g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
                        random_state=rng, min_covar=1e-7)
        g.fit(X)
        # Analytic approximations of AIC/BIC for standard-normal data.
        aic = 2 * n_samples * SGH * n_dim + 2 * g._n_parameters()
        bic = (2 * n_samples * SGH * n_dim +
               np.log(n_samples) * g._n_parameters())
        bound = n_dim * 3. / np.sqrt(n_samples)
        assert_true(np.abs(g.aic(X) - aic) / n_samples < bound)
        assert_true(np.abs(g.bic(X) - bic) / n_samples < bound)
def check_positive_definite_covars(covariance_type):
    r"""Test that covariance matrices do not become non positive definite

    Due to the accumulation of round-off errors, the computation of the
    covariance matrices during the learning phase could lead to non-positive
    definite covariance matrices. Namely the use of the formula:

    .. math:: C = (\sum_i w_i x_i x_i^T) - \mu \mu^T

    instead of:

    .. math:: C = \sum_i w_i (x_i - \mu)(x_i - \mu)^T

    while mathematically equivalent, was observed a ``LinAlgError`` exception,
    when computing a ``GMM`` with full covariance matrices and fixed mean.

    This function ensures that some later optimization will not introduce the
    problem again.
    """
    rng = np.random.RandomState(1)
    # we build a dataset with 2 2d component. The components are unbalanced
    # (respective weights 0.9 and 0.1)
    X = rng.randn(100, 2)
    X[-10:] += (3, 3)  # Shift the 10 last points
    gmm = mixture.GMM(2, params="wc", covariance_type=covariance_type,
                      min_covar=1e-3)
    # This is a non-regression test for issue #2640. The following call used
    # to trigger:
    # numpy.linalg.linalg.LinAlgError: 2-th leading minor not positive definite
    gmm.fit(X)
    if covariance_type == "diag" or covariance_type == "spherical":
        # element-wise variances must all be strictly positive
        assert_greater(gmm.covars_.min(), 0)
    else:
        # matrix covariances: positive determinant => positive definite here
        if covariance_type == "tied":
            covs = [gmm.covars_]
        else:
            covs = gmm.covars_
        for c in covs:
            assert_greater(np.linalg.det(c), 0)
def test_positive_definite_covars():
    """Yield one positive-definiteness check per covariance type (nose generator)."""
    covariance_types = ("full", "tied", "diag", "spherical")
    for cov_type in covariance_types:
        yield check_positive_definite_covars, cov_type
def test_verbose_first_level():
    """Smoke-test GMM.fit with verbose=1; console output is swallowed."""
    X = rng.randn(30, 5)
    X[:10] += 2
    model = mixture.GMM(n_components=2, n_init=2, verbose=1)
    saved_stdout = sys.stdout
    sys.stdout = StringIO()
    try:
        model.fit(X)
    finally:
        # restore stdout even if fitting raises
        sys.stdout = saved_stdout
def test_verbose_second_level():
    """Smoke-test GMM.fit with verbose=2; console output is swallowed."""
    X = rng.randn(30, 5)
    X[:10] += 2
    model = mixture.GMM(n_components=2, n_init=2, verbose=2)
    saved_stdout = sys.stdout
    sys.stdout = StringIO()
    try:
        model.fit(X)
    finally:
        # restore stdout even if fitting raises
        sys.stdout = saved_stdout
| bsd-3-clause |
thebucknerlife/caravel | setup.py | 1 | 1426 | from setuptools import setup, find_packages
# Release version; also used to build the GitHub tarball download URL below.
version = '0.8.4'

setup(
    name='caravel',
    description=(
        "A interactive data visualization platform build on SqlAlchemy "
        "and druid.io"),
    version=version,
    packages=find_packages(),
    include_package_data=True,
    zip_safe=False,
    # Command-line entry point installed into the user's PATH.
    scripts=['caravel/bin/caravel'],
    # Runtime dependencies, pinned to known-compatible version ranges.
    install_requires=[
        'alembic>=0.8.5, <0.9.0',
        'cryptography>=1.1.1, <2.0.0',
        'flask-appbuilder>=1.6.0, <2.0.0',
        'flask-cache>=0.13.1, <0.14.0',
        'flask-migrate>=1.5.1, <2.0.0',
        'flask-script>=2.0.5, <3.0.0',
        'flask-sqlalchemy==2.0.0',
        'flask-testing>=0.4.2, <0.5.0',
        'flask>=0.10.1, <1.0.0',
        'humanize>=0.5.1, <0.6.0',
        'gunicorn>=19.3.0, <20.0.0',
        'markdown>=2.6.2, <3.0.0',
        'numpy>=1.9, <2',
        'pandas==0.18.0, <0.19.0',
        'parsedatetime==2.0.0',
        'pydruid>=0.2.2, <0.3',
        'python-dateutil>=2.4.2, <3.0.0',
        'requests>=2.7.0, <3.0.0',
        'sqlalchemy>=1.0.12, <2.0.0',
        'sqlalchemy-utils>=0.31.3, <0.32.0',
        'sqlparse>=0.1.16, <0.2.0',
        'werkzeug>=0.11.2, <0.12.0',
    ],
    tests_require=['coverage'],
    author='Maxime Beauchemin',
    author_email='maximebeauchemin@gmail.com',
    url='https://github.com/airbnb/caravel',
    download_url=(
        'https://github.com/airbnb/caravel/tarball/' + version),
)
| apache-2.0 |
ryfeus/lambda-packs | LightGBM_sklearn_scipy_numpy/source/sklearn/utils/testing.py | 4 | 30964 | """Testing utilities."""
# Copyright (c) 2011, 2012
# Authors: Pietro Berkes,
# Andreas Muller
# Mathieu Blondel
# Olivier Grisel
# Arnaud Joly
# Denis Engemann
# Giorgio Patrini
# Thierry Guillemot
# License: BSD 3 clause
import os
import inspect
import pkgutil
import warnings
import sys
import struct
import scipy as sp
import scipy.io
from functools import wraps
from operator import itemgetter
try:
# Python 2
from urllib2 import urlopen
from urllib2 import HTTPError
except ImportError:
# Python 3+
from urllib.request import urlopen
from urllib.error import HTTPError
import tempfile
import shutil
import os.path as op
import atexit
import unittest
# WindowsError only exist on Windows
try:
WindowsError
except NameError:
WindowsError = None
import sklearn
from sklearn.base import BaseEstimator
from sklearn.externals import joblib
from nose.tools import raises
from nose import with_setup
from numpy.testing import assert_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_less
from numpy.testing import assert_approx_equal
import numpy as np
from sklearn.base import (ClassifierMixin, RegressorMixin, TransformerMixin,
ClusterMixin)
# Public assertion helpers re-exported by this module.
__all__ = ["assert_equal", "assert_not_equal", "assert_raises",
           "assert_raises_regexp", "raises", "with_setup", "assert_true",
           "assert_false", "assert_almost_equal", "assert_array_equal",
           "assert_array_almost_equal", "assert_array_less",
           "assert_less", "assert_less_equal",
           "assert_greater", "assert_greater_equal",
           "assert_approx_equal", "SkipTest"]

# A throwaway TestCase instance whose bound assert* methods are exposed
# as module-level functions, so tests need not depend on nose assertions.
_dummy = unittest.TestCase('__init__')
assert_equal = _dummy.assertEqual
assert_not_equal = _dummy.assertNotEqual
assert_true = _dummy.assertTrue
assert_false = _dummy.assertFalse
assert_raises = _dummy.assertRaises
SkipTest = unittest.case.SkipTest
assert_dict_equal = _dummy.assertDictEqual
assert_in = _dummy.assertIn
assert_not_in = _dummy.assertNotIn
assert_less = _dummy.assertLess
assert_greater = _dummy.assertGreater
assert_less_equal = _dummy.assertLessEqual
assert_greater_equal = _dummy.assertGreaterEqual
try:
    assert_raises_regex = _dummy.assertRaisesRegex
except AttributeError:
    # Python 2.7 only has the old assertRaisesRegexp spelling
    assert_raises_regex = _dummy.assertRaisesRegexp
# assert_raises_regexp is deprecated in Python 3.4 in favor of
# assert_raises_regex but lets keep the backward compat in scikit-learn with
# the old name for now
assert_raises_regexp = assert_raises_regex
def assert_warns(warning_class, func, *args, **kw):
    """Test that a certain warning occurs.

    Parameters
    ----------
    warning_class : the warning class
        The class to test for, e.g. UserWarning.

    func : callable
        Callable object to trigger warnings.

    *args : the positional arguments to `func`.

    **kw : the keyword arguments to `func`

    Returns
    -------
    result : the return value of `func`

    Raises
    ------
    AssertionError
        If no warning at all, or no warning of ``warning_class``, is raised.
    """
    # very important to avoid uncontrolled state propagation
    clean_warning_registry()
    with warnings.catch_warnings(record=True) as w:
        # Cause all warnings to always be triggered.
        warnings.simplefilter("always")
        # Trigger a warning.
        result = func(*args, **kw)
        if hasattr(np, 'VisibleDeprecationWarning'):
            # Filter out numpy-specific warnings in numpy >= 1.9
            w = [e for e in w
                 if e.category is not np.VisibleDeprecationWarning]
        # Verify some things
        if not len(w) > 0:
            raise AssertionError("No warning raised when calling %s"
                                 % func.__name__)
        # exact class match intentionally (``is``, not issubclass)
        found = any(warning.category is warning_class for warning in w)
        if not found:
            raise AssertionError("%s did not give warning: %s( is %s)"
                                 % (func.__name__, warning_class, w))
    return result
def assert_warns_message(warning_class, message, func, *args, **kw):
    """Test that a certain warning occurs and with a certain message.

    Parameters
    ----------
    warning_class : the warning class
        The class to test for, e.g. UserWarning.

    message : str | callable
        The entire message or a substring to  test for. If callable,
        it takes a string as argument and will trigger an assertion error
        if it returns `False`.

    func : callable
        Callable object to trigger warnings.

    *args : the positional arguments to `func`.

    **kw : the keyword arguments to `func`.

    Returns
    -------
    result : the return value of `func`
    """
    # very important to avoid uncontrolled state propagation
    clean_warning_registry()
    with warnings.catch_warnings(record=True) as w:
        # Cause all warnings to always be triggered.
        warnings.simplefilter("always")
        if hasattr(np, 'VisibleDeprecationWarning'):
            # Let's not catch the numpy internal DeprecationWarnings
            warnings.simplefilter('ignore', np.VisibleDeprecationWarning)
        # Trigger a warning.
        result = func(*args, **kw)
        # Verify some things
        if not len(w) > 0:
            raise AssertionError("No warning raised when calling %s"
                                 % func.__name__)
        # subclass match here (vs exact class match in assert_warns)
        found = [issubclass(warning.category, warning_class) for warning in w]
        if not any(found):
            raise AssertionError("No warning raised for %s with class "
                                 "%s"
                                 % (func.__name__, warning_class))
        message_found = False
        # Checks the message of all warnings belong to warning_class
        for index in [i for i, x in enumerate(found) if x]:
            # substring will match, the entire message with typo won't
            msg = w[index].message  # For Python 3 compatibility
            msg = str(msg.args[0] if hasattr(msg, 'args') else msg)
            if callable(message):  # add support for certain tests
                check_in_message = message
            else:
                check_in_message = lambda msg: message in msg
            if check_in_message(msg):
                message_found = True
                break
        if not message_found:
            raise AssertionError("Did not receive the message you expected "
                                 "('%s') for <%s>, got: '%s'"
                                 % (message, func.__name__, msg))
    return result
# To remove when we support numpy 1.7
def assert_no_warnings(func, *args, **kw):
    """Call ``func(*args, **kw)`` and fail if any warning is emitted."""
    # very important to avoid uncontrolled state propagation
    clean_warning_registry()
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter('always')
        result = func(*args, **kw)
        if hasattr(np, 'VisibleDeprecationWarning'):
            # Filter out numpy-specific warnings in numpy >= 1.9
            caught = [item for item in caught
                      if item.category is not np.VisibleDeprecationWarning]
        if caught:
            raise AssertionError(
                "Got warnings when calling %s: [%s]"
                % (func.__name__,
                   ', '.join(str(warning) for warning in caught)))
    return result
def ignore_warnings(obj=None, category=Warning):
    """Context manager and decorator to ignore warnings.

    Note. Using this (in both variants) will clear all warnings
    from all python modules loaded. In case you need to test
    cross-module-warning-logging this is not your tool of choice.

    Parameters
    ----------
    category : warning class, defaults to Warning.
        The category to filter. If Warning, all categories will be muted.

    Examples
    --------
    >>> with ignore_warnings():
    ...     warnings.warn('buhuhuhu')

    >>> def nasty_warn():
    ...    warnings.warn('buhuhuhu')
    ...    print(42)

    >>> ignore_warnings(nasty_warn)()
    42
    """
    if callable(obj):
        # used bare as a decorator: @ignore_warnings
        return _IgnoreWarnings(category=category)(obj)
    # used as a factory (``@ignore_warnings(category=...)``) or as a
    # context manager (``with ignore_warnings(): ...``)
    return _IgnoreWarnings(category=category)
class _IgnoreWarnings(object):
    """Improved and simplified Python warnings context manager and decorator.

    This class allows to ignore the warnings raise by a function.
    Copied from Python 2.7.5 and modified as required.

    Parameters
    ----------
    category : tuple of warning class, default to Warning
        The category to filter. By default, all the categories will be muted.
    """

    def __init__(self, category):
        self._record = True
        # keep a handle on the warnings module so filter state can be
        # saved/restored even if sys.modules is tampered with
        self._module = sys.modules['warnings']
        self._entered = False
        self.log = []
        self.category = category

    def __call__(self, fn):
        """Decorator to catch and hide warnings without visual nesting."""
        @wraps(fn)
        def wrapper(*args, **kwargs):
            # very important to avoid uncontrolled state propagation
            clean_warning_registry()
            with warnings.catch_warnings():
                warnings.simplefilter("ignore", self.category)
                return fn(*args, **kwargs)

        return wrapper

    def __repr__(self):
        """Debug representation showing record flag and warnings module."""
        args = []
        if self._record:
            args.append("record=True")
        if self._module is not sys.modules['warnings']:
            args.append("module=%r" % self._module)
        name = type(self).__name__
        return "%s(%s)" % (name, ", ".join(args))

    def __enter__(self):
        """Start muting ``self.category``; save the current filter state."""
        clean_warning_registry()  # be safe and not propagate state + chaos
        warnings.simplefilter("ignore", self.category)
        if self._entered:
            raise RuntimeError("Cannot enter %r twice" % self)
        self._entered = True
        # snapshot filters (copied) and showwarning for restoration on exit
        self._filters = self._module.filters
        self._module.filters = self._filters[:]
        self._showwarning = self._module.showwarning

    def __exit__(self, *exc_info):
        """Restore the saved warnings filter state."""
        if not self._entered:
            raise RuntimeError("Cannot exit %r without entering first" % self)
        self._module.filters = self._filters
        self._module.showwarning = self._showwarning
        self.log[:] = []
        clean_warning_registry()  # be safe and not propagate state + chaos
# NOTE(review): these two aliases duplicate the assert_less/assert_greater
# bindings made earlier in this module; redundant but harmless.
assert_less = _dummy.assertLess
assert_greater = _dummy.assertGreater
def _assert_allclose(actual, desired, rtol=1e-7, atol=0,
err_msg='', verbose=True):
actual, desired = np.asanyarray(actual), np.asanyarray(desired)
if np.allclose(actual, desired, rtol=rtol, atol=atol):
return
msg = ('Array not equal to tolerance rtol=%g, atol=%g: '
'actual %s, desired %s') % (rtol, atol, actual, desired)
raise AssertionError(msg)
# Prefer numpy's own assert_allclose when the installed numpy provides it;
# otherwise fall back to the local reimplementation above.
if hasattr(np.testing, 'assert_allclose'):
    assert_allclose = np.testing.assert_allclose
else:
    assert_allclose = _assert_allclose
def assert_raise_message(exceptions, message, function, *args, **kwargs):
    """Helper function to test the message raised in an exception.

    Checks that calling ``function(*args, **kwargs)`` raises one of the
    expected exception types and that ``message`` is a substring of the
    raised exception's message.

    Parameters
    ----------
    exceptions : exception or tuple of exception
        The exception type(s) expected to be raised by ``function``.

    message : str
        The error message (or a substring of it) expected in the exception.

    function : callable
        Callable object expected to raise the error.

    *args : the positional arguments to `function`.

    **kwargs : the keyword arguments to `function`.
    """
    try:
        function(*args, **kwargs)
    except exceptions as e:
        error_message = str(e)
        if message not in error_message:
            raise AssertionError("Error message does not include the expected"
                                 " string: %r. Observed error message: %r" %
                                 (message, error_message))
    else:
        # No exception was raised: report which one(s) were expected.
        if isinstance(exceptions, tuple):
            names = " or ".join(e.__name__ for e in exceptions)
        else:
            names = exceptions.__name__

        raise AssertionError("%s not raised by %s" %
                             (names, function.__name__))
def assert_allclose_dense_sparse(x, y, rtol=1e-07, atol=1e-9, err_msg=''):
    """Assert allclose for sparse and dense data.

    Both x and y need to be either sparse or dense, they
    can't be mixed.

    Parameters
    ----------
    x : array-like or sparse matrix
        First array to compare.

    y : array-like or sparse matrix
        Second array to compare.

    rtol : float, optional
        relative tolerance; see numpy.allclose

    atol : float, optional
        absolute tolerance; see numpy.allclose. Note that the default here is
        more tolerant than the default for numpy.testing.assert_allclose, where
        atol=0.

    err_msg : string, default=''
        Error message to raise.
    """
    x_is_sparse = sp.sparse.issparse(x)
    y_is_sparse = sp.sparse.issparse(y)
    if x_is_sparse and y_is_sparse:
        x, y = x.tocsr(), y.tocsr()
        # canonicalize so structurally-equal matrices compare equal
        x.sum_duplicates()
        y.sum_duplicates()
        assert_array_equal(x.indices, y.indices, err_msg=err_msg)
        assert_array_equal(x.indptr, y.indptr, err_msg=err_msg)
        assert_allclose(x.data, y.data, rtol=rtol, atol=atol, err_msg=err_msg)
    elif not x_is_sparse and not y_is_sparse:
        # both dense
        assert_allclose(x, y, rtol=rtol, atol=atol, err_msg=err_msg)
    else:
        raise ValueError("Can only compare two sparse matrices,"
                         " not a sparse matrix and an array.")
def fake_mldata(columns_dict, dataname, matfile, ordering=None):
    """Create a fake mldata data set.

    Parameters
    ----------
    columns_dict : dict, keys=str, values=ndarray
        Contains data as columns_dict[column_name] = array of data.

    dataname : string
        Name of data set.  (Currently unused; kept for API compatibility.)

    matfile : string or file object
        The file name string or the file-like object of the output file.

    ordering : list, default None
        List of column_names, determines the ordering in the data set.

    Notes
    -----
    This function transposes all arrays, while fetch_mldata only transposes
    'data', keep that into account in the tests.
    """
    datasets = dict(columns_dict)

    # transpose all variables
    for name in datasets:
        datasets[name] = datasets[name].T

    if ordering is None:
        ordering = sorted(list(datasets.keys()))
    # NOTE: setting up this array is tricky, because of the way Matlab
    # re-packages 1D arrays
    # Use np.empty: the ``scipy.empty`` numpy alias used previously was
    # deprecated and removed from SciPy's top-level namespace.
    datasets['mldata_descr_ordering'] = np.empty((1, len(ordering)),
                                                 dtype='object')
    for i, name in enumerate(ordering):
        datasets['mldata_descr_ordering'][0, i] = name

    scipy.io.savemat(matfile, datasets, oned_as='column')
class mock_mldata_urlopen(object):

    def __init__(self, mock_datasets):
        """Object that mocks the urlopen function to fake requests to mldata.

        `mock_datasets` is a dictionary of {dataset_name: data_dict}, or
        {dataset_name: (data_dict, ordering).
        `data_dict` itself is a dictionary of {column_name: data_array},
        and `ordering` is a list of column_names to determine the ordering
        in the data set (see `fake_mldata` for details).

        When requesting a dataset with a name that is in mock_datasets,
        this object creates a fake dataset in a StringIO object and
        returns it. Otherwise, it raises an HTTPError.
        """
        self.mock_datasets = mock_datasets

    def __call__(self, urlname):
        """Serve a fake .mat file for known dataset URLs, else raise 404."""
        # the dataset name is the last path component of the URL
        dataset_name = urlname.split('/')[-1]
        if dataset_name in self.mock_datasets:
            resource_name = '_' + dataset_name
            from io import BytesIO
            matfile = BytesIO()

            dataset = self.mock_datasets[dataset_name]
            ordering = None
            if isinstance(dataset, tuple):
                dataset, ordering = dataset
            fake_mldata(dataset, resource_name, matfile, ordering)

            # rewind so the caller can read the buffer like a response body
            matfile.seek(0)
            return matfile
        else:
            raise HTTPError(urlname, 404, dataset_name + " is not available",
                            [], None)
def install_mldata_mock(mock_datasets):
    """Replace sklearn's mldata urlopen with a mock serving `mock_datasets`."""
    # Lazy import to avoid mutually recursive imports
    from sklearn import datasets
    datasets.mldata.urlopen = mock_mldata_urlopen(mock_datasets)
def uninstall_mldata_mock():
    """Restore the real urlopen in sklearn's mldata module."""
    # Lazy import to avoid mutually recursive imports
    from sklearn import datasets
    datasets.mldata.urlopen = urlopen
# Meta estimators need another estimator to be instantiated.
META_ESTIMATORS = ["OneVsOneClassifier", "MultiOutputEstimator",
                   "MultiOutputRegressor", "MultiOutputClassifier",
                   "OutputCodeClassifier", "OneVsRestClassifier",
                   "RFE", "RFECV", "BaseEnsemble", "ClassifierChain"]
# estimators that there is no way to default-construct sensibly
OTHER = ["Pipeline", "FeatureUnion", "GridSearchCV", "RandomizedSearchCV",
         "SelectFromModel"]

# some strange ones that the common estimator checks must skip
DONT_TEST = ['SparseCoder', 'EllipticEnvelope', 'DictVectorizer',
             'LabelBinarizer', 'LabelEncoder',
             'MultiLabelBinarizer', 'TfidfTransformer',
             'TfidfVectorizer', 'IsotonicRegression',
             'OneHotEncoder', 'RandomTreesEmbedding',
             'FeatureHasher', 'DummyClassifier', 'DummyRegressor',
             'TruncatedSVD', 'PolynomialFeatures',
             'GaussianRandomProjectionHash', 'HashingVectorizer',
             'CheckingClassifier', 'PatchExtractor', 'CountVectorizer',
             # GradientBoosting base estimators, maybe should
             # exclude them in another way
             'ZeroEstimator', 'ScaledLogOddsEstimator',
             'QuantileEstimator', 'MeanEstimator',
             'LogOddsEstimator', 'PriorProbabilityEstimator',
             '_SigmoidCalibration', 'VotingClassifier']
def all_estimators(include_meta_estimators=False,
                   include_other=False, type_filter=None,
                   include_dont_test=False):
    """Get a list of all estimators from sklearn.

    This function crawls the module and gets all classes that inherit
    from BaseEstimator. Classes that are defined in test-modules are not
    included.
    By default meta-estimators such as GridSearchCV are also not included.

    Parameters
    ----------
    include_meta_estimators : boolean, default=False
        Whether to include meta-estimators that can be constructed using
        an estimator as their first argument. These are currently
        BaseEnsemble, OneVsOneClassifier, OutputCodeClassifier,
        OneVsRestClassifier, RFE, RFECV.

    include_other : boolean, default=False
        Whether to include meta-estimators that are somehow special and can
        not be default-constructed sensibly. These are currently
        Pipeline, FeatureUnion and GridSearchCV

    include_dont_test : boolean, default=False
        Whether to include "special" label estimator or test processors.

    type_filter : string, list of string, or None, default=None
        Which kind of estimators should be returned. If None, no filter is
        applied and all estimators are returned. Possible values are
        'classifier', 'regressor', 'cluster' and 'transformer' to get
        estimators only of these specific types, or a list of these to
        get the estimators that fit at least one of the types.

    Returns
    -------
    estimators : list of tuples
        List of (name, class), where ``name`` is the class name as string
        and ``class`` is the actual type of the class.
    """
    def is_abstract(c):
        # A class is abstract iff it still carries unimplemented
        # abstract methods.
        if not(hasattr(c, '__abstractmethods__')):
            return False
        if not len(c.__abstractmethods__):
            return False
        return True

    all_classes = []
    # get parent folder
    path = sklearn.__path__
    # Walk every sklearn submodule (silencing import errors) and collect
    # every class defined anywhere in the package, skipping test modules.
    for importer, modname, ispkg in pkgutil.walk_packages(
            path=path, prefix='sklearn.', onerror=lambda x: None):
        if (".tests." in modname):
            continue
        module = __import__(modname, fromlist="dummy")
        classes = inspect.getmembers(module, inspect.isclass)
        all_classes.extend(classes)

    # Classes re-exported from several modules appear multiple times.
    all_classes = set(all_classes)

    estimators = [c for c in all_classes
                  if (issubclass(c[1], BaseEstimator) and
                      c[0] != 'BaseEstimator')]
    # get rid of abstract base classes
    estimators = [c for c in estimators if not is_abstract(c[1])]

    if not include_dont_test:
        estimators = [c for c in estimators if not c[0] in DONT_TEST]

    if not include_other:
        estimators = [c for c in estimators if not c[0] in OTHER]

    # possibly get rid of meta estimators
    if not include_meta_estimators:
        estimators = [c for c in estimators if not c[0] in META_ESTIMATORS]

    if type_filter is not None:
        if not isinstance(type_filter, list):
            type_filter = [type_filter]
        else:
            type_filter = list(type_filter)  # copy
        filtered_estimators = []
        filters = {'classifier': ClassifierMixin,
                   'regressor': RegressorMixin,
                   'transformer': TransformerMixin,
                   'cluster': ClusterMixin}
        # Consume recognised names from type_filter; anything left over
        # is an unknown filter and triggers the ValueError below.
        for name, mixin in filters.items():
            if name in type_filter:
                type_filter.remove(name)
                filtered_estimators.extend([est for est in estimators
                                            if issubclass(est[1], mixin)])
        estimators = filtered_estimators
        if type_filter:
            raise ValueError("Parameter type_filter must be 'classifier', "
                             "'regressor', 'transformer', 'cluster' or "
                             "None, got"
                             " %s." % repr(type_filter))

    # drop duplicates, sort for reproducibility
    # itemgetter is used to ensure the sort does not extend to the 2nd item of
    # the tuple
    return sorted(set(estimators), key=itemgetter(0))
def set_random_state(estimator, random_state=0):
    """Seed *estimator* via ``set_params`` when it exposes ``random_state``.

    Estimators without a ``random_state`` parameter are left untouched.
    """
    params = estimator.get_params()
    if "random_state" in params:
        estimator.set_params(random_state=random_state)
def if_matplotlib(func):
    """Test decorator that skips test if matplotlib not installed."""
    @wraps(func)
    def run_test(*args, **kwargs):
        try:
            import matplotlib
            # Force the non-interactive Agg backend so no display is needed.
            matplotlib.use('Agg', warn=False)
            # this fails if no $DISPLAY specified
            # NOTE(review): a missing display usually surfaces as a
            # RuntimeError/TclError rather than ImportError, so the skip
            # below may not trigger in that case — confirm intent.
            import matplotlib.pyplot as plt
            plt.figure()
        except ImportError:
            raise SkipTest('Matplotlib not available.')
        else:
            return func(*args, **kwargs)
    return run_test
def skip_if_32bit(func):
    """Test decorator that skips the wrapped test on 32-bit platforms.

    The pointer size reported by ``struct.calcsize("P")`` decides whether
    the interpreter is a 32-bit build.
    """
    @wraps(func)
    def run_test(*args, **kwargs):
        pointer_bits = struct.calcsize("P") * 8
        if pointer_bits == 32:
            raise SkipTest('Test skipped on 32bit platforms.')
        return func(*args, **kwargs)
    return run_test
def if_safe_multiprocessing_with_blas(func):
    """Decorator for tests involving both BLAS calls and multiprocessing.

    Under POSIX (e.g. Linux or OSX), using multiprocessing in conjunction
    with some implementation of BLAS (or other libraries that manage an
    internal posix thread pool) can cause a crash or a freeze of the Python
    process.

    In practice all known packaged distributions (from Linux distros or
    Anaconda) of BLAS under Linux seem to be safe, so this problem seems
    to only impact OSX users.

    This wrapper makes it possible to skip tests that can possibly cause
    this crash under OS X.

    Under Python 3.4+ it is possible to use the `forkserver` start method
    for multiprocessing to avoid this issue. However it can cause pickling
    errors on interactively defined functions. It is therefore not enabled
    by default.
    """
    @wraps(func)
    def run_test(*args, **kwargs):
        # Blanket skip on macOS rather than probing the BLAS build.
        if sys.platform == 'darwin':
            raise SkipTest(
                "Possible multi-process bug with some BLAS")
        return func(*args, **kwargs)
    return run_test
def clean_warning_registry():
    """Reset warning filters and clear every module's warning registry.

    Clearing ``__warningregistry__`` makes "once"-style warnings fire
    again in subsequent tests.
    """
    warnings.resetwarnings()
    registry_attr = "__warningregistry__"
    # Snapshot the module table: it can mutate while we iterate.
    for module_name, module in list(sys.modules.items()):
        # six.moves lazy modules misbehave on attribute access; skip them.
        if 'six.moves' in module_name:
            continue
        if hasattr(module, registry_attr):
            getattr(module, registry_attr).clear()
def check_skip_network():
    """Raise SkipTest when SKLEARN_SKIP_NETWORK_TESTS requests skipping."""
    flag = os.environ.get('SKLEARN_SKIP_NETWORK_TESTS', 0)
    if int(flag):
        raise SkipTest("Text tutorial requires large dataset download")
def check_skip_travis():
    """Raise SkipTest when the TRAVIS environment variable is \"true\"."""
    running_on_travis = os.environ.get('TRAVIS') == "true"
    if running_on_travis:
        raise SkipTest("This test needs to be skipped on Travis")
def _delete_folder(folder_path, warn=False):
"""Utility function to cleanup a temporary folder if still existing.
Copy from joblib.pool (for independence).
"""
try:
if os.path.exists(folder_path):
# This can fail under windows,
# but will succeed when called by atexit
shutil.rmtree(folder_path)
except WindowsError:
if warn:
warnings.warn("Could not delete temporary folder %s" % folder_path)
class TempMemmap(object):
    """Context manager exposing ``data`` as a memory-mapped copy.

    On entry the data is dumped with joblib into a private temporary
    folder and loaded back with ``mmap_mode``; on exit the folder is
    removed.  An atexit hook is registered as a fallback cleanup in case
    ``__exit__`` never runs (e.g. interpreter crash mid-test).
    """
    def __init__(self, data, mmap_mode='r'):
        # Dedicated temp dir so cleanup cannot touch unrelated files.
        self.temp_folder = tempfile.mkdtemp(prefix='sklearn_testing_')
        self.mmap_mode = mmap_mode
        self.data = data

    def __enter__(self):
        # ``op`` is presumably an os.path alias from this module's
        # imports (not visible here) — TODO confirm.
        fpath = op.join(self.temp_folder, 'data.pkl')
        joblib.dump(self.data, fpath)
        data_read_only = joblib.load(fpath, mmap_mode=self.mmap_mode)
        # Fallback cleanup at interpreter shutdown; warn instead of raise.
        atexit.register(lambda: _delete_folder(self.temp_folder, warn=True))
        return data_read_only

    def __exit__(self, exc_type, exc_val, exc_tb):
        _delete_folder(self.temp_folder)
# Setup decorators (presumably nose's with_setup, imported above — TODO
# confirm): skip the wrapped test based on the SKLEARN_SKIP_NETWORK_TESTS
# or TRAVIS environment variables.
with_network = with_setup(check_skip_network)
with_travis = with_setup(check_skip_travis)
class _named_check(object):
"""Wraps a check to show a useful description
Parameters
----------
check : function
Must have ``__name__`` and ``__call__``
arg_text : str
A summary of arguments to the check
"""
# Setting the description on the function itself can give incorrect results
# in failing tests
def __init__(self, check, arg_text):
self.check = check
self.description = ("{0[1]}.{0[3]}:{1.__name__}({2})".format(
inspect.stack()[1], check, arg_text))
def __call__(self, *args, **kwargs):
return self.check(*args, **kwargs)
# Utils to test docstrings
def _get_args(function, varargs=False):
"""Helper to get function arguments"""
# NOTE this works only in python3.5
if sys.version_info < (3, 5):
NotImplementedError("_get_args is not available for python < 3.5")
params = inspect.signature(function).parameters
args = [key for key, param in params.items()
if param.kind not in (param.VAR_POSITIONAL, param.VAR_KEYWORD)]
if varargs:
varargs = [param.name for param in params.values()
if param.kind == param.VAR_POSITIONAL]
if len(varargs) == 0:
varargs = None
return args, varargs
else:
return args
def _get_func_name(func, class_name=None):
"""Get function full name
Parameters
----------
func : callable
The function object.
class_name : string, optional (default: None)
If ``func`` is a class method and the class name is known specify
class_name for the error message.
Returns
-------
name : str
The function name.
"""
parts = []
module = inspect.getmodule(func)
if module:
parts.append(module.__name__)
if class_name is not None:
parts.append(class_name)
elif hasattr(func, 'im_class'):
parts.append(func.im_class.__name__)
parts.append(func.__name__)
return '.'.join(parts)
def check_docstring_parameters(func, doc=None, ignore=None, class_name=None):
    """Helper to check docstring.

    Parameters
    ----------
    func : callable
        The function object to test.
    doc : str, optional (default: None)
        Docstring if it is passed manually to the test.
    ignore : None | list
        Parameters to ignore.
    class_name : string, optional (default: None)
        If ``func`` is a class method and the class name is known specify
        class_name for the error message.

    Returns
    -------
    incorrect : list
        A list of string describing the incorrect results.
    """
    from numpydoc import docscrape
    incorrect = []
    ignore = [] if ignore is None else ignore

    func_name = _get_func_name(func, class_name=class_name)
    # Only check sklearn's own public code, not vendored externals.
    if (not func_name.startswith('sklearn.') or
            func_name.startswith('sklearn.externals')):
        return incorrect
    # Don't check docstring for property-functions
    if inspect.isdatadescriptor(func):
        return incorrect
    args = list(filter(lambda x: x not in ignore, _get_args(func)))
    # drop self
    if len(args) > 0 and args[0] == 'self':
        args.remove('self')

    if doc is None:
        with warnings.catch_warnings(record=True) as w:
            try:
                doc = docscrape.FunctionDoc(func)
            except Exception as exp:
                incorrect += [func_name + ' parsing error: ' + str(exp)]
                return incorrect
        # Any warning emitted while parsing the docstring is an error.
        if len(w):
            raise RuntimeError('Error for %s:\n%s' % (func_name, w[0]))

    param_names = []
    for name, type_definition, param_doc in doc['Parameters']:
        # An empty or ":"-prefixed type definition means the docstring's
        # "name : type" line is malformed; distinguish the two cases.
        if (type_definition.strip() == "" or
                type_definition.strip().startswith(':')):
            param_name = name.lstrip()
            # If there was no space between name and the colon
            # "verbose:" -> len(["verbose", ""][0]) -> 7
            # If "verbose:"[7] == ":", then there was no space
            if param_name[len(param_name.split(':')[0].strip())] == ':':
                incorrect += [func_name +
                              ' There was no space between the param name and '
                              'colon ("%s")' % name]
            else:
                incorrect += [func_name + ' Incorrect type definition for '
                              'param: "%s" (type definition was "%s")'
                              % (name.split(':')[0], type_definition)]
        # Starred parameters (*args / **kwargs) are not checked.
        if '*' not in name:
            param_names.append(name.split(':')[0].strip('` '))

    param_names = list(filter(lambda x: x not in ignore, param_names))

    if len(param_names) != len(args):
        # Symmetric difference shows params missing on either side.
        bad = str(sorted(list(set(param_names) ^ set(args))))
        incorrect += [func_name + ' arg mismatch: ' + bad]
    else:
        for n1, n2 in zip(param_names, args):
            if n1 != n2:
                incorrect += [func_name + ' ' + n1 + ' != ' + n2]
    return incorrect
| mit |
prheenan/Research | Perkins/Projects/Protein/alpha3d/2016-11-30-devin-iwt/Main_alpha3d_iwt.py | 1 | 2674 | # force floating point division. Can still use integer with //
from __future__ import division
# This file is used for importing the common utilities classes.
import numpy as np
import matplotlib.pyplot as plt
import sys
sys.path.append("../../../../../../")
from Research.Perkins.AnalysisUtil.EnergyLandscapes import IWT_Util,IWT_Plot
from Research.Perkins.AnalysisUtil.ForceExtensionAnalysis import FEC_Util
from IgorUtil.PythonAdapter import PxpLoader
from FitUtil.EnergyLandscapes.InverseWeierstrass.Python.Code import \
InverseWeierstrass
def run():
    """Run the inverse Weierstrass transform (IWT) analysis for alpha3D.

    Reads force-extension curves from ``./ForPatrick.pxp``, flips the
    force sign, splits each curve into an unfolding half and a refolding
    half at a hard-coded sample index, zeroes the separation by the 5th
    percentile of the unfolding separation, converts both halves to IWT
    objects (negating the velocity of the refolding ones), and writes
    landscape plots to ``./out/``.
    """
    Base = "./"
    OutBase = Base + "out/"
    InFiles = [Base + "ForPatrick.pxp"]
    RawData = IWT_Util.\
        ReadInAllFiles(InFiles,Limit=50,
                       ValidFunc=PxpLoader.valid_fec_allow_endings)
    # get the start/ends of the re-folding and unfolding portions
    # hard coded constant for now...
    # XXX for re-folding, need to add in schedule
    # XXX ensure properly zeroed?
    idx_end_of_unfolding = 8100
    unfold,refold = [],[]
    for d in RawData:
        # flip the sign, so force goes up
        d.Force *= -1
        # get the unfolding and unfolds
        slice_unfolding = slice(0,idx_end_of_unfolding)
        unfold_tmp = FEC_Util.MakeTimeSepForceFromSlice(d,slice_unfolding)
        slice_folding = slice(idx_end_of_unfolding,idx_end_of_unfolding*2)
        fold_tmp = FEC_Util.MakeTimeSepForceFromSlice(d,slice_folding)
        # arbitrarily assign the minimum separation to the lower 5%.
        MinV = np.percentile(unfold_tmp.Separation,5)
        unfold_tmp.Separation -= MinV
        fold_tmp.Separation -= MinV
        unfold.append(unfold_tmp)
        refold.append(fold_tmp)
    # convert all the unfolding objects to IWT data
    IwtData = IWT_Util.ToIWTObjects(unfold)
    IwtData_fold = IWT_Util.ToIWTObjects(refold)
    # switch the velocities of all the folding objects..
    for o in IwtData_fold:
        o.Velocity *= -1
    # get the titled landscape...
    Bounds = IWT_Util.BoundsObj(bounds_folded_nm=[20,30],
                                bounds_transition_nm=[26,35],
                                bounds_unfolded_nm=[32,40],
                                force_one_half_N=13e-12)
    IWT_Plot.InTheWeedsPlot(OutBase="./out/",
                            UnfoldObj=IwtData,
                            bounds=Bounds,Bins=[40,60,80],
                            max_landscape_kT=None,
                            min_landscape_kT=None)
# Script entry point: run the full analysis when executed directly.
if __name__ == "__main__":
    run()
| gpl-3.0 |
dhesse/parmalgt-analysis | actions.py | 1 | 7884 | """
:mod:`actions` -- Methods to perform the data analysis
=======================================================
.. module: actions
In this module, we collect the functions that can be applied to the
data.
"""
from puwr import tauint
from math import log
import numpy as np
from scipy.linalg import svd, diagsvd, norm
import matplotlib.pyplot as plt
plt.rcParams['text.usetex'] = True
def pretty_print(val, err, extra_err_digits=1):
    """Format ``val`` and its error ``err`` in compact ``value(err)`` form.

    Integers are printed as-is with the truncated error in parentheses.
    Floats are rounded so the error shows ``extra_err_digits`` significant
    digits beyond its leading one, e.g. ``pretty_print(1.5, 0.25)`` gives
    ``'1.50(25)'``.
    """
    if isinstance(val, int):
        return "{0}({1})".format(val, int(err))
    # Decimal places: one past the error's leading digit, plus extras.
    n_digits = 1 - int(log(err, 10)) + extra_err_digits
    scaled_err = int(err * 10 ** n_digits + 0.5)
    # An error that rounds up to exactly 10 collapses to one digit.
    if scaled_err == 10 and extra_err_digits != 1:
        scaled_err = 1
        n_digits -= 1
    return "{0:.{1}f}({2})".format(val, n_digits, scaled_err)
def show(data, arg_dict):
    """Display the mean value, estimated auto-correlation and error
    thereof.

    NB: this module is Python 2 (print statements).  ``data`` maps labels
    to objects with a ``.data`` array; ``arg_dict`` supplies the expansion
    ``orders`` and the ``uwplot`` flag forwarded to puwr's ``tauint``.
    """
    for label in sorted(data.keys()):
        print "* label:", label
        for o in arg_dict["orders"]:
            print " * order:", o
            # Gamma-method estimate of mean, error and integrated
            # autocorrelation time (puwr.tauint).
            mean, delta, tint, dtint = tauint(data[label].data,
                                              o, plots=arg_dict['uwplot'])
            print " mean:", pretty_print(mean, delta)
            print " tint:", pretty_print(tint, dtint)
class ContinuumLimit(object):
    """Class to estimate continuum limits, as presented in [hep-lat/9911018].

    :param data_in: The input data, in the standard format (param,
      [data]), where param is expected to contain the key 'L', which is
      interpreted as some sort of lattice size, to be taken to
      infinity
    :param fns: The functions the data is to be fitted to.
    :param delta: The error on the input data.
    :param wij: The weights of the data points.
    """
    def __init__(self, data_in, fns, delta = None, wij = None):
        # Default to unit weights when none are given.
        if not wij:
            wij = [1]*len(data_in)
        assert len(wij) == len(data_in)
        self.W = np.mat(np.diag(wij))
        data = {}
        errsq = {}
        # Accept either objects exposing .loop/.esq or plain values
        # (which then carry zero error).
        for param, [obj] in data_in:
            try:
                data[param['L']] = obj.loop
                errsq[param['L']] = obj.esq
            except AttributeError:
                data[param['L']] = obj
                errsq[param['L']] = 0
        # naive error estimate
        if not delta:
            self.deltasq = np.mat([errsq[L] for L in sorted(errsq)]).transpose()
        else:
            self.deltasq = np.mat([d**2 for d in delta])
        # Design matrix: one row per lattice size, one column per basis fn.
        try:
            self.f = np.mat([[f(L) for f in fns] for L in sorted(data)])
        except ValueError as e:
            print "ERROR"
            print "It seems like there are multiple data points for the"
            print "same lattice size L. I don't know what to do with this"
            print "kind of input, so I will abort."
            print "INPUT DATA:"
            print data_in
            # NOTE(review): ``sys`` is not imported in this module's
            # visible imports, so this line would raise NameError — verify.
            sys.exit()
        self.F = np.mat([data[L] for L in sorted(data)]).transpose()

    def estimate(self, Imin):
        """Starting from the Imin-th lattice size, determine the best
        fit parameters. This uses scipy's built-in singular value
        decomposition."""
        M, N = self.f.shape
        assert Imin < M
        # Shape of the truncated (weighted) design matrix.
        M, N = self.f[Imin:, :].shape
        U, s, Vt = svd((self.W*self.f)[Imin:,])
        # Pseudo-inverse via SVD: f^+ = V S^{-1} U^T.
        Sinv = np.mat(diagsvd(1/s, N, M))
        Ut = np.mat(U.transpose())
        V = np.mat(Vt.transpose())
        finv = V * Sinv * Ut
        # check estimate
        alpha = finv * self.W * self.F[Imin:,:]
        # propagate errors
        finvsq = np.mat(np.array(finv)**2)
        delta = finvsq * self.W**2 * self.deltasq[Imin:,:]
        # Residual norm of the fit, kept for diagnostics.
        r = norm(self.f[Imin:,:] * alpha - self.F[Imin:,:])
        self.residual = r
        return alpha, delta
class dummy:
    """Minimal measurement stand-in exposing the ``loop``/``esq``
    interface expected by :class:`ContinuumLimit`: a value and the
    square of its error."""
    def __init__(self, d, e):
        self.loop = d
        self.esq = e ** 2
def extrapolate_cl(f, xdata, ydata, yerr):
    """Continuum-limit fit of ``(xdata, ydata +/- yerr)`` onto the basis
    functions ``f`` using :class:`ContinuumLimit`; returns the tuple
    ``(coefficients, errors)`` from ``estimate(0)``."""
    # Wrap each point in the (param, [obj]) format ContinuumLimit expects.
    data = [({"L": x}, [dummy(y, d)])
            for x, y, d in zip(xdata, ydata, yerr)]
    # uncomment the end of the next line (and delete the ")") for a
    # weighted fit
    cl = ContinuumLimit(data, f)#, wij = [1/x/x for x in yerr])
    return cl.estimate(0)
def mk_plot(plot):
    """Render one ``plot`` description object to PDF.

    ``plot`` carries the data series (``data``, ``labels``), the
    continuum-limit points (``cl``), fitted curves (``fit``), reference
    values (``known``), axis label (``ylabel``) and output path
    (``pdfname``).
    """
    fig = plt.figure()
    pl = fig.add_subplot(111)
    max_xvals = [max(i[0]) for i in plot.data]
    # give the plot some space
    plt.xlim((-.00025,max(max_xvals)+.00025))
    pl.set_xlabel("$\\tau_g$")
    pl.set_ylabel(plot.ylabel)
    #pl.set_ylabel(ylabel)
    # Cycle of marker styles, repeated to cover many series.
    fmts = ["bo", "ro", "go", "yo"]*3
    for (x, y, dy), marker, l in zip(plot.data, fmts,
                                     plot.labels):
        plt.errorbar(x, y, yerr=dy, markersize=10,
                     fmt = marker, label=l)
    pl.legend(loc='upper center', numpoints=1,
              bbox_to_anchor=(0.5,1.05), ncol=4)
    # Continuum-limit estimates are drawn at x = 0 with matching markers.
    for (y, dy), marker in zip(plot.cl, fmts):
        plt.errorbar(0, y, yerr=dy, markersize=10,
                     fmt = marker)
    for x, y in plot.fit:
        plt.plot(x, y, "r--", c='black')
    # Known reference values, also at x = 0.
    for y in plot.known:
        plt.errorbar( [0], [y], markersize=10, fmt="m^")
    plt.savefig(plot.pdfname)
def therm(data, arg_dict):
    """Estimate thermalization effects, make a plot.

    For each label and order, re-runs the ``tauint`` analysis with an
    increasing number of leading configurations discarded
    (``arg_dict['cutoffs']``) and plots the resulting means to show when
    the estimate stabilizes.
    """
    for label in sorted(data.keys()):
        print "* label:", label
        for o in arg_dict["orders"]:
            ydata = []
            dydata = []
            print " * order:", o
            for nc in arg_dict['cutoffs']:
                # Drop the first ``nc`` configurations along the last axis.
                mean, delta, tint, dtint = \
                    tauint(data[label].data[:,:,nc:], o)
                ydata.append(mean)
                dydata.append(delta)
            plt.errorbar(arg_dict['cutoffs'], ydata, yerr=dydata)
            plt.show()
def extrapolate(data, arg_dict, f = (lambda x: 1., lambda x: x)):
    """Extrapolate data. Optionally make a plot.

    For each perturbative order and lattice size L, collects the
    ``tauint`` means over all matching labels as a function of tau,
    performs a tau -> 0 extrapolation with ``extrapolate_cl`` using the
    basis functions ``f`` (default: constant + linear), cross-checks the
    propagated error against an explicit linear-fit formula, and feeds
    any matching plot objects in ``arg_dict['mk_plots']``.
    """
    # check if target lattice sizes are given
    # if not, do the extrapolation for all lattice sizes
    if not arg_dict['L_sizes']:
        arg_dict['L_sizes'] = sorted(set([d.L for d in data.values()]))
    for o in arg_dict["orders"]:
        print " * order = g^" + str(o)
        x, y, dy, cl, dcl, ffn = [], [], [], [], [], []
        for L in arg_dict['L_sizes']:
            print " * L =", L
            # Start a fresh sub-list per lattice size (Python-2 tuple-in-for).
            [i.append([]) for i in x, y, dy]
            for label in data:
                if data[label].L != L:
                    continue
                print " ** label:", label
                mean, delta, tint, dtint = \
                    tauint(data[label].data, o, plots=arg_dict['uwplot'])
                x[-1].append(data[label].tau)
                y[-1].append(mean)
                dy[-1].append(delta)
                print " mean:", pretty_print(mean, delta)
                print " tint:", pretty_print(tint, dtint)
            print " ** tau -> 0 limit"
            coeffs,errors = extrapolate_cl(f, x[-1], y[-1], dy[-1])
            # NOTE(review): this lambda late-binds ``coeffs``; it is only
            # evaluated below within the same iteration, so it works, but
            # storing it in ``ffn`` for later use would be unsafe.
            ffn.append(lambda x : np.sum( c[0,0] * f(x)
                                          for c,f in zip(coeffs, f)))
            cl.append(coeffs[0,0])
            dcl.append(errors[0,0]**0.5)
            # Closed-form error of a linear least-squares fit, used as a
            # consistency check against the SVD error propagation.
            sxsq = sum(xx**2 for xx in x[-1])
            sx = sum(x[-1])
            sa = np.sqrt(sum( ((sxsq - sx*xx)/(3*sxsq - sx**2))**2*yy**2
                              for xx, yy in zip(x[-1],dy[-1])))
            assert(abs((dcl[-1] - sa)/sa) < 1e-12)
            print " cl:", pretty_print(cl[-1], dcl[-1])
            print " " + "*"*50
            # NOTE(review): ``plt`` here shadows matplotlib's ``plt`` alias
            # within this function; it names a plot-description object.
            for plt in (p for p in arg_dict["mk_plots"]
                        if L in p.L and o in p.orders):
                plt.data.append((x[-1], y[-1], dy[-1]))
                plt.cl.append((cl[-1], dcl[-1]))
                fnx = np.linspace(0, max(x[-1]), 100)
                plt.fit.append((fnx, [ffn[-1](i) for i in fnx]))
                plt.labels.append("$L = {0}$".format(L))
    for plt in arg_dict["mk_plots"]:
        mk_plot(plt)
| mit |
timsnyder/bokeh | examples/app/gapminder/main.py | 5 | 2769 | # -*- coding: utf-8 -*-
import pandas as pd
from bokeh.core.properties import field
from bokeh.io import curdoc
from bokeh.layouts import layout
from bokeh.models import (ColumnDataSource, HoverTool, SingleIntervalTicker,
Slider, Button, Label, CategoricalColorMapper)
from bokeh.palettes import Spectral6
from bokeh.plotting import figure
from data import process_data
# Load the gapminder tables and build one dict of per-year data frames,
# keyed by year, in the column format bokeh's ColumnDataSource expects.
fertility_df, life_expectancy_df, population_df_size, regions_df, years, regions_list = process_data()

df = pd.concat({'fertility': fertility_df,
                'life': life_expectancy_df,
                'population': population_df_size},
               axis=1)

data = {}

regions_df.rename({'Group':'region'}, axis='columns', inplace=True)
for year in years:
    # Select this year's columns, drop the year level, attach region info.
    df_year = df.iloc[:,df.columns.get_level_values(1)==year]
    df_year.columns = df_year.columns.droplevel(1)
    data[year] = df_year.join(regions_df.region).reset_index().to_dict('series')

# The source starts on the first year; callbacks swap in other years.
source = ColumnDataSource(data=data[years[0]])
# Scatter plot: fertility vs. life expectancy, sized by population and
# colored by region.
plot = figure(x_range=(1, 9), y_range=(20, 100), title='Gapminder Data', plot_height=300)
plot.xaxis.ticker = SingleIntervalTicker(interval=1)
plot.xaxis.axis_label = "Children per woman (total fertility)"
plot.yaxis.ticker = SingleIntervalTicker(interval=20)
plot.yaxis.axis_label = "Life expectancy at birth (years)"

# Large year watermark behind the glyphs; updated by the slider callback.
label = Label(x=1.1, y=18, text=str(years[0]), text_font_size='70pt', text_color='#eeeeee')
plot.add_layout(label)

color_mapper = CategoricalColorMapper(palette=Spectral6, factors=regions_list)
plot.circle(
    x='fertility',
    y='life',
    size='population',
    source=source,
    fill_color={'field': 'region', 'transform': color_mapper},
    fill_alpha=0.8,
    line_color='#7c7e71',
    line_width=0.5,
    line_alpha=0.5,
    legend=field('region'),
)
# Tooltip shows the country name under the cursor.
plot.add_tools(HoverTool(tooltips="@Country", show_arrow=False, point_policy='follow_mouse'))
def animate_update():
    """Advance the year slider one step, wrapping back to the first year."""
    next_year = slider.value + 1
    if next_year > years[-1]:
        next_year = years[0]
    slider.value = next_year
def slider_update(attrname, old, new):
    # Bokeh on_change callback: sync the year watermark and the plot's
    # data source with the year currently selected on the slider.
    year = slider.value
    label.text = str(year)
    source.data = data[year]
# Year slider driving both manual scrubbing and the animation.
slider = Slider(start=years[0], end=years[-1], value=years[0], step=1, title="Year")
slider.on_change('value', slider_update)

# Handle of the periodic animation callback; None while paused.
callback_id = None
def animate():
    """Toggle play/pause: (un)register the periodic year-advance callback."""
    global callback_id
    if button.label == '► Play':
        button.label = '❚❚ Pause'
        # Step the animation every 200 ms.
        callback_id = curdoc().add_periodic_callback(animate_update, 200)
    else:
        button.label = '► Play'
        curdoc().remove_periodic_callback(callback_id)
button = Button(label='► Play', width=60)
button.on_click(animate)

# Assemble the document: plot on top, slider and play button below.
layout = layout([
    [plot],
    [slider, button],
], sizing_mode='scale_width')

curdoc().add_root(layout)
curdoc().title = "Gapminder"
| bsd-3-clause |
pizzathief/scipy | scipy/interpolate/fitpack.py | 5 | 25629 | __all__ = ['splrep', 'splprep', 'splev', 'splint', 'sproot', 'spalde',
'bisplrep', 'bisplev', 'insert', 'splder', 'splantider']
import warnings
import numpy as np
# These are in the API for fitpack even if not used in fitpack.py itself.
from ._fitpack_impl import bisplrep, bisplev, dblint
from . import _fitpack_impl as _impl
from ._bsplines import BSpline
def splprep(x, w=None, u=None, ub=None, ue=None, k=3, task=0, s=None, t=None,
            full_output=0, nest=None, per=0, quiet=1):
    """
    Find the B-spline representation of an N-D curve.

    Given a list of N rank-1 arrays, `x`, which represent a curve in
    N-D space parametrized by `u`, find a smooth approximating
    spline curve g(`u`). Uses the FORTRAN routine parcur from FITPACK.

    Parameters
    ----------
    x : array_like
        A list of sample vector arrays representing the curve.
    w : array_like, optional
        Strictly positive rank-1 array of weights the same length as `x[0]`.
        The weights are used in computing the weighted least-squares spline
        fit. If the errors in the `x` values have standard-deviation given by
        the vector d, then `w` should be 1/d. Default is ``ones(len(x[0]))``.
    u : array_like, optional
        An array of parameter values. If not given, these values are
        calculated automatically as ``M = len(x[0])``, where

            v[0] = 0

            v[i] = v[i-1] + distance(`x[i]`, `x[i-1]`)

            u[i] = v[i] / v[M-1]

    ub, ue : int, optional
        The end-points of the parameters interval.  Defaults to
        u[0] and u[-1].
    k : int, optional
        Degree of the spline. Cubic splines are recommended.
        Even values of `k` should be avoided especially with a small s-value.
        ``1 <= k <= 5``, default is 3.
    task : int, optional
        If task==0 (default), find t and c for a given smoothing factor, s.
        If task==1, find t and c for another value of the smoothing factor, s.
        There must have been a previous call with task=0 or task=1
        for the same set of data.
        If task=-1 find the weighted least square spline for a given set of
        knots, t.
    s : float, optional
        A smoothing condition.  The amount of smoothness is determined by
        satisfying the conditions: ``sum((w * (y - g))**2,axis=0) <= s``,
        where g(x) is the smoothed interpolation of (x,y).  The user can
        use `s` to control the trade-off between closeness and smoothness
        of fit.  Larger `s` means more smoothing while smaller values of `s`
        indicate less smoothing. Recommended values of `s` depend on the
        weights, w.  If the weights represent the inverse of the
        standard-deviation of y, then a good `s` value should be found in
        the range ``(m-sqrt(2*m),m+sqrt(2*m))``, where m is the number of
        data points in x, y, and w.
    t : int, optional
        The knots needed for task=-1.
    full_output : int, optional
        If non-zero, then return optional outputs.
    nest : int, optional
        An over-estimate of the total number of knots of the spline to
        help in determining the storage space.  By default nest=m/2.
        Always large enough is nest=m+k+1.
    per : int, optional
        If non-zero, data points are considered periodic with period
        ``x[m-1] - x[0]`` and a smooth periodic spline approximation is
        returned.  Values of ``y[m-1]`` and ``w[m-1]`` are not used.
    quiet : int, optional
        Non-zero to suppress messages.
        This parameter is deprecated; use standard Python warning filters
        instead.

    Returns
    -------
    tck : tuple
        (t,c,k) a tuple containing the vector of knots, the B-spline
        coefficients, and the degree of the spline.
    u : array
        An array of the values of the parameter.
    fp : float
        The weighted sum of squared residuals of the spline approximation.
    ier : int
        An integer flag about splrep success.  Success is indicated
        if ier<=0. If ier in [1,2,3] an error occurred but was not raised.
        Otherwise an error is raised.
    msg : str
        A message corresponding to the integer flag, ier.

    See Also
    --------
    splrep, splev, sproot, spalde, splint,
    bisplrep, bisplev
    UnivariateSpline, BivariateSpline
    BSpline
    make_interp_spline

    Notes
    -----
    See `splev` for evaluation of the spline and its derivatives.
    The number of dimensions N must be smaller than 11.

    The number of coefficients in the `c` array is ``k+1`` less then the number
    of knots, ``len(t)``. This is in contrast with `splrep`, which zero-pads
    the array of coefficients to have the same length as the array of knots.
    These additional coefficients are ignored by evaluation routines, `splev`
    and `BSpline`.

    References
    ----------
    .. [1] P. Dierckx, "Algorithms for smoothing data with periodic and
        parametric splines, Computer Graphics and Image Processing",
        20 (1982) 171-184.
    .. [2] P. Dierckx, "Algorithms for smoothing data with periodic and
        parametric splines", report tw55, Dept. Computer Science,
        K.U.Leuven, 1981.
    .. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs on
        Numerical Analysis, Oxford University Press, 1993.

    Examples
    --------
    Generate a discretization of a limacon curve in the polar coordinates:

    >>> phi = np.linspace(0, 2.*np.pi, 40)
    >>> r = 0.5 + np.cos(phi)         # polar coords
    >>> x, y = r * np.cos(phi), r * np.sin(phi)    # convert to cartesian

    And interpolate:

    >>> from scipy.interpolate import splprep, splev
    >>> tck, u = splprep([x, y], s=0)
    >>> new_points = splev(u, tck)

    Notice that (i) we force interpolation by using `s=0`,
    (ii) the parameterization, ``u``, is generated automatically.
    Now plot the result:

    >>> import matplotlib.pyplot as plt
    >>> fig, ax = plt.subplots()
    >>> ax.plot(x, y, 'ro')
    >>> ax.plot(new_points[0], new_points[1], 'r-')
    >>> plt.show()

    """
    # Thin public wrapper: argument handling and the FITPACK call live in
    # scipy.interpolate._fitpack_impl.
    res = _impl.splprep(x, w, u, ub, ue, k, task, s, t, full_output, nest, per,
                        quiet)
    return res
def splrep(x, y, w=None, xb=None, xe=None, k=3, task=0, s=None, t=None,
           full_output=0, per=0, quiet=1):
    """
    Find the B-spline representation of a 1-D curve.

    Given the set of data points ``(x[i], y[i])`` determine a smooth spline
    approximation of degree k on the interval ``xb <= x <= xe``.

    Parameters
    ----------
    x, y : array_like
        The data points defining a curve y = f(x).
    w : array_like, optional
        Strictly positive rank-1 array of weights the same length as x and y.
        The weights are used in computing the weighted least-squares spline
        fit. If the errors in the y values have standard-deviation given by the
        vector d, then w should be 1/d. Default is ones(len(x)).
    xb, xe : float, optional
        The interval to fit.  If None, these default to x[0] and x[-1]
        respectively.
    k : int, optional
        The degree of the spline fit. It is recommended to use cubic splines.
        Even values of k should be avoided especially with small s values.
        1 <= k <= 5
    task : {1, 0, -1}, optional
        If task==0 find t and c for a given smoothing factor, s.

        If task==1 find t and c for another value of the smoothing factor, s.
        There must have been a previous call with task=0 or task=1 for the same
        set of data (t will be stored an used internally)

        If task=-1 find the weighted least square spline for a given set of
        knots, t. These should be interior knots as knots on the ends will be
        added automatically.
    s : float, optional
        A smoothing condition. The amount of smoothness is determined by
        satisfying the conditions: sum((w * (y - g))**2,axis=0) <= s where g(x)
        is the smoothed interpolation of (x,y). The user can use s to control
        the tradeoff between closeness and smoothness of fit. Larger s means
        more smoothing while smaller values of s indicate less smoothing.
        Recommended values of s depend on the weights, w. If the weights
        represent the inverse of the standard-deviation of y, then a good s
        value should be found in the range (m-sqrt(2*m),m+sqrt(2*m)) where m is
        the number of datapoints in x, y, and w. default : s=m-sqrt(2*m) if
        weights are supplied. s = 0.0 (interpolating) if no weights are
        supplied.
    t : array_like, optional
        The knots needed for task=-1. If given then task is automatically set
        to -1.
    full_output : bool, optional
        If non-zero, then return optional outputs.
    per : bool, optional
        If non-zero, data points are considered periodic with period x[m-1] -
        x[0] and a smooth periodic spline approximation is returned. Values of
        y[m-1] and w[m-1] are not used.
    quiet : bool, optional
        Non-zero to suppress messages.
        This parameter is deprecated; use standard Python warning filters
        instead.

    Returns
    -------
    tck : tuple
        A tuple (t,c,k) containing the vector of knots, the B-spline
        coefficients, and the degree of the spline.
    fp : array, optional
        The weighted sum of squared residuals of the spline approximation.
    ier : int, optional
        An integer flag about splrep success. Success is indicated if ier<=0.
        If ier in [1,2,3] an error occurred but was not raised. Otherwise an
        error is raised.
    msg : str, optional
        A message corresponding to the integer flag, ier.

    See Also
    --------
    UnivariateSpline, BivariateSpline
    splprep, splev, sproot, spalde, splint
    bisplrep, bisplev
    BSpline
    make_interp_spline

    Notes
    -----
    See `splev` for evaluation of the spline and its derivatives. Uses the
    FORTRAN routine ``curfit`` from FITPACK.

    The user is responsible for assuring that the values of `x` are unique.
    Otherwise, `splrep` will not return sensible results.

    If provided, knots `t` must satisfy the Schoenberg-Whitney conditions,
    i.e., there must be a subset of data points ``x[j]`` such that
    ``t[j] < x[j] < t[j+k+1]``, for ``j=0, 1,...,n-k-2``.

    This routine zero-pads the coefficients array ``c`` to have the same length
    as the array of knots ``t`` (the trailing ``k + 1`` coefficients are ignored
    by the evaluation routines, `splev` and `BSpline`.) This is in contrast with
    `splprep`, which does not zero-pad the coefficients.

    References
    ----------
    Based on algorithms described in [1]_, [2]_, [3]_, and [4]_:

    .. [1] P. Dierckx, "An algorithm for smoothing, differentiation and
       integration of experimental data using spline functions",
       J.Comp.Appl.Maths 1 (1975) 165-184.
    .. [2] P. Dierckx, "A fast algorithm for smoothing data on a rectangular
       grid while using spline functions", SIAM J.Numer.Anal. 19 (1982)
       1286-1304.
    .. [3] P. Dierckx, "An improved algorithm for curve fitting with spline
       functions", report tw54, Dept. Computer Science,K.U. Leuven, 1981.
    .. [4] P. Dierckx, "Curve and surface fitting with splines", Monographs on
       Numerical Analysis, Oxford University Press, 1993.

    Examples
    --------

    >>> import matplotlib.pyplot as plt
    >>> from scipy.interpolate import splev, splrep
    >>> x = np.linspace(0, 10, 10)
    >>> y = np.sin(x)
    >>> spl = splrep(x, y)
    >>> x2 = np.linspace(0, 10, 200)
    >>> y2 = splev(x2, spl)
    >>> plt.plot(x, y, 'o', x2, y2)
    >>> plt.show()

    """
    # Thin public wrapper: argument handling and the FITPACK call live in
    # scipy.interpolate._fitpack_impl.
    res = _impl.splrep(x, y, w, xb, xe, k, task, s, t, full_output, per, quiet)
    return res
def splev(x, tck, der=0, ext=0):
    """
    Evaluate a B-spline or its derivatives.

    Given the knots and coefficients of a B-spline representation, evaluate
    the value of the smoothing polynomial and its derivatives.  This is a
    wrapper around the FORTRAN routines splev and splder of FITPACK.

    Parameters
    ----------
    x : array_like
        Points at which to return the value of the smoothed spline or its
        derivatives.  If `tck` was returned from `splprep`, then the
        parameter values, u should be given.
    tck : 3-tuple or a BSpline object
        If a tuple, then it should be a sequence of length 3 returned by
        `splrep` or `splprep` containing the knots, coefficients, and degree
        of the spline.  (Also see Notes.)
    der : int, optional
        The order of derivative of the spline to compute (must be less than
        or equal to k, the degree of the spline).
    ext : int, optional
        Controls the value returned for elements of ``x`` not in the
        interval defined by the knot sequence.

        * if ext=0, return the extrapolated value.
        * if ext=1, return 0
        * if ext=2, raise a ValueError
        * if ext=3, return the boundary value.

        The default value is 0.

    Returns
    -------
    y : ndarray or list of ndarrays
        An array of values representing the spline function evaluated at
        the points in `x`.  If `tck` was returned from `splprep`, then this
        is a list of arrays representing the curve in an N-D space.

    Notes
    -----
    Manipulating the tck-tuples directly is not recommended.  In new code,
    prefer using `BSpline` objects.

    See Also
    --------
    splprep, splrep, sproot, spalde, splint
    bisplrep, bisplev
    BSpline
    """
    # Tuple-based (t, c, k) representations go straight to the FITPACK wrapper.
    if not isinstance(tck, BSpline):
        return _impl.splev(x, tck, der, ext)

    if tck.c.ndim > 1:
        warnings.warn("Calling splev() with BSpline objects with c.ndim > 1 is "
                      "not recommended. Use BSpline.__call__(x) instead.",
                      DeprecationWarning)

    # Of the FITPACK out-of-bounds modes, only ext=0 (plain extrapolation)
    # has a BSpline equivalent; everything else is rejected.
    try:
        extrapolate = {0: True, }[ext]
    except KeyError:
        raise ValueError("Extrapolation mode %s is not supported "
                         "by BSpline." % ext)
    return tck(x, der, extrapolate=extrapolate)
def splint(a, b, tck, full_output=0):
    """
    Evaluate the definite integral of a B-spline between two given points.

    Parameters
    ----------
    a, b : float
        The end-points of the integration interval.
    tck : tuple or a BSpline instance
        If a tuple, then it should be a sequence of length 3, containing the
        vector of knots, the B-spline coefficients, and the degree of the
        spline (see `splev`).
    full_output : int, optional
        Non-zero to return optional output.  Ignored (with a warning) when
        `tck` is a `BSpline` instance.

    Returns
    -------
    integral : float
        The resulting integral.
    wrk : ndarray
        An array containing the integrals of the normalized B-splines
        defined on the set of knots.
        (Only returned if `full_output` is non-zero)

    Notes
    -----
    `splint` silently assumes that the spline function is zero outside the data
    interval (`a`, `b`).
    Manipulating the tck-tuples directly is not recommended. In new code,
    prefer using the `BSpline` objects.

    See Also
    --------
    splprep, splrep, sproot, spalde, splev
    bisplrep, bisplev
    BSpline

    References
    ----------
    .. [1] P.W. Gaffney, The calculation of indefinite integrals of b-splines",
       J. Inst. Maths Applics, 17, p.37-41, 1976.
    .. [2] P. Dierckx, "Curve and surface fitting with splines", Monographs
       on Numerical Analysis, Oxford University Press, 1993.
    """
    if isinstance(tck, BSpline):
        if tck.c.ndim > 1:
            mesg = ("Calling splint() with BSpline objects with c.ndim > 1 is "
                    "not recommended. Use BSpline.integrate() instead.")
            warnings.warn(mesg, DeprecationWarning)
        if full_output != 0:
            mesg = ("full_output = %s is not supported. Proceeding as if "
                    "full_output = 0" % full_output)
            # BUG FIX: the message above was constructed but never emitted,
            # so callers requesting full_output were silently ignored.
            warnings.warn(mesg)
        # BSpline.integrate must not extrapolate, matching FITPACK's
        # assumption that the spline is zero outside the base interval.
        return tck.integrate(a, b, extrapolate=False)
    else:
        return _impl.splint(a, b, tck, full_output)
def sproot(tck, mest=10):
    """
    Find the roots of a cubic B-spline.

    Given the knots (>=8) and coefficients of a cubic B-spline return the
    roots of the spline.

    Parameters
    ----------
    tck : tuple or a BSpline object
        If a tuple, then it should be a sequence of length 3, containing the
        vector of knots, the B-spline coefficients, and the degree of the
        spline.
        The number of knots must be >= 8, and the degree must be 3.
        The knots must be a monotonically increasing sequence.
    mest : int, optional
        An estimate of the number of zeros (Default is 10).

    Returns
    -------
    zeros : ndarray
        An array giving the roots of the spline.

    Notes
    -----
    Manipulating the tck-tuples directly is not recommended. In new code,
    prefer using the `BSpline` objects.

    See also
    --------
    splprep, splrep, splint, spalde, splev
    bisplrep, bisplev
    BSpline
    """
    # Plain (t, c, k) tuples are handled by the FITPACK-based implementation.
    if not isinstance(tck, BSpline):
        return _impl.sproot(tck, mest)

    if tck.c.ndim > 1:
        warnings.warn("Calling sproot() with BSpline objects with c.ndim > 1 is "
                      "not recommended.", DeprecationWarning)

    t, c, k = tck.tck
    # _impl.sproot expects the interpolation axis to be last; for 1-D
    # coefficients moving axis 0 to the end is a no-op.
    c = np.moveaxis(c, 0, -1)
    return _impl.sproot((t, c, k), mest)
def spalde(x, tck):
    """
    Evaluate all derivatives of a B-spline.

    Given the knots and coefficients of a cubic B-spline compute all
    derivatives up to order k at a point (or set of points).

    Parameters
    ----------
    x : array_like
        A point or a set of points at which to evaluate the derivatives.
        Note that ``t(k) <= x <= t(n-k+1)`` must hold for each `x`.
    tck : tuple
        A tuple ``(t, c, k)``, containing the vector of knots, the B-spline
        coefficients, and the degree of the spline (see `splev`).

    Returns
    -------
    results : {ndarray, list of ndarrays}
        An array (or a list of arrays) containing all derivatives
        up to order k inclusive for each point `x`.

    See Also
    --------
    splprep, splrep, splint, sproot, splev, bisplrep, bisplev,
    BSpline
    """
    # Unlike the other FITPACK wrappers in this module, spalde has no
    # BSpline counterpart, so BSpline inputs are rejected outright.
    if isinstance(tck, BSpline):
        raise TypeError("spalde does not accept BSpline instances.")
    return _impl.spalde(x, tck)
def insert(x, tck, m=1, per=0):
    """
    Insert knots into a B-spline.

    Given the knots and coefficients of a B-spline representation, create a
    new B-spline with a knot inserted `m` times at point `x`.
    This is a wrapper around the FORTRAN routine insert of FITPACK.

    Parameters
    ----------
    x (u) : array_like
        A 1-D point at which to insert a new knot(s). If `tck` was returned
        from ``splprep``, then the parameter values, u should be given.
    tck : a `BSpline` instance or a tuple
        If tuple, then it is expected to be a tuple (t,c,k) containing
        the vector of knots, the B-spline coefficients, and the degree of
        the spline.
    m : int, optional
        The number of times to insert the given knot (its multiplicity).
        Default is 1.
    per : int, optional
        If non-zero, the input spline is considered periodic.

    Returns
    -------
    BSpline instance or a tuple
        A new B-spline with knots t, coefficients c, and degree k.
        ``t(k+1) <= x <= t(n-k)``, where k is the degree of the spline.
        In case of a periodic spline (``per != 0``) there must be
        either at least k interior knots t(j) satisfying ``t(k+1)<t(j)<=x``
        or at least k interior knots t(j) satisfying ``x<=t(j)<t(n-k)``.
        A tuple is returned iff the input argument `tck` is a tuple, otherwise
        a BSpline object is constructed and returned.

    Notes
    -----
    Based on algorithms from [1]_ and [2]_.
    Manipulating the tck-tuples directly is not recommended. In new code,
    prefer using the `BSpline` objects.

    References
    ----------
    .. [1] W. Boehm, "Inserting new knots into b-spline curves.",
        Computer Aided Design, 12, p.199-201, 1980.
    .. [2] P. Dierckx, "Curve and surface fitting with splines, Monographs on
        Numerical Analysis", Oxford University Press, 1993.
    """
    # Plain (t, c, k) tuples are passed straight to the FITPACK wrapper.
    if not isinstance(tck, BSpline):
        return _impl.insert(x, tck, m, per)

    t, c, k = tck.tck
    # FITPACK expects the interpolation axis to be last; moving axis 0 to
    # the end is a no-op when the coefficient array is 1-D.
    c = np.moveaxis(c, 0, -1)
    t_new, c_new, k_new = _impl.insert(x, (t, c, k), m, per)
    # Move the interpolation axis back to the front before building the
    # BSpline result.
    c_new = np.moveaxis(np.asarray(c_new), -1, 0)
    return BSpline(t_new, c_new, k_new)
def splder(tck, n=1):
    """
    Compute the spline representation of the derivative of a given spline

    Parameters
    ----------
    tck : BSpline instance or a tuple of (t, c, k)
        Spline whose derivative to compute
    n : int, optional
        Order of derivative to evaluate. Default: 1

    Returns
    -------
    `BSpline` instance or tuple
        Spline of order k2=k-n representing the derivative
        of the input spline.
        A tuple is returned iff the input argument `tck` is a tuple, otherwise
        a BSpline object is constructed and returned.

    Notes
    -----
    .. versionadded:: 0.13.0

    See Also
    --------
    splantider, splev, spalde
    BSpline

    Examples
    --------
    This can be used for finding maxima of a curve:

    >>> from scipy.interpolate import splrep, splder, sproot
    >>> x = np.linspace(0, 10, 70)
    >>> y = np.sin(x)
    >>> spl = splrep(x, y, k=4)

    Now, differentiate the spline and find the zeros of the
    derivative. (NB: `sproot` only works for order 3 splines, so we
    fit an order 4 spline):

    >>> dspl = splder(spl)
    >>> sproot(dspl) / np.pi
    array([ 0.50000001,  1.5       ,  2.49999998])

    This agrees well with roots :math:`\\pi/2 + n\\pi` of
    :math:`\\cos(x) = \\sin'(x)`.
    """
    # Same dispatch pattern as the rest of this module: (t, c, k) tuples go
    # to the FITPACK-based implementation, BSpline objects use their method.
    if not isinstance(tck, BSpline):
        return _impl.splder(tck, n)
    return tck.derivative(n)
def splantider(tck, n=1):
    """
    Compute the spline for the antiderivative (integral) of a given spline.

    Parameters
    ----------
    tck : BSpline instance or a tuple of (t, c, k)
        Spline whose antiderivative to compute
    n : int, optional
        Order of antiderivative to evaluate. Default: 1

    Returns
    -------
    BSpline instance or a tuple of (t2, c2, k2)
        Spline of order k2=k+n representing the antiderivative of the input
        spline.
        A tuple is returned iff the input argument `tck` is a tuple, otherwise
        a BSpline object is constructed and returned.

    See Also
    --------
    splder, splev, spalde
    BSpline

    Notes
    -----
    The `splder` function is the inverse operation of this function.
    Namely, ``splder(splantider(tck))`` is identical to `tck`, modulo
    rounding error.

    .. versionadded:: 0.13.0

    Examples
    --------
    >>> from scipy.interpolate import splrep, splder, splantider, splev
    >>> x = np.linspace(0, np.pi/2, 70)
    >>> y = 1 / np.sqrt(1 - 0.8*np.sin(x)**2)
    >>> spl = splrep(x, y)

    The derivative is the inverse operation of the antiderivative,
    although some floating point error accumulates:

    >>> splev(1.7, spl), splev(1.7, splder(splantider(spl)))
    (array(2.1565429877197317), array(2.1565429877201865))

    Antiderivative can be used to evaluate definite integrals:

    >>> ispl = splantider(spl)
    >>> splev(np.pi/2, ispl) - splev(0, ispl)
    2.2572053588768486

    This is indeed an approximation to the complete elliptic integral
    :math:`K(m) = \\int_0^{\\pi/2} [1 - m\\sin^2 x]^{-1/2} dx`:

    >>> from scipy.special import ellipk
    >>> ellipk(0.8)
    2.2572053268208538
    """
    # Mirror the dispatch used throughout this module: tuples are handled by
    # the FITPACK-based implementation, BSpline objects by their own method.
    if not isinstance(tck, BSpline):
        return _impl.splantider(tck, n)
    return tck.antiderivative(n)
| bsd-3-clause |
tBuLi/symfit | examples/ode_reaction_kinetics_simple.py | 1 | 1250 | from symfit import variables, Parameter, Fit, D, ODEModel
import numpy as np
import matplotlib.pyplot as plt
# First order reaction kinetics. Data taken from
# http://chem.libretexts.org/Core/Physical_Chemistry/Kinetics/Rate_Laws/The_Rate_Law
# Measurement times (presumably minutes, per the x-axis label below) and the
# observed concentration of the reactant A at each time.
tdata = np.array([0, 0.9184, 9.0875, 11.2485, 17.5255, 23.9993, 27.7949,
                  31.9783, 35.2118, 42.973, 46.6555, 50.3922, 55.4747, 61.827,
                  65.6603, 70.0939])
concentration = np.array([0.906, 0.8739, 0.5622, 0.5156, 0.3718, 0.2702, 0.2238,
                          0.1761, 0.1495, 0.1029, 0.086, 0.0697, 0.0546, 0.0393,
                          0.0324, 0.026])
# Define our ODE model: first-order decay A -> B with rate constant k, i.e.
# d[A]/dt = -k*[A] and d[B]/dt = +k*[A].
A, B, t = variables('A, B, t')
k = Parameter('k')
model = ODEModel(
    {D(A, t): - k * A, D(B, t): k * A},
    # Initial conditions: start at the first measured point, with no B yet.
    initial={t: tdata[0], A: concentration[0], B: 0.0}
)
# Only A is observed; B is obtained by integrating the ODE during the fit.
fit = Fit(model, A=concentration, t=tdata)
fit_result = fit.execute()
print(fit_result)
# Plotting, irrelevant to the symfit part.
t_axis = np.linspace(0, 80)
A_fit, B_fit, = model(t=t_axis, **fit_result.params)
plt.scatter(tdata, concentration)
plt.plot(t_axis, A_fit, label='[A]')
plt.plot(t_axis, B_fit, label='[B]')
plt.xlabel('t /min')
plt.ylabel('[X] /M')
plt.ylim(0, 1)
plt.xlim(0, 80)
plt.legend(loc=1)
plt.show() | gpl-2.0 |
khkaminska/scikit-learn | sklearn/utils/random.py | 234 | 10510 | # Author: Hamzeh Alsalhi <ha258@cornell.edu>
#
# License: BSD 3 clause
from __future__ import division
import numpy as np
import scipy.sparse as sp
import operator
import array
from sklearn.utils import check_random_state
from sklearn.utils.fixes import astype
from ._random import sample_without_replacement
__all__ = ['sample_without_replacement', 'choice']
# This is a backport of np.random.choice from numpy 1.7
# The function can be removed when we bump the requirements to >=1.7
def choice(a, size=None, replace=True, p=None, random_state=None):
    """
    choice(a, size=None, replace=True, p=None)

    Generates a random sample from a given 1-D array

    .. versionadded:: 1.7.0

    Parameters
    ----------
    a : 1-D array-like or int
        If an ndarray, a random sample is generated from its elements.
        If an int, the random sample is generated as if a was np.arange(n)
    size : int or tuple of ints, optional
        Output shape. Default is None, in which case a single value is
        returned.
    replace : boolean, optional
        Whether the sample is with or without replacement.
    p : 1-D array-like, optional
        The probabilities associated with each entry in a.
        If not given the sample assumes a uniform distribution over all
        entries in a.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    samples : 1-D ndarray, shape (size,)
        The generated random samples

    Raises
    ------
    ValueError
        If a is an int and less than zero, if a or p are not 1-dimensional,
        if a is an array-like of size 0, if p is not a vector of
        probabilities, if a and p have different lengths, or if
        replace=False and the sample size is greater than the population
        size

    See Also
    --------
    randint, shuffle, permutation

    Examples
    --------
    Generate a uniform random sample from np.arange(5) of size 3:

    >>> np.random.choice(5, 3)  # doctest: +SKIP
    array([0, 3, 4])
    >>> #This is equivalent to np.random.randint(0,5,3)

    Generate a non-uniform random sample from np.arange(5) of size 3:

    >>> np.random.choice(5, 3, p=[0.1, 0, 0.3, 0.6, 0])  # doctest: +SKIP
    array([3, 3, 0])

    Generate a uniform random sample from np.arange(5) of size 3 without
    replacement:

    >>> np.random.choice(5, 3, replace=False)  # doctest: +SKIP
    array([3,1,0])
    >>> #This is equivalent to np.random.shuffle(np.arange(5))[:3]

    Generate a non-uniform random sample from np.arange(5) of size
    3 without replacement:

    >>> np.random.choice(5, 3, replace=False, p=[0.1, 0, 0.3, 0.6, 0])
    ... # doctest: +SKIP
    array([2, 3, 0])

    Any of the above can be repeated with an arbitrary array-like
    instead of just integers. For instance:

    >>> aa_milne_arr = ['pooh', 'rabbit', 'piglet', 'Christopher']
    >>> np.random.choice(aa_milne_arr, 5, p=[0.5, 0.1, 0.1, 0.3])
    ... # doctest: +SKIP
    array(['pooh', 'pooh', 'pooh', 'Christopher', 'piglet'],
          dtype='|S11')
    """
    random_state = check_random_state(random_state)

    # Format and Verify input
    a = np.array(a, copy=False)
    if a.ndim == 0:
        try:
            # __index__ must return an integer by python rules.
            pop_size = operator.index(a.item())
        except TypeError:
            raise ValueError("a must be 1-dimensional or an integer")
        if pop_size <= 0:
            raise ValueError("a must be greater than 0")
    elif a.ndim != 1:
        raise ValueError("a must be 1-dimensional")
    else:
        pop_size = a.shape[0]
        # BUG FIX: was ``pop_size is 0`` -- identity comparison with an int
        # literal is implementation-dependent; use equality instead.
        if pop_size == 0:
            raise ValueError("a must be non-empty")

    # BUG FIX: was ``None != p``; identity test is the correct None check.
    if p is not None:
        p = np.array(p, dtype=np.double, ndmin=1, copy=False)
        if p.ndim != 1:
            raise ValueError("p must be 1-dimensional")
        if p.size != pop_size:
            raise ValueError("a and p must have same size")
        if np.any(p < 0):
            raise ValueError("probabilities are not non-negative")
        if not np.allclose(p.sum(), 1):
            raise ValueError("probabilities do not sum to 1")

    shape = size
    if shape is not None:
        size = np.prod(shape, dtype=np.intp)
    else:
        size = 1

    # Actual sampling
    if replace:
        if p is not None:
            # Inverse-CDF sampling: draw uniforms and locate them in the
            # cumulative distribution.
            cdf = p.cumsum()
            cdf /= cdf[-1]
            uniform_samples = random_state.random_sample(shape)
            idx = cdf.searchsorted(uniform_samples, side='right')
            # searchsorted returns a scalar
            idx = np.array(idx, copy=False)
        else:
            idx = random_state.randint(0, pop_size, size=shape)
    else:
        if size > pop_size:
            raise ValueError("Cannot take a larger sample than "
                             "population when 'replace=False'")

        if p is not None:
            if np.sum(p > 0) < size:
                raise ValueError("Fewer non-zero entries in p than size")
            n_uniq = 0
            p = p.copy()
            # BUG FIX: ``np.int`` was removed in NumPy >= 1.24; the builtin
            # ``int`` is the equivalent dtype.
            found = np.zeros(shape, dtype=int)
            flat_found = found.ravel()
            # Rejection-style loop: repeatedly sample, zero out the
            # probabilities of already-drawn items, and keep the new uniques
            # until `size` distinct indices have been collected.
            while n_uniq < size:
                x = random_state.rand(size - n_uniq)
                if n_uniq > 0:
                    p[flat_found[0:n_uniq]] = 0
                cdf = np.cumsum(p)
                cdf /= cdf[-1]
                new = cdf.searchsorted(x, side='right')
                _, unique_indices = np.unique(new, return_index=True)
                unique_indices.sort()
                new = new.take(unique_indices)
                flat_found[n_uniq:n_uniq + new.size] = new
                n_uniq += new.size
            idx = found
        else:
            idx = random_state.permutation(pop_size)[:size]
        if shape is not None:
            idx.shape = shape

    if shape is None and isinstance(idx, np.ndarray):
        # In most cases a scalar will have been made an array
        idx = idx.item(0)

    # Use samples as indices for a if a is array-like
    if a.ndim == 0:
        return idx

    if shape is not None and idx.ndim == 0:
        # If size == () then the user requested a 0-d array as opposed to
        # a scalar object when size is None. However a[idx] is always a
        # scalar and not an array. So this makes sure the result is an
        # array, taking into account that np.array(item) may not work
        # for object arrays.
        res = np.empty((), dtype=a.dtype)
        res[()] = a[idx]
        return res

    return a[idx]
def random_choice_csc(n_samples, classes, class_probability=None,
                      random_state=None):
    """Generate a sparse random matrix given column class distributions

    Parameters
    ----------
    n_samples : int,
        Number of samples to draw in each column.

    classes : list of size n_outputs of arrays of size (n_classes,)
        List of classes for each column.

    class_probability : list of size n_outputs of arrays of size (n_classes,)
        Optional (default=None). Class distribution of each column. If None the
        uniform distribution is assumed.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    random_matrix : sparse csc matrix of size (n_samples, n_outputs)
    """
    # CSC building blocks, filled column by column below.
    data = array.array('i')
    indices = array.array('i')
    indptr = array.array('i', [0])

    for j in range(len(classes)):
        classes[j] = np.asarray(classes[j])
        if classes[j].dtype.kind != 'i':
            raise ValueError("class dtype %s is not supported" %
                             classes[j].dtype)
        classes[j] = astype(classes[j], np.int64, copy=False)

        # use uniform distribution if no class_probability is given
        if class_probability is None:
            class_prob_j = np.empty(shape=classes[j].shape[0])
            class_prob_j.fill(1 / classes[j].shape[0])
        else:
            class_prob_j = np.asarray(class_probability[j])

        # BUG FIX: exact float equality (``!= 1.0``) rejected valid
        # distributions whose sum differs from 1 only by rounding error
        # (e.g. ten entries of 0.1); compare within tolerance instead.
        if not np.isclose(np.sum(class_prob_j), 1.0):
            raise ValueError("Probability array at index {0} does not sum to "
                             "one".format(j))

        if class_prob_j.shape[0] != classes[j].shape[0]:
            raise ValueError("classes[{0}] (length {1}) and "
                             "class_probability[{0}] (length {2}) have "
                             "different length.".format(j,
                                                        classes[j].shape[0],
                                                        class_prob_j.shape[0]))

        # If 0 is not present in the classes insert it with a probability 0.0
        if 0 not in classes[j]:
            classes[j] = np.insert(classes[j], 0, 0)
            class_prob_j = np.insert(class_prob_j, 0, 0.0)

        # If there are nonzero classes choose randomly using class_probability.
        # NOTE: check_random_state is deliberately called per column so that an
        # int seed reproduces the same stream for each column.
        rng = check_random_state(random_state)
        if classes[j].shape[0] > 1:
            p_nonzero = 1 - class_prob_j[classes[j] == 0]
            nnz = int(n_samples * p_nonzero)
            ind_sample = sample_without_replacement(n_population=n_samples,
                                                   n_samples=nnz,
                                                   random_state=random_state)
            indices.extend(ind_sample)

            # Normalize probabilites for the nonzero elements
            classes_j_nonzero = classes[j] != 0
            class_probability_nz = class_prob_j[classes_j_nonzero]
            class_probability_nz_norm = (class_probability_nz /
                                         np.sum(class_probability_nz))
            # Inverse-CDF sampling of the nonzero class values.
            classes_ind = np.searchsorted(class_probability_nz_norm.cumsum(),
                                          rng.rand(nnz))
            data.extend(classes[j][classes_j_nonzero][classes_ind])
        indptr.append(len(indices))

    return sp.csc_matrix((data, indices, indptr),
                         (n_samples, len(classes)),
                         dtype=int)
| bsd-3-clause |
jlegendary/scikit-learn | sklearn/neighbors/nearest_centroid.py | 199 | 7249 | # -*- coding: utf-8 -*-
"""
Nearest Centroid Classification
"""
# Author: Robert Layton <robertlayton@gmail.com>
# Olivier Grisel <olivier.grisel@ensta.org>
#
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import sparse as sp
from ..base import BaseEstimator, ClassifierMixin
from ..metrics.pairwise import pairwise_distances
from ..preprocessing import LabelEncoder
from ..utils.validation import check_array, check_X_y, check_is_fitted
from ..utils.sparsefuncs import csc_median_axis_0
class NearestCentroid(BaseEstimator, ClassifierMixin):
    """Nearest centroid classifier.

    Each class is represented by its centroid, with test samples classified to
    the class with the nearest centroid.

    Read more in the :ref:`User Guide <nearest_centroid_classifier>`.

    Parameters
    ----------
    metric: string, or callable
        The metric to use when calculating distance between instances in a
        feature array. If metric is a string or callable, it must be one of
        the options allowed by metrics.pairwise.pairwise_distances for its
        metric parameter.
        The centroids for the samples corresponding to each class is the point
        from which the sum of the distances (according to the metric) of all
        samples that belong to that particular class are minimized.
        If the "manhattan" metric is provided, this centroid is the median and
        for all other metrics, the centroid is now set to be the mean.

    shrink_threshold : float, optional (default = None)
        Threshold for shrinking centroids to remove features.

    Attributes
    ----------
    centroids_ : array-like, shape = [n_classes, n_features]
        Centroid of each class

    Examples
    --------
    >>> from sklearn.neighbors.nearest_centroid import NearestCentroid
    >>> import numpy as np
    >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
    >>> y = np.array([1, 1, 1, 2, 2, 2])
    >>> clf = NearestCentroid()
    >>> clf.fit(X, y)
    NearestCentroid(metric='euclidean', shrink_threshold=None)
    >>> print(clf.predict([[-0.8, -1]]))
    [1]

    See also
    --------
    sklearn.neighbors.KNeighborsClassifier: nearest neighbors classifier

    Notes
    -----
    When used for text classification with tf-idf vectors, this classifier is
    also known as the Rocchio classifier.

    References
    ----------
    Tibshirani, R., Hastie, T., Narasimhan, B., & Chu, G. (2002). Diagnosis of
    multiple cancer types by shrunken centroids of gene expression. Proceedings
    of the National Academy of Sciences of the United States of America,
    99(10), 6567-6572. The National Academy of Sciences.
    """

    def __init__(self, metric='euclidean', shrink_threshold=None):
        self.metric = metric
        self.shrink_threshold = shrink_threshold

    def fit(self, X, y):
        """
        Fit the NearestCentroid model according to the given training data.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vector, where n_samples in the number of samples and
            n_features is the number of features.
            Note that centroid shrinking cannot be used with sparse matrices.
        y : array, shape = [n_samples]
            Target values (integers)
        """
        # If X is sparse and the metric is "manhattan", store it in a csc
        # format is easier to calculate the median.
        if self.metric == 'manhattan':
            X, y = check_X_y(X, y, ['csc'])
        else:
            X, y = check_X_y(X, y, ['csr', 'csc'])
        is_X_sparse = sp.issparse(X)
        # Shrinkage uses dense broadcasting below, so it is incompatible
        # with sparse input.
        if is_X_sparse and self.shrink_threshold:
            raise ValueError("threshold shrinking not supported"
                             " for sparse input")
        n_samples, n_features = X.shape
        # Encode labels as 0..n_classes-1 so they can index centroid rows.
        le = LabelEncoder()
        y_ind = le.fit_transform(y)
        self.classes_ = classes = le.classes_
        n_classes = classes.size
        if n_classes < 2:
            raise ValueError('y has less than 2 classes')

        # Mask mapping each class to its members.
        self.centroids_ = np.empty((n_classes, n_features), dtype=np.float64)
        # Number of clusters in each class.
        nk = np.zeros(n_classes)

        for cur_class in range(n_classes):
            center_mask = y_ind == cur_class
            nk[cur_class] = np.sum(center_mask)
            if is_X_sparse:
                # Sparse matrices are indexed with explicit row indices
                # rather than a boolean mask.
                center_mask = np.where(center_mask)[0]

            # XXX: Update other averaging methods according to the metrics.
            if self.metric == "manhattan":
                # NumPy does not calculate median of sparse matrices.
                if not is_X_sparse:
                    self.centroids_[cur_class] = np.median(X[center_mask], axis=0)
                else:
                    self.centroids_[cur_class] = csc_median_axis_0(X[center_mask])
            else:
                if self.metric != 'euclidean':
                    warnings.warn("Averaging for metrics other than "
                                  "euclidean and manhattan not supported. "
                                  "The average is set to be the mean."
                                  )
                self.centroids_[cur_class] = X[center_mask].mean(axis=0)

        if self.shrink_threshold:
            # Shrunken-centroid scheme of Tibshirani et al. (2002); see the
            # class-level References section.
            dataset_centroid_ = np.mean(X, axis=0)

            # m parameter for determining deviation
            m = np.sqrt((1. / nk) + (1. / n_samples))
            # Calculate deviation using the standard deviation of centroids.
            variance = (X - self.centroids_[y_ind]) ** 2
            variance = variance.sum(axis=0)
            s = np.sqrt(variance / (n_samples - n_classes))
            s += np.median(s)  # To deter outliers from affecting the results.
            mm = m.reshape(len(m), 1)  # Reshape to allow broadcasting.
            ms = mm * s
            deviation = ((self.centroids_ - dataset_centroid_) / ms)
            # Soft thresholding: if the deviation crosses 0 during shrinking,
            # it becomes zero.
            signs = np.sign(deviation)
            deviation = (np.abs(deviation) - self.shrink_threshold)
            deviation[deviation < 0] = 0
            deviation *= signs
            # Now adjust the centroids using the deviation
            msd = ms * deviation
            self.centroids_ = dataset_centroid_[np.newaxis, :] + msd
        return self

    def predict(self, X):
        """Perform classification on an array of test vectors X.

        The predicted class C for each sample in X is returned.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]

        Returns
        -------
        C : array, shape = [n_samples]

        Notes
        -----
        If the metric constructor parameter is "precomputed", X is assumed to
        be the distance matrix between the data to be predicted and
        ``self.centroids_``.
        """
        check_is_fitted(self, 'centroids_')

        X = check_array(X, accept_sparse='csr')
        # For each row, pick the class whose centroid is nearest under
        # self.metric.
        return self.classes_[pairwise_distances(
            X, self.centroids_, metric=self.metric).argmin(axis=1)]
| bsd-3-clause |
madjelan/scikit-learn | sklearn/linear_model/least_angle.py | 57 | 49338 | """
Least Angle Regression algorithm. See the documentation on the
Generalized Linear Model for a complete discussion.
"""
from __future__ import print_function
# Author: Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux
#
# License: BSD 3 clause
from math import log
import sys
import warnings
from distutils.version import LooseVersion
import numpy as np
from scipy import linalg, interpolate
from scipy.linalg.lapack import get_lapack_funcs
from .base import LinearModel
from ..base import RegressorMixin
from ..utils import arrayfuncs, as_float_array, check_X_y
from ..cross_validation import check_cv
from ..utils import ConvergenceWarning
from ..externals.joblib import Parallel, delayed
from ..externals.six.moves import xrange
import scipy
# Extra keyword arguments forwarded to scipy.linalg.solve_triangular.  The
# ``check_finite`` switch (which skips input NaN/inf validation for speed)
# only exists from scipy 0.12 onwards, so it is added conditionally.
solve_triangular_args = {}
if LooseVersion(scipy.__version__) >= LooseVersion('0.12'):
    solve_triangular_args = {'check_finite': False}
def lars_path(X, y, Xy=None, Gram=None, max_iter=500,
              alpha_min=0, method='lar', copy_X=True,
              eps=np.finfo(np.float).eps,
              copy_Gram=True, verbose=0, return_path=True,
              return_n_iter=False):
    """Compute Least Angle Regression or Lasso path using LARS algorithm [1]
    The optimization objective for the case method='lasso' is::
    (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
    in the case of method='lars', the objective function is only known in
    the form of an implicit equation (see discussion in [1])
    Read more in the :ref:`User Guide <least_angle_regression>`.
    Parameters
    -----------
    X : array, shape: (n_samples, n_features)
        Input data.
    y : array, shape: (n_samples)
        Input targets.
    max_iter : integer, optional (default=500)
        Maximum number of iterations to perform, set to infinity for no limit.
    Gram : None, 'auto', array, shape: (n_features, n_features), optional
        Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram
        matrix is precomputed from the given X, if there are more samples
        than features.
    alpha_min : float, optional (default=0)
        Minimum correlation along the path. It corresponds to the
        regularization parameter alpha parameter in the Lasso.
    method : {'lar', 'lasso'}, optional (default='lar')
        Specifies the returned model. Select ``'lar'`` for Least Angle
        Regression, ``'lasso'`` for the Lasso.
    eps : float, optional (default=``np.finfo(np.float).eps``)
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems.
    copy_X : bool, optional (default=True)
        If ``False``, ``X`` is overwritten.
    copy_Gram : bool, optional (default=True)
        If ``False``, ``Gram`` is overwritten.
    verbose : int (default=0)
        Controls output verbosity.
    return_path : bool, optional (default=True)
        If ``return_path==True`` returns the entire path, else returns only the
        last point of the path.
    return_n_iter : bool, optional (default=False)
        Whether to return the number of iterations.
    Returns
    --------
    alphas : array, shape: [n_alphas + 1]
        Maximum of covariances (in absolute value) at each iteration.
        ``n_alphas`` is either ``max_iter``, ``n_features`` or the
        number of nodes in the path with ``alpha >= alpha_min``, whichever
        is smaller.
    active : array, shape [n_alphas]
        Indices of active variables at the end of the path.
    coefs : array, shape (n_features, n_alphas + 1)
        Coefficients along the path
    n_iter : int
        Number of iterations run. Returned only if return_n_iter is set
        to True.
    See also
    --------
    lasso_path
    LassoLars
    Lars
    LassoLarsCV
    LarsCV
    sklearn.decomposition.sparse_encode
    References
    ----------
    .. [1] "Least Angle Regression", Efron et al.
           http://www-stat.stanford.edu/~tibs/ftp/lars.pdf
    .. [2] `Wikipedia entry on the Least-angle regression
           <http://en.wikipedia.org/wiki/Least-angle_regression>`_
    .. [3] `Wikipedia entry on the Lasso
           <http://en.wikipedia.org/wiki/Lasso_(statistics)#Lasso_method>`_
    """
    n_features = X.shape[1]
    n_samples = y.size
    # At most one feature can be added per iteration, so the path has at
    # most min(max_iter, n_features) breakpoints (plus the all-zero start).
    max_features = min(max_iter, n_features)
    if return_path:
        coefs = np.zeros((max_features + 1, n_features))
        alphas = np.zeros(max_features + 1)
    else:
        coef, prev_coef = np.zeros(n_features), np.zeros(n_features)
        alpha, prev_alpha = np.array([0.]), np.array([0.])  # better ideas?
    n_iter, n_active = 0, 0
    # ``active`` holds original feature indices of the active set, in the
    # order they entered; ``indices`` tracks the column permutation applied
    # to X / Gram by the in-place swaps below.
    active, indices = list(), np.arange(n_features)
    # holds the sign of covariance
    sign_active = np.empty(max_features, dtype=np.int8)
    drop = False

    # will hold the cholesky factorization. Only lower part is
    # referenced.
    # We are initializing this to "zeros" and not empty, because
    # it is passed to scipy linalg functions and thus if it has NaNs,
    # even if they are in the upper part that it not used, we
    # get errors raised.
    # Once we support only scipy > 0.12 we can use check_finite=False and
    # go back to "empty"
    L = np.zeros((max_features, max_features), dtype=X.dtype)
    swap, nrm2 = linalg.get_blas_funcs(('swap', 'nrm2'), (X,))
    solve_cholesky, = get_lapack_funcs(('potrs',), (X,))

    if Gram is None:
        if copy_X:
            # force copy. setting the array to be fortran-ordered
            # speeds up the calculation of the (partial) Gram matrix
            # and allows to easily swap columns
            X = X.copy('F')
    elif Gram == 'auto':
        Gram = None
        if X.shape[0] > X.shape[1]:
            Gram = np.dot(X.T, X)
    elif copy_Gram:
        Gram = Gram.copy()

    # Cov holds the covariance of the *inactive* regressors with the
    # current residual; the active entries are progressively swapped to
    # the front and sliced off.
    if Xy is None:
        Cov = np.dot(X.T, y)
    else:
        Cov = Xy.copy()

    if verbose:
        if verbose > 1:
            print("Step\t\tAdded\t\tDropped\t\tActive set size\t\tC")
        else:
            sys.stdout.write('.')
            sys.stdout.flush()

    tiny = np.finfo(np.float).tiny  # to avoid division by 0 warning
    tiny32 = np.finfo(np.float32).tiny  # to avoid division by 0 warning
    equality_tolerance = np.finfo(np.float32).eps

    # Main loop: each iteration either adds the most-correlated inactive
    # regressor to the active set, or (lasso only) drops one whose
    # coefficient crossed zero, then advances along the equiangular
    # direction until the next breakpoint.
    while True:
        if Cov.size:
            C_idx = np.argmax(np.abs(Cov))
            C_ = Cov[C_idx]
            C = np.fabs(C_)
        else:
            C = 0.

        if return_path:
            alpha = alphas[n_iter, np.newaxis]
            coef = coefs[n_iter]
            prev_alpha = alphas[n_iter - 1, np.newaxis]
            prev_coef = coefs[n_iter - 1]

        # Current regularization level: the largest absolute correlation,
        # normalized by the number of samples (matches the Lasso objective).
        alpha[0] = C / n_samples
        if alpha[0] <= alpha_min + equality_tolerance:  # early stopping
            if abs(alpha[0] - alpha_min) > equality_tolerance:
                # interpolation factor 0 <= ss < 1
                if n_iter > 0:
                    # In the first iteration, all alphas are zero, the formula
                    # below would make ss a NaN
                    ss = ((prev_alpha[0] - alpha_min) /
                          (prev_alpha[0] - alpha[0]))
                    coef[:] = prev_coef + ss * (coef - prev_coef)
                alpha[0] = alpha_min
            if return_path:
                coefs[n_iter] = coef
            break

        if n_iter >= max_iter or n_active >= n_features:
            break
        if not drop:

            ##########################################################
            # Append x_j to the Cholesky factorization of (Xa * Xa') #
            #                                                        #
            #            ( L   0 )                                   #
            #     L  ->  (       )  , where L * w = Xa' x_j          #
            #            ( w   z )    and z = ||x_j||                #
            #                                                        #
            ##########################################################

            sign_active[n_active] = np.sign(C_)
            m, n = n_active, C_idx + n_active

            Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
            indices[n], indices[m] = indices[m], indices[n]
            Cov_not_shortened = Cov
            Cov = Cov[1:]  # remove Cov[0]

            if Gram is None:
                X.T[n], X.T[m] = swap(X.T[n], X.T[m])
                c = nrm2(X.T[n_active]) ** 2
                L[n_active, :n_active] = \
                    np.dot(X.T[n_active], X.T[:n_active].T)
            else:
                # swap does only work inplace if matrix is fortran
                # contiguous ...
                Gram[m], Gram[n] = swap(Gram[m], Gram[n])
                Gram[:, m], Gram[:, n] = swap(Gram[:, m], Gram[:, n])
                c = Gram[n_active, n_active]
                L[n_active, :n_active] = Gram[n_active, :n_active]

            # Update the cholesky decomposition for the Gram matrix
            if n_active:
                linalg.solve_triangular(L[:n_active, :n_active],
                                        L[n_active, :n_active],
                                        trans=0, lower=1,
                                        overwrite_b=True,
                                        **solve_triangular_args)

            v = np.dot(L[n_active, :n_active], L[n_active, :n_active])
            # Clamp the new Cholesky pivot away from zero with ``eps``.
            diag = max(np.sqrt(np.abs(c - v)), eps)
            L[n_active, n_active] = diag

            if diag < 1e-7:
                # The system is becoming too ill-conditioned.
                # We have degenerate vectors in our active set.
                # We'll 'drop for good' the last regressor added.

                # Note: this case is very rare. It is no longer triggered by the
                # test suite. The `equality_tolerance` margin added in 0.16.0 to
                # get early stopping to work consistently on all versions of
                # Python including 32 bit Python under Windows seems to make it
                # very difficult to trigger the 'drop for good' strategy.
                warnings.warn('Regressors in active set degenerate. '
                              'Dropping a regressor, after %i iterations, '
                              'i.e. alpha=%.3e, '
                              'with an active set of %i regressors, and '
                              'the smallest cholesky pivot element being %.3e'
                              % (n_iter, alpha, n_active, diag),
                              ConvergenceWarning)
                # XXX: need to figure a 'drop for good' way
                Cov = Cov_not_shortened
                Cov[0] = 0
                Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
                continue

            active.append(indices[n_active])
            n_active += 1

            if verbose > 1:
                print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, active[-1], '',
                                                      n_active, C))

        if method == 'lasso' and n_iter > 0 and prev_alpha[0] < alpha[0]:
            # alpha is increasing. This is because the updates of Cov are
            # bringing in too much numerical error that is greater than
            # than the remaining correlation with the
            # regressors. Time to bail out
            warnings.warn('Early stopping the lars path, as the residues '
                          'are small and the current value of alpha is no '
                          'longer well controlled. %i iterations, alpha=%.3e, '
                          'previous alpha=%.3e, with an active set of %i '
                          'regressors.'
                          % (n_iter, alpha, prev_alpha, n_active),
                          ConvergenceWarning)
            break

        # least squares solution
        least_squares, info = solve_cholesky(L[:n_active, :n_active],
                                             sign_active[:n_active],
                                             lower=True)

        if least_squares.size == 1 and least_squares == 0:
            # This happens because sign_active[:n_active] = 0
            least_squares[...] = 1
            AA = 1.
        else:
            # is this really needed ?
            AA = 1. / np.sqrt(np.sum(least_squares * sign_active[:n_active]))

            if not np.isfinite(AA):
                # L is too ill-conditioned
                # Regularize the diagonal with geometrically growing jitter
                # until the normalization constant becomes finite.
                i = 0
                L_ = L[:n_active, :n_active].copy()
                while not np.isfinite(AA):
                    L_.flat[::n_active + 1] += (2 ** i) * eps
                    least_squares, info = solve_cholesky(
                        L_, sign_active[:n_active], lower=True)
                    tmp = max(np.sum(least_squares * sign_active[:n_active]),
                              eps)
                    AA = 1. / np.sqrt(tmp)
                    i += 1
        least_squares *= AA

        if Gram is None:
            # equiangular direction of variables in the active set
            eq_dir = np.dot(X.T[:n_active].T, least_squares)
            # correlation between each unactive variables and
            # eqiangular vector
            corr_eq_dir = np.dot(X.T[n_active:], eq_dir)
        else:
            # if huge number of features, this takes 50% of time, I
            # think could be avoided if we just update it using an
            # orthogonal (QR) decomposition of X
            corr_eq_dir = np.dot(Gram[:n_active, n_active:].T,
                                 least_squares)

        # Step length to the next point where an inactive regressor reaches
        # the same absolute correlation as the active set (LARS breakpoint).
        g1 = arrayfuncs.min_pos((C - Cov) / (AA - corr_eq_dir + tiny))
        g2 = arrayfuncs.min_pos((C + Cov) / (AA + corr_eq_dir + tiny))
        gamma_ = min(g1, g2, C / AA)

        # TODO: better names for these variables: z
        drop = False
        z = -coef[active] / (least_squares + tiny32)
        z_pos = arrayfuncs.min_pos(z)
        if z_pos < gamma_:
            # some coefficients have changed sign
            idx = np.where(z == z_pos)[0][::-1]

            # update the sign, important for LAR
            sign_active[idx] = -sign_active[idx]

            if method == 'lasso':
                gamma_ = z_pos
            drop = True

        n_iter += 1

        if return_path:
            if n_iter >= coefs.shape[0]:
                del coef, alpha, prev_alpha, prev_coef
                # resize the coefs and alphas array
                add_features = 2 * max(1, (max_features - n_active))
                coefs = np.resize(coefs, (n_iter + add_features, n_features))
                alphas = np.resize(alphas, n_iter + add_features)
            coef = coefs[n_iter]
            prev_coef = coefs[n_iter - 1]
            alpha = alphas[n_iter, np.newaxis]
            prev_alpha = alphas[n_iter - 1, np.newaxis]
        else:
            # mimic the effect of incrementing n_iter on the array references
            prev_coef = coef
            prev_alpha[0] = alpha[0]
            coef = np.zeros_like(coef)

        coef[active] = prev_coef[active] + gamma_ * least_squares

        # update correlations
        Cov -= gamma_ * corr_eq_dir

        # See if any coefficient has changed sign
        if drop and method == 'lasso':

            # handle the case when idx is not length of 1
            [arrayfuncs.cholesky_delete(L[:n_active, :n_active], ii) for ii in
                idx]

            n_active -= 1
            m, n = idx, n_active
            # handle the case when idx is not length of 1
            drop_idx = [active.pop(ii) for ii in idx]

            if Gram is None:
                # propagate dropped variable
                for ii in idx:
                    for i in range(ii, n_active):
                        X.T[i], X.T[i + 1] = swap(X.T[i], X.T[i + 1])
                        # yeah this is stupid
                        indices[i], indices[i + 1] = indices[i + 1], indices[i]

                # TODO: this could be updated
                residual = y - np.dot(X[:, :n_active], coef[active])
                temp = np.dot(X.T[n_active], residual)

                Cov = np.r_[temp, Cov]
            else:
                for ii in idx:
                    for i in range(ii, n_active):
                        indices[i], indices[i + 1] = indices[i + 1], indices[i]
                        Gram[i], Gram[i + 1] = swap(Gram[i], Gram[i + 1])
                        Gram[:, i], Gram[:, i + 1] = swap(Gram[:, i],
                                                          Gram[:, i + 1])

                # Cov_n = Cov_j + x_j * X + increment(betas) TODO:
                # will this still work with multiple drops ?

                # recompute covariance. Probably could be done better
                # wrong as Xy is not swapped with the rest of variables

                # TODO: this could be updated
                residual = y - np.dot(X, coef)
                temp = np.dot(X.T[drop_idx], residual)
                Cov = np.r_[temp, Cov]

            sign_active = np.delete(sign_active, idx)
            sign_active = np.append(sign_active, 0.)  # just to maintain size
            if verbose > 1:
                print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, '', drop_idx,
                                                      n_active, abs(temp)))

    if return_path:
        # resize coefs in case of early stop
        alphas = alphas[:n_iter + 1]
        coefs = coefs[:n_iter + 1]

        if return_n_iter:
            return alphas, active, coefs.T, n_iter
        else:
            return alphas, active, coefs.T
    else:
        if return_n_iter:
            return alpha, active, coef, n_iter
        else:
            return alpha, active, coef
###############################################################################
# Estimator classes
class Lars(LinearModel, RegressorMixin):
    """Least Angle Regression model a.k.a. LAR
    Read more in the :ref:`User Guide <least_angle_regression>`.
    Parameters
    ----------
    n_nonzero_coefs : int, optional
        Target number of non-zero coefficients. Use ``np.inf`` for no limit.
    fit_intercept : boolean
        Whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
    verbose : boolean or integer, optional
        Sets the verbosity amount
    normalize : boolean, optional, default True
        If ``True``, the regressors X will be normalized before regression.
    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.
    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.
    eps : float, optional
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems. Unlike the ``tol`` parameter in some iterative
        optimization-based algorithms, this parameter does not control
        the tolerance of the optimization.
    fit_path : boolean
        If True the full path is stored in the ``coef_path_`` attribute.
        If you compute the solution for a large problem or many targets,
        setting ``fit_path`` to ``False`` will lead to a speedup, especially
        with a small alpha.
    Attributes
    ----------
    alphas_ : array, shape (n_alphas + 1,) | list of n_targets such arrays
        Maximum of covariances (in absolute value) at each iteration. \
        ``n_alphas`` is either ``n_nonzero_coefs`` or ``n_features``, \
        whichever is smaller.
    active_ : list, length = n_alphas | list of n_targets such lists
        Indices of active variables at the end of the path.
    coef_path_ : array, shape (n_features, n_alphas + 1) \
        | list of n_targets such arrays
        The varying values of the coefficients along the path. It is not
        present if the ``fit_path`` parameter is ``False``.
    coef_ : array, shape (n_features,) or (n_targets, n_features)
        Parameter vector (w in the formulation formula).
    intercept_ : float | array, shape (n_targets,)
        Independent term in decision function.
    n_iter_ : array-like or int
        The number of iterations taken by lars_path to find the
        grid of alphas for each target.
    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.Lars(n_nonzero_coefs=1)
    >>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111])
    ... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    Lars(copy_X=True, eps=..., fit_intercept=True, fit_path=True,
       n_nonzero_coefs=1, normalize=True, precompute='auto', verbose=False)
    >>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    [ 0. -1.11...]
    See also
    --------
    lars_path, LarsCV
    sklearn.decomposition.sparse_encode
    """
    def __init__(self, fit_intercept=True, verbose=False, normalize=True,
                 precompute='auto', n_nonzero_coefs=500,
                 eps=np.finfo(np.float).eps, copy_X=True, fit_path=True):
        self.fit_intercept = fit_intercept
        self.verbose = verbose
        self.normalize = normalize
        # Subclasses (e.g. LassoLars) override this to run the lasso
        # variant of the LARS solver.
        self.method = 'lar'
        self.precompute = precompute
        self.n_nonzero_coefs = n_nonzero_coefs
        self.eps = eps
        self.copy_X = copy_X
        self.fit_path = fit_path

    def _get_gram(self):
        # Translate the ``precompute`` parameter into the ``Gram`` argument
        # understood by lars_path: an explicit matrix, 'auto', or None.
        # precompute if n_samples > n_features
        precompute = self.precompute
        if hasattr(precompute, '__array__'):
            Gram = precompute
        elif precompute == 'auto':
            Gram = 'auto'
        else:
            Gram = None
        return Gram

    def fit(self, X, y, Xy=None):
        """Fit the model using X, y as training data.
        parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.
        y : array-like, shape (n_samples,) or (n_samples, n_targets)
            Target values.
        Xy : array-like, shape (n_samples,) or (n_samples, n_targets), \
                optional
            Xy = np.dot(X.T, y) that can be precomputed. It is useful
            only when the Gram matrix is precomputed.
        returns
        -------
        self : object
            returns an instance of self.
        """
        X, y = check_X_y(X, y, y_numeric=True, multi_output=True)

        n_features = X.shape[1]

        X, y, X_mean, y_mean, X_std = self._center_data(X, y,
                                                        self.fit_intercept,
                                                        self.normalize,
                                                        self.copy_X)

        # Handle single-target y uniformly as a (n_samples, 1) 2-D array.
        if y.ndim == 1:
            y = y[:, np.newaxis]

        n_targets = y.shape[1]

        alpha = getattr(self, 'alpha', 0.)
        if hasattr(self, 'n_nonzero_coefs'):
            alpha = 0.  # n_nonzero_coefs parametrization takes priority
            max_iter = self.n_nonzero_coefs
        else:
            max_iter = self.max_iter

        precompute = self.precompute
        # Precompute the Gram matrix here when it pays off: either it was
        # requested explicitly, or 'auto' with more samples than features,
        # or 'auto' with several targets (the Gram matrix is reused once
        # per target).
        if not hasattr(precompute, '__array__') and (
                precompute is True or
                (precompute == 'auto' and X.shape[0] > X.shape[1]) or
                (precompute == 'auto' and y.shape[1] > 1)):
            Gram = np.dot(X.T, X)
        else:
            Gram = self._get_gram()

        self.alphas_ = []
        self.n_iter_ = []

        if self.fit_path:
            # Store the full regularization path per target.
            self.coef_ = []
            self.active_ = []
            self.coef_path_ = []
            for k in xrange(n_targets):
                this_Xy = None if Xy is None else Xy[:, k]
                alphas, active, coef_path, n_iter_ = lars_path(
                    X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X,
                    copy_Gram=True, alpha_min=alpha, method=self.method,
                    verbose=max(0, self.verbose - 1), max_iter=max_iter,
                    eps=self.eps, return_path=True,
                    return_n_iter=True)
                self.alphas_.append(alphas)
                self.active_.append(active)
                self.n_iter_.append(n_iter_)
                self.coef_path_.append(coef_path)
                self.coef_.append(coef_path[:, -1])

            # For a single target, unwrap the one-element lists so the
            # attributes are plain arrays/lists.
            if n_targets == 1:
                self.alphas_, self.active_, self.coef_path_, self.coef_ = [
                    a[0] for a in (self.alphas_, self.active_, self.coef_path_,
                                   self.coef_)]
                self.n_iter_ = self.n_iter_[0]
        else:
            # Only the last point of the path is needed; cheaper.
            self.coef_ = np.empty((n_targets, n_features))
            for k in xrange(n_targets):
                this_Xy = None if Xy is None else Xy[:, k]
                alphas, _, self.coef_[k], n_iter_ = lars_path(
                    X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X,
                    copy_Gram=True, alpha_min=alpha, method=self.method,
                    verbose=max(0, self.verbose - 1), max_iter=max_iter,
                    eps=self.eps, return_path=False, return_n_iter=True)
                self.alphas_.append(alphas)
                self.n_iter_.append(n_iter_)
            if n_targets == 1:
                self.alphas_ = self.alphas_[0]
                self.n_iter_ = self.n_iter_[0]

        # Undo the centering/normalization done above on the intercept.
        self._set_intercept(X_mean, y_mean, X_std)
        return self
class LassoLars(Lars):
    """Lasso model fit with Least Angle Regression a.k.a. Lars

    This is a linear model trained with an L1 prior as regularizer;
    the optimization objective for Lasso is::

    (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1

    Read more in the :ref:`User Guide <least_angle_regression>`.
    Parameters
    ----------
    alpha : float
        Constant that multiplies the penalty term. Defaults to 1.0.
        ``alpha = 0`` is equivalent to an ordinary least square, solved
        by :class:`LinearRegression`. For numerical reasons, using
        ``alpha = 0`` with the LassoLars object is not advised and you
        should prefer the LinearRegression object.
    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
    verbose : boolean or integer, optional
        Sets the verbosity amount
    normalize : boolean, optional, default True
        If True, the regressors X will be normalized before regression.
    copy_X : boolean, optional, default True
        If True, X will be copied; else, it may be overwritten.
    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.
    max_iter : integer, optional
        Maximum number of iterations to perform.
    eps : float, optional
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems. Unlike the ``tol`` parameter in some iterative
        optimization-based algorithms, this parameter does not control
        the tolerance of the optimization.
    fit_path : boolean
        If ``True`` the full path is stored in the ``coef_path_`` attribute.
        If you compute the solution for a large problem or many targets,
        setting ``fit_path`` to ``False`` will lead to a speedup, especially
        with a small alpha.
    Attributes
    ----------
    alphas_ : array, shape (n_alphas + 1,) | list of n_targets such arrays
        Maximum of covariances (in absolute value) at each iteration. \
        ``n_alphas`` is either ``max_iter``, ``n_features``, or the number of \
        nodes in the path with correlation greater than ``alpha``, whichever \
        is smaller.
    active_ : list, length = n_alphas | list of n_targets such lists
        Indices of active variables at the end of the path.
    coef_path_ : array, shape (n_features, n_alphas + 1) or list
        If a list is passed it's expected to be one of n_targets such arrays.
        The varying values of the coefficients along the path. It is not
        present if the ``fit_path`` parameter is ``False``.
    coef_ : array, shape (n_features,) or (n_targets, n_features)
        Parameter vector (w in the formulation formula).
    intercept_ : float | array, shape (n_targets,)
        Independent term in decision function.
    n_iter_ : array-like or int.
        The number of iterations taken by lars_path to find the
        grid of alphas for each target.
    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.LassoLars(alpha=0.01)
    >>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1, 0, -1])
    ... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    LassoLars(alpha=0.01, copy_X=True, eps=..., fit_intercept=True,
         fit_path=True, max_iter=500, normalize=True, precompute='auto',
         verbose=False)
    >>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    [ 0.         -0.963257...]
    See also
    --------
    lars_path
    lasso_path
    Lasso
    LassoCV
    LassoLarsCV
    sklearn.decomposition.sparse_encode
    """
    def __init__(self, alpha=1.0, fit_intercept=True, verbose=False,
                 normalize=True, precompute='auto', max_iter=500,
                 eps=np.finfo(np.float).eps, copy_X=True, fit_path=True):
        # Run the parent's LARS machinery in its 'lasso' variant; every
        # other constructor argument is stored verbatim (required by the
        # scikit-learn estimator contract: __init__ only records params).
        self.method = 'lasso'
        self.alpha = alpha
        self.max_iter = max_iter
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.precompute = precompute
        self.eps = eps
        self.copy_X = copy_X
        self.fit_path = fit_path
        self.verbose = verbose
###############################################################################
# Cross-validated estimator classes
def _check_copy_and_writeable(array, copy=False):
if copy or not array.flags.writeable:
return array.copy()
return array
def _lars_path_residues(X_train, y_train, X_test, y_test, Gram=None,
                        copy=True, method='lars', verbose=False,
                        fit_intercept=True, normalize=True, max_iter=500,
                        eps=np.finfo(np.float).eps):
    """Compute the residues on left-out data for a full LARS path
    Parameters
    -----------
    X_train : array, shape (n_samples, n_features)
        The data to fit the LARS on
    y_train : array, shape (n_samples)
        The target variable to fit LARS on
    X_test : array, shape (n_samples, n_features)
        The data to compute the residues on
    y_test : array, shape (n_samples)
        The target variable to compute the residues on
    Gram : None, 'auto', array, shape: (n_features, n_features), optional
        Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram
        matrix is precomputed from the given X, if there are more samples
        than features
    copy : boolean, optional
        Whether X_train, X_test, y_train and y_test should be copied;
        if False, they may be overwritten.
    method : 'lar' | 'lasso'
        Specifies the returned model. Select ``'lar'`` for Least Angle
        Regression, ``'lasso'`` for the Lasso.
    verbose : integer, optional
        Sets the amount of verbosity
    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
    normalize : boolean, optional, default True
        If True, the regressors X will be normalized before regression.
    max_iter : integer, optional
        Maximum number of iterations to perform.
    eps : float, optional
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems. Unlike the ``tol`` parameter in some iterative
        optimization-based algorithms, this parameter does not control
        the tolerance of the optimization.
    Returns
    --------
    alphas : array, shape (n_alphas,)
        Maximum of covariances (in absolute value) at each iteration.
        ``n_alphas`` is either ``max_iter`` or ``n_features``, whichever
        is smaller.
    active : list
        Indices of active variables at the end of the path.
    coefs : array, shape (n_features, n_alphas)
        Coefficients along the path
    residues : array, shape (n_alphas, n_samples)
        Residues of the prediction on the test data
    """
    # The centering/normalization below mutates the arrays, so take copies
    # unless the caller allowed overwriting AND the arrays are writeable.
    X_train = _check_copy_and_writeable(X_train, copy)
    y_train = _check_copy_and_writeable(y_train, copy)
    X_test = _check_copy_and_writeable(X_test, copy)
    y_test = _check_copy_and_writeable(y_test, copy)

    if fit_intercept:
        # Center both sets with the *training* statistics so the test
        # residues are computed in the same (centered) space as the fit.
        X_mean = X_train.mean(axis=0)
        X_train -= X_mean
        X_test -= X_mean
        y_mean = y_train.mean(axis=0)
        y_train = as_float_array(y_train, copy=False)
        y_train -= y_mean
        y_test = as_float_array(y_test, copy=False)
        y_test -= y_mean

    if normalize:
        norms = np.sqrt(np.sum(X_train ** 2, axis=0))
        # Skip all-zero columns to avoid dividing by zero.
        nonzeros = np.flatnonzero(norms)
        X_train[:, nonzeros] /= norms[nonzeros]

    alphas, active, coefs = lars_path(
        X_train, y_train, Gram=Gram, copy_X=False, copy_Gram=False,
        method=method, verbose=max(0, verbose - 1), max_iter=max_iter, eps=eps)
    if normalize:
        # Rescale the coefficients back to the original (unnormalized)
        # feature scale before evaluating on the test data.
        coefs[nonzeros] /= norms[nonzeros][:, np.newaxis]
    # One residue vector per point of the path (hence the transpose).
    residues = np.dot(X_test, coefs) - y_test[:, np.newaxis]
    return alphas, active, coefs, residues.T
class LarsCV(Lars):
    """Cross-validated Least Angle Regression model
    Read more in the :ref:`User Guide <least_angle_regression>`.
    Parameters
    ----------
    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
    verbose : boolean or integer, optional
        Sets the verbosity amount
    normalize : boolean, optional, default True
        If True, the regressors X will be normalized before regression.
    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.
    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.
    max_iter: integer, optional
        Maximum number of iterations to perform.
    cv : cross-validation generator, optional
        see :mod:`sklearn.cross_validation`. If ``None`` is passed, default to
        a 5-fold strategy
    max_n_alphas : integer, optional
        The maximum number of points on the path used to compute the
        residuals in the cross-validation
    n_jobs : integer, optional
        Number of CPUs to use during the cross validation. If ``-1``, use
        all the CPUs
    eps : float, optional
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems.
    Attributes
    ----------
    coef_ : array, shape (n_features,)
        parameter vector (w in the formulation formula)
    intercept_ : float
        independent term in decision function
    coef_path_ : array, shape (n_features, n_alphas)
        the varying values of the coefficients along the path
    alpha_ : float
        the estimated regularization parameter alpha
    alphas_ : array, shape (n_alphas,)
        the different values of alpha along the path
    cv_alphas_ : array, shape (n_cv_alphas,)
        all the values of alpha along the path for the different folds
    cv_mse_path_ : array, shape (n_folds, n_cv_alphas)
        the mean square error on left-out for each fold along the path
        (alpha values given by ``cv_alphas``)
    n_iter_ : array-like or int
        the number of iterations run by Lars with the optimal alpha.
    See also
    --------
    lars_path, LassoLars, LassoLarsCV
    """

    # Overridden by LassoLarsCV to run the lasso variant of the solver.
    method = 'lar'

    def __init__(self, fit_intercept=True, verbose=False, max_iter=500,
                 normalize=True, precompute='auto', cv=None,
                 max_n_alphas=1000, n_jobs=1, eps=np.finfo(np.float).eps,
                 copy_X=True):
        self.fit_intercept = fit_intercept
        self.max_iter = max_iter
        self.verbose = verbose
        self.normalize = normalize
        self.precompute = precompute
        self.copy_X = copy_X
        self.cv = cv
        self.max_n_alphas = max_n_alphas
        self.n_jobs = n_jobs
        self.eps = eps

    def fit(self, X, y):
        """Fit the model using X, y as training data.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.
        y : array-like, shape (n_samples,)
            Target values.
        Returns
        -------
        self : object
            returns an instance of self.
        """
        self.fit_path = True
        X, y = check_X_y(X, y, y_numeric=True)

        # init cross-validation generator
        cv = check_cv(self.cv, X, y, classifier=False)

        Gram = 'auto' if self.precompute else None

        # Compute a full LARS path per fold (possibly in parallel); each
        # entry of cv_paths is (alphas, active, coefs, residues).
        cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
            delayed(_lars_path_residues)(
                X[train], y[train], X[test], y[test], Gram=Gram, copy=False,
                method=self.method, verbose=max(0, self.verbose - 1),
                normalize=self.normalize, fit_intercept=self.fit_intercept,
                max_iter=self.max_iter, eps=self.eps)
            for train, test in cv)
        all_alphas = np.concatenate(list(zip(*cv_paths))[0])
        # Unique also sorts
        all_alphas = np.unique(all_alphas)
        # Take at most max_n_alphas values
        stride = int(max(1, int(len(all_alphas) / float(self.max_n_alphas))))
        all_alphas = all_alphas[::stride]

        # The folds produce breakpoints at different alphas, so linearly
        # interpolate each fold's residues onto the common alpha grid
        # before averaging the squared errors.
        mse_path = np.empty((len(all_alphas), len(cv_paths)))
        for index, (alphas, active, coefs, residues) in enumerate(cv_paths):
            alphas = alphas[::-1]
            residues = residues[::-1]
            if alphas[0] != 0:
                alphas = np.r_[0, alphas]
                residues = np.r_[residues[0, np.newaxis], residues]
            if alphas[-1] != all_alphas[-1]:
                alphas = np.r_[alphas, all_alphas[-1]]
                residues = np.r_[residues, residues[-1, np.newaxis]]
            this_residues = interpolate.interp1d(alphas,
                                                 residues,
                                                 axis=0)(all_alphas)
            this_residues **= 2
            mse_path[:, index] = np.mean(this_residues, axis=-1)

        # Drop alpha values whose MSE is non-finite in any fold.
        mask = np.all(np.isfinite(mse_path), axis=-1)
        all_alphas = all_alphas[mask]
        mse_path = mse_path[mask]

        # Select the alpha that minimizes left-out error
        i_best_alpha = np.argmin(mse_path.mean(axis=-1))
        best_alpha = all_alphas[i_best_alpha]

        # Store our parameters
        self.alpha_ = best_alpha
        self.cv_alphas_ = all_alphas
        self.cv_mse_path_ = mse_path

        # Now compute the full model
        # it will call a lasso internally when self if LassoLarsCV
        # as self.method == 'lasso'
        Lars.fit(self, X, y)
        return self

    @property
    def alpha(self):
        # impedance matching for the above Lars.fit (should not be documented)
        return self.alpha_
class LassoLarsCV(LarsCV):
    """Cross-validated Lasso, using the LARS algorithm
    The optimization objective for Lasso is::
    (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
    Read more in the :ref:`User Guide <least_angle_regression>`.
    Parameters
    ----------
    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
    verbose : boolean or integer, optional
        Sets the verbosity amount
    normalize : boolean, optional, default True
        If True, the regressors X will be normalized before regression.
    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.
    max_iter : integer, optional
        Maximum number of iterations to perform.
    cv : cross-validation generator, optional
        see sklearn.cross_validation module. If None is passed, default to
        a 5-fold strategy
    max_n_alphas : integer, optional
        The maximum number of points on the path used to compute the
        residuals in the cross-validation
    n_jobs : integer, optional
        Number of CPUs to use during the cross validation. If ``-1``, use
        all the CPUs
    eps : float, optional
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems.
    copy_X : boolean, optional, default True
        If True, X will be copied; else, it may be overwritten.
    Attributes
    ----------
    coef_ : array, shape (n_features,)
        parameter vector (w in the formulation formula)
    intercept_ : float
        independent term in decision function.
    coef_path_ : array, shape (n_features, n_alphas)
        the varying values of the coefficients along the path
    alpha_ : float
        the estimated regularization parameter alpha
    alphas_ : array, shape (n_alphas,)
        the different values of alpha along the path
    cv_alphas_ : array, shape (n_cv_alphas,)
        all the values of alpha along the path for the different folds
    cv_mse_path_ : array, shape (n_folds, n_cv_alphas)
        the mean square error on left-out for each fold along the path
        (alpha values given by ``cv_alphas``)
    n_iter_ : array-like or int
        the number of iterations run by Lars with the optimal alpha.
    Notes
    -----
    The object solves the same problem as the LassoCV object. However,
    unlike the LassoCV, it finds the relevant alphas values by itself.
    In general, because of this property, it will be more stable.
    However, it is more fragile to heavily multicollinear datasets.
    It is more efficient than the LassoCV if only a small number of
    features are selected compared to the total number, for instance if
    there are very few samples compared to the number of features.
    See also
    --------
    lars_path, LassoLars, LarsCV, LassoCV
    """

    # The only difference from LarsCV: run the LARS solver in its
    # lasso variant (allows dropping regressors whose coefficient
    # crosses zero along the path).
    method = 'lasso'
class LassoLarsIC(LassoLars):
    """Lasso model fit with Lars using BIC or AIC for model selection

    The optimization objective for Lasso is::

        (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1

    AIC is the Akaike information criterion and BIC is the Bayes
    Information criterion. Such criteria are useful to select the value
    of the regularization parameter by making a trade-off between the
    goodness of fit and the complexity of the model. A good model should
    explain well the data while being simple.

    Read more in the :ref:`User Guide <least_angle_regression>`.

    Parameters
    ----------
    criterion : 'bic' | 'aic'
        The type of criterion to use.

    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    verbose : boolean or integer, optional
        Sets the verbosity amount

    normalize : boolean, optional, default False
        If True, the regressors X will be normalized before regression.

    copy_X : boolean, optional, default True
        If True, X will be copied; else, it may be overwritten.

    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.

    max_iter : integer, optional
        Maximum number of iterations to perform. Can be used for
        early stopping.

    eps : float, optional
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems. Unlike the ``tol`` parameter in some iterative
        optimization-based algorithms, this parameter does not control
        the tolerance of the optimization.

    Attributes
    ----------
    coef_ : array, shape (n_features,)
        parameter vector (w in the formulation formula)

    intercept_ : float
        independent term in decision function.

    alpha_ : float
        the alpha parameter chosen by the information criterion

    n_iter_ : int
        number of iterations run by lars_path to find the grid of
        alphas.

    criterion_ : array, shape (n_alphas,)
        The value of the information criteria ('aic', 'bic') across all
        alphas. The alpha which has the smallest information criteria
        is chosen.

    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.LassoLarsIC(criterion='bic')
    >>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111])
    ... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    LassoLarsIC(copy_X=True, criterion='bic', eps=..., fit_intercept=True,
          max_iter=500, normalize=True, precompute='auto',
          verbose=False)
    >>> print(clf.coef_)  # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    [ 0.  -1.11...]

    Notes
    -----
    The estimation of the number of degrees of freedom is given by:

    "On the degrees of freedom of the lasso"
    Hui Zou, Trevor Hastie, and Robert Tibshirani
    Ann. Statist. Volume 35, Number 5 (2007), 2173-2192.

    http://en.wikipedia.org/wiki/Akaike_information_criterion
    http://en.wikipedia.org/wiki/Bayesian_information_criterion

    See also
    --------
    lars_path, LassoLars, LassoLarsCV
    """

    def __init__(self, criterion='aic', fit_intercept=True, verbose=False,
                 normalize=True, precompute='auto', max_iter=500,
                 eps=np.finfo(float).eps, copy_X=True):
        # FIX: ``np.float`` was only an alias of the builtin ``float``; it was
        # deprecated in NumPy 1.20 and removed in 1.24, so use the builtin.
        self.criterion = criterion
        self.fit_intercept = fit_intercept
        self.max_iter = max_iter
        self.verbose = verbose
        self.normalize = normalize
        self.copy_X = copy_X
        self.precompute = precompute
        self.eps = eps

    def fit(self, X, y, copy_X=True):
        """Fit the model using X, y as training data.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            training data.

        y : array-like, shape (n_samples,)
            target values.

        copy_X : boolean, optional, default True
            If ``True``, X will be copied; else, it may be overwritten.

        Returns
        -------
        self : object
            returns an instance of self.
        """
        self.fit_path = True
        X, y = check_X_y(X, y, y_numeric=True)

        X, y, Xmean, ymean, Xstd = LinearModel._center_data(
            X, y, self.fit_intercept, self.normalize, self.copy_X)
        max_iter = self.max_iter

        Gram = self._get_gram()

        # Compute the full Lasso path down to alpha = 0.
        alphas_, active_, coef_path_, self.n_iter_ = lars_path(
            X, y, Gram=Gram, copy_X=copy_X, copy_Gram=True, alpha_min=0.0,
            method='lasso', verbose=self.verbose, max_iter=max_iter,
            eps=self.eps, return_n_iter=True)

        n_samples = X.shape[0]

        if self.criterion == 'aic':
            K = 2  # AIC
        elif self.criterion == 'bic':
            K = log(n_samples)  # BIC
        else:
            raise ValueError('criterion should be either bic or aic')

        R = y[:, np.newaxis] - np.dot(X, coef_path_)  # residuals
        mean_squared_error = np.mean(R ** 2, axis=0)

        # Degrees of freedom = number of non-zero coefficients at each point
        # of the path (Zou, Hastie, Tibshirani, Ann. Statist. 2007).
        # FIX: ``np.int`` (alias of builtin ``int``) was removed in NumPy 1.24.
        df = np.zeros(coef_path_.shape[1], dtype=int)
        for k, coef in enumerate(coef_path_.T):
            mask = np.abs(coef) > np.finfo(coef.dtype).eps
            if not np.any(mask):
                continue
            # get the number of degrees of freedom equal to:
            # Xc = X[:, mask]
            # Trace(Xc * inv(Xc.T, Xc) * Xc.T) ie the number of non-zero coefs
            df[k] = np.sum(mask)

        self.alphas_ = alphas_
        with np.errstate(divide='ignore'):
            self.criterion_ = n_samples * np.log(mean_squared_error) + K * df
        n_best = np.argmin(self.criterion_)

        self.alpha_ = alphas_[n_best]
        self.coef_ = coef_path_[:, n_best]
        self._set_intercept(Xmean, ymean, Xstd)
        return self
| bsd-3-clause |
iitjee/SteppinsMachineLearning | Tensorflow/01 TF Fundamentals/07 CNN-kaden.py | 1 | 4567 | # Let's create a Gaussian curve!
# The 1 dimensional gaussian takes two parameters, the mean value, and the standard deviation, which is commonly denoted by the name sigma.
mean = 0.0
sigma = 1.0
# Don't worry about trying to learn or remember this formula. I always have to refer to textbooks or check online for the exact formula.
z = (tf.exp(tf.negative(tf.pow(x - mean, 2.0) /
(2.0 * tf.pow(sigma, 2.0)))) *
(1.0 / (sigma * tf.sqrt(2.0 * 3.1415))))
res = z.eval()
plt.plot(res)
# if nothing is drawn, and you are using ipython notebook, uncomment the next two lines:
#%matplotlib inline
#plt.plot(res)
Convolution
'''Creating a 2-D Gaussian Kernel'''
# Let's store the number of values in our Gaussian curve.
ksize = z.get_shape().as_list()[0] #ksize = kernel size
# Let's multiply the two to get a 2d gaussian
z_2d = tf.matmul(tf.reshape(z, [ksize, 1]), tf.reshape(z, [1, ksize]))
# Execute the graph
plt.imshow(z_2d.eval())
#for some reason an error is coming^
Convolving an Image with a Gaussian
A very common operation that we'll come across with Deep Learning is convolution. We're going to explore what this means using our new
gaussian kernel that we've just created. For now, just think of it as a way of filtering information. We're going to effectively
filter our image using this Gaussian function, as if the gaussian function is the lens through which we'll see our image data.
What it will do is at every location we tell it to filter, it will average the image values around it based on what the kernel's
values are.
The Gaussian kernel is basically saying: take a lot of the center, and then decreasingly less as you go farther away from the
center. The effect of convolving the image with this type of kernel is that the entire image will be blurred. If you would like an
interactive exploration of convolution, this website is great: http://setosa.io/ev/image-kernels/
# Let's first load an image. We're going to need a grayscale image to begin with. skimage has some images we can play with. If you
#do not have the skimage module, you can load your own image, or get skimage by pip installing "scikit-image".
from skimage import data
import numpy as np
img = data.camera().astype(np.float32) #data.camera() returns a predefined photo
plt.imshow(img, cmap='gray')
print(img.shape)
'''
Notice our img shape is 2-dimensional.
For image convolution in Tensorflow, we need our images to be 4 dimensional.
Remember that when we load many images and combine them in a single numpy array, the resulting shape has the number of images first.
N x H x W x C
(Number of Images x Image Height x Image Width x Number of Channels)
In order to perform 2d convolution with tensorflow, we'll need the same dimensions for our image. With just 1 grayscale image, this means the shape will be:
1 x H x W x 1 (since C = 1 for grayscale)
'''
# We could use the numpy reshape function to reshape our numpy array
img_4d = img.reshape([1, img.shape[0], img.shape[1], 1])
print(img_4d.shape)
# but since we'll be using tensorflow, we can use the tensorflow reshape function:
img_4d = tf.reshape(img, [1, img.shape[0], img.shape[1], 1])
print(img_4d)
'''
output: (1, 512, 512, 1)
Tensor("Reshape_2:0", shape=(1, 512, 512, 1), dtype=float32)
Instead of getting a numpy array back, we get a tensorflow tensor. This means we can't access the shape parameter like we did with the
numpy array. But instead, we can use get_shape(), and get_shape().as_list():
print(img_4d.get_shape())
print(img_4d.get_shape().as_list())
output: (1, 512, 512, 1)
[1, 512, 512, 1]
'''
'''
We'll also have to reshape our Gaussian Kernel to be 4-dimensional as well. The dimensions for kernels are slightly different
Remember that the image is:
Number of Images x Image Height x Image Width x Number of Channels
we have:
Kernel Height x Kernel Width x Number of Input Channels x Number of Output Channels
Our Kernel already has a height and width of ksize so we'll stick with that for now. (ksize defined above)
The number of input channels should match the
number of channels on the image we want to convolve.
And for now, we just keep the same number of output channels as the input channels,
but we'll later see how this comes into play.
'''
# Reshape the 2d kernel to tensorflow's required 4d format: H x W x I x O
z_4d = tf.reshape(z_2d, [ksize, ksize, 1, 1])
print(z_4d.get_shape().as_list())
output: [100, 100, 1, 1]
| apache-2.0 |
qiudebo/13learn | other/stock/stockholm.py | 1 | 6150 |
import argparse
import option
import csv
import os
import datetime
import json
import re
import io
import requests
import timeit
import time
from multiprocessing.dummy import Pool as ThreadPool
from functools import partial
import pandas as pd
from pandas.io.excel import ExcelFile
import codecs
class Stockholm(object):
    """Fetch Shanghai/Shenzhen A-share quote data and export it to JSON/CSV.

    All configuration comes from the argparse namespace produced by the
    companion ``option`` module.
    """

    def __init__(self, args):
        """
        Args:
            args: argparse namespace with the CLI flags declared in option.py.
        """
        self.reload_data = args.reload_data
        self.gen_portfolio = args.gen_portfolio
        self.output_type = args.output_type
        self.charset = args.charset
        self.test_date_range = args.test_date_range
        self.start_date = args.start_date
        self.end_date = args.end_date
        self.target_date = args.target_date
        self.thread = args.thread
        # When the default store path is requested, expand it under $HOME.
        if args.store_path == './tmp/stockholm_export':
            self.export_folder = os.path.expanduser('~') + '/tmp/stockholm_export'
        else:
            self.export_folder = args.export_folder
        self.testfile_path = args.testfile_path
        self.methods = args.methods

        self.all_quotes_url = 'http://money.finance.sina.com.cn/d/api/openapi_proxy.php'
        self.export_file_name = 'stockholm_export'

        # Market indices that are always prepended to the symbol list.
        self.index_array = ['000001.SS', '399001.SZ', '000300.SS']
        self.sh000001 = {'Symbol': '000001.SS', 'Name': u'上证指数'}
        self.sz399001 = {'Symbol': '399001.SS', 'Name': u'深证指数'}
        self.sh000300 = {'Symbol': '399005.SZ', 'Name': u'中小板指数'}

    def data_load(self, start_date, end_date, output_types):
        """Load all quote symbols and dump the symbol/name table to ./shsz.csv."""
        all_quotes = self.load_all_quote_symbol()
        code = []
        name = []
        for quote in all_quotes:
            code.append(quote['Symbol'])
            name.append(quote['Name'])
        df = pd.DataFrame(list(zip(code, name)), columns=['symbol', 'name'])
        # utf_8_sig adds a BOM so Excel opens the Chinese names correctly.
        df.to_csv("./shsz.csv", encoding='utf_8_sig')
        print(str("total " + str(len(all_quotes))) + " quotes are loaded" + "\n")

    def load_all_quote_symbol(self):
        """Page through the Sina openapi proxy and collect all A-share symbols.

        Returns:
            list of dicts with keys 'Symbol' and 'Name' (indices first).
        """
        print("loading all quotes sysmbol start..." + "\n")
        start = timeit.default_timer()

        all_quotes = []
        all_quotes.append(self.sh000001)
        all_quotes.append(self.sz399001)
        all_quotes.append(self.sh000300)

        try:
            count = 1
            while count < 100:  # hard upper bound on the number of 500-item pages
                para_val = '[["hq","hs_a","",0,' + str(count) + ',500]]'
                r_params = {'__s': para_val}
                r = requests.get(self.all_quotes_url, params=r_params)
                if len(r.json()[0]['items']) == 0:
                    break
                for item in r.json()[0]['items']:
                    quote = {}
                    code = item[0]
                    name = item[2]
                    quote['Symbol'] = code
                    quote['Name'] = name
                    all_quotes.append(quote)
                count += 1
        except Exception as e:
            # Best-effort: keep whatever was loaded before the failure.
            print("Error:Failed to load all stock symbol..." + "\n")
            print(e)

        print("load_all_quote_symbol end ... time cost" + str(round(timeit.default_timer() - start)) + "s" + "\n")
        return all_quotes

    def load_all_quote_data(self, all_quotes, start_date, end_date):
        """Fetch per-quote daily data concurrently with a thread pool."""
        print("load_all_quote_data start ..." + "\n")
        start = timeit.default_timer()

        counter = []
        mapfunc = partial(self.load_quote_data, start_date=start_date,
                          end_date=end_date, is_retry=False, counter=counter)
        pool = ThreadPool(self.thread)
        pool.map(mapfunc, all_quotes)
        pool.close()
        pool.join()

        print("load_all_quote_data end ... time cost " + str(round(timeit.default_timer() - start)) + "s" + "\n")
        return all_quotes

    def load_quote_data(self, quote, start_date, end_date, is_retry, counter):
        """Fetch daily data for a single quote; retry once after 2s on failure."""
        print("load_quote_date begin ..." + "\n")
        if quote is not None and quote['Symbol'] is not None:
            try:
                url = 'http://data.gtimg.cn/flashdata/hushen/latest/daily/' + quote['Symbol'] + '.js'
                r = requests.get(url)
                print(r.url)
            except Exception:
                print("load_quote_date failed ... " + quote['Symbol'] + "/" + quote['Name'] + "\n")
                if not is_retry:
                    time.sleep(2)
                    # Single retry, flagged so we do not recurse forever.
                    self.load_quote_data(quote, start_date, end_date, True, counter)
        return quote

    def get_columns(self, quote):
        """Return the sorted CSV column names for a quote dict.

        Nested ``Data`` records are flattened to ``data.<field>`` columns;
        the field names are taken from the last data record.
        """
        columns = []
        if quote is not None:
            for key in quote.keys():
                if key == 'Data':
                    for data_key in quote['Data'][-1]:
                        columns.append("data." + data_key)
                else:
                    columns.append(key)
        columns.sort()
        return columns

    def data_export(self, all_quotes, export_type_array, file_name):
        """Export quotes to JSON and/or CSV files under self.export_folder."""
        start = timeit.default_timer()
        directory = self.export_folder
        if file_name is None:
            file_name = self.export_file_name
        if not os.path.exists(directory):
            os.makedirs(directory)

        if all_quotes is None or len(all_quotes) == 0:
            print("no data to export ..." + "\n")

        if 'json' in export_type_array:
            print("start export to JSON file ... \n")
            # Context manager guarantees the file is flushed and closed.
            with io.open(directory + "/" + file_name + '.json', 'w', encoding=self.charset) as f:
                json.dump(all_quotes, f, ensure_ascii=False)

        if 'csv' in export_type_array:
            print("start export to CSV file ...\n")
            columns = []
            if all_quotes is not None and len(all_quotes) > 0:
                columns = self.get_columns(all_quotes[0])
            with open(directory + "/" + file_name + '.csv', 'w', encoding=self.charset) as csv_file:
                writer = csv.writer(csv_file)
                writer.writerow(columns)
                for quote in all_quotes:
                    if 'Data' in quote:
                        for quote_data in quote['Data']:
                            try:
                                line = []
                                for column in columns:
                                    # BUG FIX: original read ``column,find('data.')`` (comma
                                    # instead of dot), which raised NameError at runtime.
                                    if column.find('data.') > -1:
                                        if column[5:] in quote_data:
                                            line.append(quote_data[column[5:]])
                                    else:
                                        line.append(quote[column])
                                writer.writerow(line)
                            except Exception as e:
                                print(e)
                                # BUG FIX: str + dict raised TypeError; stringify the quote.
                                print("write csv error " + str(quote))

        print("export is complete... time cost " + str(round(timeit.default_timer() - start)) + "s" + "\n")

    def run(self):
        """Entry point: translate the CLI options into actions."""
        output_types = []
        if self.output_type == "json":
            output_types.append("json")
        elif self.output_type == "csv":
            output_types.append("csv")
        elif self.output_type == "all":
            output_types = ['json', 'csv']

        if self.reload_data == 'Y':
            self.data_load(self.start_date, self.end_date, output_types)
if __name__=='__main__':
    # Entry point: parse the CLI flags declared in the companion ``option``
    # module, then build the exporter and run it.
    args = option.parser.parse_args()
    stockh = Stockholm(args)
    stockh.run()
    # Simple end-of-run marker.
    print("A")
| mit |
materialsproject/pymatgen | pymatgen/io/abinit/pseudos.py | 5 | 65306 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module provides objects describing the basic parameters of the
pseudopotentials used in Abinit, and a parser to instantiate pseudopotential objects..
"""
import abc
import collections
import logging
import os
import sys
from collections import OrderedDict, defaultdict, namedtuple
import numpy as np
from monty.collections import AttrDict, Namespace
# from monty.dev import deprecated
from monty.functools import lazy_property
from monty.itertools import iterator_from_slice
from monty.json import MontyDecoder, MSONable
from monty.os.path import find_exts
from monty.string import is_string, list_strings
from tabulate import tabulate
from pymatgen.core.periodic_table import Element
from pymatgen.core.xcfunc import XcFunc
from pymatgen.util.plotting import add_fig_kwargs, get_ax_fig_plt
from pymatgen.util.serialization import pmg_serialize
logger = logging.getLogger(__name__)
__all__ = [
"Pseudo",
"PseudoTable",
]
__author__ = "Matteo Giantomassi"
__version__ = "0.1"
__maintainer__ = "Matteo Giantomassi"
# Tools and helper functions.
def straceback():
    """Return a string with the current traceback followed by the exception class."""
    import traceback

    parts = (traceback.format_exc(), str(sys.exc_info()[0]))
    return "\n".join(parts)
def _read_nlines(filename, nlines):
    """
    Read at most nlines lines from file filename.
    If nlines is < 0, the entire file is read.
    """
    with open(filename, "r") as stream:
        if nlines < 0:
            return stream.readlines()

        collected = []
        for lineno, line in enumerate(stream):
            if lineno == nlines:
                break
            collected.append(line)

    return collected
# Spectroscopic notation: integer angular momentum -> letter.
_l2str = {
    0: "s",
    1: "p",
    2: "d",
    3: "f",
    4: "g",
    5: "h",
    6: "i",
}

# Inverse mapping: letter -> integer angular momentum.
_str2l = {v: k for k, v in _l2str.items()}


def l2str(l):
    """Convert the angular momentum l (int) to string."""
    if l in _l2str:
        return _l2str[l]
    return "Unknown angular momentum, received l = %s" % l


def str2l(s):
    """Convert a string to the angular momentum l (int)"""
    return _str2l[s]
class Pseudo(MSONable, metaclass=abc.ABCMeta):
    """
    Abstract base class defining the methods that must be
    implemented by the concrete pseudopotential sub-classes.
    """

    @classmethod
    def as_pseudo(cls, obj):
        """
        Convert obj into a pseudo. Accepts:

            * Pseudo object.
            * string defining a valid path.
        """
        return obj if isinstance(obj, cls) else cls.from_file(obj)

    @staticmethod
    def from_file(filename):
        """
        Build an instance of a concrete Pseudo subclass from filename.
        Note: the parser knows the concrete class that should be instantiated
        Client code should rely on the abstract interface provided by Pseudo.
        """
        return PseudoParser().parse(filename)

    def __eq__(self, other):
        # Equality is based on the md5 checksum of the file plus the concrete
        # class and the basic electronic parameters.
        if other is None:
            return False
        return (
            self.md5 == other.md5
            and self.__class__ == other.__class__
            and self.Z == other.Z
            and self.Z_val == other.Z_val
            and self.l_max == other.l_max
        )

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        try:
            return "<%s at %s>" % (
                self.__class__.__name__,
                os.path.relpath(self.filepath),
            )
        except Exception:
            # relpath can fail if the code is executed in daemon mode.
            return "<%s at %s>" % (self.__class__.__name__, self.filepath)

    def __str__(self):
        return self.to_string()

    def to_string(self, verbose=0):
        """String representation."""
        # pylint: disable=E1101
        lines = []
        app = lines.append
        app("<%s: %s>" % (self.__class__.__name__, self.basename))
        app(" summary: " + self.summary.strip())
        app(" number of valence electrons: %s" % self.Z_val)
        app(" maximum angular momentum: %s" % l2str(self.l_max))
        app(" angular momentum for local part: %s" % l2str(self.l_local))
        app(" XC correlation: %s" % self.xc)
        app(" supports spin-orbit: %s" % self.supports_soc)

        if self.isnc:
            app(" radius for non-linear core correction: %s" % self.nlcc_radius)

        if self.has_hints:
            for accuracy in ("low", "normal", "high"):
                hint = self.hint_for_accuracy(accuracy=accuracy)
                app(" hint for %s accuracy: %s" % (accuracy, str(hint)))

        return "\n".join(lines)

    @property
    @abc.abstractmethod
    def summary(self):
        """String summarizing the most important properties."""

    @property
    def filepath(self):
        """Absolute path to pseudopotential file."""
        # pylint: disable=E1101
        return os.path.abspath(self.path)

    @property
    def basename(self):
        """File basename."""
        # pylint: disable=E1101
        return os.path.basename(self.filepath)

    @property
    @abc.abstractmethod
    def Z(self):
        """The atomic number of the atom."""

    @property
    @abc.abstractmethod
    def Z_val(self):
        """Valence charge."""

    @property
    def type(self):
        """Type of pseudo."""
        return self.__class__.__name__

    @property
    def element(self):
        """Pymatgen :class:`Element`."""
        try:
            return Element.from_Z(self.Z)
        except (KeyError, IndexError):
            # Fall back to the integer part (handles fractional Z).
            return Element.from_Z(int(self.Z))

    @property
    def symbol(self):
        """Element symbol."""
        return self.element.symbol

    @property
    @abc.abstractmethod
    def l_max(self):
        """Maximum angular momentum."""

    @property
    @abc.abstractmethod
    def l_local(self):
        """Angular momentum used for the local part."""

    @property
    def isnc(self):
        """True if norm-conserving pseudopotential."""
        return isinstance(self, NcPseudo)

    @property
    def ispaw(self):
        """True if PAW pseudopotential."""
        return isinstance(self, PawPseudo)

    @lazy_property
    def md5(self):
        """MD5 hash value."""
        # if self.has_dojo_report and "md5" in self.dojo_report: return self.dojo_report["md5"]
        return self.compute_md5()

    def compute_md5(self):
        """Compute and return MD5 hash value."""
        # pylint: disable=E1101
        import hashlib

        with open(self.path, "rt") as fh:
            text = fh.read()
            # Hash is computed over the raw text of the pseudopotential file.
            m = hashlib.md5(text.encode("utf-8"))
            return m.hexdigest()

    @property
    @abc.abstractmethod
    def supports_soc(self):
        """
        True if the pseudo can be used in a calculation with spin-orbit coupling.
        Base classes should provide a concrete implementation that computes this value.
        """

    @pmg_serialize
    def as_dict(self, **kwargs):
        """Return dictionary for MSONable protocol."""
        # pylint: disable=E1101
        return dict(
            basename=self.basename,
            type=self.type,
            symbol=self.symbol,
            Z=self.Z,
            Z_val=self.Z_val,
            l_max=self.l_max,
            md5=self.md5,
            filepath=self.filepath,
            # xc=self.xc.as_dict(),
        )

    @classmethod
    def from_dict(cls, d):
        """Build instance from dictionary (MSONable protocol)."""
        new = cls.from_file(d["filepath"])

        # Consistency test based on md5
        if "md5" in d and d["md5"] != new.md5:
            raise ValueError(
                "The md5 found in file does not agree with the one in dict\n"
                "Received %s\nComputed %s" % (d["md5"], new.md5)
            )

        return new

    def as_tmpfile(self, tmpdir=None):
        """
        Copy the pseudopotential to a temporary a file and returns a new pseudopotential object.
        Useful for unit tests in which we have to change the content of the file.

        Args:
            tmpdir: If None, a new temporary directory is created and files are copied here
                else tmpdir is used.
        """
        # pylint: disable=E1101
        import shutil
        import tempfile

        tmpdir = tempfile.mkdtemp() if tmpdir is None else tmpdir
        new_path = os.path.join(tmpdir, self.basename)
        shutil.copy(self.filepath, new_path)

        # Copy dojoreport file if present.
        root, ext = os.path.splitext(self.filepath)
        djrepo = root + ".djrepo"
        if os.path.exists(djrepo):
            shutil.copy(djrepo, os.path.join(tmpdir, os.path.basename(djrepo)))

        # Build new object and copy dojo_report if present.
        new = self.__class__.from_file(new_path)
        if self.has_dojo_report:
            new.dojo_report = self.dojo_report.deepcopy()

        return new

    @property
    def has_dojo_report(self):
        """True if the pseudo has an associated `DOJO_REPORT` section."""
        # pylint: disable=E1101
        return hasattr(self, "dojo_report") and bool(self.dojo_report)

    @property
    def djrepo_path(self):
        """The path of the djrepo file. None if file does not exist."""
        # pylint: disable=E1101
        root, ext = os.path.splitext(self.filepath)
        path = root + ".djrepo"
        return path
        # if os.path.exists(path): return path
        # return None

    def hint_for_accuracy(self, accuracy="normal"):
        """
        Returns a :class:`Hint` object with the suggested value of ecut [Ha] and
        pawecutdg [Ha] for the given accuracy.
        ecut and pawecutdg are set to zero if no hint is available.

        Args:
            accuracy: ["low", "normal", "high"]
        """
        # pylint: disable=E1101
        if not self.has_dojo_report:
            return Hint(ecut=0.0, pawecutdg=0.0)

        # Get hints from dojoreport. Try first in hints then in ppgen_hints.
        if "hints" in self.dojo_report:
            return Hint.from_dict(self.dojo_report["hints"][accuracy])
        if "ppgen_hints" in self.dojo_report:
            return Hint.from_dict(self.dojo_report["ppgen_hints"][accuracy])
        return Hint(ecut=0.0, pawecutdg=0.0)

    @property
    def has_hints(self):
        """
        True if self provides hints on the cutoff energy.
        """
        for acc in ["low", "normal", "high"]:
            try:
                if self.hint_for_accuracy(acc) is None:
                    return False
            except KeyError:
                return False
        return True

    def open_pspsfile(self, ecut=20, pawecutdg=None):
        """
        Calls Abinit to compute the internal tables for the application of the
        pseudopotential part. Returns :class:`PspsFile` object providing methods
        to plot and analyze the data or None if file is not found or it's not readable.

        Args:
            ecut: Cutoff energy in Hartree.
            pawecutdg: Cutoff energy for the PAW double grid.
        """
        from abipy.abio.factories import gs_input
        from abipy.core.structure import Structure
        from abipy.electrons.psps import PspsFile
        from abipy.flowtk import AbinitTask

        # Build fake structure (one atom in a large cubic box).
        lattice = 10 * np.eye(3)
        structure = Structure(lattice, [self.element], coords=[[0, 0, 0]])

        if self.ispaw and pawecutdg is None:
            pawecutdg = ecut * 4
        inp = gs_input(
            structure,
            pseudos=[self],
            ecut=ecut,
            pawecutdg=pawecutdg,
            spin_mode="unpolarized",
            kppa=1,
        )
        # Add prtpsps = -1 to make Abinit print the PSPS.nc file and stop.
        inp["prtpsps"] = -1

        # Build temporary task and run it (ignore retcode because we don't exit cleanly)
        task = AbinitTask.temp_shell_task(inp)
        task.start_and_wait()

        filepath = task.outdir.has_abiext("_PSPS.nc")
        if not filepath:
            logger.critical("Cannot find PSPS.nc file in %s" % task.outdir)
            return None

        # Open the PSPS.nc file.
        try:
            return PspsFile(filepath)
        except Exception as exc:
            logger.critical("Exception while reading PSPS file at %s:\n%s" % (filepath, str(exc)))
            return None
class NcPseudo(metaclass=abc.ABCMeta):
    """
    Abstract class defining the methods that must be implemented
    by the concrete classes representing norm-conserving pseudopotentials.
    """

    @property
    @abc.abstractmethod
    def nlcc_radius(self):
        """
        Radius at which the core charge vanish (i.e. cut-off in a.u.).
        Returns 0.0 if nlcc is not used.
        """

    @property
    def has_nlcc(self):
        """True if the pseudo is generated with non-linear core correction."""
        return self.nlcc_radius > 0.0

    @property
    def rcore(self):
        """Radius of the pseudization sphere in a.u. (None if not available)."""
        return getattr(self, "_core", None)
class PawPseudo(metaclass=abc.ABCMeta):
    """
    Abstract class that defines the methods that must be implemented
    by the concrete classes representing PAW pseudopotentials.
    """

    @property
    @abc.abstractmethod
    def paw_radius(self):
        """Radius of the PAW sphere in a.u."""

    @property
    def rcore(self):
        """Alias of paw_radius."""
        return self.paw_radius
class AbinitPseudo(Pseudo):
    """
    An AbinitPseudo is a pseudopotential whose file contains an abinit header.
    """

    def __init__(self, path, header):
        """
        Args:
            path: Filename.
            header: :class:`AbinitHeader` instance.
        """
        self.path = path
        self.header = header
        self._summary = header.summary

        # Build xc from header.
        self.xc = XcFunc.from_abinit_ixc(header["pspxc"])

        for attr_name, desc in header.items():
            value = header.get(attr_name, None)

            # Hide these attributes since one should always use the public interface.
            setattr(self, "_" + attr_name, value)

    @property
    def summary(self):
        """Summary line reported in the ABINIT header."""
        return self._summary.strip()

    @property
    def Z(self):
        """Atomic number (``zatom`` field of the header)."""
        # pylint: disable=E1101
        return self._zatom

    @property
    def Z_val(self):
        """Valence charge (``zion`` field of the header)."""
        # pylint: disable=E1101
        return self._zion

    @property
    def l_max(self):
        """Maximum angular momentum (``lmax`` field of the header)."""
        # pylint: disable=E1101
        return self._lmax

    @property
    def l_local(self):
        """Angular momentum used for the local part (``lloc`` field of the header)."""
        # pylint: disable=E1101
        return self._lloc

    @property
    def supports_soc(self):
        """
        True if the pseudo can be used with spin-orbit coupling.
        Only ONCVPSP pseudos (pspcod == 8) are detected here via their
        ``extension_switch`` header field; all other codes return False.
        """
        # Treat ONCVPSP pseudos
        # pylint: disable=E1101
        if self._pspcod == 8:
            switch = self.header["extension_switch"]
            if switch in (0, 1):
                return False
            if switch in (2, 3):
                return True
            raise ValueError("Don't know how to handle extension_switch: %s" % switch)

        # TODO Treat HGH HGHK pseudos

        # As far as I know, other Abinit pseudos do not support SOC.
        return False
class NcAbinitPseudo(NcPseudo, AbinitPseudo):
    """Norm-conserving pseudopotential in the Abinit format."""

    @property
    def summary(self):
        """Summary line reported in the ABINIT header."""
        return self._summary.strip()

    @property
    def Z(self):
        """Atomic number (``zatom`` field of the header)."""
        # pylint: disable=E1101
        return self._zatom

    @property
    def Z_val(self):
        """Number of valence electrons."""
        # pylint: disable=E1101
        return self._zion

    @property
    def l_max(self):
        """Maximum angular momentum (``lmax`` field of the header)."""
        # pylint: disable=E1101
        return self._lmax

    @property
    def l_local(self):
        """Angular momentum used for the local part (``lloc`` field of the header)."""
        # pylint: disable=E1101
        return self._lloc

    @property
    def nlcc_radius(self):
        """Radius (a.u.) at which the model core charge vanishes (``rchrg`` field)."""
        # pylint: disable=E1101
        return self._rchrg
class PawAbinitPseudo(PawPseudo, AbinitPseudo):
    """Paw pseudopotential in the Abinit format."""

    @property
    def paw_radius(self):
        # presumably set from the header's r_cut field — confirm in the PAW header parser.
        # pylint: disable=E1101
        return self._r_cut

    # def orbitals(self):

    @property
    def supports_soc(self):
        """Always returns True for this format."""
        return True
class Hint:
    """
    Suggested value for the cutoff energy [Hartree units]
    and the cutoff energy for the dense grid (only for PAW pseudos).
    """

    def __init__(self, ecut, pawecutdg=None):
        # If no dense-grid cutoff is given, reuse ecut.
        if pawecutdg is None:
            pawecutdg = ecut
        self.ecut = ecut
        self.pawecutdg = pawecutdg

    def __str__(self):
        if self.pawecutdg is None:
            return "ecut: %s" % (self.ecut)
        return "ecut: %s, pawecutdg: %s" % (self.ecut, self.pawecutdg)

    @pmg_serialize
    def as_dict(self):
        """Return dictionary for MSONable protocol."""
        return {"ecut": self.ecut, "pawecutdg": self.pawecutdg}

    @classmethod
    def from_dict(cls, d):
        """Build instance from dictionary (MSONable protocol)."""
        kwargs = {k: v for k, v in d.items() if not k.startswith("@")}
        return cls(**kwargs)
def _dict_from_lines(lines, key_nums, sep=None):
    """
    Helper function to parse formatted text structured like:

        value1 value2 ... sep key1, key2 ...

    key_nums is a list giving the number of keys for each line. 0 if line should be skipped.
    sep is a string denoting the character that separates the keys from the value (None if
    no separator is present).

    Returns:
        dict{key1 : value1, key2 : value2, ...}

    Raises:
        ValueError if parsing fails.
    """
    if is_string(lines):
        lines = [lines]

    if not isinstance(key_nums, collections.abc.Iterable):
        key_nums = list(key_nums)

    if len(lines) != len(key_nums):
        err_msg = "lines = %s\n key_num = %s" % (str(lines), str(key_nums))
        raise ValueError(err_msg)

    kwargs = Namespace()

    for (i, nk) in enumerate(key_nums):
        if nk == 0:
            continue
        line = lines[i]

        tokens = [t.strip() for t in line.split()]
        values, keys = tokens[:nk], "".join(tokens[nk:])
        # Sanitize keys: in some cases we might get strings in the form: foo[,bar]
        # BUG FIX: str.replace returns a new string; the original code discarded
        # the result, so the brackets were never actually removed.
        keys = keys.replace("[", "").replace("]", "")
        keys = keys.split(",")

        if sep is not None:
            check = keys[0][0]
            if check != sep:
                raise ValueError("Expecting separator %s, got %s" % (sep, check))
            keys[0] = keys[0][1:]

        if len(values) != len(keys):
            msg = "line: %s\n len(keys) != len(value)\nkeys: %s\n values: %s" % (
                line,
                keys,
                values,
            )
            raise ValueError(msg)

        kwargs.update(zip(keys, values))

    return kwargs
class AbinitHeader(dict):
    """Dictionary whose keys can be also accessed as attributes."""

    def __getattr__(self, name):
        # Normal attribute lookup first (re-raises only if it fails).
        try:
            return super().__getattribute__(name)
        except AttributeError:
            pass
        # Fall back to the dictionary entries.
        try:
            return self[name]
        except KeyError as exc:
            raise AttributeError(str(exc))
def _int_from_str(string):
    """
    Convert string into integer.

    Raise:
        ValueError if string cannot be interpreted as a number.
    """
    float_num = float(string)
    int_num = int(float_num)
    if float_num == int_num:
        return int_num
    # Needed to handle pseudos with fractional charge: round to the nearest
    # integer.  BUG FIX: np.rint returns a float, so cast to int to honour
    # the function's contract of returning an integer.
    int_num = int(np.rint(float_num))
    logger.warning("Converting float %s to int %s" % (float_num, int_num))
    return int_num
class NcAbinitHeader(AbinitHeader):
    """The abinit header found in the NC pseudopotential files."""
    # Descriptor pairing a default value with the callable used to coerce
    # the raw string read from the file.
    _attr_desc = namedtuple("_attr_desc", "default astype")
    # Header variables: default None means "mandatory".
    _VARS = {
        # Mandatory
        "zatom": _attr_desc(None, _int_from_str),
        "zion": _attr_desc(None, float),
        "pspdat": _attr_desc(None, float),
        "pspcod": _attr_desc(None, int),
        "pspxc": _attr_desc(None, int),
        "lmax": _attr_desc(None, int),
        "lloc": _attr_desc(None, int),
        "r2well": _attr_desc(None, float),
        # NOTE(review): mmax is an integer count in the Abinit format but is
        # parsed as float here — presumably deliberate; confirm before changing.
        "mmax": _attr_desc(None, float),
        # Optional variables for non linear-core correction. HGH does not have it.
        "rchrg": _attr_desc(0.0, float),  # radius at which the core charge vanish (i.e. cut-off in a.u.)
        "fchrg": _attr_desc(0.0, float),
        "qchrg": _attr_desc(0.0, float),
    }
    del _attr_desc
    def __init__(self, summary, **kwargs):
        """
        Args:
            summary: First line of the pseudopotential file (free-text description).
            **kwargs: Header variables read from the file. Every mandatory key
                in ``_VARS`` must be provided; extra keys (e.g. extension_switch)
                are stored as-is.

        Raises:
            RuntimeError: if a mandatory variable is missing or a value cannot
                be converted to its expected type.
        """
        super().__init__()
        # pseudos generated by APE use llocal instead of lloc.
        if "llocal" in kwargs:
            kwargs["lloc"] = kwargs.pop("llocal")
        self.summary = summary.strip()
        for key, desc in NcAbinitHeader._VARS.items():
            default, astype = desc.default, desc.astype
            value = kwargs.pop(key, None)
            if value is None:
                # Missing key: fall back to the default, or fail if mandatory.
                value = default
                if default is None:
                    raise RuntimeError("Attribute %s must be specified" % key)
            else:
                try:
                    value = astype(value)
                except Exception:
                    raise RuntimeError("Conversion Error for key %s, value %s" % (key, value))
            self[key] = value
        # Add remaining arguments, e.g. extension_switch
        if kwargs:
            self.update(kwargs)
    @staticmethod
    def fhi_header(filename, ppdesc):
        """
        Parse the FHI abinit header (pspcod 6) and return a :class:`NcAbinitHeader`.

        Example:
        Troullier-Martins psp for element Sc Thu Oct 27 17:33:22 EDT 1994
        21.00000 3.00000 940714 zatom, zion, pspdat
        1 1 2 0 2001 .00000 pspcod,pspxc,lmax,lloc,mmax,r2well
        1.80626423934776 .22824404341771 1.17378968127746 rchrg,fchrg,qchrg
        """
        lines = _read_nlines(filename, 4)
        try:
            header = _dict_from_lines(lines[:4], [0, 3, 6, 3])
        except ValueError:
            # The last record with rchrg ... seems to be optional.
            header = _dict_from_lines(lines[:3], [0, 3, 6])
        summary = lines[0]
        return NcAbinitHeader(summary, **header)
    @staticmethod
    def hgh_header(filename, ppdesc):
        """
        Parse the HGH abinit header (pspcod 3/10) and return a :class:`NcAbinitHeader`.

        Example:
        Hartwigsen-Goedecker-Hutter psp for Ne, from PRB58, 3641 (1998)
        10 8 010605 zatom,zion,pspdat
        3 1 1 0 2001 0 pspcod,pspxc,lmax,lloc,mmax,r2well
        """
        lines = _read_nlines(filename, 3)
        header = _dict_from_lines(lines[:3], [0, 3, 6])
        summary = lines[0]
        return NcAbinitHeader(summary, **header)
    @staticmethod
    def gth_header(filename, ppdesc):
        """
        Parse the GTH abinit header (pspcod 2) and return a :class:`NcAbinitHeader`.
        Only the first three records are parsed; the projector section is ignored.

        Example:
        Goedecker-Teter-Hutter Wed May 8 14:27:44 EDT 1996
        1 1 960508 zatom,zion,pspdat
        2 1 0 0 2001 0. pspcod,pspxc,lmax,lloc,mmax,r2well
        0.2000000 -4.0663326 0.6778322 0 0 rloc, c1, c2, c3, c4
        0 0 0 rs, h1s, h2s
        0 0 rp, h1p
        1.36 .2 0.6 rcutoff, rloc
        """
        lines = _read_nlines(filename, 7)
        header = _dict_from_lines(lines[:3], [0, 3, 6])
        summary = lines[0]
        return NcAbinitHeader(summary, **header)
    @staticmethod
    def oncvpsp_header(filename, ppdesc):
        """
        Parse the ONCVPSP abinit header (pspcod 8) and return a :class:`NcAbinitHeader`.

        Example:
        Li ONCVPSP r_core= 2.01 3.02
        3.0000 3.0000 140504 zatom,zion,pspd
        8 2 1 4 600 0 pspcod,pspxc,lmax,lloc,mmax,r2well
        5.99000000 0.00000000 0.00000000 rchrg fchrg qchrg
        2 2 0 0 0 nproj
        0 extension_switch
        0 -2.5000025868368D+00 -1.2006906995331D+00
        1 0.0000000000000D+00 0.0000000000000D+00 0.0000000000000D+00
        2 1.0000000000000D-02 4.4140499497377D-02 1.9909081701712D-02
        """
        lines = _read_nlines(filename, 6)
        header = _dict_from_lines(lines[:3], [0, 3, 6])
        summary = lines[0]
        # Rename pspd to pspdat (the key expected by _VARS).
        header.update({"pspdat": header["pspd"]})
        header.pop("pspd")
        # Read extension switch
        header["extension_switch"] = int(lines[5].split()[0])
        return NcAbinitHeader(summary, **header)
    @staticmethod
    def tm_header(filename, ppdesc):
        """
        Parse the TM abinit header (pspcod 1/4) and return a :class:`NcAbinitHeader`.
        The projector section (two lines per angular momentum channel) is
        parsed to locate the final nlcc record.

        Example:
        Troullier-Martins psp for element Fm Thu Oct 27 17:28:39 EDT 1994
        100.00000 14.00000 940714 zatom, zion, pspdat
        1 1 3 0 2001 .00000 pspcod,pspxc,lmax,lloc,mmax,r2well
        0 4.085 6.246 0 2.8786493 l,e99.0,e99.9,nproj,rcpsp
        .00000000 .0000000000 .0000000000 .00000000 rms,ekb1,ekb2,epsatm
        1 3.116 4.632 1 3.4291849 l,e99.0,e99.9,nproj,rcpsp
        .00000000 .0000000000 .0000000000 .00000000 rms,ekb1,ekb2,epsatm
        2 4.557 6.308 1 2.1865358 l,e99.0,e99.9,nproj,rcpsp
        .00000000 .0000000000 .0000000000 .00000000 rms,ekb1,ekb2,epsatm
        3 23.251 29.387 1 2.4776730 l,e99.0,e99.9,nproj,rcpsp
        .00000000 .0000000000 .0000000000 .00000000 rms,ekb1,ekb2,epsatm
        3.62474762267880 .07409391739104 3.07937699839200 rchrg,fchrg,qchrg
        """
        lines = _read_nlines(filename, -1)
        header = []
        for lineno, line in enumerate(lines):
            header.append(line)
            if lineno == 2:
                # Read lmax.
                tokens = line.split()
                pspcod, pspxc, lmax, lloc = map(int, tokens[:4])
                mmax, r2well = map(float, tokens[4:6])
                # if tokens[-1].strip() != "pspcod,pspxc,lmax,lloc,mmax,r2well":
                # raise RuntimeError("%s: Invalid line\n %s" % (filename, line))
                # After the third record, the rest of the file is the
                # projector section; drop the already-consumed lines.
                lines = lines[3:]
                break
        # TODO
        # Parse the section with the projectors.
        # 0 4.085 6.246 0 2.8786493 l,e99.0,e99.9,nproj,rcpsp
        # .00000000 .0000000000 .0000000000 .00000000 rms,ekb1,ekb2,epsatm
        projectors = OrderedDict()
        # Each angular momentum channel occupies two consecutive lines:
        # even index = first line, odd index = second line of the pair.
        for idx in range(2 * (lmax + 1)):
            line = lines[idx]
            if idx % 2 == 0:
                proj_info = [
                    line,
                ]
            if idx % 2 == 1:
                proj_info.append(line)
                d = _dict_from_lines(proj_info, [5, 4])
                projectors[int(d["l"])] = d
        # Add the last line with info on nlcc.
        # (idx intentionally retains its value from the loop above.)
        header.append(lines[idx + 1])
        summary = header[0]
        # header is rebound from list-of-lines to the parsed dict here.
        header = _dict_from_lines(header, [0, 3, 6, 3])
        return NcAbinitHeader(summary, **header)
class PawAbinitHeader(AbinitHeader):
    """The abinit header found in the PAW pseudopotential files."""
    # Descriptor pairing a default value with the callable used to coerce
    # the raw string read from the file.
    _attr_desc = namedtuple("_attr_desc", "default astype")
    # Header variables: default None means "mandatory". Unlike the NC
    # header, *all* keys are mandatory and no extra keys are tolerated.
    _VARS = {
        "zatom": _attr_desc(None, _int_from_str),
        "zion": _attr_desc(None, float),
        "pspdat": _attr_desc(None, float),
        "pspcod": _attr_desc(None, int),
        "pspxc": _attr_desc(None, int),
        "lmax": _attr_desc(None, int),
        "lloc": _attr_desc(None, int),
        "mmax": _attr_desc(None, int),
        "r2well": _attr_desc(None, float),
        "pspfmt": _attr_desc(None, str),
        "creatorID": _attr_desc(None, int),
        "basis_size": _attr_desc(None, int),
        "lmn_size": _attr_desc(None, int),
        "orbitals": _attr_desc(None, list),
        "number_of_meshes": _attr_desc(None, int),
        "r_cut": _attr_desc(None, float),  # r_cut(PAW) in the header
        "shape_type": _attr_desc(None, int),
        "rshape": _attr_desc(None, float),
    }
    del _attr_desc
    def __init__(self, summary, **kwargs):
        """
        Args:
            summary: First line of the pseudopotential file (free-text description).
            **kwargs: Header variables read from the file; every key in
                ``_VARS`` must be provided and no unknown keys are accepted.

        Raises:
            RuntimeError: if a mandatory variable is missing, a value cannot
                be converted, or unknown keys remain after parsing.
        """
        super().__init__()
        self.summary = summary.strip()
        for key, desc in self._VARS.items():
            default, astype = desc.default, desc.astype
            value = kwargs.pop(key, None)
            if value is None:
                # Missing key: fall back to the default, or fail if mandatory.
                value = default
                if default is None:
                    raise RuntimeError("Attribute %s must be specified" % key)
            else:
                try:
                    value = astype(value)
                except Exception:
                    raise RuntimeError("Conversion Error for key %s, with value %s" % (key, value))
            self[key] = value
        # Stricter than NcAbinitHeader: leftover keys are an error here.
        if kwargs:
            raise RuntimeError("kwargs should be empty but got %s" % str(kwargs))
    @staticmethod
    def paw_header(filename, ppdesc):
        """
        Parse the PAW abinit header and return a :class:`PawAbinitHeader`.
        Supports the paw3/paw4/paw5 text formats. Examples:
        Paw atomic data for element Ni - Generated by AtomPAW (N. Holzwarth) + AtomPAW2Abinit v3.0.5
        28.000 18.000 20061204 : zatom,zion,pspdat
        7 7 2 0 350 0. : pspcod,pspxc,lmax,lloc,mmax,r2well
        paw3 1305 : pspfmt,creatorID
        5 13 : basis_size,lmn_size
        0 0 1 1 2 : orbitals
        3 : number_of_meshes
        1 3 350 1.1803778368E-05 3.5000000000E-02 : mesh 1, type,size,rad_step[,log_step]
        2 1 921 2.500000000000E-03 : mesh 2, type,size,rad_step[,log_step]
        3 3 391 1.1803778368E-05 3.5000000000E-02 : mesh 3, type,size,rad_step[,log_step]
        2.3000000000 : r_cut(SPH)
        2 0.
        Another format:
        C (US d-loc) - PAW data extracted from US-psp (D.Vanderbilt) - generated by USpp2Abinit v2.3.0
        6.000 4.000 20090106 : zatom,zion,pspdat
        7 11 1 0 560 0. : pspcod,pspxc,lmax,lloc,mmax,r2well
        paw4 2230 : pspfmt,creatorID
        4 8 : basis_size,lmn_size
        0 0 1 1 : orbitals
        5 : number_of_meshes
        1 2 560 1.5198032759E-04 1.6666666667E-02 : mesh 1, type,size,rad_step[,log_step]
        2 2 556 1.5198032759E-04 1.6666666667E-02 : mesh 2, type,size,rad_step[,log_step]
        3 2 576 1.5198032759E-04 1.6666666667E-02 : mesh 3, type,size,rad_step[,log_step]
        4 2 666 1.5198032759E-04 1.6666666667E-02 : mesh 4, type,size,rad_step[,log_step]
        5 2 673 1.5198032759E-04 1.6666666667E-02 : mesh 5, type,size,rad_step[,log_step]
        1.5550009124 : r_cut(PAW)
        3 0. : shape_type,rshape
        Yet another one:
        Paw atomic data for element Si - Generated by atompaw v3.0.1.3 & AtomPAW2Abinit v3.3.1
        14.000 4.000 20120814 : zatom,zion,pspdat
        7 11 1 0 663 0. : pspcod,pspxc,lmax,lloc,mmax,r2well
        paw5 1331 : pspfmt,creatorID
        4 8 : basis_size,lmn_size
        0 0 1 1 : orbitals
        5 : number_of_meshes
        1 2 663 8.2129718540404674E-04 1.1498160595656655E-02 : mesh 1, type,size,rad_step[,log_step]
        2 2 658 8.2129718540404674E-04 1.1498160595656655E-02 : mesh 2, type,size,rad_step[,log_step]
        3 2 740 8.2129718540404674E-04 1.1498160595656655E-02 : mesh 3, type,size,rad_step[,log_step]
        4 2 819 8.2129718540404674E-04 1.1498160595656655E-02 : mesh 4, type,size,rad_step[,log_step]
        5 2 870 8.2129718540404674E-04 1.1498160595656655E-02 : mesh 5, type,size,rad_step[,log_step]
        1.5669671236 : r_cut(PAW)
        2 0. : shape_type,rshape
        """
        supported_formats = ["paw3", "paw4", "paw5"]
        if ppdesc.format not in supported_formats:
            raise NotImplementedError("format %s not in %s" % (ppdesc.format, supported_formats))
        lines = _read_nlines(filename, -1)
        summary = lines[0]
        # First five records: summary + four "values : keys" lines.
        header = _dict_from_lines(lines[:5], [0, 3, 6, 2, 2], sep=":")
        lines = lines[5:]
        # TODO
        # Parse orbitals and number of meshes.
        header["orbitals"] = [int(t) for t in lines[0].split(":")[0].split()]
        header["number_of_meshes"] = num_meshes = int(lines[1].split(":")[0])
        # print filename, header
        # Skip the mesh definition lines; only their count matters here.
        lines = lines[2 + num_meshes :]
        # for midx in range(num_meshes):
        # l = midx + 1
        # print lines[0]
        header["r_cut"] = float(lines[0].split(":")[0])
        # print lines[1]
        header.update(_dict_from_lines(lines[1], [2], sep=":"))
        # print("PAW header\n", header)
        return PawAbinitHeader(summary, **header)
class PseudoParserError(Exception):
    """Base error class for all exceptions raised by :class:`PseudoParser`."""
class PseudoParser:
    """
    Responsible for parsing pseudopotential files and returning pseudopotential objects.
    Usage::
        pseudo = PseudoParser().parse("filename")
    """
    Error = PseudoParserError
    # Supported values of pspcod
    ppdesc = namedtuple("ppdesc", "pspcod name psp_type format")
    # TODO Recheck
    _PSPCODES = OrderedDict(
        {
            1: ppdesc(1, "TM", "NC", None),
            2: ppdesc(2, "GTH", "NC", None),
            3: ppdesc(3, "HGH", "NC", None),
            4: ppdesc(4, "Teter", "NC", None),
            # 5: ppdesc(5, "NC", , None),
            6: ppdesc(6, "FHI", "NC", None),
            # pspcod field fixed: the descriptor for code 7 used to carry 6.
            7: ppdesc(7, "PAW_abinit_text", "PAW", None),
            8: ppdesc(8, "ONCVPSP", "NC", None),
            10: ppdesc(10, "HGHK", "NC", None),
        }
    )
    del ppdesc
    # renumber functionals from oncvpsp todo confrim that 3 is 2
    # _FUNCTIONALS = {1: {'n': 4, 'name': 'Wigner'},
    #                 2: {'n': 5, 'name': 'HL'},
    #                 3: {'n': 2, 'name': 'PWCA'},
    #                 4: {'n': 11, 'name': 'PBE'}}
    def __init__(self):
        # List of files that have been parsed successfully.
        self._parsed_paths = []
        # List of files that could not be parsed.
        self._wrong_paths = []
    def scan_directory(self, dirname, exclude_exts=(), exclude_fnames=()):
        """
        Analyze the files contained in directory dirname.
        Args:
            dirname: directory path
            exclude_exts: list of file extensions that should be skipped
                (with or without the leading dot).
            exclude_fnames: list of file names that should be skipped.
        Returns:
            List of pseudopotential objects.
        """
        # Normalize the extensions into a new list: never mutate the caller's
        # sequence (the default is a tuple, which would not even support
        # item assignment).
        exts = []
        for ext in exclude_exts:
            ext = ext.strip()
            exts.append(ext if ext.startswith(".") else "." + ext)
        # Exclude files depending on the extension.
        paths = []
        for fname in os.listdir(dirname):
            root, ext = os.path.splitext(fname)
            path = os.path.join(dirname, fname)
            if ext in exts or fname in exclude_fnames or fname.startswith(".") or not os.path.isfile(path):
                continue
            paths.append(path)
        pseudos = []
        for path in paths:
            # Parse the file and generate the pseudo.
            try:
                pseudo = self.parse(path)
            except Exception:
                pseudo = None
            if pseudo is not None:
                pseudos.append(pseudo)
                # append, not extend: extend() with a string would add the
                # individual characters of the path to the list.
                self._parsed_paths.append(path)
            else:
                self._wrong_paths.append(path)
        return pseudos
    def read_ppdesc(self, filename):
        """
        Read the pseudopotential descriptor from file filename.
        Returns:
            Pseudopotential descriptor. None if filename is not a valid pseudopotential file.
        Raises:
            `PseudoParserError` if fileformat is not supported.
        """
        if filename.endswith(".xml"):
            raise self.Error("XML pseudo not supported yet")
        # Assume file with the abinit header.
        lines = _read_nlines(filename, 80)
        for lineno, line in enumerate(lines):
            # The third record carries pspcod, pspxc, ...
            if lineno == 2:
                try:
                    tokens = line.split()
                    pspcod, pspxc = map(int, tokens[:2])
                except Exception:
                    msg = "%s: Cannot parse pspcod, pspxc in line\n %s" % (
                        filename,
                        line,
                    )
                    logger.critical(msg)
                    return None
                # if tokens[-1].strip().replace(" ","") not in ["pspcod,pspxc,lmax,lloc,mmax,r2well",
                #                                               "pspcod,pspxc,lmax,llocal,mmax,r2well"]:
                #     raise self.Error("%s: Invalid line\n %s" % (filename, line))
                #     return None
                if pspcod not in self._PSPCODES:
                    raise self.Error("%s: Don't know how to handle pspcod %s\n" % (filename, pspcod))
                ppdesc = self._PSPCODES[pspcod]
                if pspcod == 7:
                    # PAW -> need to know the format pspfmt, read from the next record.
                    tokens = lines[lineno + 1].split()
                    pspfmt, creatorID = tokens[:2]
                    # if tokens[-1].strip() != "pspfmt,creatorID":
                    #     raise self.Error("%s: Invalid line\n %s" % (filename, line))
                    #     return None
                    ppdesc = ppdesc._replace(format=pspfmt)
                return ppdesc
        return None
    def parse(self, filename):
        """
        Read and parse a pseudopotential file. Main entry point for client code.
        Returns:
            pseudopotential object or None if filename is not a valid pseudopotential file.
        """
        path = os.path.abspath(filename)
        # Only PAW supports XML at present.
        if filename.endswith(".xml"):
            return PawXmlSetup(path)
        ppdesc = self.read_ppdesc(path)
        if ppdesc is None:
            logger.critical("Cannot find ppdesc in %s" % path)
            return None
        psp_type = ppdesc.psp_type
        # Dispatch to the header parser matching the descriptor name.
        parsers = {
            "FHI": NcAbinitHeader.fhi_header,
            "GTH": NcAbinitHeader.gth_header,
            "TM": NcAbinitHeader.tm_header,
            "Teter": NcAbinitHeader.tm_header,
            "HGH": NcAbinitHeader.hgh_header,
            "HGHK": NcAbinitHeader.hgh_header,
            "ONCVPSP": NcAbinitHeader.oncvpsp_header,
            "PAW_abinit_text": PawAbinitHeader.paw_header,
        }
        try:
            header = parsers[ppdesc.name](path, ppdesc)
        except Exception:
            raise self.Error(path + ":\n" + straceback())
        if psp_type == "NC":
            pseudo = NcAbinitPseudo(path, header)
        elif psp_type == "PAW":
            pseudo = PawAbinitPseudo(path, header)
        else:
            raise NotImplementedError("psp_type not in [NC, PAW]")
        return pseudo
# TODO use RadialFunction from pseudo_dojo.
class RadialFunction(namedtuple("RadialFunction", "mesh values")):
    """Immutable (mesh, values) pair describing a function sampled on a radial mesh."""
class PawXmlSetup(Pseudo, PawPseudo):
    """
    Setup class for PawXml: parses a PAW dataset in the XML format and
    exposes densities, partial waves and projectors as RadialFunction
    objects.
    """
    def __init__(self, filepath):
        """
        :param filepath: path to the PAW XML file.
        """
        # pylint: disable=E1101
        self.path = os.path.abspath(filepath)
        # Get the XML root (this trick is used so that the object is pickleable).
        root = self.root
        # Get the version of the XML format
        self.paw_setup_version = root.get("version")
        # Info on the atom.
        atom_attrib = root.find("atom").attrib
        # self._symbol = atom_attrib["symbol"]
        self._zatom = int(float(atom_attrib["Z"]))
        self.core, self.valence = map(float, [atom_attrib["core"], atom_attrib["valence"]])
        # Build xc from header.
        xc_info = root.find("xc_functional").attrib
        self.xc = XcFunc.from_type_name(xc_info["type"], xc_info["name"])
        # Old XML files do not define this field!
        # In this case we set the PAW radius to None.
        # self._paw_radius = float(root.find("PAW_radius").attrib["rpaw"])
        # self.ae_energy = {k: float(v) for k,v in root.find("ae_energy").attrib.items()}
        pawr_element = root.find("PAW_radius")
        self._paw_radius = None
        if pawr_element is not None:
            self._paw_radius = float(pawr_element.attrib["rpaw"])
        # <valence_states>
        #  <state n="2" l="0" f="2"  rc="1.10" e="-0.6766" id="N-2s"/>
        #  <state n="2" l="1" f="3"  rc="1.10" e="-0.2660" id="N-2p"/>
        #  <state       l="0"        rc="1.10" e=" 0.3234" id="N-s1"/>
        #  <state       l="1"        rc="1.10" e=" 0.7340" id="N-p1"/>
        #  <state       l="2"        rc="1.10" e=" 0.0000" id="N-d1"/>
        # </valence_states>
        #
        # The valence_states element contains several state elements.
        # For this setup, the first two lines describe bound eigenstates
        # with occupation numbers and principal quantum numbers.
        # Notice, that the three additional unbound states should have no f and n attributes.
        # In this way, we know that only the first two bound states (with f and n attributes)
        # should be used for constructing an initial guess for the wave functions.
        self.valence_states = OrderedDict()
        for node in root.find("valence_states"):
            attrib = AttrDict(node.attrib)
            assert attrib.id not in self.valence_states
            self.valence_states[attrib.id] = attrib
        # print(self.valence_states)
        # Parse the radial grids
        self.rad_grids = {}
        for node in root.findall("radial_grid"):
            grid_params = node.attrib
            gid = grid_params["id"]
            assert gid not in self.rad_grids
            self.rad_grids[gid] = self._eval_grid(grid_params)
    def __getstate__(self):
        """
        Return state is pickled as the contents for the instance.
        In this case we just remove the XML root element since Element objects cannot be pickled.
        """
        # NOTE(review): assumes the cached XML root is stored under "_root" —
        # verify this matches lazy_property's cache key, otherwise the
        # Element may still end up in the pickled state.
        return {k: v for k, v in self.__dict__.items() if k not in ["_root"]}
    @lazy_property
    def root(self):
        """
        Root element of the XML tree (parsed lazily and cached).
        """
        # cElementTree was removed in Python 3.9; ElementTree transparently
        # uses the C accelerator when available.
        from xml.etree import ElementTree as Et
        tree = Et.parse(self.filepath)
        return tree.getroot()
    @property
    def Z(self):
        """Atomic number."""
        return self._zatom
    @property
    def Z_val(self):
        """Number of valence electrons."""
        return self.valence
    # FIXME
    @property
    def l_max(self):
        """Maximum angular momentum."""
        return None
    @property
    def l_local(self):
        """Angular momentum used for the local part."""
        return None
    @property
    def summary(self):
        """String summarizing the most important properties."""
        return ""
    @property
    def paw_radius(self):
        """PAW radius in a.u., or None for old XML files that do not define it."""
        return self._paw_radius
    @property
    def supports_soc(self):
        """
        Here I assume that the ab-initio code can treat the SOC within the on-site approximation
        """
        return True
    @staticmethod
    def _eval_grid(grid_params):
        """
        This function receives a dictionary with the parameters defining the
        radial mesh and returns a `ndarray` with the mesh
        """
        eq = grid_params.get("eq").replace(" ", "")
        istart, iend = int(grid_params.get("istart")), int(grid_params.get("iend"))
        indices = list(range(istart, iend + 1))
        # Each supported "eq" string encodes a different mesh equation.
        if eq == "r=a*exp(d*i)":
            a, d = float(grid_params["a"]), float(grid_params["d"])
            mesh = [a * np.exp(d * i) for i in indices]
        elif eq == "r=a*i/(n-i)":
            a, n = float(grid_params["a"]), float(grid_params["n"])
            mesh = [a * i / (n - i) for i in indices]
        elif eq == "r=a*(exp(d*i)-1)":
            a, d = float(grid_params["a"]), float(grid_params["d"])
            mesh = [a * (np.exp(d * i) - 1.0) for i in indices]
        elif eq == "r=d*i":
            d = float(grid_params["d"])
            mesh = [d * i for i in indices]
        elif eq == "r=(i/n+a)^5/a-a^4":
            a, n = float(grid_params["a"]), float(grid_params["n"])
            mesh = [(i / n + a) ** 5 / a - a ** 4 for i in indices]
        else:
            raise ValueError("Unknown grid type: %s" % eq)
        return np.array(mesh)
    def _parse_radfunc(self, func_name):
        """Parse the first occurrence of func_name in the XML file."""
        # pylint: disable=E1101
        node = self.root.find(func_name)
        grid = node.attrib["grid"]
        values = np.array([float(s) for s in node.text.split()])
        return self.rad_grids[grid], values, node.attrib
    def _parse_all_radfuncs(self, func_name):
        """Parse all the nodes with tag func_name in the XML file."""
        # pylint: disable=E1101
        for node in self.root.findall(func_name):
            grid = node.attrib["grid"]
            values = np.array([float(s) for s in node.text.split()])
            yield self.rad_grids[grid], values, node.attrib
    @lazy_property
    def ae_core_density(self):
        """The all-electron radial density."""
        mesh, values, attrib = self._parse_radfunc("ae_core_density")
        return RadialFunction(mesh, values)
    @lazy_property
    def pseudo_core_density(self):
        """The pseudized radial density."""
        mesh, values, attrib = self._parse_radfunc("pseudo_core_density")
        return RadialFunction(mesh, values)
    @lazy_property
    def ae_partial_waves(self):
        """Dictionary with the AE partial waves indexed by state."""
        ae_partial_waves = OrderedDict()
        for mesh, values, attrib in self._parse_all_radfuncs("ae_partial_wave"):
            state = attrib["state"]
            # val_state = self.valence_states[state]
            ae_partial_waves[state] = RadialFunction(mesh, values)
        return ae_partial_waves
    @property
    def pseudo_partial_waves(self):
        """Dictionary with the pseudo partial waves indexed by state."""
        pseudo_partial_waves = OrderedDict()
        for (mesh, values, attrib) in self._parse_all_radfuncs("pseudo_partial_wave"):
            state = attrib["state"]
            # val_state = self.valence_states[state]
            pseudo_partial_waves[state] = RadialFunction(mesh, values)
        return pseudo_partial_waves
    @lazy_property
    def projector_functions(self):
        """Dictionary with the PAW projectors indexed by state."""
        projector_functions = OrderedDict()
        for (mesh, values, attrib) in self._parse_all_radfuncs("projector_function"):
            state = attrib["state"]
            # val_state = self.valence_states[state]
            projector_functions[state] = RadialFunction(mesh, values)
        return projector_functions
    def yield_figs(self, **kwargs):  # pragma: no cover
        """
        This function *generates* a predefined list of matplotlib figures with minimal input from the user.
        """
        yield self.plot_densities(title="PAW densities", show=False)
        yield self.plot_waves(title="PAW waves", show=False)
        yield self.plot_projectors(title="PAW projectors", show=False)
        # yield self.plot_potentials(title="potentials", show=False)
    @add_fig_kwargs
    def plot_densities(self, ax=None, **kwargs):
        """
        Plot the PAW densities.
        Args:
            ax: matplotlib :class:`Axes` or None if a new figure should be created.
        Returns:
            `matplotlib` figure
        """
        ax, fig, plt = get_ax_fig_plt(ax)
        ax.grid(True)
        ax.set_xlabel("r [Bohr]")
        # ax.set_ylabel('density')
        for i, den_name in enumerate(["ae_core_density", "pseudo_core_density"]):
            rden = getattr(self, den_name)
            # i == 0 is the all-electron density n_c, i == 1 the pseudized
            # density \tilde{n}_c (labels were previously swapped).
            label = "$n_c$" if i == 0 else r"$\tilde{n}_c$"
            ax.plot(rden.mesh, rden.mesh * rden.values, label=label, lw=2)
        ax.legend(loc="best")
        return fig
    @add_fig_kwargs
    def plot_waves(self, ax=None, fontsize=12, **kwargs):
        """
        Plot the AE and the pseudo partial waves.
        Args:
            ax: matplotlib :class:`Axes` or None if a new figure should be created.
            fontsize: fontsize for legends and titles
        Returns: `matplotlib` figure
        """
        # pylint: disable=E1101
        ax, fig, plt = get_ax_fig_plt(ax)
        ax.grid(True)
        ax.set_xlabel("r [Bohr]")
        ax.set_ylabel(r"$r\phi,\, r\tilde\phi\, [Bohr]^{-\frac{1}{2}}$")
        # ax.axvline(x=self.paw_radius, linewidth=2, color='k', linestyle="--")
        # ax.annotate("$r_c$", xy=(self.paw_radius + 0.1, 0.1))
        for state, rfunc in self.pseudo_partial_waves.items():
            ax.plot(rfunc.mesh, rfunc.mesh * rfunc.values, lw=2, label="PS-WAVE: " + state)
        for state, rfunc in self.ae_partial_waves.items():
            ax.plot(rfunc.mesh, rfunc.mesh * rfunc.values, lw=2, label="AE-WAVE: " + state)
        ax.legend(loc="best", shadow=True, fontsize=fontsize)
        return fig
    @add_fig_kwargs
    def plot_projectors(self, ax=None, fontsize=12, **kwargs):
        """
        Plot the PAW projectors.
        Args:
            ax: matplotlib :class:`Axes` or None if a new figure should be created.
        Returns: `matplotlib` figure
        """
        # pylint: disable=E1101
        ax, fig, plt = get_ax_fig_plt(ax)
        ax.grid(True)
        ax.set_xlabel("r [Bohr]")
        ax.set_ylabel(r"$r\tilde p\, [Bohr]^{-\frac{1}{2}}$")
        # ax.axvline(x=self.paw_radius, linewidth=2, color='k', linestyle="--")
        # ax.annotate("$r_c$", xy=(self.paw_radius + 0.1, 0.1))
        for state, rfunc in self.projector_functions.items():
            ax.plot(rfunc.mesh, rfunc.mesh * rfunc.values, label="TPROJ: " + state)
        ax.legend(loc="best", shadow=True, fontsize=fontsize)
        return fig
    # @add_fig_kwargs
    # def plot_potentials(self, **kwargs):
    #    """
    #    ================  ==============================================================
    #    kwargs            Meaning
    #    ================  ==============================================================
    #    title             Title of the plot (Default: None).
    #    show              True to show the figure (Default).
    #    savefig           'abc.png' or 'abc.eps' to save the figure to a file.
    #    ================  ==============================================================
    #    Returns:
    #        `matplotlib` figure
    #    """
    #    title = kwargs.pop("title", "Potentials")
    #    show = kwargs.pop("show", True)
    #    savefig = kwargs.pop("savefig", None)
    #    import matplotlib.pyplot as plt
    #    fig = plt.figure()
    #    ax = fig.add_subplot(1,1,1)
    #    ax.grid(True)
    #    ax.set_xlabel('r [Bohr]')
    #    ax.set_ylabel('density')
    #    ax.axvline(x=self.paw_radius, linewidth=2, color='k', linestyle="--")
    #    ax.annotate("$r_c$", xy=(self.paw_radius + 0.1, 0.1))
    #    for state, rfunc in self.potentials.items():
    #        ax.plot(rfunc.mesh, rfunc.values, label="TPROJ: " + state)
    #    ax.legend(loc="best")
    #    if title is not None: fig.suptitle(title)
    #    if show: plt.show()
    #    if savefig: fig.savefig(savefig)
    #    return fig
class PseudoTable(collections.abc.Sequence, MSONable, metaclass=abc.ABCMeta):
"""
Define the pseudopotentials from the element table.
Individidual elements are accessed by name, symbol or atomic number.
For example, the following all retrieve iron:
print elements[26]
Fe
print elements.Fe
Fe
print elements.symbol('Fe')
Fe
print elements.name('iron')
Fe
print elements.isotope('Fe')
Fe
"""
@classmethod
def as_table(cls, items):
"""
Return an instance of :class:`PseudoTable` from the iterable items.
"""
if isinstance(items, cls):
return items
return cls(items)
@classmethod
def from_dir(cls, top, exts=None, exclude_dirs="_*"):
"""
Find all pseudos in the directory tree starting from top.
Args:
top: Top of the directory tree
exts: List of files extensions. if exts == "all_files"
we try to open all files in top
exclude_dirs: Wildcard used to exclude directories.
return: :class:`PseudoTable` sorted by atomic number Z.
"""
pseudos = []
if exts == "all_files":
for f in [os.path.join(top, fn) for fn in os.listdir(top)]:
if os.path.isfile(f):
try:
p = Pseudo.from_file(f)
if p:
pseudos.append(p)
else:
logger.info("Skipping file %s" % f)
except Exception:
logger.info("Skipping file %s" % f)
if not pseudos:
logger.warning("No pseudopotentials parsed from folder %s" % top)
return None
logger.info("Creating PseudoTable with %i pseudopotentials" % len(pseudos))
else:
if exts is None:
exts = ("psp8",)
for p in find_exts(top, exts, exclude_dirs=exclude_dirs):
try:
pseudos.append(Pseudo.from_file(p))
except Exception as exc:
logger.critical("Error in %s:\n%s" % (p, exc))
return cls(pseudos).sort_by_z()
def __init__(self, pseudos):
"""
Args:
pseudos: List of pseudopotentials or filepaths
"""
# Store pseudos in a default dictionary with z as key.
# Note that we can have more than one pseudo for given z.
# hence the values are lists of pseudos.
if not isinstance(pseudos, collections.abc.Iterable):
pseudos = [pseudos]
if len(pseudos) and is_string(pseudos[0]):
pseudos = list_strings(pseudos)
self._pseudos_with_z = defaultdict(list)
for pseudo in pseudos:
if not isinstance(pseudo, Pseudo):
pseudo = Pseudo.from_file(pseudo)
if pseudo is not None:
self._pseudos_with_z[pseudo.Z].append(pseudo)
for z in self.zlist:
pseudo_list = self._pseudos_with_z[z]
symbols = [p.symbol for p in pseudo_list]
symbol = symbols[0]
if any(symb != symbol for symb in symbols):
raise ValueError("All symbols must be equal while they are: %s" % str(symbols))
setattr(self, symbol, pseudo_list)
def __getitem__(self, Z):
"""
Retrieve pseudos for the atomic number z. Accepts both int and slice objects.
"""
if isinstance(Z, slice):
assert Z.stop is not None
pseudos = []
for znum in iterator_from_slice(Z):
pseudos.extend(self._pseudos_with_z[znum])
return self.__class__(pseudos)
return self.__class__(self._pseudos_with_z[Z])
def __len__(self):
return len(list(self.__iter__()))
def __iter__(self):
"""Process the elements in Z order."""
for z in self.zlist:
for pseudo in self._pseudos_with_z[z]:
yield pseudo
def __repr__(self):
return "<%s at %s>" % (self.__class__.__name__, id(self))
def __str__(self):
return self.to_table()
@property
def allnc(self):
"""True if all pseudos are norm-conserving."""
return all(p.isnc for p in self)
@property
def allpaw(self):
"""True if all pseudos are PAW."""
return all(p.ispaw for p in self)
@property
def zlist(self):
"""Ordered list with the atomic numbers available in the table."""
return sorted(list(self._pseudos_with_z.keys()))
# def max_ecut_pawecutdg(self, accuracy):
# """Return the maximum value of ecut and pawecutdg based on the hints available in the pseudos."""
# ecut = max(p.hint_for_accuracy(accuracy=accuracy).ecut for p in self)
# pawecutdg = max(p.hint_for_accuracy(accuracy=accuracy).pawecutdg for p in self)
# return ecut, pawecutdg
def as_dict(self, **kwargs):
"""Return dictionary for MSONable protocol."""
d = {}
for p in self:
k, count = p.element.name, 1
# k, count = p.element, 1
# Handle multiple-pseudos with the same name!
while k in d:
k += k.split("#")[0] + "#" + str(count)
count += 1
d.update({k: p.as_dict()})
d["@module"] = self.__class__.__module__
d["@class"] = self.__class__.__name__
return d
@classmethod
def from_dict(cls, d):
"""Build instance from dictionary (MSONable protocol)."""
pseudos = []
dec = MontyDecoder()
for k, v in d.items():
if not k.startswith("@"):
pseudos.append(dec.process_decoded(v))
return cls(pseudos)
def is_complete(self, zmax=118):
"""
True if table is complete i.e. all elements with Z < zmax have at least on pseudopotential
"""
for z in range(1, zmax):
if not self[z]:
return False
return True
def all_combinations_for_elements(self, element_symbols):
"""
Return a list with all the the possible combination of pseudos
for the given list of element_symbols.
Each item is a list of pseudopotential objects.
Example::
table.all_combinations_for_elements(["Li", "F"])
"""
d = OrderedDict()
for symbol in element_symbols:
d[symbol] = self.select_symbols(symbol, ret_list=True)
from itertools import product
return list(product(*d.values()))
def pseudo_with_symbol(self, symbol, allow_multi=False):
"""
Return the pseudo with the given chemical symbol.
Args:
symbols: String with the chemical symbol of the element
allow_multi: By default, the method raises ValueError
if multiple occurrences are found. Use allow_multi to prevent this.
Raises:
ValueError if symbol is not found or multiple occurences are present and not allow_multi
"""
pseudos = self.select_symbols(symbol, ret_list=True)
if not pseudos or (len(pseudos) > 1 and not allow_multi):
raise ValueError("Found %d occurrences of symbol %s" % (len(pseudos), symbol))
if not allow_multi:
return pseudos[0]
return pseudos
def pseudos_with_symbols(self, symbols):
"""
Return the pseudos with the given chemical symbols.
Raises:
ValueError if one of the symbols is not found or multiple occurences are present.
"""
pseudos = self.select_symbols(symbols, ret_list=True)
found_symbols = [p.symbol for p in pseudos]
duplicated_elements = [s for s, o in collections.Counter(found_symbols).items() if o > 1]
if duplicated_elements:
raise ValueError("Found multiple occurrences of symbol(s) %s" % ", ".join(duplicated_elements))
missing_symbols = [s for s in symbols if s not in found_symbols]
if missing_symbols:
raise ValueError("Missing data for symbol(s) %s" % ", ".join(missing_symbols))
return pseudos
def select_symbols(self, symbols, ret_list=False):
"""
Return a :class:`PseudoTable` with the pseudopotentials with the given list of chemical symbols.
Args:
symbols: str or list of symbols
Prepend the symbol string with "-", to exclude pseudos.
ret_list: if True a list of pseudos is returned instead of a :class:`PseudoTable`
"""
symbols = list_strings(symbols)
exclude = symbols[0].startswith("-")
if exclude:
if not all(s.startswith("-") for s in symbols):
raise ValueError("When excluding symbols, all strings must start with `-`")
symbols = [s[1:] for s in symbols]
symbols = set(symbols)
pseudos = []
for p in self:
if exclude:
if p.symbol in symbols:
continue
else:
if p.symbol not in symbols:
continue
pseudos.append(p)
if ret_list:
return pseudos
return self.__class__(pseudos)
def get_pseudos_for_structure(self, structure):
"""
Return the list of :class:`Pseudo` objects to be used for this :class:`Structure`.
Args:
structure: pymatgen :class:`Structure`.
Raises:
`ValueError` if one of the chemical symbols is not found or
multiple occurences are present in the table.
"""
return self.pseudos_with_symbols(structure.symbol_set)
def print_table(self, stream=sys.stdout, filter_function=None):
"""
A pretty ASCII printer for the periodic table, based on some filter_function.
Args:
stream: file-like object
filter_function:
A filtering function that take a Pseudo as input and returns a boolean.
For example, setting filter_function = lambda p: p.Z_val > 2 will print
a periodic table containing only pseudos with Z_val > 2.
"""
print(self.to_table(filter_function=filter_function), file=stream)
def to_table(self, filter_function=None):
"""Return string with data in tabular form."""
table = []
for p in self:
if filter_function is not None and filter_function(p):
continue
table.append([p.basename, p.symbol, p.Z_val, p.l_max, p.l_local, p.xc, p.type])
return tabulate(
table,
headers=["basename", "symbol", "Z_val", "l_max", "l_local", "XC", "type"],
tablefmt="grid",
)
def sorted(self, attrname, reverse=False):
    """
    Sort the table according to the value of attribute ``attrname``.
    Pseudos that lack the attribute sort last (their key is ``np.inf``).

    Return:
        New class:`PseudoTable` object
    """
    attrs = []
    # BUG FIX: iterating `self` yields Pseudo objects, not (index, pseudo)
    # pairs, so the original `for i, pseudo in self:` raised at runtime.
    for i, pseudo in enumerate(self):
        try:
            a = getattr(pseudo, attrname)
        except AttributeError:
            a = np.inf  # missing attribute sorts last
        attrs.append((i, a))

    # Sort attrs in place (stable, same ordering as builtin sorted), then
    # build a new table with the pseudos in sorted order.
    attrs.sort(key=lambda t: t[1], reverse=reverse)
    return self.__class__([self[i] for i, _ in attrs])
def sort_by_z(self):
    """Return a new :class:`PseudoTable` with pseudos sorted by atomic number Z."""
    by_z = lambda pseudo: pseudo.Z
    return self.__class__(sorted(self, key=by_z))
def select(self, condition):
    """Keep only the pseudopotentials for which *condition* is True.

    Args:
        condition: callable accepting a :class:`Pseudo` object and
            returning True or False.

    Return:
        New class:`PseudoTable` object.
    """
    kept = [pseudo for pseudo in self if condition(pseudo)]
    return self.__class__(kept)
def with_dojo_report(self):
    """Select pseudos containing the DOJO_REPORT section.

    Return:
        New class:`PseudoTable` object.
    """
    has_report = lambda p: p.has_dojo_report
    return self.select(condition=has_report)
def select_rows(self, rows):
    """Return a new class:`PseudoTable` with the pseudos whose element sits
    in the given row(s) of the periodic table.

    ``rows`` can be either an int or a list/tuple of integers.
    """
    wanted = rows if isinstance(rows, (list, tuple)) else [rows]
    return self.__class__([p for p in self if p.element.row in wanted])
def select_family(self, family):
    """Return a PseudoTable with the pseudos whose element belongs to the
    specified family, e.g. ``family="alkaline"`` keeps pseudos whose
    element has ``is_alkaline`` set.
    """
    # e.g. element.is_alkaline
    flag = "is_" + family
    selected = [p for p in self if getattr(p.element, flag)]
    return self.__class__(selected)
| mit |
courtarro/gnuradio-wg-grc | gr-digital/examples/snr_estimators.py | 46 | 6348 | #!/usr/bin/env python
#
# Copyright 2011-2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import sys

import numpy
try:
import scipy
from scipy import stats
except ImportError:
print "Error: Program requires scipy (www.scipy.org)."
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: Program requires Matplotlib (matplotlib.sourceforge.net)."
sys.exit(1)
from gnuradio import gr, digital, filter
from gnuradio import blocks
from gnuradio import channels
from optparse import OptionParser
from gnuradio.eng_option import eng_option
'''
This example program uses Python and GNU Radio to calculate SNR of a
noise BPSK signal to compare them.
For an explination of the online algorithms, see:
http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Higher-order_statistics
'''
def online_skewness(data):
    """One-pass (online) estimate of the sample skewness of *data*.

    Accumulates the running mean and the second/third central moment sums
    (M2, M3) and returns sqrt(N)*M3 / M2**1.5, which equals the biased
    sample skewness g1 = m3 / m2**1.5.

    See: https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Higher-order_statistics
    """
    n = 0
    mean = 0
    M2 = 0
    M3 = 0

    # range instead of the Python-2-only xrange (identical semantics here).
    for n in range(len(data)):
        delta = data[n] - mean
        delta_n = delta / (n+1)          # n is 0-based; count so far is n+1
        term1 = delta * delta_n * n
        mean = mean + delta_n
        M3 = M3 + term1 * delta_n * (n - 1) - 3 * delta_n * M2
        M2 = M2 + term1

    # Plain float math instead of scipy.sqrt/scipy.power: those were
    # deprecated numpy re-exports and have been removed from SciPy.
    return (len(data) ** 0.5) * M3 / M2 ** (3.0/2.0)
def snr_est_simple(signal):
    """Simple SNR estimator: signal power is the mean squared magnitude,
    noise power is twice the variance of the magnitude.

    Returns (snr_db, snr_linear).
    """
    # numpy instead of the scipy aliases (scipy.mean/var/log10 were
    # deprecated numpy re-exports and have been removed from SciPy).
    s = numpy.mean(abs(signal)**2)
    n = 2*numpy.var(abs(signal))
    snr_rat = s/n
    return 10.0*numpy.log10(snr_rat), snr_rat
def snr_est_skew(signal):
    """Skewness-based SNR estimator (uses the online_skewness helper on the
    real part of the signal).

    Returns (snr_db, snr_linear).
    """
    # numpy instead of the scipy aliases (scipy.mean/real/log10 were
    # deprecated numpy re-exports and have been removed from SciPy).
    y1 = numpy.mean(abs(signal))
    y2 = numpy.mean(numpy.real(signal**2))
    y3 = (y1*y1 - y2)
    y4 = online_skewness(signal.real)
    #y4 = stats.skew(abs(signal.real))

    skw = y4*y4 / (y2*y2*y2)
    s = y1*y1
    n = 2*(y3 + skw*s)
    snr_rat = s / n
    return 10.0*numpy.log10(snr_rat), snr_rat
def snr_est_m2m4(signal):
    """Second- and fourth-moment (M2M4) SNR estimator.

    Returns (snr_db, snr_linear).
    """
    # numpy instead of the scipy aliases (scipy.mean/sqrt/log10 were
    # deprecated numpy re-exports and have been removed from SciPy).
    M2 = numpy.mean(abs(signal)**2)
    M4 = numpy.mean(abs(signal)**4)
    snr_rat = numpy.sqrt(2*M2*M2 - M4) / (M2 - numpy.sqrt(2*M2*M2 - M4))
    return 10.0*numpy.log10(snr_rat), snr_rat
def snr_est_svr(signal):
    """Signal-to-variation-ratio (SVR) SNR estimator.

    Returns (snr_db, snr_linear).
    """
    N = len(signal)
    ssum = 0
    msum = 0
    # range instead of the Python-2-only xrange (identical semantics here).
    for i in range(1, N):
        ssum += (abs(signal[i])**2)*(abs(signal[i-1])**2)
        msum += (abs(signal[i])**4)

    savg = (1.0/(float(N)-1.0))*ssum
    mavg = (1.0/(float(N)-1.0))*msum
    beta = savg / (mavg - savg)

    # numpy instead of the scipy aliases (scipy.sqrt/log10 were deprecated
    # numpy re-exports and have been removed from SciPy).
    snr_rat = ((beta - 1) + numpy.sqrt(beta*(beta-1)))
    return 10.0*numpy.log10(snr_rat), snr_rat
def main():
    # Map the CLI estimator names to the GNU Radio C++ estimator enums and
    # to the pure-Python reference implementations defined above.
    gr_estimators = {"simple": digital.SNR_EST_SIMPLE,
                     "skew": digital.SNR_EST_SKEW,
                     "m2m4": digital.SNR_EST_M2M4,
                     "svr": digital.SNR_EST_SVR}
    py_estimators = {"simple": snr_est_simple,
                     "skew": snr_est_skew,
                     "m2m4": snr_est_m2m4,
                     "svr": snr_est_svr}

    parser = OptionParser(option_class=eng_option, conflict_handler="resolve")
    parser.add_option("-N", "--nsamples", type="int", default=10000,
                      help="Set the number of samples to process [default=%default]")
    parser.add_option("", "--snr-min", type="float", default=-5,
                      help="Minimum SNR [default=%default]")
    parser.add_option("", "--snr-max", type="float", default=20,
                      help="Maximum SNR [default=%default]")
    parser.add_option("", "--snr-step", type="float", default=0.5,
                      help="SNR step amount [default=%default]")
    parser.add_option("-t", "--type", type="choice",
                      choices=gr_estimators.keys(), default="simple",
                      help="Estimator type {0} [default=%default]".format(
                          gr_estimators.keys()))
    (options, args) = parser.parse_args ()

    N = options.nsamples
    # Gaussian noise components (real and imaginary parts).
    xx = scipy.random.randn(N)
    xy = scipy.random.randn(N)
    # Real BPSK symbols in {-1, +1}, complex dtype so the channel math works.
    bits = 2*scipy.complex64(scipy.random.randint(0, 2, N)) - 1
    #bits =(2*scipy.complex64(scipy.random.randint(0, 2, N)) - 1) + \
    #    1j*(2*scipy.complex64(scipy.random.randint(0, 2, N)) - 1)

    snr_known = list()
    snr_python = list()
    snr_gr = list()

    # when to issue an SNR tag; can be ignored in this example.
    ntag = 10000

    n_cpx = xx + 1j*xy

    # Select the Python and GNU Radio estimator implementations to compare.
    py_est = py_estimators[options.type]
    gr_est = gr_estimators[options.type]

    SNR_min = options.snr_min
    SNR_max = options.snr_max
    SNR_step = options.snr_step
    SNR_dB = scipy.arange(SNR_min, SNR_max+SNR_step, SNR_step)
    for snr in SNR_dB:
        SNR = 10.0**(snr/10.0)
        # Scale the noise so the per-sample SNR matches the target value.
        scale = scipy.sqrt(2*SNR)
        yy = bits + n_cpx/scale
        print "SNR: ", snr

        # "Known" SNR computed directly from the signal and noise powers.
        Sknown = scipy.mean(yy**2)
        Nknown = scipy.var(n_cpx/scale)
        snr0 = Sknown/Nknown
        snr0dB = 10.0*scipy.log10(snr0)
        snr_known.append(float(snr0dB))

        # Python reference estimator.
        snrdB, snr = py_est(yy)
        snr_python.append(snrdB)

        # GNU Radio flowgraph: source -> channel (adds noise) -> estimator.
        gr_src = blocks.vector_source_c(bits.tolist(), False)
        gr_snr = digital.mpsk_snr_est_cc(gr_est, ntag, 0.001)
        gr_chn = channels.channel_model(1.0/scale)
        gr_snk = blocks.null_sink(gr.sizeof_gr_complex)
        tb = gr.top_block()
        tb.connect(gr_src, gr_chn, gr_snr, gr_snk)
        tb.run()
        snr_gr.append(gr_snr.snr())

    # Plot known vs. Python vs. GNU Radio estimates over the SNR sweep.
    f1 = pylab.figure(1)
    s1 = f1.add_subplot(1,1,1)
    s1.plot(SNR_dB, snr_known, "k-o", linewidth=2, label="Known")
    s1.plot(SNR_dB, snr_python, "b-o", linewidth=2, label="Python")
    s1.plot(SNR_dB, snr_gr, "g-o", linewidth=2, label="GNU Radio")
    s1.grid(True)
    s1.set_title('SNR Estimators')
    s1.set_xlabel('SNR (dB)')
    s1.set_ylabel('Estimated SNR')
    s1.legend()

    # Scatter plot of the last noisy constellation.
    f2 = pylab.figure(2)
    s2 = f2.add_subplot(1,1,1)
    s2.plot(yy.real, yy.imag, 'o')

    pylab.show()

if __name__ == "__main__":
    main()
| gpl-3.0 |
Vimos/scikit-learn | sklearn/decomposition/tests/test_dict_learning.py | 17 | 10084 | import numpy as np
import itertools
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils import check_array
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import TempMemmap
from sklearn.decomposition import DictionaryLearning
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.decomposition import SparseCoder
from sklearn.decomposition import dict_learning_online
from sklearn.decomposition import sparse_encode
# Shared fixture: a small random data matrix used by most tests below.
rng_global = np.random.RandomState(0)
n_samples, n_features = 10, 8
X = rng_global.randn(n_samples, n_features)
def test_sparse_encode_shapes_omp():
    """sparse_encode returns (n_samples, n_components) for every algorithm/n_jobs combo."""
    rand = np.random.RandomState(0)
    algos = ['omp', 'lasso_lars', 'lasso_cd', 'lars', 'threshold']

    for n_comp, n_samp in itertools.product([1, 5], [1, 9]):
        data = rand.randn(n_samp, n_features)
        D = rand.randn(n_comp, n_features)
        for algo, jobs in itertools.product(algos, [1, 3]):
            code = sparse_encode(data, D, algorithm=algo, n_jobs=jobs)
            assert_equal(code.shape, (n_samp, n_comp))
def test_dict_learning_shapes():
    """Fitted components_ and transform output have the expected shapes."""
    for n_comp in (5, 1):
        model = DictionaryLearning(n_comp, random_state=0).fit(X)
        assert_equal(model.components_.shape, (n_comp, n_features))
    # last model was fit with a single component
    assert_equal(model.transform(X).shape, (X.shape[0], n_comp))
def test_dict_learning_overcomplete():
    """More atoms than features still yields components_ of the requested size."""
    model = DictionaryLearning(12, random_state=0).fit(X)
    assert_true(model.components_.shape == (12, n_features))
def test_dict_learning_reconstruction():
    """OMP codes reconstruct X almost exactly; lasso_lars approximately."""
    model = DictionaryLearning(12, transform_algorithm='omp',
                               transform_alpha=0.001, random_state=0)
    code = model.fit(X).transform(X)
    assert_array_almost_equal(np.dot(code, model.components_), X)

    model.set_params(transform_algorithm='lasso_lars')
    code = model.transform(X)
    assert_array_almost_equal(np.dot(code, model.components_), X, decimal=2)

    # used to test lars here too, but there's no guarantee the number of
    # nonzero atoms is right.
def test_dict_learning_reconstruction_parallel():
    """Regression test: parallel reconstruction works with n_jobs=-1."""
    model = DictionaryLearning(12, transform_algorithm='omp',
                               transform_alpha=0.001, random_state=0,
                               n_jobs=-1)
    code = model.fit(X).transform(X)
    assert_array_almost_equal(np.dot(code, model.components_), X)

    model.set_params(transform_algorithm='lasso_lars')
    code = model.transform(X)
    assert_array_almost_equal(np.dot(code, model.components_), X, decimal=2)
def test_dict_learning_lassocd_readonly_data():
    """lasso_cd must cope with read-only (memmapped) input data."""
    with TempMemmap(X) as X_ro:
        model = DictionaryLearning(12, transform_algorithm='lasso_cd',
                                   transform_alpha=0.001, random_state=0,
                                   n_jobs=-1)
        with ignore_warnings(category=ConvergenceWarning):
            code = model.fit(X_ro).transform(X_ro)
        assert_array_almost_equal(np.dot(code, model.components_), X_ro,
                                  decimal=2)
def test_dict_learning_nonzero_coefs():
    """transform_n_nonzero_coefs bounds the number of active atoms."""
    model = DictionaryLearning(4, transform_algorithm='lars',
                               transform_n_nonzero_coefs=3, random_state=0)
    sample = X[np.newaxis, 1]
    code = model.fit(X).transform(sample)
    assert_true(len(np.flatnonzero(code)) == 3)

    model.set_params(transform_algorithm='omp')
    code = model.transform(sample)
    assert_equal(len(np.flatnonzero(code)), 3)
def test_dict_learning_unknown_fit_algorithm():
    """An unrecognized fit_algorithm raises ValueError at fit time."""
    model = DictionaryLearning(5, fit_algorithm='<unknown>')
    assert_raises(ValueError, model.fit, X)
def test_dict_learning_split():
    """split_sign splits each code into positive and negative halves."""
    n_comp = 5
    model = DictionaryLearning(n_comp, transform_algorithm='threshold',
                               random_state=0)
    code = model.fit(X).transform(X)
    model.split_sign = True
    split_code = model.transform(X)

    # recombining the two halves must give back the unsplit code
    assert_array_equal(split_code[:, :n_comp] - split_code[:, n_comp:], code)
def test_dict_learning_online_shapes():
    """dict_learning_online returns code and dictionary with consistent shapes."""
    rand = np.random.RandomState(0)
    n_comp = 8
    code, dictionary = dict_learning_online(X, n_components=n_comp,
                                            alpha=1, random_state=rand)
    assert_equal(code.shape, (n_samples, n_comp))
    assert_equal(dictionary.shape, (n_comp, n_features))
    assert_equal(np.dot(code, dictionary).shape, X.shape)
def test_dict_learning_online_verbosity():
    """Smoke-test the verbose code paths while swallowing their stdout."""
    n_comp = 5
    from sklearn.externals.six.moves import cStringIO as StringIO
    import sys

    saved_stdout = sys.stdout
    try:
        sys.stdout = StringIO()
        for level in (1, 2):
            model = MiniBatchDictionaryLearning(n_comp, n_iter=20,
                                                verbose=level, random_state=0)
            model.fit(X)
        for level in (1, 2):
            dict_learning_online(X, n_components=n_comp, alpha=1,
                                 verbose=level, random_state=0)
    finally:
        sys.stdout = saved_stdout

    assert_true(model.components_.shape == (n_comp, n_features))
def test_dict_learning_online_estimator_shapes():
    """MiniBatchDictionaryLearning learns components of the requested shape."""
    model = MiniBatchDictionaryLearning(5, n_iter=20, random_state=0)
    model.fit(X)
    assert_true(model.components_.shape == (5, n_features))
def test_dict_learning_online_overcomplete():
    """Overcomplete online learning still yields the requested shape."""
    model = MiniBatchDictionaryLearning(12, n_iter=20,
                                        random_state=0).fit(X)
    assert_true(model.components_.shape == (12, n_features))
def test_dict_learning_online_initialization():
    """With n_iter=0 the fitted dictionary must equal dict_init."""
    rand = np.random.RandomState(0)
    V = rand.randn(12, n_features)
    model = MiniBatchDictionaryLearning(12, n_iter=0,
                                        dict_init=V, random_state=0).fit(X)
    assert_array_equal(model.components_, V)
def test_dict_learning_online_partial_fit():
    """Repeated partial_fit converges to (almost) the same dictionary as fit."""
    n_comp = 12
    rand = np.random.RandomState(0)
    V = rand.randn(n_comp, n_features)  # random init
    V /= np.sum(V ** 2, axis=1)[:, np.newaxis]

    est1 = MiniBatchDictionaryLearning(n_comp, n_iter=10 * len(X),
                                       batch_size=1,
                                       alpha=1, shuffle=False, dict_init=V,
                                       random_state=0).fit(X)
    est2 = MiniBatchDictionaryLearning(n_comp, alpha=1,
                                       n_iter=1, dict_init=V,
                                       random_state=0)
    for _ in range(10):
        for sample in X:
            est2.partial_fit(sample[np.newaxis, :])

    assert_true(not np.all(sparse_encode(X, est1.components_, alpha=1) == 0))
    assert_array_almost_equal(est1.components_, est2.components_, decimal=2)
def test_sparse_encode_shapes():
    """Every coding algorithm returns an (n_samples, n_components) code."""
    n_comp = 12
    rand = np.random.RandomState(0)
    V = rand.randn(n_comp, n_features)  # random init
    V /= np.sum(V ** 2, axis=1)[:, np.newaxis]

    for algo in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'):
        assert_equal(sparse_encode(X, V, algorithm=algo).shape,
                     (n_samples, n_comp))
def test_sparse_encode_input():
    """C-ordered and F-ordered inputs must produce the same codes."""
    rand = np.random.RandomState(0)
    V = rand.randn(100, n_features)  # random init
    V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
    Xf = check_array(X, order='F')

    for algo in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'):
        code_c = sparse_encode(X, V, algorithm=algo)
        code_f = sparse_encode(Xf, V, algorithm=algo)
        assert_array_almost_equal(code_c, code_f)
def test_sparse_encode_error():
    """A small alpha yields a nonzero code with small reconstruction error."""
    rand = np.random.RandomState(0)
    V = rand.randn(12, n_features)  # random init
    V /= np.sum(V ** 2, axis=1)[:, np.newaxis]

    code = sparse_encode(X, V, alpha=0.001)
    assert_true(not np.all(code == 0))
    assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
def test_sparse_encode_error_default_sparsity():
    """OMP with n_nonzero_coefs=None falls back to a sensible default."""
    rand = np.random.RandomState(0)
    data = rand.randn(100, 64)
    D = rand.randn(2, 64)
    code = ignore_warnings(sparse_encode)(data, D, algorithm='omp',
                                          n_nonzero_coefs=None)
    assert_equal(code.shape, (100, 2))
def test_unknown_method():
    """An unrecognized algorithm name raises ValueError."""
    rand = np.random.RandomState(0)
    V = rand.randn(12, n_features)  # random init
    assert_raises(ValueError, sparse_encode, X, V, algorithm="<unknown>")
def test_sparse_coder_estimator():
    """SparseCoder with lasso_lars produces a nonzero, accurate code."""
    rand = np.random.RandomState(0)
    V = rand.randn(12, n_features)  # random init
    V /= np.sum(V ** 2, axis=1)[:, np.newaxis]

    coder = SparseCoder(dictionary=V, transform_algorithm='lasso_lars',
                        transform_alpha=0.001)
    code = coder.transform(X)
    assert_true(not np.all(code == 0))
    assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
| bsd-3-clause |
dblalock/flock | python/viz/motifs.py | 1 | 8148 | #!/bin/env/python
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import TruncatedSVD
from ..algo.motif import findMotif, findAllMotifInstances
from ..utils.subseq import simMatFromDistTensor
from viz_utils import plotRect, plotRanges
def findAndPlotMotif(seq, lengths, **kwargs):
    """Find the best motif of the given lengths in seq and plot it."""
    best = findMotif([seq], lengths)
    plotMotif(seq, best, **kwargs)
def findAndPlotMotifInstances(seq, lengths, truthStartEndPairs=None,
        saveas=None, findMotifKwargs=None, **kwargs):
    """Find all instances of the best motif in seq, plot them, and overlay
    ground-truth annotations (ranges or single points) if provided.

    If saveas is given the figure is written to that path, otherwise shown.
    """
    # XXX this func will break if seq is a list of seqs, not just one ndarray
    if findMotifKwargs:
        startIdxs, instances, motif = findAllMotifInstances([seq], lengths,
                                                           **findMotifKwargs)
    else:
        startIdxs, instances, motif = findAllMotifInstances([seq], lengths)

    # ------------------------ plot reported motif instances
    endIdxs = startIdxs + motif.length
    startEndPairs = np.c_[startIdxs, endIdxs]
    ax = plotMotifInstances(seq, startEndPairs, **kwargs)

    # ------------------------ plot ground truth
    if truthStartEndPairs is not None and len(truthStartEndPairs):
        try:
            if len(truthStartEndPairs[0]) == 1:  # single points, not ranges
                color = 'k'
                truthStartEndPairs = np.asarray(truthStartEndPairs)
                truthStartEndPairs = np.c_[truthStartEndPairs, truthStartEndPairs]
            else:
                color = 'g'
        # BUG FIX: was a bare `except:`; scalar elements make len() raise
        # TypeError specifically, so catch only that instead of swallowing
        # every possible error (KeyboardInterrupt included).
        except TypeError:  # elements are scalars and so len() throws
            color = 'k'
            truthStartEndPairs = np.asarray(truthStartEndPairs)
            truthStartEndPairs = np.c_[truthStartEndPairs, truthStartEndPairs]

        # make a vert line (spanning less than full graph height)
        # where the labels are
        yMin, yMax = np.min(seq), np.max(seq)
        yRange = yMax - yMin
        lineMin, lineMax = [yMin + frac * yRange for frac in (.4, .6)]
        plotMotifInstances(None, truthStartEndPairs, ax=ax,
                           color=color, linestyle='-', lw=2,
                           ymin=lineMin, ymax=lineMax)

    if saveas:
        plt.savefig(saveas)
    else:
        plt.show()
def plotMotif(seq, motif, showExtracted=True, color='gray',
        title=None, saveas=None):
    """Plot a motif pair found by findMotif.

    If showExtracted is False, only the original signal is drawn with the
    two occurrences highlighted; otherwise the two extracted instances are
    also plotted in their own axes. If saveas is given, the figure is
    written to that path instead of shown.
    """
    inst1, inst2 = motif[0], motif[1]
    start1, start2 = motif[3], motif[4]
    end1 = start1 + len(inst1) - 1
    end2 = start2 + len(inst2) - 1

    if not showExtracted:
        # just show where the motif is in the original signal
        _, ax = plt.subplots()
        ax.autoscale(False)
        ax.plot(seq)
        plotRect(ax, start1, end1, color=color)
        plotRect(ax, start2, end2, color=color)
        if saveas:
            plt.savefig(saveas)
        else:
            plt.show()
        return ax

    # one wide axis on top for the raw signal, two below for the instances
    axTop = plt.subplot2grid((2,2), (0,0), colspan=2)
    axLeft = plt.subplot2grid((2,2), (1,0))
    axRight = plt.subplot2grid((2,2), (1,1))
    for ax in (axTop, axLeft, axRight):
        ax.autoscale(tight=True)

    axTop.plot(seq, lw=2)
    axLeft.plot(inst1, lw=2)
    axRight.plot(inst2, lw=2)
    axTop.set_title('Original Signal')
    axLeft.set_title('Motif Instance at %d' % start1)
    axRight.set_title('Motif Instance at %d' % start2)

    # draw rects in the ts where the motif is
    plotRect(axTop, start1, end1, color=color)
    plotRect(axTop, start2, end2, color=color)

    plt.tight_layout()
    if saveas:
        plt.savefig(saveas)
    else:
        plt.show()
    return axTop, axLeft, axRight
def plotMotifInstances(seq, startEndIdxPairs, title=None, ax=None,
        saveas=None, **kwargs):
    """Highlight the given [start, end] index ranges, optionally on top of
    the raw sequence, and return the axis used."""
    if ax is None:
        _, ax = plt.subplots()
    # ax.autoscale(False) # makes it not actually work...

    haveSeq = seq is not None and len(seq)
    if haveSeq:  # plot original seq if one is provided
        ax.plot(seq, **kwargs)
    plotRanges(ax, startEndIdxPairs, **kwargs)

    ax.set_title(title if title else "Motif Instances in Data")

    if haveSeq:
        # clamp the view to the data extent
        ax.set_ylim([np.min(seq), np.max(seq)])
        ax.set_xlim([0, len(seq)])

    if saveas:
        plt.savefig(saveas)
    return ax
def showPairwiseSims(origSignal, m, simMat, clamp=True, pruneCorrAbove=-1,
        plotMotifs=True, showEigenVect=False, hasPadding=True, saveas=None):
    # Plot the raw signal above an image of the subsequence similarity
    # matrix, optionally extracting a dominant "filter" via truncated SVD
    # and overlaying the best motif found in both the signal and simMat.
    # NOTE(review): `clamp` and `pruneCorrAbove` only affect titles here;
    # the pruning code below is commented out.
    print "origSignal shape", origSignal.shape
    # padLen = len(origSignal) - simMat.shape[1]
    padLen = m - 1 if hasPadding else 0
    subseqLen = m

    plt.figure(figsize=(8,10))
    if showEigenVect:
        # three axes: signal, similarity image, and the extracted filter
        ax1 = plt.subplot2grid((20,10), (0,0), colspan=8, rowspan=5)
        ax2 = plt.subplot2grid((20,10), (5,0), colspan=8, rowspan=15)
        ax3 = plt.subplot2grid((20,10), (5,8), colspan=2, rowspan=15)
        ax3.autoscale(tight=True)
        ax3.set_title('Extracted')
    else:
        ax1 = plt.subplot2grid((4,2), (0,0), colspan=2)
        ax2 = plt.subplot2grid((4,2), (1,0), colspan=2, rowspan=3)
    ax1.autoscale(tight=True)
    ax2.autoscale(tight=True)
    ax1.set_title('Original Signal')
    ax1.set_ylabel('Value')
    if pruneCorrAbove > 0:
        ax2.set_title('Subsequence Cosine Similarities to Dictionary Sequences')
    else:
        ax2.set_title('Subsequence Pairwise Cosine Similarities')
    ax2.set_xlabel('Subsequence Start Index')
    ax2.set_ylabel('"Dictionary" Sequence Number')

    seq = origSignal
    imgMat = simMat
    print "imgMat shape: ", imgMat.shape

    # # show magnitude of similarities in each row in descending order; there are
    # # only about 60 entries > .01 in *any* row for msrc, and way fewer in most
    # # plt.figure()
    # # thresh = .5
    # # sortedSimsByRow = np.sort(imgMat, axis=1)
    # # sortedSimsByRow = sortedSimsByRow[:, ::-1]
    # # nonzeroCols = np.sum(sortedSimsByRow, axis=0) > thresh # ignore tiny similarities
    # # sortedSimsByRow = sortedSimsByRow[:, nonzeroCols]
    # # # plt.imshow(sortedSimsByRow)
    # # # plt.plot(np.mean(sortedSimsByRow, axis=1))
    # # plt.plot(np.sum(sortedSimsByRow > thresh, axis=1)) # entries > thresh per row

    # if pruneCorrAbove > 0.:
    #     print "ImgMat Shape:"
    #     print imgMat.shape
    #     imgMat = removeCorrelatedRows(imgMat, pruneCorrAbove)
    #     print imgMat.shape
    #     print "NaNs at:", np.where(np.isnan(imgMat))[0]
    #     print "Infs at:", np.where(np.isinf(imgMat))[0]

    # power iteration to see what we get
    if showEigenVect:
        width = int(subseqLen * 1.5)
        nRows, nCols = imgMat.shape
        nPositions = nCols - width + 1
        if nPositions > 1:
            elementsPerPosition = nRows * width  # size of 2d slice
            dataMat = np.empty((nPositions, elementsPerPosition))
            # for i in range(nPositions): # step by 1
            # NOTE(review): stepping by `width` fills only every width-th
            # row of dataMat; the rest keep np.empty garbage — confirm
            # whether this is intentional.
            for i in range(0, nPositions, width): # step by width, so non-overlapping
                startCol = i
                endCol = startCol + width
                data = imgMat[:, startCol:endCol]
                dataMat[i] = data.flatten()
            # ah; power iteration is for cov matrix, cuz needs a square mat
            # v = np.ones(elementsPerPosition) / elementsPerPosition # uniform start vect
            # for i in range(3):
            #     v = np.dot(dataMat.T, v)
            # First right singular vector acts as a learned 2D filter.
            svd = TruncatedSVD(n_components=1, random_state=42)
            svd.fit(dataMat)
            v = svd.components_[0]
            learnedFilt = v.reshape((nRows, width))
            ax3.imshow(learnedFilt) # seems to be pretty good
            # plt.show()

    ax1.plot(seq)
    ax2.imshow(imgMat, interpolation='nearest', aspect='auto')
    plt.tight_layout()

    if plotMotifs:
        # motif in the raw signal, drawn in grey on both axes
        searchSeq = seq
        print "searchSeq shape:", searchSeq.shape
        motif = findMotif([searchSeq], subseqLen) # motif of min length
        start1 = motif[3]
        start2 = motif[4]
        end1 = start1 + len(motif[0]) - 1
        end2 = start2 + len(motif[1]) - 1
        ax2.autoscale(False)
        color = 'grey'
        plotRect(ax1, start1, end1, color=color)
        plotRect(ax2, start1, end1, color=color)
        plotRect(ax1, start2, end2, color=color)
        plotRect(ax2, start2, end2, color=color)

        # motif in the similarity matrix (columns as time), drawn in magenta
        print "imgMat shape: ", imgMat.shape
        print "padLen: ", padLen
        if padLen:
            searchSeq = imgMat[:,:-padLen].T
        else:
            searchSeq = imgMat.T
        print "searchSeq shape:", searchSeq.shape
        print "subseqLen:", subseqLen
        motif = findMotif([searchSeq], subseqLen) # motif of min length
        start1 = motif[3]
        start2 = motif[4]
        end1 = start1 + len(motif[0]) - 1
        end2 = start2 + len(motif[1]) - 1
        print [start1, end1, start2, end2]
        color = 'm' # magenta
        plotRect(ax1, start1, end1, color=color)
        plotRect(ax2, start1, end1, color=color)
        plotRect(ax1, start2, end2, color=color)
        plotRect(ax2, start2, end2, color=color)

    if saveas:
        plt.savefig(saveas)
    else:
        plt.show()

    if showEigenVect:
        return ax1, ax2, ax3
    return ax1, ax2
def showPairwiseDists(origSignal, m, Dtensor, **kwargs):
    """Convert a distance tensor into a similarity matrix, then plot it
    via showPairwiseSims."""
    numPadding = len(origSignal) - Dtensor.shape[1]
    sims = simMatFromDistTensor(Dtensor, m, numPadding)
    showPairwiseSims(origSignal, m, sims, **kwargs)
| mit |
apdjustino/urbansim | urbansim/models/util.py | 1 | 9311 | """
Utilities used within the ``urbansim.models`` package.
"""
import collections
import logging
import numbers
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from tokenize import generate_tokens, NAME
import numpy as np
import pandas as pd
import patsy
from zbox import toolz as tz
from ..utils.logutil import log_start_finish
logger = logging.getLogger(__name__)
def apply_filter_query(df, filters=None):
    """
    Use the DataFrame.query method to filter a table down to the
    desired rows.

    Parameters
    ----------
    df : pandas.DataFrame
    filters : list of str or str, optional
        List of filters to apply. A list is joined with ' and ' before
        being passed to DataFrame.query; a string is passed straight
        through. If not supplied, no filtering is done.

    Returns
    -------
    filtered_df : pandas.DataFrame
    """
    with log_start_finish('apply filter query: {!r}'.format(filters), logger):
        if not filters:
            return df
        query = filters if isinstance(filters, str) else ' and '.join(filters)
        return df.query(query)
def _filterize(name, value):
    """
    Turn a `name` and `value` into a string expression compatible with
    the ``DataFrame.query`` method.

    Parameters
    ----------
    name : str
        Name of a column in the table the filter will be applied to.
        A '_max' suffix produces a "less than" filter, a '_min' suffix a
        "greater than or equal to" filter, anything else an "equal to"
        filter.
    value : any
        Value side of the comparison.

    Returns
    -------
    filter_exp : str
    """
    comp = '=='
    if name.endswith('_min'):
        name, comp = name[:-4], '>='
    elif name.endswith('_max'):
        name, comp = name[:-4], '<'

    result = '{} {} {!r}'.format(name, comp, value)
    logger.debug(
        'converted name={} and value={} to filter {}'.format(
            name, value, result))
    return result
def filter_table(table, filter_series, ignore=None):
    """
    Filter a table using a Series of column name / filter value pairs.
    Column names ending in '_max' become "less than" constraints and
    those ending in '_min' become "greater than or equal to" constraints;
    a bare name makes an "equal to" filter.

    Parameters
    ----------
    table : pandas.DataFrame
        Table to filter.
    filter_series : pandas.Series
        Series of column name / value pairs of filter constraints.
    ignore : sequence of str, optional
        Column names that should not be used for filtering.

    Returns
    -------
    filtered : pandas.DataFrame
    """
    with log_start_finish('filter table', logger):
        ignore = ignore if ignore else set()

        def _skip(name, val):
            # ignored columns and NaN filter values contribute no filter
            return (name in ignore or
                    (isinstance(val, numbers.Number) and np.isnan(val)))

        filters = [_filterize(name, val)
                   for name, val in filter_series.iteritems()
                   if not _skip(name, val)]

        return apply_filter_query(table, filters)
def concat_indexes(indexes):
    """
    Concatenate a sequence of pandas Indexes into a single Index.

    Parameters
    ----------
    indexes : sequence of pandas.Index

    Returns
    -------
    pandas.Index
    """
    combined = np.concatenate(indexes)
    return pd.Index(combined)
def has_constant_expr(expr):
    """
    Report whether a model expression has a constant-specific term, i.e.
    a term explicitly specifying whether the model should or should not
    include a constant (e.g. '+ 1' or '- 1').

    Parameters
    ----------
    expr : str
        Model expression to check.

    Returns
    -------
    has_constant : bool
    """
    def _contains_one(node):
        # patsy represents the literal constant term as a node of type 'ONE'
        if node.type == 'ONE':
            return True
        return any(_contains_one(child) for child in node.args)

    return _contains_one(patsy.parse_formula.parse_formula(expr))
def str_model_expression(expr, add_constant=True):
    """
    We support specifying model expressions as strings, lists, or dicts;
    but for use with patsy and statsmodels we need a string.
    This function will take any of those as input and return a string.

    Parameters
    ----------
    expr : str, iterable, or dict
        A string will be returned unmodified except to add or remove
        a constant.
        An iterable sequence will be joined together with ' + '.
        A dictionary should have ``right_side`` and, optionally,
        ``left_side`` keys. The ``right_side`` can be a list or a string
        and will be handled as above. If ``left_side`` is present it will
        be joined with ``right_side`` with ' ~ '.
    add_constant : bool, optional
        Whether to add a ' + 1' (if True) or ' - 1' (if False) to the model.
        If the expression already has a '+ 1' or '- 1' this option will be
        ignored.

    Returns
    -------
    model_expression : str
        A string model expression suitable for use with statsmodels and patsy.
    """
    if not isinstance(expr, str):
        # BUG FIX: ``collections.Mapping`` was removed in Python 3.10; the
        # ABC lives in ``collections.abc`` on Python 3 (fall back for py2).
        try:
            from collections.abc import Mapping
        except ImportError:
            from collections import Mapping

        if isinstance(expr, Mapping):
            left_side = expr.get('left_side')
            right_side = str_model_expression(expr['right_side'], add_constant)
        else:
            # some kind of iterable like a list
            left_side = None
            right_side = ' + '.join(expr)

        if left_side:
            model_expression = ' ~ '.join((left_side, right_side))
        else:
            model_expression = right_side
    else:
        model_expression = expr

    if not has_constant_expr(model_expression):
        if add_constant:
            model_expression += ' + 1'
        else:
            model_expression += ' - 1'

    logger.debug(
        'converted expression: {!r} to model: {!r}'.format(
            expr, model_expression))
    return model_expression
def sorted_groupby(df, groupby):
    """
    Perform a groupby on a DataFrame using a specific column
    and assuming that that column is sorted.

    Parameters
    ----------
    df : pandas.DataFrame
    groupby : object
        Column name on which to group. This column must be sorted.

    Returns
    -------
    generator
        Yields pairs of group_name, DataFrame.
    """
    col = df[groupby]
    group_start = 0
    current = col.iloc[0]
    for idx, value in enumerate(col):
        if value != current:
            yield current, df.iloc[group_start:idx]
            current, group_start = value, idx
    # emit the final group
    yield current, df.iloc[group_start:]
def columns_in_filters(filters):
    """
    Returns a list of the column names used in a set of query filters.

    Parameters
    ----------
    filters : list of str or str
        The filters as passed to ``apply_filter_query``.

    Returns
    -------
    columns : list of str
        All identifiers mentioned in the filters, in first-seen order.
    """
    if not filters:
        return []
    if not isinstance(filters, str):
        filters = ' '.join(filters)

    # query-language keywords are not column names
    reserved = {'and', 'or', 'in', 'not'}
    names = [tokval
             for toknum, tokval, _, _, _
             in generate_tokens(StringIO(filters).readline)
             if toknum == NAME and tokval not in reserved]

    return list(tz.unique(names))
def _tokens_from_patsy(node):
    """
    Yield every individual token from within a patsy formula as parsed
    by patsy.parse_formula.parse_formula (children first, then the
    node's own token if any).

    Parameters
    ----------
    node : patsy.parse_formula.ParseNode
    """
    for child in node.args:
        for tok in _tokens_from_patsy(child):
            yield tok
    if node.token:
        yield node.token
def columns_in_formula(formula):
    """
    Returns the names of all the columns used in a patsy formula.

    Parameters
    ----------
    formula : str, iterable, or dict
        Any formula construction supported by ``str_model_expression``.

    Returns
    -------
    columns : list of str
    """
    if formula is None:
        return []

    formula = str_model_expression(formula, add_constant=False)
    columns = []

    # Parse with patsy and keep only tokens that carry source text
    # (x.extra is None for purely structural tokens).
    tokens = map(
        lambda x: x.extra,
        tz.remove(
            lambda x: x.extra is None,
            _tokens_from_patsy(patsy.parse_formula.parse_formula(formula))))

    for tok in tokens:
        # if there are parentheses in the expression we
        # want to drop them and everything outside
        # and start again from the top
        if '(' in tok:
            start = tok.find('(') + 1
            fin = tok.rfind(')')
            columns.extend(columns_in_formula(tok[start:fin]))
        else:
            # tokenize the fragment and collect every identifier
            for toknum, tokval, _, _, _ in generate_tokens(
                    StringIO(tok).readline):
                if toknum == NAME:
                    columns.append(tokval)

    # preserve first-seen order while dropping duplicates
    return list(tz.unique(columns))
| bsd-3-clause |
samzhang111/scikit-learn | sklearn/linear_model/logistic.py | 3 | 65828 | """
Logistic Regression
"""
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# Fabian Pedregosa <f@bianp.net>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Manoj Kumar <manojkumarsivaraj334@gmail.com>
# Lars Buitinck
# Simon Wu <s8wu@uwaterloo.ca>
import numbers
import warnings
import numpy as np
from scipy import optimize, sparse
from .base import LinearClassifierMixin, SparseCoefMixin, BaseEstimator
from .sag import sag_solver
from .sag_fast import get_max_squared_sum
from ..feature_selection.from_model import _LearntSelectorMixin
from ..preprocessing import LabelEncoder, LabelBinarizer
from ..svm.base import _fit_liblinear
from ..utils import check_array, check_consistent_length, compute_class_weight
from ..utils import check_random_state
from ..utils.extmath import (logsumexp, log_logistic, safe_sparse_dot,
softmax, squared_norm)
from ..utils.optimize import newton_cg
from ..utils.validation import check_X_y
from ..exceptions import DataConversionWarning
from ..exceptions import NotFittedError
from ..utils.fixes import expit
from ..utils.multiclass import check_classification_targets
from ..externals.joblib import Parallel, delayed
from ..cross_validation import check_cv
from ..externals import six
from ..metrics import SCORERS
# .. some helper functions for logistic_regression_path ..
def _intercept_dot(w, X, y):
    """Compute ``y * (X @ w + c)``, splitting off the intercept if present.

    When ``w`` has one more entry than ``X`` has features, that trailing
    entry is treated as the intercept ``c``; otherwise ``c`` is 0.

    Parameters
    ----------
    w : ndarray, shape (n_features,) or (n_features + 1,)
        Coefficient vector.
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data.
    y : ndarray, shape (n_samples,)
        Array of labels.
    """
    c = 0.
    if w.size == X.shape[1] + 1:
        # Peel the intercept off the end of the coefficient vector.
        w, c = w[:-1], w[-1]
    z = safe_sparse_dot(X, w) + c
    return w, c, y * z
def _logistic_loss_and_grad(w, X, y, alpha, sample_weight=None):
    """Return the binary logistic loss and its gradient.

    Parameters
    ----------
    w : ndarray, shape (n_features,) or (n_features + 1,)
        Coefficient vector; a trailing entry, if present, is the intercept.
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data.
    y : ndarray, shape (n_samples,)
        Array of labels.
    alpha : float
        Regularization parameter. alpha is equal to 1 / C.
    sample_weight : array-like, shape (n_samples,) optional
        Array of weights that are assigned to individual samples.
        If not provided, then each sample is given unit weight.

    Returns
    -------
    out : float
        Logistic loss.
    grad : ndarray, shape (n_features,) or (n_features + 1,)
        Logistic gradient.
    """
    _, n_features = X.shape
    # Allocate the gradient before ``w`` loses its intercept entry so the
    # buffer keeps the full (possibly n_features + 1) length.
    grad = np.empty_like(w)
    w, c, yz = _intercept_dot(w, X, y)
    if sample_weight is None:
        sample_weight = np.ones(y.shape[0])

    # Negative weighted log-likelihood plus the L2 penalty
    # (the intercept is not penalized).
    out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)

    z0 = sample_weight * (expit(yz) - 1) * y
    grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
    if grad.shape[0] > n_features:
        # Intercept gradient occupies the final slot.
        grad[-1] = z0.sum()
    return out, grad
def _logistic_loss(w, X, y, alpha, sample_weight=None):
    """Return the binary logistic loss (without its gradient).

    Parameters
    ----------
    w : ndarray, shape (n_features,) or (n_features + 1,)
        Coefficient vector; a trailing entry, if present, is the intercept.
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data.
    y : ndarray, shape (n_samples,)
        Array of labels.
    alpha : float
        Regularization parameter. alpha is equal to 1 / C.
    sample_weight : array-like, shape (n_samples,) optional
        Array of weights that are assigned to individual samples.
        If not provided, then each sample is given unit weight.

    Returns
    -------
    out : float
        Logistic loss.
    """
    w, c, yz = _intercept_dot(w, X, y)
    if sample_weight is None:
        sample_weight = np.ones(y.shape[0])

    # Negative weighted log-likelihood of the logistic model.
    data_term = -np.sum(sample_weight * log_logistic(yz))
    # L2 penalty on the coefficients only (intercept excluded).
    penalty = .5 * alpha * np.dot(w, w)
    return data_term + penalty
def _logistic_grad_hess(w, X, y, alpha, sample_weight=None):
    """Computes the gradient and the Hessian, in the case of a logistic loss.

    Parameters
    ----------
    w : ndarray, shape (n_features,) or (n_features + 1,)
        Coefficient vector.
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data.
    y : ndarray, shape (n_samples,)
        Array of labels.
    alpha : float
        Regularization parameter. alpha is equal to 1 / C.
    sample_weight : array-like, shape (n_samples,) optional
        Array of weights that are assigned to individual samples.
        If not provided, then each sample is given unit weight.

    Returns
    -------
    grad : ndarray, shape (n_features,) or (n_features + 1,)
        Logistic gradient.
    Hs : callable
        Function that takes the gradient as a parameter and returns the
        matrix product of the Hessian and gradient.
    """
    n_samples, n_features = X.shape
    # Allocate before _intercept_dot strips the intercept entry from w, so
    # grad keeps the full length of the incoming parameter vector.
    grad = np.empty_like(w)
    # One extra entry in w/grad means an intercept is being fit.
    fit_intercept = grad.shape[0] > n_features
    w, c, yz = _intercept_dot(w, X, y)
    if sample_weight is None:
        sample_weight = np.ones(y.shape[0])
    z = expit(yz)
    z0 = sample_weight * (z - 1) * y
    grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
    # Case where we fit the intercept.
    if fit_intercept:
        grad[-1] = z0.sum()
    # The mat-vec product of the Hessian
    # d holds the per-sample curvature weights sigma * (1 - sigma).
    d = sample_weight * z * (1 - z)
    if sparse.issparse(X):
        # Scale the rows of sparse X by d via a diagonal sparse matrix.
        dX = safe_sparse_dot(sparse.dia_matrix((d, 0),
                             shape=(n_samples, n_samples)), X)
    else:
        # Precompute as much as possible
        dX = d[:, np.newaxis] * X
    if fit_intercept:
        # Calculate the double derivative with respect to intercept
        # In the case of sparse matrices this returns a matrix object.
        dd_intercept = np.squeeze(np.array(dX.sum(axis=0)))

    def Hs(s):
        # Hessian-vector product: closes over X, dX, d, dd_intercept above.
        ret = np.empty_like(s)
        ret[:n_features] = X.T.dot(dX.dot(s[:n_features]))
        ret[:n_features] += alpha * s[:n_features]
        # For the fit intercept case.
        if fit_intercept:
            ret[:n_features] += s[-1] * dd_intercept
            ret[-1] = dd_intercept.dot(s[:n_features])
            ret[-1] += d.sum() * s[-1]
        return ret
    return grad, Hs
def _multinomial_loss(w, X, Y, alpha, sample_weight):
    """Return the multinomial loss together with class probabilities.

    Parameters
    ----------
    w : ndarray, shape (n_classes * n_features,) or
        (n_classes * (n_features + 1),)
        Coefficient vector.
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data.
    Y : ndarray, shape (n_samples, n_classes)
        Transformed labels according to the output of LabelBinarizer.
    alpha : float
        Regularization parameter. alpha is equal to 1 / C.
    sample_weight : array-like, shape (n_samples,) optional
        Array of weights that are assigned to individual samples.

    Returns
    -------
    loss : float
        Multinomial loss.
    p : ndarray, shape (n_samples, n_classes)
        Estimated class probabilities.
    w : ndarray, shape (n_classes, n_features)
        Reshaped param vector excluding intercept terms.
    """
    n_classes, n_features = Y.shape[1], X.shape[1]
    # Intercepts are present iff w carries one extra column per class.
    fit_intercept = (w.size == n_classes * (n_features + 1))
    w = w.reshape(n_classes, -1)
    sample_weight = sample_weight[:, np.newaxis]
    if fit_intercept:
        intercept, w = w[:, -1], w[:, :-1]
    else:
        intercept = 0

    # Log-probabilities via a numerically stable log-softmax of the scores.
    p = safe_sparse_dot(X, w.T)
    p += intercept
    p -= logsumexp(p, axis=1)[:, np.newaxis]

    loss = -(sample_weight * Y * p).sum() + 0.5 * alpha * squared_norm(w)
    p = np.exp(p, p)  # exponentiate in place to turn log-probs into probs
    return loss, p, w
def _multinomial_loss_grad(w, X, Y, alpha, sample_weight):
    """Return the multinomial loss, its gradient and class probabilities.

    Parameters
    ----------
    w : ndarray, shape (n_classes * n_features,) or
        (n_classes * (n_features + 1),)
        Coefficient vector.
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data.
    Y : ndarray, shape (n_samples, n_classes)
        Transformed labels according to the output of LabelBinarizer.
    alpha : float
        Regularization parameter. alpha is equal to 1 / C.
    sample_weight : array-like, shape (n_samples,) optional
        Array of weights that are assigned to individual samples.

    Returns
    -------
    loss : float
        Multinomial loss.
    grad : ndarray, shape (n_classes * n_features,) or
        (n_classes * (n_features + 1),)
        Ravelled gradient of the multinomial loss.
    p : ndarray, shape (n_samples, n_classes)
        Estimated class probabilities
    """
    n_classes, n_features = Y.shape[1], X.shape[1]
    fit_intercept = (w.size == n_classes * (n_features + 1))
    # One extra gradient column per class when intercepts are fit.
    grad = np.zeros((n_classes, n_features + bool(fit_intercept)))

    loss, p, w = _multinomial_loss(w, X, Y, alpha, sample_weight)
    sample_weight = sample_weight[:, np.newaxis]
    diff = sample_weight * (p - Y)

    grad[:, :n_features] = safe_sparse_dot(diff.T, X) + alpha * w
    if fit_intercept:
        grad[:, -1] = diff.sum(axis=0)
    return loss, grad.ravel(), p
def _multinomial_grad_hess(w, X, Y, alpha, sample_weight):
    """
    Computes the gradient and the Hessian, in the case of a multinomial loss.

    Parameters
    ----------
    w : ndarray, shape (n_classes * n_features,) or
        (n_classes * (n_features + 1),)
        Coefficient vector.
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data.
    Y : ndarray, shape (n_samples, n_classes)
        Transformed labels according to the output of LabelBinarizer.
    alpha : float
        Regularization parameter. alpha is equal to 1 / C.
    sample_weight : array-like, shape (n_samples,) optional
        Array of weights that are assigned to individual samples.

    Returns
    -------
    grad : array, shape (n_classes * n_features,) or
        (n_classes * (n_features + 1),)
        Ravelled gradient of the multinomial loss.
    hessp : callable
        Function that takes in a vector input of shape (n_classes * n_features)
        or (n_classes * (n_features + 1)) and returns matrix-vector product
        with hessian.

    References
    ----------
    Barak A. Pearlmutter (1993). Fast Exact Multiplication by the Hessian.
    http://www.bcl.hamilton.ie/~barak/papers/nc-hessian.pdf
    """
    n_features = X.shape[1]
    n_classes = Y.shape[1]
    # Intercepts are present iff w carries one extra entry per class.
    fit_intercept = w.size == (n_classes * (n_features + 1))
    # `loss` is unused. Refactoring to avoid computing it does not
    # significantly speed up the computation and decreases readability
    loss, grad, p = _multinomial_loss_grad(w, X, Y, alpha, sample_weight)
    sample_weight = sample_weight[:, np.newaxis]

    # Hessian-vector product derived by applying the R-operator on the gradient
    # of the multinomial loss function.
    def hessp(v):
        # v arrives ravelled; reshape to (n_classes, n_features [+ 1]).
        v = v.reshape(n_classes, -1)
        if fit_intercept:
            inter_terms = v[:, -1]
            v = v[:, :-1]
        else:
            inter_terms = 0
        # r_yhat holds the result of applying the R-operator on the multinomial
        # estimator.
        r_yhat = safe_sparse_dot(X, v.T)
        r_yhat += inter_terms
        r_yhat += (-p * r_yhat).sum(axis=1)[:, np.newaxis]
        r_yhat *= p
        r_yhat *= sample_weight
        hessProd = np.zeros((n_classes, n_features + bool(fit_intercept)))
        hessProd[:, :n_features] = safe_sparse_dot(r_yhat.T, X)
        hessProd[:, :n_features] += v * alpha
        if fit_intercept:
            hessProd[:, -1] = r_yhat.sum(axis=0)
        return hessProd.ravel()
    return grad, hessp
def _check_solver_option(solver, multi_class, penalty, dual, sample_weight):
if solver not in ['liblinear', 'newton-cg', 'lbfgs', 'sag']:
raise ValueError("Logistic Regression supports only liblinear,"
" newton-cg, lbfgs and sag solvers, got %s" % solver)
if multi_class not in ['multinomial', 'ovr']:
raise ValueError("multi_class should be either multinomial or "
"ovr, got %s" % multi_class)
if multi_class == 'multinomial' and solver in ['liblinear', 'sag']:
raise ValueError("Solver %s does not support "
"a multinomial backend." % solver)
if solver != 'liblinear':
if penalty != 'l2':
raise ValueError("Solver %s supports only l2 penalties, "
"got %s penalty." % (solver, penalty))
if dual:
raise ValueError("Solver %s supports only "
"dual=False, got dual=%s" % (solver, dual))
if solver == 'liblinear' and sample_weight is not None:
raise ValueError("Solver %s does not support "
"sample weights." % solver)
def logistic_regression_path(X, y, pos_class=None, Cs=10, fit_intercept=True,
                             max_iter=100, tol=1e-4, verbose=0,
                             solver='lbfgs', coef=None, copy=False,
                             class_weight=None, dual=False, penalty='l2',
                             intercept_scaling=1., multi_class='ovr',
                             random_state=None, check_input=True,
                             max_squared_sum=None, sample_weight=None):
    """Compute a Logistic Regression model for a list of regularization
    parameters.

    This is an implementation that uses the result of the previous model
    to speed up computations along the set of solutions, making it faster
    than sequentially calling LogisticRegression for the different parameters.

    Read more in the :ref:`User Guide <logistic_regression>`.

    Parameters
    ----------
    X : array-like or sparse matrix, shape (n_samples, n_features)
        Input data.
    y : array-like, shape (n_samples,)
        Input data, target values.
    Cs : int | array-like, shape (n_cs,)
        List of values for the regularization parameter or integer specifying
        the number of regularization parameters that should be used. In this
        case, the parameters will be chosen in a logarithmic scale between
        1e-4 and 1e4.
    pos_class : int, None
        The class with respect to which we perform a one-vs-all fit.
        If None, then it is assumed that the given problem is binary.
    fit_intercept : bool
        Whether to fit an intercept for the model. In this case the shape of
        the returned array is (n_cs, n_features + 1).
    max_iter : int
        Maximum number of iterations for the solver.
    tol : float
        Stopping criterion. For the newton-cg and lbfgs solvers, the iteration
        will stop when ``max{|g_i | i = 1, ..., n} <= tol``
        where ``g_i`` is the i-th component of the gradient.
    verbose : int
        For the liblinear and lbfgs solvers set verbose to any positive
        number for verbosity.
    solver : {'lbfgs', 'newton-cg', 'liblinear', 'sag'}
        Numerical solver to use.
    coef : array-like, shape (n_features,), default None
        Initialization value for coefficients of logistic regression.
        Useless for liblinear solver.
    copy : bool, default False
        Whether or not to produce a copy of the data. A copy is not required
        anymore. This parameter is deprecated and will be removed in 0.19.
    class_weight : dict or 'balanced', optional
        Weights associated with classes in the form ``{class_label: weight}``.
        If not given, all classes are supposed to have weight one.
        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``
        Note that these weights will be multiplied with sample_weight (passed
        through the fit method) if sample_weight is specified.
    dual : bool
        Dual or primal formulation. Dual formulation is only implemented for
        l2 penalty with liblinear solver. Prefer dual=False when
        n_samples > n_features.
    penalty : str, 'l1' or 'l2'
        Used to specify the norm used in the penalization. The 'newton-cg',
        'sag' and 'lbfgs' solvers support only l2 penalties.
    intercept_scaling : float, default 1.
        This parameter is useful only when the solver 'liblinear' is used
        and self.fit_intercept is set to True. In this case, x becomes
        [x, self.intercept_scaling],
        i.e. a "synthetic" feature with constant value equals to
        intercept_scaling is appended to the instance vector.
        The intercept becomes intercept_scaling * synthetic feature weight
        Note! the synthetic feature weight is subject to l1/l2 regularization
        as all other features.
        To lessen the effect of regularization on synthetic feature weight
        (and therefore on the intercept) intercept_scaling has to be increased.
    multi_class : str, {'ovr', 'multinomial'}
        Multiclass option can be either 'ovr' or 'multinomial'. If the option
        chosen is 'ovr', then a binary problem is fit for each label. Else
        the loss minimised is the multinomial loss fit across
        the entire probability distribution. Works only for the 'lbfgs' and
        'newton-cg' solvers.
    random_state : int seed, RandomState instance, or None (default)
        The seed of the pseudo random number generator to use when
        shuffling the data.
    check_input : bool, default True
        If False, the input arrays X and y will not be checked.
    max_squared_sum : float, default None
        Maximum squared sum of X over samples. Used only in SAG solver.
        If None, it will be computed, going through all the samples.
        The value should be precomputed to speed up cross validation.
    sample_weight : array-like, shape(n_samples,) optional
        Array of weights that are assigned to individual samples.
        If not provided, then each sample is given unit weight.

    Returns
    -------
    coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
        List of coefficients for the Logistic Regression model. If
        fit_intercept is set to True then the second dimension will be
        n_features + 1, where the last item represents the intercept.
    Cs : ndarray
        Grid of Cs used for cross-validation.
    n_iter : array, shape (n_cs,)
        Actual number of iteration for each Cs.

    Notes
    -----
    You might get slighly different results with the solver liblinear than
    with the others since this uses LIBLINEAR which penalizes the intercept.
    """
    if copy:
        warnings.warn("A copy is not required anymore. The 'copy' parameter "
                      "is deprecated and will be removed in 0.19.",
                      DeprecationWarning)

    if isinstance(Cs, numbers.Integral):
        Cs = np.logspace(-4, 4, Cs)

    _check_solver_option(solver, multi_class, penalty, dual, sample_weight)

    # Preprocessing.
    if check_input or copy:
        X = check_array(X, accept_sparse='csr', dtype=np.float64)
        y = check_array(y, ensure_2d=False, copy=copy, dtype=None)
        check_consistent_length(X, y)
    _, n_features = X.shape
    classes = np.unique(y)
    random_state = check_random_state(random_state)

    if pos_class is None and multi_class != 'multinomial':
        if (classes.size > 2):
            raise ValueError('To fit OvR, use the pos_class argument')
        # np.unique(y) gives labels in sorted order.
        pos_class = classes[1]

    # If sample weights exist, convert them to array (support for lists)
    # and check length
    # Otherwise set them to 1 for all examples
    if sample_weight is not None:
        sample_weight = np.array(sample_weight, dtype=np.float64, order='C')
        check_consistent_length(y, sample_weight)
    else:
        sample_weight = np.ones(X.shape[0])

    # If class_weights is a dict (provided by the user), the weights
    # are assigned to the original labels. If it is "balanced", then
    # the class_weights are assigned after masking the labels with a OvR.
    le = LabelEncoder()

    if isinstance(class_weight, dict) or multi_class == 'multinomial':
        if solver == "liblinear":
            if classes.size == 2:
                # Reconstruct the weights with keys 1 and -1
                temp = {1: class_weight[pos_class],
                        -1: class_weight[classes[0]]}
                class_weight = temp.copy()
            else:
                raise ValueError("In LogisticRegressionCV the liblinear "
                                 "solver cannot handle multiclass with "
                                 "class_weight of type dict. Use the lbfgs, "
                                 "newton-cg or sag solvers or set "
                                 "class_weight='balanced'")
        else:
            # Fold the class weights into the per-sample weights.
            class_weight_ = compute_class_weight(class_weight, classes, y)
            sample_weight *= class_weight_[le.fit_transform(y)]

    # For doing a ovr, we need to mask the labels first. for the
    # multinomial case this is not necessary.
    if multi_class == 'ovr':
        w0 = np.zeros(n_features + int(fit_intercept))
        mask_classes = np.array([-1, 1])
        mask = (y == pos_class)
        y_bin = np.ones(y.shape, dtype=np.float64)
        y_bin[~mask] = -1.
        # for compute_class_weight
        # 'auto' is deprecated and will be removed in 0.19
        if class_weight in ("auto", "balanced"):
            class_weight_ = compute_class_weight(class_weight, mask_classes,
                                                 y_bin)
            sample_weight *= class_weight_[le.fit_transform(y_bin)]
    else:
        lbin = LabelBinarizer()
        Y_binarized = lbin.fit_transform(y)
        if Y_binarized.shape[1] == 1:
            # LabelBinarizer emits one column for binary problems; expand to
            # the two-column one-hot form the multinomial loss expects.
            Y_binarized = np.hstack([1 - Y_binarized, Y_binarized])
        w0 = np.zeros((Y_binarized.shape[1], n_features + int(fit_intercept)),
                      order='F')

    if coef is not None:
        # it must work both giving the bias term and not
        if multi_class == 'ovr':
            if coef.size not in (n_features, w0.size):
                raise ValueError(
                    'Initialization coef is of shape %d, expected shape '
                    '%d or %d' % (coef.size, n_features, w0.size))
            w0[:coef.size] = coef
        else:
            # For binary problems coef.shape[0] should be 1, otherwise it
            # should be classes.size.
            n_vectors = classes.size
            if n_vectors == 2:
                n_vectors = 1
            if (coef.shape[0] != n_vectors or
                    coef.shape[1] not in (n_features, n_features + 1)):
                raise ValueError(
                    'Initialization coef is of shape (%d, %d), expected '
                    'shape (%d, %d) or (%d, %d)' % (
                        coef.shape[0], coef.shape[1], classes.size,
                        n_features, classes.size, n_features + 1))
            w0[:, :coef.shape[1]] = coef

    if multi_class == 'multinomial':
        # fmin_l_bfgs_b and newton-cg accepts only ravelled parameters.
        w0 = w0.ravel()
        target = Y_binarized
        if solver == 'lbfgs':
            func = lambda x, *args: _multinomial_loss_grad(x, *args)[0:2]
        elif solver == 'newton-cg':
            func = lambda x, *args: _multinomial_loss(x, *args)[0]
            grad = lambda x, *args: _multinomial_loss_grad(x, *args)[1]
            hess = _multinomial_grad_hess
    else:
        target = y_bin
        if solver == 'lbfgs':
            func = _logistic_loss_and_grad
        elif solver == 'newton-cg':
            func = _logistic_loss
            grad = lambda x, *args: _logistic_loss_and_grad(x, *args)[1]
            hess = _logistic_grad_hess

    coefs = list()
    warm_start_sag = {'coef': w0}
    n_iter = np.zeros(len(Cs), dtype=np.int32)
    for i, C in enumerate(Cs):
        if solver == 'lbfgs':
            try:
                w0, loss, info = optimize.fmin_l_bfgs_b(
                    func, w0, fprime=None,
                    args=(X, target, 1. / C, sample_weight),
                    iprint=(verbose > 0) - 1, pgtol=tol, maxiter=max_iter)
            except TypeError:
                # old scipy doesn't have maxiter
                w0, loss, info = optimize.fmin_l_bfgs_b(
                    func, w0, fprime=None,
                    args=(X, target, 1. / C, sample_weight),
                    iprint=(verbose > 0) - 1, pgtol=tol)
            if info["warnflag"] == 1 and verbose > 0:
                warnings.warn("lbfgs failed to converge. Increase the number "
                              "of iterations.")
            try:
                n_iter_i = info['nit'] - 1
            except KeyError:
                # BUG FIX: was a bare `except:`, which would also swallow
                # KeyboardInterrupt/SystemExit. Only old SciPy releases omit
                # the 'nit' key; fall back to the function-evaluation count.
                n_iter_i = info['funcalls'] - 1
        elif solver == 'newton-cg':
            args = (X, target, 1. / C, sample_weight)
            w0, n_iter_i = newton_cg(hess, func, grad, w0, args=args,
                                     maxiter=max_iter, tol=tol)
        elif solver == 'liblinear':
            coef_, intercept_, n_iter_i, = _fit_liblinear(
                X, target, C, fit_intercept, intercept_scaling, class_weight,
                penalty, dual, verbose, max_iter, tol, random_state)
            if fit_intercept:
                w0 = np.concatenate([coef_.ravel(), intercept_])
            else:
                w0 = coef_.ravel()
        elif solver == 'sag':
            w0, n_iter_i, warm_start_sag = sag_solver(
                X, target, sample_weight, 'log', 1. / C, max_iter, tol,
                verbose, random_state, False, max_squared_sum,
                warm_start_sag)
        else:
            raise ValueError("solver must be one of {'liblinear', 'lbfgs', "
                             "'newton-cg', 'sag'}, got '%s' instead" % solver)

        if multi_class == 'multinomial':
            multi_w0 = np.reshape(w0, (classes.size, -1))
            if classes.size == 2:
                # Binary multinomial: keep only the positive-class row.
                multi_w0 = multi_w0[1][np.newaxis, :]
            coefs.append(multi_w0)
        else:
            coefs.append(w0.copy())
        n_iter[i] = n_iter_i
    return coefs, np.array(Cs), n_iter
# helper function for LogisticCV
def _log_reg_scoring_path(X, y, train, test, pos_class=None, Cs=10,
                          scoring=None, fit_intercept=False,
                          max_iter=100, tol=1e-4, class_weight=None,
                          verbose=0, solver='lbfgs', penalty='l2',
                          dual=False, intercept_scaling=1.,
                          multi_class='ovr', random_state=None,
                          max_squared_sum=None, sample_weight=None):
    """Computes scores across logistic_regression_path

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data.
    y : array-like, shape (n_samples,) or (n_samples, n_targets)
        Target labels.
    train : list of indices
        The indices of the train set.
    test : list of indices
        The indices of the test set.
    pos_class : int, None
        The class with respect to which we perform a one-vs-all fit.
        If None, then it is assumed that the given problem is binary.
    Cs : list of floats | int
        Each of the values in Cs describes the inverse of
        regularization strength. If Cs is as an int, then a grid of Cs
        values are chosen in a logarithmic scale between 1e-4 and 1e4.
        If not provided, then a fixed set of values for Cs are used.
    scoring : callable
        For a list of scoring functions that can be used, look at
        :mod:`sklearn.metrics`. The default scoring option used is
        accuracy_score.
    fit_intercept : bool
        If False, then the bias term is set to zero. Else the last
        term of each coef_ gives us the intercept.
    max_iter : int
        Maximum number of iterations for the solver.
    tol : float
        Tolerance for stopping criteria.
    class_weight : dict or 'balanced', optional
        Weights associated with classes in the form ``{class_label: weight}``.
        If not given, all classes are supposed to have weight one.
        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``
        Note that these weights will be multiplied with sample_weight (passed
        through the fit method) if sample_weight is specified.
    verbose : int
        For the liblinear and lbfgs solvers set verbose to any positive
        number for verbosity.
    solver : {'lbfgs', 'newton-cg', 'liblinear', 'sag'}
        Decides which solver to use.
    penalty : str, 'l1' or 'l2'
        Used to specify the norm used in the penalization. The newton-cg and
        lbfgs solvers support only l2 penalties.
    dual : bool
        Dual or primal formulation. Dual formulation is only implemented for
        l2 penalty with liblinear solver. Prefer dual=False when
        n_samples > n_features.
    intercept_scaling : float, default 1.
        This parameter is useful only when the solver 'liblinear' is used
        and self.fit_intercept is set to True. In this case, x becomes
        [x, self.intercept_scaling],
        i.e. a "synthetic" feature with constant value equals to
        intercept_scaling is appended to the instance vector.
        The intercept becomes intercept_scaling * synthetic feature weight
        Note! the synthetic feature weight is subject to l1/l2 regularization
        as all other features.
        To lessen the effect of regularization on synthetic feature weight
        (and therefore on the intercept) intercept_scaling has to be increased.
    multi_class : str, {'ovr', 'multinomial'}
        Multiclass option can be either 'ovr' or 'multinomial'. If the option
        chosen is 'ovr', then a binary problem is fit for each label. Else
        the loss minimised is the multinomial loss fit across
        the entire probability distribution. Works only for the 'lbfgs' and
        'newton-cg' solver.
    random_state : int seed, RandomState instance, or None (default)
        The seed of the pseudo random number generator to use when
        shuffling the data.
    max_squared_sum : float, default None
        Maximum squared sum of X over samples. Used only in SAG solver.
        If None, it will be computed, going through all the samples.
        The value should be precomputed to speed up cross validation.
    sample_weight : array-like, shape(n_samples,) optional
        Array of weights that are assigned to individual samples.
        If not provided, then each sample is given unit weight.

    Returns
    -------
    coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
        List of coefficients for the Logistic Regression model. If
        fit_intercept is set to True then the second dimension will be
        n_features + 1, where the last item represents the intercept.
    Cs : ndarray
        Grid of Cs used for cross-validation.
    scores : ndarray, shape (n_cs,)
        Scores obtained for each Cs.
    n_iter : array, shape(n_cs,)
        Actual number of iteration for each Cs.
    """
    _check_solver_option(solver, multi_class, penalty, dual, sample_weight)

    X_train = X[train]
    X_test = X[test]
    y_train = y[train]
    y_test = y[test]

    if sample_weight is not None:
        sample_weight = sample_weight[train]

    coefs, Cs, n_iter = logistic_regression_path(
        X_train, y_train, Cs=Cs, fit_intercept=fit_intercept,
        solver=solver, max_iter=max_iter, class_weight=class_weight,
        pos_class=pos_class, multi_class=multi_class,
        tol=tol, verbose=verbose, dual=dual, penalty=penalty,
        intercept_scaling=intercept_scaling, random_state=random_state,
        check_input=False, max_squared_sum=max_squared_sum,
        sample_weight=sample_weight)

    log_reg = LogisticRegression(fit_intercept=fit_intercept)

    # The score method of Logistic Regression has a classes_ attribute.
    if multi_class == 'ovr':
        log_reg.classes_ = np.array([-1, 1])
    elif multi_class == 'multinomial':
        log_reg.classes_ = np.unique(y_train)
    else:
        # BUG FIX: the format specifier was '%d', which raises TypeError for
        # the string-valued multi_class instead of this intended ValueError.
        raise ValueError("multi_class should be either multinomial or ovr, "
                         "got %s" % multi_class)

    if pos_class is not None:
        # Mask labels into the {-1, 1} encoding used by the OvR fit.
        mask = (y_test == pos_class)
        y_test = np.ones(y_test.shape, dtype=np.float64)
        y_test[~mask] = -1.

    # To deal with object dtypes, we need to convert into an array of floats.
    y_test = check_array(y_test, dtype=np.float64, ensure_2d=False)

    scores = list()

    if isinstance(scoring, six.string_types):
        scoring = SCORERS[scoring]
    for w in coefs:
        if multi_class == 'ovr':
            w = w[np.newaxis, :]
        if fit_intercept:
            log_reg.coef_ = w[:, :-1]
            log_reg.intercept_ = w[:, -1]
        else:
            log_reg.coef_ = w
            log_reg.intercept_ = 0.

        if scoring is None:
            scores.append(log_reg.score(X_test, y_test))
        else:
            scores.append(scoring(log_reg, X_test, y_test))
    return coefs, Cs, np.array(scores), n_iter
class LogisticRegression(BaseEstimator, LinearClassifierMixin,
_LearntSelectorMixin, SparseCoefMixin):
"""Logistic Regression (aka logit, MaxEnt) classifier.
In the multiclass case, the training algorithm uses the one-vs-rest (OvR)
scheme if the 'multi_class' option is set to 'ovr' and uses the
cross-entropy loss, if the 'multi_class' option is set to 'multinomial'.
(Currently the 'multinomial' option is supported only by the 'lbfgs' and
'newton-cg' solvers.)
This class implements regularized logistic regression using the
`liblinear` library, newton-cg and lbfgs solvers. It can handle both
dense and sparse input. Use C-ordered arrays or CSR matrices containing
64-bit floats for optimal performance; any other input format will be
converted (and copied).
The newton-cg and lbfgs solvers support only L2 regularization with primal
formulation. The liblinear solver supports both L1 and L2 regularization,
with a dual formulation only for the L2 penalty.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
C : float, optional (default=1.0)
Inverse of regularization strength; must be a positive float.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, default: True
Specifies if a constant (a.k.a. bias or intercept) should be
added to the decision function.
intercept_scaling : float, default: 1
Useful only if solver is liblinear.
when self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
max_iter : int
Useful only for the newton-cg, sag and lbfgs solvers.
Maximum number of iterations taken for the solvers to converge.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
solver : {'newton-cg', 'lbfgs', 'liblinear', 'sag'}
Algorithm to use in the optimization problem.
- For small datasets, 'liblinear' is a good choice, whereas 'sag' is
faster for large ones.
- For multiclass problems, only 'newton-cg' and 'lbfgs' handle
multinomial loss; 'sag' and 'liblinear' are limited to
one-versus-rest schemes.
- 'newton-cg', 'lbfgs' and 'sag' only handle L2 penalty.
Note that 'sag' fast convergence is only guaranteed on features with
approximately the same scale. You can preprocess the data with a
scaler from sklearn.preprocessing.
tol : float, optional
Tolerance for stopping criteria.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs'
solver.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
Useless for liblinear solver.
n_jobs : int, optional
Number of CPU cores used during the cross-validation loop. If given
a value of -1, all cores are used.
Attributes
----------
coef_ : array, shape (n_classes, n_features)
Coefficient of the features in the decision function.
intercept_ : array, shape (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
If `fit_intercept` is set to False, the intercept is set to zero.
n_iter_ : array, shape (n_classes,) or (1, )
Actual number of iterations for all classes. If binary or multinomial,
it returns only 1 element. For liblinear solver, only the maximum
number of iteration across all classes is given.
See also
--------
SGDClassifier : incrementally trained logistic regression (when given
the parameter ``loss="log"``).
sklearn.svm.LinearSVC : learns SVM models using the same algorithm.
Notes
-----
The underlying C implementation uses a random number generator to
select features when fitting the model. It is thus not uncommon,
to have slightly different results for the same input data. If
that happens, try with a smaller tol parameter.
Predict output may not match that of standalone liblinear in certain
cases. See :ref:`differences from liblinear <liblinear_differences>`
in the narrative documentation.
References
----------
LIBLINEAR -- A Library for Large Linear Classification
http://www.csie.ntu.edu.tw/~cjlin/liblinear/
Hsiang-Fu Yu, Fang-Lan Huang, Chih-Jen Lin (2011). Dual coordinate descent
methods for logistic regression and maximum entropy models.
Machine Learning 85(1-2):41-75.
http://www.csie.ntu.edu.tw/~cjlin/papers/maxent_dual.pdf
"""
def __init__(self, penalty='l2', dual=False, tol=1e-4, C=1.0,
fit_intercept=True, intercept_scaling=1, class_weight=None,
random_state=None, solver='liblinear', max_iter=100,
multi_class='ovr', verbose=0, warm_start=False, n_jobs=1):
self.penalty = penalty
self.dual = dual
self.tol = tol
self.C = C
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.random_state = random_state
self.solver = solver
self.max_iter = max_iter
self.multi_class = multi_class
self.verbose = verbose
self.warm_start = warm_start
self.n_jobs = n_jobs
    def fit(self, X, y, sample_weight=None):
        """Fit the model according to the given training data.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training vector, where n_samples in the number of samples and
            n_features is the number of features.
        y : array-like, shape (n_samples,)
            Target vector relative to X.
        sample_weight : array-like, shape (n_samples,) optional
            Array of weights that are assigned to individual samples.
            If not provided, then each sample is given unit weight.
        Returns
        -------
        self : object
            Returns self.
        """
        # Validate the scalar hyper-parameters before touching the data.
        # NOTE(review): the test is ``self.C < 0`` although the message says
        # "positive", so C == 0 is accepted here -- confirm that is intended.
        if not isinstance(self.C, numbers.Number) or self.C < 0:
            raise ValueError("Penalty term must be positive; got (C=%r)"
                             % self.C)
        if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0:
            raise ValueError("Maximum number of iteration must be positive;"
                             " got (max_iter=%r)" % self.max_iter)
        if not isinstance(self.tol, numbers.Number) or self.tol < 0:
            raise ValueError("Tolerance for stopping criteria must be "
                             "positive; got (tol=%r)" % self.tol)
        # All solvers expect C-ordered float64 data (CSR when sparse).
        X, y = check_X_y(X, y, accept_sparse='csr', dtype=np.float64,
                         order="C")
        check_classification_targets(y)
        self.classes_ = np.unique(y)
        n_samples, n_features = X.shape
        _check_solver_option(self.solver, self.multi_class, self.penalty,
                             self.dual, sample_weight)
        # liblinear fits the whole multi-class problem itself; delegate and
        # return early.
        if self.solver == 'liblinear':
            self.coef_, self.intercept_, n_iter_ = _fit_liblinear(
                X, y, self.C, self.fit_intercept, self.intercept_scaling,
                self.class_weight, self.penalty, self.dual, self.verbose,
                self.max_iter, self.tol, self.random_state)
            self.n_iter_ = np.array([n_iter_])
            return self
        # Only the 'sag' solver uses this statistic.
        max_squared_sum = get_max_squared_sum(X) if self.solver == 'sag' \
            else None
        n_classes = len(self.classes_)
        classes_ = self.classes_
        if n_classes < 2:
            raise ValueError("This solver needs samples of at least 2 classes"
                             " in the data, but the data contains only one"
                             " class: %r" % classes_[0])
        # A binary problem is fit as a single model against classes_[1].
        if len(self.classes_) == 2:
            n_classes = 1
            classes_ = classes_[1:]
        if self.warm_start:
            warm_start_coef = getattr(self, 'coef_', None)
        else:
            warm_start_coef = None
        # When warm-starting with an intercept, carry it along as an extra
        # trailing column of the coefficient matrix.
        if warm_start_coef is not None and self.fit_intercept:
            warm_start_coef = np.append(warm_start_coef,
                                        self.intercept_[:, np.newaxis],
                                        axis=1)
        self.coef_ = list()
        self.intercept_ = np.zeros(n_classes)
        # Hack so that we iterate only once for the multinomial case.
        if self.multi_class == 'multinomial':
            classes_ = [None]
            warm_start_coef = [warm_start_coef]
        if warm_start_coef is None:
            warm_start_coef = [None] * n_classes
        path_func = delayed(logistic_regression_path)
        # The SAG solver releases the GIL so it's more efficient to use
        # threads for this solver.
        backend = 'threading' if self.solver == 'sag' else 'multiprocessing'
        # One regularization path per class (OvR), or a single multinomial one.
        fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
                               backend=backend)(
            path_func(X, y, pos_class=class_, Cs=[self.C],
                      fit_intercept=self.fit_intercept, tol=self.tol,
                      verbose=self.verbose, solver=self.solver, copy=False,
                      multi_class=self.multi_class, max_iter=self.max_iter,
                      class_weight=self.class_weight, check_input=False,
                      random_state=self.random_state, coef=warm_start_coef_,
                      max_squared_sum=max_squared_sum,
                      sample_weight=sample_weight)
            for (class_, warm_start_coef_) in zip(classes_, warm_start_coef))
        fold_coefs_, _, n_iter_ = zip(*fold_coefs_)
        self.n_iter_ = np.asarray(n_iter_, dtype=np.int32)[:, 0]
        if self.multi_class == 'multinomial':
            self.coef_ = fold_coefs_[0][0]
        else:
            self.coef_ = np.asarray(fold_coefs_)
            # Last column holds the intercept when fit_intercept is True.
            self.coef_ = self.coef_.reshape(n_classes, n_features +
                                            int(self.fit_intercept))
        if self.fit_intercept:
            self.intercept_ = self.coef_[:, -1]
            self.coef_ = self.coef_[:, :-1]
        return self
def predict_proba(self, X):
"""Probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
For a multi_class problem, if multi_class is set to be "multinomial"
the softmax function is used to find the predicted probability of
each class.
Else use a one-vs-rest approach, i.e calculate the probability
of each class assuming it to be positive using the logistic function.
and normalize these values across all the classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in ``self.classes_``.
"""
if not hasattr(self, "coef_"):
raise NotFittedError("Call fit before prediction")
calculate_ovr = self.coef_.shape[0] == 1 or self.multi_class == "ovr"
if calculate_ovr:
return super(LogisticRegression, self)._predict_proba_lr(X)
else:
return softmax(self.decision_function(X), copy=False)
def predict_log_proba(self, X):
"""Log of probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in ``self.classes_``.
"""
return np.log(self.predict_proba(X))
class LogisticRegressionCV(LogisticRegression, BaseEstimator,
                           LinearClassifierMixin, _LearntSelectorMixin):
    """Logistic Regression CV (aka logit, MaxEnt) classifier.
    This class implements logistic regression using liblinear, newton-cg, sag
    of lbfgs optimizer. The newton-cg, sag and lbfgs solvers support only L2
    regularization with primal formulation. The liblinear solver supports both
    L1 and L2 regularization, with a dual formulation only for the L2 penalty.
    For the grid of Cs values (that are set by default to be ten values in
    a logarithmic scale between 1e-4 and 1e4), the best hyperparameter is
    selected by the cross-validator StratifiedKFold, but it can be changed
    using the cv parameter. In the case of newton-cg and lbfgs solvers,
    we warm start along the path i.e guess the initial coefficients of the
    present fit to be the coefficients got after convergence in the previous
    fit, so it is supposed to be faster for high-dimensional dense data.
    For a multiclass problem, the hyperparameters for each class are computed
    using the best scores got by doing a one-vs-rest in parallel across all
    folds and classes. Hence this is not the true multinomial loss.
    Read more in the :ref:`User Guide <logistic_regression>`.
    Parameters
    ----------
    Cs : list of floats | int
        Each of the values in Cs describes the inverse of regularization
        strength. If Cs is as an int, then a grid of Cs values are chosen
        in a logarithmic scale between 1e-4 and 1e4.
        Like in support vector machines, smaller values specify stronger
        regularization.
    fit_intercept : bool, default: True
        Specifies if a constant (a.k.a. bias or intercept) should be
        added to the decision function.
    class_weight : dict or 'balanced', optional
        Weights associated with classes in the form ``{class_label: weight}``.
        If not given, all classes are supposed to have weight one.
        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``
        Note that these weights will be multiplied with sample_weight (passed
        through the fit method) if sample_weight is specified.
    cv : integer or cross-validation generator
        The default cross-validation generator used is Stratified K-Folds.
        If an integer is provided, then it is the number of folds used.
        See the module :mod:`sklearn.cross_validation` module for the
        list of possible cross-validation objects.
    penalty : str, 'l1' or 'l2'
        Used to specify the norm used in the penalization. The newton-cg and
        lbfgs solvers support only l2 penalties.
    dual : bool
        Dual or primal formulation. Dual formulation is only implemented for
        l2 penalty with liblinear solver. Prefer dual=False when
        n_samples > n_features.
    scoring : callabale
        Scoring function to use as cross-validation criteria. For a list of
        scoring functions that can be used, look at :mod:`sklearn.metrics`.
        The default scoring option used is accuracy_score.
    solver : {'newton-cg', 'lbfgs', 'liblinear', 'sag'}
        Algorithm to use in the optimization problem.
        - For small datasets, 'liblinear' is a good choice, whereas 'sag' is
          faster for large ones.
        - For multiclass problems, only 'newton-cg' and 'lbfgs' handle
          multinomial loss; 'sag' and 'liblinear' are limited to
          one-versus-rest schemes.
        - 'newton-cg', 'lbfgs' and 'sag' only handle L2 penalty.
        - 'liblinear' might be slower in LogisticRegressionCV because it does
          not handle warm-starting.
    tol : float, optional
        Tolerance for stopping criteria.
    max_iter : int, optional
        Maximum number of iterations of the optimization algorithm.
    n_jobs : int, optional
        Number of CPU cores used during the cross-validation loop. If given
        a value of -1, all cores are used.
    verbose : int
        For the 'liblinear', 'sag' and 'lbfgs' solvers set verbose to any
        positive number for verbosity.
    refit : bool
        If set to True, the scores are averaged across all folds, and the
        coefs and the C that corresponds to the best score is taken, and a
        final refit is done using these parameters.
        Otherwise the coefs, intercepts and C that correspond to the
        best scores across folds are averaged.
    multi_class : str, {'ovr', 'multinomial'}
        Multiclass option can be either 'ovr' or 'multinomial'. If the option
        chosen is 'ovr', then a binary problem is fit for each label. Else
        the loss minimised is the multinomial loss fit across
        the entire probability distribution. Works only for 'lbfgs' and
        'newton-cg' solvers.
    intercept_scaling : float, default 1.
        Useful only if solver is liblinear.
        This parameter is useful only when the solver 'liblinear' is used
        and self.fit_intercept is set to True. In this case, x becomes
        [x, self.intercept_scaling],
        i.e. a "synthetic" feature with constant value equals to
        intercept_scaling is appended to the instance vector.
        The intercept becomes intercept_scaling * synthetic feature weight
        Note! the synthetic feature weight is subject to l1/l2 regularization
        as all other features.
        To lessen the effect of regularization on synthetic feature weight
        (and therefore on the intercept) intercept_scaling has to be increased.
    random_state : int seed, RandomState instance, or None (default)
        The seed of the pseudo random number generator to use when
        shuffling the data.
    Attributes
    ----------
    coef_ : array, shape (1, n_features) or (n_classes, n_features)
        Coefficient of the features in the decision function.
        `coef_` is of shape (1, n_features) when the given problem
        is binary.
        `coef_` is readonly property derived from `raw_coef_` that
        follows the internal memory layout of liblinear.
    intercept_ : array, shape (1,) or (n_classes,)
        Intercept (a.k.a. bias) added to the decision function.
        It is available only when parameter intercept is set to True
        and is of shape(1,) when the problem is binary.
    Cs_ : array
        Array of C i.e. inverse of regularization parameter values used
        for cross-validation.
    coefs_paths_ : array, shape ``(n_folds, len(Cs_), n_features)`` or \
                   ``(n_folds, len(Cs_), n_features + 1)``
        dict with classes as the keys, and the path of coefficients obtained
        during cross-validating across each fold and then across each Cs
        after doing an OvR for the corresponding class as values.
        If the 'multi_class' option is set to 'multinomial', then
        the coefs_paths are the coefficients corresponding to each class.
        Each dict value has shape ``(n_folds, len(Cs_), n_features)`` or
        ``(n_folds, len(Cs_), n_features + 1)`` depending on whether the
        intercept is fit or not.
    scores_ : dict
        dict with classes as the keys, and the values as the
        grid of scores obtained during cross-validating each fold, after doing
        an OvR for the corresponding class. If the 'multi_class' option
        given is 'multinomial' then the same scores are repeated across
        all classes, since this is the multinomial class.
        Each dict value has shape (n_folds, len(Cs))
    C_ : array, shape (n_classes,) or (n_classes - 1,)
        Array of C that maps to the best scores across every class. If refit is
        set to False, then for each class, the best C is the average of the
        C's that correspond to the best scores for each fold.
    n_iter_ : array, shape (n_classes, n_folds, n_cs) or (1, n_folds, n_cs)
        Actual number of iterations for all classes, folds and Cs.
        In the binary or multinomial cases, the first dimension is equal to 1.
    See also
    --------
    LogisticRegression
    """
    def __init__(self, Cs=10, fit_intercept=True, cv=None, dual=False,
                 penalty='l2', scoring=None, solver='lbfgs', tol=1e-4,
                 max_iter=100, class_weight=None, n_jobs=1, verbose=0,
                 refit=True, intercept_scaling=1., multi_class='ovr',
                 random_state=None):
        # Hyper-parameters are stored untouched; validation happens in fit.
        self.Cs = Cs
        self.fit_intercept = fit_intercept
        self.cv = cv
        self.dual = dual
        self.penalty = penalty
        self.scoring = scoring
        self.tol = tol
        self.max_iter = max_iter
        self.class_weight = class_weight
        self.n_jobs = n_jobs
        self.verbose = verbose
        self.solver = solver
        self.refit = refit
        self.intercept_scaling = intercept_scaling
        self.multi_class = multi_class
        self.random_state = random_state
    def fit(self, X, y, sample_weight=None):
        """Fit the model according to the given training data.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training vector, where n_samples in the number of samples and
            n_features is the number of features.
        y : array-like, shape (n_samples,)
            Target vector relative to X.
        sample_weight : array-like, shape (n_samples,) optional
            Array of weights that are assigned to individual samples.
            If not provided, then each sample is given unit weight.
        Returns
        -------
        self : object
            Returns self.
        """
        _check_solver_option(self.solver, self.multi_class, self.penalty,
                             self.dual, sample_weight)
        # Validate scalar hyper-parameters before touching the data.
        if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0:
            raise ValueError("Maximum number of iteration must be positive;"
                             " got (max_iter=%r)" % self.max_iter)
        if not isinstance(self.tol, numbers.Number) or self.tol < 0:
            raise ValueError("Tolerance for stopping criteria must be "
                             "positive; got (tol=%r)" % self.tol)
        X, y = check_X_y(X, y, accept_sparse='csr', dtype=np.float64,
                         order="C")
        # Only the 'sag' solver uses this statistic.
        max_squared_sum = get_max_squared_sum(X) if self.solver == 'sag' \
            else None
        check_classification_targets(y)
        if y.ndim == 2 and y.shape[1] == 1:
            warnings.warn(
                "A column-vector y was passed when a 1d array was"
                " expected. Please change the shape of y to "
                "(n_samples, ), for example using ravel().",
                DataConversionWarning)
            y = np.ravel(y)
        check_consistent_length(X, y)
        # init cross-validation generator
        cv = check_cv(self.cv, X, y, classifier=True)
        # Materialize the splits once so every (label, fold) job below uses
        # exactly the same folds.
        folds = list(cv)
        self._enc = LabelEncoder()
        self._enc.fit(y)
        labels = self.classes_ = np.unique(y)
        n_classes = len(labels)
        if n_classes < 2:
            raise ValueError("This solver needs samples of at least 2 classes"
                             " in the data, but the data contains only one"
                             " class: %r" % self.classes_[0])
        if n_classes == 2:
            # OvR in case of binary problems is as good as fitting
            # the higher label
            n_classes = 1
            labels = labels[1:]
        # We need this hack to iterate only once over labels, in the case of
        # multi_class = multinomial, without changing the value of the labels.
        iter_labels = labels
        if self.multi_class == 'multinomial':
            iter_labels = [None]
        if self.class_weight and not(isinstance(self.class_weight, dict) or
                                     self.class_weight in
                                     ['balanced', 'auto']):
            # 'auto' is deprecated and will be removed in 0.19
            raise ValueError("class_weight provided should be a "
                             "dict or 'balanced'")
        # compute the class weights for the entire dataset y
        if self.class_weight in ("auto", "balanced"):
            classes = np.unique(y)
            class_weight = compute_class_weight(self.class_weight, classes, y)
            class_weight = dict(zip(classes, class_weight))
        else:
            class_weight = self.class_weight
        path_func = delayed(_log_reg_scoring_path)
        # The SAG solver releases the GIL so it's more efficient to use
        # threads for this solver.
        backend = 'threading' if self.solver == 'sag' else 'multiprocessing'
        # One scored regularization path per (label, fold) combination.
        fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
                               backend=backend)(
            path_func(X, y, train, test, pos_class=label, Cs=self.Cs,
                      fit_intercept=self.fit_intercept, penalty=self.penalty,
                      dual=self.dual, solver=self.solver, tol=self.tol,
                      max_iter=self.max_iter, verbose=self.verbose,
                      class_weight=class_weight, scoring=self.scoring,
                      multi_class=self.multi_class,
                      intercept_scaling=self.intercept_scaling,
                      random_state=self.random_state,
                      max_squared_sum=max_squared_sum,
                      sample_weight=sample_weight
                      )
            for label in iter_labels
            for train, test in folds)
        if self.multi_class == 'multinomial':
            multi_coefs_paths, Cs, multi_scores, n_iter_ = zip(*fold_coefs_)
            multi_coefs_paths = np.asarray(multi_coefs_paths)
            multi_scores = np.asarray(multi_scores)
            # This is just to maintain API similarity between the ovr and
            # multinomial option.
            # Coefs_paths in now n_folds X len(Cs) X n_classes X n_features
            # we need it to be n_classes X len(Cs) X n_folds X n_features
            # to be similar to "ovr".
            coefs_paths = np.rollaxis(multi_coefs_paths, 2, 0)
            # Multinomial has a true score across all labels. Hence the
            # shape is n_folds X len(Cs). We need to repeat this score
            # across all labels for API similarity.
            scores = np.tile(multi_scores, (n_classes, 1, 1))
            self.Cs_ = Cs[0]
            self.n_iter_ = np.reshape(n_iter_, (1, len(folds),
                                                len(self.Cs_)))
        else:
            coefs_paths, Cs, scores, n_iter_ = zip(*fold_coefs_)
            self.Cs_ = Cs[0]
            coefs_paths = np.reshape(coefs_paths, (n_classes, len(folds),
                                                   len(self.Cs_), -1))
            self.n_iter_ = np.reshape(n_iter_, (n_classes, len(folds),
                                                len(self.Cs_)))
        self.coefs_paths_ = dict(zip(labels, coefs_paths))
        scores = np.reshape(scores, (n_classes, len(folds), -1))
        self.scores_ = dict(zip(labels, scores))
        self.C_ = list()
        self.coef_ = np.empty((n_classes, X.shape[1]))
        self.intercept_ = np.zeros(n_classes)
        # hack to iterate only once for multinomial case.
        if self.multi_class == 'multinomial':
            scores = multi_scores
            coefs_paths = multi_coefs_paths
        for index, label in enumerate(iter_labels):
            if self.multi_class == 'ovr':
                scores = self.scores_[label]
                coefs_paths = self.coefs_paths_[label]
            if self.refit:
                # Pick the C whose score summed over folds is best, then
                # refit on the full data, warm-started from the
                # fold-averaged coefficients at that C.
                best_index = scores.sum(axis=0).argmax()
                C_ = self.Cs_[best_index]
                self.C_.append(C_)
                if self.multi_class == 'multinomial':
                    coef_init = np.mean(coefs_paths[:, best_index, :, :],
                                        axis=0)
                else:
                    coef_init = np.mean(coefs_paths[:, best_index, :], axis=0)
                w, _, _ = logistic_regression_path(
                    X, y, pos_class=label, Cs=[C_], solver=self.solver,
                    fit_intercept=self.fit_intercept, coef=coef_init,
                    max_iter=self.max_iter, tol=self.tol,
                    penalty=self.penalty, copy=False,
                    class_weight=class_weight,
                    multi_class=self.multi_class,
                    verbose=max(0, self.verbose - 1),
                    random_state=self.random_state,
                    check_input=False, max_squared_sum=max_squared_sum,
                    sample_weight=sample_weight)
                w = w[0]
            else:
                # Take the best scores across every fold and the average of all
                # coefficients corresponding to the best scores.
                best_indices = np.argmax(scores, axis=1)
                w = np.mean([coefs_paths[i][best_indices[i]]
                             for i in range(len(folds))], axis=0)
                self.C_.append(np.mean(self.Cs_[best_indices]))
            if self.multi_class == 'multinomial':
                # w already holds every class; the trailing column is the
                # intercept when fit_intercept is True.
                self.C_ = np.tile(self.C_, n_classes)
                self.coef_ = w[:, :X.shape[1]]
                if self.fit_intercept:
                    self.intercept_ = w[:, -1]
            else:
                self.coef_[index] = w[: X.shape[1]]
                if self.fit_intercept:
                    self.intercept_[index] = w[-1]
        self.C_ = np.asarray(self.C_)
        return self
| bsd-3-clause |
lamontu/data-analysis | pandas/series.py | 1 | 1171 | # -*- coding: utf-8 -*-
from pandas import Series
# Demo script: the exact printed output is the point of each section.
# --- Construction from a plain list (default 0..n-1 integer index) ---
print("## Create Series using array:")
obj = Series([4, 7, -5, 3])
print(obj)
print(obj.values)
print(obj.index)
print()
# --- Explicit string index; label lookup, fancy indexing, boolean masks ---
print("## Designate the index of a Series:")
obj2 = Series([4, 7, -5, 3], index=['d', 'b', 'a', 'c'])
print(obj2)
print(obj2.index)
print(obj2['a'])
obj2['d'] = 6
print(obj2[['c', 'a', 'd']])  # a list in []
print(obj2[obj2 > 0])
# Membership tests look at the index labels, not the values.
print('b' in obj2)
print('e' in obj2)
# --- Construction from a dict (keys become the index) ---
print("## Create Series using dictionary:")
sdata = {'Ohio': 45000, 'Texas': 71000, 'Oregon': 16000, 'Utah':5000}
print("### obj3 = Series(sdata):")
obj3 = Series(sdata)
print(obj3)
print()
print("""### Create Series using dictionary and extra index,
the uncompatible part set to 'NaN':""")
print("### obj4 = Series(sdata, index=states):")
states = ['California', 'Ohio', 'Oregon', 'Texas']
obj4 = Series(sdata, index=states)
print(obj4)
print()
# Arithmetic aligns on index labels; non-overlapping labels yield NaN.
print("## Series addition on same index:")
print(obj3 + obj4)
print()
print("## Designate Series name and index name:")
obj4.name = 'population'
obj4.index.name = 'state'
print(obj4)
print()
# Assigning to .index replaces all labels in place.
print("## Replace index:")
obj.index = ['Bob', 'Steve', 'Jeff', 'Ryan']
print(obj)
| gpl-3.0 |
JohannesBuchner/matplotlib-subsets | tests/nestedsetrect_test.py | 1 | 1110 | from matplotlib_subsets import *
def test_nested_example1():
    # Three strictly nested sets (A-H > D-G > E); each label shows the
    # uppercase letter for the set's position plus its cardinality.
    sets = [
        set(list('ABCDEFGH')),
        set(list('DEFG')),
        set(list('E')),
    ]
    setsizes = [len(s) for s in sets]
    nestedsets_rectangles(setsizes, labels = [
        r'$\mathbf{%s}$ ($%d$)' % (string.ascii_uppercase[i], len(s))
        for i, s in enumerate(sets)])
    plt.savefig('example_nested.pdf', bbox_inches='tight')
    plt.close()
def test_tree_example1():
    # Tree nodes are ((size, label, extra), children): a root of 120
    # with two equally sized leaf children.
    tree = ((120, '100', None), [
        ((50, 'A50', None), []),
        ((50, 'B50', None), [])
    ])
    treesets_rectangles(tree)
    plt.savefig('example_tree.pdf', bbox_inches='tight')
    plt.close()
def test_tree_example2():
    # Deeper, unbalanced tree.  NOTE(review): ``[...]*5`` repeats *the
    # same* (5, 'Bt', None) node (and its child list) five times by
    # reference, not five copies -- confirm treesets_rectangles never
    # mutates child lists.
    tree = ((120, '100', None),
        [((50, 'A50', None),
        [((20, 'AX20', None), [((8, 'AXM8', None), [((4, 'AXM4', None), [((2, 'AXM2', None), [])])]), ((8, 'AXN8', None), [])]),
        ((20, 'AY20', None), [])]),
        ((50, 'B50', None), [((5, 'Bt', None), [])]*5)
        ])
    plt.figure(figsize=(7,7))
    treesets_rectangles(tree)
    plt.savefig('example_tree2.pdf', bbox_inches='tight')
    plt.close()
if __name__ == '__main__':
    # Regenerate all example PDFs when run as a script.
    test_nested_example1()
    test_tree_example1()
    test_tree_example2()
| mit |
ClockworkOrigins/m2etis | dependencies/create_virtualenv.py | 1 | 1472 | import virtualenv
import textwrap
import subprocess
import os
import sys
######################
# Add required dependencies to the object dependenciesList.
# Syntax is equivalent to pip requirements files.
# If there are dependencies between packages mind that all packages will be installed in the specified order.
dependenciesList = [
'paramiko==1.10.0',
'numpy==1.7.0',
'matplotlib==1.2.1',
'scipy==0.12.0',
'scikit-learn==0.13.1',
'pyyaml==3.10',
'pymongo==2.5.1'
]
######################
# Get target directory
if len(sys.argv) != 2:
print "Virtualenv creation failed: Target directory required as first argument!"
sys.exit(1)
else:
targetDirectory = sys.argv[1]
# Generate bootstrap script
pipCommand = ""
for dependency in dependenciesList:
# pipCommand += "\t subprocess.call([join(home_dir, 'bin', 'pip'),'install', '--index-url=file://" + os.getcwd() + "/pypi_mirror/simple'," + "'" + dependency + "'])\n"
pipCommand += "\t subprocess.call([join(home_dir, 'bin', 'pip'),'install'," + "'" + dependency + "'])\n"
afterInstallFunctionText = "import subprocess\ndef after_install(options, home_dir):\n" + pipCommand
bootstrapContent = virtualenv.create_bootstrap_script(textwrap.dedent(afterInstallFunctionText))
open('_bootstrap_virtualenv.py', 'w').write(bootstrapContent)
# Run bootstrap script
subprocess.call(['python','_bootstrap_virtualenv.py', '--system-site-packages', targetDirectory])
# Cleanup
#os.remove('_bootstrap_virtualenv.py')
| apache-2.0 |
cogmission/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/cbook.py | 69 | 42525 | """
A collection of utility functions and classes. Many (but not all)
from the Python Cookbook -- hence the name cbook
"""
from __future__ import generators
import re, os, errno, sys, StringIO, traceback, locale, threading, types
import time, datetime
import warnings
import numpy as np
import numpy.ma as ma
from weakref import ref
# Unpack the running interpreter's version components into module-level
# names.  NOTE(review): "s" and "tmp" look like throwaway names from the
# unpack -- verify nothing else in the module relies on them.
major, minor1, minor2, s, tmp = sys.version_info
# on some systems, locale.getpreferredencoding returns None, which can break unicode
preferredencoding = locale.getpreferredencoding()
def unicode_safe(s):
    'Decode *s* to unicode, honouring the locale preferred encoding when known.'
    if preferredencoding is None:
        return unicode(s)
    return unicode(s, preferredencoding)
class converter:
    """
    Base class for string -> python type conversion, with support for a
    sentinel *missing* token that converts to *missingval*.
    """
    def __init__(self, missing='Null', missingval=None):
        # Token marking a missing field, and the value it converts to.
        self.missing = missing
        self.missingval = missingval
    def __call__(self, s):
        # The missing token short-circuits to the configured placeholder.
        return self.missingval if s == self.missing else s
    def is_missing(self, s):
        # Blank / whitespace-only strings count as missing too.
        if not s.strip():
            return True
        return s == self.missing
class tostr(converter):
    'Identity converter whose missing value defaults to the empty string.'
    def __init__(self, missing='Null', missingval=''):
        # Same as the base class, except missing fields become '' not None.
        converter.__init__(self, missing, missingval)
class todatetime(converter):
    'Convert a string to a :class:`datetime.datetime` (or the missing value).'
    def __init__(self, fmt='%Y-%m-%d', missing='Null', missingval=None):
        'use a :func:`time.strptime` format string for conversion'
        converter.__init__(self, missing, missingval)
        self.fmt = fmt
    def __call__(self, s):
        if self.is_missing(s):
            return self.missingval
        # Year through second are the first six struct_time fields.
        return datetime.datetime(*time.strptime(s, self.fmt)[:6])
class todate(converter):
    'Convert a string to a :class:`datetime.date` (or the missing value).'
    def __init__(self, fmt='%Y-%m-%d', missing='Null', missingval=None):
        'use a :func:`time.strptime` format string for conversion'
        converter.__init__(self, missing, missingval)
        self.fmt = fmt
    def __call__(self, s):
        if self.is_missing(s):
            return self.missingval
        # Year, month and day are the first three struct_time fields.
        return datetime.date(*time.strptime(s, self.fmt)[:3])
class tofloat(converter):
    'Convert a string to a float (or the configured missing value).'
    def __init__(self, missing='Null', missingval=None):
        converter.__init__(self, missing)
        # Stored explicitly since the base-class call above did not get it.
        self.missingval = missingval
    def __call__(self, s):
        return self.missingval if self.is_missing(s) else float(s)
class toint(converter):
    'convert to an int or None'
    def __init__(self, missing='Null', missingval=None):
        converter.__init__(self, missing)
        # BUG FIX: *missingval* was previously dropped (the base-class call
        # did not receive it and it was never stored), so
        # toint(missingval=x) silently behaved as missingval=None.
        # Store it, mirroring the tofloat implementation above.
        self.missingval = missingval
    def __call__(self, s):
        if self.is_missing(s): return self.missingval
        return int(s)
class CallbackRegistry:
    """
    Connect callables to named signals and invoke them on demand::

        callbacks = CallbackRegistry(('eat', 'drink', 'be merry'))
        cid = callbacks.connect('eat', on_eat)
        callbacks.process('eat', 123)      # invokes on_eat(123)
        callbacks.process('be merry', 1)   # no callbacks registered: no-op
        callbacks.disconnect(cid)          # on_eat no longer fires

    Connecting or processing an unknown signal raises ValueError.
    """
    def __init__(self, signals):
        '*signals* is a sequence of valid signals'
        self.signals = set(signals)
        # One {connection id: callable} mapping per signal name.
        self.callbacks = dict((s, {}) for s in signals)
        self._cid = 0
    def _check_signal(self, s):
        'make sure *s* is a valid signal or raise a ValueError'
        if s in self.signals:
            return
        signals = list(self.signals)
        signals.sort()
        raise ValueError('Unknown signal "%s"; valid signals are %s'%(s, signals))
    def connect(self, s, func):
        """
        Register *func* to be invoked whenever signal *s* is processed;
        return a connection id usable with :meth:`disconnect`.
        """
        self._check_signal(s)
        self._cid += 1
        self.callbacks[s][self._cid] = func
        return self._cid
    def disconnect(self, cid):
        """
        disconnect the callback registered with callback id *cid*
        """
        # A cid lives under exactly one signal; stop at the first hit.
        for callbackd in self.callbacks.values():
            try:
                del callbackd[cid]
            except KeyError:
                continue
            else:
                return
    def process(self, s, *args, **kwargs):
        """
        Invoke every callback connected to signal *s* with *args*/*kwargs*.
        """
        self._check_signal(s)
        for func in self.callbacks[s].values():
            func(*args, **kwargs)
class Scheduler(threading.Thread):
    """
    Base class for timeout and idle scheduling
    """
    # Shared lock: at most one scheduled callback runs at a time.
    idlelock = threading.Lock()
    # Monotonically increasing id handed out to each new instance.
    id = 0
    def __init__(self):
        threading.Thread.__init__(self)
        # Grab the next instance id and bump the class-wide counter.
        self.id = Scheduler.id
        Scheduler.id += 1
        self._stopped = False
        self._stopevent = threading.Event()
    def stop(self):
        # Idempotent: a second call is a no-op.
        if self._stopped:
            return
        self._stopevent.set()
        self.join()
        self._stopped = True
class Timeout(Scheduler):
    """
    Schedule recurring events with a wait time in seconds
    """
    def __init__(self, wait, func):
        Scheduler.__init__(self)
        # Seconds between invocations, and the callable to invoke.
        self.wait = wait
        self.func = func
    def run(self):
        # Fire repeatedly until stop() is requested or the callback
        # returns a falsy value.
        while not self._stopevent.isSet():
            self._stopevent.wait(self.wait)
            Scheduler.idlelock.acquire()
            # BUG FIX: release the shared lock even if the callback raises;
            # previously an exception left idlelock held forever, deadlocking
            # every other scheduler.
            try:
                b = self.func(self)
            finally:
                Scheduler.idlelock.release()
            if not b: break
class Idle(Scheduler):
    """
    Schedule callbacks when scheduler is idle
    """
    # the prototype impl is a bit of a poor man's idle handler. It
    # just implements a short wait time. But it will provide a
    # placeholder for a proper impl ater
    waittime = 0.05
    def __init__(self, func):
        Scheduler.__init__(self)
        self.func = func
    def run(self):
        # Poll at a fixed short interval; stop when requested or when the
        # callback returns a falsy value.
        while not self._stopevent.isSet():
            self._stopevent.wait(Idle.waittime)
            Scheduler.idlelock.acquire()
            # BUG FIX: release the shared lock even if the callback raises;
            # previously an exception left idlelock held forever, deadlocking
            # every other scheduler.
            try:
                b = self.func(self)
            finally:
                Scheduler.idlelock.release()
            if not b: break
class silent_list(list):
    """
    A list whose repr/str is a short summary rather than the full element
    dump; intended for a homogeneous list of objects of a given *type*.
    """
    def __init__(self, type, seq=None):
        self.type = type
        if seq is not None:
            self.extend(seq)
    def __repr__(self):
        return '<a list of %d %s objects>' % (len(self), self.type)
    # str() produces the same summary text as repr().
    __str__ = __repr__
def strip_math(s):
    'Strip the surrounding $...$ and common TeX markup from mathtext *s*.'
    # Drop the leading and trailing dollar signs first.
    stripped = s[1:-1]
    # Removal order matters: multi-character commands must go before the
    # lone backslash and the braces.
    for token in (r'\mathdefault', r'\rm', r'\cal', r'\tt', r'\it',
                  '\\', '{', '}'):
        stripped = stripped.replace(token, '')
    return stripped
class Bunch:
    """
    A trivial namespace: collects keyword arguments as attributes,
    handier and prettier than a dict for grouping a few variables:

        >>> point = Bunch(datum=2, squared=4, coord=12)
        >>> point.datum
        2

    By: Alex Martelli
    From: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52308
    """
    def __init__(self, **kwds):
        for name, value in kwds.items():
            setattr(self, name, value)
def unique(x):
    'Return a list of unique elements of *x*'
    # dict keys are unique by construction; fromkeys keeps one slot per value.
    return dict.fromkeys(x).keys()
def iterable(obj):
    'return true if *obj* is iterable'
    try:
        len(obj)
    except:
        # len() failing for any reason counts as "not iterable" here.
        return False
    return True
def is_string_like(obj):
    'Return True if *obj* looks like a string'
    if isinstance(obj, (str, unicode)):
        return True
    # numpy strings subclass str, but masked-array strings do not: only a
    # 0-d masked array with string/unicode dtype counts as string-like.
    if ma.isMaskedArray(obj):
        return obj.ndim == 0 and obj.dtype.kind in 'SU'
    # Duck-typed fallback: anything that concatenates with '' will do.
    try:
        obj + ''
    except (TypeError, ValueError):
        return False
    return True
def is_sequence_of_strings(obj):
    """
    Returns true if *obj* is iterable and contains strings
    """
    if not iterable(obj):
        return False
    # A bare string is iterable, but does not count as a *sequence* of strings.
    if is_string_like(obj):
        return False
    for element in obj:
        if not is_string_like(element):
            return False
    return True
def is_writable_file_like(obj):
    'return true if *obj* looks like a file object with a *write* method'
    if not hasattr(obj, 'write'):
        return False
    return callable(obj.write)
def is_scalar(obj):
    'return true if *obj* is not string like and is not iterable'
    if is_string_like(obj):
        return False
    return not iterable(obj)
def is_numlike(obj):
    'return true if *obj* looks like a number'
    # Duck-typed: anything that supports addition with an int qualifies.
    try:
        obj + 1
    except TypeError:
        return False
    return True
def to_filehandle(fname, flag='r', return_opened=False):
    """
    *fname* can be a filename or a file handle.  Support for gzipped
    files is automatic, if the filename ends in .gz.  *flag* is a
    read/write flag for :func:`file`.  When *return_opened* is True,
    a ``(handle, opened_here)`` pair is returned instead.
    """
    if is_string_like(fname):
        # Given a path: open it ourselves, transparently handling gzip.
        if fname.endswith('.gz'):
            import gzip
            fh = gzip.open(fname, flag)
        else:
            fh = file(fname, flag)
        opened = True
    elif hasattr(fname, 'seek'):
        # Already an open, seekable file-like object; do not reopen.
        fh, opened = fname, False
    else:
        raise ValueError('fname must be a string or file handle')
    if return_opened:
        return fh, opened
    return fh
def is_scalar_or_string(val):
    """Return True if *val* is string-like or not iterable."""
    if is_string_like(val):
        return True
    return not iterable(val)
def flatten(seq, scalarp=is_scalar_or_string):
    """
    Generator that flattens arbitrarily nested containers, e.g.
    ``(('John', 'Hunter'), (1, 23), [[[[42, (5, 23)]]]])`` yields
    John Hunter 1 23 42 5 23.

    *scalarp* decides which items are atoms (yielded as-is) versus
    containers (recursed into).

    By: Composite of Holger Krekel and Luther Blissett
    From: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/121294
    and Recipe 1.12 in cookbook
    """
    for item in seq:
        if scalarp(item):
            yield item
            continue
        for sub in flatten(item, scalarp):
            yield sub
class Sorter:
    """
    Sort a list of items by index or by attribute.

    Example usage::

        sort = Sorter()
        list = [(1, 2), (4, 8), (0, 3)]
        dict = [{'a': 3, 'b': 4}, {'a': 5, 'b': 2}, {'a': 0, 'b': 0},
                {'a': 9, 'b': 9}]
        sort(list)       # default sort
        sort(list, 1)    # sort by index 1
        sort(dict, 'a')  # sort a list of dicts by key 'a'
    """
    def _helper(self, data, aux, inplace):
        # decorate-sort-undecorate: aux holds (key, index) pairs
        aux.sort()
        result = [data[idx] for _key, idx in aux]
        if inplace:
            data[:] = result
        return result

    def byItem(self, data, itemindex=None, inplace=1):
        """Sort *data* by item *itemindex* (or natively when it is None)."""
        if itemindex is None:
            if inplace:
                data.sort()
                return data
            result = data[:]
            result.sort()
            return result
        aux = [(entry[itemindex], idx) for idx, entry in enumerate(data)]
        return self._helper(data, aux, inplace)

    def byAttribute(self, data, attributename, inplace=1):
        """Sort *data* by the attribute named *attributename*."""
        aux = [(getattr(entry, attributename), idx)
               for idx, entry in enumerate(data)]
        return self._helper(data, aux, inplace)

    # a couple of handy synonyms
    sort = byItem
    __call__ = byItem
class Xlator(dict):
    """
    All-in-one multiple-string-substitution class.

    Maps each key of the dictionary to its value in a single regex pass.
    Example usage::

        text = "Larry Wall is the creator of Perl"
        adict = {
            "Larry Wall" : "Guido van Rossum",
            "creator" : "Benevolent Dictator for Life",
            "Perl" : "Python",
        }
        xlat = Xlator(adict)
        print xlat.xlat(text)
    """
    def _make_regex(self):
        """ Build re object based on the keys of the current dictionary """
        escaped = map(re.escape, self.keys())
        return re.compile("|".join(escaped))

    def __call__(self, match):
        """ Handler invoked for each regex *match* """
        # the instance itself is the sub() callback: look the hit up directly
        return self[match.group(0)]

    def xlat(self, text):
        """ Translate *text*, returns the modified text. """
        pattern = self._make_regex()
        return pattern.sub(self, text)
def soundex(name, len=4):
""" soundex module conforming to Odell-Russell algorithm """
# digits holds the soundex values for the alphabet
soundex_digits = '01230120022455012623010202'
sndx = ''
fc = ''
# Translate letters in name to soundex digits
for c in name.upper():
if c.isalpha():
if not fc: fc = c # Remember first letter
d = soundex_digits[ord(c)-ord('A')]
# Duplicate consecutive soundex digits are skipped
if not sndx or (d != sndx[-1]):
sndx += d
# Replace first digit with first letter
sndx = fc + sndx[1:]
# Remove all 0s from the soundex code
sndx = sndx.replace('0', '')
# Return soundex code truncated or 0-padded to len characters
return (sndx + (len * '0'))[:len]
class Null:
    """ Null objects always and reliably "do nothing."

    Any attribute access, call, set, or delete is a harmless no-op that
    returns the instance, so chains like ``Null().foo.bar()`` are safe.
    """
    def __init__(self, *args, **kwargs): pass
    def __call__(self, *args, **kwargs): return self
    def __str__(self): return "Null()"
    def __repr__(self): return "Null()"
    def __nonzero__(self): return 0   # Python 2 truth protocol
    # Python 3 renamed the truth hook; without this, bool(Null()) is True there.
    def __bool__(self): return False
    def __getattr__(self, name): return self
    def __setattr__(self, name, value): return self
    def __delattr__(self, name): return self
def mkdirs(newdir, mode=0777):
    """
    make directory *newdir* recursively, and set *mode*.  Equivalent to ::
        > mkdir -p NEWDIR
        > chmod MODE NEWDIR
    """
    # NOTE: the 0777 literal and `except OSError, err` are Python-2-only syntax.
    try:
        if not os.path.exists(newdir):
            # os.path.split yields only a (head, tail) pair, so this loop has
            # at most two iterations -- presumably it relies on os.makedirs
            # itself being recursive; confirm before simplifying.
            parts = os.path.split(newdir)
            for i in range(1, len(parts)+1):
                thispart = os.path.join(*parts[:i])
                if not os.path.exists(thispart):
                    os.makedirs(thispart, mode)
    except OSError, err:
        # Reraise the error unless it's about an already existing directory
        if err.errno != errno.EEXIST or not os.path.isdir(newdir):
            raise
class GetRealpathAndStat:
    """Callable that memoizes ``(realpath, stat_key)`` lookups per input path."""
    def __init__(self):
        self._cache = {}

    def __call__(self, path):
        """Return ``(realpath, stat_key)`` for *path*, caching the result."""
        cached = self._cache.get(path)
        if cached is not None:
            return cached
        realpath = os.path.realpath(path)
        if sys.platform == 'win32':
            # inode/device keys are not reliable on Windows; key on the path
            stat_key = realpath
        else:
            st = os.stat(realpath)
            stat_key = (st.st_ino, st.st_dev)
        cached = realpath, stat_key
        self._cache[path] = cached
        return cached
get_realpath_and_stat = GetRealpathAndStat()
def dict_delall(d, keys):
    'delete all of the *keys* from the :class:`dict` *d*'
    # pop with a default is a no-op for missing keys, matching the original
    # try/except KeyError behavior
    for key in keys:
        d.pop(key, None)
class RingBuffer:
    """Fixed-size ring buffer.

    While fewer than ``size_max`` elements are stored, appends simply grow the
    list.  Once full, the instance's class is switched to the private
    ``__Full`` implementation, whose ``append`` overwrites the oldest element.
    """
    def __init__(self, size_max):
        self.max = size_max
        self.data = []
        self.cur = 0

    class __Full:
        """ class that implements a full buffer """
        def append(self, x):
            """ Append an element overwriting the oldest one. """
            self.data[self.cur] = x
            self.cur = (self.cur+1) % self.max
        def get(self):
            """ return list of elements in correct order """
            return self.data[self.cur:]+self.data[:self.cur]
        def __getitem__(self, i):
            # index relative to the oldest element, wrapping around
            return self.get()[i % len(self.data)]

    def append(self, x):
        """append an element at the end of the buffer"""
        self.data.append(x)
        if len(self.data) == self.max:
            self.cur = 0
            # Permanently change self's class from non-full to full.
            # BUGFIX: the original wrote ``self.__class__ = __Full``, which
            # raises NameError at runtime (name mangling turns the bare name
            # into a global lookup); resolving through ``self`` finds the
            # class attribute correctly.
            self.__class__ = self.__Full

    def get(self):
        """ Return a list of elements from the oldest to the newest. """
        return self.data

    def __getitem__(self, i):
        # BUGFIX: the original named this ``__get_item__``, which never hooks
        # into the indexing protocol; ``rb[i]`` raised TypeError.
        return self.data[i % len(self.data)]
def get_split_ind(seq, N):
    """
    *seq* is a list of words.  Return the index into seq such that::
        len(' '.join(seq[:ind])<=N
    """
    running = 0
    for ind, word in enumerate(seq):
        running += len(word) + 1  # +1 to account for the len(' ')
        if running >= N:
            return ind
    return len(seq)
def wrap(prefix, text, cols):
    'wrap *text* with *prefix* at length *cols*'
    # continuation lines are padded with spaces to align under the prefix
    pad = ' '*len(prefix.expandtabs())
    available = cols - len(pad)
    words = text.split(' ')
    total = len(words)
    pos = 0
    chunks = []
    while pos < total:
        start = pos
        pos += get_split_ind(words[pos:], available)
        chunks.append(words[start:pos])
    # add the prefix to the first line, pad with spaces otherwise
    out = prefix + ' '.join(chunks[0]) + '\n'
    for chunk in chunks[1:]:
        out += pad + ' '.join(chunk) + '\n'
    return out
# A regular expression used to determine the amount of space to
# remove.  It looks for the first sequence of spaces immediately
# following the first newline, or at the beginning of the string.
_find_dedent_regex = re.compile("(?:(?:\n\r?)|^)( *)\S")
# A cache to hold the regexs that actually remove the indent.
_dedent_regex = {}
def dedent(s):
    """
    Remove excess indentation from docstring *s*.

    Discards any leading blank lines, then removes up to n whitespace
    characters from each line, where n is the number of leading
    whitespace characters in the first line.  It differs from
    textwrap.dedent in its deletion of leading blank lines and its use
    of the first non-blank line to determine the indentation.
    It is also faster in most cases.
    """
    # Regex-based for speed: this function once accounted for ~30% of
    # matplotlib startup time.
    if not s:   # covers both '' and None
        return ''
    match = _find_dedent_regex.match(s)
    if match is None:
        return s
    # Number of spaces to strip from the left-hand side of each line.
    nshift = match.end(1) - match.start(1)
    if nshift == 0:
        return s
    # Fetch (or build and cache) a regex removing *up to* nshift spaces
    # after each newline.
    try:
        unindent = _dedent_regex[nshift]
    except KeyError:
        unindent = re.compile("\n\r? {0,%d}" % nshift)
        _dedent_regex[nshift] = unindent
    return unindent.sub("\n", s).strip()
def listFiles(root, patterns='*', recurse=1, return_folders=0):
    """
    Recursively list files
    from Parmar and Martelli in the Python Cookbook
    """
    import os.path, fnmatch
    # Expand patterns from semicolon-separated string to list
    pattern_list = patterns.split(';')
    # Collect input and output arguments into one bunch
    class Bunch:
        def __init__(self, **kwds): self.__dict__.update(kwds)
    arg = Bunch(recurse=recurse, pattern_list=pattern_list,
                return_folders=return_folders, results=[])
    def visit(arg, dirname, files):
        # Append to arg.results all relevant files (and perhaps folders)
        for name in files:
            fullname = os.path.normpath(os.path.join(dirname, name))
            if arg.return_folders or os.path.isfile(fullname):
                # accept the first pattern that matches, then stop checking
                for pattern in arg.pattern_list:
                    if fnmatch.fnmatch(name, pattern):
                        arg.results.append(fullname)
                        break
        # Block recursion if recursion was disallowed
        if not arg.recurse: files[:] = []
    # NOTE: os.path.walk is Python 2 only (removed in Python 3; use os.walk).
    os.path.walk(root, visit, arg)
    return arg.results
def get_recursive_filelist(args):
    """
    Recurse all the files and dirs in *args* ignoring symbolic links
    and return the files as a list of strings
    """
    collected = []
    for arg in args:
        if os.path.isfile(arg):
            collected.append(arg)
        elif os.path.isdir(arg):
            collected.extend(listFiles(arg, recurse=1, return_folders=1))
    # symlinks are filtered out in a final pass
    return [fname for fname in collected if not os.path.islink(fname)]
def pieces(seq, num=2):
    "Break up the *seq* into *num* tuples"
    start = 0
    while True:
        chunk = seq[start:start + num]
        if not len(chunk):
            return
        yield chunk
        start += num
def exception_to_str(s = None):
    """Return the current exception's traceback, preceded by *s* if given,
    formatted as a string."""
    # NOTE: `print >>sh` is Python-2 print-to-file syntax, and StringIO here
    # is the Python 2 StringIO module imported at the top of the file.
    sh = StringIO.StringIO()
    if s is not None: print >>sh, s
    traceback.print_exc(file=sh)
    return sh.getvalue()
def allequal(seq):
    """
    Return *True* if all elements of *seq* compare equal.  If *seq* is
    0 or 1 length, return *True*
    """
    if len(seq) < 2:
        return True
    first = seq[0]
    # range (not the Python-2-only xrange of the original) works on 2 and 3,
    # and all() short-circuits on the first mismatch just like the old loop.
    return all(seq[i] == first for i in range(1, len(seq)))
def alltrue(seq):
    """
    Return *True* if all elements of *seq* evaluate to *True*.  If
    *seq* is empty, return *False*.
    """
    # len() rather than bool() so sequences that forbid truth-testing
    # (e.g. numpy arrays) still work
    if not len(seq):
        return False
    return all(seq)
def onetrue(seq):
    """
    Return *True* if at least one element of *seq* is *True*.  If *seq*
    is empty, return *False*.
    """
    if not len(seq):
        return False
    return any(seq)
def allpairs(x):
    """
    return all possible pairs in sequence *x*

    Condensed by Alex Martelli from this thread_ on c.l.python
    .. _thread: http://groups.google.com/groups?q=all+pairs+group:*python*&hl=en&lr=&ie=UTF-8&selm=mailman.4028.1096403649.5135.python-list%40python.org&rnum=1
    """
    pairs = []
    # each element is paired (as the second item) with every later element
    for i, first in enumerate(x):
        for second in x[i + 1:]:
            pairs.append((second, first))
    return pairs
# python 2.2 dicts don't have pop--but we don't support 2.2 any more
def popd(d, *args):
    """
    Should behave like python2.3 :meth:`dict.pop` method; *d* is a
    :class:`dict`::
        # returns value for key and deletes item; raises a KeyError if key
        # is not in dict
        val = popd(d, key)
        # returns value for key if key exists, else default. Delete key,
        # val item if it exists. Will not raise a KeyError
        val = popd(d, key, default)
    """
    warnings.warn("Use native python dict.pop method", DeprecationWarning)
    # warning added 2008/07/22
    if len(args) == 1:
        # popd(d, key): a missing key propagates KeyError
        (key,) = args
        val = d[key]
        del d[key]
    elif len(args) == 2:
        # popd(d, key, default): a missing key is not an error
        key, default = args
        val = d.get(key, default)
        if key in d:
            del d[key]
    return val
class maxdict(dict):
    """
    A dictionary with a maximum size; this doesn't override all the
    relevant methods to contrain size, just setitem, so use with
    caution
    """
    def __init__(self, maxsize):
        dict.__init__(self)
        self.maxsize = maxsize
        # insertion order of keys, used to evict the oldest entry
        self._killkeys = []

    def __setitem__(self, k, v):
        if len(self) >= self.maxsize:
            oldest = self._killkeys.pop(0)
            del self[oldest]
        dict.__setitem__(self, k, v)
        self._killkeys.append(k)
class Stack:
    """
    A stack with a movable cursor: elements can be pushed on, and you
    can move back and forth through the history, but there is no pop.
    Mimics home / back / forward in a browser.
    """
    def __init__(self, default=None):
        self.clear()
        self._default = default

    def __call__(self):
        """Return the current element, or the default if the stack is empty."""
        if not len(self._elements):
            return self._default
        return self._elements[self._pos]

    def forward(self):
        """Move the position forward and return the current element."""
        if self._pos < len(self._elements) - 1:
            self._pos += 1
        return self()

    def back(self):
        """Move the position back and return the current element."""
        if self._pos > 0:
            self._pos -= 1
        return self()

    def push(self, o):
        """
        Push *o* onto the stack at the current position; every element
        after the current position is discarded.
        """
        self._elements = self._elements[:self._pos + 1]
        self._elements.append(o)
        self._pos = len(self._elements) - 1
        return self()

    def home(self):
        """Push the first element onto the top of the stack."""
        if not len(self._elements):
            return
        self.push(self._elements[0])
        return self()

    def empty(self):
        """Return True if the stack holds no elements."""
        return len(self._elements) == 0

    def clear(self):
        """Empty the stack."""
        self._pos = -1
        self._elements = []

    def bubble(self, o):
        """
        Raise *o* to the top of the stack and return it.  *o* must be
        present in the stack.
        """
        if o not in self._elements:
            raise ValueError('Unknown element o')
        previous = self._elements[:]
        self.clear()
        matches = []
        # rebuild the stack without o, remembering how many copies there were
        for element in previous:
            if element == o:
                matches.append(element)
            else:
                self.push(element)
        # then re-push o once per original occurrence, so it ends on top
        for _ in matches:
            self.push(o)
        return o

    def remove(self, o):
        """Remove element *o* from the stack."""
        if o not in self._elements:
            raise ValueError('Unknown element o')
        previous = self._elements[:]
        self.clear()
        for element in previous:
            if element == o:
                continue
            self.push(element)
def popall(seq):
    'empty a list in place'
    # slice deletion clears in one O(n) step and avoids the Python-2-only
    # xrange of the original pop loop
    del seq[:]
def finddir(o, match, case=False):
    """
    Return all attributes of *o* whose name contains *match*.  If *case*
    is True, require an exact-case substring match.
    """
    if case:
        names = [(name, name) for name in dir(o) if is_string_like(name)]
    else:
        # compare case-insensitively but report the original spelling
        names = [(name.lower(), name) for name in dir(o)
                 if is_string_like(name)]
        match = match.lower()
    return [orig for name, orig in names if match in name]
def reverse_dict(d):
    'reverse the dictionary -- may lose data if values are not unique!'
    return dict((v, k) for k, v in d.items())
def report_memory(i=0): # argument may go away
    'return the memory consumed by process'
    pid = os.getpid()
    # Each branch shells out to `ps` with a platform-specific field list and
    # parses the resident/size column from its output.
    if sys.platform=='sunos5':
        a2 = os.popen('ps -p %d -o osz' % pid).readlines()
        mem = int(a2[-1].strip())
    elif sys.platform.startswith('linux'):
        a2 = os.popen('ps -p %d -o rss,sz' % pid).readlines()
        mem = int(a2[1].split()[1])
    elif sys.platform.startswith('darwin'):
        a2 = os.popen('ps -p %d -o rss,vsz' % pid).readlines()
        mem = int(a2[1].split()[0])
    # NOTE(review): on any other platform (e.g. win32), `mem` is never bound
    # and this raises NameError -- presumably unsupported; confirm.
    return mem
_safezip_msg = 'In safezip, len(args[0])=%d but len(args[%d])=%d'
def safezip(*args):
    'make sure *args* are equal len before zipping'
    expected = len(args[0])
    # validate every argument against the first before zipping
    for pos, arg in enumerate(args[1:], start=1):
        if len(arg) != expected:
            raise ValueError(_safezip_msg % (expected, pos, len(arg)))
    return zip(*args)
def issubclass_safe(x, klass):
    'return issubclass(x, klass) and return False on a TypeError'
    try:
        result = issubclass(x, klass)
    except TypeError:
        # x was not a class at all
        result = False
    return result
class MemoryMonitor:
    """Record process memory use over repeated calls (samples report_memory)."""
    def __init__(self, nmax=20000):
        # pre-allocated sample buffer; overflow is flagged, never grown
        self._nmax = nmax
        self._mem = np.zeros((self._nmax,), np.int32)
        self.clear()
    def clear(self):
        """Reset the sample counter and the overflow flag."""
        self._n = 0
        self._overflow = False
    def __call__(self):
        """Take one memory sample; store it if there is room, and return it."""
        mem = report_memory()
        if self._n < self._nmax:
            self._mem[self._n] = mem
            self._n += 1
        else:
            self._overflow = True
        return mem
    def report(self, segments=4):
        """Print memory deltas over *segments* roughly equal intervals."""
        # NOTE: the print statements below are Python-2 syntax.
        n = self._n
        segments = min(n, segments)
        dn = int(n/segments)
        ii = range(0, n, dn)
        ii[-1] = n-1
        print
        print 'memory report: i, mem, dmem, dmem/nloops'
        print 0, self._mem[0]
        for i in range(1, len(ii)):
            di = ii[i] - ii[i-1]
            if di == 0:
                continue
            dm = self._mem[ii[i]] - self._mem[ii[i-1]]
            print '%5d %5d %3d %8.3f' % (ii[i], self._mem[ii[i]],
                                         dm, dm / float(di))
        if self._overflow:
            print "Warning: array size was too small for the number of calls."
    def xy(self, i0=0, isub=1):
        """Return (sample indices, memory values), subsampled by *isub*."""
        x = np.arange(i0, self._n, isub)
        return x, self._mem[i0:self._n:isub]
    def plot(self, i0=0, isub=1, fig=None):
        """Plot the recorded samples (on a new pylab figure when *fig* is None)."""
        if fig is None:
            from pylab import figure, show
            fig = figure()
        ax = fig.add_subplot(111)
        ax.plot(*self.xy(i0, isub))
        fig.canvas.draw()
def print_cycles(objects, outstream=sys.stdout, show_progress=False):
    """
    *objects*
        A list of objects to find cycles in.  It is often useful to
        pass in gc.garbage to find the cycles that are preventing some
        objects from being garbage collected.
    *outstream*
        The stream for output.
    *show_progress*
        If True, print the number of objects reached as they are found.
    """
    import gc
    from types import FrameType
    def print_path(path):
        """Pretty-print one reference cycle, one edge per line."""
        for i, step in enumerate(path):
            # next "wraps around"
            next = path[(i + 1) % len(path)]
            outstream.write(" %s -- " % str(type(step)))
            if isinstance(step, dict):
                # show which key (or value) of the dict holds the next link
                for key, val in step.items():
                    if val is next:
                        outstream.write("[%s]" % repr(key))
                        break
                    if key is next:
                        outstream.write("[key] = %s" % repr(val))
                        break
            elif isinstance(step, list):
                outstream.write("[%d]" % step.index(next))
            elif isinstance(step, tuple):
                outstream.write("( tuple )")
            else:
                outstream.write(repr(step))
            outstream.write(" ->\n")
        outstream.write("\n")
    def recurse(obj, start, all, current_path):
        """Depth-first walk of gc referents, reporting paths back to *start*."""
        if show_progress:
            outstream.write("%d\r" % len(all))
        all[id(obj)] = None
        referents = gc.get_referents(obj)
        for referent in referents:
            # If we've found our way back to the start, this is
            # a cycle, so print it out
            if referent is start:
                print_path(current_path)
            # Don't go back through the original list of objects, or
            # through temporary references to the object, since those
            # are just an artifact of the cycle detector itself.
            elif referent is objects or isinstance(referent, FrameType):
                continue
            # We haven't seen this object before, so recurse
            elif id(referent) not in all:
                recurse(referent, start, all, current_path + [obj])
    for obj in objects:
        outstream.write("Examining: %r\n" % (obj,))
        recurse(obj, obj, { }, [])
class Grouper(object):
    """
    This class provides a lightweight way to group arbitrary objects
    together into disjoint sets when a full-blown graph data structure
    would be overkill.

    Objects can be joined using :meth:`join`, tested for connectedness
    using :meth:`joined`, and all disjoint sets can be retreived by
    using the object as an iterator.

    The objects being joined must be hashable.

    For example:

    >>> g = grouper.Grouper()
    >>> g.join('a', 'b')
    >>> g.join('b', 'c')
    >>> g.join('d', 'e')
    >>> list(g)
    [['a', 'b', 'c'], ['d', 'e']]
    >>> g.joined('a', 'b')
    True
    >>> g.joined('a', 'c')
    True
    >>> g.joined('a', 'd')
    False
    """
    def __init__(self, init=[]):
        # _mapping: weakref -> list of weakrefs in the same disjoint set;
        # every member of a set shares the *same* list object, so set
        # identity can be tested with `is`.  (`ref` is weakref.ref, imported
        # at the top of the file.)  The mutable default is safe: it is only
        # iterated, never mutated.
        mapping = self._mapping = {}
        for x in init:
            mapping[ref(x)] = [ref(x)]
    def __contains__(self, item):
        return ref(item) in self._mapping
    def clean(self):
        """
        Clean dead weak references from the dictionary
        """
        # NOTE: deleting from the dict while iterating items() is only valid
        # on Python 2, where items() returns a list snapshot.
        mapping = self._mapping
        for key, val in mapping.items():
            if key() is None:
                del mapping[key]
                val.remove(key)
    def join(self, a, *args):
        """
        Join given arguments into the same set.  Accepts one or more
        arguments.
        """
        mapping = self._mapping
        set_a = mapping.setdefault(ref(a), [ref(a)])
        for arg in args:
            set_b = mapping.get(ref(arg))
            if set_b is None:
                set_a.append(ref(arg))
                mapping[ref(arg)] = set_a
            elif set_b is not set_a:
                # merge the smaller set into the larger (union by size)
                if len(set_b) > len(set_a):
                    set_a, set_b = set_b, set_a
                set_a.extend(set_b)
                for elem in set_b:
                    mapping[elem] = set_a
        self.clean()
    def joined(self, a, b):
        """
        Returns True if *a* and *b* are members of the same set.
        """
        self.clean()
        mapping = self._mapping
        try:
            # shared-list identity is the membership test
            return mapping[ref(a)] is mapping[ref(b)]
        except KeyError:
            return False
    def __iter__(self):
        """
        Iterate over each of the disjoint sets as a list.

        The iterator is invalid if interleaved with calls to join().
        """
        self.clean()
        class Token: pass
        token = Token()
        # Mark each group as we come across if by appending a token,
        # and don't yield it twice
        # NOTE: itervalues() is Python 2 only (use values() on Python 3).
        for group in self._mapping.itervalues():
            if not group[-1] is token:
                yield [x() for x in group]
                group.append(token)
        # Cleanup the tokens
        for group in self._mapping.itervalues():
            if group[-1] is token:
                del group[-1]
    def get_siblings(self, a):
        """
        Returns all of the items joined with *a*, including itself.
        """
        self.clean()
        siblings = self._mapping.get(ref(a), [ref(a)])
        return [x() for x in siblings]
def simple_linear_interpolation(a, steps):
    """Resample *a* along its first axis.

    Inserts ``steps - 1`` linearly interpolated points between every pair of
    consecutive values (or rows) of *a*, returning an array whose first
    dimension has length ``(len(a) - 1) * steps + 1``.
    """
    # an integral step count is required: the original kept the float result
    # of np.floor, which is rejected as a slice step by modern numpy
    steps = int(np.floor(steps))
    new_length = ((len(a) - 1) * steps) + 1
    new_shape = list(a.shape)
    new_shape[0] = new_length
    result = np.zeros(new_shape, a.dtype)
    result[0] = a[0]
    a0 = a[0:-1]
    a1 = a[1:]
    delta = ((a1 - a0) / steps)
    # fill the i-th interpolated point of every interval in one strided write
    for i in range(1, steps):
        result[i::steps] = delta * i + a0
    # the interval endpoints are the original samples
    result[steps::steps] = a1
    return result
def recursive_remove(path):
    """Delete *path*; if it is a directory, delete its contents recursively.

    Globs both '*' and '.*' so dot-files are removed as well.
    """
    if os.path.isdir(path):
        for fname in glob.glob(os.path.join(path, '*')) + glob.glob(os.path.join(path, '.*')):
            if os.path.isdir(fname):
                recursive_remove(fname)
                os.removedirs(fname)
            else:
                os.remove(fname)
        #os.removedirs(path)
    else:
        os.remove(path)
def delete_masked_points(*args):
    """
    Find all masked and/or non-finite points in a set of arguments,
    and return the arguments with only the unmasked points remaining.

    Arguments can be in any of 5 categories:
    1) 1-D masked arrays
    2) 1-D ndarrays
    3) ndarrays with more than one dimension
    4) other non-string iterables
    5) anything else

    The first argument must be in one of the first four categories;
    any argument with a length differing from that of the first
    argument (and hence anything in category 5) then will be
    passed through unchanged.

    Masks are obtained from all arguments of the correct length
    in categories 1, 2, and 4; a point is bad if masked in a masked
    array or if it is a nan or inf.  No attempt is made to
    extract a mask from categories 2, 3, and 4 if :meth:`np.isfinite`
    does not yield a Boolean array.

    All input arguments that are not passed unchanged are returned
    as ndarrays after removing the points or rows corresponding to
    masks in any of the arguments.

    A vastly simpler version of this function was originally
    written as a helper for Axes.scatter().
    """
    if not len(args):
        return ()
    if (is_string_like(args[0]) or not iterable(args[0])):
        raise ValueError("First argument must be a sequence")
    nrecs = len(args[0])
    margs = []
    # seqlist[i] is True for arguments that are sequences of the same length
    # as the first argument; only those participate in the filtering
    seqlist = [False] * len(args)
    for i, x in enumerate(args):
        if (not is_string_like(x)) and iterable(x) and len(x) == nrecs:
            seqlist[i] = True
            if ma.isMA(x):
                if x.ndim > 1:
                    raise ValueError("Masked arrays must be 1-D")
            else:
                x = np.asarray(x)
        margs.append(x)
    masks = []    # list of masks that are True where good
    for i, x in enumerate(margs):
        if seqlist[i]:
            if x.ndim > 1:
                continue  # Don't try to get nan locations unless 1-D.
            if ma.isMA(x):
                masks.append(~ma.getmaskarray(x))  # invert the mask
                xd = x.data
            else:
                xd = x
            try:
                mask = np.isfinite(xd)
                if isinstance(mask, np.ndarray):
                    masks.append(mask)
            except: #Fixme: put in tuple of possible exceptions?
                pass
    if len(masks):
        # AND the masks together: keep rows that are good in every argument.
        # NOTE: `reduce` is the Python 2 builtin (functools.reduce on py3).
        mask = reduce(np.logical_and, masks)
        igood = mask.nonzero()[0]
        if len(igood) < nrecs:
            for i, x in enumerate(margs):
                if seqlist[i]:
                    margs[i] = x.take(igood, axis=0)
    # masked arrays are converted to plain ndarrays on the way out
    for i, x in enumerate(margs):
        if seqlist[i] and ma.isMA(x):
            margs[i] = x.filled()
    return margs
def unmasked_index_ranges(mask, compressed=True):
    '''
    Find index ranges where *mask* is *False*.

    *mask* will be flattened if it is not already 1-D.

    Returns Nx2 :class:`numpy.ndarray` with each row the start and stop
    indices for slices of the compressed :class:`numpy.ndarray`
    corresponding to each of *N* uninterrupted runs of unmasked
    values.  If optional argument *compressed* is *False*, it returns
    the start and stop indices into the original :class:`numpy.ndarray`,
    not the compressed :class:`numpy.ndarray`.  Returns *None* if there
    are no unmasked values.

    Example::
        y = ma.array(np.arange(5), mask = [0,0,1,0,0])
        ii = unmasked_index_ranges(ma.getmaskarray(y))
        # returns array [[0,2,] [2,4,]]
        y.compressed()[ii[1,0]:ii[1,1]]
        # returns array [3,4,]
        ii = unmasked_index_ranges(ma.getmaskarray(y), compressed=False)
        # returns array [[0, 2], [3, 5]]
        y.filled()[ii[1,0]:ii[1,1]]
        # returns array [3,4,]

    Prior to the transforms refactoring, this was used to support
    masked arrays in Line2D.
    '''
    mask = mask.reshape(mask.size)
    # pad with True at both ends so every unmasked run has a -1/+1 transition
    padded = np.concatenate(((1,), mask, (1,)))
    indices = np.arange(len(mask) + 1)
    transitions = padded[1:] - padded[:-1]
    starts = np.compress(transitions == -1, indices)
    stops = np.compress(transitions == 1, indices)
    assert len(starts) == len(stops)
    if len(stops) == 0:
        return None  # Maybe this should be np.zeros((0,2), dtype=int)
    if not compressed:
        return np.concatenate((starts[:, np.newaxis], stops[:, np.newaxis]),
                              axis=1)
    # convert to indices into the compressed (mask removed) array
    run_lengths = stops - starts
    breakpoints = np.cumsum(run_lengths)
    comp_starts = np.concatenate(((0,), breakpoints[:-1]))
    return np.concatenate((comp_starts[:, np.newaxis],
                           breakpoints[:, np.newaxis]), axis=1)
# a dict to cross-map linestyle arguments
_linestyles = [('-', 'solid'),
               ('--', 'dashed'),
               ('-.', 'dashdot'),
               (':', 'dotted')]
# ls_mapper translates in both directions: symbol -> name and name -> symbol
ls_mapper = dict(_linestyles)
ls_mapper.update([(ls[1], ls[0]) for ls in _linestyles])
def less_simple_linear_interpolation( x, y, xi, extrap=False ):
    """
    This function has been moved to matplotlib.mlab -- please import
    it from there
    """
    # deprecated from cbook in 0.98.4
    # Thin deprecation shim: warn once, then delegate to the relocated code.
    warnings.warn('less_simple_linear_interpolation has been moved to matplotlib.mlab -- please import it from there', DeprecationWarning)
    import matplotlib.mlab as mlab
    return mlab.less_simple_linear_interpolation( x, y, xi, extrap=extrap )
def isvector(X):
    """
    This function has been moved to matplotlib.mlab -- please import
    it from there
    """
    # deprecated from cbook in 0.98.4
    warnings.warn('isvector has been moved to matplotlib.mlab -- please import it from there', DeprecationWarning)
    import matplotlib.mlab as mlab
    # BUGFIX: the original forwarded `( x, y, xi, extrap=extrap )`, names that
    # do not exist here -- every call raised NameError.  mlab.isvector takes
    # just the array.
    return mlab.isvector(X)
def vector_lengths( X, P=2., axis=None ):
    """
    This function has been moved to matplotlib.mlab -- please import
    it from there
    """
    # deprecated from cbook in 0.98.4
    warnings.warn('vector_lengths has been moved to matplotlib.mlab -- please import it from there', DeprecationWarning)
    import matplotlib.mlab as mlab
    # BUGFIX: the original hard-coded P=2. in the delegation, silently
    # discarding the caller's P argument.
    return mlab.vector_lengths( X, P=P, axis=axis )
def distances_along_curve( X ):
    """
    This function has been moved to matplotlib.mlab -- please import
    it from there
    """
    # deprecated from cbook in 0.98.4
    # Thin deprecation shim: warn once, then delegate to the relocated code.
    warnings.warn('distances_along_curve has been moved to matplotlib.mlab -- please import it from there', DeprecationWarning)
    import matplotlib.mlab as mlab
    return mlab.distances_along_curve( X )
def path_length(X):
    """
    This function has been moved to matplotlib.mlab -- please import
    it from there
    """
    # deprecated from cbook in 0.98.4
    # Thin deprecation shim: warn once, then delegate to the relocated code.
    warnings.warn('path_length has been moved to matplotlib.mlab -- please import it from there', DeprecationWarning)
    import matplotlib.mlab as mlab
    return mlab.path_length(X)
def is_closed_polygon(X):
    """
    This function has been moved to matplotlib.mlab -- please import
    it from there
    """
    # deprecated from cbook in 0.98.4
    # Thin deprecation shim: warn once, then delegate to the relocated code.
    warnings.warn('is_closed_polygon has been moved to matplotlib.mlab -- please import it from there', DeprecationWarning)
    import matplotlib.mlab as mlab
    return mlab.is_closed_polygon(X)
def quad2cubic(q0x, q0y, q1x, q1y, q2x, q2y):
    """
    This function has been moved to matplotlib.mlab -- please import
    it from there
    """
    # deprecated from cbook in 0.98.4
    # Thin deprecation shim: warn once, then delegate to the relocated code.
    warnings.warn('quad2cubic has been moved to matplotlib.mlab -- please import it from there', DeprecationWarning)
    import matplotlib.mlab as mlab
    return mlab.quad2cubic(q0x, q0y, q1x, q1y, q2x, q2y)
if __name__=='__main__':
    # smoke tests for allequal() when run as a script
    assert( allequal([1,1,1]) )
    assert(not  allequal([1,1,0]) )
    assert( allequal([]) )
    assert( allequal(('a', 'a')))
    assert( not allequal(('a', 'b')))
| agpl-3.0 |
BlueBrain/NeuroM | neurom/view/view.py | 1 | 16854 | # Copyright (c) 2015, Ecole Polytechnique Federale de Lausanne, Blue Brain Project
# All rights reserved.
#
# This file is part of NeuroM <https://github.com/BlueBrain/NeuroM>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Visualize morphologies."""
import numpy as np
from matplotlib.collections import LineCollection, PatchCollection
from matplotlib.lines import Line2D
from matplotlib.patches import Circle, FancyArrowPatch, Polygon, Rectangle
from mpl_toolkits.mplot3d.art3d import Line3DCollection
from neurom import NeuriteType, geom
from neurom.core.neuron import iter_neurites, iter_sections, iter_segments
from neurom.core.soma import SomaCylinders
from neurom.core.dataformat import COLS
from neurom.core.types import tree_type_checker
from neurom.morphmath import segment_radius
from neurom.view.dendrogram import Dendrogram, get_size, layout_dendrogram, move_positions
from neurom.view import common
# Default rendering parameters for the plot_* functions below.
_LINEWIDTH = 1.2       # default line width for segments
_ALPHA = 0.8           # default transparency for plotted artists
_DIAMETER_SCALE = 1.0  # default multiplier applied to segment diameters
# Color assigned to each neurite type; unknown types fall back to 'green'
# via _get_color().
TREE_COLOR = {NeuriteType.basal_dendrite: 'red',
              NeuriteType.apical_dendrite: 'purple',
              NeuriteType.axon: 'blue',
              NeuriteType.soma: 'black',
              NeuriteType.undefined: 'green',
              NeuriteType.custom5: 'orange',
              NeuriteType.custom6: 'orange',
              NeuriteType.custom7: 'orange',
              NeuriteType.custom8: 'orange',
              NeuriteType.custom9: 'orange',
              NeuriteType.custom10: 'orange'}
def _plane2col(plane):
    """Take a string like 'xy', and return the indices from COLS.*."""
    planes = ('xy', 'yx', 'xz', 'zx', 'yz', 'zy')
    assert plane in planes, 'No such plane found! Please select one of: ' + str(planes)
    first, second = plane
    return (getattr(COLS, first.capitalize()),
            getattr(COLS, second.capitalize()))
def _get_linewidth(tree, linewidth, diameter_scale):
    """Calculate the desired linewidth based on tree contents.

    If diameter_scale is not None (and the tree is non-empty), return a list
    of per-segment widths: each segment's diameter times diameter_scale.
    Otherwise return the scalar *linewidth* unchanged.
    """
    if diameter_scale is None or not tree:
        return linewidth
    return [2 * segment_radius(seg) * diameter_scale
            for seg in iter_segments(tree)]
def _get_color(treecolor, tree_type):
    """Return *treecolor* if set, else the default color for *tree_type*."""
    if treecolor is None:
        # unknown neurite types fall back to 'green'
        return TREE_COLOR.get(tree_type, 'green')
    return treecolor
def plot_tree(ax, tree, plane='xy',
              diameter_scale=_DIAMETER_SCALE, linewidth=_LINEWIDTH,
              color=None, alpha=_ALPHA, realistic_diameters=False):
    """Plots a 2d figure of the tree's segments.

    Args:
        ax(matplotlib axes): on what to plot
        tree(neurom.core.Section or neurom.core.Neurite): plotted tree
        plane(str): Any pair of 'xyz'
        diameter_scale(float): Scale factor multiplied with segment diameters before plotting
        linewidth(float): all segments are plotted with this width, but only if diameter_scale=None
        color(str or None): Color of plotted values, None corresponds to default choice
        alpha(float): Transparency of plotted values
        realistic_diameters(bool): scale linewidths with axis data coordinates

    Note:
        If the tree contains one single point the plot will be empty
        since no segments can be constructed.
    """
    plane0, plane1 = _plane2col(plane)
    # flatten the tree into (section, segment) pairs so each segment can be
    # colored according to its section's type
    section_segment_list = [(section, segment)
                            for section in iter_sections(tree)
                            for segment in iter_segments(section)]
    colors = [_get_color(color, section.type) for section, _ in section_segment_list]
    if realistic_diameters:
        # draw each segment as a rotated rectangle whose height is the scaled
        # diameter, expressed in data coordinates
        def _get_rectangle(x, y, linewidth):
            """Draw a rectangle to represent a segment."""
            x, y = np.array(x), np.array(y)
            diff = y - x
            angle = np.arctan2(diff[1], diff[0]) % (2 * np.pi)
            # anchor the rectangle at the corner offset by half the width,
            # perpendicular to the segment direction
            return Rectangle(x - linewidth / 2. * np.array([-np.sin(angle), np.cos(angle)]),
                             np.linalg.norm(diff),
                             linewidth,
                             np.rad2deg(angle))
        segs = [_get_rectangle((seg[0][plane0], seg[0][plane1]),
                               (seg[1][plane0], seg[1][plane1]),
                               2 * segment_radius(seg) * diameter_scale)
                for _, seg in section_segment_list]
        collection = PatchCollection(segs, alpha=alpha, facecolors=colors)
    else:
        # draw each segment as a plain 2D line; widths come from
        # _get_linewidth (per-segment list, or the scalar default)
        segs = [((seg[0][plane0], seg[0][plane1]),
                 (seg[1][plane0], seg[1][plane1]))
                for _, seg in section_segment_list]
        linewidth = _get_linewidth(
            tree,
            diameter_scale=diameter_scale,
            linewidth=linewidth,
        )
        collection = LineCollection(segs, colors=colors, linewidth=linewidth, alpha=alpha)
    ax.add_collection(collection)
def plot_soma(ax, soma, plane='xy',
              soma_outline=True,
              linewidth=_LINEWIDTH,
              color=None, alpha=_ALPHA):
    """Generates a 2d figure of the soma.

    Args:
        ax(matplotlib axes): on what to plot
        soma(neurom.core.Soma): plotted soma
        plane(str): Any pair of 'xyz'
        soma_outline(bool): should the soma be drawn as an outline
        linewidth(float): all segments are plotted with this width, but only if diameter_scale=None
        color(str or None): Color of plotted values, None corresponds to default choice
        alpha(float): Transparency of plotted values
    """
    plane0, plane1 = _plane2col(plane)
    color = _get_color(color, tree_type=NeuriteType.soma)

    if isinstance(soma, SomaCylinders):
        # Cylindrical soma: project each consecutive pair of points as a
        # 2d cylinder onto the requested plane.
        for start, end in zip(soma.points, soma.points[1:]):
            common.project_cylinder_onto_2d(ax, (plane0, plane1),
                                            start=start[COLS.XYZ], end=end[COLS.XYZ],
                                            start_radius=start[COLS.R], end_radius=end[COLS.R],
                                            color=color, alpha=alpha)
    else:
        if soma_outline:
            ax.add_artist(Circle(soma.center[[plane0, plane1]], soma.radius,
                                 color=color, alpha=alpha))
        else:
            # Draw the soma as a closed polyline through its boundary points.
            points = [[p[plane0], p[plane1]] for p in soma.iter()]
            if points:
                points.append(points[0])  # close the loop
                x, y = tuple(np.array(points).T)
                ax.plot(x, y, color=color, alpha=alpha, linewidth=linewidth)

    ax.set_xlabel(plane[0])
    ax.set_ylabel(plane[1])

    bounding_box = geom.bounding_box(soma)
    # NOTE(review): the data limits appear to be extended manually because the
    # artists added above don't feed autoscaling — confirm.
    ax.dataLim.update_from_data_xy(np.vstack(([bounding_box[0][plane0], bounding_box[0][plane1]],
                                              [bounding_box[1][plane0], bounding_box[1][plane1]])),
                                   ignore=False)
# pylint: disable=too-many-arguments
def plot_neuron(ax, nrn,
                neurite_type=NeuriteType.all,
                plane='xy',
                soma_outline=True,
                diameter_scale=_DIAMETER_SCALE, linewidth=_LINEWIDTH,
                color=None, alpha=_ALPHA, realistic_diameters=False):
    """Draws a 2D projection of a neuron: its soma plus each filtered neurite.

    Args:
        ax(matplotlib axes): on what to plot
        neurite_type(NeuriteType|tuple): an optional filter on the neurite type
        nrn(neuron): neuron to be plotted
        soma_outline(bool): should the soma be drawn as an outline
        plane(str): Any pair of 'xyz'
        diameter_scale(float): Scale factor multiplied with segment diameters before plotting
        linewidth(float): all segments are plotted with this width, but only if diameter_scale=None
        color(str or None): Color of plotted values, None corresponds to default choice
        alpha(float): Transparency of plotted values
        realistic_diameters(bool): scale linewidths with axis data coordinates
    """
    plot_soma(ax, nrn.soma, plane=plane, soma_outline=soma_outline,
              linewidth=linewidth, color=color, alpha=alpha)

    # One tree at a time, restricted to the requested neurite type(s).
    type_filter = tree_type_checker(neurite_type)
    for tree in iter_neurites(nrn, filt=type_filter):
        plot_tree(ax, tree, plane=plane,
                  diameter_scale=diameter_scale, linewidth=linewidth,
                  color=color, alpha=alpha, realistic_diameters=realistic_diameters)

    ax.set_title(nrn.name)
    ax.set_xlabel(plane[0])
    ax.set_ylabel(plane[1])
def _update_3d_datalim(ax, obj):
    """Unlike w/ 2d Axes, the dataLim isn't set by collections, so it has to be updated manually."""
    min_bounding_box, max_bounding_box = geom.bounding_box(obj)
    # x/y limits come straight from the first two bounding-box columns.
    xy_bounds = np.vstack((min_bounding_box[:COLS.Z],
                           max_bounding_box[:COLS.Z]))
    ax.xy_dataLim.update_from_data_xy(xy_bounds, ignore=False)

    # z limits are tracked separately on 3d axes; the z value is duplicated
    # into both columns because update_from_data_xy expects 2d points.
    z_bounds = np.vstack(((min_bounding_box[COLS.Z], min_bounding_box[COLS.Z]),
                          (max_bounding_box[COLS.Z], max_bounding_box[COLS.Z])))
    ax.zz_dataLim.update_from_data_xy(z_bounds, ignore=False)
def plot_tree3d(ax, tree,
                diameter_scale=_DIAMETER_SCALE, linewidth=_LINEWIDTH,
                color=None, alpha=_ALPHA):
    """Generates a figure of the tree in 3d.

    If the tree contains one single point the plot will be empty \
    since no segments can be constructed.

    Args:
        ax(matplotlib axes): on what to plot
        tree(neurom.core.Section or neurom.core.Neurite): plotted tree
        diameter_scale(float): Scale factor multiplied with segment diameters before plotting
        linewidth(float): all segments are plotted with this width, but only if diameter_scale=None
        color(str or None): Color of plotted values, None corresponds to default choice
        alpha(float): Transparency of plotted values
    """
    # Flatten to (section, segment) pairs so colors stay aligned with segments.
    section_segment_list = [(section, segment)
                            for section in iter_sections(tree)
                            for segment in iter_segments(section)]
    segs = [(seg[0][COLS.XYZ], seg[1][COLS.XYZ]) for _, seg in section_segment_list]
    colors = [_get_color(color, section.type) for section, _ in section_segment_list]

    linewidth = _get_linewidth(tree, diameter_scale=diameter_scale, linewidth=linewidth)

    collection = Line3DCollection(segs, colors=colors, linewidth=linewidth, alpha=alpha)
    ax.add_collection3d(collection)

    # 3d axes don't grow data limits from collections; update them manually.
    _update_3d_datalim(ax, tree)
def plot_soma3d(ax, soma, color=None, alpha=_ALPHA):
    """Renders *soma* in 3d, either as stacked cylinders or a single sphere.

    Args:
        ax(matplotlib axes): on what to plot
        soma(neurom.core.Soma): plotted soma
        color(str or None): Color of plotted values, None corresponds to default choice
        alpha(float): Transparency of plotted values
    """
    soma_color = _get_color(color, tree_type=NeuriteType.soma)

    if not isinstance(soma, SomaCylinders):
        common.plot_sphere(ax, center=soma.center[COLS.XYZ], radius=soma.radius,
                           color=soma_color, alpha=alpha)
    else:
        # One cylinder per consecutive pair of soma points.
        for seg_start, seg_end in zip(soma.points, soma.points[1:]):
            common.plot_cylinder(ax,
                                 start=seg_start[COLS.XYZ], end=seg_end[COLS.XYZ],
                                 start_radius=seg_start[COLS.R], end_radius=seg_end[COLS.R],
                                 color=soma_color, alpha=alpha)

    # unlike w/ 2d Axes, the dataLim isn't set by collections, so it has to be
    # updated manually
    _update_3d_datalim(ax, soma)
def plot_neuron3d(ax, nrn, neurite_type=NeuriteType.all,
                  diameter_scale=_DIAMETER_SCALE, linewidth=_LINEWIDTH,
                  color=None, alpha=_ALPHA):
    """Renders the neuron in 3d: its soma followed by every filtered neurite.

    Args:
        ax(matplotlib axes): on what to plot
        nrn(neuron): neuron to be plotted
        neurite_type(NeuriteType): an optional filter on the neurite type
        diameter_scale(float): Scale factor multiplied with segment diameters before plotting
        linewidth(float): all segments are plotted with this width, but only if diameter_scale=None
        color(str or None): Color of plotted values, None corresponds to default choice
        alpha(float): Transparency of plotted values
    """
    plot_soma3d(ax, nrn.soma, color=color, alpha=alpha)

    type_filter = tree_type_checker(neurite_type)
    for tree in iter_neurites(nrn, filt=type_filter):
        plot_tree3d(ax, tree,
                    diameter_scale=diameter_scale, linewidth=linewidth,
                    color=color, alpha=alpha)

    ax.set_title(nrn.name)
def _get_dendrogram_legend(dendrogram):
    """Generates labels legend for dendrogram.

    Because dendrogram is rendered as patches, we need to manually label it.

    Args:
        dendrogram (Dendrogram): dendrogram

    Returns:
        List of legend handles.
    """
    def _make_handle(neurite_type):
        # One 2-pt colored line per neurite type, labeled with its name.
        return Line2D([0], [0], color=TREE_COLOR[neurite_type], lw=2, label=neurite_type.name)

    if dendrogram.neurite_type != NeuriteType.soma:
        return [_make_handle(dendrogram.neurite_type)]

    # Soma root: one handle per distinct neurite type among root + children.
    unique_handles = {}
    for sub_dendrogram in [dendrogram] + dendrogram.children:
        unique_handles[sub_dendrogram.neurite_type] = _make_handle(sub_dendrogram.neurite_type)
    return unique_handles.values()
def _as_dendrogram_polygon(coords, color):
    """Return a filled matplotlib ``Polygon`` for a dendrogram section."""
    polygon = Polygon(coords, color=color, fill=True)
    return polygon
def _as_dendrogram_line(start, end, color):
    """Return a plain (arrow-less) connector between two dendrogram points."""
    line = FancyArrowPatch(start, end, arrowstyle='-', color=color, lw=2,
                           shrinkA=0, shrinkB=0)
    return line
def _get_dendrogram_shapes(dendrogram, positions, show_diameters):
    """Generates drawable patches for dendrogram.

    Args:
        dendrogram (Dendrogram): dendrogram
        positions (dict of Dendrogram: np.array): positions xy coordinates of dendrograms
        show_diameters (bool): whether to draw shapes with diameter or as plain lines

    Returns:
        List of matplotlib.patches.
    """
    color = TREE_COLOR[dendrogram.neurite_type]
    start_point = positions[dendrogram]
    # The node is drawn vertically from its position up to its height.
    end_point = start_point + [0, dendrogram.height]
    if show_diameters:
        shapes = [_as_dendrogram_polygon(dendrogram.coords + start_point, color)]
    else:
        shapes = [_as_dendrogram_line(start_point, end_point, color)]

    # Depth-first recursion: connect this node's top to each child's start,
    # then append the child's own subtree shapes.
    for child in dendrogram.children:
        shapes.append(_as_dendrogram_line(end_point, positions[child], color))
        shapes += _get_dendrogram_shapes(child, positions, show_diameters)
    return shapes
def plot_dendrogram(ax, obj, show_diameters=True):
    """Plots Dendrogram of `obj`.

    Args:
        ax: matplotlib axes
        obj (neurom.Neuron, neurom.Section): neuron or section
        show_diameters (bool): whether to show node diameters or not
    """
    dendrogram = Dendrogram(obj)
    positions = layout_dendrogram(dendrogram, np.array([0, 0]))
    w, h = get_size(positions)
    # Center the layout horizontally and leave a 5% margin on every side.
    positions = move_positions(positions, np.array([.5 * w, 0]))
    ax.set_xlim([-.05 * w, 1.05 * w])
    ax.set_ylim([-.05 * h, 1.05 * h])
    ax.set_title('Morphology Dendrogram')
    ax.set_xlabel('micrometers (um)')
    ax.set_ylabel('micrometers (um)')

    shapes = _get_dendrogram_shapes(dendrogram, positions, show_diameters)
    # match_original keeps each patch's own color instead of applying a
    # collection-wide one.
    ax.add_collection(PatchCollection(shapes, match_original=True))

    ax.set_aspect('auto')
    ax.legend(handles=_get_dendrogram_legend(dendrogram))
| bsd-3-clause |
ch3ll0v3k/scikit-learn | sklearn/neighbors/unsupervised.py | 106 | 4461 | """Unsupervised nearest neighbors learner"""
from .base import NeighborsBase
from .base import KNeighborsMixin
from .base import RadiusNeighborsMixin
from .base import UnsupervisedMixin
class NearestNeighbors(NeighborsBase, KNeighborsMixin,
                       RadiusNeighborsMixin, UnsupervisedMixin):
    """Unsupervised learner for implementing neighbor searches.

    Read more in the :ref:`User Guide <unsupervised_neighbors>`.

    Parameters
    ----------
    n_neighbors : int, optional (default = 5)
        Number of neighbors to use by default for :meth:`k_neighbors` queries.

    radius : float, optional (default = 1.0)
        Range of parameter space to use by default for :meth:`radius_neighbors`
        queries.

    algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
        Algorithm used to compute the nearest neighbors:

        - 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
        - 'brute' will use a brute-force search.
        - 'auto' will attempt to decide the most appropriate algorithm
          based on the values passed to :meth:`fit` method.

        Note: fitting on sparse input will override the setting of
        this parameter, using brute force.

    leaf_size : int, optional (default = 30)
        Leaf size passed to BallTree or KDTree.  This can affect the
        speed of the construction and query, as well as the memory
        required to store the tree.  The optimal value depends on the
        nature of the problem.

    p : integer, optional (default = 2)
        Parameter for the Minkowski metric from
        sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is
        equivalent to using manhattan_distance (l1), and euclidean_distance
        (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.

    metric : string or callable, default 'minkowski'
        metric to use for distance computation. Any metric from scikit-learn
        or scipy.spatial.distance can be used.

        If metric is a callable function, it is called on each
        pair of instances (rows) and the resulting value recorded. The callable
        should take two arrays as input and return one value indicating the
        distance between them. This works for Scipy's metrics, but is less
        efficient than passing the metric name as a string.

        Distance matrices are not supported.

        Valid values for metric are:

        - from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
          'manhattan']

        - from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
          'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
          'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
          'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
          'sqeuclidean', 'yule']

        See the documentation for scipy.spatial.distance for details on these
        metrics.

    metric_params : dict, optional (default = None)
        additional keyword arguments for the metric function.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.neighbors import NearestNeighbors
    >>> samples = [[0, 0, 2], [1, 0, 0], [0, 0, 1]]

    >>> neigh = NearestNeighbors(2, 0.4)
    >>> neigh.fit(samples)  #doctest: +ELLIPSIS
    NearestNeighbors(...)

    >>> neigh.kneighbors([[0, 0, 1.3]], 2, return_distance=False)
    ... #doctest: +ELLIPSIS
    array([[2, 0]]...)

    >>> rng = neigh.radius_neighbors([0, 0, 1.3], 0.4, return_distance=False)
    >>> np.asarray(rng[0][0])
    array(2)

    See also
    --------
    KNeighborsClassifier
    RadiusNeighborsClassifier
    KNeighborsRegressor
    RadiusNeighborsRegressor
    BallTree

    Notes
    -----
    See :ref:`Nearest Neighbors <neighbors>` in the online documentation
    for a discussion of the choice of ``algorithm`` and ``leaf_size``.

    http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
    """

    def __init__(self, n_neighbors=5, radius=1.0,
                 algorithm='auto', leaf_size=30, metric='minkowski',
                 p=2, metric_params=None, **kwargs):
        # Parameter storage/validation is delegated entirely to
        # NeighborsBase._init_params; the mixins supply the query methods.
        self._init_params(n_neighbors=n_neighbors,
                          radius=radius,
                          algorithm=algorithm,
                          leaf_size=leaf_size, metric=metric, p=p,
                          metric_params=metric_params, **kwargs)
| bsd-3-clause |
evgchz/scikit-learn | examples/cluster/plot_dict_face_patches.py | 337 | 2747 | """
Online learning of a dictionary of parts of faces
==================================================
This example uses a large dataset of faces to learn a set of 20 x 20
images patches that constitute faces.
From the programming standpoint, it is interesting because it shows how
to use the online API of the scikit-learn to process a very large
dataset by chunks. The way we proceed is that we load an image at a time
and extract randomly 50 patches from this image. Once we have accumulated
500 of these patches (using 10 images), we run the `partial_fit` method
of the online KMeans object, MiniBatchKMeans.
The verbose setting on the MiniBatchKMeans enables us to see that some
clusters are reassigned during the successive calls to
partial-fit. This is because the number of patches that they represent
has become too low, and it is better to choose a random new
cluster.
"""
print(__doc__)
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets
from sklearn.cluster import MiniBatchKMeans
from sklearn.feature_extraction.image import extract_patches_2d
faces = datasets.fetch_olivetti_faces()

###############################################################################
# Learn the dictionary of images

print('Learning the dictionary... ')
rng = np.random.RandomState(0)
kmeans = MiniBatchKMeans(n_clusters=81, random_state=rng, verbose=True)
patch_size = (20, 20)

buffer = []
t0 = time.time()

# The online learning part: cycle over the whole dataset 6 times.
# (The redundant ``index = 1`` that was immediately overwritten has been removed.)
index = 0
for _ in range(6):
    for img in faces.images:
        data = extract_patches_2d(img, patch_size, max_patches=50,
                                  random_state=rng)
        data = np.reshape(data, (len(data), -1))
        buffer.append(data)
        index += 1
        # Every 10 images (= 500 patches) standardize the batch and feed it
        # to the online KMeans.
        if index % 10 == 0:
            data = np.concatenate(buffer, axis=0)
            data -= np.mean(data, axis=0)
            data /= np.std(data, axis=0)
            kmeans.partial_fit(data)
            buffer = []
        if index % 100 == 0:
            print('Partial fit of %4i out of %i'
                  % (index, 6 * len(faces.images)))

dt = time.time() - t0
print('done in %.2fs.' % dt)

###############################################################################
# Plot the results
plt.figure(figsize=(4.2, 4))
for i, patch in enumerate(kmeans.cluster_centers_):
    plt.subplot(9, 9, i + 1)
    plt.imshow(patch.reshape(patch_size), cmap=plt.cm.gray,
               interpolation='nearest')
    plt.xticks(())
    plt.yticks(())

# Fix: the loop above makes 6 passes over the dataset (see the progress
# message), so the title must report 6 * len(faces.images), not 8 *.
plt.suptitle('Patches of faces\nTrain time %.1fs on %d patches' %
             (dt, 6 * len(faces.images)), fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)

plt.show()
| bsd-3-clause |
francesco-mannella/dmp-esn | parametric/parametric_dmp/bin/tr_datasets/e_cursive_curves_angles_LWPR_100/data/trajectories/plot.py | 18 | 1043 | #!/usr/bin/env python
import glob
import numpy as np
import matplotlib.pyplot as plt
import os
import sys
# Run relative to the script's own directory so the glob patterns below
# resolve against the data files shipped next to this script.
pathname = os.path.dirname(sys.argv[0])
if pathname:
    os.chdir(pathname)


def _load_all(pattern):
    """Load every whitespace-delimited data file matching *pattern*."""
    return [np.loadtxt(fname) for fname in glob.glob(pattern)]


# Training/test targets and the corresponding model reproductions.
# (The unused ``n_dim = None`` and the four copy-pasted loading loops of the
# original were consolidated into the helper above.)
trains = _load_all("tl*")
tests = _load_all("tt*")
trial_results = _load_all("rtl*")
test_results = _load_all("rtt*")

fig = plt.figure()
ax = fig.add_subplot(111, aspect="equal")
# Columns 1-2 hold the x/y trajectory; columns 7-8 are scaled by 6 to offset
# each curve — presumably a per-trajectory parameter code (TODO confirm).
for d in trains:
    ax.plot(d[:, 1] + d[:, 7] * 6, d[:, 2] + d[:, 8] * 6,
            color="blue", lw=3, alpha=0.5)
for d in tests:
    ax.plot(d[:, 1] + d[:, 7] * 6, d[:, 2] + d[:, 8] * 6,
            color="red", lw=3, alpha=0.5)
for d in trial_results:
    ax.plot(d[:, 1] + d[:, 7] * 6, d[:, 2] + d[:, 8] * 6,
            color=[0, 0, .5], lw=2)
for d in test_results:
    ax.plot(d[:, 1] + d[:, 7] * 6, d[:, 2] + d[:, 8] * 6,
            color=[.5, 0, 0], lw=2)
plt.show()
| gpl-2.0 |
jakobworldpeace/scikit-learn | sklearn/utils/multiclass.py | 41 | 14732 |
# Author: Arnaud Joly, Joel Nothman, Hamzeh Alsalhi
#
# License: BSD 3 clause
"""
Multi-class / multi-label utility function
==========================================
"""
from __future__ import division
from collections import Sequence
from itertools import chain
from scipy.sparse import issparse
from scipy.sparse.base import spmatrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
import numpy as np
from ..externals.six import string_types
from .validation import check_array
from ..utils.fixes import bincount
from ..utils.fixes import array_equal
def _unique_multiclass(y):
if hasattr(y, '__array__'):
return np.unique(np.asarray(y))
else:
return set(y)
def _unique_indicator(y):
    """For label-indicator input the 'labels' are simply the column indices."""
    n_labels = check_array(y, ['csr', 'csc', 'coo']).shape[1]
    return np.arange(n_labels)
# Dispatch table: target-type string (as returned by type_of_target) to the
# function extracting that type's unique labels.
_FN_UNIQUE_LABELS = {
    'binary': _unique_multiclass,
    'multiclass': _unique_multiclass,
    'multilabel-indicator': _unique_indicator,
}
def unique_labels(*ys):
    """Extract an ordered array of unique labels

    We don't allow:
        - mix of multilabel and multiclass (single label) targets
        - mix of label indicator matrix and anything else,
          because there are no explicit labels)
        - mix of label indicator matrices of different sizes
        - mix of string and integer labels

    At the moment, we also don't allow "multiclass-multioutput" input type.

    Parameters
    ----------
    *ys : array-likes,

    Returns
    -------
    out : numpy array of shape [n_unique_labels]
        An ordered array of unique labels.

    Examples
    --------
    >>> from sklearn.utils.multiclass import unique_labels
    >>> unique_labels([3, 5, 5, 5, 7, 7])
    array([3, 5, 7])
    >>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4])
    array([1, 2, 3, 4])
    >>> unique_labels([1, 2, 10], [5, 11])
    array([ 1,  2,  5, 10, 11])
    """
    if not ys:
        raise ValueError('No argument has been passed.')
    # Check that we don't mix label format
    ys_types = set(type_of_target(x) for x in ys)
    if ys_types == set(["binary", "multiclass"]):
        # binary is a special case of multiclass, so this mix is allowed
        ys_types = set(["multiclass"])

    if len(ys_types) > 1:
        raise ValueError("Mix type of y not allowed, got types %s" % ys_types)

    label_type = ys_types.pop()

    # Check consistency for the indicator format: all indicator matrices must
    # have the same number of columns (= number of labels)
    if (label_type == "multilabel-indicator" and
            len(set(check_array(y, ['csr', 'csc', 'coo']).shape[1]
                    for y in ys)) > 1):
        raise ValueError("Multi-label binary indicator input with "
                         "different numbers of labels")

    # Get the unique set of labels
    _unique_labels = _FN_UNIQUE_LABELS.get(label_type, None)
    if not _unique_labels:
        raise ValueError("Unknown label type: %s" % repr(ys))

    ys_labels = set(chain.from_iterable(_unique_labels(y) for y in ys))

    # Check that we don't mix string type with number type
    if (len(set(isinstance(label, string_types) for label in ys_labels)) > 1):
        raise ValueError("Mix of label input types (string and number)")

    return np.array(sorted(ys_labels))
def _is_integral_float(y):
return y.dtype.kind == 'f' and np.all(y.astype(int) == y)
def is_multilabel(y):
    """ Check if ``y`` is in a multilabel format.

    Parameters
    ----------
    y : numpy array of shape [n_samples]
        Target values.

    Returns
    -------
    out : bool,
        Return ``True``, if ``y`` is in a multilabel format, else ```False``.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.utils.multiclass import is_multilabel
    >>> is_multilabel([0, 1, 0, 1])
    False
    >>> is_multilabel([[1], [0, 2], []])
    False
    >>> is_multilabel(np.array([[1, 0], [0, 0]]))
    True
    >>> is_multilabel(np.array([[1], [0], [0]]))
    False
    >>> is_multilabel(np.array([[1, 0, 0]]))
    True
    """
    if hasattr(y, '__array__'):
        y = np.asarray(y)
    # A multilabel indicator must be 2d with more than one column.
    if not (hasattr(y, "shape") and y.ndim == 2 and y.shape[1] > 1):
        return False

    if issparse(y):
        if isinstance(y, (dok_matrix, lil_matrix)):
            y = y.tocsr()
        # Either no stored values at all, or a single distinct stored value
        # that is boolean/integer (or a float holding an integral value).
        # NOTE: `and` binds tighter than `or`, so the dtype check applies only
        # to the single-distinct-value branch.
        return (len(y.data) == 0 or np.unique(y.data).size == 1 and
                (y.dtype.kind in 'biu' or  # bool, int, uint
                 _is_integral_float(np.unique(y.data))))
    else:
        labels = np.unique(y)

        # At most two distinct values, all of them boolean/integer-valued.
        return len(labels) < 3 and (y.dtype.kind in 'biu' or  # bool, int, uint
                                    _is_integral_float(labels))
def check_classification_targets(y):
    """Ensure that target y is of a non-regression type.

    Only the following target types (as defined in type_of_target) are allowed:
        'binary', 'multiclass', 'multiclass-multioutput',
        'multilabel-indicator', 'multilabel-sequences'

    Parameters
    ----------
    y : array-like
    """
    allowed_types = ('binary', 'multiclass', 'multiclass-multioutput',
                     'multilabel-indicator', 'multilabel-sequences')
    y_type = type_of_target(y)
    if y_type not in allowed_types:
        raise ValueError("Unknown label type: %r" % y_type)
def type_of_target(y):
    """Determine the type of data indicated by target `y`

    Parameters
    ----------
    y : array-like

    Returns
    -------
    target_type : string
        One of:
        * 'continuous': `y` is an array-like of floats that are not all
          integers, and is 1d or a column vector.
        * 'continuous-multioutput': `y` is a 2d array of floats that are
          not all integers, and both dimensions are of size > 1.
        * 'binary': `y` contains <= 2 discrete values and is 1d or a column
          vector.
        * 'multiclass': `y` contains more than two discrete values, is not a
          sequence of sequences, and is 1d or a column vector.
        * 'multiclass-multioutput': `y` is a 2d array that contains more
          than two discrete values, is not a sequence of sequences, and both
          dimensions are of size > 1.
        * 'multilabel-indicator': `y` is a label indicator matrix, an array
          of two dimensions with at least two columns, and at most 2 unique
          values.
        * 'unknown': `y` is array-like but none of the above, such as a 3d
          array, sequence of sequences, or an array of non-sequence objects.

    Examples
    --------
    >>> import numpy as np
    >>> type_of_target([0.1, 0.6])
    'continuous'
    >>> type_of_target([1, -1, -1, 1])
    'binary'
    >>> type_of_target(['a', 'b', 'a'])
    'binary'
    >>> type_of_target([1.0, 2.0])
    'binary'
    >>> type_of_target([1, 0, 2])
    'multiclass'
    >>> type_of_target([1.0, 0.0, 3.0])
    'multiclass'
    >>> type_of_target(['a', 'b', 'c'])
    'multiclass'
    >>> type_of_target(np.array([[1, 2], [3, 1]]))
    'multiclass-multioutput'
    >>> type_of_target([[1, 2]])
    'multiclass-multioutput'
    >>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]]))
    'continuous-multioutput'
    >>> type_of_target(np.array([[0, 1], [1, 1]]))
    'multilabel-indicator'
    """
    # Accept sequences, sparse matrices and anything convertible to an array,
    # but reject plain strings.
    valid = ((isinstance(y, (Sequence, spmatrix)) or hasattr(y, '__array__'))
             and not isinstance(y, string_types))

    if not valid:
        raise ValueError('Expected array-like (array or non-string sequence), '
                         'got %r' % y)

    if is_multilabel(y):
        return 'multilabel-indicator'

    try:
        y = np.asarray(y)
    except ValueError:
        # Known to fail in numpy 1.3 for array of arrays
        return 'unknown'

    # The old sequence of sequences format
    try:
        if (not hasattr(y[0], '__array__') and isinstance(y[0], Sequence)
                and not isinstance(y[0], string_types)):
            raise ValueError('You appear to be using a legacy multi-label data'
                             ' representation. Sequence of sequences are no'
                             ' longer supported; use a binary array or sparse'
                             ' matrix instead.')
    except IndexError:
        # Empty input: fall through to the shape-based checks below.
        pass

    # Invalid inputs
    if y.ndim > 2 or (y.dtype == object and len(y) and
                      not isinstance(y.flat[0], string_types)):
        return 'unknown'  # [[[1, 2]]] or [obj_1] and not ["label_1"]

    if y.ndim == 2 and y.shape[1] == 0:
        return 'unknown'  # [[]]

    if y.ndim == 2 and y.shape[1] > 1:
        suffix = "-multioutput"  # [[1, 2], [1, 2]]
    else:
        suffix = ""  # [1, 2, 3] or [[1], [2], [3]]

    # check float and contains non-integer float values
    if y.dtype.kind == 'f' and np.any(y != y.astype(int)):
        # [.1, .2, 3] or [[.1, .2, 3]] or [[1., .2]] and not [1., 2., 3.]
        return 'continuous' + suffix

    if (len(np.unique(y)) > 2) or (y.ndim >= 2 and len(y[0]) > 1):
        return 'multiclass' + suffix  # [1, 2, 3] or [[1., 2., 3]] or [[1, 2]]
    else:
        return 'binary'  # [1, 2] or [["a"], ["b"]]
def _check_partial_fit_first_call(clf, classes=None):
    """Private helper function for factorizing common classes param logic

    Estimators that implement the ``partial_fit`` API need to be provided with
    the list of possible classes at the first call to partial_fit.

    Subsequent calls to partial_fit should check that ``classes`` is still
    consistent with a previous value of ``clf.classes_`` when provided.

    This function returns True if it detects that this was the first call to
    ``partial_fit`` on ``clf``. In that case the ``classes_`` attribute is also
    set on ``clf``.
    """
    if getattr(clf, 'classes_', None) is None and classes is None:
        raise ValueError("classes must be passed on the first call "
                         "to partial_fit.")

    elif classes is not None:
        if getattr(clf, 'classes_', None) is not None:
            # Not the first call: the provided classes must match what was
            # recorded previously.
            if not array_equal(clf.classes_, unique_labels(classes)):
                raise ValueError(
                    "`classes=%r` is not the same as on last call "
                    "to partial_fit, was: %r" % (classes, clf.classes_))

        else:
            # This is the first call to partial_fit
            clf.classes_ = unique_labels(classes)
            return True

    # classes is None and clf.classes_ has already previously been set:
    # nothing to do
    return False
def class_distribution(y, sample_weight=None):
    """Compute class priors from multioutput-multiclass target data

    Parameters
    ----------
    y : array like or sparse matrix of size (n_samples, n_outputs)
        The labels for each example.

    sample_weight : array-like of shape = (n_samples,), optional
        Sample weights.

    Returns
    -------
    classes : list of size n_outputs of arrays of size (n_classes,)
        List of classes for each column.

    n_classes : list of integers of size n_outputs
        Number of classes in each column

    class_prior : list of size n_outputs of arrays of size (n_classes,)
        Class distribution of each column.
    """
    classes = []
    n_classes = []
    class_prior = []

    n_samples, n_outputs = y.shape

    if issparse(y):
        y = y.tocsc()
        # Number of explicitly stored entries per output column.
        y_nnz = np.diff(y.indptr)

        for k in range(n_outputs):
            col_nonzero = y.indices[y.indptr[k]:y.indptr[k + 1]]
            # separate sample weights for zero and non-zero elements
            if sample_weight is not None:
                nz_samp_weight = np.asarray(sample_weight)[col_nonzero]
                zeros_samp_weight_sum = (np.sum(sample_weight) -
                                         np.sum(nz_samp_weight))
            else:
                nz_samp_weight = None
                zeros_samp_weight_sum = y.shape[0] - y_nnz[k]

            # Count the stored (non-implicit) values only; implicit zeros are
            # folded back in below.
            classes_k, y_k = np.unique(y.data[y.indptr[k]:y.indptr[k + 1]],
                                       return_inverse=True)
            class_prior_k = bincount(y_k, weights=nz_samp_weight)

            # An explicit zero was found, combine its weight with the weight
            # of the implicit zeros
            if 0 in classes_k:
                class_prior_k[classes_k == 0] += zeros_samp_weight_sum

            # If an there is an implicit zero and it is not in classes and
            # class_prior, make an entry for it
            if 0 not in classes_k and y_nnz[k] < y.shape[0]:
                classes_k = np.insert(classes_k, 0, 0)
                class_prior_k = np.insert(class_prior_k, 0,
                                          zeros_samp_weight_sum)

            classes.append(classes_k)
            n_classes.append(classes_k.shape[0])
            class_prior.append(class_prior_k / class_prior_k.sum())
    else:
        # Dense path: a direct unique/bincount per output column.
        for k in range(n_outputs):
            classes_k, y_k = np.unique(y[:, k], return_inverse=True)
            classes.append(classes_k)
            n_classes.append(classes_k.shape[0])
            class_prior_k = bincount(y_k, weights=sample_weight)
            class_prior.append(class_prior_k / class_prior_k.sum())

    return (classes, n_classes, class_prior)
def _ovr_decision_function(predictions, confidences, n_classes):
"""Compute a continuous, tie-breaking ovr decision function.
It is important to include a continuous value, not only votes,
to make computing AUC or calibration meaningful.
Parameters
----------
predictions : array-like, shape (n_samples, n_classifiers)
Predicted classes for each binary classifier.
confidences : array-like, shape (n_samples, n_classifiers)
Decision functions or predicted probabilities for positive class
for each binary classifier.
n_classes : int
Number of classes. n_classifiers must be
``n_classes * (n_classes - 1 ) / 2``
"""
n_samples = predictions.shape[0]
votes = np.zeros((n_samples, n_classes))
sum_of_confidences = np.zeros((n_samples, n_classes))
k = 0
for i in range(n_classes):
for j in range(i + 1, n_classes):
sum_of_confidences[:, i] -= confidences[:, k]
sum_of_confidences[:, j] += confidences[:, k]
votes[predictions[:, k] == 0, i] += 1
votes[predictions[:, k] == 1, j] += 1
k += 1
max_confidences = sum_of_confidences.max()
min_confidences = sum_of_confidences.min()
if max_confidences == min_confidences:
return votes
# Scale the sum_of_confidences to (-0.5, 0.5) and add it with votes.
# The motivation is to use confidence levels as a way to break ties in
# the votes without switching any decision made based on a difference
# of 1 vote.
eps = np.finfo(sum_of_confidences.dtype).eps
max_abs_confidence = max(abs(max_confidences), abs(min_confidences))
scale = (0.5 - eps) / max_abs_confidence
return votes + sum_of_confidences * scale
| bsd-3-clause |
r-mart/scikit-learn | sklearn/manifold/tests/test_locally_linear.py | 232 | 4761 | from itertools import product
from nose.tools import assert_true
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from scipy import linalg
from sklearn import neighbors, manifold
from sklearn.manifold.locally_linear import barycenter_kneighbors_graph
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import ignore_warnings
eigen_solvers = ['dense', 'arpack']
#----------------------------------------------------------------------
# Test utility routines
def test_barycenter_kneighbors_graph():
    """Barycenter weights: k=1 selects the nearest neighbour exactly;
    with k=2 the weights per point sum to one and reconstruct X closely."""
    X = np.array([[0, 1], [1.01, 1.], [2, 0]])

    A = barycenter_kneighbors_graph(X, 1)
    assert_array_almost_equal(
        A.toarray(),
        [[0., 1., 0.],
         [1., 0., 0.],
         [0., 1., 0.]])

    A = barycenter_kneighbors_graph(X, 2)
    # check that columns sum to one
    # NOTE(review): np.sum(..., 1) sums over axis=1, i.e. the ROWS of A — the
    # comment above seems inaccurate; confirm intent.
    assert_array_almost_equal(np.sum(A.toarray(), 1), np.ones(3))
    pred = np.dot(A.toarray(), X)
    assert_less(linalg.norm(pred - X) / X.shape[0], 1)
#----------------------------------------------------------------------
# Test LLE by computing the reconstruction error on some manifolds.
def test_lle_simple_grid():
    """LLE on a flat 2d grid should reconstruct it with low error for both solvers."""
    # note: ARPACK is numerically unstable, so this test will fail for
    #       some random seeds. We choose 2 because the tests pass.
    rng = np.random.RandomState(2)
    tol = 0.1

    # grid of equidistant points in 2D, n_components = n_dim
    X = np.array(list(product(range(5), repeat=2)))
    X = X + 1e-10 * rng.uniform(size=X.shape)
    n_components = 2
    clf = manifold.LocallyLinearEmbedding(n_neighbors=5,
                                          n_components=n_components,
                                          random_state=rng)
    # NOTE(review): tol is re-assigned to the same value here, making the
    # earlier assignment redundant.
    tol = 0.1

    N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
    reconstruction_error = linalg.norm(np.dot(N, X) - X, 'fro')
    assert_less(reconstruction_error, tol)

    for solver in eigen_solvers:
        clf.set_params(eigen_solver=solver)
        clf.fit(X)
        assert_true(clf.embedding_.shape[1] == n_components)
        reconstruction_error = linalg.norm(
            np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2

        assert_less(reconstruction_error, tol)
        assert_almost_equal(clf.reconstruction_error_,
                            reconstruction_error, decimal=1)

    # re-embed a noisy version of X using the transform method
    noise = rng.randn(*X.shape) / 100
    X_reembedded = clf.transform(X + noise)
    assert_less(linalg.norm(X_reembedded - clf.embedding_), tol)
def test_lle_manifold():
    """Every LLE variant should embed a gently curved 2d manifold with low
    reconstruction error, for both eigen solvers."""
    rng = np.random.RandomState(0)
    # similar test on a slightly more complex manifold
    X = np.array(list(product(np.arange(18), repeat=2)))
    X = np.c_[X, X[:, 0] ** 2 / 18]
    X = X + 1e-10 * rng.uniform(size=X.shape)
    n_components = 2

    for method in ["standard", "hessian", "modified", "ltsa"]:
        clf = manifold.LocallyLinearEmbedding(n_neighbors=6,
                                              n_components=n_components,
                                              method=method, random_state=0)
        # the plain barycenter reconstruction is only tight for "standard"
        tol = 1.5 if method == "standard" else 3

        N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
        reconstruction_error = linalg.norm(np.dot(N, X) - X)
        assert_less(reconstruction_error, tol)

        for solver in eigen_solvers:
            clf.set_params(eigen_solver=solver)
            clf.fit(X)
            assert_true(clf.embedding_.shape[1] == n_components)
            reconstruction_error = linalg.norm(
                np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
            details = ("solver: %s, method: %s" % (solver, method))
            assert_less(reconstruction_error, tol, msg=details)
            assert_less(np.abs(clf.reconstruction_error_ -
                               reconstruction_error),
                        tol * reconstruction_error, msg=details)
def test_pipeline():
    # check that LocallyLinearEmbedding works fine as a Pipeline
    # only checks that no error is raised.
    # TODO check that it actually does something useful
    from sklearn import pipeline, datasets
    X, y = datasets.make_blobs(random_state=0)
    steps = [('filter', manifold.LocallyLinearEmbedding(random_state=0)),
             ('clf', neighbors.KNeighborsClassifier())]
    model = pipeline.Pipeline(steps)
    model.fit(X, y)
    assert_less(.9, model.score(X, y))
# Test the error raised when the weight matrix is singular
def test_singular_matrix():
    from nose.tools import assert_raises
    # a rank-deficient input makes the barycenter weight matrix singular
    M = np.ones((10, 3))
    wrapped = ignore_warnings(manifold.locally_linear_embedding)
    assert_raises(ValueError, wrapped,
                  M, 2, 1, method='standard', eigen_solver='arpack')
| bsd-3-clause |
MartinDelzant/scikit-learn | examples/decomposition/plot_pca_vs_fa_model_selection.py | 142 | 4467 | """
===============================================================
Model selection with Probabilistic PCA and Factor Analysis (FA)
===============================================================
Probabilistic PCA and Factor Analysis are probabilistic models.
The consequence is that the likelihood of new data can be used
for model selection and covariance estimation.
Here we compare PCA and FA with cross-validation on low rank data corrupted
with homoscedastic noise (noise variance
is the same for each feature) or heteroscedastic noise (noise variance
is different for each feature). In a second step we compare the model
likelihood to the likelihoods obtained from shrinkage covariance estimators.
One can observe that with homoscedastic noise both FA and PCA succeed
in recovering the size of the low rank subspace. The likelihood with PCA
is higher than FA in this case. However PCA fails and overestimates
the rank when heteroscedastic noise is present. Under appropriate
circumstances the low rank models are more likely than shrinkage models.
The automatic estimation from
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604
by Thomas P. Minka is also compared.
"""
print(__doc__)
# Authors: Alexandre Gramfort
# Denis A. Engemann
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from sklearn.decomposition import PCA, FactorAnalysis
from sklearn.covariance import ShrunkCovariance, LedoitWolf
from sklearn.cross_validation import cross_val_score
from sklearn.grid_search import GridSearchCV
###############################################################################
# Create the data
n_samples, n_features, rank = 1000, 50, 10
sigma = 1.
rng = np.random.RandomState(42)
# Random orthonormal basis; keep only `rank` directions to build low-rank data.
U, _, _ = linalg.svd(rng.randn(n_features, n_features))
X = np.dot(rng.randn(n_samples, rank), U[:, :rank].T)

# Adding homoscedastic noise (same variance for every feature)
X_homo = X + sigma * rng.randn(n_samples, n_features)

# Adding heteroscedastic noise (a different variance per feature)
sigmas = sigma * rng.rand(n_features) + sigma / 2.
X_hetero = X + rng.randn(n_samples, n_features) * sigmas

###############################################################################
# Fit the models
n_components = np.arange(0, n_features, 5)  # options for n_components
def compute_scores(X):
    """Cross-validated log-likelihoods of PCA and FA for each n_components."""
    pca_estimator = PCA()
    fa_estimator = FactorAnalysis()

    pca_scores = []
    fa_scores = []
    for n in n_components:
        pca_estimator.n_components = n
        fa_estimator.n_components = n
        pca_scores.append(np.mean(cross_val_score(pca_estimator, X)))
        fa_scores.append(np.mean(cross_val_score(fa_estimator, X)))

    return pca_scores, fa_scores
def shrunk_cov_score(X):
    """CV log-likelihood of a ShrunkCovariance model with tuned shrinkage."""
    grid = {'shrinkage': np.logspace(-2, 0, 30)}
    search = GridSearchCV(ShrunkCovariance(), grid)
    best = search.fit(X).best_estimator_
    return np.mean(cross_val_score(best, X))
def lw_score(X):
    """Cross-validated log-likelihood of the Ledoit-Wolf estimator."""
    scores = cross_val_score(LedoitWolf(), X)
    return np.mean(scores)
# Compare the model-selection curves on both noise regimes.
for X, title in [(X_homo, 'Homoscedastic Noise'),
                 (X_hetero, 'Heteroscedastic Noise')]:
    pca_scores, fa_scores = compute_scores(X)
    n_components_pca = n_components[np.argmax(pca_scores)]
    n_components_fa = n_components[np.argmax(fa_scores)]

    # Minka's automatic dimensionality selection for PCA.
    pca = PCA(n_components='mle')
    pca.fit(X)
    n_components_pca_mle = pca.n_components_

    print("best n_components by PCA CV = %d" % n_components_pca)
    print("best n_components by FactorAnalysis CV = %d" % n_components_fa)
    print("best n_components by PCA MLE = %d" % n_components_pca_mle)

    plt.figure()
    plt.plot(n_components, pca_scores, 'b', label='PCA scores')
    plt.plot(n_components, fa_scores, 'r', label='FA scores')
    plt.axvline(rank, color='g', label='TRUTH: %d' % rank, linestyle='-')
    plt.axvline(n_components_pca, color='b',
                label='PCA CV: %d' % n_components_pca, linestyle='--')
    plt.axvline(n_components_fa, color='r',
                label='FactorAnalysis CV: %d' % n_components_fa, linestyle='--')
    plt.axvline(n_components_pca_mle, color='k',
                label='PCA MLE: %d' % n_components_pca_mle, linestyle='--')

    # compare with other covariance estimators
    plt.axhline(shrunk_cov_score(X), color='violet',
                label='Shrunk Covariance MLE', linestyle='-.')
    # BUG FIX: the label previously read 'LedoitWolf MLE' % n_components_pca_mle,
    # i.e. the %-operator applied to a format string with no conversion
    # specifier, which raises TypeError at runtime.
    plt.axhline(lw_score(X), color='orange',
                label='LedoitWolf MLE', linestyle='-.')

    plt.xlabel('nb of components')
    plt.ylabel('CV scores')
    plt.legend(loc='lower right')
    plt.title(title)

plt.show()
| bsd-3-clause |
gkioxari/RstarCNN | lib/fast_rcnn/test_stanford40.py | 1 | 10561 | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Test a Fast R-CNN network on an imdb (image database)."""
from fast_rcnn.config import cfg, get_output_dir
import argparse
from utils.timer import Timer
import numpy as np
import cv2
import caffe
from utils.cython_nms import nms
import cPickle
import heapq
from utils.blob import im_list_to_blob
import os
import scipy.io as sio
import utils.cython_bbox
import pdb
def _get_image_blob(im):
    """Converts an image into a network input.

    Arguments:
        im (ndarray): a color image in BGR order

    Returns:
        blob (ndarray): a data blob holding an image pyramid
        im_scale_factors (list): list of image scales (relative to im) used
            in the image pyramid
    """
    # Mean-subtract a float copy of the input image.
    normalized = im.astype(np.float32, copy=True)
    normalized -= cfg.PIXEL_MEANS

    shape = normalized.shape
    shortest_side = np.min(shape[0:2])
    longest_side = np.max(shape[0:2])

    pyramid = []
    scale_factors = []
    for target_size in cfg.TEST.SCALES:
        scale = float(target_size) / float(shortest_side)
        # Prevent the biggest axis from being more than MAX_SIZE
        if np.round(scale * longest_side) > cfg.TEST.MAX_SIZE:
            scale = float(cfg.TEST.MAX_SIZE) / float(longest_side)
        resized = cv2.resize(normalized, None, None, fx=scale, fy=scale,
                             interpolation=cv2.INTER_LINEAR)
        scale_factors.append(scale)
        pyramid.append(resized)

    # Create a blob to hold the input images
    return im_list_to_blob(pyramid), np.array(scale_factors)
def _get_rois_blob(im_rois, im_scale_factors):
    """Converts RoIs into network inputs.

    Arguments:
        im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates
        im_scale_factors (list): scale factors as returned by _get_image_blob

    Returns:
        blob (ndarray): R x 5 matrix of RoIs in the image pyramid
    """
    projected, levels = _project_im_rois(im_rois, im_scale_factors)
    # Column 0 carries the pyramid level, columns 1-4 the scaled box.
    stacked = np.hstack((levels, projected))
    return stacked.astype(np.float32, copy=False)
def _project_im_rois(im_rois, scales):
"""Project image RoIs into the image pyramid built by _get_image_blob.
Arguments:
im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates
scales (list): scale factors as returned by _get_image_blob
Returns:
rois (ndarray): R x 4 matrix of projected RoI coordinates
levels (list): image pyramid levels used by each projected RoI
"""
im_rois = im_rois.astype(np.float, copy=False)
if len(scales) > 1:
widths = im_rois[:, 2] - im_rois[:, 0] + 1
heights = im_rois[:, 3] - im_rois[:, 1] + 1
areas = widths * heights
scaled_areas = areas[:, np.newaxis] * (scales[np.newaxis, :] ** 2)
diff_areas = np.abs(scaled_areas - 224 * 224)
levels = diff_areas.argmin(axis=1)[:, np.newaxis]
else:
levels = np.zeros((im_rois.shape[0], 1), dtype=np.int)
rois = im_rois * scales[levels]
return rois, levels
def _get_blobs(im, rois):
    """Convert an image and RoIs within that image into network inputs."""
    data_blob, im_scale_factors = _get_image_blob(im)
    # Primary and secondary RoI inputs are built from the same proposals.
    blobs = {
        'data': data_blob,
        'rois': _get_rois_blob(rois, im_scale_factors),
        'secondary_rois': _get_rois_blob(rois, im_scale_factors),
    }
    return blobs, im_scale_factors
def _bbox_pred(boxes, box_deltas):
    """Transform the set of class-agnostic boxes into class-specific boxes
    by applying the predicted offsets (box_deltas).

    Arguments:
        boxes (ndarray): R x 4 array of boxes (x1, y1, x2, y2)
        box_deltas (ndarray): R x (4*K) array of per-class regression
            targets (dx, dy, dw, dh)

    Returns:
        pred_boxes (ndarray): R x (4*K) array of predicted boxes
    """
    if boxes.shape[0] == 0:
        return np.zeros((0, box_deltas.shape[1]))

    # BUG FIX: np.float is removed in NumPy >= 1.24; np.float64 is identical.
    boxes = boxes.astype(np.float64, copy=False)

    # Decompose boxes into center/size form (EPS keeps sizes non-zero).
    widths = boxes[:, 2] - boxes[:, 0] + cfg.EPS
    heights = boxes[:, 3] - boxes[:, 1] + cfg.EPS
    ctr_x = boxes[:, 0] + 0.5 * widths
    ctr_y = boxes[:, 1] + 0.5 * heights

    dx = box_deltas[:, 0::4]
    dy = box_deltas[:, 1::4]
    dw = box_deltas[:, 2::4]
    dh = box_deltas[:, 3::4]

    # Shift centers linearly; scale sizes in log-space.
    pred_ctr_x = dx * widths[:, np.newaxis] + ctr_x[:, np.newaxis]
    pred_ctr_y = dy * heights[:, np.newaxis] + ctr_y[:, np.newaxis]
    pred_w = np.exp(dw) * widths[:, np.newaxis]
    pred_h = np.exp(dh) * heights[:, np.newaxis]

    pred_boxes = np.zeros(box_deltas.shape)
    # x1
    pred_boxes[:, 0::4] = pred_ctr_x - 0.5 * pred_w
    # y1
    pred_boxes[:, 1::4] = pred_ctr_y - 0.5 * pred_h
    # x2
    pred_boxes[:, 2::4] = pred_ctr_x + 0.5 * pred_w
    # y2
    pred_boxes[:, 3::4] = pred_ctr_y + 0.5 * pred_h
    return pred_boxes
def _clip_boxes(boxes, im_shape):
"""Clip boxes to image boundaries."""
# x1 >= 0
boxes[:, 0::4] = np.maximum(boxes[:, 0::4], 0)
# y1 >= 0
boxes[:, 1::4] = np.maximum(boxes[:, 1::4], 0)
# x2 < im_shape[1]
boxes[:, 2::4] = np.minimum(boxes[:, 2::4], im_shape[1] - 1)
# y2 < im_shape[0]
boxes[:, 3::4] = np.minimum(boxes[:, 3::4], im_shape[0] - 1)
return boxes
def im_detect(net, im, boxes, gt_label):
    """Detect object classes in an image given object proposals.

    Arguments:
        net (caffe.Net): Fast R-CNN network to use
        im (ndarray): color image to test (in BGR order)
        boxes (ndarray): R x 4 array of object proposals
        gt_label (ndarray): per-proposal labels; entries > -1 select primary
            (ground-truth) regions, entries == -1 select secondary/context
            candidates

    Returns:
        scores (ndarray): softmax class scores of the primary regions after
            "sum of max" fusion with the best secondary region per class
        secondary_scores (ndarray): raw class scores of the secondary regions
        selected_boxes (ndarray): K x 4 x G array of the winning secondary box
            per class (K) and primary region (G)
    """
    blobs, unused_im_scale_factors = _get_blobs(im, boxes)
    base_shape = blobs['data'].shape

    # Primary (ground-truth) RoIs feed the 'rois' network input...
    gt_inds = np.where(gt_label>-1)[0]
    num_rois = len(gt_inds)
    blobs_rois = blobs['rois'][gt_inds].astype(np.float32, copy=False)
    blobs_rois = blobs_rois[:, :, np.newaxis, np.newaxis]

    # ...secondary (context) RoIs feed 'secondary_rois'.
    non_gt_inds = np.where(gt_label==-1)[0]
    num_sec_rois = len(non_gt_inds)
    blobs_sec_rois = blobs['secondary_rois'][non_gt_inds].astype(np.float32, copy=False)
    blobs_sec_rois = blobs_sec_rois[:, :, np.newaxis, np.newaxis]

    # reshape network inputs
    net.blobs['data'].reshape(base_shape[0], base_shape[1],
                              base_shape[2], base_shape[3])
    net.blobs['rois'].reshape(num_rois, 5, 1, 1)
    net.blobs['secondary_rois'].reshape(num_sec_rois, 5, 1, 1)
    blobs_out = net.forward(data=blobs['data'].astype(np.float32, copy=False),
                            rois=blobs_rois,
                            secondary_rois = blobs_sec_rois)
    scores = blobs_out['cls_score']
    secondary_scores = blobs_out['context_cls_score']

    gt_boxes = boxes[gt_inds]
    sec_boxes = boxes[non_gt_inds]
    # Compute overlap
    # NOTE(review): np.float is removed in NumPy >= 1.24 — this line needs
    # np.float64 to run on modern NumPy.
    boxes_overlaps = \
        utils.cython_bbox.bbox_overlaps(sec_boxes.astype(np.float),
                                        gt_boxes.astype(np.float))
    selected_boxes = np.zeros((scores.shape[1], 4, gt_boxes.shape[0]))

    # Sum of Max: for each primary box, add the per-class max over the
    # secondary boxes whose IoU with it falls inside [IOU_LB, IOU_UB].
    for i in xrange(gt_boxes.shape[0]):
        keep_inds = np.where((boxes_overlaps[:,i]>=cfg.TEST.IOU_LB) &
                             (boxes_overlaps[:,i]<=cfg.TEST.IOU_UB))[0]
        if keep_inds.size > 0:
            this_scores = np.amax(secondary_scores[keep_inds,:], axis=0)
            scores[i,:] = scores[i,:]+this_scores
            # Remember which secondary box won, per class.
            winner_ind = np.argmax(secondary_scores[keep_inds,:], axis=0)
            selected_boxes[:,:,i] = sec_boxes[keep_inds[winner_ind]]

    # Softmax (max-shifted for numerical stability)
    scores = np.exp(scores-np.amax(scores))
    scores = scores / np.array(np.sum(scores, axis=1), ndmin=2).T

    # Apply bounding-box regression deltas
    box_deltas = blobs_out['bbox_pred']
    pred_boxes = _bbox_pred(gt_boxes, box_deltas)
    pred_boxes = _clip_boxes(pred_boxes, im.shape)

    return scores, secondary_scores, selected_boxes
def vis_detections(im, boxes, scores, classes):
    """Visual debugging of detections.

    Draws the first box with its highest-scoring class name as the title.
    """
    import matplotlib.pyplot as plt
    # BGR (OpenCV) -> RGB (matplotlib)
    im = im[:, :, (2, 1, 0)]
    for i in xrange(1):
        # BUG FIX: removed a leftover pdb.set_trace() that halted every call.
        bbox = boxes[i, :4]
        sscore = scores[i, :]
        cls_ind = sscore.argmax()
        sscore = sscore.max()
        #plt.cla()
        plt.imshow(im)
        plt.gca().add_patch(
            plt.Rectangle((bbox[0], bbox[1]),
                          bbox[2] - bbox[0],
                          bbox[3] - bbox[1], fill=False,
                          edgecolor='r', linewidth=3)
            )
        plt.title('{} {:.3f}'.format(classes[cls_ind], sscore))
        plt.show()
def test_net(net, imdb):
    """Test a R*CNN network on an image database.

    Runs im_detect on every image (which must have exactly one ground-truth
    region), collects per-class scores/labels, and reports AP via imdb._ap.
    """
    num_images = len(imdb.image_index)
    num_classes = imdb.num_classes

    # Per-class score and label columns, indexed by image.
    all_scores = {}
    all_labels = {}
    for a in xrange(num_classes):
        all_scores[imdb.classes[a]] = np.zeros((num_images,1), dtype = np.float32)
        all_labels[imdb.classes[a]] = -np.ones((num_images,1), dtype = np.int16)

    output_dir = get_output_dir(imdb, net)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    # timers
    _t = {'im_detect' : Timer()}

    roidb = imdb.roidb
    for i in xrange(num_images):
        im = cv2.imread(imdb.image_path_at(i))

        gt = np.where(roidb[i]['gt_classes']>-1)[0]
        gt_boxes = roidb[i]['boxes'][gt]
        gt_class = roidb[i]['gt_classes'][gt]
        # Exactly one primary (ground-truth) region per image is expected.
        assert (gt_boxes.shape[0]==1)

        _t['im_detect'].tic()
        scores, secondary_scores, selected_boxes = im_detect(net, im, roidb[i]['boxes'], roidb[i]['gt_classes'])
        _t['im_detect'].toc()

        # Visualize detections
        # vis_detections(im, gt_boxes, scores, imdb.classes)

        for a in xrange(num_classes):
            all_scores[imdb.classes[a]][i] = scores[0,a]
            all_labels[imdb.classes[a]][i] = gt_class

        print 'im_detect: {:d}/{:d} {:.3f}s' \
            .format(i + 1, num_images, _t['im_detect'].average_time)

    #f = {'images': imdb.image_index, 'scores': all_boxes, 'classes': imdb.classes, 'context_boxes': all_selected_boxes}
    #output_dir = os.path.join(os.path.dirname(__file__), 'output',
    #                          'Action_MIL_wContextBoxes_'+ imdb.name)
    #if not os.path.exists(output_dir):
    #    os.makedirs(output_dir)

    #det_file = os.path.join(output_dir, 'res.mat')
    #sio.savemat(det_file, {'data': f})
    #print 'Saving in', det_file

    imdb._ap(all_scores, all_labels)
| bsd-2-clause |
av8ramit/tensorflow | tensorflow/contrib/learn/python/learn/estimators/estimator_test.py | 7 | 54392 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import itertools
import json
import os
import tempfile
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from google.protobuf import text_format
from tensorflow.contrib import learn
from tensorflow.contrib import lookup
from tensorflow.python.training import training_util
from tensorflow.contrib.layers.python.layers import feature_column as feature_column_lib
from tensorflow.contrib.layers.python.layers import optimizers
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn import models
from tensorflow.contrib.learn.python.learn import monitors as monitors_lib
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import linear
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.utils import input_fn_utils
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.contrib.testing.python.framework import util_test
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.saved_model import loader
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.summary import summary
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import checkpoint_state_pb2
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import session_run_hook
from tensorflow.python.util import compat
# Feature dimensionality of the Boston housing and iris fixture datasets.
_BOSTON_INPUT_DIM = 13
_IRIS_INPUT_DIM = 4
def boston_input_fn(num_epochs=None):
  """Input fn yielding Boston housing features and regression targets."""
  boston = base.load_boston()
  data = constant_op.constant(boston.data)
  features = input_lib.limit_epochs(
      array_ops.reshape(data, [-1, _BOSTON_INPUT_DIM]),
      num_epochs=num_epochs)
  labels = array_ops.reshape(constant_op.constant(boston.target), [-1, 1])
  return features, labels
def iris_input_fn():
  """Input fn yielding iris features and integer class labels."""
  iris = base.load_iris()
  data = constant_op.constant(iris.data)
  target = constant_op.constant(iris.target)
  return (array_ops.reshape(data, [-1, _IRIS_INPUT_DIM]),
          array_ops.reshape(target, [-1]))
def iris_input_fn_labels_dict():
  """Same as iris_input_fn, but wraps the labels in a dict."""
  iris = base.load_iris()
  features = array_ops.reshape(
      constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
  target = array_ops.reshape(constant_op.constant(iris.target), [-1])
  return features, {'labels': target}
def boston_eval_fn():
  """Eval input fn: the Boston data duplicated once along the batch axis."""
  boston = base.load_boston()
  n_examples = len(boston.target)
  features = array_ops.reshape(
      constant_op.constant(boston.data), [n_examples, _BOSTON_INPUT_DIM])
  labels = array_ops.reshape(
      constant_op.constant(boston.target), [n_examples, 1])
  doubled_features = array_ops.concat([features, features], 0)
  doubled_labels = array_ops.concat([labels, labels], 0)
  return doubled_features, doubled_labels
def extract(data, key):
  """Return data[key] when data is a dict (key must exist); else data."""
  if not isinstance(data, dict):
    return data
  assert key in data
  return data[key]
def linear_model_params_fn(features, labels, mode, params):
  """Linear regression model_fn whose learning rate comes from `params`."""
  features = extract(features, 'input')
  labels = extract(labels, 'labels')

  valid_modes = (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
                 model_fn.ModeKeys.INFER)
  assert mode in valid_modes

  prediction, loss = models.linear_regression_zero_init(features, labels)
  train_op = optimizers.optimize_loss(
      loss, training_util.get_global_step(),
      optimizer='Adagrad', learning_rate=params['learning_rate'])
  return prediction, loss, train_op
def linear_model_fn(features, labels, mode):
  """Simple linear regression model_fn with a fixed Adagrad learning rate."""
  features = extract(features, 'input')
  labels = extract(labels, 'labels')

  valid_modes = (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
                 model_fn.ModeKeys.INFER)
  assert mode in valid_modes

  if isinstance(features, dict):
    # Unwrap a single-entry feature dict down to its sole tensor.
    (_, features), = features.items()

  prediction, loss = models.linear_regression_zero_init(features, labels)
  train_op = optimizers.optimize_loss(
      loss, training_util.get_global_step(),
      optimizer='Adagrad', learning_rate=0.1)
  return prediction, loss, train_op
def linear_model_fn_with_model_fn_ops(features, labels, mode):
  """Same as linear_model_fn, but returns `ModelFnOps`."""
  valid_modes = (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
                 model_fn.ModeKeys.INFER)
  assert mode in valid_modes

  prediction, loss = models.linear_regression_zero_init(features, labels)
  step = training_util.get_global_step()
  train_op = optimizers.optimize_loss(
      loss, step, optimizer='Adagrad', learning_rate=0.1)
  return model_fn.ModelFnOps(
      mode=mode, predictions=prediction, loss=loss, train_op=train_op)
def logistic_model_no_mode_fn(features, labels):
  """Three-class logistic regression model_fn that takes no mode argument."""
  features = extract(features, 'input')
  labels = extract(labels, 'labels')
  one_hot_labels = array_ops.one_hot(labels, 3, 1, 0)

  prediction, loss = models.logistic_regression_zero_init(
      features, one_hot_labels)
  train_op = optimizers.optimize_loss(
      loss, training_util.get_global_step(),
      optimizer='Adagrad', learning_rate=0.1)

  predictions = {'class': math_ops.argmax(prediction, 1),
                 'prob': prediction}
  return predictions, loss, train_op
# Fixture contents for the vocabulary/asset export tests below.
VOCAB_FILE_CONTENT = 'emerson\nlake\npalmer\n'
EXTRA_FILE_CONTENT = 'kermit\npiggy\nralph\n'
def _build_estimator_for_export_tests(tmpdir):
  """Fit a small LinearRegressor on iris and build a serving_input_fn.

  The returned serving_input_fn writes a vocab file under `tmpdir` and wires a
  hash-table lookup on it into the features, so export tests can exercise
  asset handling.

  Returns:
    Tuple of (fitted estimator, serving_input_fn that references an asset).
  """

  def _input_fn():
    iris = base.load_iris()
    return {
        'feature': constant_op.constant(iris.data, dtype=dtypes.float32)
    }, constant_op.constant(
        iris.target, shape=[150], dtype=dtypes.int32)

  feature_columns = [
      feature_column_lib.real_valued_column('feature', dimension=4)
  ]

  est = linear.LinearRegressor(feature_columns)
  est.fit(input_fn=_input_fn, steps=20)

  feature_spec = feature_column_lib.create_feature_spec_for_parsing(
      feature_columns)
  serving_input_fn = input_fn_utils.build_parsing_serving_input_fn(feature_spec)

  # hack in an op that uses an asset, in order to test asset export.
  # this is not actually valid, of course.
  def serving_input_fn_with_asset():
    features, labels, inputs = serving_input_fn()

    vocab_file_name = os.path.join(tmpdir, 'my_vocab_file')
    vocab_file = gfile.GFile(vocab_file_name, mode='w')
    vocab_file.write(VOCAB_FILE_CONTENT)
    vocab_file.close()
    hashtable = lookup.HashTable(
        lookup.TextFileStringTableInitializer(vocab_file_name), 'x')
    features['bogus_lookup'] = hashtable.lookup(
        math_ops.to_int64(features['feature']))

    return input_fn_utils.InputFnOps(features, labels, inputs)

  return est, serving_input_fn_with_asset
def _build_estimator_for_resource_export_test():
  """Fit an Estimator whose model_fn creates lookup-table resources.

  Used by export tests to verify that table resources (and separate training
  state) survive SavedModel export.

  Returns:
    Tuple of (fitted estimator, parsing serving_input_fn).
  """

  def _input_fn():
    iris = base.load_iris()
    return {
        'feature': constant_op.constant(iris.data, dtype=dtypes.float32)
    }, constant_op.constant(
        iris.target, shape=[150], dtype=dtypes.int32)

  feature_columns = [
      feature_column_lib.real_valued_column('feature', dimension=4)
  ]

  def resource_constant_model_fn(unused_features, unused_labels, mode):
    """A model_fn that loads a constant from a resource and serves it."""
    assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
                    model_fn.ModeKeys.INFER)

    const = constant_op.constant(-1, dtype=dtypes.int64)
    table = lookup.MutableHashTable(
        dtypes.string, dtypes.int64, const, name='LookupTableModel')
    update_global_step = training_util.get_global_step().assign_add(1)
    if mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL):
      key = constant_op.constant(['key'])
      value = constant_op.constant([42], dtype=dtypes.int64)
      train_op_1 = table.insert(key, value)
      # A second table that holds training-only state.
      training_state = lookup.MutableHashTable(
          dtypes.string, dtypes.int64, const, name='LookupTableTrainingState')
      training_op_2 = training_state.insert(key, value)
      return (const, const,
              control_flow_ops.group(train_op_1, training_op_2,
                                     update_global_step))
    if mode == model_fn.ModeKeys.INFER:
      key = constant_op.constant(['key'])
      prediction = table.lookup(key)
      return prediction, const, update_global_step

  est = estimator.Estimator(model_fn=resource_constant_model_fn)
  est.fit(input_fn=_input_fn, steps=1)

  feature_spec = feature_column_lib.create_feature_spec_for_parsing(
      feature_columns)
  serving_input_fn = input_fn_utils.build_parsing_serving_input_fn(feature_spec)
  return est, serving_input_fn
class CheckCallsMonitor(monitors_lib.BaseMonitor):
  """Monitor asserting step_begin/step_end each fire exactly N times."""

  def __init__(self, expect_calls):
    super(CheckCallsMonitor, self).__init__()
    # Counters stay None until begin() initializes them.
    self.begin_calls = None
    self.end_calls = None
    self.expect_calls = expect_calls

  def begin(self, max_steps):
    self.begin_calls = 0
    self.end_calls = 0

  def step_begin(self, step):
    self.begin_calls += 1
    return {}

  def step_end(self, step, outputs):
    self.end_calls += 1
    return False

  def end(self):
    # Both hooks must have fired exactly the expected number of times.
    assert (self.end_calls == self.expect_calls and
            self.begin_calls == self.expect_calls)
def _model_fn_ops(expected_features, expected_labels, actual_features,
                  actual_labels, mode):
  """Build ModelFnOps whose ops assert actual inputs equal the expected ones."""
  feature_checks = [
      check_ops.assert_equal(
          expected_features[k], actual_features[k], name='assert_%s' % k)
      for k in expected_features
  ]
  label_check = check_ops.assert_equal(
      expected_labels, actual_labels, name='assert_labels')
  assert_ops = tuple(feature_checks + [label_check])

  with ops.control_dependencies(assert_ops):
    return model_fn.ModelFnOps(
        mode=mode,
        predictions=constant_op.constant(0.),
        loss=constant_op.constant(0.),
        train_op=training_util.get_global_step().assign_add(1))
def _make_input_fn(features, labels):
  """Build an input_fn returning constant tensors for features and labels."""

  def _input_fn():
    feature_tensors = {k: constant_op.constant(v)
                       for k, v in six.iteritems(features)}
    return feature_tensors, constant_op.constant(labels)

  return _input_fn
class EstimatorModelFnTest(test.TestCase):
  """Tests of how Estimator calls and validates user-supplied model_fns."""

  def testModelFnArgs(self):
    # Verify that features/labels/mode/params/config reach the model_fn.
    features = {'x': 42., 'y': 43.}
    labels = 44.
    expected_params = {'some_param': 'some_value'}
    expected_config = run_config.RunConfig()
    expected_config.i_am_test = True
    # TODO(ptucker): We have to roll our own mock since Estimator._get_arguments
    # doesn't work with mock fns.
    model_fn_call_count = [0]

    # `features` and `labels` are passed by position, `arg0` and `arg1` here.
    def _model_fn(arg0, arg1, mode, params, config):
      model_fn_call_count[0] += 1
      self.assertItemsEqual(features.keys(), arg0.keys())
      self.assertEqual(model_fn.ModeKeys.TRAIN, mode)
      self.assertEqual(expected_params, params)
      self.assertTrue(config.i_am_test)
      return _model_fn_ops(features, labels, arg0, arg1, mode)

    est = estimator.Estimator(
        model_fn=_model_fn, params=expected_params, config=expected_config)
    self.assertEqual(0, model_fn_call_count[0])
    est.fit(input_fn=_make_input_fn(features, labels), steps=1)
    self.assertEqual(1, model_fn_call_count[0])

  def testPartialModelFnArgs(self):
    # A functools.partial-wrapped model_fn must still receive all arguments.
    features = {'x': 42., 'y': 43.}
    labels = 44.
    expected_params = {'some_param': 'some_value'}
    expected_config = run_config.RunConfig()
    expected_config.i_am_test = True
    expected_foo = 45.
    expected_bar = 46.
    # TODO(ptucker): We have to roll our own mock since Estimator._get_arguments
    # doesn't work with mock fns.
    model_fn_call_count = [0]

    # `features` and `labels` are passed by position, `arg0` and `arg1` here.
    def _model_fn(arg0, arg1, foo, mode, params, config, bar):
      model_fn_call_count[0] += 1
      self.assertEqual(expected_foo, foo)
      self.assertEqual(expected_bar, bar)
      self.assertItemsEqual(features.keys(), arg0.keys())
      self.assertEqual(model_fn.ModeKeys.TRAIN, mode)
      self.assertEqual(expected_params, params)
      self.assertTrue(config.i_am_test)
      return _model_fn_ops(features, labels, arg0, arg1, mode)

    partial_model_fn = functools.partial(
        _model_fn, foo=expected_foo, bar=expected_bar)
    est = estimator.Estimator(
        model_fn=partial_model_fn,
        params=expected_params,
        config=expected_config)
    self.assertEqual(0, model_fn_call_count[0])
    est.fit(input_fn=_make_input_fn(features, labels), steps=1)
    self.assertEqual(1, model_fn_call_count[0])

  def testModelFnWithModelDir(self):
    # model_fns that declare a model_dir kwarg receive the estimator's dir.
    expected_param = {'some_param': 'some_value'}
    expected_model_dir = tempfile.mkdtemp()

    def _argument_checker(features,
                          labels,
                          mode,
                          params,
                          config=None,
                          model_dir=None):
      _, _, _ = features, labels, config
      self.assertEqual(model_fn.ModeKeys.TRAIN, mode)
      self.assertEqual(expected_param, params)
      self.assertEqual(model_dir, expected_model_dir)
      return (constant_op.constant(0.), constant_op.constant(0.),
              training_util.get_global_step().assign_add(1))

    est = estimator.Estimator(
        model_fn=_argument_checker,
        params=expected_param,
        model_dir=expected_model_dir)
    est.fit(input_fn=boston_input_fn, steps=1)

  def testInvalidModelFn_no_train_op(self):
    # Returning train_op=None from fit must raise.

    def _invalid_model_fn(features, labels):
      # pylint: disable=unused-argument
      w = variables_lib.Variable(42.0, 'weight')
      update_global_step = training_util.get_global_step().assign_add(1)
      with ops.control_dependencies([update_global_step]):
        loss = 100.0 - w
      return None, loss, None

    est = estimator.Estimator(model_fn=_invalid_model_fn)
    with self.assertRaisesRegexp(ValueError, 'Missing train_op'):
      est.fit(input_fn=boston_input_fn, steps=1)

  def testInvalidModelFn_no_loss(self):
    # Returning loss=None in EVAL mode must raise from evaluate().

    def _invalid_model_fn(features, labels, mode):
      # pylint: disable=unused-argument
      w = variables_lib.Variable(42.0, 'weight')
      loss = 100.0 - w
      update_global_step = training_util.get_global_step().assign_add(1)
      with ops.control_dependencies([update_global_step]):
        train_op = w.assign_add(loss / 100.0)
      predictions = loss
      if mode == model_fn.ModeKeys.EVAL:
        loss = None
      return predictions, loss, train_op

    est = estimator.Estimator(model_fn=_invalid_model_fn)
    est.fit(input_fn=boston_input_fn, steps=1)
    with self.assertRaisesRegexp(ValueError, 'Missing loss'):
      est.evaluate(input_fn=boston_eval_fn, steps=1)

  def testInvalidModelFn_no_prediction(self):
    # Returning predictions=None must raise from evaluate() and predict().

    def _invalid_model_fn(features, labels):
      # pylint: disable=unused-argument
      w = variables_lib.Variable(42.0, 'weight')
      loss = 100.0 - w
      update_global_step = training_util.get_global_step().assign_add(1)
      with ops.control_dependencies([update_global_step]):
        train_op = w.assign_add(loss / 100.0)
      return None, loss, train_op

    est = estimator.Estimator(model_fn=_invalid_model_fn)
    est.fit(input_fn=boston_input_fn, steps=1)
    with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
      est.evaluate(input_fn=boston_eval_fn, steps=1)
    with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
      est.predict(input_fn=boston_input_fn)
    with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
      est.predict(
          input_fn=functools.partial(boston_input_fn, num_epochs=1),
          as_iterable=True)

  def testModelFnScaffoldInTraining(self):
    # A Scaffold returned by model_fn must be honored (init_fn runs).
    self.is_init_fn_called = False

    def _init_fn(scaffold, session):
      _, _ = scaffold, session
      self.is_init_fn_called = True

    def _model_fn_scaffold(features, labels, mode):
      _, _ = features, labels
      return model_fn.ModelFnOps(
          mode=mode,
          predictions=constant_op.constant(0.),
          loss=constant_op.constant(0.),
          train_op=training_util.get_global_step().assign_add(1),
          scaffold=monitored_session.Scaffold(init_fn=_init_fn))

    est = estimator.Estimator(model_fn=_model_fn_scaffold)
    est.fit(input_fn=boston_input_fn, steps=1)
    self.assertTrue(self.is_init_fn_called)

  def testModelFnScaffoldSaverUsage(self):
    # The Scaffold's custom saver must be used for save/restore/export.

    def _model_fn_scaffold(features, labels, mode):
      _, _ = features, labels
      variables_lib.Variable(1., 'weight')
      real_saver = saver_lib.Saver()
      self.mock_saver = test.mock.Mock(
          wraps=real_saver, saver_def=real_saver.saver_def)
      return model_fn.ModelFnOps(
          mode=mode,
          predictions=constant_op.constant([[1.]]),
          loss=constant_op.constant(0.),
          train_op=training_util.get_global_step().assign_add(1),
          scaffold=monitored_session.Scaffold(saver=self.mock_saver))

    def input_fn():
      return {
          'x': constant_op.constant([[1.]]),
      }, constant_op.constant([[1.]])

    est = estimator.Estimator(model_fn=_model_fn_scaffold)
    est.fit(input_fn=input_fn, steps=1)
    self.assertTrue(self.mock_saver.save.called)
    est.evaluate(input_fn=input_fn, steps=1)
    self.assertTrue(self.mock_saver.restore.called)
    est.predict(input_fn=input_fn)
    self.assertTrue(self.mock_saver.restore.called)

    def serving_input_fn():
      serialized_tf_example = array_ops.placeholder(
          dtype=dtypes.string, shape=[None], name='input_example_tensor')
      features, labels = input_fn()
      return input_fn_utils.InputFnOps(features, labels, {
          'examples': serialized_tf_example
      })

    est.export_savedmodel(
        os.path.join(est.model_dir, 'export'), serving_input_fn)
    self.assertTrue(self.mock_saver.restore.called)
class EstimatorTest(test.TestCase):
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=estimator.Estimator(model_fn=linear_model_fn),
train_input_fn=boston_input_fn,
eval_input_fn=boston_input_fn)
exp.test()
def testCheckpointSaverHookSuppressesTheDefaultOne(self):
saver_hook = test.mock.Mock(
spec=basic_session_run_hooks.CheckpointSaverHook)
saver_hook.before_run.return_value = None
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1, monitors=[saver_hook])
# test nothing is saved, due to suppressing default saver
with self.assertRaises(learn.NotFittedError):
est.evaluate(input_fn=boston_input_fn, steps=1)
def testCustomConfig(self):
test_random_seed = 5783452
class TestInput(object):
def __init__(self):
self.random_seed = 0
def config_test_input_fn(self):
self.random_seed = ops.get_default_graph().seed
return constant_op.constant([[1.]]), constant_op.constant([1.])
config = run_config.RunConfig(tf_random_seed=test_random_seed)
test_input = TestInput()
est = estimator.Estimator(model_fn=linear_model_fn, config=config)
est.fit(input_fn=test_input.config_test_input_fn, steps=1)
# If input_fn ran, it will have given us the random seed set on the graph.
self.assertEquals(test_random_seed, test_input.random_seed)
def testRunConfigModelDir(self):
config = run_config.RunConfig(model_dir='test_dir')
est = estimator.Estimator(model_fn=linear_model_fn, config=config)
self.assertEqual('test_dir', est.config.model_dir)
self.assertEqual('test_dir', est.model_dir)
def testModelDirAndRunConfigModelDir(self):
config = run_config.RunConfig(model_dir='test_dir')
est = estimator.Estimator(
model_fn=linear_model_fn, config=config, model_dir='test_dir')
self.assertEqual('test_dir', est.config.model_dir)
with self.assertRaisesRegexp(
ValueError, 'model_dir are set both in constructor and RunConfig, '
'but with different'):
estimator.Estimator(
model_fn=linear_model_fn, config=config, model_dir='different_dir')
def testModelDirIsCopiedToRunConfig(self):
config = run_config.RunConfig()
self.assertIsNone(config.model_dir)
est = estimator.Estimator(
model_fn=linear_model_fn, model_dir='test_dir', config=config)
self.assertEqual('test_dir', est.config.model_dir)
self.assertEqual('test_dir', est.model_dir)
def testModelDirAsTempDir(self):
with test.mock.patch.object(tempfile, 'mkdtemp', return_value='temp_dir'):
est = estimator.Estimator(model_fn=linear_model_fn)
self.assertEqual('temp_dir', est.config.model_dir)
self.assertEqual('temp_dir', est.model_dir)
def testCheckInputs(self):
est = estimator.SKCompat(estimator.Estimator(model_fn=linear_model_fn))
# Lambdas so we have to different objects to compare
right_features = lambda: np.ones(shape=[7, 8], dtype=np.float32)
right_labels = lambda: np.ones(shape=[7, 10], dtype=np.int32)
est.fit(right_features(), right_labels(), steps=1)
# TODO(wicke): This does not fail for np.int32 because of data_feeder magic.
wrong_type_features = np.ones(shape=[7, 8], dtype=np.int64)
wrong_size_features = np.ones(shape=[7, 10])
wrong_type_labels = np.ones(shape=[7, 10], dtype=np.float32)
wrong_size_labels = np.ones(shape=[7, 11])
est.fit(x=right_features(), y=right_labels(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=wrong_type_features, y=right_labels(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=wrong_size_features, y=right_labels(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=right_features(), y=wrong_type_labels, steps=1)
with self.assertRaises(ValueError):
est.fit(x=right_features(), y=wrong_size_labels, steps=1)
def testBadInput(self):
est = estimator.Estimator(model_fn=linear_model_fn)
self.assertRaisesRegexp(
ValueError,
'Either x or input_fn must be provided.',
est.fit,
x=None,
input_fn=None,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Can not provide both input_fn and x or y',
est.fit,
x='X',
input_fn=iris_input_fn,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Can not provide both input_fn and x or y',
est.fit,
y='Y',
input_fn=iris_input_fn,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Can not provide both input_fn and batch_size',
est.fit,
input_fn=iris_input_fn,
batch_size=100,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Inputs cannot be tensors. Please provide input_fn.',
est.fit,
x=constant_op.constant(1.),
steps=1)
def testUntrained(self):
boston = base.load_boston()
est = estimator.SKCompat(estimator.Estimator(model_fn=linear_model_fn))
with self.assertRaises(learn.NotFittedError):
_ = est.score(x=boston.data, y=boston.target.astype(np.float64))
with self.assertRaises(learn.NotFittedError):
est.predict(x=boston.data)
def testContinueTraining(self):
boston = base.load_boston()
output_dir = tempfile.mkdtemp()
est = estimator.SKCompat(
estimator.Estimator(model_fn=linear_model_fn, model_dir=output_dir))
float64_labels = boston.target.astype(np.float64)
est.fit(x=boston.data, y=float64_labels, steps=50)
scores = est.score(
x=boston.data,
y=float64_labels,
metrics={
'MSE': metric_ops.streaming_mean_squared_error
})
del est
# Create another estimator object with the same output dir.
est2 = estimator.SKCompat(
estimator.Estimator(model_fn=linear_model_fn, model_dir=output_dir))
# Check we can evaluate and predict.
scores2 = est2.score(
x=boston.data,
y=float64_labels,
metrics={
'MSE': metric_ops.streaming_mean_squared_error
})
self.assertAllClose(scores['MSE'], scores2['MSE'])
predictions = np.array(list(est2.predict(x=boston.data)))
other_score = _sklearn.mean_squared_error(predictions, float64_labels)
self.assertAllClose(scores['MSE'], other_score)
# Check we can keep training.
est2.fit(x=boston.data, y=float64_labels, steps=100)
scores3 = est2.score(
x=boston.data,
y=float64_labels,
metrics={
'MSE': metric_ops.streaming_mean_squared_error
})
self.assertLess(scores3['MSE'], scores['MSE'])
def test_checkpoint_contains_relative_paths(self):
tmpdir = tempfile.mkdtemp()
est = estimator.Estimator(
model_dir=tmpdir, model_fn=linear_model_fn_with_model_fn_ops)
est.fit(input_fn=boston_input_fn, steps=5)
checkpoint_file_content = file_io.read_file_to_string(
os.path.join(tmpdir, 'checkpoint'))
ckpt = checkpoint_state_pb2.CheckpointState()
text_format.Merge(checkpoint_file_content, ckpt)
self.assertEqual(ckpt.model_checkpoint_path, 'model.ckpt-5')
self.assertAllEqual(['model.ckpt-1', 'model.ckpt-5'],
ckpt.all_model_checkpoint_paths)
def test_train_save_copy_reload(self):
tmpdir = tempfile.mkdtemp()
model_dir1 = os.path.join(tmpdir, 'model_dir1')
est1 = estimator.Estimator(
model_dir=model_dir1, model_fn=linear_model_fn_with_model_fn_ops)
est1.fit(input_fn=boston_input_fn, steps=5)
model_dir2 = os.path.join(tmpdir, 'model_dir2')
os.renames(model_dir1, model_dir2)
est2 = estimator.Estimator(
model_dir=model_dir2, model_fn=linear_model_fn_with_model_fn_ops)
self.assertEqual(5, est2.get_variable_value('global_step'))
est2.fit(input_fn=boston_input_fn, steps=5)
self.assertEqual(10, est2.get_variable_value('global_step'))
def testEstimatorParams(self):
boston = base.load_boston()
est = estimator.SKCompat(
estimator.Estimator(
model_fn=linear_model_params_fn, params={
'learning_rate': 0.01
}))
est.fit(x=boston.data, y=boston.target, steps=100)
def testHooksNotChanged(self):
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
# We pass empty array and expect it to remain empty after calling
# fit and evaluate. Requires inside to copy this array if any hooks were
# added.
my_array = []
est.fit(input_fn=iris_input_fn, steps=100, monitors=my_array)
_ = est.evaluate(input_fn=iris_input_fn, steps=1, hooks=my_array)
self.assertEqual(my_array, [])
def testIrisIterator(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 100)
y_iter = itertools.islice(iris.target, 100)
estimator.SKCompat(est).fit(x_iter, y_iter, steps=20)
eval_result = est.evaluate(input_fn=iris_input_fn, steps=1)
x_iter_eval = itertools.islice(iris.data, 100)
y_iter_eval = itertools.islice(iris.target, 100)
score_result = estimator.SKCompat(est).score(x_iter_eval, y_iter_eval)
print(score_result)
self.assertItemsEqual(eval_result.keys(), score_result.keys())
self.assertItemsEqual(['global_step', 'loss'], score_result.keys())
predictions = estimator.SKCompat(est).predict(x=iris.data)['class']
self.assertEqual(len(predictions), iris.target.shape[0])
def testIrisIteratorArray(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 100)
y_iter = (np.array(x) for x in iris.target)
est.fit(x_iter, y_iter, steps=100)
_ = est.evaluate(input_fn=iris_input_fn, steps=1)
_ = six.next(est.predict(x=iris.data))['class']
def testIrisIteratorPlainInt(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 100)
y_iter = (v for v in iris.target)
est.fit(x_iter, y_iter, steps=100)
_ = est.evaluate(input_fn=iris_input_fn, steps=1)
_ = six.next(est.predict(x=iris.data))['class']
def testIrisTruncatedIterator(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 50)
y_iter = ([np.int32(v)] for v in iris.target)
est.fit(x_iter, y_iter, steps=100)
def testTrainStepsIsIncremental(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=10)
self.assertEqual(10, est.get_variable_value('global_step'))
est.fit(input_fn=boston_input_fn, steps=15)
self.assertEqual(25, est.get_variable_value('global_step'))
def testTrainMaxStepsIsNotIncremental(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, max_steps=10)
self.assertEqual(10, est.get_variable_value('global_step'))
est.fit(input_fn=boston_input_fn, max_steps=15)
self.assertEqual(15, est.get_variable_value('global_step'))
def testPredict(self):
est = estimator.Estimator(model_fn=linear_model_fn)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
output = list(est.predict(x=boston.data, batch_size=10))
self.assertEqual(len(output), boston.target.shape[0])
def testWithModelFnOps(self):
"""Test for model_fn that returns `ModelFnOps`."""
est = estimator.Estimator(model_fn=linear_model_fn_with_model_fn_ops)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
input_fn = functools.partial(boston_input_fn, num_epochs=1)
scores = est.evaluate(input_fn=input_fn, steps=1)
self.assertIn('loss', scores.keys())
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0])
def testWrongInput(self):
def other_input_fn():
return {
'other': constant_op.constant([0, 0, 0])
}, constant_op.constant([0, 0, 0])
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
with self.assertRaises(ValueError):
est.fit(input_fn=other_input_fn, steps=1)
def testMonitorsForFit(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(
input_fn=boston_input_fn,
steps=21,
monitors=[CheckCallsMonitor(expect_calls=21)])
def testHooksForEvaluate(self):
class CheckCallHook(session_run_hook.SessionRunHook):
def __init__(self):
self.run_count = 0
def after_run(self, run_context, run_values):
self.run_count += 1
est = learn.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
hook = CheckCallHook()
est.evaluate(input_fn=boston_eval_fn, steps=3, hooks=[hook])
self.assertEqual(3, hook.run_count)
def testSummaryWriting(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=200)
est.evaluate(input_fn=boston_input_fn, steps=200)
loss_summary = util_test.simple_values_from_events(
util_test.latest_events(est.model_dir), ['OptimizeLoss/loss'])
self.assertEqual(1, len(loss_summary))
def testSummaryWritingWithSummaryProto(self):
def _streaming_mean_squared_error_histogram(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
metrics, update_ops = metric_ops.streaming_mean_squared_error(
predictions,
labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
return summary.histogram('histogram', metrics), update_ops
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=200)
est.evaluate(
input_fn=boston_input_fn,
steps=200,
metrics={
'MSE': _streaming_mean_squared_error_histogram
})
events = util_test.latest_events(est.model_dir + '/eval')
output_values = {}
for e in events:
if e.HasField('summary'):
for v in e.summary.value:
output_values[v.tag] = v
self.assertTrue('MSE' in output_values)
self.assertTrue(output_values['MSE'].HasField('histo'))
def testSummaryWritingWithTensor(self):
def _streaming_precition_mean_tensor(predictions,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
return metric_ops.streaming_mean_tensor(
predictions,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=200)
est.evaluate(
input_fn=boston_input_fn,
steps=200,
metrics={
'PMT': _streaming_precition_mean_tensor
})
events = util_test.latest_events(est.model_dir + '/eval')
output_values = {}
for e in events:
if e.HasField('summary'):
for v in e.summary.value:
output_values[v.tag] = v
self.assertTrue('PMT' in output_values)
self.assertTrue(output_values['PMT'].HasField('tensor'))
def testLossInGraphCollection(self):
class _LossCheckerHook(session_run_hook.SessionRunHook):
def begin(self):
self.loss_collection = ops.get_collection(ops.GraphKeys.LOSSES)
hook = _LossCheckerHook()
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=200, monitors=[hook])
self.assertTrue(hook.loss_collection)
def test_export_returns_exported_dirname(self):
expected = '/path/to/some_dir'
with test.mock.patch.object(estimator, 'export') as mock_export_module:
mock_export_module._export_estimator.return_value = expected
est = estimator.Estimator(model_fn=linear_model_fn)
actual = est.export('/path/to')
self.assertEquals(expected, actual)
def test_export_savedmodel(self):
tmpdir = tempfile.mkdtemp()
est, serving_input_fn = _build_estimator_for_export_tests(tmpdir)
extra_file_name = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('my_extra_file'))
extra_file = gfile.GFile(extra_file_name, mode='w')
extra_file.write(EXTRA_FILE_CONTENT)
extra_file.close()
assets_extra = {'some/sub/directory/my_extra_file': extra_file_name}
export_dir_base = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('export'))
export_dir = est.export_savedmodel(
export_dir_base, serving_input_fn, assets_extra=assets_extra)
self.assertTrue(gfile.Exists(export_dir_base))
self.assertTrue(gfile.Exists(export_dir))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('saved_model.pb'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('variables'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.index'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.data-00000-of-00001'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('assets'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets/my_vocab_file'))))
self.assertEqual(
compat.as_bytes(VOCAB_FILE_CONTENT),
compat.as_bytes(
gfile.GFile(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets/my_vocab_file'))).read()))
expected_extra_path = os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets.extra/some/sub/directory/my_extra_file'))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('assets.extra'))))
self.assertTrue(gfile.Exists(expected_extra_path))
self.assertEqual(
compat.as_bytes(EXTRA_FILE_CONTENT),
compat.as_bytes(gfile.GFile(expected_extra_path).read()))
expected_vocab_file = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('my_vocab_file'))
# Restore, to validate that the export was well-formed.
with ops.Graph().as_default() as graph:
with session_lib.Session(graph=graph) as sess:
loader.load(sess, [tag_constants.SERVING], export_dir)
assets = [
x.eval()
for x in graph.get_collection(ops.GraphKeys.ASSET_FILEPATHS)
]
self.assertItemsEqual([expected_vocab_file], assets)
graph_ops = [x.name for x in graph.get_operations()]
self.assertTrue('input_example_tensor' in graph_ops)
self.assertTrue('ParseExample/ParseExample' in graph_ops)
self.assertTrue('linear/linear/feature/matmul' in graph_ops)
self.assertItemsEqual(['bogus_lookup', 'feature'], [
compat.as_str_any(x)
for x in graph.get_collection(
constants.COLLECTION_DEF_KEY_FOR_INPUT_FEATURE_KEYS)
])
# cleanup
gfile.DeleteRecursively(tmpdir)
def test_export_savedmodel_with_resource(self):
tmpdir = tempfile.mkdtemp()
est, serving_input_fn = _build_estimator_for_resource_export_test()
export_dir_base = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('export'))
export_dir = est.export_savedmodel(export_dir_base, serving_input_fn)
self.assertTrue(gfile.Exists(export_dir_base))
self.assertTrue(gfile.Exists(export_dir))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('saved_model.pb'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('variables'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.index'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.data-00000-of-00001'))))
# Restore, to validate that the export was well-formed.
with ops.Graph().as_default() as graph:
with session_lib.Session(graph=graph) as sess:
loader.load(sess, [tag_constants.SERVING], export_dir)
graph_ops = [x.name for x in graph.get_operations()]
self.assertTrue('input_example_tensor' in graph_ops)
self.assertTrue('ParseExample/ParseExample' in graph_ops)
self.assertTrue('LookupTableModel' in graph_ops)
self.assertFalse('LookupTableTrainingState' in graph_ops)
# cleanup
gfile.DeleteRecursively(tmpdir)
def test_export_savedmodel_with_graph_transforms(self):
tmpdir = tempfile.mkdtemp()
est, serving_input_fn = _build_estimator_for_export_tests(tmpdir)
extra_file_name = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('my_extra_file'))
extra_file = gfile.GFile(extra_file_name, mode='w')
extra_file.write(EXTRA_FILE_CONTENT)
extra_file.close()
assets_extra = {'some/sub/directory/my_extra_file': extra_file_name}
export_dir_base = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('export'))
export_dir = est.export_savedmodel(
export_dir_base,
serving_input_fn,
assets_extra=assets_extra,
graph_rewrite_specs=[
estimator.GraphRewriteSpec(['tag_1'], []),
estimator.GraphRewriteSpec(['tag_2', 'tag_3'],
['strip_unused_nodes'])
])
self.assertTrue(gfile.Exists(export_dir_base))
self.assertTrue(gfile.Exists(export_dir))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('saved_model.pb'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('variables'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.index'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.data-00000-of-00001'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('assets'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets/my_vocab_file'))))
self.assertEqual(
compat.as_bytes(VOCAB_FILE_CONTENT),
compat.as_bytes(
gfile.GFile(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets/my_vocab_file'))).read()))
expected_extra_path = os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets.extra/some/sub/directory/my_extra_file'))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('assets.extra'))))
self.assertTrue(gfile.Exists(expected_extra_path))
self.assertEqual(
compat.as_bytes(EXTRA_FILE_CONTENT),
compat.as_bytes(gfile.GFile(expected_extra_path).read()))
expected_vocab_file = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('my_vocab_file'))
# Restore, to validate that the export was well-formed.
# tag_1 is untransformed.
tags = ['tag_1']
with ops.Graph().as_default() as graph:
with session_lib.Session(graph=graph) as sess:
loader.load(sess, tags, export_dir)
assets = [
x.eval()
for x in graph.get_collection(ops.GraphKeys.ASSET_FILEPATHS)
]
self.assertItemsEqual([expected_vocab_file], assets)
graph_ops = [x.name for x in graph.get_operations()]
self.assertTrue('input_example_tensor' in graph_ops)
self.assertTrue('ParseExample/ParseExample' in graph_ops)
self.assertTrue('linear/linear/feature/matmul' in graph_ops)
# Since there were no transforms, both save ops are still present.
self.assertTrue('save/SaveV2/tensor_names' in graph_ops)
self.assertTrue('save_1/SaveV2/tensor_names' in graph_ops)
# Since there were no transforms, the hash table lookup is still there.
self.assertTrue('hash_table_Lookup' in graph_ops)
# Restore, to validate that the export was well-formed.
# tag_2, tag_3 was subjected to strip_unused_nodes.
tags = ['tag_2', 'tag_3']
with ops.Graph().as_default() as graph:
with session_lib.Session(graph=graph) as sess:
loader.load(sess, tags, export_dir)
assets = [
x.eval()
for x in graph.get_collection(ops.GraphKeys.ASSET_FILEPATHS)
]
self.assertItemsEqual([expected_vocab_file], assets)
graph_ops = [x.name for x in graph.get_operations()]
self.assertTrue('input_example_tensor' in graph_ops)
self.assertTrue('ParseExample/ParseExample' in graph_ops)
self.assertTrue('linear/linear/feature/matmul' in graph_ops)
# The Saver used to restore the checkpoint into the export Session
# was not added to the SAVERS collection, so strip_unused_nodes removes
# it. The one explicitly created in export_savedmodel is tracked in
# the MetaGraphDef saver_def field, so that one is retained.
# TODO(soergel): Make Savers sane again. I understand this is all a bit
# nuts but for now the test demonstrates what actually happens.
self.assertFalse('save/SaveV2/tensor_names' in graph_ops)
self.assertTrue('save_1/SaveV2/tensor_names' in graph_ops)
# The fake hash table lookup wasn't connected to anything; stripped.
self.assertFalse('hash_table_Lookup' in graph_ops)
# cleanup
gfile.DeleteRecursively(tmpdir)
class InferRealValuedColumnsTest(test.TestCase):
def testInvalidArgs(self):
with self.assertRaisesRegexp(ValueError, 'x or input_fn must be provided'):
estimator.infer_real_valued_columns_from_input(None)
with self.assertRaisesRegexp(ValueError, 'cannot be tensors'):
estimator.infer_real_valued_columns_from_input(constant_op.constant(1.0))
def _assert_single_feature_column(self, expected_shape, expected_dtype,
feature_columns):
self.assertEqual(1, len(feature_columns))
feature_column = feature_columns[0]
self.assertEqual('', feature_column.name)
self.assertEqual({
'':
parsing_ops.FixedLenFeature(
shape=expected_shape, dtype=expected_dtype)
}, feature_column.config)
def testInt32Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(shape=[7, 8], dtype=np.int32))
self._assert_single_feature_column([8], dtypes.int32, feature_columns)
def testInt32InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.int32), None))
self._assert_single_feature_column([8], dtypes.int32, feature_columns)
def testInt64Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(shape=[7, 8], dtype=np.int64))
self._assert_single_feature_column([8], dtypes.int64, feature_columns)
def testInt64InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.int64), None))
self._assert_single_feature_column([8], dtypes.int64, feature_columns)
def testFloat32Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(shape=[7, 8], dtype=np.float32))
self._assert_single_feature_column([8], dtypes.float32, feature_columns)
def testFloat32InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.float32), None))
self._assert_single_feature_column([8], dtypes.float32, feature_columns)
def testFloat64Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(shape=[7, 8], dtype=np.float64))
self._assert_single_feature_column([8], dtypes.float64, feature_columns)
def testFloat64InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.float64), None))
self._assert_single_feature_column([8], dtypes.float64, feature_columns)
def testBoolInput(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
estimator.infer_real_valued_columns_from_input(
np.array([[False for _ in xrange(8)] for _ in xrange(7)]))
def testBoolInputFn(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
estimator.infer_real_valued_columns_from_input_fn(
lambda: (constant_op.constant(False, shape=[7, 8], dtype=dtypes.bool), None)
)
def testStringInput(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
estimator.infer_real_valued_columns_from_input(
np.array([['%d.0' % i for i in xrange(8)] for _ in xrange(7)]))
def testStringInputFn(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
estimator.infer_real_valued_columns_from_input_fn(
lambda: (
constant_op.constant([['%d.0' % i
for i in xrange(8)]
for _ in xrange(7)]),
None))
def testBostonInputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
boston_input_fn)
self._assert_single_feature_column([_BOSTON_INPUT_DIM], dtypes.float64,
feature_columns)
def testIrisInputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
iris_input_fn)
self._assert_single_feature_column([_IRIS_INPUT_DIM], dtypes.float64,
feature_columns)
class ReplicaDeviceSetterTest(test.TestCase):
def testVariablesAreOnPs(self):
tf_config = {'cluster': {run_config.TaskType.PS: ['fake_ps_0']}}
with test.mock.patch.dict('os.environ', {
'TF_CONFIG': json.dumps(tf_config)
}):
config = run_config.RunConfig()
with ops.device(estimator._get_replica_device_setter(config)):
v = variables_lib.Variable([1, 2])
w = variables_lib.Variable([2, 1])
a = v + w
self.assertDeviceEqual('/job:ps/task:0', v.device)
self.assertDeviceEqual('/job:ps/task:0', v.initializer.device)
self.assertDeviceEqual('/job:ps/task:0', w.device)
self.assertDeviceEqual('/job:ps/task:0', w.initializer.device)
self.assertDeviceEqual('/job:worker', a.device)
def testVariablesAreLocal(self):
with ops.device(
estimator._get_replica_device_setter(run_config.RunConfig())):
v = variables_lib.Variable([1, 2])
w = variables_lib.Variable([2, 1])
a = v + w
self.assertDeviceEqual('', v.device)
self.assertDeviceEqual('', v.initializer.device)
self.assertDeviceEqual('', w.device)
self.assertDeviceEqual('', w.initializer.device)
self.assertDeviceEqual('', a.device)
def testMutableHashTableIsOnPs(self):
tf_config = {'cluster': {run_config.TaskType.PS: ['fake_ps_0']}}
with test.mock.patch.dict('os.environ', {
'TF_CONFIG': json.dumps(tf_config)
}):
config = run_config.RunConfig()
with ops.device(estimator._get_replica_device_setter(config)):
default_val = constant_op.constant([-1, -1], dtypes.int64)
table = lookup.MutableHashTable(dtypes.string, dtypes.int64, default_val)
input_string = constant_op.constant(['brain', 'salad', 'tank'])
output = table.lookup(input_string)
self.assertDeviceEqual('/job:ps/task:0', table._table_ref.device)
self.assertDeviceEqual('/job:ps/task:0', output.device)
def testMutableHashTableIsLocal(self):
with ops.device(
estimator._get_replica_device_setter(run_config.RunConfig())):
default_val = constant_op.constant([-1, -1], dtypes.int64)
table = lookup.MutableHashTable(dtypes.string, dtypes.int64, default_val)
input_string = constant_op.constant(['brain', 'salad', 'tank'])
output = table.lookup(input_string)
self.assertDeviceEqual('', table._table_ref.device)
self.assertDeviceEqual('', output.device)
def testTaskIsSetOnWorkerWhenJobNameIsSet(self):
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0']
},
'task': {
'type': run_config.TaskType.WORKER,
'index': 3
}
}
with test.mock.patch.dict('os.environ', {
'TF_CONFIG': json.dumps(tf_config)
}):
config = run_config.RunConfig()
with ops.device(estimator._get_replica_device_setter(config)):
v = variables_lib.Variable([1, 2])
w = variables_lib.Variable([2, 1])
a = v + w
self.assertDeviceEqual('/job:ps/task:0', v.device)
self.assertDeviceEqual('/job:ps/task:0', v.initializer.device)
self.assertDeviceEqual('/job:ps/task:0', w.device)
self.assertDeviceEqual('/job:ps/task:0', w.initializer.device)
self.assertDeviceEqual('/job:worker/task:3', a.device)
if __name__ == '__main__':
test.main()
| apache-2.0 |
jms-dipadua/financial-forecasting | forecast_dir.py | 1 | 20535 | import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import yaml
#import h5py
from sklearn import svm
from sklearn.metrics import f1_score, accuracy_score, mean_absolute_error, mean_squared_error
from sklearn.preprocessing import StandardScaler
from sklearn.cross_validation import train_test_split, StratifiedKFold, KFold
from sklearn.learning_curve import learning_curve
from sklearn.grid_search import GridSearchCV
from sklearn.externals import joblib
from keras.models import Sequential, model_from_yaml
from keras.layers.core import Dense, Activation, Dropout, Flatten
from keras.layers.convolutional import Convolution1D, MaxPooling1D, Convolution2D, MaxPooling2D
from keras.optimizers import SGD
from keras.callbacks import EarlyStopping
#from keras.utils.visualize_util import plot
#import pydot
#import graphviz
class Company:
def __init__(self):
self.get_params()
self.read_file()
self.initial_data_drop()
self.gen_train_test()
def get_params(self):
print "welcome to the jungle."
self.base_file = raw_input("RAW COMPANY file: ") # base file
self.root_dir = 'data/working/v6/' # version directory
self.fin_dir = 'data/outputs/v6/' # version directory
self.experiment_version = raw_input("Experiment Version: ")
self.fin_file_name = self.fin_dir + self.experiment_version +'.csv' # --> USE THE ROOT EXP FOR FILES? OR JUST ONE OUPUT?
self.pl_file_name = self.fin_dir + self.experiment_version +'_pl.csv'
def read_file(self):
print "reading file"
self.raw_data = pd.read_csv(self.root_dir+self.base_file)
def initial_data_drop(self):
    """Remove non-feature columns (ids, volume, dates) from the raw frame.

    Side effects: self.raw_data2 keeps a reference to the pre-drop frame,
    self.raw_dates stashes the 'Date' column, and self.raw_data is
    reassigned with the columns removed.
    """
    # NOTE: this is an alias, not a copy -- it still sees the original
    # columns only because the drops below *reassign* self.raw_data
    # (DataFrame.drop returns a new frame) rather than mutating it.
    self.raw_data2 = self.raw_data
    print "initial_data_drop (IDs & Dates)"
    columns = list(self.raw_data.columns.values)
    if 'id' in columns:
        self.raw_data = self.raw_data.drop(['id'], axis=1)
    if 'Volume' in columns:
        self.raw_data = self.raw_data.drop(['Volume'], axis=1)
    if 'Date' in columns:
        # keep the dates around for labeling the output file later
        self.raw_dates = self.raw_data['Date']
        #print self.raw_dates
        self.raw_data = self.raw_data.drop(['Date'], axis=1)
    # the following section is for experiment customization: ie selection of which inputs to keep or drop
    columns = list(self.raw_data.columns.values)
    #drop_cols = []
    #drop_col_nums =[] # use this so i can make it more reliable for future experiments (i.e. when dropping the same columns across different companies)
    counter = 0
    # get the columns to keep (manual version)
    """
    drop_cols = []
    drop_col_nums = []
    for column in columns:
        print "Keep (1) or DROP (0): %r" % column
        if int(raw_input()) == 0:
            drop_cols.append(column)
            drop_col_nums.append(counter)
        counter += 1
    print drop_cols # so i can keep track of this for experiment documentation purposes
    print drop_col_nums
    """
    # v5-1
    #drop_cols = ['DGS10', 'DCOILBRENTEU', 'xCIVPART', 'UNRATE', 'CPIAUCSL', 'GFDEGDQ188S', 'HOUST', 'IC4WSA', 'USD3MTD156N', 'PCE', 'PSAVERT', 'xA191RL1Q225SBEA', 'spClose', 'DEXUSEU', 'EPS', '12mo-EPS', 'net_income', 'total_assets', 'total_revenue', 'free_cash_flow', 'total_liabilities', 'profit_margin']
    #drop_col_nums = [14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35]
    # v5-2
    #drop_cols = ['Open', 'High', 'Low', 'SMA-5', 'SMA-15', 'SMA-50', 'SMA-200', 'WMA-10', 'WMA-30', 'WMA-100', 'WMA-200', 'cci-20', 'rsi-14']
    #drop_col_nums = [0, 1, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
    #self.raw_data.drop(self.raw_data.columns[drop_col_nums], axis = 1, inplace=True)
    # v5-3 ("all params"): no columns dropped for this experiment version
    print list(self.raw_data.columns.values) # again for documentation purposes
def gen_train_test(self):
    """Split the cleaned time series into train / cross-validation / test
    sets plus next-day up(1)/down(0) labels.

    Fixes over the previous revision:
    - `range(1:...)`, `y_vals_labels[,i]` and the `y_vals_lables` typo were
      syntax errors; the label loop now runs.
    - the label container is a DataFrame (the old code called `.ix` /
      `.as_matrix()` on a bare numpy array).
    - the final train/CV split previously discarded row `train_len - 1`
      entirely (sliced `[0:train_len-1]` then `[train_len:]`).

    Side effects: sets self.X_train/X_train2/X_cv/X_test/X_test2,
    self.y_train/y_cv/y_test (numpy arrays) and self.y_dates.
    """
    print("generating x_train, y_train, x_test")
    data_shape = self.raw_data.shape[0]
    print("data_shape of raw_data: %r" % data_shape)
    # 90% of the rows are train (everything but the most recent period)
    train_len = int(round(data_shape * .9))
    print("train_len of raw_data: %r" % train_len)
    # get rid of any NaN that may have appeared in there
    self.raw_data.replace(to_replace=np.nan, value=0, inplace=True)
    # rows [0, train_len) are train; row train_len starts the test block
    X_train = self.raw_data.iloc[0:train_len, :]
    X_train2 = self.raw_data2.iloc[0:train_len, :]
    # the last row has no next-day close to validate against, so drop it
    X_test = self.raw_data.iloc[train_len:data_shape - 1, :]
    X_test2 = self.raw_data2.iloc[train_len:data_shape - 1, :]
    # labels: day i's target is day i+1's close direction
    y_vals_raw = self.raw_data.loc[:, ['Close']]
    y_vals_raw.rename(columns={'Close': 'nxtDayClose'}, inplace=True)
    # 1 == "up [day]" (close rose vs. prior close), 0 == "down [day]"
    y_vals_labels = pd.DataFrame(np.zeros(y_vals_raw.shape),
                                 columns=['nxtDayClose'])
    for i in range(1, y_vals_labels.shape[0]):  # skip 0: no prior day
        if y_vals_raw.iloc[i, 0] > y_vals_raw.iloc[i - 1, 0]:
            y_vals_labels.iloc[i, 0] = 1
        else:
            y_vals_labels.iloc[i, 0] = 0
    # shift by one: feature row i pairs with label i+1
    y_train = y_vals_labels.iloc[1:train_len + 1]
    y_test = y_vals_labels.iloc[train_len + 1:data_shape]
    self.X_train = X_train
    self.X_train2 = X_train2
    self.y_train = y_train.values  # make sure they're plain arrays
    self.X_test = X_test
    self.X_test2 = X_test2
    self.y_test = y_test.values
    # dates aligned with the test labels, for the output file
    self.y_dates = self.raw_dates.iloc[train_len + 1:data_shape].values
    # last step: carve a cross-validation set off the end of train.
    # time series, so no shuffling -- last 10% of train becomes CV.
    data_shape = self.X_train.shape[0]
    train_len = int(round(data_shape * .9))
    X_train = self.X_train.iloc[0:train_len]
    X_cv = self.X_train.iloc[train_len:data_shape]
    self.X_train = X_train
    self.X_cv = X_cv
    y_train = self.y_train[0:train_len]
    y_cv = self.y_train[train_len:data_shape]
    self.y_train = y_train
    self.y_cv = y_cv
    print("shapes of final train/tests: \n x_train: %r \n y_train: %r \n x_cv: %r \n y_cv: %r \n x_test: %r \n y_test: %r" % (X_train.shape, y_train.shape, X_cv.shape, y_cv.shape, X_test.shape, y_test.shape))
    return
class Forecast:
    """End-to-end experiment driver: builds a Company (data prompts +
    train/CV/test splits), trains SVM and ANN models, simulates trading
    on each model's predictions, and writes the result CSVs.
    """
    def __init__(self):
        # Company() is expected to prompt for input and prepare the splits
        self.company = Company()
        self.basic_vis()
        self.pre_process_data() #v1.x-ish: scaling, PCA, etc
        self.svm() # uses self.company.X_train/test, etc
        self.ann() # uses self.company.X_train/test, etc
        # self.ensemble() # v1.x
        # each call generates per-day decisions plus realized gain/loss,
        # tracking a notion of "shares held" on self
        self.svm_decisions, self.svm_gain_loss = self.decisions(self.svm_preds)
        self.ann_decisions, self.ann_gain_loss = self.decisions(self.ann_preds)
        self.buy_hold_prof_loss()
        self.profit_loss_rollup()
        self.write_final_file()
def pre_process_data(self):
    """Cast the label vectors to float and scale the feature matrices.

    Fix: the scaler is now fit on the training set only and applied via
    ``transform`` to the test/CV sets. Previously each split was
    independently re-fit with ``fit_transform``, which both leaks
    test-set statistics and scales the splits inconsistently.
    """
    # STRUCTURE and CLEAN UP: flatten the (n, 1) label matrices to
    # float vectors (y_* come out of gen_train_test as 2-D arrays)
    self.company.y_train = np.array(self.company.y_train[0:, 0])
    self.company.y_train = self.company.y_train.astype(float)
    self.company.y_test = np.array(self.company.y_test[0:, 0])
    self.company.y_test = self.company.y_test.astype(float)
    # keep the unscaled daily highs/lows for the trading simulation
    self.daily_highs = self.company.X_test2['High']
    self.daily_lows = self.company.X_test2['Low']
    # SCALE input values: fit on train, apply the same scaling elsewhere
    scaler = StandardScaler()
    self.company.X_train = scaler.fit_transform(self.company.X_train)
    self.company.X_test = scaler.transform(self.company.X_test)
    self.company.X_cv = scaler.transform(self.company.X_cv)
    return
def basic_vis(self):
    """Save a masked lower-triangle heatmap of the training-feature
    correlations to the experiment's correlation-images directory.

    Fix: ``.split('-').[0]`` had a stray '.' (syntax error).
    """
    correlations = self.company.X_train.corr()  # pandas built-in correlation
    # Generate a mask for the upper triangle (cuz it's just distracting)
    mask = np.zeros_like(correlations, dtype=np.bool)
    mask[np.triu_indices_from(mask)] = True
    # Set up the matplotlib figure
    f, ax = plt.subplots(figsize=(11, 9))
    # company ticker is the leading token of the input file name
    ticker = self.company.base_file.split('-')[0]
    plt.title("Feature Correlations: " + ticker)
    # Generate a custom diverging colormap
    cmap = sns.diverging_palette(220, 10, as_cmap=True)
    # Draw the heatmap with the mask and correct aspect ratio
    sns.heatmap(correlations, mask=mask, cmap=cmap, vmax=.3,
                square=False, xticklabels=3, yticklabels=True,
                linewidths=.6, cbar_kws={"shrink": .5}, ax=ax)
    plt.yticks(rotation=0)
    #plt.show()
    f.savefig(self.company.fin_dir + '/correlation-images/' +
              self.company.experiment_version + '.png')
def svm(self):
    """Grid-search an RBF support vector regressor, record CV and test
    MSE, persist the best model, and save a learning-curve plot.

    Fixes: ``svm.SVM`` does not exist -- regression uses ``svm.SVR``;
    the "test" MSE was previously re-scored on the CV set and is now
    scored on the held-out test set.
    """
    # coarse log-spaced grid: 6 points per axis instead of the usual 12
    # to bound run-time length
    C_range = np.logspace(0, 4, 6)
    gamma_range = np.logspace(-5, 1, 6)
    param_grid = dict(gamma=gamma_range, C=C_range)
    # based on LONG test with the gridsearch (see notes) for v4b-5
    # below is rounded numbers
    #param_grid = dict(C=[432876], gamma=[1.8738])
    ## probably want to introduce max iterations...
    grid = GridSearchCV(svm.SVR(kernel='rbf', verbose=True),
                        param_grid=param_grid, cv=2,
                        scoring='mean_squared_error')
    grid.fit(self.company.X_train, self.company.y_train)
    print("The best parameters are %s with a score of %0.2f"
          % (grid.best_params_, grid.best_score_))
    self.svm_preds = grid.predict(self.company.X_test)
    # this is for repeating or one-off specific experiments
    #self.svm_C = float(raw_input("input C val: "))
    #self.svm_gamma = float(raw_input("input gamma val: "))
    #regression = svm.SVR(kernel='rbf', C=self.svm_C, gamma=self.svm_gamma, verbose=True)
    #regression.fit(self.X_train, self.y_train)
    #self.svm_preds = regression.predict(self.company.X_test)
    self.svm_mse_cv = grid.score(self.company.X_cv, self.company.y_cv)
    print("(cv) Mean Squared Error: %f" % self.svm_mse_cv)
    # score the held-out test set (was incorrectly re-scoring the CV set)
    self.svm_mse_test = grid.score(self.company.X_test, self.company.y_test)
    print("(test) Mean Squared Error: %f" % self.svm_mse_test)
    # save the best estimator to a file for this experiment version
    joblib.dump(grid.best_estimator_, self.company.fin_dir + '/svm-models/' +
                self.company.experiment_version + '_svm_model.pkl')
    # visualize results: learning curve of the best estimator
    plt.figure()
    plt.title("SVM Learning Curve: " + self.company.experiment_version)
    plt.xlabel("Training examples")
    plt.ylabel("Score")
    train_sizes, train_scores, test_scores = learning_curve(
        grid.best_estimator_, self.company.X_train, self.company.y_train,
        cv=5, train_sizes=[50, 100, 200, 300, 400, 500, 600])
    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)
    plt.grid()
    plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
                     train_scores_mean + train_scores_std,
                     alpha=0.1, color="r")
    plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
                     test_scores_mean + test_scores_std,
                     alpha=0.1, color="g")
    plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
             label="Training score")
    plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
             label="Cross-validation score")
    plt.legend(loc="best")
    plt.savefig(self.company.fin_dir + '/svm-learning-curves/' +
                self.company.experiment_version + '.png')
def ann(self):
    """Train a small dense feed-forward net (Keras 0.x API) on the scaled
    features; store test-window predictions in self.ann_preds, CV score
    in self.ann_mse, and dump the architecture as YAML.
    """
    #print self.company.X_train.shape[1]
    model = Sequential()
    model.add(Dense(input_dim=self.company.X_train.shape[1], output_dim=50, init="glorot_uniform"))
    #model.add(Activation('tanh'))
    model.add(Dropout(0.1))
    model.add(Dense(input_dim=50, output_dim=10, init="uniform"))
    model.add(Activation('tanh'))
    #model.add(Dropout(0.5))
    model.add(Dense(input_dim=10, output_dim=1, init="glorot_uniform"))
    model.add(Activation("tanh"))
    # NOTE(review): sgd is constructed but unused -- compile() below picks
    # 'rmsprop'; confirm which optimizer was intended.
    sgd = SGD(lr=0.3, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='mean_squared_error', optimizer='rmsprop')
    # stop when validation loss has not improved for 110 epochs
    early_stopping = EarlyStopping(monitor='val_loss', patience=110)
    model.fit(self.company.X_train, self.company.y_train, nb_epoch=1000, validation_split=.1, batch_size=16, verbose = 1, show_accuracy = True, shuffle = False, callbacks=[early_stopping])
    self.ann_mse = model.evaluate(self.company.X_cv, self.company.y_cv, show_accuracy=True, batch_size=16)
    print self.ann_mse
    self.ann_preds = model.predict(self.company.X_test)
    # persist the architecture (weight saving is currently disabled)
    yaml_string = model.to_yaml()
    with open(self.company.fin_dir + '/ann-models/' + self.company.experiment_version +'_ann_model.yml', 'w+') as outfile:
        outfile.write( yaml.dump(yaml_string, default_flow_style=True) )
    #model.save_weights(self.company.fin_dir + '/ann-models/' + self.company.experiment_version +'_ann_weights')
    """
    nb_features = self.company.X_train.shape[1]
    X_train = self.company.X_train.reshape(self.company.X_train.shape + (1, ))
    X_test = self.company.X_test.reshape(self.company.X_test.shape + (1, ))
    print X_train.shape
    model = Sequential()
    model.add(Convolution1D(nb_filter = 24, filter_length = 1, input_shape =(nb_features,1) ))
    model.add(Activation("tanh"))
    model.add(Dropout(0.2)) # some dropout to help w/ overfitting
    model.add(Convolution1D(nb_filter = 48, filter_length= 1, subsample_length= 1))
    model.add(Activation("tanh"))
    model.add(Convolution1D(nb_filter = 96, filter_length= 1, subsample_length=1))
    model.add(Activation("tanh"))
    model.add(Dropout(0.3))
    model.add(Convolution1D(nb_filter = 192, filter_length= 1, subsample_length=1))
    model.add(Activation("tanh"))
    model.add(Dropout(0.6))
    model.add(MaxPooling1D(pool_length=2))
    # flatten to add dense layers
    model.add(Flatten())
    #model.add(Dense(input_dim=nb_features, output_dim=50))
    model.add(Dense(nb_features * 2))
    model.add(Activation("tanh"))
    #model.add(Dropout(0.5))
    model.add(Dense(1))
    model.add(Activation("linear"))
    sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='mean_squared_error', optimizer='sgd')
    early_stopping = EarlyStopping(monitor='val_loss', patience=5)
    model.fit(X_train, self.company.y_train, nb_epoch=50, validation_split=0.25, verbose = 1, callbacks=[early_stopping])
    self.ann_preds = model.predict(X_test)
    """
    #print self.ann_preds
    #print "Trained ANN Score: %r" % score
    # visualize
    #plot(model, to_file= '/ann-training/' + self.company.fin_file_name + '.png')
    return
def ensemble(self):
    """Placeholder for a future model-ensembling step (v1.x); no-op."""
    return None
def decisions(self, predictions):
    """Simulate a simple long-only trading policy over the test window.

    Per day: buy ~$1000 of stock when the prediction exceeds the previous
    close and nothing is held; sell on a >3% stop-loss or on a stop-gain;
    force liquidation on the final day.

    @param predictions: per-day predicted closes, one per test day.
    @return: (decisions, gain_loss) -- one decision string per day plus
        the realized P/L of each completed round trip.
    """
    # intializations: self.shares_held = 0 & buy_price = 0
    self.shares_held = 0
    self.buy_price = 0
    decisions = []
    gain_loss = []
    num_preds = predictions.shape[0]
    #print "total number of predictions: %f" % num_preds
    #print "shape of y_test: %f " % self.company.y_test.shape
    # loop through each prediction and make a purchase decision
    # uses for-i loop because i want to use the int for indexing within
    for i in range(0,num_preds):
        # SETUP
        # the actual close value (only used by the debug prints below)
        actual_close = round(self.company.y_test[i],3)
        day_high = self.daily_highs.iloc[i]
        day_low = self.daily_lows.iloc[i]
        # the previous close, pulled from y_train (for first row of x) and y_test
        if i == 0:
            prv_close = round(self.company.y_train[-1],3)
        else:
            prv_close = round(self.company.y_test[i-1],3)
        #print "%r :: %r" % (prv_close, predictions[i])
        # *have* to liquidate on the last day
        if (i == num_preds -1) and (self.shares_held > 0):
            sell_price = (day_high + day_low) / 2 # mean of day high/low..."market-ish price"
            gain_loss.append(sell_price * self.shares_held - self.buy_price * self.shares_held )
            decisions.append("final_day_liquidation")
            break
        # ACTUAL DECISIONS
        # buy
        if predictions[i] > prv_close and self.shares_held == 0:
            # have to fabricate a buy price: using mean of the day's high
            # and low...seems sort of realistic
            self.buy_price = round((day_high + day_low) / 2, 3)
            self.shares_held = int(round(1000 / self.buy_price))
            #print "shares purchased: %r at %r" % (self.shares_held, self.buy_price)
            #print "actual close: %r :: predicted close: %r :: previous close: %r " % (actual_close, predictions[i], prv_close)
            decisions.append("purchase")
        # sells (stop loss)
        elif (self.buy_price > prv_close) and (self.shares_held > 0):
            # stop loss check; if not > 3% loss, then no change
            if (prv_close / self.buy_price) < .97:
                sell_price = (day_high + day_low) / 2 # mean of day high/low..."market-ish price"
                gain_loss.append(sell_price * self.shares_held - self.buy_price * self.shares_held)
                # reset holdings
                self.shares_held = 0
                self.buy_price = 0
                decisions.append("stop_loss_sell")
            else: # could do dollar cost averaging here (if wanted to get fancy)
                decisions.append("Hold")
        # sells (stop gain)
        elif (self.buy_price < prv_close) and (self.shares_held > 0):
            # stop gain check -- NOTE(review): threshold 1.09 is a 9% gain
            # although the original comment said 10%; confirm intent.
            if (prv_close / self.buy_price) > 1.09:
                sell_price = (day_high + day_low) / 2 # mean of day high/low..."market-ish price"
                gain_loss.append(sell_price * self.shares_held - self.buy_price * self.shares_held )
                self.shares_held = 0
                self.buy_price = 0
                decisions.append("stop_gain_sell")
            else:
                decisions.append("Hold")
        else:
            decisions.append("Hold")
    #print decisions
    return decisions, gain_loss
def profit_loss_rollup(self):
    """Summarize strategy P/L and model errors into self.profit_df
    (single-column frame) and echo the headline numbers.
    """
    # could output something like shares purchased / sold, cost basis &
    # exit price; for now just one value per row
    svm_total = np.sum(self.svm_gain_loss)
    ann_total = np.sum(self.ann_gain_loss)
    rows = [self.bh_pl, svm_total, ann_total,
            self.svm_mse_cv, self.svm_mse_test, self.ann_mse]
    self.profit_df = pd.DataFrame(
        rows,
        index=["BUY-HOLD", "SVM", "ANN", "SVM-MSE-CV", "SVM-MSE-TEST", "ANN-MSE"],
        columns=["Profit/Loss"])
    print("Buy & Hold profit/loss %r" % self.bh_pl)
    print("SVM profit/loss %r" % svm_total)
    print("ANN profit/loss %r" % ann_total)
    return
def buy_hold_prof_loss(self):
    """Baseline strategy: buy ~$1000 at the start of the test window and
    sell at the end; stores the result in self.bh_pl.
    """
    y_tr = self.company.y_train
    y_te = self.company.y_test
    # entry price: mean of the last train close and the first test close
    entry_price = round((y_te[0] + y_tr[-1]) / 2, 3)
    shares = int(round(1000 / entry_price, 0))
    # exit price: mean of the final two test closes
    exit_price = round((y_te[-2] + y_te[-1]) / 2, 3)
    self.bh_pl = exit_price * shares - entry_price * shares
    return
def write_final_file(self):
    """Assemble actual closes, model predictions and trade decisions into
    one frame; write the per-day results CSV and the P/L rollup CSV.
    """
    # NOTE(review): the 'SVM-decisons' spelling is preserved as-is -- it
    # becomes a column header in the output CSV and downstream consumers
    # may depend on it.
    columns = ['Actual', 'SVM', 'ANN', 'SVM-decisons', 'ANN-decisions']
    # going to make a data frame to print to a csv
    # but preds were not all in the same shape
    # this helps with that and merges them all up
    self.final_df = np.vstack((self.company.y_test, self.svm_preds))
    self.final_df = np.transpose(self.final_df)
    # ann_preds is stacked without transposing -- presumably already a
    # column (n, 1) from model.predict; confirm if shapes ever change
    self.final_df = np.hstack((self.final_df, self.ann_preds))
    #print self.final_df.shape
    #print np.array( [self.svm_decisions] ).shape
    self.final_df = np.hstack((self.final_df, np.transpose(np.array( [self.svm_decisions] )) ))
    self.final_df = np.hstack((self.final_df, np.transpose(np.array( [self.ann_decisions] )) ))
    self.final_df = pd.DataFrame(self.final_df, columns=columns)
    self.final_df['Date'] = self.company.y_dates
    final_file = self.final_df.to_csv(self.company.fin_file_name,index_label='id')
    pl_fin_file = self.profit_df.to_csv(self.company.pl_file_name, index=True)
    return
if __name__ == "__main__":
    # kick off a full end-to-end run (prompts for input interactively).
    # Fix: stray "| gpl-3.0 |" tokens fused onto this line were a syntax
    # error and have been removed.
    forecast = Forecast()
webmasterraj/GaSiProMo | flask/lib/python2.7/site-packages/pandas/computation/scope.py | 24 | 9002 | """Module for scope operations
"""
import sys
import struct
import inspect
import datetime
import itertools
import pprint
import numpy as np
import pandas as pd
from pandas.compat import DeepChainMap, map, StringIO
from pandas.core.base import StringMixin
import pandas.computation as compu
def _ensure_scope(level, global_dict=None, local_dict=None, resolvers=(),
                  target=None, **kwargs):
    """Build a :class:`Scope` one frame above the caller's requested level.

    Extra keyword arguments are accepted (and ignored) so callers can pass
    their kwargs through unconditionally.
    """
    scope_kwargs = dict(global_dict=global_dict, local_dict=local_dict,
                        resolvers=resolvers, target=target)
    return Scope(level + 1, **scope_kwargs)
def _replacer(x):
"""Replace a number with its hexadecimal representation. Used to tag
temporary variables with their calling scope's id.
"""
# get the hex repr of the binary char and remove 0x and pad by pad_size
# zeros
try:
hexin = ord(x)
except TypeError:
# bytes literals masquerade as ints when iterating in py3
hexin = x
return hex(hexin)
def _raw_hex_id(obj):
    """Return the padded hexadecimal id of ``obj``."""
    # id() is effectively a pointer here; pack it as a native
    # pointer-sized integer and hex-encode each byte
    pointer_bytes = struct.pack('@P', id(obj))
    return ''.join(_replacer(byte) for byte in pointer_bytes)
# Names injected into every Scope so expressions can reference them without
# the user defining them (e.g. 'Timestamp', 'inf', 'True' inside eval/query
# strings).
_DEFAULT_GLOBALS = {
    'Timestamp': pd.lib.Timestamp,
    'datetime': datetime.datetime,
    'True': True,
    'False': False,
    'list': list,
    'tuple': tuple,
    'inf': np.inf,
    'Inf': np.inf,
}
def _get_pretty_string(obj):
"""Return a prettier version of obj
Parameters
----------
obj : object
Object to pretty print
Returns
-------
s : str
Pretty print object repr
"""
sio = StringIO()
pprint.pprint(obj, stream=sio)
return sio.getvalue()
class Scope(StringMixin):

    """Object to hold scope, with a few bells to deal with some custom syntax
    and contexts added by pandas.

    Parameters
    ----------
    level : int
    global_dict : dict or None, optional, default None
    local_dict : dict or Scope or None, optional, default None
    resolvers : list-like or None, optional, default None
    target : object

    Attributes
    ----------
    level : int
    scope : DeepChainMap
    target : object
    temps : dict
    """
    # NOTE(review): __init__ also assigns self.resolvers, which is absent
    # here; presumably this works because the base class does not define
    # __slots__, so instances still carry a __dict__ -- confirm.
    __slots__ = 'level', 'scope', 'target', 'temps'
def __init__(self, level, global_dict=None, local_dict=None, resolvers=(),
             target=None):
    """Capture the calling frame's globals/locals ``level`` frames up,
    layering them on top of the default globals (and, optionally, an
    existing Scope's mappings).
    """
    self.level = level + 1

    # shallow copy because we don't want to keep filling this up with what
    # was there before if there are multiple calls to Scope/_ensure_scope
    self.scope = DeepChainMap(_DEFAULT_GLOBALS.copy())
    self.target = target

    if isinstance(local_dict, Scope):
        # inherit an existing Scope's mappings, target and frame level
        self.scope.update(local_dict.scope)
        if local_dict.target is not None:
            self.target = local_dict.target
        self.update(local_dict.level)

    frame = sys._getframe(self.level)

    try:
        # shallow copy here because we don't want to replace what's in
        # scope when we align terms (alignment accesses the underlying
        # numpy array of pandas objects)
        self.scope = self.scope.new_child((global_dict or
                                           frame.f_globals).copy())
        if not isinstance(local_dict, Scope):
            self.scope = self.scope.new_child((local_dict or
                                               frame.f_locals).copy())
    finally:
        # drop the frame reference promptly to avoid reference cycles
        del frame

    # assumes that resolvers are going from outermost scope to inner
    if isinstance(local_dict, Scope):
        resolvers += tuple(local_dict.resolvers.maps)
    self.resolvers = DeepChainMap(*resolvers)
    self.temps = {}
def __unicode__(self):
    """Readable repr showing the scope keys and resolver keys."""
    template = '%s(scope=%s, resolvers=%s)'
    pretty_scope = _get_pretty_string(list(self.scope.keys()))
    pretty_resolvers = _get_pretty_string(list(self.resolvers.keys()))
    return template % (type(self).__name__, pretty_scope, pretty_resolvers)
@property
def has_resolvers(self):
    """Whether any extra resolver mappings are attached.

    For example, DataFrames pass their columns as resolvers during calls
    to ``DataFrame.eval()`` and ``DataFrame.query()``.

    Returns
    -------
    hr : bool
    """
    return len(self.resolvers) > 0
def resolve(self, key, is_local):
    """Resolve a variable name in a possibly local context

    Lookup order: frame scope (for locals) -> resolvers (if any) ->
    frame scope -> temporaries; exhausting all four raises
    UndefinedVariableError.

    Parameters
    ----------
    key : text_type
        A variable name
    is_local : bool
        Flag indicating whether the variable is local or not (prefixed with
        the '@' symbol)

    Returns
    -------
    value : object
        The value of a particular variable
    """
    try:
        # only look for locals in outer scope
        if is_local:
            return self.scope[key]

        # not a local variable so check in resolvers if we have them
        if self.has_resolvers:
            return self.resolvers[key]

        # if we're here that means that we have no locals and we also have
        # no resolvers
        assert not is_local and not self.has_resolvers
        return self.scope[key]
    except KeyError:
        try:
            # last ditch effort we look in temporaries
            # these are created when parsing indexing expressions
            # e.g., df[df > 0]
            return self.temps[key]
        except KeyError:
            raise compu.ops.UndefinedVariableError(key, is_local)
def swapkey(self, old_key, new_key, new_value=None):
    """Replace a variable name, with a potentially new value.

    Parameters
    ----------
    old_key : str
        Current variable name to replace
    new_key : str
        New variable name to replace `old_key` with
    new_value : object
        Value to be replaced along with the possible renaming
    """
    if self.has_resolvers:
        search_maps = self.resolvers.maps + self.scope.maps
    else:
        # NOTE: this branch aliases self.scope.maps directly, so the
        # append below presumably mutates the scope's own map list --
        # preserved as-is; confirm that is intended.
        search_maps = self.scope.maps
    search_maps.append(self.temps)

    # rewrite only the first (innermost-to-outermost) mapping that holds
    # the old key, then stop
    for mapping in search_maps:
        if old_key in mapping:
            mapping[new_key] = new_value
            return
def _get_vars(self, stack, scopes):
    """Get specifically scoped variables from a list of stack frames.

    Each matching frame dictionary is layered onto self.scope as a new
    child mapping.

    Parameters
    ----------
    stack : list
        A list of stack frames as returned by ``inspect.stack()``
    scopes : sequence of strings
        A sequence containing valid stack frame attribute names that
        evaluate to a dictionary. For example, ('locals', 'globals')
    """
    variables = itertools.product(scopes, stack)
    for scope, (frame, _, _, _, _, _) in variables:
        try:
            d = getattr(frame, 'f_' + scope)
            self.scope = self.scope.new_child(d)
        finally:
            # won't remove it, but DECREF it
            # in Py3 this probably isn't necessary since frame won't be
            # scope after the loop
            del frame
def update(self, level):
    """Update the current scope by going back `level` levels.

    Parameters
    ----------
    level : int or None, optional, default None
    """
    sl = level + 1

    # add sl frames to the scope starting with the
    # most distant and overwriting with more current
    # makes sure that we can capture variable scope
    stack = inspect.stack()

    try:
        self._get_vars(stack[:sl], scopes=['locals'])
    finally:
        # clear the frame records explicitly to break reference cycles
        del stack[:], stack
def add_tmp(self, value):
    """Add a temporary variable to the scope.

    Parameters
    ----------
    value : object
        An arbitrary object to be assigned to a temporary variable.

    Returns
    -------
    name : basestring
        The name of the temporary variable created.
    """
    # name combines the value's type, the current temp count, and this
    # scope's id so it cannot collide with user variables
    name = '%s_%s_%s' % (type(value).__name__, self.ntemps,
                         _raw_hex_id(self))

    # the name must be fresh; store it in the innermost (temps) layer
    assert name not in self.temps
    self.temps[name] = value
    assert name in self.temps

    return name
@property
def ntemps(self):
    """The number of temporary variables in this scope"""
    count = len(self.temps)
    return count
@property
def full_scope(self):
    """Return the full scope for use with passing to engines transparently
    as a mapping.

    Returns
    -------
    vars : DeepChainMap
        All variables in this scope: temporaries first, then resolvers,
        then the frame-scope layers.
    """
    layers = [self.temps]
    layers.extend(self.resolvers.maps)
    layers.extend(self.scope.maps)
    return DeepChainMap(*layers)
| gpl-2.0 |
tehtechguy/mHTM | dev/mnist_novelty_detection/mnist_novelty_detection.py | 1 | 12417 | # mnist_novelty_detection.py
#
# Author : James Mnatzaganian
# Contact : http://techtorials.me
# Organization : NanoComputing Research Lab - Rochester Institute of
# Technology
# Website : https://www.rit.edu/kgcoe/nanolab/
# Date Created : 03/13/16
#
# Description : Experiment for using the SP for novelty detection.
# Python Version : 2.7.X
#
# License : MIT License http://opensource.org/licenses/mit-license.php
# Copyright : (c) 2016 James Mnatzaganian
"""
Experiment for using the SP for novelty detection with MNIST.
G{packagetree mHTM}
"""
__docformat__ = 'epytext'
# Native imports
import os, cPickle, json, sys
# Third party imports
import numpy as np
from scipy.stats import uniform, randint
from sklearn.svm import OneClassSVM
# Program imports
from mHTM.region import SPRegion
from mHTM.datasets.loader import load_mnist
from mHTM.parallel import create_runner, execute_runner, ParamGenerator
from mHTM.metrics import SPMetrics
def parallel_params(log_dir, niter=10000, seed=123456789):
    """
    Create the parameters for a parallel run.

    @param log_dir: The directory to store the results in.

    @param niter: The number of iterations to perform.

    @param seed: The seed for the random number generators.

    @return: Returns a tuple containing the parameters.
    """
    # configuration held constant across every sampled run
    fixed_config = {
        'ninputs': 784,
        'trim': 1e-4,
        'disable_boost': True,
        'seed': seed,
        'pct_active': None,
        'random_permanence': True,
        'pwindow': 0.5,
        'global_inhibition': True,
        'syn_th': 0.5,
        'pinc': 0.001,
        'pdec': 0.001,
        'nepochs': 10
    }
    # distributions the random search samples from
    sampled_config = {
        'ncolumns': randint(500, 3500),
        'nactive': uniform(0.5, 0.35),  # As a % of the number of columns
        'nsynapses': randint(25, 784),
        'seg_th': uniform(0, 0.2),  # As a % of the number of synapses
        'log_dir': log_dir
    }

    # every sampled key draws from the same generator instance
    gen = ParamGenerator(sampled_config, niter, 1, 784)
    return fixed_config, dict.fromkeys(sampled_config, gen)
def static_params(log_dir, seed=123456789):
    """
    Create the fixed SP configuration for a single (local) run.

    @param log_dir: The directory to store the results in.

    @param seed: The seed for the random number generators.

    @return: The configuration parameters.
    """
    return dict(
        # input / structural parameters
        ninputs=784,
        ncolumns=784,
        nactive=39,
        nsynapses=50,
        seg_th=0,
        syn_th=0.5,
        # permanence dynamics
        trim=1e-4,
        pinc=0.001,
        pdec=0.001,
        pwindow=0.5,
        random_permanence=True,
        # training behavior
        disable_boost=True,
        global_inhibition=True,
        pct_active=None,
        nepochs=10,
        # bookkeeping
        seed=seed,
        log_dir=log_dir,
    )
def base_experiment(config, ntrials=1, seed=123456789):
    """
    Run a single experiment, locally.

    @param config: The configuration parameters to use for the SP.

    @param ntrials: The number of times to repeat the experiment.

    @param seed: The random seed to use.

    @return: A tuple containing the percentage errors for the SP's training
    and testing results and the SVM's training and testing results,
    respectively.
    """
    # Base parameters
    ntrain, ntest = 800, 200
    clf_th = 0.5  # overlap threshold for the SP-based classifier below

    # Seed numpy
    np.random.seed(seed)

    # Get the data: digit 0 is the "base" class, digits 1-9 are novelties
    (tr_x, tr_y), (te_x, te_y) = load_mnist()
    tr_x_0 = np.random.permutation(tr_x[tr_y == 0])
    x_tr = tr_x_0[:ntrain]
    x_te = tr_x_0[ntrain:ntrain + ntest]
    outliers = [np.random.permutation(tr_x[tr_y == i])[:ntest] for i in
                xrange(1, 10)]

    # Metrics
    metrics = SPMetrics()

    # Get the metrics for the datasets (uniqueness / overlap / correlation)
    u_x_tr = metrics.compute_uniqueness(x_tr)
    o_x_tr = metrics.compute_overlap(x_tr)
    c_x_tr = 1 - metrics.compute_distance(x_tr)
    u_x_te = metrics.compute_uniqueness(x_te)
    o_x_te = metrics.compute_overlap(x_te)
    c_x_te = 1 - metrics.compute_distance(x_te)
    u_y_te, o_y_te, c_y_te = [], [], []
    for outlier in outliers:
        u_y_te.append(metrics.compute_uniqueness(outlier))
        o_y_te.append(metrics.compute_overlap(outlier))
        c_y_te.append(1 - metrics.compute_distance(outlier))

    # Initialize the overall results
    sp_x_results = np.zeros(ntrials)
    sp_y_results = [np.zeros(ntrials) for _ in xrange(9)]
    svm_x_results = np.zeros(ntrials)
    svm_y_results = [np.zeros(ntrials) for _ in xrange(9)]

    # Iterate across the trials:
    for nt in xrange(ntrials):
        # Make a new seed for this trial
        seed2 = np.random.randint(1000000)
        config['seed'] = seed2

        # Create the SP
        sp = SPRegion(**config)

        # Fit the SP
        sp.fit(x_tr)

        # Get the SP's output
        sp_x_tr = sp.predict(x_tr)
        sp_x_te = sp.predict(x_te)
        sp_y_te = [sp.predict(outlier) for outlier in outliers]

        # Get the metrics for the SP's results
        u_sp_x_tr = metrics.compute_uniqueness(sp_x_tr)
        o_sp_x_tr = metrics.compute_overlap(sp_x_tr)
        c_sp_x_tr = 1 - metrics.compute_distance(sp_x_tr)
        u_sp_x_te = metrics.compute_uniqueness(sp_x_te)
        o_sp_x_te = metrics.compute_overlap(sp_x_te)
        c_sp_x_te = 1 - metrics.compute_distance(sp_x_te)
        u_sp_y_te, o_sp_y_te, c_sp_y_te = [], [], []
        for y in sp_y_te:
            u_sp_y_te.append(metrics.compute_uniqueness(y))
            o_sp_y_te.append(metrics.compute_overlap(y))
            c_sp_y_te.append(1 - metrics.compute_distance(y))

        # Log all of the metrics
        sp._log_stats('Input Base Class Train Uniqueness', u_x_tr)
        sp._log_stats('Input Base Class Train Overlap', o_x_tr)
        sp._log_stats('Input Base Class Train Correlation', c_x_tr)
        sp._log_stats('Input Base Class Test Uniqueness', u_x_te)
        sp._log_stats('Input Base Class Test Overlap', o_x_te)
        sp._log_stats('Input Base Class Test Correlation', c_x_te)
        sp._log_stats('SP Base Class Train Uniqueness', u_sp_x_tr)
        sp._log_stats('SP Base Class Train Overlap', o_sp_x_tr)
        sp._log_stats('SP Base Class Train Correlation', c_sp_x_tr)
        sp._log_stats('SP Base Class Test Uniqueness', u_sp_x_te)
        sp._log_stats('SP Base Class Test Overlap', o_sp_x_te)
        sp._log_stats('SP Base Class Test Correlation', c_sp_x_te)
        for i, (a, b, c, d, e, f) in enumerate(zip(u_y_te, o_y_te, c_y_te,
                                                   u_sp_y_te, o_sp_y_te, c_sp_y_te), 1):
            sp._log_stats('Input Novelty Class {0} Uniqueness'.format(i), a)
            sp._log_stats('Input Novelty Class {0} Overlap'.format(i), b)
            sp._log_stats('Input Novelty Class {0} Correlation'.format(i), c)
            sp._log_stats('SP Novelty Class {0} Uniqueness'.format(i), d)
            sp._log_stats('SP Novelty Class {0} Overlap'.format(i), e)
            sp._log_stats('SP Novelty Class {0} Correlation'.format(i), f)

        # Get average representation of the base class (majority vote,
        # thresholded at 0.5 to a binary vector)
        sp_base_result = np.mean(sp_x_tr, 0)
        sp_base_result[sp_base_result >= 0.5] = 1
        sp_base_result[sp_base_result < 1] = 0

        # Averaged results for each metric type
        u_sp_base_to_x_te = 0.
        o_sp_base_to_x_te = 0.
        c_sp_base_to_x_te = 0.
        u_sp, o_sp, c_sp = np.zeros(9), np.zeros(9), np.zeros(9)
        for i, x in enumerate(sp_x_te):
            xt = np.vstack((sp_base_result, x))
            u_sp_base_to_x_te += metrics.compute_uniqueness(xt)
            o_sp_base_to_x_te += metrics.compute_overlap(xt)
            c_sp_base_to_x_te += 1 - metrics.compute_distance(xt)
            for j, yi in enumerate(sp_y_te):
                yt = np.vstack((sp_base_result, yi[i]))
                u_sp[j] += metrics.compute_uniqueness(yt)
                o_sp[j] += metrics.compute_overlap(yt)
                c_sp[j] += 1 - metrics.compute_distance(yt)
        u_sp_base_to_x_te /= ntest
        o_sp_base_to_x_te /= ntest
        c_sp_base_to_x_te /= ntest
        for i in xrange(9):
            u_sp[i] /= ntest
            o_sp[i] /= ntest
            c_sp[i] /= ntest

        # Log the results
        sp._log_stats('Base Train to Base Test Uniqueness',
                      u_sp_base_to_x_te)
        sp._log_stats('Base Train to Base Test Overlap', o_sp_base_to_x_te)
        sp._log_stats('Base Train to Base Test Correlation', c_sp_base_to_x_te)
        for i, j in enumerate(xrange(1, 10)):
            sp._log_stats('Base Train to Novelty {0} Uniqueness'.format(j),
                          u_sp[i])
            sp._log_stats('Base Train to Novelty {0} Overlap'.format(j),
                          o_sp[i])
            sp._log_stats('Base Train to Novelty {0} Correlation'.format(j),
                          c_sp[i])

        # Create an SVM baseline for comparison
        clf = OneClassSVM(kernel='linear', nu=0.1, random_state=seed2)

        # Evaluate the SVM's performance
        clf.fit(x_tr)
        svm_x_te = len(np.where(clf.predict(x_te) == 1)[0]) / float(ntest) * \
            100
        svm_y_te = np.array([len(np.where(clf.predict(outlier) == -1)[0]) /
                             float(ntest) * 100 for outlier in outliers])

        # Perform classification using overlap as the feature
        # -- The overlap must be above 50%
        clf_x_te = 0.
        clf_y_te = np.zeros(9)
        for i, x in enumerate(sp_x_te):
            xt = np.vstack((sp_base_result, x))
            xo = metrics.compute_overlap(xt)
            if xo >= clf_th: clf_x_te += 1
            for j, yi in enumerate(sp_y_te):
                yt = np.vstack((sp_base_result, yi[i]))
                yo = metrics.compute_overlap(yt)
                if yo < clf_th: clf_y_te[j] += 1
        clf_x_te = (clf_x_te / ntest) * 100
        clf_y_te = (clf_y_te / ntest) * 100

        # Store the results as errors
        sp_x_results[nt] = 100 - clf_x_te
        # NOTE(review): sp_y_results / svm_y_results are lists of 9
        # per-class arrays, but these assignments index them by trial
        # number and replace a per-class slot with a 9-element error
        # vector -- looks inconsistent with the initialization above;
        # confirm intent before relying on the returned novelty results.
        sp_y_results[nt] = 100 - clf_y_te
        svm_x_results[nt] = 100 - svm_x_te
        svm_y_results[nt] = 100 - svm_y_te

        # Log the results
        sp._log_stats('SP % Correct Base Class', clf_x_te)
        sp._log_stats('SVM % Correct Base Class', svm_x_te)
        for i, j in enumerate(xrange(1, 10)):
            sp._log_stats('SP % Correct Novelty Class {0}'.format(j),
                          clf_y_te[i])
            sp._log_stats('SVM % Correct Novelty Class {0}'.format(j),
                          svm_y_te[i])
        sp._log_stats('SP % Mean Correct Novelty Class', np.mean(clf_y_te))
        sp._log_stats('SVM % Mean Correct Novelty Class', np.mean(svm_y_te))
        sp._log_stats('SP % Adjusted Score', (np.mean(clf_y_te) * clf_x_te) /
                      100)
        sp._log_stats('SVM % Adjusted Score', (np.mean(svm_y_te) * svm_x_te) /
                      100)

    return sp_x_results, sp_y_results, svm_x_results, svm_y_results
def slurm_prep(log_dir, niter=10000, partition_name='debug',
    this_dir=None):
    """
    Prep the SLURM runs: for each iteration, sample a parameter set, dump
    it as JSON into its own directory, and create + submit a runner script
    for that configuration.

    @param log_dir: The directory to store the results in.

    @param niter: The number of iterations (jobs) to perform.

    @param partition_name: The partition name of the cluster to use.

    @param this_dir: The full path to the directory where this file is
    located. Defaults to the current working directory at call time.
    """
    # Evaluate the default lazily: the previous signature used
    # this_dir=os.getcwd(), which Python evaluates once at import time,
    # silently pinning the path to whatever the cwd happened to be then.
    if this_dir is None:
        this_dir = os.getcwd()
    # Get the configuration details
    static_config, dynamic_config = parallel_params(log_dir, niter)
    # The runner target script is loop-invariant; compute it once.
    mnist_runner_path = os.path.join(this_dir,
        'mnist_novelty_detection.py')
    # Create the runs
    for i in xrange(1, niter + 1):
        # Build the initial params: sample the dynamic ones (sorted keys
        # give a deterministic sampling order), then overlay the statics.
        params = {k: v.rvs() for k, v in sorted(dynamic_config.items())}
        for k, v in static_config.items():
            params[k] = v
        # Create the base directory (strip the trailing '-<suffix>' piece
        # of the configured log_dir basename)
        dir = params['log_dir']
        splits = os.path.basename(dir).split('-')
        dir = os.path.join(os.path.dirname(dir),
            '-'.join(s for s in splits[:-1]))
        try:
            os.makedirs(dir)
        except OSError:
            # Directory already exists
            pass
        # Dump the params as JSON
        s = json.dumps(params, sort_keys=True, indent=4,
            separators=(',', ': ')).replace('},', '},\n')
        with open(os.path.join(dir, 'config.json'), 'wb') as f:
            f.write(s)
        # Create the runner
        command = 'python "{0}" "{1}"'.format(mnist_runner_path, dir)
        runner_path = os.path.join(dir, 'runner.sh')
        job_name = str(i)
        stdio_path = os.path.join(dir, 'stdio.txt')
        stderr_path = os.path.join(dir, 'stderr.txt')
        create_runner(command=command, runner_path=runner_path,
            job_name=job_name, partition_name=partition_name,
            stdio_path=stdio_path, stderr_path=stderr_path,
            time_limit='00-00:45:00', memory_limit=512)
        # Execute the runner
        execute_runner(runner_path)
if __name__ == '__main__':
    # Toggle between a single local run and a full SLURM sweep.
    # local = True
    local = False
    user_path = os.path.expanduser('~')
    # partition_name = 'debug'
    partition_name = 'work'
    # niter = 10
    niter = 10000
    this_dir = os.path.join(user_path, 'mHTM', 'dev')
    if local:
        # Local mode: run one experiment directly with the static params.
        log_dir = os.path.join(user_path, 'scratch', 'novelty_experiments',
            'mnist')
        config = static_params(log_dir)
        base_experiment(config, 1)
    else:
        if len(sys.argv) == 1:
            # No CLI argument: master mode -- create and submit all jobs.
            log_dir = os.path.join(user_path, 'results',
                'mnist_novelty_detection')
            slurm_prep(log_dir, niter, partition_name, this_dir)
        else:
            # CLI argument is a job directory: worker mode -- run the
            # experiment described by that directory's config.json.
            with open(os.path.join(sys.argv[1], 'config.json'), 'rb') as f:
                config = json.load(f)
            base_experiment(config, 1)
| mit |
undercoveridiot/gunfolds | gunfolds/scripts/stackedbars.py | 1 | 3438 | from gunfolds.tools import zickle as zkl
import matplotlib as mpl
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
import pylab as pl
import seaborn as sns
def get_counts(d):
    """Tally equivalence-class sizes.

    For each record in d, take the size of its 'eq' list, and return a
    dict mapping each distinct size (ascending) to the number of records
    having that size.
    """
    sizes = np.array([len(record['eq']) for record in d])
    tally = {}
    for size in np.sort(np.unique(sizes)):
        tally[size] = int((sizes == size).sum())
    return tally
if __name__ == '__main__':
    import sys
    # stackedBarGraph is loaded from a hard-coded local checkout.
    sys.path.append('/na/homes/splis/soft/src/dev/tools/stackedBarGraph/')
    from stackedBarGraph import StackedBarGrapher
    SBG = StackedBarGrapher()
    fig = pl.figure(figsize=[10, 1.3])
    # Read in data & create total column
    d = zkl.load("hooke_nodes_6_g32g1_.zkl")  # hooke_nodes_35_newp_.zkl")
    densities = np.sort(d.keys())
    # unique size
    usz = set()
    dc = {}
    # Count equivalence-class sizes per density and collect every size
    # that occurs anywhere.
    for u in densities:
        dc[u] = get_counts(d[u])
        for v in dc[u]:
            usz.add(v)
    # Fill in zero counts so every density has an entry for every size.
    for u in densities:
        for c in usz:
            if not c in dc[u]:
                dc[u][c] = 0
    # One row of counts per density, ordered by size, for the stacked bars.
    A = []
    for u in densities:
        A.append([dc[u][x] for x in np.sort(dc[u].keys())])
    # print A
    # A = np.array(A)
    pp = mpl.colors.LinearSegmentedColormap.from_list("t", sns.color_palette("Paired", len(usz)))
    # pp = mpl.colors.LinearSegmentedColormap.from_list("t",sns.dark_palette("#5178C7",len(usz)))
    # pp =
    # mpl.colors.LinearSegmentedColormap.from_list("t",sns.blend_palette(["mediumseagreen",
    # "ghostwhite", "#4168B7"],len(usz)))
    scalarMap = mpl.cm.ScalarMappable(norm=lambda x: x / np.double(len(usz)),
                                      cmap=pp)
    d_widths = [.5] * len(densities)
    # X labels: densities expressed as percentages.
    d_labels = map(lambda x: str(int(x * 100)) + "%", densities)
    # u = np.sort(list(usz))
    d_colors = [scalarMap.to_rgba(i) for i in range(len(A[0]))]
    # d_colors = ['#2166ac', '#fee090', '#fdbb84', '#fc8d59', '#e34a33',
    # '#b30000', '#777777','#2166ac', '#fee090', '#fdbb84', '#fc8d59',
    # '#e34a33', '#b30000', '#777777','#2166ac', '#fee090']
    ax = fig.add_subplot(111)
    SBG.stackedBarPlot(ax,
                       A,
                       d_colors,
                       xLabels=d_labels,
                       yTicks=3,
                       widths=d_widths,
                       gap=0.005,
                       scale=False
                       )
    # Annotate each nonzero bar segment with its count, vertically
    # centered within the segment.
    for i in range(len(A)):
        Ai = [x for x in A[i] if x > 0]
        y = [x / 2.0 for x in Ai]
        for j in range(len(Ai)):
            if j > 0:
                yy = y[j] + np.sum(Ai[0:j])
            else:
                yy = y[j]
            pl.text(0.5 * i - 0.02, yy - 1.2, str(Ai[j]), fontsize=12, zorder=10)
    # Set general plot properties
    # sns.set_style("white")
    # sns.set_context({"figure.figsize": (24, 10)})
    # for i in np.sort(list(usz))[::-1]:
    # y = [100-dc[u][i] for u in np.sort(dc.keys())]
    # bottom_plot=sns.barplot(x=np.asarray(densities)*100, y=y)
    # color=scalarMap.to_rgba(i))
    # y = (sbd[i+1]-sbd[i])/2.+sbd[i]scala
    # for j in range(len(sbd.Density)):
    # pl.text(j-0.1,y[j],'1',fontsize=16,zorder=i)
    # Optional code - Make plot look nicer
    sns.despine(left=True)
    # Set fonts to consistent 16pt size
    for item in ([ax.xaxis.label, ax.yaxis.label] +
                 ax.get_xticklabels() + ax.get_yticklabels()):
        item.set_fontsize(12)
    pl.subplots_adjust(bottom=0.2)
    # plt.show()
    pl.show()
| gpl-3.0 |
jaffe59/vp-cnn | cnn_classifier/error_analysis.py | 1 | 1799 | import pandas
from statsmodels.sandbox.stats.runs import mcnemar
cnn_file = 'final_run.3.preds.txt'
lr_file = 'stats.1_prev_no_resp.csv'
dialogue_file = 'corrected.tsv'
def read_in_dialogues(dialogue_file):
    """Parse a dialogue transcript file.

    Lines beginning with '#S' start a new dialogue; every other line is a
    turn. Returns (indices, records) where indices[i] is the
    (dialogue_index, turn_index) pair for records[i], the stripped text of
    that turn.
    """
    indices = []
    utterances = []
    dlg = -1
    turn = -1
    with open(dialogue_file) as fh:
        for raw in fh:
            if raw.startswith('#S'):
                # Header line: advance to the next dialogue, reset turns.
                dlg += 1
                turn = 0
                continue
            indices.append((dlg, turn))
            utterances.append(raw.strip())
            turn += 1
    return indices, utterances
# Load both classifiers' per-turn prediction tables.
cnn_results = pandas.read_csv(cnn_file)
lr_results = pandas.read_csv(lr_file)
# x = cnn_results[(cnn_results['dial_id'] == lr_results['dial_id']) & (cnn_results['turn_id'] == lr_results['turn_id']
# )& (cnn_results['correct'] != lr_results['correct'])]
k = 0
# NOTE(review): despite the name, cnn_right_items collects turns where the
# two models DISAGREE and the CNN is WRONG (condition requires
# `not cnn_item['correct']`) -- verify intent with the author.
cnn_right_items = []
# x / y collect the paired correctness flags (CNN vs LR) per turn,
# presumably for the commented-out McNemar test below.
x, y = [], []
for index, cnn_item in cnn_results.iterrows():
    # print(cnn_item)
    # Match the LR row for the same (dialogue, turn).
    lr_item = lr_results[(lr_results['dial_id']==cnn_item['dial_id']) & (lr_results['turn_id']==cnn_item['turn_id'])]
    # print(lr_item)
    # print(lr_item['correct'].iloc(0), cnn_item['correct'])
    # break
    if lr_item['correct'].iloc[0] != cnn_item['correct'] and not cnn_item['correct']:
        cnn_right_items.append((cnn_item.dial_id, cnn_item.turn_id))
    x.append(cnn_item.correct)
    y.append(lr_item['correct'].iloc[0])
# print(mcnemar(x,y))
# Map (dialogue, turn) pairs back to the transcript text and print them.
indices, dialogues = read_in_dialogues(dialogue_file)
for item in cnn_right_items:
    print(dialogues[indices.index(item)])
    # NOTE(review): the boolean mask is built from lr_results but applied
    # to cnn_results -- works only because the tables are row-aligned;
    # confirm that assumption holds.
    cnn_item = cnn_results[(lr_results['dial_id']==item[0]) & (lr_results['turn_id']==item[1])]
    print(cnn_item)
#
| apache-2.0 |
samgale/MouseEyeTracker | MouseEyeTracker.py | 1 | 126077 | # -*- coding: utf-8 -*-
"""
GUI for tracking mouse pupil area and position/rotation
Acquire data with camera or analyze data from hdf5 or video file
@author: samgale
"""
from __future__ import division
import sip
sip.setapi('QString', 2)
import h5py, json, math, os, time
import cv2
import numpy as np
import scipy.io
import scipy.signal
from PyQt5 import QtCore, QtGui, QtWidgets
import pyqtgraph as pg
from matplotlib import pyplot as plt
class QtSignalGenerator(QtCore.QObject):
    """Helper QObject whose only job is to re-emit camera frames as a Qt signal."""
    # Signal payload: (image array, frame ID, camera timestamp).
    camFrameCapturedSignal = QtCore.pyqtSignal(np.ndarray,int,float)
    def __init__(self):
        QtCore.QObject.__init__(self)
# The frame-captured callback must be thread safe: it runs outside the GUI
# thread, so frame data is forwarded to the GUI through the signal
# generator's queued Qt signal instead of being handled directly here.
qtSignalGeneratorObj = QtSignalGenerator()
def camFrameCaptured(frame):
    # Emit (image, frame ID, timestamp) for the GUI thread, then hand the
    # frame buffer back to the camera so it can be reused, keeping this
    # callback registered for the next frame.
    img = frame.buffer_data_numpy()
    qtSignalGeneratorObj.camFrameCapturedSignal.emit(img,frame.data.frameID,frame.data.timestamp)
    frame.queue_for_capture(frame_callback=camFrameCaptured)
def start():
    """Launch the eye tracker GUI and run the Qt event loop."""
    # Reuse an already-running QApplication if one exists.
    qtApp = QtWidgets.QApplication.instance()
    if qtApp is None:
        qtApp = QtWidgets.QApplication([])
    tracker = EyeTracker(qtApp)
    # Route camera frames (emitted from the capture callback) into the GUI.
    qtSignalGeneratorObj.camFrameCapturedSignal.connect(tracker.processCamFrame)
    qtApp.exec_()
class EyeTracker():
    def __init__(self,app):
        """Build the main window, all menus, and default tracking state.

        app: the QApplication instance driving the GUI event loop.
        """
        self.app = app
        # File/save locations and camera I/O handles (lazily initialized).
        self.fileOpenSavePath = os.path.dirname(os.path.realpath(__file__))
        self.camSavePath = self.fileOpenSavePath
        self.camSaveBaseName = 'MouseEyeTracker'
        self.camSaveFileType = '.hdf5'
        self.camConfig = False
        self.skvideo = None
        self.ffmpeg = None
        self.nidaq = None
        self.vimba = None
        self.cam = None
        self.camFrames = []
        self.videoIn = None
        self.videoOut = None
        self.dataFileIn = None
        self.dataFileOut = None
        # Display and pupil/reflection tracking defaults.
        self.image = None
        self.displayUpdateInterval = 1
        self.roi = None
        self.blurSigma = 2.0
        self.imgExponent = 2.0
        self.showTracking = False
        self.stopTracking = False
        self.setDataNan = False
        self.pupilCenterSeed = None
        self.pupilRoi = None
        self.pupilCircularityThresh = 0.65
        self.pupilGradientDownsample = 0.5
        self.reflectCenterSeed = None
        self.reflectRoi = []
        self.reflectThresh = 254
        self.maskRoi = []
        self.mmPerPixel = np.nan
        self.lensRotRadius = 1.25
        self.lensOffset = 0.1
        self.corneaOffset = 0.2
        self.defaultDataPlotDur = 2.0
        self.dataIsLoaded = False
        # Saccade detection state/parameters.
        self.selectedSaccade = None
        self.negSaccades = np.array([],dtype=int)
        self.posSaccades = self.negSaccades.copy()
        self.saccadeSmoothPts = 3
        self.saccadeThresh = 5
        self.saccadeRefractoryPeriod = 0.1
        # Attribute names persisted by the camera configuration
        # save/load menu actions (presumably as JSON -- see
        # saveCamConfig/loadCamConfig).
        self.configItems = ('camName',
                            'camType',
                            'camSavePath',
                            'camSaveBaseName',
                            'camSaveFileType',
                            'camBufferSize',
                            'camBinning',
                            'camExposure',
                            'frameRate',
                            'displayUpdateInterval',
                            'roiPos',
                            'roiSize',
                            'ffmpeg',
                            'nidaq',
                            'cameraMenuNidaqIn',
                            'cameraMenuNidaqOut')
        # main window
        self.mainWin = QtWidgets.QMainWindow()
        self.mainWin.setWindowTitle('MouseEyeTracker')
        self.mainWin.closeEvent = self.mainWinCloseEvent
        # file menu
        self.menuBar = self.mainWin.menuBar()
        self.menuBar.setNativeMenuBar(False)
        self.fileMenu = self.menuBar.addMenu('File')
        self.fileMenuOpen = self.fileMenu.addMenu('Open')
        self.fileMenuOpenFrames = QtWidgets.QAction('Frame Data',self.mainWin)
        self.fileMenuOpenFrames.triggered.connect(self.loadFrameData)
        self.fileMenuOpenData = QtWidgets.QAction('Tracking Data',self.mainWin,enabled=False)
        self.fileMenuOpenData.triggered.connect(self.loadTrackingData)
        self.fileMenuOpen.addActions([self.fileMenuOpenFrames,self.fileMenuOpenData])
        self.fileMenuSave = self.fileMenu.addMenu('Save')
        self.fileMenuSave.setEnabled(False)
        self.fileMenuSaveFrames = self.fileMenuSave.addMenu('Frame Data')
        self.fileMenuSaveFramesNpz = QtWidgets.QAction('npz',self.mainWin)
        self.fileMenuSaveFramesNpz.triggered.connect(self.saveFrameData)
        self.fileMenuSaveFramesMat = QtWidgets.QAction('mat',self.mainWin)
        self.fileMenuSaveFramesMat.triggered.connect(self.saveFrameData)
        self.fileMenuSaveFrames.addActions([self.fileMenuSaveFramesNpz,self.fileMenuSaveFramesMat])
        self.fileMenuSaveData = self.fileMenuSave.addMenu('Tracking Data')
        self.fileMenuSaveDataHdf5 = QtWidgets.QAction('hdf5',self.mainWin)
        self.fileMenuSaveDataHdf5.triggered.connect(self.saveTrackingData)
        self.fileMenuSaveDataNpz = QtWidgets.QAction('npz',self.mainWin)
        self.fileMenuSaveDataNpz.triggered.connect(self.saveTrackingData)
        self.fileMenuSaveDataMat = QtWidgets.QAction('mat',self.mainWin)
        self.fileMenuSaveDataMat.triggered.connect(self.saveTrackingData)
        self.fileMenuSaveData.addActions([self.fileMenuSaveDataHdf5,self.fileMenuSaveDataNpz,self.fileMenuSaveDataMat])
        self.fileMenuSaveImage = QtWidgets.QAction('Image',self.mainWin)
        self.fileMenuSaveImage.triggered.connect(self.saveImage)
        self.fileMenuSaveMovie = QtWidgets.QAction('Movie',self.mainWin)
        self.fileMenuSaveMovie.triggered.connect(self.saveMovie)
        self.fileMenuSaveAnnotatedMovie = QtWidgets.QAction('Annotated Movie',self.mainWin,enabled=False)
        self.fileMenuSaveAnnotatedMovie.triggered.connect(self.saveMovie)
        self.fileMenuSave.addActions([self.fileMenuSaveImage,self.fileMenuSaveMovie,self.fileMenuSaveAnnotatedMovie])
        # options menu
        self.optionsMenu = self.menuBar.addMenu('Options')
        self.optionsMenuShowTracking = QtWidgets.QAction('Show Pupil Tracking Plots',self.mainWin,checkable=True)
        self.optionsMenuShowTracking.triggered.connect(self.showPupilTrackingPlots)
        self.optionsMenuSetDisplayUpdate = QtWidgets.QAction('Set Display Update Interval',self.mainWin)
        self.optionsMenuSetDisplayUpdate.triggered.connect(self.setDisplayUpdateInterval)
        self.optionsMenu.addActions([self.optionsMenuShowTracking,self.optionsMenuSetDisplayUpdate])
        # camera menu
        self.cameraMenu = self.menuBar.addMenu('Camera')
        self.cameraMenuUseCam = QtWidgets.QAction('Use Camera',self.mainWin,checkable=True)
        self.cameraMenuUseCam.triggered.connect(self.initCamera)
        self.cameraMenu.addAction(self.cameraMenuUseCam)
        self.cameraMenuLoadConfig = QtWidgets.QAction('Load Configuration',self.mainWin)
        self.cameraMenuLoadConfig.triggered.connect(self.loadCamConfig)
        self.cameraMenuClearConfig = QtWidgets.QAction('Clear Configuration',self.mainWin)
        self.cameraMenuClearConfig.triggered.connect(self.clearCamConfig)
        self.cameraMenuSaveConfig = QtWidgets.QAction('Save Configuration',self.mainWin)
        self.cameraMenuSaveConfig.triggered.connect(self.saveCamConfig)
        self.cameraMenuSaveConfig.setEnabled(False)
        self.cameraMenu.addActions([self.cameraMenuLoadConfig,self.cameraMenuClearConfig,self.cameraMenuSaveConfig])
        self.cameraMenuSettings = self.cameraMenu.addMenu('Settings')
        self.cameraMenuSettings.setEnabled(False)
        self.cameraMenuSettingsBufferSize = QtWidgets.QAction('Buffer Size',self.mainWin)
        self.cameraMenuSettingsBufferSize.triggered.connect(self.setCamBufferSize)
        self.cameraMenuSettingsBinning = QtWidgets.QAction('Spatial Binning',self.mainWin)
        self.cameraMenuSettingsBinning.triggered.connect(self.setCamBinning)
        self.cameraMenuSettingsExposure = QtWidgets.QAction('Exposure',self.mainWin)
        self.cameraMenuSettingsExposure.triggered.connect(self.setCamExposure)
        self.cameraMenuSettingsFrameRate = QtWidgets.QAction('Frame Rate',self.mainWin)
        self.cameraMenuSettingsFrameRate.triggered.connect(self.setCamFrameRate)
        self.cameraMenuSettingsItems = (self.cameraMenuSettingsBufferSize,self.cameraMenuSettingsBinning,self.cameraMenuSettingsExposure,self.cameraMenuSettingsFrameRate)
        self.cameraMenuSettings.addActions(self.cameraMenuSettingsItems)
        self.cameraMenuNidaq = self.cameraMenu.addMenu('NIDAQ IO')
        self.cameraMenuNidaq.setEnabled(False)
        self.cameraMenuNidaqIn = QtWidgets.QAction('Use Save Trigger (NIDAQ Input P0.0)',self.mainWin,checkable=True)
        self.cameraMenuNidaqIn.triggered.connect(self.setNidaqIO)
        self.cameraMenuNidaqOut = QtWidgets.QAction('Signal Saved Frames (NIDAQ Output P1.0)',self.mainWin,checkable=True)
        self.cameraMenuNidaqOut.triggered.connect(self.setNidaqIO)
        self.cameraMenuNidaq.addActions([self.cameraMenuNidaqIn,self.cameraMenuNidaqOut])
        self.cameraMenuSetSavePath = QtWidgets.QAction('Set Save Path',self.mainWin)
        self.cameraMenuSetSavePath.triggered.connect(self.setCamSavePath)
        self.cameraMenuSetSaveBaseName = QtWidgets.QAction('Set Save Basename',self.mainWin)
        self.cameraMenuSetSaveBaseName.triggered.connect(self.setCamSaveBaseName)
        self.cameraMenuSetSaveFileType = QtWidgets.QAction('Set Save File Type',self.mainWin)
        self.cameraMenuSetSaveFileType.triggered.connect(self.setCamSaveFileType)
        self.cameraMenu.addActions([self.cameraMenuSetSavePath,self.cameraMenuSetSaveBaseName,self.cameraMenuSetSaveFileType])
        # pupil tracking menu
        self.trackMenu = self.menuBar.addMenu('Pupil Tracking')
        self.trackMenu.setEnabled(False)
        self.trackMenuStopTracking = QtWidgets.QAction('Stop Tracking',self.mainWin,checkable=True)
        self.trackMenuStopTracking.triggered.connect(self.toggleStopTracking)
        self.trackMenuSetDataNan = QtWidgets.QAction('Set Data NaN',self.mainWin,checkable=True)
        self.trackMenuSetDataNan.triggered.connect(self.toggleSetDataNan)
        self.trackMenu.addActions([self.trackMenuStopTracking,self.trackMenuSetDataNan])
        self.trackMenuMmPerPix = self.trackMenu.addMenu('mm/pixel')
        self.trackMenuMmPerPixSet = QtWidgets.QAction('Set',self.mainWin)
        self.trackMenuMmPerPixSet.triggered.connect(self.setMmPerPix)
        self.trackMenuMmPerPixMeasure = QtWidgets.QAction('Measure',self.mainWin,enabled=False)
        self.trackMenuMmPerPixMeasure.triggered.connect(self.measureMmPerPix)
        self.trackMenuMmPerPix.addActions([self.trackMenuMmPerPixSet,self.trackMenuMmPerPixMeasure])
        self.trackMenuBlurImage = QtWidgets.QAction('Guassian Blur Image',self.mainWin,checkable=True)
        self.trackMenuBlurImage.triggered.connect(self.setBlurImage)
        self.trackMenuSetBlurSigma = QtWidgets.QAction('Set Blur Sigma',self.mainWin)
        self.trackMenuSetBlurSigma.triggered.connect(self.setBlurSigma)
        self.trackMenuExpImage = QtWidgets.QAction('Exponentiate Image',self.mainWin,checkable=True)
        self.trackMenuExpImage.triggered.connect(self.setExponentiateImage)
        self.trackMenuSetExp = QtWidgets.QAction('Set Exponent',self.mainWin)
        self.trackMenuSetExp.triggered.connect(self.setExponent)
        self.trackMenu.addActions([self.trackMenuBlurImage,self.trackMenuSetBlurSigma,self.trackMenuExpImage,self.trackMenuSetExp])
        self.trackMenuReflectType = self.trackMenu.addMenu('Reflection Type')
        self.trackMenuReflectTypeSpot = QtWidgets.QAction('Spot',self.mainWin,checkable=True)
        self.trackMenuReflectTypeSpot.setChecked(True)
        self.trackMenuReflectTypeSpot.triggered.connect(self.setReflectType)
        self.trackMenuReflectTypeRing = QtWidgets.QAction('Ring',self.mainWin,checkable=True)
        self.trackMenuReflectTypeRing.triggered.connect(self.setReflectType)
        self.trackMenuReflectType.addActions([self.trackMenuReflectTypeSpot,self.trackMenuReflectTypeRing])
        self.trackMenuReflectThresh = QtWidgets.QAction('Reflection Threshold',self.mainWin)
        self.trackMenuReflectThresh.triggered.connect(self.setReflectThresh)
        self.trackMenu.addAction(self.trackMenuReflectThresh)
        self.trackMenuPupilSign = self.trackMenu.addMenu('Pupil Sign')
        self.trackMenuPupilSignNeg = QtWidgets.QAction('Negative',self.mainWin,checkable=True)
        self.trackMenuPupilSignNeg.setChecked(True)
        self.trackMenuPupilSignNeg.triggered.connect(self.setPupilSign)
        self.trackMenuPupilSignPos = QtWidgets.QAction('Positive',self.mainWin,checkable=True)
        self.trackMenuPupilSignPos.triggered.connect(self.setPupilSign)
        self.trackMenuPupilSign.addActions([self.trackMenuPupilSignNeg,self.trackMenuPupilSignPos])
        self.trackMenuPupilMethod = self.trackMenu.addMenu('Pupil Track Method')
        self.trackMenuPupilMethodStarburst = QtWidgets.QAction('Starburst',self.mainWin,checkable=True)
        self.trackMenuPupilMethodStarburst.setChecked(True)
        self.trackMenuPupilMethodStarburst.triggered.connect(self.setPupilTrackMethod)
        self.trackMenuPupilMethodLine = QtWidgets.QAction('Line',self.mainWin,checkable=True)
        self.trackMenuPupilMethodLine.triggered.connect(self.setPupilTrackMethod)
        self.trackMenuPupilMethodGradients = QtWidgets.QAction('Gradients',self.mainWin,checkable=True)
        self.trackMenuPupilMethodGradients.triggered.connect(self.setPupilTrackMethod)
        self.trackMenuPupilMethodIntensity = QtWidgets.QAction('Intensity',self.mainWin,checkable=True)
        self.trackMenuPupilMethodIntensity.triggered.connect(self.setPupilTrackMethod)
        self.trackMenuPupilMethod.addActions([self.trackMenuPupilMethodStarburst,self.trackMenuPupilMethodLine,self.trackMenuPupilMethodGradients,self.trackMenuPupilMethodIntensity])
        self.trackMenuAdaptThresh = QtWidgets.QAction('Adaptive Threshold',self.mainWin,checkable=True)
        self.trackMenuAdaptThresh.triggered.connect(self.setAdaptiveThreshold)
        self.trackMenuCircularity = QtWidgets.QAction('Circularity',self.mainWin)
        self.trackMenuCircularity.triggered.connect(self.setCircularityThresh)
        self.trackMenu.addActions([self.trackMenuAdaptThresh,self.trackMenuCircularity])
        self.trackMenuLineOrigin = self.trackMenu.addMenu('Line Origin')
        self.trackMenuLineOrigin.setEnabled(False)
        self.trackMenuLineOriginLeft = QtWidgets.QAction('Left',self.mainWin,checkable=True)
        self.trackMenuLineOriginLeft.setChecked(True)
        self.trackMenuLineOriginLeft.triggered.connect(self.setPupilEdgeLineOrigin)
        self.trackMenuLineOriginRight = QtWidgets.QAction('Right',self.mainWin,checkable=True)
        self.trackMenuLineOriginRight.triggered.connect(self.setPupilEdgeLineOrigin)
        self.trackMenuLineOrigin.addActions([self.trackMenuLineOriginLeft,self.trackMenuLineOriginRight])
        self.trackMenuGradientDownsamp = QtWidgets.QAction('Gradient Downsample',self.mainWin,enabled=False)
        self.trackMenuGradientDownsamp.triggered.connect(self.setPupilGradientDownsample)
        self.trackMenu.addAction(self.trackMenuGradientDownsamp)
        # analysis menu
        self.analysisMenu = self.menuBar.addMenu('Analysis')
        self.analysisMenu.setEnabled(False)
        self.analysisMenuConvert = self.analysisMenu.addMenu('Convert')
        self.analysisMenuConvertPixToDeg = QtWidgets.QAction('Pixels to Degrees',self.mainWin)
        self.analysisMenuConvertPixToDeg.triggered.connect(self.pixToDeg)
        self.analysisMenuConvertDegToPix = QtWidgets.QAction('Degrees to Pixels',self.mainWin)
        self.analysisMenuConvertDegToPix.triggered.connect(self.degToPix)
        self.analysisMenuConvert.addActions([self.analysisMenuConvertPixToDeg,self.analysisMenuConvertDegToPix])
        self.analysisMenuAnalyzeAll = QtWidgets.QAction('Analyze All Frames',self.mainWin)
        self.analysisMenuAnalyzeAll.triggered.connect(self.analyzeAllFrames)
        self.analysisMenuFrameIntervals = QtWidgets.QAction('Plot Frame Intervals',self.mainWin)
        self.analysisMenuFrameIntervals.triggered.connect(self.plotFrameIntervals)
        self.analysisMenu.addActions([self.analysisMenuAnalyzeAll,self.analysisMenuFrameIntervals])
        self.analysisMenuSaccades = self.analysisMenu.addMenu('Saccades')
        self.analysisMenuSaccadesFind = QtWidgets.QAction('Find',self.mainWin)
        self.analysisMenuSaccadesFind.triggered.connect(self.findSaccades)
        self.analysisMenuSaccadesDelete = QtWidgets.QAction('Delete All',self.mainWin)
        self.analysisMenuSaccadesDelete.triggered.connect(self.deleteAllSaccades)
        self.analysisMenuSaccadesSmooth = QtWidgets.QAction('Smoothing',self.mainWin)
        self.analysisMenuSaccadesSmooth.triggered.connect(self.setSaccadeSmooth)
        self.analysisMenuSaccadesThresh = QtWidgets.QAction('Threshold',self.mainWin)
        self.analysisMenuSaccadesThresh.triggered.connect(self.setSaccadeThresh)
        self.analysisMenuSaccadesRefractory = QtWidgets.QAction('Refractory Period',self.mainWin)
        self.analysisMenuSaccadesRefractory.triggered.connect(self.setSaccadeRefractoryPeriod)
        self.analysisMenuSaccades.addActions([self.analysisMenuSaccadesFind,self.analysisMenuSaccadesDelete,self.analysisMenuSaccadesThresh,self.analysisMenuSaccadesSmooth,self.analysisMenuSaccadesRefractory])
        # layout
        self.createVideoLayout()
        self.mainWin.show()
    def createLayoutItems(self):
        """Create the widgets shared by both layouts: the image view with
        its tracking overlays, the start/ROI buttons, the save checkbox,
        and the frame-number spinbox (disabled until data is loaded).
        """
        # image window
        self.imageLayout = pg.GraphicsLayoutWidget()
        self.imageViewBox = self.imageLayout.addViewBox(lockAspect=1,invertY=True,enableMouse=False,enableMenu=False)
        self.imageViewBox.keyPressEvent = self.mainWinKeyPressEvent
        self.imageItem = pg.ImageItem()
        self.imageItem.mouseClickEvent = self.imageMouseClickEvent
        self.imageItem.mouseDoubleClickEvent = self.imageDoubleClickEvent
        self.imageViewBox.addItem(self.imageItem)
        # Overlays: yellow marks for the pupil fit, red for the corneal
        # reflection.
        self.pupilCenterPlot = pg.PlotDataItem(x=[],y=[],symbol='+',symbolSize=10,symbolPen='y')
        self.imageViewBox.addItem(self.pupilCenterPlot)
        self.pupilEllipsePlot = pg.PlotDataItem(x=[],y=[],pen='y')
        self.imageViewBox.addItem(self.pupilEllipsePlot)
        self.pupilEdgePtsPlot = pg.PlotDataItem(x=[],y=[],pen=None,symbol='o',symbolSize=4,symbolPen='y')
        self.imageViewBox.addItem(self.pupilEdgePtsPlot)
        self.reflectCenterPlot = pg.PlotDataItem(x=[],y=[],symbol='+',symbolSize=10,symbolPen='r')
        self.imageViewBox.addItem(self.reflectCenterPlot)
        # buttons
        self.startVideoButton = QtWidgets.QPushButton('Start Video',checkable=True)
        self.startVideoButton.clicked.connect(self.startVideo)
        self.roiButton = QtWidgets.QPushButton('Set ROI',checkable=True)
        self.roiButton.clicked.connect(self.setROI)
        self.buttons = (self.startVideoButton,self.roiButton)
        self.saveCheckBox = QtWidgets.QCheckBox('Save Video Data',enabled=False)
        # frame navigation
        self.frameNumSpinBox = QtWidgets.QSpinBox()
        self.frameNumSpinBox.setPrefix('Frame: ')
        self.frameNumSpinBox.setSuffix(' of 0')
        self.frameNumSpinBox.setRange(0,1)
        self.frameNumSpinBox.setSingleStep(1)
        self.frameNumSpinBox.setValue(0)
        self.frameNumSpinBox.setEnabled(False)
        self.frameNumSpinBox.valueChanged.connect(self.goToFrame)
        # Signals stay blocked until frame navigation is meaningful.
        self.frameNumSpinBox.blockSignals(True)
    def createVideoLayout(self):
        """Build the simple video-only layout: image view plus the basic
        controls, in a 2x4 grid."""
        self.mainWidget = QtWidgets.QWidget()
        self.mainWin.setCentralWidget(self.mainWidget)
        self.mainLayout = QtWidgets.QGridLayout()
        self.setLayoutSize(500,500,2,4)
        self.createLayoutItems()
        self.mainWidget.setLayout(self.mainLayout)
        self.mainLayout.addWidget(self.startVideoButton,0,0,1,1)
        self.mainLayout.addWidget(self.roiButton,0,1,1,1)
        self.mainLayout.addWidget(self.imageLayout,1,0,2,2)
        self.mainLayout.addWidget(self.saveCheckBox,3,0,1,1)
        self.mainLayout.addWidget(self.frameNumSpinBox,3,1,1,1)
    def createPupilTrackingLayout(self):
        """Build the wide layout with the image view on the left and the
        pupil area/X/Y data plots (plus saccade markers, tracking-parameter
        plots, and navigation lines) on the right, in a 20x4 grid.
        """
        self.mainWidget = QtWidgets.QWidget()
        self.mainWin.setCentralWidget(self.mainWidget)
        self.mainLayout = QtWidgets.QGridLayout()
        self.setLayoutSize(1000,500,20,4)
        self.mainWidget.setLayout(self.mainLayout)
        self.createLayoutItems()
        # buttons
        self.findPupilButton = QtWidgets.QPushButton('Find Pupil',checkable=True)
        self.findPupilButton.clicked.connect(self.findPupil)
        self.findReflectButton = QtWidgets.QPushButton('Find Reflection',checkable=True)
        self.findReflectButton.clicked.connect(self.findReflect)
        self.setMaskButton = QtWidgets.QPushButton('Set Masks',checkable=True)
        self.setMaskButton.clicked.connect(self.setMask)
        self.buttons += (self.findPupilButton,self.findReflectButton,self.setMaskButton)
        self.useMaskCheckBox = QtWidgets.QCheckBox('Use Masks')
        self.useMaskCheckBox.clicked.connect(self.setUseMask)
        # data plots
        self.dataPlotLayout = pg.GraphicsLayoutWidget()
        self.pupilAreaPlotItem = self.dataPlotLayout.addPlot(row=0,col=0,enableMenu=False)
        self.pupilAreaPlotItem.setMouseEnabled(x=False,y=False)
        self.pupilAreaPlotItem.hideButtons()
        self.pupilAreaPlotItem.setLabel('left','Pupil Area')
        self.pupilAreaPlot = self.pupilAreaPlotItem.plot(x=[0,self.defaultDataPlotDur],y=[0,0])
        self.pupilAreaPlotItem.disableAutoRange()
        self.pupilXPlotItem = self.dataPlotLayout.addPlot(row=1,col=0,enableMenu=False)
        self.pupilXPlotItem.setMouseEnabled(x=False,y=False)
        self.pupilXPlotItem.hideButtons()
        self.pupilXPlotItem.setLabel('left','Pupil X')
        self.pupilXPlotItem.mouseClickEvent = self.dataPlotMouseClickEvent
        self.pupilXPlotItem.mouseDoubleClickEvent = self.dataPlotDoubleClickEvent
        self.pupilXPlot = self.pupilXPlotItem.plot(x=[0,self.defaultDataPlotDur],y=[0,0])
        self.pupilXPlotItem.disableAutoRange()
        self.pupilYPlotItem = self.dataPlotLayout.addPlot(row=2,col=0,enableMenu=False)
        self.pupilYPlotItem.setMouseEnabled(x=False,y=False)
        self.pupilYPlotItem.hideButtons()
        self.pupilYPlotItem.setLabel('left','Pupil Y')
        self.pupilYPlotItem.setLabel('bottom','Time (s)')
        self.pupilYPlot = self.pupilYPlotItem.plot(x=[0,self.defaultDataPlotDur],y=[0,0])
        self.pupilYPlotItem.disableAutoRange()
        # saccade plots
        # Down/up triangle symbols mark negative/positive saccades on the
        # pupil X plot.
        triangles = [QtGui.QPainterPath() for _ in range(2)]
        xpts = [(-0.5,0,0.5)]*2
        ypts = [(-0.5,0.5,-0.5),(0.5,-0.5,0.5)]
        for tri,x,y in zip(triangles,xpts,ypts):
            tri.moveTo(x[0],y[0])
            for i in (1,2):
                tri.lineTo(x[i],y[i])
            tri.closeSubpath()
        downTriangle,upTriangle = triangles
        self.negSaccadesPlot = self.pupilXPlotItem.plot(x=[],y=[],pen=None,symbol=downTriangle,symbolSize=10,symbolPen='g',symbolBrush='g')
        self.posSaccadesPlot = self.pupilXPlotItem.plot(x=[],y=[],pen=None,symbol=upTriangle,symbolSize=10,symbolPen='b',symbolBrush='b')
        # pupil tracking parameter plots
        numPupilEdges = 18
        self.radialProfilePlot = []
        self.radialProfilePixAboveThreshPlot = []
        for i in range(numPupilEdges):
            self.radialProfilePlot.append(self.pupilAreaPlotItem.plot(x=[0],y=[0]))
            self.radialProfilePixAboveThreshPlot.append(self.pupilXPlotItem.plot(x=[0],y=[0]))
        self.edgeDistPlot = self.pupilYPlotItem.plot(x=[0],y=[0])
        # Draggable threshold lines for tuning the pupil-edge detection.
        self.pupilEdgeThreshLine = pg.InfiniteLine(pos=0,angle=0,pen='r',movable=True,bounds=(0,254))
        self.pupilEdgeThreshLine.sigPositionChangeFinished.connect(self.setPupilEdgeThresh)
        self.numPixAboveThreshLine = pg.InfiniteLine(pos=0,angle=0,pen='r',movable=True,bounds=(1,1e4))
        self.numPixAboveThreshLine.sigPositionChangeFinished.connect(self.setMinNumPixAboveThresh)
        self.edgeDistUpperThreshLine = pg.InfiniteLine(pos=0,angle=0,pen='r',movable=True,bounds=(0,1e4))
        self.edgeDistUpperThreshLine.sigPositionChangeFinished.connect(self.setEdgeDistThresh)
        self.edgeDistLowerThreshLine = pg.InfiniteLine(pos=0,angle=0,pen='r',movable=True,bounds=(0,1e4))
        self.edgeDistLowerThreshLine.sigPositionChangeFinished.connect(self.setEdgeDistThresh)
        # frame navigation
        # Draggable vertical lines synchronized across the three data plots.
        self.pupilAreaFrameNumLine = pg.InfiniteLine(pos=0,pen='r',movable=True,bounds=(0,1))
        self.pupilXFrameNumLine = pg.InfiniteLine(pos=0,pen='r',movable=True,bounds=(0,1))
        self.pupilYFrameNumLine = pg.InfiniteLine(pos=0,pen='r',movable=True,bounds=(0,1))
        self.frameNumLines = (self.pupilAreaFrameNumLine,self.pupilXFrameNumLine,self.pupilYFrameNumLine)
        for line in self.frameNumLines:
            line.sigDragged.connect(self.frameNumLineDragged)
            line.sigPositionChangeFinished.connect(self.frameNumLinePosChangeFin)
        # data plot duration control
        self.plotDurLayout = QtWidgets.QFormLayout()
        self.plotDurEdit = QtWidgets.QLineEdit(str(self.defaultDataPlotDur))
        self.plotDurEdit.setAlignment(QtCore.Qt.AlignHCenter)
        self.plotDurEdit.editingFinished.connect(self.changePlotWindowDur)
        self.plotDurLayout.addRow('Plot Duration',self.plotDurEdit)
        # layout
        self.mainLayout.addWidget(self.imageLayout,0,0,4,10)
        self.mainLayout.addWidget(self.startVideoButton,0,10,1,2)
        self.mainLayout.addWidget(self.roiButton,0,12,1,2)
        self.mainLayout.addWidget(self.findPupilButton,0,14,1,2)
        self.mainLayout.addWidget(self.findReflectButton,0,16,1,2)
        self.mainLayout.addWidget(self.setMaskButton,0,18,1,2)
        self.mainLayout.addWidget(self.dataPlotLayout,1,10,2,10)
        self.mainLayout.addWidget(self.saveCheckBox,3,10,1,2)
        self.mainLayout.addWidget(self.useMaskCheckBox,3,12,1,2)
        self.mainLayout.addWidget(self.frameNumSpinBox,3,14,1,3)
        self.mainLayout.addLayout(self.plotDurLayout,3,17,1,3)
        self.mainWin.keyPressEvent = self.mainWinKeyPressEvent
def setLayoutSize(self,winWidth,winHeight,nCols,nRows):
self.app.processEvents()
self.mainWin.resize(winWidth,winHeight)
mainWinRect = self.mainWin.frameGeometry()
mainWinRect.moveCenter(QtWidgets.QDesktopWidget().availableGeometry().center())
self.mainWin.move(mainWinRect.topLeft())
for col in range(nCols):
self.mainLayout.setColumnMinimumWidth(col,winWidth/nCols)
self.mainLayout.setColumnStretch(col,1)
rowHeights = np.zeros(nRows)
rowHeights[[0,-1]] = 0.05*winHeight
rowHeights[1:-1] = 0.9*winHeight/(nRows-2)
for row in range(nRows):
self.mainLayout.setRowMinimumHeight(row,rowHeights[row])
self.mainLayout.setRowStretch(row,1)
def showPupilTrackingPlots(self):
self.turnOffButtons()
self.app.processEvents()
self.showTracking = self.optionsMenuShowTracking.isChecked()
if self.showTracking:
self.createPupilTrackingLayout()
else:
self.createVideoLayout()
if self.image is not None:
self.initDisplay()
def mainWinCloseEvent(self,event):
if self.cam is not None:
self.closeCamera()
elif self.videoIn is not None:
self.closeVideo()
elif self.dataFileIn is not None:
self.closeDataFileIn()
event.accept()
def saveFrameData(self):
if self.mainWin.sender()==self.fileMenuSaveDataNpz:
fileType = '.npz'
else:
fileType = '.mat'
filePath,fileType = QtWidgets.QFileDialog.getSaveFileName(self.mainWin,'Save As',self.fileOpenSavePath,'*'+fileType)
if filePath=='':
return
self.fileOpenSavePath = os.path.dirname(filePath)
startFrame,endFrame = self.getFrameSaveRange()
if startFrame is None:
return
frameData = np.zeros((endFrame-startFrame+1,self.roiSize[1],self.roiSize[0]),dtype=self.image.dtype)
if self.dataFileIn is None:
self.videoIn.set(cv2.CAP_PROP_POS_FRAMES,startFrame-1)
for i,frame in enumerate(range(startFrame,endFrame+1)):
if self.dataFileIn is None:
isImage,image = self.videoIn.read()
image = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
else:
image = self.dataFileIn['frame'][frame-1]
frameData[i] = image[self.roiInd]
if self.dataFileIn is None:
self.videoIn.set(cv2.CAP_PROP_POS_FRAMES,self.frameNum-1)
data = {'frameData': frameData}
if fileType=='.npz':
np.savez_compressed(filePath,**data)
else:
scipy.io.savemat(filePath,data,do_compression=True)
def saveTrackingData(self):
if self.mainWin.sender()==self.fileMenuSaveDataHdf5:
fileType = '.hdf5'
elif self.mainWin.sender()==self.fileMenuSaveDataNpz:
fileType = '.npz'
else:
fileType = '.mat'
filePath,fileType = QtWidgets.QFileDialog.getSaveFileName(self.mainWin,'Save As',self.fileOpenSavePath,'*'+fileType)
if filePath=='':
return
self.fileOpenSavePath = os.path.dirname(filePath)
self.reflectCenter += self.roiPos
self.pupilCenter += self.roiPos
params = ('mmPerPixel','frameID','frameTimes','reflectCenter','pupilCenter','pupilArea','pupilX','pupilY','negSaccades','posSaccades')
if fileType=='.hdf5':
dataFile = h5py.File(filePath,'w',libver='latest')
dataFile.attrs.create('mmPerPixel',self.mmPerPixel)
for param in params[1:]:
dataFile.create_dataset(param,data=getattr(self,param),compression='gzip',compression_opts=1)
dataFile.close()
else:
data = {param: getattr(self,param) for param in params}
if fileType=='.npz':
np.savez_compressed(filePath,**data)
else:
scipy.io.savemat(filePath,data,do_compression=True)
    def saveImage(self):
        """Save the current full (uncropped) image to a .png file."""
        if self.image is None:
            return
        filePath,fileType = QtWidgets.QFileDialog.getSaveFileName(self.mainWin,'Save As',self.fileOpenSavePath,'*.png')
        if filePath=='':
            return
        self.fileOpenSavePath = os.path.dirname(filePath)
        # Note: writes self.image as-is, without applying self.roiInd.
        cv2.imwrite(filePath,self.image)
    def saveMovie(self):
        """Write a user-selected range of ROI-cropped frames to an .mp4 via skvideo/ffmpeg."""
        filePath,fileType = QtWidgets.QFileDialog.getSaveFileName(self.mainWin,'Save As',self.fileOpenSavePath,'*.mp4')
        if filePath=='':
            return
        self.fileOpenSavePath = os.path.dirname(filePath)
        # skvideo is imported lazily; bail out if it cannot be initialized.
        if self.skvideo is None:
            self.initSkvideo()
            if self.skvideo is None:
                return
        startFrame,endFrame = self.getFrameSaveRange()
        if startFrame is None:
            return
        vidOut = self.skvideo.io.FFmpegWriter(filePath,inputdict={'-r':str(self.frameRate)},outputdict={'-r':str(self.frameRate),'-vcodec':'libx264','-crf':'17'})
        if self.dataFileIn is None:
            # Reading from video: seek to the first requested frame.
            self.videoIn.set(cv2.CAP_PROP_POS_FRAMES,startFrame-1)
        for frame in range(startFrame,endFrame+1):
            if self.dataFileIn is None:
                isImage,image = self.videoIn.read()
                image = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
            else:
                image = self.dataFileIn['frames'][frame-1]
            vidOut.writeFrame(image[self.roiInd])
        vidOut.close()
        if self.dataFileIn is None:
            # Restore the video position to the currently displayed frame.
            self.videoIn.set(cv2.CAP_PROP_POS_FRAMES,self.frameNum-1)
    def getFrameSaveRange(self):
        """Prompt for a 'start-end' frame range, clamped to [1, numFrames].

        Returns (startFrame, endFrame), or (None, None) if the user cancels.
        """
        # Default range: the current plot window ending at the current frame.
        startFrame,endFrame = (self.frameNum-self.numDataPlotPts,self.frameNum) if self.frameNum>self.numDataPlotPts else (1,self.numDataPlotPts)
        # NOTE(review): the title says 'Save Movie' even when called from
        # saveFrameData, and malformed input (no '-', non-numeric) would raise
        # ValueError here — confirm whether that is acceptable.
        text,ok = QtWidgets.QInputDialog.getText(self.mainWin,'Save Movie','Enter frames range:',text=str(startFrame)+'-'+str(endFrame))
        if not ok:
            return None,None
        startFrame,endFrame = [int(n) for n in text.split('-')]
        if startFrame<1:
            startFrame = 1
        if endFrame>self.numFrames:
            endFrame = self.numFrames
        return startFrame,endFrame
    def loadFrameData(self):
        """Open a frame source: an .hdf5 data file or any video file readable by OpenCV."""
        filePath,fileType = QtWidgets.QFileDialog.getOpenFileName(self.mainWin,'Choose File',self.fileOpenSavePath,'')
        if filePath=='':
            return
        self.fileOpenSavePath = os.path.dirname(filePath)
        # Close any currently open source before switching.
        if self.cam is not None:
            self.cameraMenuUseCam.setChecked(False)
            self.closeCamera()
        elif self.dataFileIn is not None:
            self.closeDataFileIn()
        elif self.videoIn is not None:
            self.closeVideo()
        self.mainWin.setWindowTitle('MouseEyeTracker'+' '+filePath)
        fileName,fileExt = os.path.splitext(os.path.basename(filePath))
        if fileExt=='.hdf5':
            # hdf5: frames plus metadata stored by processCamFrame.
            self.dataFileIn = h5py.File(filePath,'r')
            self.frameRate = self.dataFileIn.attrs.get('frameRate')
            self.mmPerPixel = self.dataFileIn.attrs.get('mmPerPixel')
            self.frameID = self.dataFileIn['frameID'][:]
            self.frameTimes = self.dataFileIn['frameTimes'][:]
            self.numFrames = self.dataFileIn['frames'].shape[0]
            if np.isnan(self.frameRate):
                # Fall back to the rate implied by the recorded timestamps.
                self.frameRate = self.numFrames/(self.frameTimes[-1]-self.frameTimes[0])
        else:
            # Any other extension: treat as video; look for a sibling .hdf5
            # with metadata (timestamps, scale) saved alongside the movie.
            self.videoIn = cv2.VideoCapture(filePath)
            self.frameRate = self.videoIn.get(cv2.CAP_PROP_FPS)
            self.numFrames = int(round(self.videoIn.get(cv2.CAP_PROP_FRAME_COUNT)))
            dataFilePath = os.path.join(os.path.dirname(filePath),fileName+'.hdf5')
            if os.path.isfile(dataFilePath):
                dataFile = h5py.File(dataFilePath,'r')
                self.mmPerPixel = dataFile.attrs.get('mmPerPixel')
                self.frameID = dataFile['frameID'][:]
                self.frameTimes = dataFile['frameTimes'][:]
            else:
                self.mmPerPixel = np.nan
                self.frameID = np.nan
                self.frameTimes = np.nan
        if not np.all(np.isnan(self.frameTimes)):
            # Make timestamps relative to the first frame.
            self.frameTimes -= self.frameTimes[0]
        self.fileMenuSave.setEnabled(True)
        self.frameNum = 1
        self.getVideoImage()
        self.resetROI()
        self.initDisplay()
def loadTrackingData(self):
filePath,fileType = QtWidgets.QFileDialog.getOpenFileName(self.mainWin,'Choose File',self.fileOpenSavePath,'Files (*.hdf5 *.npz *.mat)')
if filePath=='':
return
self.fileOpenSavePath = os.path.dirname(filePath)
params = ('mmPerPixel','frameID','frameTimes','reflectCenter','pupilCenter','pupilArea','pupilX','pupilY','negSaccades','posSaccades')
if fileType=='.hdf5':
dataFile = h5py.File(filePath,'r')
if 'mmPerPixel' in dataFile.attrs.keys():
self.mmPerPixel = dataFile.attrs.get('mmPerPixel')
for param in set(dataFile.keys()) & set(params[1:]):
setattr(self,param,dataFile[param][:])
dataFile.close()
else:
data = np.load(filePath) if fileType=='.npz' else scipy.io.loadmat(filePath,squeeze_me=True)
for param in set(data.keys()) & set(params):
setattr(self,param,data[param])
self.dataIsLoaded = True
self.reflectCenter -= self.roiPos
self.pupilCenter -= self.roiPos
self.pupilAreaRange = [self.pupilArea[self.frameNum-1]]*2
self.pupilXRange = [self.pupilX[self.frameNum-1]]*2
self.pupilYRange = [self.pupilY[self.frameNum-1]]*2
self.setDataPlotXRange()
self.updatePupilDataPlot()
self.plotSaccades()
    def closeDataFileIn(self):
        """Close the open hdf5 frame-data file and reset related state."""
        self.closeFileCleanup()
        self.dataIsLoaded = False
        self.dataFileIn.close()
        self.dataFileIn = None
    def closeVideo(self):
        """Release the open OpenCV video capture and reset related state."""
        self.closeFileCleanup()
        self.videoIn.release()
        self.videoIn = None
    def closeFileCleanup(self):
        """Shared teardown when any frame source closes: disable UI, clear data and plots."""
        self.turnOffButtons()
        # Disable and zero the frame spin box without emitting signals.
        self.frameNumSpinBox.setEnabled(False)
        self.frameNumSpinBox.blockSignals(True)
        self.frameNumSpinBox.setRange(0,1)
        self.frameNumSpinBox.setValue(0)
        self.frameNumSpinBox.setSuffix(' of 0')
        self.fileMenuOpenData.setEnabled(False)
        self.fileMenuSave.setEnabled(False)
        self.analysisMenu.setEnabled(False)
        self.image = None
        self.frameTimes = []
        if self.showTracking:
            self.removeFrameNumLines()
            for line in self.frameNumLines:
                line.setValue(0)
            self.resetPupilTracking()
            self.deleteAllSaccades()
        self.mainWin.setWindowTitle('MouseEyeTracker')
    def initCamera(self):
        """Open (or close) the camera in response to the use-camera menu toggle."""
        if self.cameraMenuUseCam.isChecked():
            # Close any file-based source before taking over with the camera.
            if self.dataFileIn is not None:
                self.closeDataFileIn()
            elif self.videoIn is not None:
                self.closeVideo()
            if self.camConfig:
                # A loaded config supplies camName/camType; just bring drivers up.
                if self.camType=='vimba':
                    self.initVimba()
                if self.nidaq:
                    self.initNidaq()
                if self.camSaveFileType=='.mp4' and self.skvideo is None:
                    self.initSkvideo()
                    # Fall back to hdf5 saving if skvideo/ffmpeg is unavailable.
                    if self.skvideo is None:
                        self.camSaveFileType = '.hdf5'
            else:
                # No config: ask the user to pick a camera and nidaq device.
                self.getCamera()
                self.initNidaq()
            if self.camType=='vimba':
                self.cam = self.vimba.camera(self.camName)
                self.cam.open()
            elif self.camType=='webcam':
                # camName is 'webcam<N>'; slice off the prefix for the device index.
                self.cam = cv2.VideoCapture(int(self.camName[6:]))
            else:
                # No camera selected; undo the menu toggle.
                self.cameraMenuUseCam.setChecked(False)
                return
            self.mainWin.setWindowTitle('MouseEyeTracker'+' '+'camera: '+self.camName+' '+'nidaq: '+str(self.nidaq))
            self.setCamProps()
            self.frameNum = 0
            self.getCamImage()
            self.initDisplay()
            self.cameraMenuSettings.setEnabled(True)
            # Webcams support only binning and exposure; vimba supports all settings.
            for item in self.cameraMenuSettingsItems:
                if self.camType=='vimba' or item in (self.cameraMenuSettingsBinning,self.cameraMenuSettingsExposure):
                    item.setEnabled(True)
                else:
                    item.setEnabled(False)
            self.cameraMenuLoadConfig.setEnabled(False)
            self.cameraMenuClearConfig.setEnabled(False)
            self.cameraMenuSaveConfig.setEnabled(True)
            self.trackMenuMmPerPixMeasure.setEnabled(True)
            if not self.cameraMenuNidaqIn.isChecked():
                self.saveCheckBox.setEnabled(True)
        else:
            self.closeCamera()
def getCamera(self):
self.initVimba()
vimbaCams = [] if self.vimba is None else self.vimba.camera_ids()
webcams = []
i = 0
while True:
cam = cv2.VideoCapture(i)
isImage,image = cam.read()
if isImage:
webcams.append('webcam'+str(i))
i += 1
else:
break
selectedCam,ok = QtWidgets.QInputDialog.getItem(self.mainWin,'Choose Camera','Camera IDs:',vimbaCams+webcams,editable=False)
if ok:
self.camName = selectedCam
self.camType = 'vimba' if selectedCam in vimbaCams else 'webcam'
else:
self.camName = self.camType = None
    def initVimba(self):
        """Lazily import and start the pymba/Vimba driver; on failure set self.vimba to None."""
        try:
            if self.vimba is None:
                import pymba
                self.vimba = pymba.Vimba()
                self.vimba.startup()
                # Trigger GigE camera discovery and give it a moment to complete.
                self.vimba.system().run_feature_command("GeVDiscoveryAllOnce")
                time.sleep(0.2)
        except:
            # Best-effort: any failure (missing pymba, driver error) disables vimba.
            if self.vimba is not None:
                self.vimba.shutdown()
            self.vimba = None
            print('Unable to initialize vimba')
    def initNidaq(self):
        """Lazily set up nidaqmx digital I/O tasks; on failure set self.nidaq to None."""
        try:
            import nidaqmx
            if not self.camConfig:
                # No saved config: ask the user which nidaq device to use.
                deviceNames = nidaqmx.system._collections.device_collection.DeviceCollection().device_names
                selectedDevice,ok = QtWidgets.QInputDialog.getItem(self.mainWin,'Choose Nidaq Device','Nidaq Devices:',deviceNames,editable=False)
                if ok:
                    self.nidaq = selectedDevice
                    self.cameraMenuNidaqOut.setChecked(True)
                else:
                    return
            # Input line triggers saving; output line marks saved frames.
            self.nidaqDigitalIn = nidaqmx.Task()
            self.nidaqDigitalIn.di_channels.add_di_chan(self.nidaq+'/port0/line0',line_grouping=nidaqmx.constants.LineGrouping.CHAN_PER_LINE)
            self.nidaqDigitalOut = nidaqmx.Task()
            self.nidaqDigitalOut.do_channels.add_do_chan(self.nidaq+'/port1/line0',line_grouping=nidaqmx.constants.LineGrouping.CHAN_PER_LINE)
            self.cameraMenuNidaq.setEnabled(True)
        except:
            # Best-effort: any failure (missing nidaqmx, no device) disables nidaq.
            self.nidaq = None
            print('Unable to initialize nidaq')
    def initSkvideo(self):
        """Lazily import skvideo after asking for the ffmpeg location; on failure set self.skvideo to None."""
        try:
            if self.ffmpeg is None:
                ffmpegPath = QtWidgets.QFileDialog.getExistingDirectory(self.mainWin,'Select directory containing ffmpeg.exe','')
                if ffmpegPath!='':
                    self.ffmpeg = ffmpegPath
                else:
                    return
            import skvideo
            skvideo.setFFmpegPath(self.ffmpeg) # run this before importing skvideo.io
            import skvideo.io
            self.skvideo = skvideo
        except:
            # Best-effort: any failure disables mp4 support.
            self.ffmpeg = None
            print('Unable to initialize skvideo')
    def closeCamera(self):
        """Release the camera and nidaq resources and restore the camera-less UI state."""
        self.turnOffButtons()
        if self.camType=='vimba':
            self.cam.close()
            self.vimba.shutdown()
        else:
            self.cam.release()
        self.cam = None
        self.image = None
        if self.nidaq is not None:
            self.nidaqDigitalIn.close()
            self.nidaqDigitalOut.close()
            self.nidaq = None
        self.cameraMenuSettings.setEnabled(False)
        self.cameraMenuLoadConfig.setEnabled(True)
        self.cameraMenuClearConfig.setEnabled(True)
        self.cameraMenuSaveConfig.setEnabled(False)
        self.trackMenuMmPerPixMeasure.setEnabled(False)
        self.saveCheckBox.setEnabled(False)
        if self.showTracking:
            self.resetPupilTracking()
        self.mainWin.setWindowTitle('MouseEyeTracker')
    def startCamera(self,bufferSize=1):
        """Prepare acquisition: start nidaq tasks and announce vimba frame buffers.

        bufferSize: number of vimba frames to pre-allocate (ignored for webcams).
        """
        self.cameraMenuSettings.setEnabled(False)
        if self.nidaq is not None:
            self.nidaqDigitalIn.start()
            self.nidaqDigitalOut.start()
            # Ensure the output line starts low.
            self.nidaqDigitalOut.write(False)
        if self.camType=='vimba':
            for _ in range(bufferSize):
                frame = self.cam.new_frame()
                frame.announce()
                self.camFrames.append(frame)
            self.cam.start_capture()
    def stopCamera(self):
        """Stop acquisition, revoke vimba frame buffers, and close any open output file."""
        if self.camType=='vimba':
            self.cam.AcquisitionStop()
            self.cam.end_capture()
            self.cam.flush_capture_queue()
            self.cam.revoke_all_frames()
            self.camFrames = []
        if self.dataFileOut is not None:
            self.closeDataFileOut()
        if self.nidaq is not None:
            self.nidaqDigitalIn.stop()
            self.nidaqDigitalOut.stop()
        self.cameraMenuSettings.setEnabled(True)
    def getCamImage(self):
        """Acquire a single frame from the camera into self.image (grayscale)."""
        self.startCamera()
        if self.camType=='vimba':
            # Synchronous single-frame capture with the one announced buffer.
            frame = self.camFrames[0]
            frame.queue_for_capture()
            self.cam.AcquisitionStart()
            frame.wait_for_capture()
            self.image = frame.buffer_data_numpy()
        else:
            isImage,image = self.cam.read()
            self.image = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
        self.stopCamera()
    def setCamProps(self):
        """Apply camera properties (binning, ROI, exposure, frame rate, sync output).

        Defaults are assigned only when no saved config (self.camConfig) is loaded.
        """
        if self.camType=='vimba':
            if not self.camConfig:
                self.camBinning = 1
            self.cam.feature('BinningHorizontal').value = self.camBinning
            self.cam.feature('BinningVertical').value = self.camBinning
            # Sensor size after binning, as (width, height).
            self.fullRoiSize = (self.cam.feature('WidthMax').value,self.cam.feature('HeightMax').value)
            if not self.camConfig:
                self.camBufferSize = 60
                # camExposure is a fraction of the frame interval for vimba.
                self.camExposure = 0.9
                self.frameRate = 60.0
                self.roiPos = (0,0)
                self.roiSize = self.fullRoiSize
                self.roiInd = np.s_[0:self.roiSize[1],0:self.roiSize[0]]
            self.cam.feature('PixelFormat').value ='Mono8'
            self.cam.feature('OffsetX').value = self.roiPos[0]
            self.cam.feature('OffsetY').value = self.roiPos[1]
            self.cam.feature('Width').value = self.roiSize[0]
            self.cam.feature('Height').value = self.roiSize[1]
            self.cam.feature('ExposureAuto').value = 'Off'
            # Exposure in microseconds: fraction of one frame interval.
            self.cam.feature('ExposureTimeAbs').value = self.camExposure*1e6/self.frameRate
            self.cam.feature('AcquisitionFrameRateAbs').value = self.frameRate
            self.cam.feature('AcquisitionMode').value = 'Continuous'
            self.cam.feature('TriggerMode').value = 'Off'
            self.cam.feature('TriggerSource').value = 'FixedRate'
            self.cam.feature('SyncOutSelector').value = 'SyncOut2'
            self.cam.feature('SyncOutSource').value = 'GPO'
            self.cam.feature('SyncOutLevels').value = 0
            self.cam.feature('SyncOutPolarity').value = 'Normal'
        else:
            # Webcam: OpenCV exposes far fewer controls.
            self.camBufferSize = None
            self.frameRate = np.nan
            self.webcamDefaultFrameShape = (int(self.cam.get(cv2.CAP_PROP_FRAME_HEIGHT)),int(self.cam.get(cv2.CAP_PROP_FRAME_WIDTH)))
            if not self.camConfig:
                self.camBinning = 1
                # camExposure is milliseconds for webcams.
                self.camExposure = 1
                self.roiPos = (0,0)
                self.roiSize = self.webcamDefaultFrameShape[::-1]
                self.roiInd = np.s_[self.roiPos[1]:self.roiPos[1]+self.roiSize[1],self.roiPos[0]:self.roiPos[0]+self.roiSize[0]]
            if self.camBinning>1:
                # Emulate binning by requesting a smaller capture resolution.
                h,w = [int(n/self.camBinning) for n in self.webcamDefaultFrameShape]
                self.cam.set(cv2.CAP_PROP_FRAME_HEIGHT,h)
                self.cam.set(cv2.CAP_PROP_FRAME_WIDTH,w)
                self.fullRoiSize = (w,h)
            else:
                self.fullRoiSize = self.webcamDefaultFrameShape[::-1]
            # OpenCV expects exposure as log2(seconds).
            self.cam.set(cv2.CAP_PROP_EXPOSURE,math.log(self.camExposure/1000,2))
def setCamBufferSize(self):
val,ok = QtWidgets.QInputDialog.getInt(self.mainWin,'Set Camera Buffer Size','Frames:',value=self.camBufferSize,min=1)
if not ok:
return
self.camBufferSize = val
    def setCamBinning(self):
        """Prompt for a new spatial binning factor and rescale all tracking ROIs/seeds to match."""
        val,ok = QtWidgets.QInputDialog.getInt(self.mainWin,'Set Camera Spatial Binning','Pixels:',value=self.camBinning,min=1,max=8)
        if not ok:
            return
        if self.showTracking:
            # Rescale every pixel-space quantity by old/new binning ratio.
            scaleFactor = self.camBinning/val
            if self.pupilCenterSeed is not None:
                self.pupilCenterSeed = [int(n*scaleFactor) for n in self.pupilCenterSeed]
            if self.reflectCenterSeed is not None:
                self.reflectCenterSeed = [int(n*scaleFactor) for n in self.reflectCenterSeed]
            if self.pupilRoi is not None:
                self.pupilRoiPos = [int(n*scaleFactor) for n in self.pupilRoiPos]
                self.pupilRoiSize = [int(n*scaleFactor) for n in self.pupilRoiSize]
            for i,roi in enumerate(self.reflectRoi):
                self.reflectRoiPos[i] = [int(n*scaleFactor) for n in roi.pos()]
                self.reflectRoiSize[i] = [int(n*scaleFactor) for n in roi.size()]
            if len(self.maskRoi)>0:
                for roi in self.maskRoi:
                    roi.setPos([int(n*scaleFactor) for n in roi.pos()])
                    roi.setSize([int(n*scaleFactor) for n in roi.size()])
                self.updateMaskIndex()
        self.camBinning = val
        if self.camType=='vimba':
            self.cam.feature('BinningHorizontal').value = val
            self.cam.feature('BinningVertical').value = val
        else:
            # Webcam: emulate binning by requesting a smaller capture resolution.
            h,w = [int(n/val) for n in self.webcamDefaultFrameShape]
            self.cam.set(cv2.CAP_PROP_FRAME_HEIGHT,h)
            self.cam.set(cv2.CAP_PROP_FRAME_WIDTH,w)
        self.resetROI()
        self.resetImage()
    def setCamExposure(self):
        """Prompt for a new exposure and apply it to the camera.

        Units differ by camera type: vimba uses a fraction of the frame
        interval; webcams use milliseconds.
        """
        if self.camType=='vimba':
            units = 'Fraction of frame interval:'
            minVal = 0.001
            maxVal = 0.99
        else:
            units = 'Exposure time (ms):'
            minVal = 0.001
            maxVal = 10000
        val,ok = QtWidgets.QInputDialog.getDouble(self.mainWin,'Set Camera Exposure',units,value=self.camExposure,min=minVal,max=maxVal,decimals=3)
        if not ok:
            return
        self.camExposure = val
        if self.camType=='vimba':
            # Convert fraction of frame interval to microseconds.
            self.cam.feature('ExposureTimeAbs').value = self.camExposure*1e6/self.frameRate
        else:
            # OpenCV expects exposure as log2(seconds).
            self.cam.set(cv2.CAP_PROP_EXPOSURE,math.log(val/1000,2))
    def setCamFrameRate(self):
        """Prompt for a new frame rate and apply it (vimba only, per the menu gating)."""
        val,ok = QtWidgets.QInputDialog.getDouble(self.mainWin,'Set Camera Frame Rate','Frames/s:',value=self.frameRate,min=0.01,max=119.30,decimals=2)
        if not ok:
            return
        self.frameRate = val
        self.cam.feature('AcquisitionFrameRateAbs').value = self.frameRate
        # Keep the exposure at the same fraction of the new frame interval.
        self.cam.feature('ExposureTimeAbs').value = self.camExposure*1e6/self.frameRate
        self.changePlotWindowDur()
    def setNidaqIO(self):
        """Toggle save-checkbox availability when the nidaq-input trigger option changes."""
        if self.mainWin.sender() is self.cameraMenuNidaqIn:
            # When saving is triggered by the nidaq input line, the manual
            # save checkbox is disabled.
            if self.cameraMenuNidaqIn.isChecked():
                self.saveCheckBox.setEnabled(False)
            else:
                if self.cam is not None:
                    self.saveCheckBox.setEnabled(True)
def setCamSavePath(self):
dirPath = QtWidgets.QFileDialog.getExistingDirectory(self.mainWin,'Choose Directory',self.camSavePath)
if dirPath!='':
self.camSavePath = dirPath
    def setCamSaveBaseName(self):
        """Prompt for the base name used for saved camera data files."""
        val,ok = QtWidgets.QInputDialog.getText(self.mainWin,'Set File Base Name','',text=self.camSaveBaseName)
        if ok:
            self.camSaveBaseName = val
def setCamSaveFileType(self):
fileType,ok = QtWidgets.QInputDialog.getItem(self.mainWin,'Choose Save File Type','File Type:',('.hdf5','.mp4'),editable=False)
if not ok:
return
if fileType=='.mp4' and self.skvideo is None:
self.initSkvideo()
if self.skvideo is not None:
self.camSaveFileType = fileType
else:
self.camSaveFileType = fileType
    def loadCamConfig(self):
        """Load a camera configuration from a .json file and start the camera with it."""
        filePath,fileType = QtWidgets.QFileDialog.getOpenFileName(self.mainWin,'Choose File',self.fileOpenSavePath,'*.json')
        if filePath=='':
            return
        self.fileOpenSavePath = os.path.dirname(filePath)
        with open(filePath,'r') as file:
            config = json.load(file)
        # QAction items are restored via their checked state; everything else
        # is assigned directly.
        for item in self.configItems:
            attr = getattr(self,item,None)
            if isinstance(attr,QtWidgets.QAction):
                attr.setChecked(config[item])
            else:
                setattr(self,item,config[item])
        self.camConfig = True
        self.cameraMenuUseCam.setChecked(True)
        self.initCamera()
    def clearCamConfig(self):
        """Forget the loaded camera configuration so defaults are used next time."""
        self.camConfig = False
def saveCamConfig(self):
filePath,fileType = QtWidgets.QFileDialog.getSaveFileName(self.mainWin,'Save As',self.fileOpenSavePath,'*.json')
if filePath=='':
return
self.fileOpenSavePath = os.path.dirname(filePath)
config = {}
for item in self.configItems:
attr = getattr(self,item)
if isinstance(attr,QtWidgets.QAction):
config[item] = attr.isChecked()
else:
config[item] = attr
with open(filePath,'w') as file:
json.dump(config,file)
    def getVideoImage(self):
        """Read the current frame from the open file/video into self.image, applying optional filters."""
        if self.dataFileIn is not None:
            self.image = self.dataFileIn['frames'][self.frameNum-1]
        else:
            isImage,image = self.videoIn.read()
            self.image = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
        # Optional image preprocessing controlled by the track menu.
        if self.trackMenuBlurImage.isChecked():
            self.blurImage()
        if self.trackMenuExpImage.isChecked():
            self.exponentiateImage()
    def blurImage(self):
        """Gaussian-blur self.image in place with sigma self.blurSigma (kernel size auto-derived)."""
        self.image = cv2.GaussianBlur(self.image,(0,0),self.blurSigma)
    def setBlurImage(self):
        """Re-read and redisplay the current frame after the blur toggle changes."""
        self.getVideoImage()
        self.updateDisplay()
    def setBlurSigma(self):
        """Prompt for a new blur sigma and refresh the display if blurring is active."""
        val,ok = QtWidgets.QInputDialog.getDouble(self.mainWin,'Set Blur Sigma','',value=self.blurSigma,min=0.01,decimals=2)
        if not ok:
            return
        self.blurSigma = val
        if self.trackMenuBlurImage.isChecked():
            self.getVideoImage()
            self.updateDisplay()
def exponentiateImage(self):
self.image = self.image.astype(float)
self.image /= self.image.max()
self.image = 1-self.image
self.image **= self.imgExponent
self.image = 1-self.image
self.image *= 255/self.image.max()
self.image = self.image.astype(np.uint8)
    def setExponentiateImage(self):
        """Re-read and redisplay the current frame after the exponentiate toggle changes."""
        self.getVideoImage()
        self.updateDisplay()
    def setExponent(self):
        """Prompt for a new image exponent and refresh the display if exponentiation is active."""
        val,ok = QtWidgets.QInputDialog.getDouble(self.mainWin,'Set Exponent','',value=self.imgExponent,min=0.01,decimals=2)
        if not ok:
            return
        self.imgExponent = val
        if self.trackMenuExpImage.isChecked():
            self.getVideoImage()
            self.updateDisplay()
    def initDisplay(self):
        """Initialize the image view, frame spin box, and (optionally) the tracking plots."""
        self.resetImage()
        if self.cam is None:
            # File/video playback: enable and configure the frame spin box.
            self.frameNumSpinBox.setRange(1,self.numFrames)
            self.frameNumSpinBox.setValue(self.frameNum)
            self.frameNumSpinBox.setSuffix(' of '+str(self.numFrames))
            self.frameNumSpinBox.blockSignals(False)
            self.frameNumSpinBox.setEnabled(True)
        if self.showTracking:
            if self.cam is None:
                # Bound the draggable frame markers to the video duration.
                for line in self.frameNumLines:
                    line.setBounds((0,(self.numFrames-1)/self.frameRate))
                self.addFrameNumLines()
            self.setDataPlotDur()
            self.setDataPlotTime()
            self.resetPupilData()
            self.resetPupilDataPlot()
            self.setDataPlotXRange()
            self.trackMenu.setEnabled(True)
            self.analysisMenu.setEnabled(True)
        else:
            self.trackMenu.setEnabled(False)
            self.analysisMenu.setEnabled(False)
    def resetImage(self):
        """Push the ROI-cropped image to the view and redraw tracking overlays."""
        # Transposed because pyqtgraph treats axis 0 as x.
        self.imageItem.setImage(self.image[self.roiInd].T,levels=(0,255))
        self.imageViewBox.autoRange(items=[self.imageItem])
        if self.pupilCenterSeed is not None:
            self.pupilCenterPlot.setData(x=[self.pupilCenterSeed[0]],y=[self.pupilCenterSeed[1]])
            self.pupilEllipsePlot.setData(x=[],y=[])
        if self.reflectCenterSeed is not None:
            self.reflectCenterPlot.setData(x=[self.reflectCenterSeed[0]],y=[self.reflectCenterSeed[1]])
        self.getRadialLines()
    def resetROI(self,keepPosAndSize=False):
        """Restore the full-frame ROI (on the camera too, if one is open).

        keepPosAndSize: when True, leave self.roiPos/self.roiSize untouched
        and only reset the crop index to the full frame.
        """
        if self.cam is not None:
            if self.camType=='vimba':
                # Reset the hardware ROI to the full sensor.
                self.cam.feature('OffsetX').value = 0
                self.cam.feature('OffsetY').value = 0
                self.cam.feature('Width').value = self.cam.feature('WidthMax').value
                self.cam.feature('Height').value = self.cam.feature('HeightMax').value
            self.getCamImage()
        self.fullRoiSize = (self.image.shape[1],self.image.shape[0])
        self.roiInd = np.s_[0:self.fullRoiSize[1],0:self.fullRoiSize[0]]
        if not keepPosAndSize:
            self.roiPos = (0,0)
            self.roiSize = (self.image.shape[1],self.image.shape[0])
    def resetPupilTracking(self):
        """Clear all tracking overlays, seeds, and ROIs, and disable tracking toggles."""
        self.pupilCenterPlot.setData(x=[],y=[])
        self.pupilEllipsePlot.setData(x=[],y=[])
        self.pupilCenterSeed = None
        self.reflectCenterPlot.setData(x=[],y=[])
        self.reflectCenterSeed = None
        if self.pupilRoi is not None:
            self.imageViewBox.removeItem(self.pupilRoi)
            self.pupilRoi = None
        for roi in self.reflectRoi:
            self.imageViewBox.removeItem(roi)
        self.reflectRoi = []
        for roi in self.maskRoi:
            self.imageViewBox.removeItem(roi)
        self.maskRoi = []
        # Turn the toggle states back off if they were left on.
        if self.stopTracking:
            self.toggleStopTracking()
        if self.setDataNan:
            self.toggleSetDataNan()
def resetPupilData(self):
self.dataPlotIndex = 0
n = self.numFrames if self.cam is None else self.numDataPlotPts
self.reflectCenter = np.full((n,2),np.nan)
self.pupilCenter = np.full((n,2),np.nan)
self.pupilArea = np.full(n,np.nan)
self.pupilX = np.full(n,np.nan)
self.pupilY = np.full(n,np.nan)
    def resetPupilDataPlot(self):
        """Reset the three data plots to flat zero lines spanning the plot window."""
        self.pupilAreaPlot.setData(x=[0,self.dataPlotDur],y=[0,0])
        self.pupilXPlot.setData(x=[0,self.dataPlotDur],y=[0,0])
        self.pupilYPlot.setData(x=[0,self.dataPlotDur],y=[0,0])
    def startVideo(self):
        """Toggle continuous playback (file/video) or live acquisition (camera)."""
        if self.image is None:
            return
        elif self.startVideoButton.isChecked():
            self.turnOffButtons(source=self.startVideoButton)
            self.startVideoButton.setText('Stop Video')
            if self.cam is None:
                # Playback branch: loop over frames until the button is unchecked.
                self.frameNumSpinBox.blockSignals(True)
                if self.frameNum==self.numFrames:
                    # Restart from the beginning when already at the end.
                    self.frameNum = 0
                    if self.showTracking:
                        self.setDataPlotXRange()
                    if self.videoIn is not None:
                        self.videoIn.set(cv2.CAP_PROP_POS_FRAMES,0)
                while self.startVideoButton.isChecked():
                    if self.frameNum==self.numFrames:
                        # Simulate a user click to stop cleanly at the last frame.
                        self.startVideoButton.click()
                        break
                    self.frameNum += 1
                    self.frameNumSpinBox.setValue(self.frameNum)
                    self.getVideoImage()
                    self.updateDisplay()
                    self.app.processEvents()
            else:
                # Live camera branch.
                self.frameNum = 0
                self.startCamera(bufferSize=self.camBufferSize)
                if self.camType=='vimba':
                    if self.showTracking:
                        self.resetPupilData()
                    # Vimba delivers frames asynchronously via camFrameCaptured.
                    for frame in self.camFrames:
                        frame.queue_for_capture(frame_callback=camFrameCaptured)
                    self.cam.AcquisitionStart()
                else:
                    # Webcams are polled synchronously.
                    while self.startVideoButton.isChecked():
                        isImage,image = self.cam.read()
                        image = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
                        self.processCamFrame(image,self.frameNum+1,time.perf_counter())
                        self.app.processEvents()
        else:
            # Button unchecked: stop acquisition and restore UI state.
            if self.cam is not None:
                self.stopCamera()
            self.updateDisplay(showAll=True)
            if self.cam is None:
                self.frameNumSpinBox.blockSignals(False)
            self.startVideoButton.setText('Start Video')
    def updateDisplay(self,showAll=False,showNone=False):
        """Refresh the image and tracking overlays/plots, throttled to every Nth frame.

        showAll: force a full refresh regardless of the update interval.
        showNone: run tracking but skip all drawing.
        """
        # n cycles 0..interval-1; drawing normally happens only when n==0.
        n = (self.frameNum-1)%self.displayUpdateInterval
        if showAll or (not showNone and n==0):
            self.imageItem.setImage(self.image[self.roiInd].T,levels=(0,255))
        if self.showTracking:
            if self.cam is None:
                self.selectedSaccade = None
            elif self.camType=='vimba':
                # Live camera: advance the circular plot-buffer index.
                if self.dataPlotIndex==self.numDataPlotPts-1:
                    self.dataPlotIndex = 0
                else:
                    self.dataPlotIndex += 1
            if not self.stopTracking and (self.setDataNan or self.reflectCenterSeed is not None):
                self.trackReflect()
                if showAll or (not showNone and n==0):
                    if self.reflectFound:
                        self.reflectCenterPlot.setData(x=[self.reflectCenterSeed[0]],y=[self.reflectCenterSeed[1]])
                    else:
                        self.reflectCenterPlot.setData(x=[],y=[])
            if not self.stopTracking and (self.setDataNan or self.pupilCenterSeed is not None):
                self.trackPupil()
                if showAll or (not showNone and n==0):
                    self.updatePupilPlot()
            if self.pupilCenterSeed is not None or self.dataIsLoaded:
                # Data plots refresh on a staggered phase (n==1) to spread work.
                if showAll or (not showNone and n==1):
                    self.updatePupilDataPlot()
        if self.trackMenuAdaptThresh.isChecked():
            self.meanImageIntensity = self.image.mean()
def setDisplayUpdateInterval(self):
val,ok = QtWidgets.QInputDialog.getInt(self.mainWin,'Set Display Update Interval','Frames:',value=self.displayUpdateInterval,min=1,max=10)
if ok:
self.displayUpdateInterval = val
def processCamFrame(self,img,frameID,timestamp):
self.frameNum += 1
self.image = img
showNone = False
if self.saveCheckBox.isChecked() or (self.nidaq and self.cameraMenuNidaqIn.isChecked() and self.nidaqDigitalIn.read()):
if self.dataFileOut is None:
if self.camType=='vimba':
self.cam.AcquisitionStop()
self.cam.end_capture() # resets next frameID to 1
if self.cameraMenuNidaqIn.isChecked():
self.saveCheckBox.setChecked(True)
self.frameNum = 0
fileName = os.path.join(self.camSavePath,self.camSaveBaseName+'_'+time.strftime('%Y%m%d_%H%M%S'))
self.dataFileOut = h5py.File(fileName+'.hdf5','w',libver='latest')
self.dataFileOut.attrs.create('frameRate',self.frameRate)
self.dataFileOut.attrs.create('mmPerPixel',self.mmPerPixel)
self.frameIdDataset = self.dataFileOut.create_dataset('frameID',(0,),maxshape=(None,),dtype=int)
self.frameTimeDataset = self.dataFileOut.create_dataset('frameTimes',(0,),maxshape=(None,),dtype=float)
if self.camSaveFileType=='.hdf5':
imgShape = tuple(self.roiSize[::-1])
self.frameDataset = self.dataFileOut.create_dataset('frames',(0,)+imgShape,maxshape=(None,)+imgShape,dtype=img.dtype,chunks=(1,)+imgShape,compression='gzip',compression_opts=1)
else:
self.videoOut = self.skvideo.io.FFmpegWriter(fileName+self.camSaveFileType,inputdict={'-r':str(self.frameRate)},outputdict={'-r':str(self.frameRate),'-vcodec':'libx264','-crf':'17'})
if self.camType=='vimba':
self.cam.feature('SyncOutSelector').value = 'SyncOut2'
self.cam.feature('SyncOutSource').value = 'Exposing'
self.cam.start_capture()
self.cam.AcquisitionStart()
showNone = True
else:
if self.nidaq and self.cameraMenuNidaqOut.isChecked():
self.nidaqDigitalOut.write(True)
if self.camType=='vimba':
timestamp /= self.cam.GevTimestampTickFrequency
self.frameIdDataset.resize(self.frameNum,axis=0)
self.frameIdDataset[-1] = frameID
self.frameTimeDataset.resize(self.frameNum,axis=0)
self.frameTimeDataset[-1] = timestamp
if self.camSaveFileType=='.hdf5':
self.frameDataset.resize(self.frameNum,axis=0)
self.frameDataset[-1] = img[self.roiInd]
else:
self.videoOut.writeFrame(img[self.roiInd])
if self.nidaq and self.cameraMenuNidaqOut.isChecked():
self.nidaqDigitalOut.write(False)
elif self.dataFileOut is not None:
if self.camType=='vimba':
self.cam.AcquisitionStop()
self.closeDataFileOut()
if self.camType=='vimba':
self.cam.feature('SyncOutSource').value = 'GPO'
self.cam.feature('SyncOutLevels').value = 0
self.cam.AcquisitionStart()
showNone = True
self.updateDisplay(showNone)
    def closeDataFileOut(self):
        """Close the hdf5 (and optional mp4) output and reset saving state."""
        self.dataFileOut.close()
        self.dataFileOut = None
        if self.videoOut is not None:
            self.videoOut.close()
            self.videoOut = None
        self.frameNum = 0
        # When saving was triggered by the nidaq input line, clear the checkbox.
        if self.cameraMenuNidaqIn.isChecked():
            self.saveCheckBox.setChecked(False)
    def toggleStopTracking(self):
        """Toggle the stop-tracking state; when stopping, clear the tracking overlays."""
        self.stopTracking = not self.stopTracking
        self.trackMenuStopTracking.setChecked(self.stopTracking)
        if self.stopTracking:
            self.reflectCenterPlot.setData(x=[],y=[])
            self.pupilCenterPlot.setData(x=[],y=[])
            self.pupilEllipsePlot.setData(x=[],y=[])
            # stop-tracking and set-data-nan are mutually exclusive.
            if self.setDataNan:
                self.toggleSetDataNan()
    def toggleSetDataNan(self):
        """Toggle the set-data-NaN state; when enabled during playback, NaN the current frame."""
        self.setDataNan = not self.setDataNan
        self.trackMenuSetDataNan.setChecked(self.setDataNan)
        if self.setDataNan and self.cam is None and not any([button.isChecked() for button in self.buttons]):
            # set-data-nan and stop-tracking are mutually exclusive.
            if self.stopTracking:
                self.toggleStopTracking()
            self.setCurrentFrameDataNan()
    def setCurrentFrameDataNan(self):
        """Mark the current frame's tracking data invalid (NaN) and clear its overlays."""
        self.reflectCenterPlot.setData(x=[],y=[])
        self.pupilCenterPlot.setData(x=[],y=[])
        self.pupilEllipsePlot.setData(x=[],y=[])
        self.pupilArea[self.frameNum-1] = np.nan
        self.pupilX[self.frameNum-1] = np.nan
        self.pupilY[self.frameNum-1] = np.nan
        if self.pupilCenterSeed is not None or self.dataIsLoaded:
            self.updatePupilDataPlot()
def mainWinKeyPressEvent(self,event):
key = event.key()
modifiers = QtWidgets.QApplication.keyboardModifiers()
if key in (QtCore.Qt.Key_Comma,QtCore.Qt.Key_Period):
if self.cam is None and not any([button.isChecked() for button in self.buttons]):
frameShift = int(0.9*self.numDataPlotPts) if int(modifiers & QtCore.Qt.ControlModifier)>0 else 1
if key==QtCore.Qt.Key_Comma:
self.frameNum -= frameShift
if self.frameNum<1:
self.frameNum = 1
else:
self.frameNum += frameShift
if self.frameNum>self.numFrames:
self.frameNum = self.numFrames
self.frameNumSpinBox.setValue(self.frameNum)
elif key in (QtCore.Qt.Key_Left,QtCore.Qt.Key_Right,QtCore.Qt.Key_Up,QtCore.Qt.Key_Down,QtCore.Qt.Key_Minus,QtCore.Qt.Key_Equal):
if key in (QtCore.Qt.Key_Left,QtCore.Qt.Key_Right,QtCore.Qt.Key_Up,QtCore.Qt.Key_Down) and self.cam is None and not any([button.isChecked() for button in self.buttons]) and self.selectedSaccade is not None:
if key in (QtCore.Qt.Key_Left,QtCore.Qt.Key_Right):
saccades = self.negSaccades if self.selectedSaccade in self.negSaccades else self.posSaccades
move = -1 if key==QtCore.Qt.Key_Left else 1
saccades[saccades==self.selectedSaccade] += move
self.selectedSaccade += move
elif key==QtCore.Qt.Key_Up and self.selectedSaccade in self.negSaccades:
self.negSaccades = self.negSaccades[self.negSaccades!=self.selectedSaccade]
self.posSaccades = np.unique(np.concatenate((self.posSaccades,[self.selectedSaccade])))
elif key==QtCore.Qt.Key_Down and self.selectedSaccade in self.posSaccades:
self.posSaccades = self.posSaccades[self.posSaccades!=self.selectedSaccade]
self.negSaccades = np.unique(np.concatenate((self.negSaccades,[self.selectedSaccade])))
else:
return
self.plotSaccades()
else:
if self.roiButton.isChecked():
roi = self.roi
elif self.findPupilButton.isChecked and self.pupilRoi is not None:
roi = self.pupilRoi
elif self.findReflectButton.isChecked() and len(self.reflectRoi)>0:
roi = self.reflectRoi[-1]
elif self.setMaskButton.isChecked() and len(self.maskRoi)>0:
roi = self.maskRoi[-1]
else:
return
roiPos = [int(n) for n in roi.pos()]
roiSize = [int(n) for n in roi.size()]
if key in (QtCore.Qt.Key_Left,QtCore.Qt.Key_Equal):
if roiPos[0]>0:
roiPos[0] -= 1
elif key in (QtCore.Qt.Key_Right,QtCore.Qt.Key_Minus):
if (roiPos[0]+roiSize[0])<self.roiSize[0]:
roiPos[0] += 1
if key in (QtCore.Qt.Key_Up,QtCore.Qt.Key_Equal):
if roiPos[1]>0:
roiPos[1] -= 1
elif key in (QtCore.Qt.Key_Down,QtCore.Qt.Key_Minus):
if (roiPos[1]+roiSize[1])<self.roiSize[1]:
roiPos[1] += 1
if key==QtCore.Qt.Key_Minus:
roiSize = [n-2 for n in roiSize]
elif key==QtCore.Qt.Key_Equal:
if (roiPos[0]+roiSize[0])<self.roiSize[0]:
roiSize[0] += 2
if (roiPos[1]+roiSize[1])<self.roiSize[1]:
roiSize[1] += 2
roi.setPos(roiPos)
roi.setSize(roiSize)
elif self.showTracking:
if key==QtCore.Qt.Key_Escape:
self.toggleStopTracking()
elif key==QtCore.Qt.Key_N:
if int(modifiers & QtCore.Qt.ControlModifier)>0:
self.toggleSetDataNan()
elif self.cam is None and not any([button.isChecked() for button in self.buttons]):
self.setCurrentFrameDataNan()
elif key==QtCore.Qt.Key_Space:
if self.cam is None and not any([button.isChecked() for button in self.buttons]):
self.changePlotWindowDur(fullRange=True)
elif key==QtCore.Qt.Key_Delete:
if self.cam is None and not any([button.isChecked() for button in self.buttons]):
if int(modifiers & QtCore.Qt.ControlModifier)>0:
self.deleteAllSaccades()
elif self.selectedSaccade is not None:
self.negSaccades = self.negSaccades[self.negSaccades!=self.selectedSaccade]
self.posSaccades = self.posSaccades[self.posSaccades!=self.selectedSaccade]
self.selectedSaccade = None
self.plotSaccades()
elif self.setMaskButton.isChecked() and len(self.maskRoi)>0:
self.imageViewBox.removeItem(self.maskRoi[-1])
del(self.maskRoi[-1])
del(self.maskIndex[-1])
elif key==QtCore.Qt.Key_F:
if self.cam is None and not any([button.isChecked() for button in self.buttons]):
self.findSaccades()
elif key==QtCore.Qt.Key_S:
if self.cam is None and not any([button.isChecked() for button in self.buttons]):
self.posSaccades = np.unique(np.concatenate((self.posSaccades,[self.frameNum-1])))
self.selectedSaccade = self.frameNum-1
self.plotSaccades()
    def imageMouseClickEvent(self,event):
        """Handle single clicks on the image.

        A right click re-seeds the corneal-reflection ROI at the clicked
        pixel and re-runs reflection (and, if seeded, pupil) tracking.
        Ignored while the ROI or find-reflection buttons are checked or
        before a reflection seed exists.
        """
        if event.button()==QtCore.Qt.RightButton and not self.roiButton.isChecked() and not self.findReflectButton.isChecked() and self.reflectCenterSeed is not None:
            # leave paused / set-NaN modes before re-seeding
            if self.stopTracking:
                self.toggleStopTracking()
            if self.setDataNan:
                self.toggleSetDataNan()
            x,y = event.pos().x(),event.pos().y()
            # center the reflection ROI on the clicked pixel
            self.reflectRoiPos = [[int(x-self.reflectRoiSize[0][0]/2),int(y-self.reflectRoiSize[0][1]/2)]]
            if not self.startVideoButton.isChecked():
                self.trackReflect()
                if self.reflectFound:
                    self.reflectCenterPlot.setData(x=[self.reflectCenterSeed[0]],y=[self.reflectCenterSeed[1]])
                    # pupil position is reflection-relative, so re-track it too
                    if self.pupilCenterSeed is not None:
                        self.trackPupil()
                        self.updatePupilPlot()
                        if self.findPupilButton.isChecked():
                            self.updatePupilTrackParamPlots()
                        else:
                            self.updatePupilDataPlot()
                else:
                    self.reflectCenterPlot.setData(x=[],y=[])
    def imageDoubleClickEvent(self,event):
        """Handle double clicks on the image.

        Depending on the active mode this either adds a reflection-spot ROI,
        adds a mask ROI, or re-seeds the pupil center at the clicked pixel.
        """
        x,y = event.pos().x(),event.pos().y()
        if self.findReflectButton.isChecked():
            n = len(self.reflectRoi)
            # spot mode allows a single ROI; ring mode allows several
            if n<1 or self.trackMenuReflectTypeRing.isChecked():
                if n<1:
                    roiSize = (math.ceil(0.1*max(self.roiSize)),)*2
                else:
                    # reuse the size of the first spot ROI for consistency
                    roiSize = self.reflectRoi[0].size()
                self.reflectRoi.append(pg.ROI((int(x-roiSize[0]/2),int(y-roiSize[1]/2)),roiSize,pen='r'))
                self.reflectRoi[-1].addScaleHandle(pos=(1,1),center=(0.5,0.5))
                self.imageViewBox.addItem(self.reflectRoi[-1])
        elif self.setMaskButton.isChecked():
            roiSize = math.ceil(0.1*max(self.roiSize))
            self.maskRoi.append(pg.ROI((int(x-roiSize/2),int(y-roiSize/2)),(roiSize,)*2,pen='r'))
            self.maskRoi[-1].addScaleHandle(pos=(1,1),center=(0.5,0.5))
            self.imageViewBox.addItem(self.maskRoi[-1])
        elif not self.roiButton.isChecked() and (self.findPupilButton.isChecked() or self.pupilCenterSeed is not None):
            # leave paused / set-NaN modes before re-seeding the pupil
            if self.stopTracking:
                self.toggleStopTracking()
            if self.setDataNan:
                self.toggleSetDataNan()
            self.pupilCenterSeed = (x,y)
            if not self.startVideoButton.isChecked() and self.trackMenuPupilMethodStarburst.isChecked():
                self.trackPupil()
                self.updatePupilPlot()
                if self.findPupilButton.isChecked():
                    self.updatePupilTrackParamPlots()
                else:
                    self.updatePupilDataPlot()
    def dataPlotMouseClickEvent(self,event):
        """Right click on a data plot selects the saccade nearest the
        clicked time point (file playback only, no mode button active)."""
        if event.button()==QtCore.Qt.RightButton and self.cam is None and not any([button.isChecked() for button in self.buttons]) and (self.negSaccades.size>0 or self.posSaccades.size>0):
            pos = self.pupilXPlotItem.getViewBox().mapSceneToView(event.pos())
            # convert plot time (s) to a 1-based frame number
            frame = int(round(pos.x()*self.frameRate))+1
            saccades = self.getSaccadesOnDisplay()
            if saccades.size>0:
                self.selectedSaccade = saccades[np.argmin(np.absolute(saccades-frame))]
    def dataPlotDoubleClickEvent(self,event):
        """Double click on a data plot adds a saccade at the clicked time.

        The frame is snapped to the peak pupil velocity within a small
        window around the click; the saccade sign follows the velocity sign.
        """
        if self.cam is None and not any([button.isChecked() for button in self.buttons]):
            pos = self.pupilXPlotItem.getViewBox().mapSceneToView(event.pos())
            frame = int(round(pos.x()*self.frameRate))+1
            vel,_ = self.getPupilVelocity()
            # search a saccadeSmoothPts-wide window centered on the click
            n = self.saccadeSmoothPts//2
            vel = vel[frame-n:frame-n+self.saccadeSmoothPts]
            maxInd = np.argmax(np.absolute(vel))
            if not np.isnan(vel[maxInd]):
                frame += maxInd-n
                if vel[maxInd]<0:
                    self.negSaccades = np.unique(np.concatenate((self.negSaccades,[frame])))
                else:
                    self.posSaccades = np.unique(np.concatenate((self.posSaccades,[frame])))
                self.selectedSaccade = frame
                self.plotSaccades()
    def setROI(self):
        """Toggle ROI-editing mode.

        Checked: show a draggable/resizable ROI over the full image.
        Unchecked: clamp the edited ROI to the frame, shift all ROI-relative
        tracking state by the ROI displacement, then apply the new ROI
        (in software for files/webcams, on-chip for supported cameras).
        """
        if self.image is None:
            return
        elif self.roiButton.isChecked():
            self.turnOffButtons(source=self.roiButton)
            self.resetROI(keepPosAndSize=True)
            self.resetImage()
            if self.roi is None:
                self.roi = pg.ROI(self.roiPos,self.roiSize,maxBounds=None,pen='r')
                self.roi.addScaleHandle(pos=(1,1),center=(0.5,0.5))
                self.imageViewBox.addItem(self.roi)
            else:
                self.roi.setVisible(True)
        else:
            # clamp the ROI position to the full frame
            newPos = []
            for p,maxSize in zip(self.roi.pos(),self.fullRoiSize):
                if p<0:
                    newPos.append(0)
                elif p>maxSize-1:
                    newPos.append(maxSize-1)
                else:
                    newPos.append(int(p))
            # clamp the ROI size so pos+size stays inside the frame
            newSize = []
            for s,p,maxSize in zip(self.roi.size(),newPos,self.fullRoiSize):
                if s<1:
                    newSize.append(1)
                elif p+s>maxSize:
                    newSize.append(maxSize-p)
                else:
                    newSize.append(int(s))
            if self.showTracking:
                # tracking coordinates are ROI-relative: shift by the displacement
                deltaPos = [newPos[i]-self.roiPos[i] for i in (0,1)]
                self.pupilCenter -= deltaPos
                self.reflectCenter -= deltaPos
                if self.pupilCenterSeed is not None:
                    self.pupilCenterSeed = (self.pupilCenterSeed[0]-deltaPos[0],self.pupilCenterSeed[1]-deltaPos[1])
                if self.reflectCenterSeed is not None:
                    self.reflectCenterSeed = (self.reflectCenterSeed[0]-deltaPos[0],self.reflectCenterSeed[1]-deltaPos[1])
                if self.pupilRoi is not None:
                    self.pupilRoiPos[0] -= deltaPos[0]
                    self.pupilRoiPos[1] -= deltaPos[1]
                for i,roi in enumerate(self.reflectRoi):
                    self.reflectRoiPos[i][0] -= deltaPos[0]
                    self.reflectRoiPos[i][1] -= deltaPos[1]
                if len(self.maskRoi)>0:
                    for roi in self.maskRoi:
                        roi.setPos((roi.pos()[0]-deltaPos[0],roi.pos()[1]-deltaPos[1]))
                    self.updateMaskIndex()
            self.roi.setPos(newPos)
            self.roi.setSize(newSize)
            self.roiPos = newPos
            self.roiSize = newSize
            # files/webcams crop in software; other cameras crop on the sensor
            if self.cam is None or self.camType=='webcam':
                self.roiInd = np.s_[self.roiPos[1]:self.roiPos[1]+self.roiSize[1],self.roiPos[0]:self.roiPos[0]+self.roiSize[0]]
            else:
                self.roiInd = np.s_[0:self.roiSize[1],0:self.roiSize[0]]
                if self.cam is not None:
                    if self.camType=='vimba':
                        self.cam.feature('OffsetX').value = self.roiPos[0]
                        self.cam.feature('OffsetY').value = self.roiPos[1]
                        self.cam.feature('Width').value = self.roiSize[0]
                        self.cam.feature('Height').value = self.roiSize[1]
                    self.getCamImage()
            self.roi.setVisible(False)
            self.resetImage()
    def setMask(self):
        """Toggle mask-editing mode.

        Checked: show the mask ROIs for editing. Unchecked: hide them,
        rebuild the cached mask slices, and re-track the pupil with the
        new mask applied.
        """
        if self.image is None:
            return
        elif self.setMaskButton.isChecked():
            self.turnOffButtons(source=self.setMaskButton)
            for roi in self.maskRoi:
                roi.setVisible(True)
        else:
            for roi in self.maskRoi:
                roi.setVisible(False)
            self.updateMaskIndex()
            if self.pupilCenterSeed is not None:
                self.trackPupil()
                self.updatePupilPlot()
                self.updatePupilDataPlot()
def updateMaskIndex(self):
self.maskIndex = []
for roi in self.maskRoi:
x,y = [int(n) for n in roi.pos()]
w,h = [int(n) for n in roi.size()]
self.maskIndex.append(np.s_[y:y+h,x:x+w])
    def setUseMask(self):
        """Re-track the pupil after the use-mask checkbox changes
        (file playback only, no mode button active, pupil seeded)."""
        if self.cam is None and not any([button.isChecked() for button in self.buttons]) and self.pupilCenterSeed is not None:
            self.trackPupil()
            self.updatePupilPlot()
            self.updatePupilDataPlot()
    def findPupil(self):
        """Toggle pupil-parameter-setup mode.

        Checked: repurpose the three data plots as tracking-parameter plots
        (edge threshold, pixels-above-threshold, edge distance) and, for
        non-starburst methods, show the pupil ROI. Unchecked: restore the
        plots to area/x/y data traces and hide the setup items.
        """
        if self.image is None:
            return
        elif self.findPupilButton.isChecked():
            self.turnOffButtons(source=self.findPupilButton)
            if self.trackMenuPupilMethodStarburst.isChecked() or self.trackMenuPupilMethodLine.isChecked():
                if self.pupilCenterSeed is None:
                    # initial edge threshold: twice the darkest nonzero pixel in the ROI
                    self.pupilEdgeThresh = 2*self.image[self.roiInd][self.image[self.roiInd]>0].min()
                    self.minNumPixAboveThresh = 2
                    self.edgeFilt = np.ones(self.minNumPixAboveThresh)
                    self.edgeDistThresh = np.array([-6,6])
                self.pupilAreaPlot.setData(x=[],y=[])
                self.pupilXPlot.setData(x=[],y=[])
                self.pupilYPlot.setData(x=[],y=[])
                if self.cam is None:
                    for line in self.frameNumLines:
                        line.setVisible(False)
                # repurpose the data plots to show tracking parameters
                self.pupilAreaPlotItem.addItem(self.pupilEdgeThreshLine)
                self.pupilEdgeThreshLine.setValue(self.pupilEdgeThresh)
                self.pupilXPlotItem.addItem(self.numPixAboveThreshLine)
                self.numPixAboveThreshLine.setValue(self.minNumPixAboveThresh)
                self.pupilYPlotItem.addItem(self.edgeDistUpperThreshLine)
                self.pupilYPlotItem.addItem(self.edgeDistLowerThreshLine)
                self.pupilAreaPlotItem.setLabel('left','Pixel Intensity')
                self.pupilXPlotItem.setLabel('left','Pixels > Thresh')
                self.pupilYPlotItem.setLabel('left','Pupil Edge Dist')
                self.pupilYPlotItem.setLabel('bottom','')
                if self.pupilCenterSeed is not None:
                    self.updatePupilTrackParamPlots()
            if not self.trackMenuPupilMethodStarburst.isChecked():
                # non-starburst methods need a pupil ROI
                if self.pupilRoi is None:
                    maxBoundsRect = self.imageViewBox.itemBoundingRect(self.imageItem)
                    self.pupilRoiSize = [int(math.ceil(0.2*max(self.roiSize)))]*2
                    self.pupilRoiPos = [int(0.5*(self.roiSize[i]-self.pupilRoiSize[i])) for i in (0,1)]
                    self.pupilRoi = pg.ROI(self.pupilRoiPos,self.pupilRoiSize,maxBounds=maxBoundsRect,pen='r')
                    self.pupilRoi.addScaleHandle(pos=(1,1),center=(0.5,0.5))
                    self.pupilRoi.sigRegionChangeFinished.connect(self.pupilRoiRegionChanged)
                    self.imageViewBox.addItem(self.pupilRoi)
                else:
                    self.pupilRoi.setPos(self.pupilRoiPos)
                    self.pupilRoi.setSize(self.pupilRoiSize)
                    self.pupilRoi.setVisible(True)
        else:
            if self.trackMenuPupilMethodStarburst.isChecked() or self.trackMenuPupilMethodLine.isChecked():
                # restore the data plots to their normal area/x/y role
                self.pupilEdgePtsPlot.setData(x=[],y=[])
                for i in range(len(self.radialProfilePlot)):
                    self.radialProfilePlot[i].setData(x=[],y=[])
                    self.radialProfilePixAboveThreshPlot[i].setData(x=[],y=[])
                self.edgeDistPlot.setData(x=[],y=[])
                self.pupilAreaPlotItem.removeItem(self.pupilEdgeThreshLine)
                self.pupilXPlotItem.removeItem(self.numPixAboveThreshLine)
                self.pupilYPlotItem.removeItem(self.edgeDistUpperThreshLine)
                self.pupilYPlotItem.removeItem(self.edgeDistLowerThreshLine)
                if self.cam is None:
                    for line in self.frameNumLines:
                        line.setVisible(True)
                self.pupilAreaPlotItem.setLabel('left','Pupil Area')
                self.pupilXPlotItem.setLabel('left','Pupil X')
                self.pupilYPlotItem.setLabel('left','Pupil Y')
                self.pupilYPlotItem.setLabel('bottom','Time (s)')
            if not self.trackMenuPupilMethodStarburst.isChecked():
                self.pupilRoi.setVisible(False)
            if self.pupilCenterSeed is not None:
                self.pupilAreaRange = [self.pupilArea[self.dataPlotIndex]]*2
                self.pupilXRange = [self.pupilX[self.dataPlotIndex]]*2
                self.pupilYRange = [self.pupilY[self.dataPlotIndex]]*2
                self.setDataPlotXRange()
                self.updatePupilDataPlot()
    def pupilRoiRegionChanged(self):
        """Cache the pupil ROI's new integer position/size after a drag or
        resize and re-run pupil tracking."""
        self.pupilRoiPos = [int(n) for n in self.pupilRoi.pos()]
        self.pupilRoiSize = [int(n) for n in self.pupilRoi.size()]
        self.trackPupil()
        self.updatePupilPlot()
        if self.trackMenuPupilMethodLine.isChecked():
            self.updatePupilTrackParamPlots()
    def trackPupil(self):
        """Run one pupil-tracking pass on the current frame with the
        selected method, then record the result via updatePupilData().

        Skipped while data is being set to NaN, or when a reflection seed
        exists but the reflection was not found this frame.
        """
        self.pupilFound = False
        if not self.setDataNan and (self.reflectCenterSeed is None or self.reflectFound):
            img = self.image[self.roiInd].copy()
            if self.trackMenuPupilSignPos.isChecked():
                # invert so the pupil is always the bright feature
                img = 255-img
            if self.useMaskCheckBox.isChecked() and len(self.maskRoi)>0:
                for ind in self.maskIndex:
                    img[ind] = 0
            if self.trackMenuAdaptThresh.isChecked():
                # NOTE(review): this accumulates the intensity offset onto the
                # threshold every frame rather than recomputing from the stored
                # baseline — confirm that drift is intended
                self.pupilEdgeThresh += self.image.mean()-self.meanImageIntensity
            if self.trackMenuPupilMethodStarburst.isChecked():
                self.findPupilWithStarburst(img)
            elif self.trackMenuPupilMethodLine.isChecked():
                self.findPupilWithLine(img)
            elif self.trackMenuPupilMethodGradients.isChecked():
                self.findPupilWithGradients(img)
            else:
                self.findPupilWithIntensity(img)
            self.updatePupilData()
    def findPupilWithStarburst(self,img):
        """Fit an ellipse to pupil edge points found along radial lines
        ("starburst") emanating from the current pupil center seed.

        On success updates pupilCenterSeed/pupilEllipseRadii/pupilEllipseAngle
        and sets pupilFound.
        """
        # get radial profiles and find pupil edges
        # radial profile must cross edge thresh for min num of consecutive pix
        # radial profile = 0 for masked pixels
        if 0<self.pupilCenterSeed[0]<self.roiSize[0]-1 and 0<self.pupilCenterSeed[1]<self.roiSize[1]-1:
            x = self.radialLinesX+int(self.pupilCenterSeed[0])
            y = self.radialLinesY+int(self.pupilCenterSeed[1])
            inFrame = np.logical_and(np.logical_and(x>=0,x<self.roiSize[0]),np.logical_and(y>=0,y<self.roiSize[1]))
            self.radialProfiles[:] = 0
            self.pupilEdges = np.zeros((self.numRadialLines*2,2),dtype=np.float32)
            for i in range(self.numRadialLines):
                xInFrame = x[i,inFrame[i,:]]
                yInFrame = y[i,inFrame[i,:]]
                lineProfile = img[yInFrame,xInFrame]
                centerInd = np.where(np.logical_and(xInFrame==int(self.pupilCenterSeed[0]),yInFrame==int(self.pupilCenterSeed[1])))[0][0]
                # split each line into two half-profiles radiating out from the center
                self.radialProfiles[i,:lineProfile.size-centerInd] = lineProfile[centerInd:]
                self.radialProfiles[i+self.numRadialLines,:centerInd+1] = lineProfile[centerInd::-1]
                edgeInd1 = self.findPupilEdgeIndex(self.radialProfiles[i])
                edgeInd2 = self.findPupilEdgeIndex(self.radialProfiles[i+self.numRadialLines])
                if edgeInd1 is not None:
                    self.pupilEdges[i,0] = xInFrame[centerInd+edgeInd1]
                    self.pupilEdges[i,1] = yInFrame[centerInd+edgeInd1]
                if edgeInd2 is not None:
                    self.pupilEdges[i+self.numRadialLines,0] = xInFrame[centerInd-edgeInd2]
                    self.pupilEdges[i+self.numRadialLines,1] = yInFrame[centerInd-edgeInd2]
            # throw out edge points with outlier distances from center
            self.pupilEdges = self.pupilEdges[self.pupilEdges.any(axis=1)]
            if self.pupilEdges.shape[0]>0:
                self.pupilEdgeDist = np.sqrt(np.sum((self.pupilEdges-self.pupilCenterSeed)**2,axis=1))
                normDist = self.pupilEdgeDist-self.pupilEdgeDist.mean()
                distThresh = self.edgeDistThresh*self.pupilEdgeDist.std()
                self.pupilEdges = self.pupilEdges[np.logical_and(normDist>distThresh[0],normDist<distThresh[1])]
            # fit ellipse to edge points
            if self.pupilEdges.shape[0]>4:
                center,diameter,angle = cv2.fitEllipse(self.pupilEdges)
                # accept only a plausibly circular ellipse inside the frame
                if 0<center[0]<self.roiSize[0]-1 and 0<center[1]<self.roiSize[1]-1 and diameter[1]>0 and diameter[0]/diameter[1]>self.pupilCircularityThresh:
                    self.pupilCenterSeed,self.pupilEllipseRadii,self.pupilEllipseAngle = center,[d/2 for d in diameter],angle
                    self.pupilFound = True
    def getRadialLines(self):
        """Precompute the integer pixel coordinates of the radial lines used
        by the starburst method, and allocate the radial-profile buffer.

        Lines are built for angles 0-80 degrees in 20-degree steps; the
        remaining directions are obtained by mirroring (negating y).
        """
        angles = np.arange(0,90,20)
        # slope is undefined at 0 degrees (vertical line), hence the leading nan
        slopes = np.append(np.nan,1/np.tan(np.radians(angles[1:])))
        self.numRadialLines = 2*angles.size-1
        maxLength = max(self.roiSize)
        self.radialLinesX = np.zeros((self.numRadialLines,maxLength*2),dtype=np.int16)
        self.radialLinesY = np.zeros((self.numRadialLines,maxLength*2),dtype=np.int16)
        for i,angle in enumerate(angles):
            if angle==0:
                self.radialLinesY[i] = np.arange(-maxLength,maxLength)+1
            elif angle==90:
                self.radialLinesX[i] = np.arange(-maxLength,maxLength)+1
            elif angle==45:
                self.radialLinesX[i] = np.arange(-maxLength,maxLength)+1
                self.radialLinesY[i] = np.arange(-maxLength,maxLength)+1
            elif angle<45:
                self.radialLinesY[i] = np.arange(-maxLength,maxLength)+1
                self.radialLinesX[i] = self.radialLinesY[i,:]/slopes[i] # x = y/m
            elif angle>45:
                self.radialLinesX[i] = np.arange(-maxLength,maxLength)+1
                self.radialLinesY[i] = slopes[i]*self.radialLinesX[i,:] # y = mx
        # mirror the non-vertical lines into the other half-plane
        self.radialLinesX[angles.size:] = self.radialLinesX[1:angles.size]
        self.radialLinesY[angles.size:] = -self.radialLinesY[1:angles.size]
        self.radialProfiles = np.zeros((self.numRadialLines*2,max(self.roiSize)),dtype=np.uint8)
def findPupilEdgeIndex(self,lineProfile):
if self.minNumPixAboveThresh>1:
edgeInd = np.where(np.correlate(lineProfile>self.pupilEdgeThresh,self.edgeFilt,mode='valid')==self.minNumPixAboveThresh)[0]
else:
edgeInd = np.where(lineProfile>self.pupilEdgeThresh)[0]
edgeInd = edgeInd[0] if edgeInd.size>0 else None
return edgeInd
def findPupilWithLine(self,img):
if self.reflectCenterSeed is not None:
self.pupilRoiPos[1]
self.radialProfiles[:] = 0
lineProfile = img[self.pupilRoiPos[1]:self.pupilRoiPos[1]+self.pupilRoiSize[1],self.pupilRoiPos[0]:self.pupilRoiPos[0]+self.pupilRoiSize[0]].mean(axis=0)
if self.trackMenuLineOriginLeft.isChecked():
self.radialProfiles[0,:lineProfile.size] = lineProfile
edgeInd = self.findPupilEdgeIndex(lineProfile)
else:
self.radialProfiles[0,:lineProfile.size] = lineProfile[::-1]
edgeInd = self.findPupilEdgeIndex(lineProfile[::-1])
if edgeInd is not None:
edgeInd = lineProfile.size-1-edgeInd
if edgeInd is not None:
edgeInd += self.pupilRoiPos[0]
if not self.findPupilButton.isChecked() and self.pupilCenterSeed is not None:
self.pupilRoiPos[0] += edgeInd-self.pupilCenterSeed[0]
self.pupilCenterSeed = [edgeInd,self.pupilRoiPos[1]+self.pupilRoiSize[1]//2]
self.pupilFound = True
def findPupilWithGradients(self,img):
# method described by:
# Timm and Barth. Accurate eye centre localisation by means of gradients.
# In Proceedings of the Int. Conference on Computer Theory and Applications
# (VISAPP), volume 1, pages 125-130, Algarve, Portugal, 2011.
# with further details in Tristan Hume's blogpost Nov 4, 2012:
# http://thume.ca/projects/2012/11/04/simple-accurate-eye-center-tracking-in-opencv/
img = img[self.pupilRoiPos[1]:self.pupilRoiPos[1]+self.pupilRoiSize[1],self.pupilRoiPos[0]:self.pupilRoiPos[0]+self.pupilRoiSize[0]]
img[img>230] = 0
if self.pupilGradientDownsample<1:
img = cv2.resize(img,(0,0),fx=self.pupilGradientDownsample,fy=self.pupilGradientDownsample,interpolation=cv2.INTER_AREA)
img = cv2.GaussianBlur(img,(0,0),0.005*img.shape[1])
gradY,gradX = np.gradient(img.astype(float))
gradLength = np.sqrt(gradX**2+gradY**2)
gradIndex = gradLength>np.mean(gradLength)+0.3*np.std(gradLength)
x = np.arange(img.shape[1],dtype=float)
y = np.arange(img.shape[0],dtype=float)
meshX,meshY = np.meshgrid(x,y)
distX = np.tile(np.ravel(meshX[gradIndex])-x[:,None],(img.shape[0],1))
distY = np.repeat(np.ravel(meshY[gradIndex])-y[:,None],img.shape[1],axis=0)
distLength = np.sqrt(distX**2+distY**2)
distLength[distLength==0] = 1
# dot = ((distX/distLength)*(gradX[gradIndex]/gradLength[gradIndex]))+((distY/distLength)*(gradY[gradIndex]/gradLength[gradIndex]))
# use in place array manipulations
distX /= distLength
distY /= distLength
gradLength = gradLength[gradIndex]
gradX = gradX[gradIndex]
gradY = gradY[gradIndex]
gradX /= gradLength
gradY /= gradLength
distX *= gradX
distY *= gradY
distX += distY
dot = distX
dot.clip(min=0,out=dot)
# equation 3 in Timm and Barth 2011
centerWeight = (255-img)[gradIndex]
dot **= 2
dot *= centerWeight
f = np.reshape(dot.sum(axis=1)/dot.shape[1],img.shape)
# remove high f regions connected to edge
_,contours,_ = cv2.findContours((f>0.9*f.max()).astype(np.uint8),cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
if len(contours)>1:
mask = np.zeros_like(img)
for i,c in enumerate(contours):
if np.in1d(c[:,0,0],[1,img.shape[1]-2]).any() or np.in1d(c[:,0,1],[1,img.shape[0]-2]).any():
cv2.drawContours(mask,contours,i,1,-1)
f[mask.astype(np.bool)] = 0
f[:,[0,-1]] = 0
f[[0,-1],:] = 0
center = np.unravel_index(f.argmax(),f.shape)[::-1]
self.pupilCenterSeed = [int(center[i]/self.pupilGradientDownsample)+self.pupilRoiPos[i] for i in (0,1)]
self.pupilFound = True
def findPupilWithIntensity(self,img):
imgRoi = img[self.pupilRoiPos[1]:self.pupilRoiPos[1]+self.pupilRoiSize[1],self.pupilRoiPos[0]:self.pupilRoiPos[0]+self.pupilRoiSize[0]]
maxInd = np.unravel_index(np.argmax(imgRoi),imgRoi.shape)
self.pupilCenterSeed = [self.pupilRoiPos[0]+maxInd[1],self.pupilRoiPos[1]+maxInd[0]]
self.pupilRoiIntensity = imgRoi.mean()
self.pupilFound = True
    def setPupilSign(self):
        """Menu handler: make the two pupil-sign items mutually exclusive
        and clear the pupil seed/plots so tracking must be re-seeded."""
        if self.mainWin.sender() is self.trackMenuPupilSignNeg:
            self.trackMenuPupilSignNeg.setChecked(True)
            self.trackMenuPupilSignPos.setChecked(False)
        else:
            self.trackMenuPupilSignNeg.setChecked(False)
            self.trackMenuPupilSignPos.setChecked(True)
        self.pupilCenterSeed = None
        self.pupilCenterPlot.setData(x=[],y=[])
        self.pupilEllipsePlot.setData(x=[],y=[])
    def setPupilTrackMethod(self):
        """Menu handler: check only the selected tracking-method item,
        enable only its parameter submenu, and clear the pupil seed/plots."""
        methods = (self.trackMenuPupilMethodStarburst,self.trackMenuPupilMethodLine,self.trackMenuPupilMethodGradients,self.trackMenuPupilMethodIntensity)
        # parameter submenu paired with each method (None for intensity)
        params = (self.trackMenuCircularity,self.trackMenuLineOrigin,self.trackMenuGradientDownsamp,None)
        for method,param in zip(methods,params):
            isSelected = method is self.mainWin.sender()
            method.setChecked(isSelected)
            if param is not None:
                param.setEnabled(isSelected)
        self.pupilCenterSeed = None
        self.pupilCenterPlot.setData(x=[],y=[])
        self.pupilEllipsePlot.setData(x=[],y=[])
def setAdaptiveThreshold(self):
self.meanImageIntensity = self.image.mean()
    def setCircularityThresh(self):
        """Prompt for the minimum ellipse axis-length ratio accepted by the
        starburst fit; keep the old value if the dialog is cancelled."""
        val,ok = QtWidgets.QInputDialog.getDouble(self.mainWin,'Set pupil circularity threshold','ellipse axis length ratio:',value=self.pupilCircularityThresh,min=0.01,max=0.99,decimals=2)
        if ok:
            self.pupilCircularityThresh = val
    def setPupilEdgeLineOrigin(self):
        """Menu handler: make the left/right line-origin items mutually
        exclusive for the line tracking method."""
        if self.mainWin.sender() is self.trackMenuLineOriginLeft:
            self.trackMenuLineOriginLeft.setChecked(True)
            self.trackMenuLineOriginRight.setChecked(False)
        else:
            self.trackMenuLineOriginLeft.setChecked(False)
            self.trackMenuLineOriginRight.setChecked(True)
    def setPupilGradientDownsample(self):
        """Prompt for the downsample factor used by the gradients method;
        keep the old value if the dialog is cancelled."""
        val,ok = QtWidgets.QInputDialog.getDouble(self.mainWin,'Set pupil gradient downsample','fraction of pixels:',value=self.pupilGradientDownsample,min=0.1,max=1,decimals=2)
        if ok:
            self.pupilGradientDownsample = val
    def updatePupilPlot(self):
        """Draw the pupil center marker (and the fitted ellipse for the
        starburst method) on the image, or clear both when tracking failed."""
        if self.pupilFound and (self.reflectCenterSeed is None or self.reflectFound):
            self.pupilCenterPlot.setData(x=[self.pupilCenterSeed[0]],y=[self.pupilCenterSeed[1]])
            if self.trackMenuPupilMethodStarburst.isChecked():
                # sample the rotated ellipse every 10 degrees
                angle = self.pupilEllipseAngle*math.pi/180
                sinx = np.sin(np.arange(0,370,10)*math.pi/180)
                cosx = np.cos(np.arange(0,370,10)*math.pi/180)
                self.pupilEllipsePlot.setData(x=self.pupilCenterSeed[0]+self.pupilEllipseRadii[0]*cosx*math.cos(angle)-self.pupilEllipseRadii[1]*sinx*math.sin(angle),
                                              y=self.pupilCenterSeed[1]+self.pupilEllipseRadii[0]*cosx*math.sin(angle)+self.pupilEllipseRadii[1]*sinx*math.cos(angle))
        else:
            self.pupilCenterPlot.setData(x=[],y=[])
            self.pupilEllipsePlot.setData(x=[],y=[])
def updatePupilData(self):
if self.cam is None:
self.dataPlotIndex = self.frameNum-1
else:
leadingPtsInd = np.s_[self.dataPlotIndex+1:self.dataPlotIndex+math.ceil(self.numDataPlotPts/10)]
self.pupilArea[leadingPtsInd] = np.nan
self.pupilX[leadingPtsInd,:] = np.nan
self.pupilY[leadingPtsInd,:] = np.nan
if self.pupilFound and (self.reflectCenterSeed is None or self.reflectFound):
self.pupilCenter[self.dataPlotIndex,:] = self.pupilCenterSeed
if self.trackMenuPupilMethodStarburst.isChecked():
self.pupilArea[self.dataPlotIndex] = math.pi*self.pupilEllipseRadii[1]**2
if not np.isnan(self.mmPerPixel):
self.pupilArea[self.dataPlotIndex] *= self.mmPerPixel**2
elif self.trackMenuPupilMethodIntensity.isChecked():
self.pupilArea[self.dataPlotIndex] = self.pupilRoiIntensity
if self.reflectCenterSeed is None:
self.pupilX[self.dataPlotIndex] = self.pupilCenterSeed[0]
self.pupilY[self.dataPlotIndex] = self.pupilCenterSeed[1]
elif self.trackMenuReflectTypeSpot.isChecked() or not self.trackMenuPupilMethodStarburst.isChecked() or np.isnan(self.mmPerPixel):
self.pupilX[self.dataPlotIndex] = self.pupilCenterSeed[0]-self.reflectCenterSeed[0]
self.pupilY[self.dataPlotIndex] = self.pupilCenterSeed[1]-self.reflectCenterSeed[1]
else:
try:
pupilRotRadius = (self.lensRotRadius**2-(self.pupilEllipseRadii[1]*self.mmPerPixel)**2)**0.5-self.lensOffset
self.pupilX[self.dataPlotIndex],self.pupilY[self.dataPlotIndex] = [180/math.pi*math.asin((self.mmPerPixel*(self.reflectCenterSeed[i]-self.pupilCenterSeed[i])*pupilRotRadius/(pupilRotRadius-self.corneaOffset))/pupilRotRadius) for i in (0,1)]
except:
self.pupilArea[self.dataPlotIndex] = np.nan
self.pupilX[self.dataPlotIndex] = np.nan
self.pupilY[self.dataPlotIndex] = np.nan
else:
self.pupilArea[self.dataPlotIndex] = np.nan
self.pupilX[self.dataPlotIndex] = np.nan
self.pupilY[self.dataPlotIndex] = np.nan
def updatePupilDataPlot(self,updatePlotN=[True,True,True]):
if self.cam is None:
if self.frameNum>self.numDataPlotPts:
plotTime = self.dataPlotTime+(self.frameNum-self.numDataPlotPts)/self.frameRate
dataPlotInd = np.s_[self.frameNum-self.numDataPlotPts:self.frameNum]
else:
plotTime = self.dataPlotTime
dataPlotInd = np.s_[0:self.numDataPlotPts]
if updatePlotN[0]:
self.pupilAreaPlotItem.setXRange(plotTime[0],plotTime[-1])
if updatePlotN[1]:
self.pupilXPlotItem.setXRange(plotTime[0],plotTime[-1])
if updatePlotN[2]:
self.pupilYPlotItem.setXRange(plotTime[0],plotTime[-1])
else:
plotTime = self.dataPlotTime
dataPlotInd = np.s_[0:self.numDataPlotPts]
connectPts = np.logical_not(np.isnan(self.pupilX[dataPlotInd])).astype(np.uint32)
if updatePlotN[0]:
self.setDataPlotYRange(self.pupilAreaPlotItem,self.pupilAreaRange,np.nanmin(self.pupilArea[dataPlotInd]),np.nanmax(self.pupilArea[dataPlotInd]))
self.pupilAreaPlot.setData(x=plotTime,y=self.pupilArea[dataPlotInd],connect=connectPts)
if self.cam is None:
self.pupilAreaFrameNumLine.setValue((self.frameNum-1)/self.frameRate)
if updatePlotN[1]:
self.setDataPlotYRange(self.pupilXPlotItem,self.pupilXRange,np.nanmin(self.pupilX[dataPlotInd]),np.nanmax(self.pupilX[dataPlotInd]))
self.pupilXPlot.setData(x=plotTime,y=self.pupilX[dataPlotInd],connect=connectPts)
if self.cam is None:
self.pupilXFrameNumLine.setValue((self.frameNum-1)/self.frameRate)
if updatePlotN[2]:
self.setDataPlotYRange(self.pupilYPlotItem,self.pupilYRange,np.nanmin(self.pupilY[dataPlotInd]),np.nanmax(self.pupilY[dataPlotInd]))
self.pupilYPlot.setData(x=plotTime,y=self.pupilY[dataPlotInd],connect=connectPts)
if self.cam is None:
self.pupilYFrameNumLine.setValue((self.frameNum-1)/self.frameRate)
    def setDataPlotXRange(self):
        """Set all three data plots to span [0, dataPlotDur] seconds with a
        common tick spacing."""
        self.pupilAreaPlotItem.setXRange(0,self.dataPlotDur)
        self.pupilXPlotItem.setXRange(0,self.dataPlotDur)
        self.pupilYPlotItem.setXRange(0,self.dataPlotDur)
        tickSpacing = self.getTickSpacing(self.dataPlotDur)
        self.pupilAreaPlotItem.getAxis('bottom').setTickSpacing(levels=[(tickSpacing,0)])
        self.pupilXPlotItem.getAxis('bottom').setTickSpacing(levels=[(tickSpacing,0)])
        self.pupilYPlotItem.getAxis('bottom').setTickSpacing(levels=[(tickSpacing,0)])
    def setDataPlotYRange(self,dataPlotItem,dataPlotRange,Ymin,Ymax):
        """Expand a data plot's y range (mutating *dataPlotRange* in place)
        when the new min/max fall outside the current comfortable range.

        NOTE(review): midRange is the half-width of the range but is compared
        against the absolute Ymin/Ymax values — confirm this hysteresis is
        intended rather than the range midpoint.
        """
        if not np.isnan(Ymin) and not np.isnan(Ymax):
            midRange = (dataPlotRange[1]-dataPlotRange[0])/2
            if not dataPlotRange[0]<Ymin<midRange or not midRange<Ymax<dataPlotRange[1]:
                # pad by 20% so the trace does not hug the axis
                dataPlotRange[0] = Ymin*0.8 if Ymin>0 else Ymin*1.2
                dataPlotRange[1] = Ymax*1.2 if Ymax>0 else Ymax*0.8
                dataPlotItem.setYRange(dataPlotRange[0],dataPlotRange[1])
                dataPlotItem.getAxis('left').setTickSpacing(levels=[(self.getTickSpacing(dataPlotRange[1]-dataPlotRange[0]),0)])
def getTickSpacing(self,dataRange):
spacing = 10**(math.floor(math.log10(dataRange)))
spacing *= 0.5*(dataRange//spacing)
return spacing
    def updatePupilTrackParamPlots(self):
        """Refresh the parameter-setup plots shown while the find-pupil
        button is checked: radial intensity profiles with the edge threshold,
        consecutive-pixel counts, and (starburst only) edge-point distances
        with their outlier-rejection thresholds.
        """
        # recall trackPupil() first so that measurments reflect calculated pupil center
        self.trackPupil()
        self.updatePupilPlot()
        xmax = 0
        for i in range(len(self.radialProfilePlot)):
            if any(self.radialProfiles[i]):
                # extend the x range to the last nonzero profile sample
                xmax = max([xmax,np.where(self.radialProfiles[i])[0][-1]])
            self.radialProfilePlot[i].setData(self.radialProfiles[i])
            self.radialProfilePixAboveThreshPlot[i].setData(np.correlate(self.radialProfiles[i]>self.pupilEdgeThresh,np.ones(self.minNumPixAboveThresh)))
        xTickSpacing = self.getTickSpacing(xmax)
        self.pupilAreaPlotItem.setRange(xRange=(0,xmax),yRange=(max(0,2*self.pupilEdgeThresh-255),min(255,2*self.pupilEdgeThresh)))
        self.pupilAreaPlotItem.getAxis('left').setTickSpacing(levels=[(self.getTickSpacing(self.pupilEdgeThresh*2),0)])
        self.pupilAreaPlotItem.getAxis('bottom').setTickSpacing(levels=[(xTickSpacing,0)])
        self.pupilXPlotItem.setRange(xRange=(0,xmax),yRange=(0,2*self.minNumPixAboveThresh))
        self.pupilXPlotItem.getAxis('left').setTickSpacing(levels=[(round(self.minNumPixAboveThresh/2),0)])
        self.pupilXPlotItem.getAxis('bottom').setTickSpacing(levels=[(xTickSpacing,0)])
        if self.trackMenuPupilMethodStarburst.isChecked():
            self.pupilEdgePtsPlot.setData(self.pupilEdges)
            if self.pupilEdges.shape[0]>0:
                self.edgeDistPlot.setData(x=np.arange(self.pupilEdgeDist.size)+1,y=self.pupilEdgeDist)
                self.edgeDistUpperThreshLine.setValue(self.pupilEdgeDist.mean()+self.edgeDistThresh[1]*self.pupilEdgeDist.std())
                self.edgeDistLowerThreshLine.setValue(self.pupilEdgeDist.mean()+self.edgeDistThresh[0]*self.pupilEdgeDist.std())
                self.pupilYPlotItem.setRange(xRange=(1,self.pupilEdgeDist.size),yRange=(0,max(np.append(self.pupilEdgeDist,self.edgeDistUpperThreshLine.value()))))
                self.pupilYPlotItem.getAxis('left').setTickSpacing(levels=[(self.getTickSpacing(self.pupilEdgeDist.mean()*2),0)])
                self.pupilYPlotItem.getAxis('bottom').setTickSpacing(levels=[(self.getTickSpacing(self.pupilEdges.shape[0]),0)])
            else:
                # no edge points: show an empty, unit-scaled plot
                self.edgeDistPlot.setData(x=[],y=[])
                self.edgeDistUpperThreshLine.setValue(2)
                self.edgeDistLowerThreshLine.setValue(-1)
                self.pupilYPlotItem.setRange(xRange=(0,1),yRange=(0,1))
                self.pupilYPlotItem.getAxis('left').setTickSpacing(levels=[(1,0)])
                self.pupilYPlotItem.getAxis('bottom').setTickSpacing(levels=[(1,0)])
    def setPupilEdgeThresh(self):
        """Adopt the dragged threshold line's value as the pupil edge
        threshold and re-track."""
        self.pupilEdgeThresh = self.pupilEdgeThreshLine.value()
        self.trackPupil()
        self.updatePupilPlot()
        self.updatePupilTrackParamPlots()
    def setMinNumPixAboveThresh(self):
        """Adopt the dragged line's value as the minimum consecutive
        above-threshold pixel count, rebuild the edge filter, and re-track."""
        self.minNumPixAboveThresh = int(round(self.numPixAboveThreshLine.value()))
        self.edgeFilt = np.ones(self.minNumPixAboveThresh)
        self.trackPupil()
        self.updatePupilPlot()
        self.updatePupilTrackParamPlots()
    def setEdgeDistThresh(self):
        """Adopt the dragged upper/lower lines as edge-distance outlier
        thresholds, expressed in standard deviations from the mean distance,
        and re-track. Lines dragged past the mean are pushed back out."""
        meanEdgeDist = self.pupilEdgeDist.mean()
        upperThresh = self.edgeDistUpperThreshLine.value()
        if upperThresh<meanEdgeDist:
            upperThresh = math.ceil(1.2*meanEdgeDist)
        lowerThresh = self.edgeDistLowerThreshLine.value()
        if lowerThresh>meanEdgeDist:
            lowerThresh = math.floor(0.8*meanEdgeDist)
        # store as z-scores so the thresholds scale with the distance spread
        self.edgeDistThresh = (np.array([lowerThresh,upperThresh])-meanEdgeDist)/self.pupilEdgeDist.std()
        self.trackPupil()
        self.updatePupilPlot()
        self.updatePupilTrackParamPlots()
    def findReflect(self):
        """Toggle reflection-setup mode.

        Checked: show the reflection ROIs for placement. Unchecked: cache
        their positions/sizes, build the ring template if needed, and run
        reflection (and dependent pupil) tracking.
        """
        if self.image is None:
            return
        elif self.findReflectButton.isChecked():
            self.turnOffButtons(source=self.findReflectButton)
            self.reflectCenterPlot.setData(x=[],y=[])
            for i,roi in enumerate(self.reflectRoi):
                roi.setPos(self.reflectRoiPos[i])
                roi.setSize(self.reflectRoiSize[i])
                roi.setVisible(True)
        elif len(self.reflectRoi)>0:
            self.reflectRoiPos = []
            self.reflectRoiSize = []
            for roi in self.reflectRoi:
                roi.setVisible(False)
                self.reflectRoiPos.append([int(n) for n in roi.pos()])
                self.reflectRoiSize.append([int(n) for n in roi.size()])
            # spot mode needs one ROI; ring mode needs at least two spots
            if self.trackMenuReflectTypeSpot.isChecked() or len(self.reflectRoi)>1:
                if self.trackMenuReflectTypeRing.isChecked():
                    self.getReflectTemplate()
                    if not self.reflectFound:
                        return
                self.trackReflect()
                if self.reflectFound:
                    self.reflectCenterPlot.setData(x=[self.reflectCenterSeed[0]],y=[self.reflectCenterSeed[1]])
                    if self.pupilCenterSeed is not None:
                        self.updatePupilData()
                        self.updatePupilDataPlot()
def trackReflect(self):
self.reflectFound = False
if not self.setDataNan:
roiPos,roiSize = self.reflectRoiPos[0],self.reflectRoiSize[0]
if self.trackMenuReflectTypeSpot.isChecked():
y,x = np.where(self.image[self.roiInd][roiPos[1]:roiPos[1]+roiSize[1],roiPos[0]:roiPos[0]+roiSize[0]]>self.reflectThresh)
if any(y):
self.reflectCenterSeed = (roiPos[0]+x.mean(),roiPos[1]+y.mean())
else:
return
else:
y,x = np.unravel_index(np.argmax(scipy.signal.fftconvolve(self.image[self.roiInd][roiPos[1]:roiPos[1]+roiSize[1],roiPos[0]:roiPos[0]+roiSize[0]],self.reflectTemplate,mode='same')),roiSize)
center = (roiPos[0]+x,roiPos[1]+y)
if any((center[i]-roiSize[i]<0 or center[i]+roiSize[i]>self.roiSize[i]-1 for i in [0,1])):
return
self.reflectCenterSeed = center
self.reflectRoiPos = [[int(self.reflectCenterSeed[0]-roiSize[0]/2),int(self.reflectCenterSeed[1]-roiSize[1]/2)]]
self.reflectFound = True
if self.cam is None:
self.reflectCenter[self.frameNum-1,:] = self.reflectCenterSeed
else:
self.reflectCenter[self.dataPlotIndex,:] = self.reflectCenterSeed
def getReflectTemplate(self):
spotCenters = np.zeros((len(self.reflectRoi),2))
ptsAboveThresh = []
for i,(roiPos,roiSize) in enumerate(zip(self.reflectRoiPos,self.reflectRoiSize)):
y,x = np.where(self.image[self.roiInd][roiPos[1]:roiPos[1]+roiSize[1],roiPos[0]:roiPos[0]+roiSize[0]]>self.reflectThresh)
if any(y):
spotCenters[i,:] = (roiPos[0]+x.mean(),roiPos[1]+y.mean())
ptsAboveThresh = min(ptsAboveThresh,len(y))
else:
self.reflectFound = False
return
self.reflectFound = True
for roi in self.reflectRoi[1:]:
self.imageViewBox.removeItem(roi)
del(self.reflectRoi[1:])
self.reflectCenterSeed = spotCenters.mean(axis=0)
roiSize = 4*int(max(spotCenters.max(axis=0)-spotCenters.min(axis=0)))
self.reflectRoiSize = [[roiSize]*2]
self.reflectTemplate = np.zeros((roiSize,)*2,dtype=bool)
self.reflectRoiPos = [[int(self.reflectCenterSeed[0]-roiSize/2),int(self.reflectCenterSeed[1]-roiSize/2)]]
spotCenters = (spotCenters-(self.reflectCenterSeed-roiSize/2)).astype(int)
m,n = int(ptsAboveThresh/2),int(round(ptsAboveThresh/2))
for center in spotCenters:
self.reflectTemplate[center[1]-m:center[1]+n,center[0]-m:center[0]+n] = True
    def setReflectType(self):
        """Menu handler: make the spot/ring reflection-type items mutually
        exclusive and discard all existing reflection ROIs and the seed,
        since the two modes use incompatible ROI sets."""
        if self.mainWin.sender() is self.trackMenuReflectTypeSpot:
            self.trackMenuReflectTypeSpot.setChecked(True)
            self.trackMenuReflectTypeRing.setChecked(False)
        else:
            self.trackMenuReflectTypeSpot.setChecked(False)
            self.trackMenuReflectTypeRing.setChecked(True)
        if len(self.reflectRoi)>0:
            for roi in self.reflectRoi:
                self.imageViewBox.removeItem(roi)
            self.reflectRoi = []
        self.reflectCenterSeed = None
        self.reflectCenterPlot.setData(x=[],y=[])
    def setReflectThresh(self):
        """Prompt for the reflection pixel-intensity threshold; keep the old
        value if the dialog is cancelled."""
        val,ok = QtWidgets.QInputDialog.getInt(self.mainWin,'Set Reflection Threshold','Pixel intensity:',value=self.reflectThresh,min=0,max=254)
        if ok:
            self.reflectThresh = val
def turnOffButtons(self,source=None):
for button in self.buttons:
if button is not source and button.isChecked():
button.click()
def addFrameNumLines(self):
    """Add the current-frame indicator line to each pupil data plot."""
    self.pupilAreaPlotItem.addItem(self.pupilAreaFrameNumLine)
    self.pupilXPlotItem.addItem(self.pupilXFrameNumLine)
    self.pupilYPlotItem.addItem(self.pupilYFrameNumLine)
def removeFrameNumLines(self):
    """Remove the current-frame indicator line from each pupil data plot."""
    self.pupilAreaPlotItem.removeItem(self.pupilAreaFrameNumLine)
    self.pupilXPlotItem.removeItem(self.pupilXFrameNumLine)
    self.pupilYPlotItem.removeItem(self.pupilYFrameNumLine)
def frameNumLineDragged(self):
    """Keep all frame-number indicator lines in sync with the dragged one."""
    dragged = self.mainWin.sender()
    for other in self.frameNumLines:
        if other is not dragged:
            other.setValue(dragged.value())
def frameNumLinePosChangeFin(self):
    """After a drag finishes, convert the line's time position to a 1-based
    frame number and push it into the frame spin box."""
    source = self.mainWin.sender()
    self.frameNumSpinBox.setValue(round(source.value()*self.frameRate)+1)
def goToFrame(self):
    """Seek the video to the frame selected in the spin box and redraw."""
    self.frameNum = self.frameNumSpinBox.value()
    if self.videoIn is not None:
        # OpenCV frame positions are 0-based; the spin box is 1-based.
        self.videoIn.set(cv2.CAP_PROP_POS_FRAMES,self.frameNum-1)
        self.getVideoImage()
        self.updateDisplay(showAll=True)
def changePlotWindowDur(self,fullRange=False):
    """Change the duration (seconds) of the scrolling data-plot window.

    With fullRange=True the window spans the whole video; otherwise the
    value is read from the duration edit box and clamped to a valid range.
    """
    if fullRange:
        newVal = self.numFrames/self.frameRate
    else:
        newVal = float(self.plotDurEdit.text())
    if self.cam is None:
        # Playing back a file: clamp between 3 frames and the full video.
        if newVal<3/self.frameRate:
            newVal = 3/self.frameRate
        elif newVal>self.numFrames/self.frameRate:
            newVal = self.numFrames/self.frameRate
    else:
        # Live camera: with an unknown frame rate treat the value as a
        # frame count (minimum 3); otherwise enforce the 3-frame minimum.
        if np.isnan(self.frameRate):
            newVal = 3 if newVal<3 else int(newVal)
        elif newVal<3/self.frameRate:
            newVal = 3/self.frameRate
    self.plotDurEdit.setText(str(round(newVal,3)))
    self.dataPlotDur = newVal
    if self.cam is not None:
        # Live acquisition buffers are sized from the window duration.
        self.resetPupilData()
    self.setDataPlotTime()
    self.setDataPlotXRange()
    if all(np.isnan(self.pupilX)):
        self.resetPupilDataPlot()
    else:
        self.updatePupilDataPlot()
def setDataPlotDur(self):
    """Initialize the data-plot window duration and the x-axis label.

    Falls back to a 60-unit window labelled 'Frame' when the frame rate is
    unknown, and shrinks the default to the video length when shorter.
    """
    self.dataPlotDur = self.defaultDataPlotDur
    self.pupilYPlotItem.setLabel('bottom','Time (s)')
    if np.isnan(self.frameRate):
        # No frame rate: plot against frame index instead of seconds.
        self.dataPlotDur = 60
        self.pupilYPlotItem.setLabel('bottom','Frame')
    elif self.cam is None and self.defaultDataPlotDur>self.numFrames/self.frameRate:
        self.dataPlotDur = self.numFrames/self.frameRate
    self.plotDurEdit.setText(str(round(self.dataPlotDur,3)))
def setDataPlotTime(self):
    """Recompute the x-axis sample vector for the data plots and cache its
    length in ``self.numDataPlotPts``."""
    if np.isnan(self.frameRate):
        # Unknown frame rate: one sample per frame index.
        self.dataPlotTime = np.arange(self.dataPlotDur)
    else:
        # Known frame rate: samples in seconds; the half-frame end offset
        # avoids an extra point from floating-point rounding.
        self.dataPlotTime = np.arange(0,self.dataPlotDur-0.5/self.frameRate,1/self.frameRate)
    self.numDataPlotPts = self.dataPlotTime.size
def setMmPerPix(self):
    """Prompt for the mm-per-pixel calibration; 0 or cancel keeps/sets NaN."""
    # NaN marks "uncalibrated"; show 0 in the dialog for that state.
    val = 0 if np.isnan(self.mmPerPixel) else self.mmPerPixel
    val,ok = QtWidgets.QInputDialog.getDouble(self.mainWin,'Set mm/pixel','mm/pixel:',value=val,min=0,decimals=4)
    if ok:
        self.mmPerPixel = val if val>0 else np.nan
def measureMmPerPix(self):
    """Measure mm-per-pixel by averaging the reflection center before and
    after the user moves the camera a known 0.5 mm."""
    if self.reflectCenterSeed is None:
        QtWidgets.QMessageBox.about(self.mainWin,'Set mm/pixel','First find reflection')
    else:
        # Average over ~0.5 s of samples (or fewer if the buffer is shorter).
        p = int(0.5*self.frameRate)
        avgPts = p if self.numDataPlotPts>=p else self.numDataPlotPts
        initialReflectCenter = self.getAvgReflectCenter(avgPts)
        # The modal dialog blocks until the user has moved the camera.
        QtWidgets.QMessageBox.about(self.mainWin,'Set mm/pixel','Move camera 0.5 mm; then press ok')
        finalReflectCenter = self.getAvgReflectCenter(avgPts)
        # 0.5 mm divided by the pixel displacement of the reflection.
        self.mmPerPixel = 0.5/np.sqrt(np.sum((finalReflectCenter-initialReflectCenter)**2))
def getAvgReflectCenter(self,avgPts):
    """Return the mean (x, y) reflection center over the last ``avgPts``
    samples of the circular data buffer.

    Tiling the buffer twice then slicing up to numDataPlotPts+dataPlotIndex
    linearizes the ring buffer so the trailing ``avgPts`` rows are the most
    recent samples in order.
    """
    return np.mean(np.tile(self.reflectCenter,(2,1))[:self.numDataPlotPts+self.dataPlotIndex,:][-avgPts:,:],axis=0)
def analyzeAllFrames(self):
    """Process every remaining video frame; only render the display (and
    widen the plot window to the full range) on the final frame, keeping
    the batch run fast."""
    while self.frameNum<self.numFrames:
        self.frameNum += 1
        self.getVideoImage()
        if self.frameNum==self.numFrames:
            self.updateDisplay(showAll=True)
            self.changePlotWindowDur(fullRange=True)
        else:
            # Skip rendering for intermediate frames.
            self.updateDisplay(showNone=True)
def pixToDeg(self):
    """Convert pupil position from pixels to degrees and area to mm^2,
    then refresh the data plots.

    Uses the spherical-eye model parameters (lensRotRadius, lensOffset,
    corneaOffset) together with the mm/pixel calibration.
    NOTE(review): assumes pupilX/pupilY currently hold pixel offsets from
    the reflection center (as produced by degToPix) -- verify at call sites.
    """
    pupilEllipseRadius = (self.pupilArea/math.pi)**0.5
    # Radius of the pupil's rotation circle in mm, corrected for lens offset.
    pupilRotRadius = (self.lensRotRadius**2-(pupilEllipseRadius*self.mmPerPixel)**2)**0.5-self.lensOffset
    self.pupilX,self.pupilY = [180/math.pi*np.arcsin((self.mmPerPixel*(self.reflectCenter[:,i]-self.pupilCenter[:,i])*pupilRotRadius/(pupilRotRadius-self.corneaOffset))/pupilRotRadius) for i in (0,1)]
    # Area converts by the square of the linear calibration factor.
    self.pupilArea *= self.mmPerPixel**2
    self.updatePupilDataPlot()
def degToPix(self):
    """Inverse of pixToDeg: restore pupil area to pixel^2 and position to
    pixel offsets from the reflection center, then refresh the plots."""
    self.pupilArea /= self.mmPerPixel**2
    self.pupilX,self.pupilY = [self.pupilCenter[:,i]-self.reflectCenter[:,i] for i in (0,1)]
    self.updatePupilDataPlot()
def plotFrameIntervals(self):
    """Show a matplotlib figure of frame IDs and inter-frame intervals (ms).

    Does nothing when no frame timestamps were recorded.
    """
    if np.all(np.isnan(self.frameTimes)):
        return
    frameNum = np.arange(1,self.numFrames+1)
    # Convert seconds to milliseconds for readability.
    frameIntervals = np.diff(self.frameTimes)*1e3
    fig = plt.figure()
    ax = fig.add_subplot(2,1,1)
    ax.plot(frameNum,self.frameID)
    ax.set_ylabel('Frame ID')
    ax = fig.add_subplot(2,1,2)
    # Intervals start at the second frame (np.diff drops one element).
    ax.plot(frameNum[1:],frameIntervals)
    ax.set_ylim([0,plt.get(ax,'ylim')[1]])
    ax.set_xlabel('Frame Number')
    ax.set_ylabel('Frame Interval (ms)')
    plt.show()
def findSaccades(self):
    """Detect saccades as local extrema of smoothed pupil velocity that
    exceed ``saccadeThresh`` standard deviations, then prune peaks that
    violate the refractory period and update the plots."""
    # find peaks in pupil velocity
    vel,t = self.getPupilVelocity()
    thresh = self.saccadeThresh*np.nanstd(vel)
    # A peak must exceed the threshold and be a strict local min/max
    # (smaller/larger than both neighbors).
    self.negSaccades = np.where((vel<-thresh) & np.concatenate(([False],vel[1:]<vel[:-1])) & np.concatenate((vel[:-1]<vel[1:],[False])))[0]
    self.posSaccades = np.where((vel>thresh) & np.concatenate(([False],vel[1:]>vel[:-1])) & np.concatenate((vel[:-1]>vel[1:],[False])))[0]
    # remove peaks that are too close in time
    self.negSaccades = self.negSaccades[np.concatenate(([True],np.diff(t[self.negSaccades])>self.saccadeRefractoryPeriod))]
    self.posSaccades = self.posSaccades[np.concatenate(([True],np.diff(t[self.posSaccades])>self.saccadeRefractoryPeriod))]
    # remove negative peaks too closely following positive peaks and vice versa
    # peakTimeDiff[i, j] = t(neg_j) - t(pos_i); a peak survives only if it is
    # outside the refractory window of every opposite-sign peak.
    peakTimeDiff = t[self.negSaccades]-t[self.posSaccades][:,None]
    self.negSaccades = self.negSaccades[np.all(np.logical_or(peakTimeDiff<0,peakTimeDiff>self.saccadeRefractoryPeriod),axis=0)]
    self.posSaccades = self.posSaccades[np.all(np.logical_or(peakTimeDiff>0,peakTimeDiff<-self.saccadeRefractoryPeriod),axis=1)]
    self.selectedSaccade = None
    self.plotSaccades()
def getPupilVelocity(self):
    """Return (smoothed horizontal pupil velocity, time vector).

    Velocity is the finite difference of pupilX over time, smoothed with a
    boxcar of ``saccadeSmoothPts`` samples; edge samples (where the
    convolution window is truncated) are replaced by the local mean.
    """
    if np.all(np.isnan(self.frameTimes)):
        # No recorded timestamps: derive time from the nominal frame rate.
        t = np.arange(self.numFrames)/self.frameRate
    else:
        t = self.frameTimes
    n = self.saccadeSmoothPts//2
    vel = np.diff(self.pupilX)/np.diff(t)
    velSmoothed = np.convolve(vel,np.ones(self.saccadeSmoothPts)/self.saccadeSmoothPts,mode='same')
    # Patch the convolution edge effects with flat segments.
    velSmoothed[:n] = vel[:n].mean()
    velSmoothed[-n:] = vel[-n:].mean()
    return velSmoothed,t
def plotSaccades(self):
    """Mark detected saccade frames on the pupil-X trace.

    NOTE(review): time here is derived from frameRate even when frameTimes
    exist, unlike findSaccades -- confirm this mismatch is intentional.
    """
    t = np.arange(self.numFrames)/self.frameRate
    self.negSaccadesPlot.setData(x=t[self.negSaccades],y=self.pupilX[self.negSaccades])
    self.posSaccadesPlot.setData(x=t[self.posSaccades],y=self.pupilX[self.posSaccades])
def getSaccadesOnDisplay(self):
    """Return the sorted saccade frame indices that fall inside the data
    window currently shown on screen."""
    saccades = np.sort(np.concatenate((self.negSaccades,self.posSaccades)))
    if saccades.size>0:
        if self.frameNum>self.numDataPlotPts:
            # Window scrolls with playback once it is full.
            onDisplay = np.logical_and(saccades>self.frameNum-self.numDataPlotPts,saccades<self.frameNum)
        else:
            # Window not yet full: everything up to its capacity is visible.
            # NOTE(review): compares against numDataPlotPts rather than
            # frameNum here -- confirm saccades past the current frame
            # should count as "on display".
            onDisplay = saccades<self.numDataPlotPts
        saccades = saccades[onDisplay]
    return saccades
def deleteAllSaccades(self):
    """Clear saccade markers from both plots and reset the stored indices."""
    for markerPlot in (self.negSaccadesPlot, self.posSaccadesPlot):
        markerPlot.setData(x=[], y=[])
    self.negSaccades = np.array([], dtype=int)
    self.posSaccades = self.negSaccades.copy()
    self.selectedSaccade = None
def setSaccadeSmooth(self):
    """Prompt for the velocity-smoothing window size (in samples, >= 1)."""
    val,ok = QtWidgets.QInputDialog.getInt(self.mainWin,'Set Saccade Smoothing','number of points:',value=self.saccadeSmoothPts,min=1)
    if ok:
        self.saccadeSmoothPts = val
def setSaccadeThresh(self):
    """Prompt for the saccade detection threshold (std deviations, >= 0.1)."""
    val,ok = QtWidgets.QInputDialog.getDouble(self.mainWin,'Set Saccade Threshold','standard devations from baseline:',value=self.saccadeThresh,min=0.1,decimals=1)
    if ok:
        self.saccadeThresh = val
def setSaccadeRefractoryPeriod(self):
    """Prompt for the minimum time (s) required between detected saccades."""
    val,ok = QtWidgets.QInputDialog.getDouble(self.mainWin,'Set Saccade Refractory Period','seconds:',value=self.saccadeRefractoryPeriod,min=0.001,decimals=3)
    if ok:
        self.saccadeRefractoryPeriod = val
if __name__=="__main__":
start() | mit |
MartinDelzant/scikit-learn | sklearn/tests/test_dummy.py | 186 | 17778 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from sklearn.base import clone
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.stats import _weighted_percentile
from sklearn.dummy import DummyClassifier, DummyRegressor
@ignore_warnings
def _check_predict_proba(clf, X, y):
    """Check predict_proba shapes, row sums, and agreement with
    predict_log_proba for a fitted dummy classifier on (X, y)."""
    proba = clf.predict_proba(X)
    # We know that we can have division by zero
    log_proba = clf.predict_log_proba(X)
    # Normalize y to 2d (n_samples, n_outputs) so single- and multi-output
    # targets share the same checking loop below.
    y = np.atleast_1d(y)
    if y.ndim == 1:
        y = np.reshape(y, (-1, 1))
    n_outputs = y.shape[1]
    n_samples = len(X)
    if n_outputs == 1:
        # Single output: wrap in a list to mirror the multi-output layout.
        proba = [proba]
        log_proba = [log_proba]
    for k in range(n_outputs):
        assert_equal(proba[k].shape[0], n_samples)
        assert_equal(proba[k].shape[1], len(np.unique(y[:, k])))
        # Probabilities over classes must sum to one per sample.
        assert_array_equal(proba[k].sum(axis=1), np.ones(len(X)))
        # We know that we can have division by zero
        assert_array_equal(np.log(proba[k]), log_proba[k])
def _check_behavior_2d(clf):
    """Fit a clone of `clf` on 1d and 2d targets and check that predictions
    keep the target's shape in both cases."""
    X = np.array([[0], [0], [0], [0]])  # features are ignored by dummies
    targets = (np.array([1, 2, 1, 1]),          # 1d case
               np.array([[1, 0],
                         [2, 0],
                         [1, 0],
                         [1, 3]]))               # 2d case
    for y in targets:
        estimator = clone(clf)
        estimator.fit(X, y)
        predictions = estimator.predict(X)
        assert_equal(y.shape, predictions.shape)
def _check_behavior_2d_for_constant(clf):
    """Fit a clone of `clf` on a 5-output 2d target and check that the
    prediction shape matches the target shape."""
    X = np.array([[0], [0], [0], [0]])  # features are ignored by dummies
    y = np.array([[1, 0, 5, 4, 3],
                  [2, 0, 1, 2, 5],
                  [1, 0, 4, 5, 2],
                  [1, 3, 3, 2, 0]])
    estimator = clone(clf)
    estimator.fit(X, y)
    predictions = estimator.predict(X)
    assert_equal(y.shape, predictions.shape)
def _check_equality_regressor(statistic, y_learn, y_pred_learn,
                              y_test, y_pred_test):
    """Check that both prediction sets equal `statistic` tiled over the
    corresponding number of samples."""
    for y, y_pred in ((y_learn, y_pred_learn), (y_test, y_pred_test)):
        expected = np.tile(statistic, (y.shape[0], 1))
        assert_array_equal(expected, y_pred)
def test_most_frequent_and_prior_strategy():
    """Both strategies predict the modal class; their probabilities differ:
    'prior' returns class priors, 'most_frequent' a one-hot indicator."""
    X = [[0], [0], [0], [0]]  # ignored
    y = [1, 2, 1, 1]
    for strategy in ("most_frequent", "prior"):
        clf = DummyClassifier(strategy=strategy, random_state=0)
        clf.fit(X, y)
        assert_array_equal(clf.predict(X), np.ones(len(X)))
        _check_predict_proba(clf, X, y)
        if strategy == "prior":
            assert_array_equal(clf.predict_proba([X[0]]),
                               clf.class_prior_.reshape((1, -1)))
        else:
            # most_frequent: probability 1 for the majority class only.
            assert_array_equal(clf.predict_proba([X[0]]),
                               clf.class_prior_.reshape((1, -1)) > 0.5)
def test_most_frequent_and_prior_strategy_multioutput():
    """Multi-output targets: each output predicts its own modal class."""
    X = [[0], [0], [0], [0]]  # ignored
    y = np.array([[1, 0],
                  [2, 0],
                  [1, 0],
                  [1, 3]])
    n_samples = len(X)
    for strategy in ("prior", "most_frequent"):
        clf = DummyClassifier(strategy=strategy, random_state=0)
        clf.fit(X, y)
        # Modal class is 1 for the first output and 0 for the second.
        assert_array_equal(clf.predict(X),
                           np.hstack([np.ones((n_samples, 1)),
                                      np.zeros((n_samples, 1))]))
        _check_predict_proba(clf, X, y)
        _check_behavior_2d(clf)
def test_stratified_strategy():
    """Stratified predictions should approximate the class frequencies of
    the training labels over many samples."""
    X = [[0]] * 5  # ignored
    y = [1, 2, 1, 1, 2]
    clf = DummyClassifier(strategy="stratified", random_state=0)
    clf.fit(X, y)
    # Predict on many samples so empirical frequencies converge.
    X = [[0]] * 500
    y_pred = clf.predict(X)
    p = np.bincount(y_pred) / float(len(X))
    assert_almost_equal(p[1], 3. / 5, decimal=1)
    assert_almost_equal(p[2], 2. / 5, decimal=1)
    _check_predict_proba(clf, X, y)
def test_stratified_strategy_multioutput():
    """Stratified strategy reproduces per-output class frequencies for
    multi-output targets."""
    X = [[0]] * 5  # ignored
    y = np.array([[2, 1],
                  [2, 2],
                  [1, 1],
                  [1, 2],
                  [1, 1]])
    clf = DummyClassifier(strategy="stratified", random_state=0)
    clf.fit(X, y)
    X = [[0]] * 500
    y_pred = clf.predict(X)
    # Both outputs happen to have the same 3/5 vs 2/5 class split.
    for k in range(y.shape[1]):
        p = np.bincount(y_pred[:, k]) / float(len(X))
        assert_almost_equal(p[1], 3. / 5, decimal=1)
        assert_almost_equal(p[2], 2. / 5, decimal=1)
        _check_predict_proba(clf, X, y)
    _check_behavior_2d(clf)
def test_uniform_strategy():
    """Uniform strategy predicts each class with equal probability,
    regardless of the training-label frequencies."""
    X = [[0]] * 4  # ignored
    y = [1, 2, 1, 1]
    clf = DummyClassifier(strategy="uniform", random_state=0)
    clf.fit(X, y)
    X = [[0]] * 500
    y_pred = clf.predict(X)
    p = np.bincount(y_pred) / float(len(X))
    # Two classes -> each should appear ~50% of the time.
    assert_almost_equal(p[1], 0.5, decimal=1)
    assert_almost_equal(p[2], 0.5, decimal=1)
    _check_predict_proba(clf, X, y)
def test_uniform_strategy_multioutput():
    """Uniform strategy gives equal class probabilities per output for
    multi-output targets."""
    X = [[0]] * 4  # ignored
    y = np.array([[2, 1],
                  [2, 2],
                  [1, 2],
                  [1, 1]])
    clf = DummyClassifier(strategy="uniform", random_state=0)
    clf.fit(X, y)
    X = [[0]] * 500
    y_pred = clf.predict(X)
    for k in range(y.shape[1]):
        p = np.bincount(y_pred[:, k]) / float(len(X))
        assert_almost_equal(p[1], 0.5, decimal=1)
        assert_almost_equal(p[2], 0.5, decimal=1)
        _check_predict_proba(clf, X, y)
    _check_behavior_2d(clf)
def test_string_labels():
    """most_frequent must echo the modal string label for every sample."""
    X = [[0]] * 5
    y = ["paris", "paris", "tokyo", "amsterdam", "berlin"]
    clf = DummyClassifier(strategy="most_frequent")
    clf.fit(X, y)
    expected = ["paris"] * 5  # 'paris' appears twice, all others once
    assert_array_equal(clf.predict(X), expected)
def test_classifier_exceptions():
    """An unknown strategy fails at fit; predicting unfitted also fails."""
    clf = DummyClassifier(strategy="unknown")
    assert_raises(ValueError, clf.fit, [], [])
    # The classifier was never successfully fitted.
    assert_raises(ValueError, clf.predict, [])
    assert_raises(ValueError, clf.predict_proba, [])
def test_mean_strategy_regressor():
    """The default DummyRegressor predicts the training-target mean."""
    rng = np.random.RandomState(seed=1)
    X = [[0]] * 4  # features are ignored
    y = rng.randn(4)
    reg = DummyRegressor()
    reg.fit(X, y)
    expected = [np.mean(y)] * len(X)
    assert_array_equal(reg.predict(X), expected)
def test_mean_strategy_multioutput_regressor():
    """Multi-output mean strategy predicts the per-column training mean for
    both seen and unseen samples."""
    random_state = np.random.RandomState(seed=1)
    X_learn = random_state.randn(10, 10)
    y_learn = random_state.randn(10, 5)
    mean = np.mean(y_learn, axis=0).reshape((1, -1))
    X_test = random_state.randn(20, 10)
    y_test = random_state.randn(20, 5)
    # Correctness oracle
    est = DummyRegressor()
    est.fit(X_learn, y_learn)
    y_pred_learn = est.predict(X_learn)
    y_pred_test = est.predict(X_test)
    _check_equality_regressor(mean, y_learn, y_pred_learn, y_test, y_pred_test)
    _check_behavior_2d(est)
def test_regressor_exceptions():
    """Predicting with an unfitted DummyRegressor must raise ValueError."""
    unfitted = DummyRegressor()
    assert_raises(ValueError, unfitted.predict, [])
def test_median_strategy_regressor():
    """strategy='median' predicts the training-target median everywhere."""
    rng = np.random.RandomState(seed=1)
    X = [[0]] * 5  # features are ignored
    y = rng.randn(5)
    reg = DummyRegressor(strategy="median")
    reg.fit(X, y)
    expected = [np.median(y)] * len(X)
    assert_array_equal(reg.predict(X), expected)
def test_median_strategy_multioutput_regressor():
    """Multi-output median strategy predicts the per-column training median
    for both seen and unseen samples."""
    random_state = np.random.RandomState(seed=1)
    X_learn = random_state.randn(10, 10)
    y_learn = random_state.randn(10, 5)
    median = np.median(y_learn, axis=0).reshape((1, -1))
    X_test = random_state.randn(20, 10)
    y_test = random_state.randn(20, 5)
    # Correctness oracle
    est = DummyRegressor(strategy="median")
    est.fit(X_learn, y_learn)
    y_pred_learn = est.predict(X_learn)
    y_pred_test = est.predict(X_test)
    _check_equality_regressor(
        median, y_learn, y_pred_learn, y_test, y_pred_test)
    _check_behavior_2d(est)
def test_quantile_strategy_regressor():
    """quantile strategy: 0.5 matches median, 0 the min, 1 the max, and an
    intermediate value matches np.percentile."""
    random_state = np.random.RandomState(seed=1)
    X = [[0]] * 5  # ignored
    y = random_state.randn(5)
    reg = DummyRegressor(strategy="quantile", quantile=0.5)
    reg.fit(X, y)
    assert_array_equal(reg.predict(X), [np.median(y)] * len(X))
    reg = DummyRegressor(strategy="quantile", quantile=0)
    reg.fit(X, y)
    assert_array_equal(reg.predict(X), [np.min(y)] * len(X))
    reg = DummyRegressor(strategy="quantile", quantile=1)
    reg.fit(X, y)
    assert_array_equal(reg.predict(X), [np.max(y)] * len(X))
    # quantile is a fraction; np.percentile takes a percentage (q=30).
    reg = DummyRegressor(strategy="quantile", quantile=0.3)
    reg.fit(X, y)
    assert_array_equal(reg.predict(X), [np.percentile(y, q=30)] * len(X))
def test_quantile_strategy_multioutput_regressor():
    """Multi-output quantile strategy: 0.5 yields the per-column median and
    0.8 the per-column 80th percentile."""
    random_state = np.random.RandomState(seed=1)
    X_learn = random_state.randn(10, 10)
    y_learn = random_state.randn(10, 5)
    median = np.median(y_learn, axis=0).reshape((1, -1))
    quantile_values = np.percentile(y_learn, axis=0, q=80).reshape((1, -1))
    X_test = random_state.randn(20, 10)
    y_test = random_state.randn(20, 5)
    # Correctness oracle
    est = DummyRegressor(strategy="quantile", quantile=0.5)
    est.fit(X_learn, y_learn)
    y_pred_learn = est.predict(X_learn)
    y_pred_test = est.predict(X_test)
    _check_equality_regressor(
        median, y_learn, y_pred_learn, y_test, y_pred_test)
    _check_behavior_2d(est)
    # Correctness oracle
    est = DummyRegressor(strategy="quantile", quantile=0.8)
    est.fit(X_learn, y_learn)
    y_pred_learn = est.predict(X_learn)
    y_pred_test = est.predict(X_test)
    _check_equality_regressor(
        quantile_values, y_learn, y_pred_learn, y_test, y_pred_test)
    _check_behavior_2d(est)
def test_quantile_invalid():
    """Missing, out-of-range, list, or non-numeric quantiles fail at fit."""
    X = [[0]] * 5  # ignored
    y = [0] * 5  # ignored
    # quantile omitted entirely.
    est = DummyRegressor(strategy="quantile")
    assert_raises(ValueError, est.fit, X, y)
    # Explicit None, wrong container type, and out-of-range scalars.
    for bad_quantile in (None, [0], -0.1, 1.1):
        est = DummyRegressor(strategy="quantile", quantile=bad_quantile)
        assert_raises(ValueError, est.fit, X, y)
    # A non-numeric quantile raises TypeError instead.
    est = DummyRegressor(strategy="quantile", quantile='abc')
    assert_raises(TypeError, est.fit, X, y)
def test_quantile_strategy_empty_train():
    """Fitting a quantile dummy regressor on empty data raises ValueError."""
    empty_fit = DummyRegressor(strategy="quantile", quantile=0.4)
    assert_raises(ValueError, empty_fit.fit, [], [])
def test_constant_strategy_regressor():
    """constant strategy predicts the supplied value, whether given as a
    one-element list or a bare scalar."""
    random_state = np.random.RandomState(seed=1)
    X = [[0]] * 5  # ignored
    y = random_state.randn(5)
    reg = DummyRegressor(strategy="constant", constant=[43])
    reg.fit(X, y)
    assert_array_equal(reg.predict(X), [43] * len(X))
    reg = DummyRegressor(strategy="constant", constant=43)
    reg.fit(X, y)
    assert_array_equal(reg.predict(X), [43] * len(X))
def test_constant_strategy_multioutput_regressor():
    """Multi-output constant strategy predicts the supplied per-output
    constants for every sample."""
    random_state = np.random.RandomState(seed=1)
    X_learn = random_state.randn(10, 10)
    y_learn = random_state.randn(10, 5)
    # test with 2d array
    constants = random_state.randn(5)
    X_test = random_state.randn(20, 10)
    y_test = random_state.randn(20, 5)
    # Correctness oracle
    est = DummyRegressor(strategy="constant", constant=constants)
    est.fit(X_learn, y_learn)
    y_pred_learn = est.predict(X_learn)
    y_pred_test = est.predict(X_test)
    _check_equality_regressor(
        constants, y_learn, y_pred_learn, y_test, y_pred_test)
    _check_behavior_2d_for_constant(est)
def test_y_mean_attribute_regressor():
    """After fitting with strategy='mean', constant_ holds the target mean."""
    X = [[0]] * 5
    y = [1, 2, 4, 6, 8]
    # when strategy = 'mean'
    fitted = DummyRegressor(strategy='mean').fit(X, y)
    assert_equal(fitted.constant_, np.mean(y))
def test_unknown_strategey_regressor():
    """An unrecognized strategy string must raise ValueError at fit time."""
    # NOTE(review): the "strategey" typo in the name is kept deliberately;
    # renaming would change the test's public identifier.
    X = [[0]] * 5
    y = [1, 2, 4, 6, 8]
    bogus = DummyRegressor(strategy='gona')
    assert_raises(ValueError, bogus.fit, X, y)
def test_constants_not_specified_regressor():
    """strategy='constant' without a constant value must raise TypeError."""
    X = [[0]] * 5
    y = [1, 2, 4, 6, 8]
    incomplete = DummyRegressor(strategy='constant')
    assert_raises(TypeError, incomplete.fit, X, y)
def test_constant_size_multioutput_regressor():
    """A constant whose length mismatches the output count must fail."""
    rng = np.random.RandomState(seed=1)
    X = rng.randn(10, 10)
    y = rng.randn(10, 5)  # five outputs...
    est = DummyRegressor(strategy='constant',
                         constant=[1, 2, 3, 4])  # ...but only four constants
    assert_raises(ValueError, est.fit, X, y)
def test_constant_strategy():
    """constant classifier always predicts the supplied class, for both
    integer and string labels."""
    X = [[0], [0], [0], [0]]  # ignored
    y = [2, 1, 2, 2]
    clf = DummyClassifier(strategy="constant", random_state=0, constant=1)
    clf.fit(X, y)
    assert_array_equal(clf.predict(X), np.ones(len(X)))
    _check_predict_proba(clf, X, y)
    # Same behavior with string class labels.
    X = [[0], [0], [0], [0]]  # ignored
    y = ['two', 'one', 'two', 'two']
    clf = DummyClassifier(strategy="constant", random_state=0, constant='one')
    clf.fit(X, y)
    assert_array_equal(clf.predict(X), np.array(['one'] * 4))
    _check_predict_proba(clf, X, y)
def test_constant_strategy_multioutput():
    """Multi-output constant classifier predicts the per-output constants."""
    X = [[0], [0], [0], [0]]  # ignored
    y = np.array([[2, 3],
                  [1, 3],
                  [2, 3],
                  [2, 0]])
    n_samples = len(X)
    clf = DummyClassifier(strategy="constant", random_state=0,
                          constant=[1, 0])
    clf.fit(X, y)
    # Output 0 is always 1, output 1 is always 0.
    assert_array_equal(clf.predict(X),
                       np.hstack([np.ones((n_samples, 1)),
                                  np.zeros((n_samples, 1))]))
    _check_predict_proba(clf, X, y)
def test_constant_strategy_exceptions():
    """constant classifier requires a constant matching the output arity."""
    X = [[0], [0], [0], [0]]  # ignored
    y = [2, 1, 2, 2]
    # No constant supplied at all.
    clf = DummyClassifier(strategy="constant", random_state=0)
    assert_raises(ValueError, clf.fit, X, y)
    # Two constants supplied for a single-output target.
    clf = DummyClassifier(strategy="constant", random_state=0,
                          constant=[2, 0])
    assert_raises(ValueError, clf.fit, X, y)
def test_classification_sample_weight():
    """class_prior_ must be computed from the weighted label counts."""
    X = [[0], [0], [1]]
    y = [0, 1, 0]
    weights = [0.1, 1., 0.1]
    clf = DummyClassifier().fit(X, y, weights)
    # Class 0 carries 0.2 of the 1.2 total weight; class 1 carries 1.0.
    assert_array_almost_equal(clf.class_prior_, [0.2 / 1.2, 1. / 1.2])
def test_constant_strategy_sparse_target():
    """With a sparse multi-output target, constant predictions come back as
    a sparse matrix holding the supplied constants."""
    X = [[0]] * 5  # ignored
    y = sp.csc_matrix(np.array([[0, 1],
                                [4, 0],
                                [1, 1],
                                [1, 4],
                                [1, 1]]))
    n_samples = len(X)
    clf = DummyClassifier(strategy="constant", random_state=0, constant=[1, 0])
    clf.fit(X, y)
    y_pred = clf.predict(X)
    # Sparse in, sparse out.
    assert_true(sp.issparse(y_pred))
    assert_array_equal(y_pred.toarray(), np.hstack([np.ones((n_samples, 1)),
                                                    np.zeros((n_samples, 1))]))
def test_uniform_strategy_sparse_target_warning():
    """Uniform strategy on a sparse target warns (dense output loses the
    memory benefit) but still predicts classes uniformly."""
    X = [[0]] * 5  # ignored
    y = sp.csc_matrix(np.array([[2, 1],
                                [2, 2],
                                [1, 4],
                                [4, 2],
                                [1, 1]]))
    clf = DummyClassifier(strategy="uniform", random_state=0)
    assert_warns_message(UserWarning,
                         "the uniform strategy would not save memory",
                         clf.fit, X, y)
    X = [[0]] * 500
    y_pred = clf.predict(X)
    # Three classes per output -> each should appear ~1/3 of the time.
    for k in range(y.shape[1]):
        p = np.bincount(y_pred[:, k]) / float(len(X))
        assert_almost_equal(p[1], 1 / 3, decimal=1)
        assert_almost_equal(p[2], 1 / 3, decimal=1)
        assert_almost_equal(p[4], 1 / 3, decimal=1)
def test_stratified_strategy_sparse_target():
    """Stratified strategy on a sparse target keeps sparse output and
    reproduces per-output class frequencies."""
    X = [[0]] * 5  # ignored
    y = sp.csc_matrix(np.array([[4, 1],
                                [0, 0],
                                [1, 1],
                                [1, 4],
                                [1, 1]]))
    clf = DummyClassifier(strategy="stratified", random_state=0)
    clf.fit(X, y)
    X = [[0]] * 500
    y_pred = clf.predict(X)
    # Sparse in, sparse out; densify only to count frequencies.
    assert_true(sp.issparse(y_pred))
    y_pred = y_pred.toarray()
    for k in range(y.shape[1]):
        p = np.bincount(y_pred[:, k]) / float(len(X))
        assert_almost_equal(p[1], 3. / 5, decimal=1)
        assert_almost_equal(p[0], 1. / 5, decimal=1)
        assert_almost_equal(p[4], 1. / 5, decimal=1)
def test_most_frequent_and_prior_strategy_sparse_target():
    """most_frequent/prior on a sparse target return a sparse prediction of
    the per-output modal classes."""
    X = [[0]] * 5  # ignored
    y = sp.csc_matrix(np.array([[1, 0],
                                [1, 3],
                                [4, 0],
                                [0, 1],
                                [1, 0]]))
    n_samples = len(X)
    # Modal class is 1 for the first output and 0 for the second.
    y_expected = np.hstack([np.ones((n_samples, 1)), np.zeros((n_samples, 1))])
    for strategy in ("most_frequent", "prior"):
        clf = DummyClassifier(strategy=strategy, random_state=0)
        clf.fit(X, y)
        y_pred = clf.predict(X)
        assert_true(sp.issparse(y_pred))
        assert_array_equal(y_pred.toarray(), y_expected)
def test_dummy_regressor_sample_weight(n_samples=10):
    """mean/median/quantile strategies must honor sample weights, matching
    np.average and _weighted_percentile."""
    random_state = np.random.RandomState(seed=1)
    X = [[0]] * n_samples
    y = random_state.rand(n_samples)
    sample_weight = random_state.rand(n_samples)
    est = DummyRegressor(strategy="mean").fit(X, y, sample_weight)
    assert_equal(est.constant_, np.average(y, weights=sample_weight))
    est = DummyRegressor(strategy="median").fit(X, y, sample_weight)
    assert_equal(est.constant_, _weighted_percentile(y, sample_weight, 50.))
    est = DummyRegressor(strategy="quantile", quantile=.95).fit(X, y,
                                                                sample_weight)
    assert_equal(est.constant_, _weighted_percentile(y, sample_weight, 95.))
| bsd-3-clause |
corburn/scikit-bio | skbio/draw/_distributions.py | 10 | 30987 | # ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from future.builtins import map, range, zip
from itertools import cycle
import warnings
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from matplotlib.patches import Polygon, Rectangle
import six
from skbio.util._decorator import deprecated
# Shared keyword arguments for the @deprecated decorator applied to the
# distribution-plotting functions in this module.
distribution_plot_deprecation_p = {
    'as_of': '0.4.0', 'until': '0.4.1', 'reason': (
        "Plots that are not specific to bioinformatics should be generated "
        "with seaborn or another general-purpose plotting package."
    )}
@deprecated(**distribution_plot_deprecation_p)
def boxplots(distributions, x_values=None, x_tick_labels=None, title=None,
             x_label=None, y_label=None, x_tick_labels_orientation='vertical',
             y_min=None, y_max=None, whisker_length=1.5, box_width=0.5,
             box_colors=None, figure_width=None, figure_height=None,
             legend=None):
    """Generate a figure with a boxplot for each distribution.

    Parameters
    ----------
    distributions: 2-D array_like
        Distributions to plot. A boxplot will be created for each distribution.
    x_values : list of numbers, optional
        List indicating where each boxplot should be placed. Must be the same
        length as `distributions` if provided.
    x_tick_labels : list of str, optional
        List of x-axis tick labels.
    title : str, optional
        Title of the plot.
    x_label : str, optional
        x-axis label.
    y_label : str, optional
        y-axis label.
    x_tick_labels_orientation : {'vertical', 'horizontal'}
        Orientation of the x-axis labels.
    y_min : scalar, optional
        Minimum value of the y-axis. If ``None``, uses matplotlib's autoscale.
    y_max : scalar, optional
        Maximum value of the y-axis. If ``None``, uses matplotlib's autoscale.
    whisker_length : scalar, optional
        Length of the whiskers as a function of the IQR. For example, if 1.5,
        the whiskers extend to ``1.5 * IQR``. Anything outside of that range is
        treated as an outlier.
    box_width : scalar, optional
        Width of each box in plot units.
    box_colors : str, tuple, or list of colors, optional
        Either a matplotlib-compatible string or tuple that indicates the color
        to be used for every boxplot, or a list of colors to color each boxplot
        individually. If ``None``, boxes will be the same color as the plot
        background. If a list of colors is provided, a color must be provided
        for each boxplot. Can also supply ``None`` instead of a color, which
        will color the box the same color as the plot background.
    figure_width : scalar, optional
        Width of the plot figure in inches. If not provided, will default to
        matplotlib's default figure width.
    figure_height : scalar, optional
        Height of the plot figure in inches. If not provided, will default to
        matplotlib's default figure height.
    legend : tuple or list, optional
        Two-element tuple or list that contains a list of valid matplotlib
        colors as the first element and a list of labels (strings) as the
        second element. The lengths of the first and second elements must be
        the same. If ``None``, a legend will not be plotted.

    Returns
    -------
    matplotlib.figure.Figure
        Figure containing a boxplot for each distribution.

    See Also
    --------
    matplotlib.pyplot.boxplot
    scipy.stats.ttest_ind

    Notes
    -----
    This is a convenience wrapper around matplotlib's ``boxplot`` function that
    allows for coloring of boxplots and legend generation.

    Examples
    --------
    Create a plot with two boxplots:

    .. plot::

       >>> from skbio.draw import boxplots
       >>> fig = boxplots([[2, 2, 1, 3, 4, 4.2, 7], [0, -1, 4, 5, 6, 7]])

    Plot three distributions with custom colors and labels:

    .. plot::

       >>> from skbio.draw import boxplots
       >>> fig = boxplots(
       ...     [[2, 2, 1, 3], [0, -1, 0, 0.1, 0.3], [4, 5, 6, 3]],
       ...     x_tick_labels=('Control', 'Treatment 1', 'Treatment 2'),
       ...     box_colors=('green', 'blue', 'red'))

    """
    # Coerce inputs to float arrays (empty ones become [nan]) and check that
    # positions/labels line up with the number of distributions.
    distributions = _validate_distributions(distributions)
    num_dists = len(distributions)
    _validate_x_values(x_values, x_tick_labels, num_dists)
    # Create a new figure to plot our data on, and then plot the distributions.
    fig, ax = plt.subplots()
    box_plot = plt.boxplot(distributions, positions=x_values,
                           whis=whisker_length, widths=box_width)
    if box_colors is not None:
        # A single color is broadcast to every boxplot.
        if _is_single_matplotlib_color(box_colors):
            box_colors = [box_colors] * num_dists
        _color_box_plot(ax, box_plot, box_colors)
    # Set up the various plotting options, such as x- and y-axis labels, plot
    # title, and x-axis values if they have been supplied.
    _set_axes_options(ax, title, x_label, y_label,
                      x_tick_labels=x_tick_labels,
                      x_tick_labels_orientation=x_tick_labels_orientation,
                      y_min=y_min, y_max=y_max)
    if legend is not None:
        if len(legend) != 2:
            raise ValueError("Invalid legend was provided. The legend must be "
                             "a two-element tuple/list where the first "
                             "element is a list of colors and the second "
                             "element is a list of labels.")
        _create_legend(ax, legend[0], legend[1], 'colors')
    _set_figure_size(fig, figure_width, figure_height)
    return fig
@deprecated(**distribution_plot_deprecation_p)
def grouped_distributions(plot_type, data, x_values=None,
                          data_point_labels=None, distribution_labels=None,
                          distribution_markers=None, x_label=None,
                          y_label=None, title=None,
                          x_tick_labels_orientation='vertical', y_min=None,
                          y_max=None, whisker_length=1.5,
                          error_bar_type='stdv', distribution_width=None,
                          figure_width=None, figure_height=None):
    """Generate a figure with distributions grouped at points along the x-axis.

    Parameters
    ----------
    plot_type : {'bar', 'scatter', 'box'}
        Type of plot to visualize distributions with.
    data : list of lists of lists
        Each inner list represents a data point along the x-axis. Each data
        point contains lists of data for each distribution in the group at that
        point. This nesting allows for the grouping of distributions at each
        data point.
    x_values : list of scalars, optional
        Spacing of data points along the x-axis. Must be the same length as the
        number of data points and be in ascending sorted order. If not
        provided, plots will be spaced evenly.
    data_point_labels : list of str, optional
        Labels for data points.
    distribution_labels : list of str, optional
        Labels for each distribution in a data point grouping.
    distribution_markers : list of str or list of tuple, optional
        Matplotlib-compatible strings or tuples that indicate the color or
        symbol to be used to distinguish each distribution in a data point
        grouping. Colors will be used for bar charts or box plots, while
        symbols will be used for scatter plots.
    x_label : str, optional
        x-axis label.
    y_label : str, optional
        y-axis label.
    title : str, optional
        Plot title.
    x_tick_labels_orientation : {'vertical', 'horizontal'}
        Orientation of x-axis labels.
    y_min : scalar, optional
        Minimum value of the y-axis. If ``None``, uses matplotlib's autoscale.
    y_max : scalar, optional
        Maximum value of the y-axis. If ``None``, uses matplotlib's autoscale.
    whisker_length : scalar, optional
        If `plot_type` is ``'box'``, determines the length of the whiskers as a
        function of the IQR. For example, if 1.5, the whiskers extend to
        ``1.5 * IQR``. Anything outside of that range is seen as an outlier.
        If `plot_type` is not ``'box'``, this parameter is ignored.
    error_bar_type : {'stdv', 'sem'}
        Type of error bars to use if `plot_type` is ``'bar'``. Can be either
        ``'stdv'`` (for standard deviation) or ``'sem'`` for the standard error
        of the mean. If `plot_type` is not ``'bar'``, this parameter is
        ignored.
    distribution_width : scalar, optional
        Width in plot units of each individual distribution (e.g. each bar if
        the plot type is a bar chart, or the width of each box if the plot type
        is a boxplot). If None, will be automatically determined.
    figure_width : scalar, optional
        Width of the plot figure in inches. If not provided, will default to
        matplotlib's default figure width.
    figure_height : scalar, optional
        Height of the plot figure in inches. If not provided, will default to
        matplotlib's default figure height.

    Returns
    -------
    matplotlib.figure.Figure
        Figure containing distributions grouped at points along the x-axis.

    Examples
    --------
    Create a plot with two distributions grouped at three points:

    .. plot::

       >>> from skbio.draw import grouped_distributions
       >>> fig = grouped_distributions('bar',
       ...                             [[[2, 2, 1,], [0, 1, 4]],
       ...                              [[1, 1, 1], [4, 4.5]],
       ...                              [[2.2, 2.4, 2.7, 1.0], [0, 0.2]]],
       ...                             distribution_labels=['Treatment 1',
       ...                                                  'Treatment 2'])

    """
    # Set up different behavior based on the plot type.
    if plot_type == 'bar':
        plotting_function = _plot_bar_data
        distribution_centered = False
        marker_type = 'colors'
    elif plot_type == 'scatter':
        plotting_function = _plot_scatter_data
        distribution_centered = True
        marker_type = 'symbols'
    elif plot_type == 'box':
        plotting_function = _plot_box_data
        distribution_centered = True
        marker_type = 'colors'
    else:
        raise ValueError("Invalid plot type '%s'. Supported plot types are "
                         "'bar', 'scatter', or 'box'." % plot_type)
    num_points, num_distributions = _validate_input(data, x_values,
                                                    data_point_labels,
                                                    distribution_labels)
    # Create a list of matplotlib markers (colors or symbols) that can be used
    # to distinguish each of the distributions. If the user provided a list of
    # markers, use it and loop around to the beginning if there aren't enough
    # markers. If they didn't provide a list, or it was empty, use our own
    # predefined list of markers (again, loop around to the beginning if we
    # need more markers).
    distribution_markers = _get_distribution_markers(marker_type,
                                                     distribution_markers,
                                                     num_distributions)
    # Now calculate where each of the data points will start on the x-axis.
    x_locations = _calc_data_point_locations(num_points, x_values)
    assert (len(x_locations) == num_points), "The number of x_locations " +\
        "does not match the number of data points."
    if distribution_width is None:
        # Find the smallest gap between consecutive data points and divide this
        # by the number of distributions + 1 for some extra spacing between
        # data points.
        min_gap = max(x_locations)
        for i in range(len(x_locations) - 1):
            curr_gap = x_locations[i + 1] - x_locations[i]
            if curr_gap < min_gap:
                min_gap = curr_gap
        distribution_width = min_gap / float(num_distributions + 1)
    else:
        if distribution_width <= 0:
            raise ValueError("The width of a distribution cannot be less than "
                             "or equal to zero.")
    result, plot_axes = plt.subplots()
    # Iterate over each data point, and plot each of the distributions at that
    # data point. Increase the offset after each distribution is plotted,
    # so that the grouped distributions don't overlap.
    for point, x_pos in zip(data, x_locations):
        dist_offset = 0
        for dist_index, dist, dist_marker in zip(range(num_distributions),
                                                 point, distribution_markers):
            dist_location = x_pos + dist_offset
            plotting_function(plot_axes, dist, dist_marker, distribution_width,
                              dist_location, whisker_length, error_bar_type)
            dist_offset += distribution_width
    # Set up various plot options that are best set after the plotting is done.
    # The x-axis tick marks (one per data point) are centered on each group of
    # distributions.
    plot_axes.set_xticks(_calc_data_point_ticks(x_locations,
                                                num_distributions,
                                                distribution_width,
                                                distribution_centered))
    _set_axes_options(plot_axes, title, x_label, y_label, x_values,
                      data_point_labels, x_tick_labels_orientation, y_min,
                      y_max)
    if distribution_labels is not None:
        _create_legend(plot_axes, distribution_markers, distribution_labels,
                       marker_type)
    _set_figure_size(result, figure_width, figure_height)
    # matplotlib seems to sometimes plot points on the rightmost edge of the
    # plot without adding padding, so we need to add our own to both sides of
    # the plot. For some reason this has to go after the call to draw(),
    # otherwise matplotlib throws an exception saying it doesn't have a
    # renderer. Boxplots need extra padding on the left.
    if plot_type == 'box':
        left_pad = 2 * distribution_width
    else:
        left_pad = distribution_width
    plot_axes.set_xlim(plot_axes.get_xlim()[0] - left_pad,
                       plot_axes.get_xlim()[1] + distribution_width)
    return result
def _validate_distributions(distributions):
dists = []
for distribution in distributions:
try:
distribution = np.asarray(distribution, dtype=float)
except ValueError:
raise ValueError("Each value in each distribution must be "
"convertible to a number.")
# Empty distributions are plottable in mpl < 1.4.0. In 1.4.0, a
# ValueError is raised. This has been fixed in mpl 1.4.0-dev (see
# https://github.com/matplotlib/matplotlib/pull/3571). In order to
# support empty distributions across mpl versions, we replace them with
# [np.nan]. See https://github.com/pydata/pandas/issues/8382,
# https://github.com/matplotlib/matplotlib/pull/3571, and
# https://github.com/pydata/pandas/pull/8240 for details.
# If we decide to only support mpl > 1.4.0 in the future, this code can
# likely be removed in favor of letting mpl handle empty distributions.
if distribution.size > 0:
dists.append(distribution)
else:
dists.append(np.array([np.nan]))
return dists
def _validate_input(data, x_values, data_point_labels, distribution_labels):
    """Validate *data* and its plotting options.

    Returns a ``(num_points, num_distributions)`` tuple describing the
    shape of the data. Raises ValueError when the data is empty/ragged
    or when any of the labelling options are inconsistent with it.
    """
    if data is None or not data or isinstance(data, six.string_types):
        raise ValueError("The data must be a list type, and it cannot be "
                         "None or empty.")
    empty_data_error_msg = ("The data must contain at least one data "
                            "point, and each data point must contain at "
                            "least one distribution to plot.")
    num_points = len(data)
    num_distributions = len(data[0])
    if not (num_points and num_distributions):
        raise ValueError(empty_data_error_msg)
    for grouping in data:
        if not len(grouping):
            raise ValueError(empty_data_error_msg)
        if len(grouping) != num_distributions:
            raise ValueError("The number of distributions in each data point "
                             "grouping must be the same for all data points.")
    # One x value / tick label per data point, all x values numeric.
    _validate_x_values(x_values, data_point_labels, num_points)
    if (distribution_labels is not None and
            len(distribution_labels) != num_distributions):
        raise ValueError("The number of distribution labels must be equal "
                         "to the number of distributions.")
    return num_points, num_distributions
def _validate_x_values(x_values, x_tick_labels, num_expected_values):
"""Validates the x values provided by the user, making sure they are the
correct length and are all numbers.
Also validates the number of x-axis tick labels.
Raises a ValueError if these conditions are not met.
"""
if x_values is not None:
if len(x_values) != num_expected_values:
raise ValueError("The number of x values must match the number "
"of data points.")
try:
list(map(float, x_values))
except:
raise ValueError("Each x value must be a number.")
if x_tick_labels is not None:
if len(x_tick_labels) != num_expected_values:
raise ValueError("The number of x-axis tick labels must match the "
"number of data points.")
def _get_distribution_markers(marker_type, marker_choices, num_markers):
"""Returns a list of length num_markers of valid matplotlib colors or
symbols.
The markers will be comprised of those found in marker_choices (if not None
and not empty) or a list of predefined markers (determined by marker_type,
which can be either 'colors' or 'symbols'). If there are not enough
markers, the list of markers will be reused from the beginning again (as
many times as are necessary).
"""
if num_markers < 0:
raise ValueError("num_markers must be greater than or equal to zero.")
if marker_choices is None or len(marker_choices) == 0:
if marker_type == 'colors':
marker_choices = ['b', 'g', 'r', 'c', 'm', 'y', 'w']
elif marker_type == 'symbols':
marker_choices = \
['s', 'o', '^', '>', 'v', '<', 'd', 'p', 'h', '8', '+', 'x']
else:
raise ValueError("Invalid marker_type: '%s'. marker_type must be "
"either 'colors' or 'symbols'." % marker_type)
if len(marker_choices) < num_markers:
# We don't have enough markers to represent each distribution uniquely,
# so let the user know. We'll add as many markers (starting from the
# beginning of the list again) until we have enough, but the user
# should still know because they may want to provide a new list of
# markers.
warnings.warn(
"There are not enough markers to uniquely represent each "
"distribution in your dataset. You may want to provide a list "
"of markers that is at least as large as the number of "
"distributions in your dataset.",
RuntimeWarning)
marker_cycle = cycle(marker_choices[:])
while len(marker_choices) < num_markers:
marker_choices.append(next(marker_cycle))
return marker_choices[:num_markers]
def _calc_data_point_locations(num_points, x_values=None):
"""Returns the x-axis location for each of the data points to start at.
Note: A numpy array is returned so that the overloaded "+" operator can be
used on the array.
The x-axis locations are scaled by x_values if it is provided, or else the
x-axis locations are evenly spaced. In either case, the x-axis locations
will always be in the range [1, num_points].
"""
if x_values is None:
# Evenly space the x-axis locations.
x_locs = np.arange(1, num_points + 1)
else:
if len(x_values) != num_points:
raise ValueError("The number of x-axis values must match the "
"number of data points.")
# Scale to the range [1, num_points]. Taken from
# http://www.heatonresearch.com/wiki/Range_Normalization
x_min = min(x_values)
x_max = max(x_values)
x_range = x_max - x_min
n_range = num_points - 1
x_locs = np.array([(((x_val - x_min) * n_range) / float(x_range)) + 1
for x_val in x_values])
return x_locs
def _calc_data_point_ticks(x_locations, num_distributions, distribution_width,
distribution_centered):
"""Returns a 1D numpy array of x-axis tick positions.
These positions will be centered on each data point.
Set distribution_centered to True for scatter and box plots because their
plot types naturally center over a given horizontal position. Bar charts
should use distribution_centered = False because the leftmost edge of a bar
starts at a given horizontal position and extends to the right for the
width of the bar.
"""
dist_size = num_distributions - 1 if distribution_centered else\
num_distributions
return x_locations + ((dist_size * distribution_width) / 2)
def _plot_bar_data(plot_axes, distribution, distribution_color,
distribution_width, x_position, whisker_length,
error_bar_type):
"""Returns the result of plotting a single bar in matplotlib."""
result = None
# We do not want to plot empty distributions because matplotlib will not be
# able to render them as PDFs.
if len(distribution) > 0:
avg = np.mean(distribution)
if error_bar_type == 'stdv':
error_bar = np.std(distribution)
elif error_bar_type == 'sem':
error_bar = np.std(distribution) / np.sqrt(len(distribution))
else:
raise ValueError(
"Invalid error bar type '%s'. Supported error bar types are "
"'stdv' and 'sem'." % error_bar_type)
result = plot_axes.bar(x_position, avg, distribution_width,
yerr=error_bar, ecolor='black',
facecolor=distribution_color)
return result
def _plot_scatter_data(plot_axes, distribution, distribution_symbol,
distribution_width, x_position, whisker_length,
error_bar_type):
"""Returns the result of plotting a single scatterplot in matplotlib."""
result = None
x_vals = [x_position] * len(distribution)
# matplotlib's scatter function doesn't like plotting empty data.
if len(x_vals) > 0 and len(distribution) > 0:
result = plot_axes.scatter(x_vals, distribution,
marker=distribution_symbol, c='k')
return result
def _plot_box_data(plot_axes, distribution, distribution_color,
                   distribution_width, x_position, whisker_length,
                   error_bar_type):
    """Plot a single boxplot and return matplotlib's result.

    The box is filled with *distribution_color* via _color_box_plot.
    Returns None for an empty distribution, which matplotlib cannot
    render as a PDF.
    """
    if len(distribution) == 0:
        return None
    box = plot_axes.boxplot([distribution], positions=[x_position],
                            widths=distribution_width,
                            whis=whisker_length)
    _color_box_plot(plot_axes, box, [distribution_color])
    return box
def _is_single_matplotlib_color(color):
    """Returns True if color is a single (not a list) mpl color.

    A single color is either a string (e.g. 'r', '#ff0000') or an RGB /
    RGBA sequence of 3 or 4 numbers.
    """
    if isinstance(color, six.string_types):
        return True
    if len(color) not in (3, 4):
        return False
    return all(isinstance(channel, (float, int)) for channel in color)
def _color_box_plot(plot_axes, box_plot, colors):
    """Color boxes in the box plot with the specified colors.
    If any of the colors are None, the box will not be colored.
    The box_plot argument must be the dictionary returned by the call to
    matplotlib's boxplot function, and the colors argument must consist of
    valid matplotlib colors.
    """
    # Note: the following code is largely taken from this matplotlib boxplot
    # example:
    # http://matplotlib.sourceforge.net/examples/pylab_examples/
    # boxplot_demo2.html
    num_colors = len(colors)
    num_box_plots = len(box_plot['boxes'])
    if num_colors != num_box_plots:
        raise ValueError("The number of colors (%d) does not match the number "
                         "of boxplots (%d)." % (num_colors, num_box_plots))
    # Each box is paired with its median line and its fill color. 'boxes'
    # and 'medians' are parallel lists in matplotlib's boxplot dictionary.
    for box, median, color in zip(box_plot['boxes'],
                                  box_plot['medians'],
                                  colors):
        if color is not None:
            box_x = []
            box_y = []
            # There are five points in the box. The first is the same as
            # the last.
            for i in range(5):
                box_x.append(box.get_xdata()[i])
                box_y.append(box.get_ydata()[i])
            # Fill the box outline with a solid polygon of the requested
            # color drawn on top of it.
            box_coords = list(zip(box_x, box_y))
            box_polygon = Polygon(box_coords, facecolor=color)
            plot_axes.add_patch(box_polygon)
            # Draw the median lines back over what we just filled in with
            # color.
            median_x = []
            median_y = []
            for i in range(2):
                median_x.append(median.get_xdata()[i])
                median_y.append(median.get_ydata()[i])
            plot_axes.plot(median_x, median_y, 'black')
def _set_axes_options(plot_axes, title=None, x_label=None, y_label=None,
x_values=None, x_tick_labels=None,
x_tick_labels_orientation='vertical', y_min=None,
y_max=None):
"""Applies various labelling options to the plot axes."""
if title is not None:
plot_axes.set_title(title)
if x_label is not None:
plot_axes.set_xlabel(x_label)
if y_label is not None:
plot_axes.set_ylabel(y_label)
if (x_tick_labels_orientation != 'vertical' and
x_tick_labels_orientation != 'horizontal'):
raise ValueError("Invalid orientation for x-axis tick labels: '%s'. "
"Valid orientations are 'vertical' or 'horizontal'."
% x_tick_labels_orientation)
# If labels are provided, always use them. If they aren't, use the x_values
# that denote the spacing between data points as labels. If that isn't
# available, simply label the data points in an incremental fashion,
# i.e. 1, 2, 3, ..., n, where n is the number of data points on the plot.
if x_tick_labels is not None:
plot_axes.set_xticklabels(x_tick_labels,
rotation=x_tick_labels_orientation)
elif x_tick_labels is None and x_values is not None:
plot_axes.set_xticklabels(x_values, rotation=x_tick_labels_orientation)
else:
plot_axes.set_xticklabels(
range(1, len(plot_axes.get_xticklabels()) + 1),
rotation=x_tick_labels_orientation)
# Set the y-axis range if specified.
if y_min is not None:
plot_axes.set_ylim(bottom=float(y_min))
if y_max is not None:
plot_axes.set_ylim(top=float(y_max))
def _create_legend(plot_axes, distribution_markers, distribution_labels,
marker_type):
"""Creates a legend on the supplied axes."""
# We have to use a proxy artist for the legend because box plots currently
# don't have a very useful legend in matplotlib, and using the default
# legend for bar/scatterplots chokes on empty/null distributions.
#
# Note: This code is based on the following examples:
# http://matplotlib.sourceforge.net/users/legend_guide.html
# http://stackoverflow.com/a/11423554
if len(distribution_markers) != len(distribution_labels):
raise ValueError("The number of distribution markers does not match "
"the number of distribution labels.")
if marker_type == 'colors':
legend_proxy = [Rectangle((0, 0), 1, 1, fc=marker)
for marker in distribution_markers]
plot_axes.legend(legend_proxy, distribution_labels, loc='best')
elif marker_type == 'symbols':
legend_proxy = [Line2D(range(1), range(1), color='white',
markerfacecolor='black', marker=marker)
for marker in distribution_markers]
plot_axes.legend(legend_proxy, distribution_labels, numpoints=3,
scatterpoints=3, loc='best')
else:
raise ValueError("Invalid marker_type: '%s'. marker_type must be "
"either 'colors' or 'symbols'." % marker_type)
def _set_figure_size(fig, width=None, height=None):
"""Sets the plot figure size and makes room for axis labels, titles, etc.
If both width and height are not provided, will use matplotlib defaults.
Making room for labels will not always work, and if it fails, the user will
be warned that their plot may have cut-off labels.
"""
# Set the size of the plot figure, then make room for the labels so they
# don't get cut off. Must be done in this order.
if width is not None and height is not None and width > 0 and height > 0:
fig.set_size_inches(width, height)
try:
fig.tight_layout()
except ValueError:
warnings.warn(
"Could not automatically resize plot to make room for "
"axes labels and plot title. This can happen if the labels or "
"title are extremely long and the plot size is too small. Your "
"plot may have its labels and/or title cut-off. To fix this, "
"try increasing the plot's size (in inches) and try again.",
RuntimeWarning)
| bsd-3-clause |
btabibian/scikit-learn | sklearn/preprocessing/data.py | 3 | 90834 | # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Eric Martin <eric@ericmart.in>
# Giorgio Patrini <giorgio.patrini@anu.edu.au>
# License: BSD 3 clause
from __future__ import division
from itertools import chain, combinations
import numbers
import warnings
from itertools import combinations_with_replacement as combinations_w_r
import numpy as np
from scipy import sparse
from scipy import stats
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..utils import check_array
from ..utils.extmath import row_norms
from ..utils.extmath import _incremental_mean_and_var
from ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2)
from ..utils.sparsefuncs import (inplace_column_scale,
mean_variance_axis, incr_mean_variance_axis,
min_max_axis)
from ..utils.validation import (check_is_fitted, check_random_state,
FLOAT_DTYPES)
BOUNDS_THRESHOLD = 1e-7
zip = six.moves.zip
map = six.moves.map
range = six.moves.range
__all__ = [
'Binarizer',
'KernelCenterer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'QuantileTransformer',
'add_dummy_feature',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
'quantile_transform',
]
def _handle_zeros_in_scale(scale, copy=True):
''' Makes sure that whenever scale is zero, we handle it correctly.
This happens in most scalers when we have constant features.'''
# if we are fitting on 1D arrays, scale might be a scalar
if np.isscalar(scale):
if scale == .0:
scale = 1.
return scale
elif isinstance(scale, np.ndarray):
if copy:
# New array to avoid side-effects
scale = scale.copy()
scale[scale == 0.0] = 1.0
return scale
def scale(X, axis=0, with_mean=True, with_std=True, copy=True):
    """Standardize a dataset along any axis
    Center to the mean and component wise scale to unit variance.
    Read more in the :ref:`User Guide <preprocessing_scaler>`.
    Parameters
    ----------
    X : {array-like, sparse matrix}
        The data to center and scale.
    axis : int (0 by default)
        axis used to compute the means and standard deviations along. If 0,
        independently standardize each feature, otherwise (if 1) standardize
        each sample.
    with_mean : boolean, True by default
        If True, center the data before scaling.
    with_std : boolean, True by default
        If True, scale the data to unit variance (or equivalently,
        unit standard deviation).
    copy : boolean, optional, default True
        set to False to perform inplace row normalization and avoid a
        copy (if the input is already a numpy array or a scipy.sparse
        CSC matrix and if axis is 1).
    Notes
    -----
    This implementation will refuse to center scipy.sparse matrices
    since it would make them non-sparse and would potentially crash the
    program with memory exhaustion problems.
    Instead the caller is expected to either set explicitly
    `with_mean=False` (in that case, only variance scaling will be
    performed on the features of the CSC matrix) or to call `X.toarray()`
    if he/she expects the materialized dense array to fit in memory.
    To avoid memory copy the caller should pass a CSC matrix.
    For a comparison of the different scalers, transformers, and normalizers,
    see :ref:`examples/preprocessing/plot_all_scaling.py
    <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
    See also
    --------
    StandardScaler: Performs scaling to unit variance using the``Transformer`` API
        (e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
    """ # noqa
    X = check_array(X, accept_sparse='csc', copy=copy, ensure_2d=False,
                    warn_on_dtype=True, estimator='the scale function',
                    dtype=FLOAT_DTYPES)
    if sparse.issparse(X):
        if with_mean:
            raise ValueError(
                "Cannot center sparse matrices: pass `with_mean=False` instead"
                " See docstring for motivation and alternatives.")
        if axis != 0:
            raise ValueError("Can only scale sparse matrix on axis=0, "
                             " got axis=%d" % axis)
        # Sparse path: only variance scaling is possible without
        # densifying the matrix.
        if with_std:
            _, var = mean_variance_axis(X, axis=0)
            var = _handle_zeros_in_scale(var, copy=False)
            inplace_column_scale(X, 1 / np.sqrt(var))
    else:
        # Dense path: center and/or scale in place through a rolled view.
        X = np.asarray(X)
        if with_mean:
            mean_ = np.mean(X, axis)
        if with_std:
            scale_ = np.std(X, axis)
        # Xr is a view on the original array that enables easy use of
        # broadcasting on the axis in which we are interested in
        Xr = np.rollaxis(X, axis)
        if with_mean:
            Xr -= mean_
            mean_1 = Xr.mean(axis=0)
            # Verify that mean_1 is 'close to zero'. If X contains very
            # large values, mean_1 can also be very large, due to a lack of
            # precision of mean_. In this case, a pre-scaling of the
            # concerned feature is efficient, for instance by its mean or
            # maximum.
            if not np.allclose(mean_1, 0):
                warnings.warn("Numerical issues were encountered "
                              "when centering the data "
                              "and might not be solved. Dataset may "
                              "contain too large values. You may need "
                              "to prescale your features.")
                Xr -= mean_1
        if with_std:
            scale_ = _handle_zeros_in_scale(scale_, copy=False)
            Xr /= scale_
            if with_mean:
                mean_2 = Xr.mean(axis=0)
                # If mean_2 is not 'close to zero', it comes from the fact that
                # scale_ is very small so that mean_2 = mean_1/scale_ > 0, even
                # if mean_1 was close to zero. The problem is thus essentially
                # due to the lack of precision of mean_. A solution is then to
                # subtract the mean again:
                if not np.allclose(mean_2, 0):
                    warnings.warn("Numerical issues were encountered "
                                  "when scaling the data "
                                  "and might not be solved. The standard "
                                  "deviation of the data is probably "
                                  "very close to 0. ")
                    Xr -= mean_2
    return X
class MinMaxScaler(BaseEstimator, TransformerMixin):
    """Transforms features by scaling each feature to a given range.
    This estimator scales and translates each feature individually such
    that it is in the given range on the training set, i.e. between
    zero and one.
    The transformation is given by::
        X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
        X_scaled = X_std * (max - min) + min
    where min, max = feature_range.
    This transformation is often used as an alternative to zero mean,
    unit variance scaling.
    Read more in the :ref:`User Guide <preprocessing_scaler>`.
    Parameters
    ----------
    feature_range : tuple (min, max), default=(0, 1)
        Desired range of transformed data.
    copy : boolean, optional, default True
        Set to False to perform inplace row normalization and avoid a
        copy (if the input is already a numpy array).
    Attributes
    ----------
    min_ : ndarray, shape (n_features,)
        Per feature adjustment for minimum.
    scale_ : ndarray, shape (n_features,)
        Per feature relative scaling of the data.
        .. versionadded:: 0.17
           *scale_* attribute.
    data_min_ : ndarray, shape (n_features,)
        Per feature minimum seen in the data
        .. versionadded:: 0.17
           *data_min_*
    data_max_ : ndarray, shape (n_features,)
        Per feature maximum seen in the data
        .. versionadded:: 0.17
           *data_max_*
    data_range_ : ndarray, shape (n_features,)
        Per feature range ``(data_max_ - data_min_)`` seen in the data
        .. versionadded:: 0.17
           *data_range_*
    See also
    --------
    minmax_scale: Equivalent function without the estimator API.
    Notes
    -----
    For a comparison of the different scalers, transformers, and normalizers,
    see :ref:`examples/preprocessing/plot_all_scaling.py
    <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
    """
    def __init__(self, feature_range=(0, 1), copy=True):
        self.feature_range = feature_range
        self.copy = copy
    def _reset(self):
        """Reset internal data-dependent state of the scaler, if necessary.
        __init__ parameters are not touched.
        """
        # Checking one attribute is enough, because they are all set together
        # in partial_fit
        if hasattr(self, 'scale_'):
            del self.scale_
            del self.min_
            del self.n_samples_seen_
            del self.data_min_
            del self.data_max_
            del self.data_range_
    def fit(self, X, y=None):
        """Compute the minimum and maximum to be used for later scaling.
        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            The data used to compute the per-feature minimum and maximum
            used for later scaling along the features axis.
        """
        # Reset internal state before fitting
        self._reset()
        return self.partial_fit(X, y)
    def partial_fit(self, X, y=None):
        """Online computation of min and max on X for later scaling.
        All of X is processed as a single batch. This is intended for cases
        when `fit` is not feasible due to very large number of `n_samples`
        or because X is read from a continuous stream.
        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            The data used to compute the mean and standard deviation
            used for later scaling along the features axis.
        y : Passthrough for ``Pipeline`` compatibility.
        """
        feature_range = self.feature_range
        if feature_range[0] >= feature_range[1]:
            raise ValueError("Minimum of desired feature range must be smaller"
                             " than maximum. Got %s." % str(feature_range))
        if sparse.issparse(X):
            raise TypeError("MinMaxScaler does no support sparse input. "
                            "You may consider to use MaxAbsScaler instead.")
        X = check_array(X, copy=self.copy, warn_on_dtype=True,
                        estimator=self, dtype=FLOAT_DTYPES)
        data_min = np.min(X, axis=0)
        data_max = np.max(X, axis=0)
        # First pass
        if not hasattr(self, 'n_samples_seen_'):
            self.n_samples_seen_ = X.shape[0]
        # Next steps: merge the running extrema with this batch's extrema.
        else:
            data_min = np.minimum(self.data_min_, data_min)
            data_max = np.maximum(self.data_max_, data_max)
            self.n_samples_seen_ += X.shape[0]
        data_range = data_max - data_min
        # scale_/min_ define the affine map X * scale_ + min_ that sends
        # [data_min_, data_max_] onto feature_range; zero ranges (constant
        # features) are replaced by 1 to avoid division by zero.
        self.scale_ = ((feature_range[1] - feature_range[0]) /
                       _handle_zeros_in_scale(data_range))
        self.min_ = feature_range[0] - data_min * self.scale_
        self.data_min_ = data_min
        self.data_max_ = data_max
        self.data_range_ = data_range
        return self
    def transform(self, X):
        """Scaling features of X according to feature_range.
        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            Input data that will be transformed.
        """
        check_is_fitted(self, 'scale_')
        X = check_array(X, copy=self.copy, dtype=FLOAT_DTYPES)
        # Apply the affine map fitted in partial_fit: X * scale_ + min_.
        X *= self.scale_
        X += self.min_
        return X
    def inverse_transform(self, X):
        """Undo the scaling of X according to feature_range.
        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            Input data that will be transformed. It cannot be sparse.
        """
        check_is_fitted(self, 'scale_')
        X = check_array(X, copy=self.copy, dtype=FLOAT_DTYPES)
        # Invert the affine map applied by transform.
        X -= self.min_
        X /= self.scale_
        return X
def minmax_scale(X, feature_range=(0, 1), axis=0, copy=True):
    """Transforms features by scaling each feature to a given range.
    This estimator scales and translates each feature individually such
    that it is in the given range on the training set, i.e. between
    zero and one.
    The transformation is given by::
        X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
        X_scaled = X_std * (max - min) + min
    where min, max = feature_range.
    This transformation is often used as an alternative to zero mean,
    unit variance scaling.
    Read more in the :ref:`User Guide <preprocessing_scaler>`.
    .. versionadded:: 0.17
       *minmax_scale* function interface
       to :class:`sklearn.preprocessing.MinMaxScaler`.
    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        The data to scale. 1d arrays are also accepted and scaled as a
        single feature.
    feature_range : tuple (min, max), default=(0, 1)
        Desired range of transformed data.
    axis : int (0 by default)
        axis used to scale along. If 0, independently scale each feature,
        otherwise (if 1) scale each sample.
    copy : boolean, optional, default is True
        Set to False to perform inplace scaling and avoid a copy (if the input
        is already a numpy array).
    See also
    --------
    MinMaxScaler: Performs scaling to a given range using the``Transformer`` API
        (e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
    Notes
    -----
    For a comparison of the different scalers, transformers, and normalizers,
    see :ref:`examples/preprocessing/plot_all_scaling.py
    <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
    """ # noqa
    # Unlike the scaler object, this function allows 1d input.
    # If copy is required, it will be done inside the scaler object.
    X = check_array(X, copy=False, ensure_2d=False, warn_on_dtype=True,
                    dtype=FLOAT_DTYPES)
    original_ndim = X.ndim
    if original_ndim == 1:
        X = X.reshape(X.shape[0], 1)
    s = MinMaxScaler(feature_range=feature_range, copy=copy)
    if axis == 0:
        X = s.fit_transform(X)
    else:
        # Scale along samples instead of features by transposing twice.
        X = s.fit_transform(X.T).T
    if original_ndim == 1:
        X = X.ravel()
    return X
class StandardScaler(BaseEstimator, TransformerMixin):
    """Standardize features by removing the mean and scaling to unit variance
    Centering and scaling happen independently on each feature by computing
    the relevant statistics on the samples in the training set. Mean and
    standard deviation are then stored to be used on later data using the
    `transform` method.
    Standardization of a dataset is a common requirement for many
    machine learning estimators: they might behave badly if the
    individual feature do not more or less look like standard normally
    distributed data (e.g. Gaussian with 0 mean and unit variance).
    For instance many elements used in the objective function of
    a learning algorithm (such as the RBF kernel of Support Vector
    Machines or the L1 and L2 regularizers of linear models) assume that
    all features are centered around 0 and have variance in the same
    order. If a feature has a variance that is orders of magnitude larger
    that others, it might dominate the objective function and make the
    estimator unable to learn from other features correctly as expected.
    This scaler can also be applied to sparse CSR or CSC matrices by passing
    `with_mean=False` to avoid breaking the sparsity structure of the data.
    Read more in the :ref:`User Guide <preprocessing_scaler>`.
    Parameters
    ----------
    with_mean : boolean, True by default
        If True, center the data before scaling.
        This does not work (and will raise an exception) when attempted on
        sparse matrices, because centering them entails building a dense
        matrix which in common use cases is likely to be too large to fit in
        memory.
    with_std : boolean, True by default
        If True, scale the data to unit variance (or equivalently,
        unit standard deviation).
    copy : boolean, optional, default True
        If False, try to avoid a copy and do inplace scaling instead.
        This is not guaranteed to always work inplace; e.g. if the data is
        not a NumPy array or scipy.sparse CSR matrix, a copy may still be
        returned.
    Attributes
    ----------
    scale_ : ndarray, shape (n_features,)
        Per feature relative scaling of the data.
        .. versionadded:: 0.17
           *scale_*
    mean_ : array of floats with shape [n_features]
        The mean value for each feature in the training set.
    var_ : array of floats with shape [n_features]
        The variance for each feature in the training set. Used to compute
        `scale_`
    n_samples_seen_ : int
        The number of samples processed by the estimator. Will be reset on
        new calls to fit, but increments across ``partial_fit`` calls.
    See also
    --------
    scale: Equivalent function without the estimator API.
    :class:`sklearn.decomposition.PCA`
        Further removes the linear correlation across features with 'whiten=True'.
    Notes
    -----
    For a comparison of the different scalers, transformers, and normalizers,
    see :ref:`examples/preprocessing/plot_all_scaling.py
    <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
    """ # noqa
    def __init__(self, copy=True, with_mean=True, with_std=True):
        self.with_mean = with_mean
        self.with_std = with_std
        self.copy = copy
    def _reset(self):
        """Reset internal data-dependent state of the scaler, if necessary.
        __init__ parameters are not touched.
        """
        # Checking one attribute is enough, because they are all set together
        # in partial_fit
        if hasattr(self, 'scale_'):
            del self.scale_
            del self.n_samples_seen_
            del self.mean_
            del self.var_
    def fit(self, X, y=None):
        """Compute the mean and std to be used for later scaling.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape [n_samples, n_features]
            The data used to compute the mean and standard deviation
            used for later scaling along the features axis.
        y : Passthrough for ``Pipeline`` compatibility.
        """
        # Reset internal state before fitting
        self._reset()
        return self.partial_fit(X, y)
    def partial_fit(self, X, y=None):
        """Online computation of mean and std on X for later scaling.
        All of X is processed as a single batch. This is intended for cases
        when `fit` is not feasible due to very large number of `n_samples`
        or because X is read from a continuous stream.
        The algorithm for incremental mean and std is given in Equation 1.5a,b
        in Chan, Tony F., Gene H. Golub, and Randall J. LeVeque. "Algorithms
        for computing the sample variance: Analysis and recommendations."
        The American Statistician 37.3 (1983): 242-247:
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape [n_samples, n_features]
            The data used to compute the mean and standard deviation
            used for later scaling along the features axis.
        y : Passthrough for ``Pipeline`` compatibility.
        """
        X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
                        warn_on_dtype=True, estimator=self, dtype=FLOAT_DTYPES)
        # Even in the case of `with_mean=False`, we update the mean anyway
        # This is needed for the incremental computation of the var
        # See incr_mean_variance_axis and _incremental_mean_variance_axis
        if sparse.issparse(X):
            if self.with_mean:
                raise ValueError(
                    "Cannot center sparse matrices: pass `with_mean=False` "
                    "instead. See docstring for motivation and alternatives.")
            if self.with_std:
                # First pass
                if not hasattr(self, 'n_samples_seen_'):
                    self.mean_, self.var_ = mean_variance_axis(X, axis=0)
                    self.n_samples_seen_ = X.shape[0]
                # Next passes: merge this batch into the running statistics.
                else:
                    self.mean_, self.var_, self.n_samples_seen_ = \
                        incr_mean_variance_axis(X, axis=0,
                                                last_mean=self.mean_,
                                                last_var=self.var_,
                                                last_n=self.n_samples_seen_)
            else:
                self.mean_ = None
                self.var_ = None
        else:
            # First pass: initialize the accumulators consumed by
            # _incremental_mean_and_var below.
            if not hasattr(self, 'n_samples_seen_'):
                self.mean_ = .0
                self.n_samples_seen_ = 0
                if self.with_std:
                    self.var_ = .0
                else:
                    self.var_ = None
            self.mean_, self.var_, self.n_samples_seen_ = \
                _incremental_mean_and_var(X, self.mean_, self.var_,
                                          self.n_samples_seen_)
        if self.with_std:
            # Constant features (zero variance) get scale 1 so that
            # transform does not divide by zero.
            self.scale_ = _handle_zeros_in_scale(np.sqrt(self.var_))
        else:
            self.scale_ = None
        return self
    def transform(self, X, y=None, copy=None):
        """Perform standardization by centering and scaling
        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            The data used to scale along the features axis.
        """
        check_is_fitted(self, 'scale_')
        copy = copy if copy is not None else self.copy
        X = check_array(X, accept_sparse='csr', copy=copy, warn_on_dtype=True,
                        estimator=self, dtype=FLOAT_DTYPES)
        if sparse.issparse(X):
            if self.with_mean:
                raise ValueError(
                    "Cannot center sparse matrices: pass `with_mean=False` "
                    "instead. See docstring for motivation and alternatives.")
            # scale_ is None when with_std=False: nothing to do then.
            if self.scale_ is not None:
                inplace_column_scale(X, 1 / self.scale_)
        else:
            if self.with_mean:
                X -= self.mean_
            if self.with_std:
                X /= self.scale_
        return X
    def inverse_transform(self, X, copy=None):
        """Scale back the data to the original representation
        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            The data used to scale along the features axis.
        """
        check_is_fitted(self, 'scale_')
        copy = copy if copy is not None else self.copy
        if sparse.issparse(X):
            if self.with_mean:
                raise ValueError(
                    "Cannot uncenter sparse matrices: pass `with_mean=False` "
                    "instead See docstring for motivation and alternatives.")
            # inplace_column_scale requires CSR; converting already copies,
            # so a second copy is skipped.
            if not sparse.isspmatrix_csr(X):
                X = X.tocsr()
                copy = False
            if copy:
                X = X.copy()
            if self.scale_ is not None:
                inplace_column_scale(X, self.scale_)
        else:
            X = np.asarray(X)
            if copy:
                X = X.copy()
            # Undo transform: multiply back by scale_, then add the mean.
            if self.with_std:
                X *= self.scale_
            if self.with_mean:
                X += self.mean_
        return X
class MaxAbsScaler(BaseEstimator, TransformerMixin):
    """Scale each feature by its maximum absolute value.

    This estimator scales and translates each feature individually such
    that the maximal absolute value of each feature in the
    training set will be 1.0. It does not shift/center the data, and
    thus does not destroy any sparsity.

    This scaler can also be applied to sparse CSR or CSC matrices.

    .. versionadded:: 0.17

    Parameters
    ----------
    copy : boolean, optional, default is True
        Set to False to perform inplace scaling and avoid a copy (if the input
        is already a numpy array).

    Attributes
    ----------
    scale_ : ndarray, shape (n_features,)
        Per feature relative scaling of the data.

        .. versionadded:: 0.17
           *scale_* attribute.

    max_abs_ : ndarray, shape (n_features,)
        Per feature maximum absolute value.

    n_samples_seen_ : int
        The number of samples processed by the estimator. Will be reset on
        new calls to fit, but increments across ``partial_fit`` calls.

    See also
    --------
    maxabs_scale: Equivalent function without the estimator API.

    Notes
    -----
    For a comparison of the different scalers, transformers, and normalizers,
    see :ref:`examples/preprocessing/plot_all_scaling.py
    <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
    """

    def __init__(self, copy=True):
        self.copy = copy

    def _reset(self):
        """Reset internal data-dependent state of the scaler, if necessary.

        ``__init__`` parameters are not touched.
        """
        # Checking one attribute is enough, because they are all set together
        # in partial_fit.
        if hasattr(self, 'scale_'):
            del self.scale_
            del self.n_samples_seen_
            del self.max_abs_

    def fit(self, X, y=None):
        """Compute the maximum absolute value to be used for later scaling.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape [n_samples, n_features]
            The data used to compute the per-feature minimum and maximum
            used for later scaling along the features axis.
        """
        # Reset internal state before fitting: ``fit`` is a fresh start,
        # whereas ``partial_fit`` accumulates across calls.
        self._reset()
        return self.partial_fit(X, y)

    def partial_fit(self, X, y=None):
        """Online computation of max absolute value of X for later scaling.

        All of X is processed as a single batch. This is intended for cases
        when `fit` is not feasible due to very large number of `n_samples`
        or because X is read from a continuous stream.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape [n_samples, n_features]
            The data used to compute the mean and standard deviation
            used for later scaling along the features axis.

        y : Passthrough for ``Pipeline`` compatibility.
        """
        X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
                        estimator=self, dtype=FLOAT_DTYPES)
        if sparse.issparse(X):
            # Column-wise min/max are computed without densifying; the
            # largest magnitude per column is max(|min|, |max|).
            mins, maxs = min_max_axis(X, axis=0)
            max_abs = np.maximum(np.abs(mins), np.abs(maxs))
        else:
            max_abs = np.abs(X).max(axis=0)
        # First pass
        if not hasattr(self, 'n_samples_seen_'):
            self.n_samples_seen_ = X.shape[0]
        # Next passes
        else:
            # Fold this batch's maxima into the running maxima.
            max_abs = np.maximum(self.max_abs_, max_abs)
            self.n_samples_seen_ += X.shape[0]
        self.max_abs_ = max_abs
        # Replace zero maxima so constant-zero features divide cleanly.
        self.scale_ = _handle_zeros_in_scale(max_abs)
        return self

    def transform(self, X, y=None):
        """Scale the data.

        Parameters
        ----------
        X : {array-like, sparse matrix}
            The data that should be scaled.
        """
        check_is_fitted(self, 'scale_')
        X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
                        estimator=self, dtype=FLOAT_DTYPES)
        if sparse.issparse(X):
            # In-place column scaling preserves sparsity.
            inplace_column_scale(X, 1.0 / self.scale_)
        else:
            X /= self.scale_
        return X

    def inverse_transform(self, X):
        """Scale back the data to the original representation.

        Parameters
        ----------
        X : {array-like, sparse matrix}
            The data that should be transformed back.
        """
        check_is_fitted(self, 'scale_')
        X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
                        estimator=self, dtype=FLOAT_DTYPES)
        if sparse.issparse(X):
            inplace_column_scale(X, self.scale_)
        else:
            X *= self.scale_
        return X
def maxabs_scale(X, axis=0, copy=True):
    """Scale each feature to the [-1, 1] range without breaking the sparsity.

    Each feature is scaled individually so that its maximal absolute value
    in the training set becomes 1.0.

    This function also accepts sparse CSR or CSC matrices.

    Parameters
    ----------
    axis : int (0 by default)
        axis used to scale along. If 0, independently scale each feature,
        otherwise (if 1) scale each sample.

    copy : boolean, optional, default is True
        Set to False to perform inplace scaling and avoid a copy (if the input
        is already a numpy array).

    See also
    --------
    MaxAbsScaler: Performs scaling to the [-1, 1] range using the``Transformer`` API
        (e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).

    Notes
    -----
    For a comparison of the different scalers, transformers, and normalizers,
    see :ref:`examples/preprocessing/plot_all_scaling.py
    <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
    """  # noqa
    # Unlike the scaler object, this function allows 1d input; any requested
    # copy is performed inside the scaler, so none is made here.
    X = check_array(X, accept_sparse=('csr', 'csc'), copy=False,
                    ensure_2d=False, dtype=FLOAT_DTYPES)
    was_1d = (X.ndim == 1)
    if was_1d:
        # Promote to a single-column 2d array for the scaler.
        X = X.reshape(X.shape[0], 1)
    scaler = MaxAbsScaler(copy=copy)
    # axis=1 scales samples instead of features: transpose around the fit.
    X = scaler.fit_transform(X) if axis == 0 else scaler.fit_transform(X.T).T
    if was_1d:
        X = X.ravel()
    return X
class RobustScaler(BaseEstimator, TransformerMixin):
    """Scale features using statistics that are robust to outliers.

    This Scaler removes the median and scales the data according to
    the quantile range (defaults to IQR: Interquartile Range).
    The IQR is the range between the 1st quartile (25th quantile)
    and the 3rd quartile (75th quantile).

    Centering and scaling happen independently on each feature (or each
    sample, depending on the `axis` argument) by computing the relevant
    statistics on the samples in the training set. Median and interquartile
    range are then stored to be used on later data using the `transform`
    method.

    Standardization of a dataset is a common requirement for many
    machine learning estimators. Typically this is done by removing the mean
    and scaling to unit variance. However, outliers can often influence the
    sample mean / variance in a negative way. In such cases, the median and
    the interquartile range often give better results.

    .. versionadded:: 0.17

    Read more in the :ref:`User Guide <preprocessing_scaler>`.

    Parameters
    ----------
    with_centering : boolean, True by default
        If True, center the data before scaling.
        This does not work (and will raise an exception) when attempted on
        sparse matrices, because centering them entails building a dense
        matrix which in common use cases is likely to be too large to fit in
        memory.

    with_scaling : boolean, True by default
        If True, scale the data to interquartile range.

    quantile_range : tuple (q_min, q_max), 0.0 < q_min < q_max < 100.0
        Default: (25.0, 75.0) = (1st quantile, 3rd quantile) = IQR
        Quantile range used to calculate ``scale_``.

        .. versionadded:: 0.18

    copy : boolean, optional, default is True
        If False, try to avoid a copy and do inplace scaling instead.
        This is not guaranteed to always work inplace; e.g. if the data is
        not a NumPy array or scipy.sparse CSR matrix, a copy may still be
        returned.

    Attributes
    ----------
    center_ : array of floats
        The median value for each feature in the training set.

    scale_ : array of floats
        The (scaled) interquartile range for each feature in the training set.

        .. versionadded:: 0.17
           *scale_* attribute.

    See also
    --------
    robust_scale: Equivalent function without the estimator API.

    :class:`sklearn.decomposition.PCA`
        Further removes the linear correlation across features with
        'whiten=True'.

    Notes
    -----
    For a comparison of the different scalers, transformers, and normalizers,
    see :ref:`examples/preprocessing/plot_all_scaling.py
    <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.

    https://en.wikipedia.org/wiki/Median_(statistics)
    https://en.wikipedia.org/wiki/Interquartile_range
    """

    def __init__(self, with_centering=True, with_scaling=True,
                 quantile_range=(25.0, 75.0), copy=True):
        self.with_centering = with_centering
        self.with_scaling = with_scaling
        self.quantile_range = quantile_range
        self.copy = copy

    def _check_array(self, X, copy):
        """Makes sure centering is not enabled for sparse matrices."""
        # NOTE(review): the ``copy`` parameter is ignored here —
        # ``self.copy`` is forwarded to check_array instead. All current
        # callers pass ``self.copy``, so behavior is unaffected, but confirm
        # before relying on this parameter.
        X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
                        estimator=self, dtype=FLOAT_DTYPES)
        if sparse.issparse(X):
            if self.with_centering:
                raise ValueError(
                    "Cannot center sparse matrices: use `with_centering=False`"
                    " instead. See docstring for motivation and alternatives.")
        return X

    def fit(self, X, y=None):
        """Compute the median and quantiles to be used for scaling.

        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            The data used to compute the median and quantiles
            used for later scaling along the features axis.
        """
        # Median/percentile computation requires dense data, so sparse input
        # is rejected outright for fitting (transform allows sparse when
        # with_centering is False).
        if sparse.issparse(X):
            raise TypeError("RobustScaler cannot be fitted on sparse inputs")
        X = self._check_array(X, self.copy)
        if self.with_centering:
            self.center_ = np.median(X, axis=0)
        if self.with_scaling:
            q_min, q_max = self.quantile_range
            if not 0 <= q_min <= q_max <= 100:
                raise ValueError("Invalid quantile range: %s" %
                                 str(self.quantile_range))
            # ``q`` has two rows: the q_min and q_max percentiles, one value
            # per feature.
            q = np.percentile(X, self.quantile_range, axis=0)
            self.scale_ = (q[1] - q[0])
            # Guard against a zero quantile range so transform never divides
            # by zero for constant features.
            self.scale_ = _handle_zeros_in_scale(self.scale_, copy=False)
        return self

    def transform(self, X, y=None):
        """Center and scale the data.

        Parameters
        ----------
        X : array-like
            The data used to scale along the specified axis.
        """
        # Only the statistics actually used need to have been fitted.
        if self.with_centering:
            check_is_fitted(self, 'center_')
        if self.with_scaling:
            check_is_fitted(self, 'scale_')
        X = self._check_array(X, self.copy)
        if sparse.issparse(X):
            # Sparse input: scaling only (centering was rejected above).
            if self.with_scaling:
                inplace_column_scale(X, 1.0 / self.scale_)
        else:
            if self.with_centering:
                X -= self.center_
            if self.with_scaling:
                X /= self.scale_
        return X

    def inverse_transform(self, X):
        """Scale back the data to the original representation.

        Parameters
        ----------
        X : array-like
            The data used to scale along the specified axis.
        """
        if self.with_centering:
            check_is_fitted(self, 'center_')
        if self.with_scaling:
            check_is_fitted(self, 'scale_')
        X = self._check_array(X, self.copy)
        if sparse.issparse(X):
            if self.with_scaling:
                inplace_column_scale(X, self.scale_)
        else:
            # Undo scaling first, then centering (reverse of transform).
            if self.with_scaling:
                X *= self.scale_
            if self.with_centering:
                X += self.center_
        return X
def robust_scale(X, axis=0, with_centering=True, with_scaling=True,
                 quantile_range=(25.0, 75.0), copy=True):
    """Standardize a dataset along any axis.

    Center to the median and component wise scale
    according to the interquartile range.

    Read more in the :ref:`User Guide <preprocessing_scaler>`.

    Parameters
    ----------
    X : array-like
        The data to center and scale.

    axis : int (0 by default)
        axis used to compute the medians and IQR along. If 0,
        independently scale each feature, otherwise (if 1) scale
        each sample.

    with_centering : boolean, True by default
        If True, center the data before scaling.

    with_scaling : boolean, True by default
        If True, scale the data to unit variance (or equivalently,
        unit standard deviation).

    quantile_range : tuple (q_min, q_max), 0.0 < q_min < q_max < 100.0
        Default: (25.0, 75.0) = (1st quantile, 3rd quantile) = IQR
        Quantile range used to calculate ``scale_``.

        .. versionadded:: 0.18

    copy : boolean, optional, default is True
        set to False to perform inplace row normalization and avoid a
        copy (if the input is already a numpy array or a scipy.sparse
        CSR matrix and if axis is 1).

    Notes
    -----
    This implementation will refuse to center scipy.sparse matrices
    since it would make them non-sparse and would potentially crash the
    program with memory exhaustion problems.

    Instead the caller is expected to either set explicitly
    `with_centering=False` (in that case, only variance scaling will be
    performed on the features of the CSR matrix) or to call `X.toarray()`
    if he/she expects the materialized dense array to fit in memory.

    To avoid memory copy the caller should pass a CSR matrix.

    For a comparison of the different scalers, transformers, and normalizers,
    see :ref:`examples/preprocessing/plot_all_scaling.py
    <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.

    See also
    --------
    RobustScaler: Performs centering and scaling using the ``Transformer`` API
        (e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
    """
    # Delegate to the estimator; axis=1 scales samples instead of features,
    # which is handled by transposing around the fit.
    scaler = RobustScaler(with_centering=with_centering,
                          with_scaling=with_scaling,
                          quantile_range=quantile_range, copy=copy)
    if axis == 0:
        return scaler.fit_transform(X)
    return scaler.fit_transform(X.T).T
class PolynomialFeatures(BaseEstimator, TransformerMixin):
    """Generate polynomial and interaction features.

    Generate a new feature matrix consisting of all polynomial combinations
    of the features with degree less than or equal to the specified degree.
    For example, if an input sample is two dimensional and of the form
    [a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].

    Parameters
    ----------
    degree : integer
        The degree of the polynomial features. Default = 2.

    interaction_only : boolean, default = False
        If true, only interaction features are produced: features that are
        products of at most ``degree`` *distinct* input features (so not
        ``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.).

    include_bias : boolean
        If True (default), then include a bias column, the feature in which
        all polynomial powers are zero (i.e. a column of ones - acts as an
        intercept term in a linear model).

    Examples
    --------
    >>> X = np.arange(6).reshape(3, 2)
    >>> X
    array([[0, 1],
           [2, 3],
           [4, 5]])
    >>> poly = PolynomialFeatures(2)
    >>> poly.fit_transform(X)
    array([[  1.,   0.,   1.,   0.,   0.,   1.],
           [  1.,   2.,   3.,   4.,   6.,   9.],
           [  1.,   4.,   5.,  16.,  20.,  25.]])
    >>> poly = PolynomialFeatures(interaction_only=True)
    >>> poly.fit_transform(X)
    array([[  1.,   0.,   1.,   0.],
           [  1.,   2.,   3.,   6.],
           [  1.,   4.,   5.,  20.]])

    Attributes
    ----------
    powers_ : array, shape (n_output_features, n_input_features)
        powers_[i, j] is the exponent of the jth input in the ith output.

    n_input_features_ : int
        The total number of input features.

    n_output_features_ : int
        The total number of polynomial output features. The number of output
        features is computed by iterating over all suitably sized combinations
        of input features.

    Notes
    -----
    Be aware that the number of features in the output array scales
    polynomially in the number of features of the input array, and
    exponentially in the degree. High degrees can cause overfitting.

    See :ref:`examples/linear_model/plot_polynomial_interpolation.py
    <sphx_glr_auto_examples_linear_model_plot_polynomial_interpolation.py>`
    """

    def __init__(self, degree=2, interaction_only=False, include_bias=True):
        self.degree = degree
        self.interaction_only = interaction_only
        self.include_bias = include_bias

    @staticmethod
    def _combinations(n_features, degree, interaction_only, include_bias):
        # Each yielded tuple holds input-feature indices whose product forms
        # one output feature. With interaction_only, indices in a tuple are
        # distinct (no repeated feature, hence no powers >= 2).
        comb = (combinations if interaction_only else combinations_w_r)
        # Degree-0 combination (the empty tuple) is the bias column.
        start = int(not include_bias)
        return chain.from_iterable(comb(range(n_features), i)
                                   for i in range(start, degree + 1))

    @property
    def powers_(self):
        """Exponent matrix: powers_[i, j] is the exponent of input j in
        output i."""
        check_is_fitted(self, 'n_input_features_')
        combinations = self._combinations(self.n_input_features_, self.degree,
                                          self.interaction_only,
                                          self.include_bias)
        # np.vstack requires a sequence of arrays; passing a generator is
        # deprecated and raises in recent NumPy, so materialize a list.
        return np.vstack([np.bincount(c, minlength=self.n_input_features_)
                          for c in combinations])

    def get_feature_names(self, input_features=None):
        """
        Return feature names for output features

        Parameters
        ----------
        input_features : list of string, length n_features, optional
            String names for input features if available. By default,
            "x0", "x1", ... "xn_features" is used.

        Returns
        -------
        output_feature_names : list of string, length n_output_features
        """
        powers = self.powers_
        if input_features is None:
            input_features = ['x%d' % i for i in range(powers.shape[1])]
        feature_names = []
        for row in powers:
            inds = np.where(row)[0]
            if len(inds):
                # Render each factor as "name" or "name^exp"; factors are
                # joined with spaces, e.g. "x0 x1^2".
                name = " ".join("%s^%d" % (input_features[ind], exp)
                                if exp != 1 else input_features[ind]
                                for ind, exp in zip(inds, row[inds]))
            else:
                # All-zero powers row is the bias column.
                name = "1"
            feature_names.append(name)
        return feature_names

    def fit(self, X, y=None):
        """
        Compute number of output features.
        """
        n_samples, n_features = check_array(X).shape
        combinations = self._combinations(n_features, self.degree,
                                          self.interaction_only,
                                          self.include_bias)
        self.n_input_features_ = n_features
        # Count lazily; materializing all combinations can be large.
        self.n_output_features_ = sum(1 for _ in combinations)
        return self

    def transform(self, X, y=None):
        """Transform data to polynomial features

        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            The data to transform, row by row.

        Returns
        -------
        XP : np.ndarray shape [n_samples, NP]
            The matrix of features, where NP is the number of polynomial
            features generated from the combination of inputs.
        """
        check_is_fitted(self, ['n_input_features_', 'n_output_features_'])

        X = check_array(X, dtype=FLOAT_DTYPES)
        n_samples, n_features = X.shape

        if n_features != self.n_input_features_:
            raise ValueError("X shape does not match training shape")

        # allocate output data
        XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype)

        combinations = self._combinations(n_features, self.degree,
                                          self.interaction_only,
                                          self.include_bias)
        for i, c in enumerate(combinations):
            # Product over the selected columns; the empty tuple (bias)
            # yields a column of ones.
            XP[:, i] = X[:, c].prod(1)

        return XP
def normalize(X, norm='l2', axis=1, copy=True, return_norm=False):
    """Scale input vectors individually to unit norm (vector length).

    Read more in the :ref:`User Guide <preprocessing_normalization>`.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape [n_samples, n_features]
        The data to normalize, element by element.
        scipy.sparse matrices should be in CSR format to avoid an
        un-necessary copy.

    norm : 'l1', 'l2', or 'max', optional ('l2' by default)
        The norm to use to normalize each non zero sample (or each non-zero
        feature if axis is 0).

    axis : 0 or 1, optional (1 by default)
        axis used to normalize the data along. If 1, independently normalize
        each sample, otherwise (if 0) normalize each feature.

    copy : boolean, optional, default True
        set to False to perform inplace row normalization and avoid a
        copy (if the input is already a numpy array or a scipy.sparse
        CSR matrix and if axis is 1).

    return_norm : boolean, default False
        whether to return the computed norms

    Returns
    -------
    X : {array-like, sparse matrix}, shape [n_samples, n_features]
        Normalized input X.

    norms : array, shape [n_samples] if axis=1 else [n_features]
        An array of norms along given axis for X.
        When X is sparse, a NotImplementedError will be raised
        for norm 'l1' or 'l2'.

    See also
    --------
    Normalizer: Performs normalization using the ``Transformer`` API
        (e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).

    Notes
    -----
    For a comparison of the different scalers, transformers, and normalizers,
    see :ref:`examples/preprocessing/plot_all_scaling.py
    <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
    """
    if norm not in ('l1', 'l2', 'max'):
        raise ValueError("'%s' is not a supported norm" % norm)

    # Pick the sparse layout that makes the chosen axis contiguous.
    if axis == 1:
        sparse_format = 'csr'
    elif axis == 0:
        sparse_format = 'csc'
    else:
        raise ValueError("'%d' is not a supported axis" % axis)

    X = check_array(X, sparse_format, copy=copy,
                    estimator='the normalize function', dtype=FLOAT_DTYPES)
    # Normalize rows internally; transpose around the work for axis=0.
    if axis == 0:
        X = X.T

    if sparse.issparse(X):
        if return_norm and norm in ('l1', 'l2'):
            raise NotImplementedError("return_norm=True is not implemented "
                                      "for sparse matrices with norm 'l1' "
                                      "or norm 'l2'")
        if norm == 'l1':
            inplace_csr_row_normalize_l1(X)
        elif norm == 'l2':
            inplace_csr_row_normalize_l2(X)
        else:  # norm == 'max'
            # Broadcast each row's max over its stored entries via indptr.
            _, norms = min_max_axis(X, 1)
            per_entry_norms = norms.repeat(np.diff(X.indptr))
            nonzero = per_entry_norms != 0
            X.data[nonzero] /= per_entry_norms[nonzero]
    else:
        if norm == 'l1':
            norms = np.abs(X).sum(axis=1)
        elif norm == 'l2':
            norms = row_norms(X)
        else:  # norm == 'max'
            norms = np.max(X, axis=1)
        # Zero norms are mapped to 1 so all-zero rows stay untouched.
        norms = _handle_zeros_in_scale(norms, copy=False)
        X /= norms[:, np.newaxis]

    if axis == 0:
        X = X.T

    return (X, norms) if return_norm else X
class Normalizer(BaseEstimator, TransformerMixin):
    """Normalize samples individually to unit norm.

    Each sample (i.e. each row of the data matrix) with at least one
    non zero component is rescaled independently of other samples so
    that its norm (l1 or l2) equals one.

    Works on dense numpy arrays as well as scipy.sparse matrices (use the
    CSR format to avoid a copy / conversion).

    Scaling inputs to unit norms is a common operation for text
    classification or clustering. For instance, the dot product of two
    l2-normalized TF-IDF vectors is the cosine similarity of the vectors,
    the base similarity metric of the Vector Space Model used in
    Information Retrieval.

    Read more in the :ref:`User Guide <preprocessing_normalization>`.

    Parameters
    ----------
    norm : 'l1', 'l2', or 'max', optional ('l2' by default)
        The norm to use to normalize each non zero sample.

    copy : boolean, optional, default True
        set to False to perform inplace row normalization and avoid a
        copy (if the input is already a numpy array or a scipy.sparse
        CSR matrix).

    Notes
    -----
    This estimator is stateless (besides constructor parameters), the
    fit method does nothing but is useful when used in a pipeline.

    For a comparison of the different scalers, transformers, and normalizers,
    see :ref:`examples/preprocessing/plot_all_scaling.py
    <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.

    See also
    --------
    normalize: Equivalent function without the estimator API.
    """

    def __init__(self, norm='l2', copy=True):
        self.norm = norm
        self.copy = copy

    def fit(self, X, y=None):
        """Do nothing and return the estimator unchanged.

        This method exists only to implement the usual estimator API and
        hence work in pipelines; it merely validates the input.
        """
        X = check_array(X, accept_sparse='csr')
        return self

    def transform(self, X, y=None, copy=None):
        """Scale each non zero row of X to unit norm.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape [n_samples, n_features]
            The data to normalize, row by row. scipy.sparse matrices should be
            in CSR format to avoid an un-necessary copy.
        """
        if copy is None:
            copy = self.copy
        X = check_array(X, accept_sparse='csr')
        # Delegate the actual work to the function-level API.
        return normalize(X, norm=self.norm, axis=1, copy=copy)
def binarize(X, threshold=0.0, copy=True):
    """Boolean thresholding of array-like or scipy.sparse matrix.

    Read more in the :ref:`User Guide <preprocessing_binarization>`.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape [n_samples, n_features]
        The data to binarize, element by element.
        scipy.sparse matrices should be in CSR or CSC format to avoid an
        un-necessary copy.

    threshold : float, optional (0.0 by default)
        Feature values below or equal to this are replaced by 0, above it by 1.
        Threshold may not be less than 0 for operations on sparse matrices.

    copy : boolean, optional, default True
        set to False to perform inplace binarization and avoid a copy
        (if the input is already a numpy array or a scipy.sparse CSR / CSC
        matrix and if axis is 1).

    See also
    --------
    Binarizer: Performs binarization using the ``Transformer`` API
        (e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
    """
    X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy)
    if sparse.issparse(X):
        # A negative threshold would turn implicit zeros into ones, which
        # cannot be represented without densifying.
        if threshold < 0:
            raise ValueError('Cannot binarize a sparse matrix with threshold '
                             '< 0')
        above = X.data > threshold
        X.data[above] = 1
        X.data[~above] = 0
        # Drop the explicitly stored zeros we just created.
        X.eliminate_zeros()
    else:
        above = X > threshold
        X[above] = 1
        X[~above] = 0
    return X
class Binarizer(BaseEstimator, TransformerMixin):
    """Binarize data (set feature values to 0 or 1) according to a threshold.

    Values greater than the threshold map to 1, while values less than
    or equal to the threshold map to 0. With the default threshold of 0,
    only positive values map to 1.

    Binarization is a common operation on text count data where the
    analyst can decide to only consider the presence or absence of a
    feature rather than a quantified number of occurrences for instance.

    It can also be used as a pre-processing step for estimators that
    consider boolean random variables (e.g. modelled using the Bernoulli
    distribution in a Bayesian setting).

    Read more in the :ref:`User Guide <preprocessing_binarization>`.

    Parameters
    ----------
    threshold : float, optional (0.0 by default)
        Feature values below or equal to this are replaced by 0, above it by 1.
        Threshold may not be less than 0 for operations on sparse matrices.

    copy : boolean, optional, default True
        set to False to perform inplace binarization and avoid a copy (if
        the input is already a numpy array or a scipy.sparse CSR matrix).

    Notes
    -----
    If the input is a sparse matrix, only the non-zero values are subject
    to update by the Binarizer class.

    This estimator is stateless (besides constructor parameters), the
    fit method does nothing but is useful when used in a pipeline.

    See also
    --------
    binarize: Equivalent function without the estimator API.
    """

    def __init__(self, threshold=0.0, copy=True):
        self.threshold = threshold
        self.copy = copy

    def fit(self, X, y=None):
        """Do nothing and return the estimator unchanged.

        This method exists only to implement the usual estimator API and
        hence work in pipelines; it merely validates the input.
        """
        check_array(X, accept_sparse='csr')
        return self

    def transform(self, X, y=None, copy=None):
        """Binarize each element of X.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape [n_samples, n_features]
            The data to binarize, element by element.
            scipy.sparse matrices should be in CSR format to avoid an
            un-necessary copy.
        """
        if copy is None:
            copy = self.copy
        # Delegate the actual work to the function-level API.
        return binarize(X, threshold=self.threshold, copy=copy)
class KernelCenterer(BaseEstimator, TransformerMixin):
    """Center a kernel matrix.

    Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a
    function mapping x to a Hilbert space. KernelCenterer centers (i.e.,
    normalize to have zero mean) the data without explicitly computing phi(x).
    It is equivalent to centering phi(x) with
    sklearn.preprocessing.StandardScaler(with_std=False).

    Read more in the :ref:`User Guide <kernel_centering>`.
    """

    def fit(self, K, y=None):
        """Fit KernelCenterer.

        Parameters
        ----------
        K : numpy array of shape [n_samples, n_samples]
            Kernel matrix.

        Returns
        -------
        self : returns an instance of self.
        """
        K = check_array(K, dtype=FLOAT_DTYPES)
        n_samples = K.shape[0]
        # Column means and the grand mean of the training kernel.
        self.K_fit_rows_ = K.sum(axis=0) / n_samples
        self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples
        return self

    def transform(self, K, y=None, copy=True):
        """Center kernel matrix.

        Parameters
        ----------
        K : numpy array of shape [n_samples1, n_samples2]
            Kernel matrix.

        copy : boolean, optional, default True
            Set to False to perform inplace computation.

        Returns
        -------
        K_new : numpy array of shape [n_samples1, n_samples2]
        """
        check_is_fitted(self, 'K_fit_all_')

        K = check_array(K, copy=copy, dtype=FLOAT_DTYPES)

        # Row means of the kernel between new points and training points.
        K_pred_cols = (K.sum(axis=1) /
                       self.K_fit_rows_.shape[0])[:, np.newaxis]

        # Double-centering: subtract row and column means, add grand mean.
        K -= self.K_fit_rows_
        K -= K_pred_cols
        K += self.K_fit_all_

        return K

    @property
    def _pairwise(self):
        # Signals to scikit-learn utilities that inputs are kernel matrices.
        return True
def add_dummy_feature(X, value=1.0):
    """Augment dataset with an additional dummy feature.

    This is useful for fitting an intercept term with implementations which
    cannot otherwise fit it directly.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape [n_samples, n_features]
        Data.

    value : float
        Value to use for the dummy feature.

    Returns
    -------
    X : {array, sparse matrix}, shape [n_samples, n_features + 1]
        Same data with dummy feature added as first column.

    Examples
    --------
    >>> from sklearn.preprocessing import add_dummy_feature
    >>> add_dummy_feature([[0, 1], [1, 0]])
    array([[ 1.,  0.,  1.],
           [ 1.,  1.,  0.]])
    """
    X = check_array(X, accept_sparse=['csc', 'csr', 'coo'], dtype=FLOAT_DTYPES)
    n_samples, n_features = X.shape
    shape = (n_samples, n_features + 1)
    if not sparse.issparse(X):
        # Dense case: prepend a constant column.
        return np.hstack((np.ones((n_samples, 1)) * value, X))
    if sparse.isspmatrix_coo(X):
        # COO: shift existing columns right, then prepend n_samples entries
        # of the dummy value in column 0 (rows 0..n_samples-1).
        col = np.concatenate((np.zeros(n_samples), X.col + 1))
        row = np.concatenate((np.arange(n_samples), X.row))
        data = np.concatenate((np.ones(n_samples) * value, X.data))
        return sparse.coo_matrix((data, (row, col)), shape)
    if sparse.isspmatrix_csc(X):
        # CSC: the new first column holds n_samples entries, so every index
        # pointer shifts by n_samples and indptr[0] is re-anchored at 0.
        indptr = np.concatenate((np.array([0]), X.indptr + n_samples))
        indices = np.concatenate((np.arange(n_samples), X.indices))
        data = np.concatenate((np.ones(n_samples) * value, X.data))
        return sparse.csc_matrix((data, indices, indptr), shape)
    # Any other sparse format: go through COO and convert back.
    klass = X.__class__
    return klass(add_dummy_feature(X.tocoo(), value))
def _transform_selected(X, transform, selected="all", copy=True):
    """Apply a transform function to portion of selected features.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape [n_samples, n_features]
        Dense array or sparse matrix.

    transform : callable
        A callable transform(X) -> X_transformed

    copy : boolean, optional
        Copy X even if it could be avoided.

    selected: "all" or array of indices or mask
        Specify which features to apply the transform to.

    Returns
    -------
    X : array or sparse matrix, shape=(n_samples, n_features_new)
    """
    X = check_array(X, accept_sparse='csc', copy=copy, dtype=FLOAT_DTYPES)

    if isinstance(selected, six.string_types) and selected == "all":
        return transform(X)

    if len(selected) == 0:
        return X

    n_features = X.shape[1]
    # Build a boolean mask from indices (or a mask) over all features.
    mask = np.zeros(n_features, dtype=bool)
    mask[np.asarray(selected)] = True
    n_selected = mask.sum()

    if n_selected == 0:
        # No features selected.
        return X
    if n_selected == n_features:
        # All features selected.
        return transform(X)

    # Transform the selected columns, then stack the untouched ones to the
    # right of the result.
    indices = np.arange(n_features)
    X_sel = transform(X[:, indices[mask]])
    X_not_sel = X[:, indices[~mask]]

    if sparse.issparse(X_sel) or sparse.issparse(X_not_sel):
        return sparse.hstack((X_sel, X_not_sel))
    return np.hstack((X_sel, X_not_sel))
class OneHotEncoder(BaseEstimator, TransformerMixin):
"""Encode categorical integer features using a one-hot aka one-of-K scheme.
The input to this transformer should be a matrix of integers, denoting
the values taken on by categorical (discrete) features. The output will be
a sparse matrix where each column corresponds to one possible value of one
feature. It is assumed that input features take on values in the range
[0, n_values).
This encoding is needed for feeding categorical data to many scikit-learn
estimators, notably linear models and SVMs with the standard kernels.
Note: a one-hot encoding of y labels should use a LabelBinarizer
instead.
Read more in the :ref:`User Guide <preprocessing_categorical_features>`.
Parameters
----------
n_values : 'auto', int or array of ints
Number of values per feature.
- 'auto' : determine value range from training data.
- int : number of categorical values per feature.
Each feature value should be in ``range(n_values)``
- array : ``n_values[i]`` is the number of categorical values in
``X[:, i]``. Each feature value should be
in ``range(n_values[i])``
categorical_features : "all" or array of indices or mask
Specify what features are treated as categorical.
- 'all' (default): All features are treated as categorical.
- array of indices: Array of categorical feature indices.
- mask: Array of length n_features and with dtype=bool.
Non-categorical features are always stacked to the right of the matrix.
dtype : number type, default=np.float
Desired dtype of output.
sparse : boolean, default=True
Will return sparse matrix if set True else will return an array.
handle_unknown : str, 'error' or 'ignore'
Whether to raise an error or ignore if a unknown categorical feature is
present during transform.
Attributes
----------
active_features_ : array
Indices for active features, meaning values that actually occur
in the training set. Only available when n_values is ``'auto'``.
feature_indices_ : array of shape (n_features,)
Indices to feature ranges.
Feature ``i`` in the original data is mapped to features
from ``feature_indices_[i]`` to ``feature_indices_[i+1]``
(and then potentially masked by `active_features_` afterwards)
n_values_ : array of shape (n_features,)
Maximum number of values per feature.
Examples
--------
Given a dataset with three features and four samples, we let the encoder
find the maximum value per feature and transform the data to a binary
one-hot encoding.
>>> from sklearn.preprocessing import OneHotEncoder
>>> enc = OneHotEncoder()
>>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], \
[1, 0, 2]]) # doctest: +ELLIPSIS
OneHotEncoder(categorical_features='all', dtype=<... 'numpy.float64'>,
handle_unknown='error', n_values='auto', sparse=True)
>>> enc.n_values_
array([2, 3, 4])
>>> enc.feature_indices_
array([0, 2, 5, 9])
>>> enc.transform([[0, 1, 1]]).toarray()
array([[ 1., 0., 0., 1., 0., 0., 1., 0., 0.]])
See also
--------
sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of
dictionary items (also handles string-valued features).
sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot
encoding of dictionary items or strings.
sklearn.preprocessing.LabelBinarizer : binarizes labels in a one-vs-all
fashion.
sklearn.preprocessing.MultiLabelBinarizer : transforms between iterable of
iterables and a multilabel format, e.g. a (samples x classes) binary
matrix indicating the presence of a class label.
sklearn.preprocessing.LabelEncoder : encodes labels with values between 0
and n_classes-1.
"""
def __init__(self, n_values="auto", categorical_features="all",
             dtype=np.float64, sparse=True, handle_unknown='error'):
    """Store the constructor parameters verbatim.

    Per scikit-learn convention no validation happens here; all checks
    are deferred to ``fit``/``fit_transform``.
    """
    self.handle_unknown = handle_unknown
    self.sparse = sparse
    self.dtype = dtype
    self.categorical_features = categorical_features
    self.n_values = n_values
def fit(self, X, y=None):
    """Fit OneHotEncoder to X.

    Parameters
    ----------
    X : array-like, shape [n_samples, n_feature]
        Input array of type int.

    Returns
    -------
    self
    """
    # Fitting is delegated to fit_transform; its return value (the encoded
    # matrix) is discarded here and only the learned attributes are kept.
    self.fit_transform(X)
    return self
def _fit_transform(self, X):
    """Fit and one-hot encode X.

    Assumes X contains only categorical features. Returns the encoded
    matrix (sparse CSR, or dense if ``self.sparse`` is False) and sets
    ``n_values_``, ``feature_indices_`` and (for n_values='auto')
    ``active_features_``.
    """
    X = check_array(X, dtype=np.int)
    if np.any(X < 0):
        raise ValueError("X needs to contain only non-negative integers.")
    n_samples, n_features = X.shape
    if (isinstance(self.n_values, six.string_types) and
            self.n_values == 'auto'):
        # One column per observed value: values assumed in [0, max].
        n_values = np.max(X, axis=0) + 1
    elif isinstance(self.n_values, numbers.Integral):
        if (np.max(X, axis=0) >= self.n_values).any():
            raise ValueError("Feature out of bounds for n_values=%d"
                             % self.n_values)
        n_values = np.empty(n_features, dtype=np.int)
        n_values.fill(self.n_values)
    else:
        try:
            n_values = np.asarray(self.n_values, dtype=int)
        except (ValueError, TypeError):
            # BUG FIX: report the type of the offending parameter
            # (self.n_values) rather than the type of the data matrix X.
            raise TypeError("Wrong type for parameter `n_values`. Expected"
                            " 'auto', int or array of ints, got %r"
                            % type(self.n_values))
        if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]:
            raise ValueError("Shape mismatch: if n_values is an array,"
                             " it has to be of shape (n_features,).")

    self.n_values_ = n_values
    # feature_indices_[i]:feature_indices_[i+1] is the output column range
    # owned by input feature i.
    n_values = np.hstack([[0], n_values])
    indices = np.cumsum(n_values)
    self.feature_indices_ = indices

    # Each (row, value) pair becomes a single 1 in the output matrix.
    column_indices = (X + indices[:-1]).ravel()
    row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
                            n_features)
    data = np.ones(n_samples * n_features)
    out = sparse.coo_matrix((data, (row_indices, column_indices)),
                            shape=(n_samples, indices[-1]),
                            dtype=self.dtype).tocsr()

    if (isinstance(self.n_values, six.string_types) and
            self.n_values == 'auto'):
        # Drop columns for values never seen in the training data.
        mask = np.array(out.sum(axis=0)).ravel() != 0
        active_features = np.where(mask)[0]
        out = out[:, active_features]
        self.active_features_ = active_features

    return out if self.sparse else out.toarray()
def fit_transform(self, X, y=None):
    """Fit OneHotEncoder to X, then transform X.

    Equivalent to self.fit(X).transform(X), but more convenient and more
    efficient. See fit for the parameters, transform for the return value.
    """
    # Only the columns selected by `categorical_features` are encoded;
    # the remaining columns are stacked through unmodified.
    encode = self._fit_transform
    selection = self.categorical_features
    return _transform_selected(X, encode, selection, copy=True)
def _transform(self, X):
    """One-hot encode X using the fitted parameters.

    Assumes X contains only categorical features. Unknown feature values
    are either reported (handle_unknown='error') or silently dropped
    (handle_unknown='ignore').
    """
    X = check_array(X, dtype=np.int)
    if np.any(X < 0):
        raise ValueError("X needs to contain only non-negative integers.")
    n_samples, n_features = X.shape

    indices = self.feature_indices_
    if n_features != indices.shape[0] - 1:
        raise ValueError("X has different shape than during fitting."
                         " Expected %d, got %d."
                         % (indices.shape[0] - 1, n_features))

    # We use only those categorical features of X that are known using fit.
    # i.e lesser than n_values_ using mask.
    # This means, if self.handle_unknown is "ignore", the row_indices and
    # col_indices corresponding to the unknown categorical feature are
    # ignored.
    mask = (X < self.n_values_).ravel()
    if np.any(~mask):
        if self.handle_unknown not in ['error', 'ignore']:
            # BUG FIX: the valid values are 'error' and 'ignore'; the old
            # message wrongly said "error or unknown".
            raise ValueError("handle_unknown should be either 'error' or "
                             "'ignore' got %s" % self.handle_unknown)
        if self.handle_unknown == 'error':
            raise ValueError("unknown categorical feature present %s "
                             "during transform." % X.ravel()[~mask])

    column_indices = (X + indices[:-1]).ravel()[mask]
    row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
                            n_features)[mask]
    data = np.ones(np.sum(mask))
    out = sparse.coo_matrix((data, (row_indices, column_indices)),
                            shape=(n_samples, indices[-1]),
                            dtype=self.dtype).tocsr()
    if (isinstance(self.n_values, six.string_types) and
            self.n_values == 'auto'):
        # Keep only columns that were active during fit.
        out = out[:, self.active_features_]

    return out if self.sparse else out.toarray()
def transform(self, X):
    """Transform X using one-hot encoding.

    Parameters
    ----------
    X : array-like, shape [n_samples, n_features]
        Input array of type int.

    Returns
    -------
    X_out : sparse matrix if sparse=True else a 2-d array, dtype=int
        Transformed input.
    """
    # Encode the selected categorical columns; pass the rest through.
    encode = self._transform
    selection = self.categorical_features
    return _transform_selected(X, encode, selection, copy=True)
class QuantileTransformer(BaseEstimator, TransformerMixin):
    """Transform features using quantiles information.

    This method transforms the features to follow a uniform or a normal
    distribution. Therefore, for a given feature, this transformation tends
    to spread out the most frequent values. It also reduces the impact of
    (marginal) outliers: this is therefore a robust preprocessing scheme.

    The transformation is applied on each feature independently.
    The cumulative distribution function of a feature is used to project the
    original values. Features values of new/unseen data that fall below
    or above the fitted range will be mapped to the bounds of the output
    distribution. Note that this transform is non-linear. It may distort linear
    correlations between variables measured at the same scale but renders
    variables measured at different scales more directly comparable.

    Read more in the :ref:`User Guide <preprocessing_transformer>`.

    Parameters
    ----------
    n_quantiles : int, optional (default=1000)
        Number of quantiles to be computed. It corresponds to the number
        of landmarks used to discretize the cumulative density function.

    output_distribution : str, optional (default='uniform')
        Marginal distribution for the transformed data. The choices are
        'uniform' (default) or 'normal'.

    ignore_implicit_zeros : bool, optional (default=False)
        Only applies to sparse matrices. If True, the sparse entries of the
        matrix are discarded to compute the quantile statistics. If False,
        these entries are treated as zeros.

    subsample : int, optional (default=1e5)
        Maximum number of samples used to estimate the quantiles for
        computational efficiency. Note that the subsampling procedure may
        differ for value-identical sparse and dense matrices.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by np.random. Note that this is used by subsampling and smoothing
        noise.

    copy : boolean, optional, (default=True)
        Set to False to perform inplace transformation and avoid a copy (if the
        input is already a numpy array).

    Attributes
    ----------
    quantiles_ : ndarray, shape (n_quantiles, n_features)
        The values corresponding the quantiles of reference.

    references_ : ndarray, shape(n_quantiles, )
        Quantiles of references.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.preprocessing import QuantileTransformer
    >>> rng = np.random.RandomState(0)
    >>> X = np.sort(rng.normal(loc=0.5, scale=0.25, size=(25, 1)), axis=0)
    >>> qt = QuantileTransformer(n_quantiles=10, random_state=0)
    >>> qt.fit_transform(X) # doctest: +ELLIPSIS
    array([...])

    See also
    --------
    quantile_transform : Equivalent function without the estimator API.
    StandardScaler : perform standardization that is faster, but less robust
        to outliers.
    RobustScaler : perform robust standardization that removes the influence
        of outliers but does not put outliers and inliers on the same scale.

    Notes
    -----
    For a comparison of the different scalers, transformers, and normalizers,
    see :ref:`examples/preprocessing/plot_all_scaling.py
    <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
    """

    def __init__(self, n_quantiles=1000, output_distribution='uniform',
                 ignore_implicit_zeros=False, subsample=int(1e5),
                 random_state=None, copy=True):
        # Parameters are stored without validation (scikit-learn convention);
        # they are checked in fit().
        self.n_quantiles = n_quantiles
        self.output_distribution = output_distribution
        self.ignore_implicit_zeros = ignore_implicit_zeros
        self.subsample = subsample
        self.random_state = random_state
        self.copy = copy

    def _dense_fit(self, X, random_state):
        """Compute percentiles for dense matrices.

        Parameters
        ----------
        X : ndarray, shape (n_samples, n_features)
            The data used to scale along the features axis.
        """
        if self.ignore_implicit_zeros:
            warnings.warn("'ignore_implicit_zeros' takes effect only with"
                          " sparse matrix. This parameter has no effect.")

        n_samples, n_features = X.shape
        # for compatibility issue with numpy<=1.8.X, references
        # need to be a list scaled between 0 and 100
        references = (self.references_ * 100).tolist()
        self.quantiles_ = []
        for col in X.T:
            if self.subsample < n_samples:
                # Subsample without replacement to bound the cost of
                # np.percentile; mode='clip' keeps indices in-bounds.
                subsample_idx = random_state.choice(n_samples,
                                                    size=self.subsample,
                                                    replace=False)
                col = col.take(subsample_idx, mode='clip')
            self.quantiles_.append(np.percentile(col, references))
        # (n_features, n_quantiles) -> (n_quantiles, n_features)
        self.quantiles_ = np.transpose(self.quantiles_)

    def _sparse_fit(self, X, random_state):
        """Compute percentiles for sparse matrices.

        Parameters
        ----------
        X : sparse matrix CSC, shape (n_samples, n_features)
            The data used to scale along the features axis. The sparse matrix
            needs to be nonnegative.
        """
        n_samples, n_features = X.shape

        # for compatibility issue with numpy<=1.8.X, references
        # need to be a list scaled between 0 and 100
        references = list(map(lambda x: x * 100, self.references_))
        self.quantiles_ = []
        for feature_idx in range(n_features):
            # Non-zero entries of this CSC column.
            column_nnz_data = X.data[X.indptr[feature_idx]:
                                     X.indptr[feature_idx + 1]]
            if len(column_nnz_data) > self.subsample:
                # Subsample the non-zeros proportionally to their share of
                # the column; unless implicit zeros are ignored, pad with
                # zeros so they contribute to the quantiles.
                column_subsample = (self.subsample * len(column_nnz_data) //
                                    n_samples)
                if self.ignore_implicit_zeros:
                    column_data = np.zeros(shape=column_subsample,
                                           dtype=X.dtype)
                else:
                    column_data = np.zeros(shape=self.subsample, dtype=X.dtype)
                column_data[:column_subsample] = random_state.choice(
                    column_nnz_data, size=column_subsample, replace=False)
            else:
                if self.ignore_implicit_zeros:
                    column_data = np.zeros(shape=len(column_nnz_data),
                                           dtype=X.dtype)
                else:
                    column_data = np.zeros(shape=n_samples, dtype=X.dtype)
                column_data[:len(column_nnz_data)] = column_nnz_data

            if not column_data.size:
                # if no nnz, an error will be raised for computing the
                # quantiles. Force the quantiles to be zeros.
                self.quantiles_.append([0] * len(references))
            else:
                self.quantiles_.append(
                    np.percentile(column_data, references))
        self.quantiles_ = np.transpose(self.quantiles_)

    def fit(self, X, y=None):
        """Compute the quantiles used for transforming.

        Parameters
        ----------
        X : ndarray or sparse matrix, shape (n_samples, n_features)
            The data used to scale along the features axis. If a sparse
            matrix is provided, it will be converted into a sparse
            ``csc_matrix``. Additionally, the sparse matrix needs to be
            nonnegative if `ignore_implicit_zeros` is False.

        Returns
        -------
        self : object
            Returns self
        """
        if self.n_quantiles <= 0:
            raise ValueError("Invalid value for 'n_quantiles': %d. "
                             "The number of quantiles must be at least one."
                             % self.n_quantiles)

        if self.subsample <= 0:
            raise ValueError("Invalid value for 'subsample': %d. "
                             "The number of subsamples must be at least one."
                             % self.subsample)

        if self.n_quantiles > self.subsample:
            raise ValueError("The number of quantiles cannot be greater than"
                             " the number of samples used. Got {} quantiles"
                             " and {} samples.".format(self.n_quantiles,
                                                       self.subsample))

        X = self._check_inputs(X)
        rng = check_random_state(self.random_state)

        # Create the quantiles of reference
        self.references_ = np.linspace(0, 1, self.n_quantiles,
                                       endpoint=True)
        if sparse.issparse(X):
            self._sparse_fit(X, rng)
        else:
            self._dense_fit(X, rng)

        return self

    def _transform_col(self, X_col, quantiles, inverse):
        """Private function to transform a single feature"""
        if self.output_distribution == 'normal':
            # scipy.stats names the normal distribution 'norm'.
            output_distribution = 'norm'
        else:
            output_distribution = self.output_distribution
        output_distribution = getattr(stats, output_distribution)

        # older version of scipy do not handle tuple as fill_value
        # clipping the value before transform solve the issue
        if not inverse:
            lower_bound_x = quantiles[0]
            upper_bound_x = quantiles[-1]
            lower_bound_y = 0
            upper_bound_y = 1
        else:
            lower_bound_x = 0
            upper_bound_x = 1
            lower_bound_y = quantiles[0]
            upper_bound_y = quantiles[-1]
            # for inverse transform, match a uniform PDF
            X_col = output_distribution.cdf(X_col)
        # find index for lower and higher bounds
        lower_bounds_idx = (X_col - BOUNDS_THRESHOLD <
                            lower_bound_x)
        upper_bounds_idx = (X_col + BOUNDS_THRESHOLD >
                            upper_bound_x)

        if not inverse:
            # Interpolate in one direction and in the other and take the
            # mean. This is in case of repeated values in the features
            # and hence repeated quantiles
            #
            # If we don't do this, only one extreme of the duplicated is
            # used (the upper when we do ascending, and the
            # lower for descending). We take the mean of these two
            X_col = .5 * (np.interp(X_col, quantiles, self.references_)
                          - np.interp(-X_col, -quantiles[::-1],
                                      -self.references_[::-1]))
        else:
            X_col = np.interp(X_col, self.references_, quantiles)

        # Out-of-range values are mapped to the output bounds.
        X_col[upper_bounds_idx] = upper_bound_y
        X_col[lower_bounds_idx] = lower_bound_y
        # for forward transform, match the output PDF
        if not inverse:
            X_col = output_distribution.ppf(X_col)
            # find the value to clip the data to avoid mapping to
            # infinity. Clip such that the inverse transform will be
            # consistent
            clip_min = output_distribution.ppf(BOUNDS_THRESHOLD -
                                               np.spacing(1))
            clip_max = output_distribution.ppf(1 - (BOUNDS_THRESHOLD -
                                                    np.spacing(1)))
            X_col = np.clip(X_col, clip_min, clip_max)

        return X_col

    def _check_inputs(self, X, accept_sparse_negative=False):
        """Check inputs before fit and transform"""
        X = check_array(X, accept_sparse='csc', copy=self.copy,
                        dtype=[np.float64, np.float32])
        # we only accept positive sparse matrix when ignore_implicit_zeros is
        # false and that we call fit or transform.
        if (not accept_sparse_negative and not self.ignore_implicit_zeros and
                (sparse.issparse(X) and np.any(X.data < 0))):
            raise ValueError('QuantileTransformer only accepts non-negative'
                             ' sparse matrices.')

        # check the output PDF
        if self.output_distribution not in ('normal', 'uniform'):
            raise ValueError("'output_distribution' has to be either 'normal'"
                             " or 'uniform'. Got '{}' instead.".format(
                                 self.output_distribution))

        return X

    def _check_is_fitted(self, X):
        """Check the inputs before transforming"""
        check_is_fitted(self, 'quantiles_')
        # check that the dimension of X are adequate with the fitted data
        if X.shape[1] != self.quantiles_.shape[1]:
            raise ValueError('X does not have the same number of features as'
                             ' the previously fitted data. Got {} instead of'
                             ' {}.'.format(X.shape[1],
                                           self.quantiles_.shape[1]))

    def _transform(self, X, inverse=False):
        """Forward and inverse transform.

        Parameters
        ----------
        X : ndarray, shape (n_samples, n_features)
            The data used to scale along the features axis.

        inverse : bool, optional (default=False)
            If False, apply forward transform. If True, apply
            inverse transform.

        Returns
        -------
        X : ndarray, shape (n_samples, n_features)
            Projected data
        """
        # Note: X is modified in place column by column (a copy was already
        # taken in _check_inputs when self.copy is True).
        if sparse.issparse(X):
            for feature_idx in range(X.shape[1]):
                column_slice = slice(X.indptr[feature_idx],
                                     X.indptr[feature_idx + 1])
                X.data[column_slice] = self._transform_col(
                    X.data[column_slice], self.quantiles_[:, feature_idx],
                    inverse)
        else:
            for feature_idx in range(X.shape[1]):
                X[:, feature_idx] = self._transform_col(
                    X[:, feature_idx], self.quantiles_[:, feature_idx],
                    inverse)

        return X

    def transform(self, X):
        """Feature-wise transformation of the data.

        Parameters
        ----------
        X : ndarray or sparse matrix, shape (n_samples, n_features)
            The data used to scale along the features axis. If a sparse
            matrix is provided, it will be converted into a sparse
            ``csc_matrix``. Additionally, the sparse matrix needs to be
            nonnegative if `ignore_implicit_zeros` is False.

        Returns
        -------
        Xt : ndarray or sparse matrix, shape (n_samples, n_features)
            The projected data.
        """
        X = self._check_inputs(X)
        self._check_is_fitted(X)

        return self._transform(X, inverse=False)

    def inverse_transform(self, X):
        """Back-projection to the original space.

        X : ndarray or sparse matrix, shape (n_samples, n_features)
            The data used to scale along the features axis. If a sparse
            matrix is provided, it will be converted into a sparse
            ``csc_matrix``. Additionally, the sparse matrix needs to be
            nonnegative if `ignore_implicit_zeros` is False.

        Returns
        -------
        Xt : ndarray or sparse matrix, shape (n_samples, n_features)
            The projected data.
        """
        # Negative sparse entries are allowed here: the forward transform
        # may have produced them (e.g. for the normal output distribution).
        X = self._check_inputs(X, accept_sparse_negative=True)
        self._check_is_fitted(X)

        return self._transform(X, inverse=True)
def quantile_transform(X, axis=0, n_quantiles=1000,
                       output_distribution='uniform',
                       ignore_implicit_zeros=False,
                       subsample=int(1e5),
                       random_state=None,
                       copy=False):
    """Transform features using quantiles information.

    This method transforms the features to follow a uniform or a normal
    distribution. Therefore, for a given feature, this transformation tends
    to spread out the most frequent values. It also reduces the impact of
    (marginal) outliers: this is therefore a robust preprocessing scheme.

    The transformation is applied on each feature independently.
    The cumulative distribution function of a feature is used to project the
    original values. Features values of new/unseen data that fall below
    or above the fitted range will be mapped to the bounds of the output
    distribution. Note that this transform is non-linear. It may distort linear
    correlations between variables measured at the same scale but renders
    variables measured at different scales more directly comparable.

    Read more in the :ref:`User Guide <preprocessing_transformer>`.

    Parameters
    ----------
    X : array-like, sparse matrix
        The data to transform.

    axis : int, (default=0)
        Axis used to compute the means and standard deviations along. If 0,
        transform each feature, otherwise (if 1) transform each sample.

    n_quantiles : int, optional (default=1000)
        Number of quantiles to be computed. It corresponds to the number
        of landmarks used to discretize the cumulative density function.

    output_distribution : str, optional (default='uniform')
        Marginal distribution for the transformed data. The choices are
        'uniform' (default) or 'normal'.

    ignore_implicit_zeros : bool, optional (default=False)
        Only applies to sparse matrices. If True, the sparse entries of the
        matrix are discarded to compute the quantile statistics. If False,
        these entries are treated as zeros.

    subsample : int, optional (default=1e5)
        Maximum number of samples used to estimate the quantiles for
        computational efficiency. Note that the subsampling procedure may
        differ for value-identical sparse and dense matrices.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by np.random. Note that this is used by subsampling and smoothing
        noise.

    copy : boolean, optional, (default=False)
        Set to False to perform inplace transformation and avoid a copy (if the
        input is already a numpy array). Note that with the default
        ``copy=False`` a numpy-array input may be modified in place.

    Attributes
    ----------
    quantiles_ : ndarray, shape (n_quantiles, n_features)
        The values corresponding the quantiles of reference.

    references_ : ndarray, shape(n_quantiles, )
        Quantiles of references.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.preprocessing import quantile_transform
    >>> rng = np.random.RandomState(0)
    >>> X = np.sort(rng.normal(loc=0.5, scale=0.25, size=(25, 1)), axis=0)
    >>> quantile_transform(X, n_quantiles=10, random_state=0)
    ... # doctest: +ELLIPSIS
    array([...])

    See also
    --------
    QuantileTransformer : Performs quantile-based scaling using the
        ``Transformer`` API (e.g. as part of a preprocessing
        :class:`sklearn.pipeline.Pipeline`).
    scale : perform standardization that is faster, but less robust
        to outliers.
    robust_scale : perform robust standardization that removes the influence
        of outliers but does not put outliers and inliers on the same scale.

    Notes
    -----
    For a comparison of the different scalers, transformers, and normalizers,
    see :ref:`examples/preprocessing/plot_all_scaling.py
    <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
    """
    n = QuantileTransformer(n_quantiles=n_quantiles,
                            output_distribution=output_distribution,
                            subsample=subsample,
                            ignore_implicit_zeros=ignore_implicit_zeros,
                            random_state=random_state,
                            copy=copy)
    if axis == 0:
        return n.fit_transform(X)
    elif axis == 1:
        # Transform each sample: operate on the transpose, then undo it.
        return n.fit_transform(X.T).T
    else:
        raise ValueError("axis should be either equal to 0 or 1. Got"
                         " axis={}".format(axis))
| bsd-3-clause |
merenlab/anvio | anvio/parsers/hmmscan.py | 3 | 35794 | # -*- coding: utf-8
"""Parser for HMMER's various outputs"""
import anvio
import anvio.utils as utils
import anvio.terminal as terminal
from anvio.errors import ConfigError
from anvio.parsers.base import Parser
import numpy as np
import pandas as pd
__author__ = "Developers of anvi'o (see AUTHORS.txt)"
__copyright__ = "Copyleft 2015-2018, the Meren Lab (http://merenlab.org/)"
__credits__ = []
__license__ = "GPL 3.0"
__version__ = anvio.__version__
__maintainer__ = "A. Murat Eren"
__email__ = "a.murat.eren@gmail.com"
class HMMERStandardOutput(object):
"""Parse the standard output of HMMER programs (NOTE: currently only works with hmmsearch)
The main meat of this class is to produce the attributes:
(1) self.seq_hits
(2) self.dom_hits
(3) self.ali_info
(1) self.seq_hits is a dataframe that looks like this:
| query acc target query_len evalue score bias \
| 0 3Beta_HSD PF01073.18 1998 282 5.200000e-23 76.2 0.0
| 1 3Beta_HSD PF01073.18 1723 282 1.300000e-07 25.7 0.0
| ... ... ... ... ... ... ... ...
| 3128 Voltage_CLC PF00654.19 320 354 7.200000e-65 214.3 37.1
| 3129 YkuD PF03734.13 30 146 1.700000e-14 49.3 0.2
| best_dom_evalue best_dom_score best_dom_bias expected_doms num_doms
| 0 6.600000e-22 72.6 0.0 2.0 1
| 1 1.700000e-07 25.3 0.0 1.2 1
| ... ... ... ... ... ...
| 3128 7.800000e-64 210.9 29.1 2.0 1
| 3129 3.800000e-14 48.2 0.2 1.7 1
(2) self.dom_hits is a frame that looks like this:
| query acc target domain qual score bias c-evalue \
| 0 3Beta_HSD PF01073.18 1998 1 ! 72.6 0.0 2.900000e-24
| 1 3Beta_HSD PF01073.18 1723 1 ! 25.3 0.0 7.300000e-10
| ... ... ... ... ... ... ... ... ...
| 2896 Voltage_CLC PF00654.19 320 1 ! 210.9 29.1 1.700000e-66
| 2897 YkuD PF03734.13 30 1 ! 48.2 0.2 8.400000e-17
|
| i-evalue hmm_start hmm_stop hmm_bounds ali_start ali_stop \
| 0 6.600000e-22 1 237 [. 4 243
| 1 1.700000e-07 1 95 [. 4 92
| ... ... ... ... ... ... ...
| 2896 7.800000e-64 3 352 .. 61 390
| 2897 3.800000e-14 2 146 .] 327 459
|
| ali_bounds env_start env_stop env_bounds mean_post_prob \
| 0 .. 4 254 .. 0.74
| 1 .. 4 148 .. 0.72
| ... ... ... ... ... ...
| 2896 .. 59 392 .. 0.94
| 2897 .. 326 459 .. 0.78
|
| match_state_align comparison_align sequence_align
| 0 vvtGggGFlGrrivkeLlrl... +v+Gg+G++G++ v +L++ ... LVLGGAGYIGSHAVDQLISK...
| 1 vvtGggGFlGrrivkeLlrl... ++ Gg+GFlG++i k L+++... IIFGGSGFLGQQIAKILVQR...
| ... ... ... ...
| 2896 gllagllvkrvapeaagsGi... g++ +++ r+ + a G ... GVVFTYFYTRF-GKNASRGN...
| 2897 kyivvdlaeqrllvlyengk... +yi++dl++q++ +++ +gk... NYIEIDLKDQKM-YCFIDGK...
If you're confused about the meaning of these columns, please see starting from page 32
of the HMMER guide http://eddylab.org/software/hmmer/Userguide.pdf. There you will be able
to correlate, with relative ease, the column names in these tables to what is described
meticulously in the tutorial. For example, `best_dom_bias` refers to the 'bias (best 1
domain)' column.
(3) ali_info is a nested dictionary that can be used to access on a per-hit basis which residues
in a sequence aligned to which residues in the HMM.
Parameters
==========
hmmer_std_out : str
Path to output of HMMER.
context : str, None
If provided, operations specific to a context will also be carried out. Choose from
{'interacdome'}
"""
def __init__(self, hmmer_std_out, context=None, run=terminal.Run(), progress=terminal.Progress()):
    """Set up column layouts for the given context, then parse the file.

    See the class docstring for `hmmer_std_out` and `context`.

    NOTE(review): the `run`/`progress` defaults are evaluated once at class
    definition time, so instances created without explicit arguments share
    the same Run/Progress objects — confirm this matches the anvi'o
    convention used elsewhere.
    """
    self.run = run
    self.progress = progress

    self.hmmer_std_out = hmmer_std_out
    self.context = context

    # Sets self.query_col / self.acc_col / self.query_len_col /
    # self.target_col according to self.context.
    self.set_names()

    # target name -> per-accession alignment info (filled by get_ali_info).
    self.ali_info = {}

    # This is converted to a dataframe after populating
    self.seq_hits = {
        self.query_col: [],
        self.acc_col: [],
        self.target_col: [],
        self.query_len_col: [],
        'evalue': [],
        'score': [],
        'bias': [],
        'best_dom_evalue': [],
        'best_dom_score': [],
        'best_dom_bias': [],
        'expected_doms': [],
        'num_doms': [],
    }

    # Column dtypes applied when self.seq_hits becomes a dataframe.
    self.seq_hits_dtypes = {
        self.query_col: str,
        self.acc_col: str,
        self.target_col: str,
        self.query_len_col: int,
        'evalue': float,
        'score': float,
        'bias': float,
        'best_dom_evalue': float,
        'best_dom_score': float,
        'best_dom_bias': float,
        'expected_doms': float,
        'num_doms': int,
    }

    # This is converted to a dataframe after populating
    self.dom_hits = {
        self.query_col: [],
        self.acc_col: [],
        self.target_col: [],
        'domain': [],
        'qual': [],
        'score': [],
        'bias': [],
        'c-evalue': [],
        'i-evalue': [],
        'hmm_start': [],
        'hmm_stop': [],
        'hmm_bounds': [],
        'ali_start': [],
        'ali_stop': [],
        'ali_bounds': [],
        'env_start': [],
        'env_stop': [],
        'env_bounds': [],
        'mean_post_prob': [],
        'match_state_align': [],
        'comparison_align': [],
        'sequence_align': [],
    }

    # Column dtypes applied when self.dom_hits becomes a dataframe.
    self.dom_hits_dtypes = {
        self.query_col: str,
        self.acc_col: str,
        self.target_col: str,
        'domain': int,
        'qual': str,
        'score': float,
        'bias': float,
        'c-evalue': float,
        'i-evalue': float,
        'hmm_start': int,
        'hmm_stop': int,
        'hmm_bounds': str,
        'ali_start': int,
        'ali_stop': int,
        'ali_bounds': str,
        'env_start': int,
        'env_stop': int,
        'env_bounds': str,
        'mean_post_prob': float,
        'match_state_align': str,
        'comparison_align': str,
        'sequence_align': str,
    }

    # Delimiters in hmmsearch standard output: '//' ends a query record,
    # '>>' starts a sequence section, '==' starts a domain alignment.
    self.delim_query = '//\n'
    self.delim_seq = '>>'
    self.delim_domain = '=='

    self.load()
def load(self):
    """Parse self.hmmer_std_out query-by-query and finalize the dataframes."""
    self.progress.new('Processing HMMER output')
    self.progress.update('Parsing %s' % self.hmmer_std_out)

    with open(self.hmmer_std_out) as input_file:
        chunk_iterator = utils.get_chunk(input_file, separator=self.delim_query, read_size=32768)
        for chunk_number, query_chunk in enumerate(chunk_iterator):
            # Refresh the progress display every 500 queries.
            if not chunk_number % 500:
                self.progress.update('%d done' % chunk_number)
                self.progress.increment(increment_to=chunk_number)
            self.process_query(query_chunk)

    # The accumulated column lists become typed dataframes.
    self.seq_hits = pd.DataFrame(self.seq_hits).astype(self.seq_hits_dtypes)
    self.dom_hits = pd.DataFrame(self.dom_hits).astype(self.dom_hits_dtypes)

    self.progress.end()

    self.additional_processing()

    self.run.info('Loaded HMMER results from', self.hmmer_std_out)
def find_line(self, condition):
    """Scan self.query_lines from self.line_no for the first line satisfying
    `condition`; return it, or False if no such line exists.

    Lines starting with '#' are skipped. self.line_no is left pointing one
    past the last line examined.
    """
    remaining = self.query_lines[self.line_no:]
    for candidate in remaining:
        self.line_no += 1
        if candidate.startswith('#'):
            continue
        if condition(candidate):
            return candidate
    return False
def read_lines_until(self, condition, include_last=False, store=True):
lines = []
return_value = lines if store else True
for line in self.query_lines[self.line_no:]:
self.line_no += 1
if line.startswith('#'):
continue
if condition(line):
if include_last and store:
lines.append(line)
return lines
if store:
lines.append(line)
else:
if store:
return lines
else:
return False
def process_query(self, query):
if self.delim_seq not in query:
# This query had no hits
return
self.query_lines = query.split('\n')
self.line_no = 0
line = self.find_line(lambda line: line.startswith('Query:'))
line_split = line.split()
query_name = line_split[1]
query_len = int(line_split[2][line_split[2].find('=')+1:-1])
line = self.find_line(lambda line: line.startswith('Accession:'))
acc = line.split()[1]
line = self.find_line(lambda line: line.lstrip().startswith('E-value'))
description_index = line.find('Desc')
fields = line[:description_index].split() # ignore last 'Description' field
assert len(fields) == 9, "Please report this on github with your HMMER version"
self.read_lines_until(lambda line: line.lstrip().startswith('-------'), store=False)
seq_score_lines = self.read_lines_until(lambda line: line == '')
num_doms_per_seq = {}
for seq_score_line in seq_score_lines:
seq_scores = seq_score_line[:description_index].split()
self.seq_hits[self.query_col].append(query_name)
self.seq_hits[self.query_len_col].append(query_len)
self.seq_hits[self.acc_col].append(acc)
self.seq_hits['evalue'].append(float(seq_scores[0]))
self.seq_hits['score'].append(float(seq_scores[1]))
self.seq_hits['bias'].append(float(seq_scores[2]))
self.seq_hits['best_dom_evalue'].append(float(seq_scores[3]))
self.seq_hits['best_dom_score'].append(float(seq_scores[4]))
self.seq_hits['best_dom_bias'].append(float(seq_scores[5]))
self.seq_hits['expected_doms'].append(float(seq_scores[6]))
self.seq_hits['num_doms'].append(int(seq_scores[7]))
self.seq_hits[self.target_col].append(seq_scores[8])
num_doms_per_seq[seq_scores[8]] = int(seq_scores[7])
num_seq_hits = len(seq_score_lines)
for _ in range(num_seq_hits):
target_name = self.find_line(lambda line: line.startswith(self.delim_seq)).split()[1]
if num_doms_per_seq[target_name] == 0:
continue
self.line_no += 2
for __ in range(num_doms_per_seq[target_name]):
dom_score_summary = self.find_line(lambda line: True).split()
self.dom_hits[self.query_col].append(query_name)
self.dom_hits[self.acc_col].append(acc)
self.dom_hits[self.target_col].append(target_name)
self.dom_hits['domain'].append(dom_score_summary[0])
self.dom_hits['qual'].append(dom_score_summary[1])
self.dom_hits['score'].append(dom_score_summary[2])
self.dom_hits['bias'].append(dom_score_summary[3])
self.dom_hits['c-evalue'].append(dom_score_summary[4])
self.dom_hits['i-evalue'].append(dom_score_summary[5])
self.dom_hits['hmm_start'].append(dom_score_summary[6])
self.dom_hits['hmm_stop'].append(dom_score_summary[7])
self.dom_hits['hmm_bounds'].append(dom_score_summary[8])
self.dom_hits['ali_start'].append(dom_score_summary[9])
self.dom_hits['ali_stop'].append(dom_score_summary[10])
self.dom_hits['ali_bounds'].append(dom_score_summary[11])
self.dom_hits['env_start'].append(dom_score_summary[12])
self.dom_hits['env_stop'].append(dom_score_summary[13])
self.dom_hits['env_bounds'].append(dom_score_summary[14])
self.dom_hits['mean_post_prob'].append(dom_score_summary[15])
for __ in range(num_doms_per_seq[target_name]):
self.find_line(lambda line: line.lstrip().startswith(self.delim_domain))
if __ == num_doms_per_seq[target_name] - 1:
if _ == num_seq_hits - 1:
# This is the last alignment in the summary_info. Go to end of string
ali_lines = self.read_lines_until(lambda line: False)
else:
# This is the last alignment in the sequence. Go to next sequence delimiter
ali_lines = self.read_lines_until(lambda line: line.lstrip().startswith(self.delim_seq))
self.line_no -= 1
else:
ali_lines = self.read_lines_until(lambda line: line.lstrip().startswith(self.delim_domain))
self.line_no -= 1
consensus = []
match = []
target = []
line_index = 0
while True:
if line_index >= len(ali_lines):
break
line = ali_lines[line_index]
if not line.lstrip().startswith(query_name + ' '):
line_index += 1
continue
cons_seq_fragment = line.split()[2]
frag_len = len(cons_seq_fragment)
ali_index = line.find(cons_seq_fragment)
consensus.append(cons_seq_fragment)
match.append(ali_lines[line_index + 1][ali_index: ali_index + frag_len])
target.append(ali_lines[line_index + 2][ali_index: ali_index + frag_len])
line_index += 2
self.dom_hits['match_state_align'].append(''.join(consensus))
self.dom_hits['comparison_align'].append(''.join(match))
self.dom_hits['sequence_align'].append(''.join(target))
def set_names(self):
    """Assign the column-name attributes used downstream, based on self.context.

    With no context, generic HMMER column names are used; the 'interacdome'
    context maps the same roles onto Pfam/anvi'o specific names. Any other
    context leaves the attributes unset.
    """
    # One row per supported context: (query, acc, query_len, target) names.
    names_by_context = {
        None: ('query', 'acc', 'query_len', 'target'),
        'interacdome': ('pfam_name', 'pfam_id', 'pfam_len', 'corresponding_gene_call'),
    }

    if self.context in names_by_context:
        (self.query_col,
         self.acc_col,
         self.query_len_col,
         self.target_col) = names_by_context[self.context]
def additional_processing(self):
    """Further process raw data according to self.context."""
    if self.context is None:
        self.get_ali_info()
        return

    if self.context != 'interacdome':
        return

    # Gene caller ids arrive as strings; downstream code expects integers.
    # Pfam accessions look like 'PF00001.21'; split the trailing version off
    # into its own column (an empty table just gets an empty 'version' column).
    for attr in ('seq_hits', 'dom_hits'):
        table = getattr(self, attr)
        table['corresponding_gene_call'] = table['corresponding_gene_call'].astype(int)
        if table.empty:
            table['version'] = []
        else:
            table[['pfam_id', 'version']] = table['pfam_id'].str.split('.', n=1, expand=True)

    # For convenience this is done after pfam_id has been split
    self.get_ali_info()
def get_ali_info(self):
    """Creates self.ali_info. See class docstring for description

    For each domain hit, the aligned HMM match-state string and target
    sequence string are walked column by column. For every aligned column
    (non-gap on both sides) the residue characters and their 0-indexed
    positions in the target sequence and in the HMM are recorded in a
    DataFrame stored under self.ali_info[target][(acc, domain)].

    Notes
    =====
    - This function is very slow.
    - EDIT: This function is not _that_ slow
    """
    if self.dom_hits.empty:
        # Nothing to align; leave self.ali_info untouched.
        return

    unique_targets = self.dom_hits[self.target_col].nunique()
    self.progress.new('Processing alignment info', progress_total_items=unique_targets)

    # Characters that denote a gap on either side of the alignment.
    gap_chars = {'-', '.'}

    processed = 0
    for target, subset in self.dom_hits.groupby(self.target_col):
        if processed % 50 == 0:
            self.progress.update('%d/%d done' % (processed, unique_targets))
            self.progress.increment(increment_to=processed)

        self.ali_info[target] = {}

        for acc, subsubset in subset.groupby(self.acc_col):
            # BUGFIX: the original used `i` both for this iterrows() loop and
            # for the per-column loop below, with the inner loop shadowing the
            # outer variable. Harmless in practice (the outer value was never
            # used after the shadowing), but renamed for clarity and safety.
            for _, row in subsubset.iterrows():
                seq_positions, seq_chars, hmm_positions, hmm_chars, comparison_chars = [], [], [], [], []
                seq_pos, hmm_pos = row['ali_start'], row['hmm_start']
                sequence, match_state, comparison = row['sequence_align'], row['match_state_align'], row['comparison_align']
                assert len(sequence) == len(match_state)

                for col in range(len(sequence)):
                    seq_char, hmm_char, comparison_char = sequence[col], match_state[col], comparison[col]
                    if (seq_char not in gap_chars) and (hmm_char not in gap_chars):
                        # there is alignment (non-gap characters)
                        seq_positions.append(seq_pos)
                        seq_chars.append(seq_char)
                        hmm_positions.append(hmm_pos)
                        hmm_chars.append(hmm_char.upper())
                        comparison_chars.append(comparison_char.upper())
                        seq_pos += 1
                        hmm_pos += 1
                    elif (seq_char in gap_chars) and (hmm_char not in gap_chars):
                        # gap in seq
                        hmm_pos += 1
                    elif (seq_char not in gap_chars) and (hmm_char in gap_chars):
                        # gap in match state
                        seq_pos += 1
                    else:
                        # gap on both sides; this happens with 0 probability
                        pass

                # The HMM state and sequence positions are 1-indexed. We subtract by 1 to make them zero-indexed
                self.ali_info[target][(acc, row['domain'])] = pd.DataFrame({
                    'seq': seq_chars,
                    'hmm': hmm_chars,
                    'comparison': comparison_chars,
                    'seq_positions': np.array(seq_positions) - 1,
                    'hmm_positions': np.array(hmm_positions) - 1,
                })

        processed += 1

    self.progress.end()
class HMMERTableOutput(Parser):
    """Parse --tblout or --domtblout output formats for hmmer programs

    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    NOTE FIXME NOTE FIXME NOTE FIXME NOTE FIXME NOTE FIXME NOTE FIXME NOTE FIXME NOTE FIXME NOTE FIXME
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    <rant>
    Parsing of HMMER tabular output needs to be redesigned. This code does not actually take output from hmmer
    and parse it. It parses the output file of anvio.driver.HMMER.hmmscan_worker which preprocesses the
    output format. The responsibility of HMMER output parsing needs to be consolidated in one spot. Biopython, a
    dependency of anvi'o, has an HMMER parser. See https://biopython.org/DIST/docs/api/Bio.SearchIO.HmmerIO-module.html.
    Perhaps this is more robust solution. This design is currently hanging on by a thread.
    </rant>

    Output specifications of HMMER can be found in the user guide. At time of writing this,
    http://eddylab.org/software/hmmer/Userguide.pdf hosts the user guide.

    Parameters
    ==========
    hmmer_table_txt: ???
        Undocumented FIXME

    alphabet: str, 'AA'
        Which alphabet do the HMMs use? Pick from {'AA', 'DNA', 'RNA'}

    context: str, 'GENE'
        This tells the class how the output should be parsed. Pick from {'GENE', 'CONTIG',
        'DOMAIN'}. Before being preprocessed by anvio.driver.HMMER.hmmscan_worker (see this module's
        docstring), the header of the file should look like so, based on which context you use:

        GENE:
            #                                                               --- full sequence ---- --- best 1 domain ---- --- domain number estimation ----
            # target name        accession  query name           accession    E-value  score  bias   E-value  score  bias   exp reg clu  ov env dom rep inc description
            #------------------- ---------- -------------------- ---------- --------- ------ ----- --------- ------ -----   --- --- --- --- --- --- --- --- -----------

        DOMAIN:
            #                                                                            --- full sequence --- -------------- this domain -------------   hmm coord   ali coord   env coord
            # target name        accession   tlen query name           accession   qlen   E-value  score  bias   #  of  c-Evalue  i-Evalue  score  bias  from    to  from    to  from    to  acc description
            #------------------- ---------- ----- -------------------- ---------- ----- --------- ------ ----- --- --- --------- --------- ------ ----- ----- ----- ----- ----- ----- ----- ---- -----------

        CONTIG:
            Undocumented FIXME

        `DOMAIN` is untested.
    """

    # NOTE(review): `run=terminal.Run()` is a mutable default evaluated once
    # at class-definition time, so every instance created without an explicit
    # `run` shares the same Run object — confirm this is intentional.
    def __init__(self, hmmer_table_txt, alphabet='AA', context='GENE', program='hmmscan', run=terminal.Run()):
        self.alphabet = alphabet
        self.context = context
        self.program = program

        self.run = run

        files_expected = {'hits': hmmer_table_txt}

        # Column layout depends on which program produced the table and on
        # the (context, alphabet) pair; unsupported combinations raise.
        if self.context == "GENE":
            col_info = self.get_col_info_for_GENE_context()
        elif self.context == "CONTIG" and (self.alphabet == "DNA" or self.alphabet == "RNA"):
            col_info = self.get_col_info_for_CONTIG_context()
        elif self.context == "DOMAIN" and self.alphabet == "AA":
            if program != 'hmmsearch':
                raise ConfigError("HMMScan :: the 'DOMAIN' context is only available for hmmsearch.")
            col_info = self.get_col_info_for_DOMAIN_context()
        else:
            raise ConfigError("HMMScan driver is confused. Yor context and alphabet pair ('%s' and '%s') "
                              "does not seem to be implemented in the parser module. If you think this is "
                              "not a mistake on your part, please get in touch with the anvi'o developers "
                              "and watch them fix it like actual pros." % (self.context, self.alphabet))

        col_names, col_mapping = col_info

        files_structure = {
            'hits': {
                'col_names': col_names,
                'col_mapping': col_mapping,
                'indexing_field': -1,
                'no_header': True,
            },
        }

        Parser.__init__(self, 'HMMScan', [hmmer_table_txt], files_expected, files_structure)

    def get_col_info_for_GENE_context(self):
        """Get column names and types for GENE context

        See class docstring for details of the fields for AA sequence search, and DNA sequence search.

        Returns (col_names, col_mapping); columns named 'f' are parsed but
        otherwise ignored downstream.
        """
        if self.program == 'hmmscan':
            # |-- full sequence ---| |-- best 1 domain ---| |-- domain number estimation ---|
            # target name accession query name accession E-value score bias E-value score bias exp reg clu ov env dom rep inc
            #------------------- ---------- -------------------- ---------- --------- ------ ----- --------- ------ ----- --- --- --- --- --- --- --- ---
            col_names = ['gene_name', 'gene_hmm_id', 'gene_callers_id', 'f', 'e_value', 'bit_score', 'f', 'f', 'dom_bit_score', 'f', 'f', 'f', 'f', 'f', 'f', 'f', 'f', 'f']
            col_mapping = [str, str, int, str, float, float, str, str, float, str, str, str, str, str, str, str, str, str]

        elif self.program == 'hmmsearch':
            # In hmmsearch output the target/query roles are swapped relative
            # to hmmscan, hence the different leading columns.
            # |-- full sequence ---| |-- best 1 domain ---| |-- domain number estimation ---|
            # target name accession query name accession E-value score bias E-value score bias exp reg clu ov env dom rep inc
            #------------------- ---------- -------------------- ---------- --------- ------ ----- --------- ------ ----- --- --- --- --- --- --- --- ---
            col_names = ['gene_callers_id', 'f', 'gene_name', 'gene_hmm_id', 'e_value', 'bit_score', 'f', 'f', 'dom_bit_score', 'f', 'f', 'f', 'f', 'f', 'f', 'f', 'f', 'f']
            col_mapping = [int, str, str, str, float, float, str, str, float, str, str, str, str, str, str, str, str, str]

        else:
            raise ConfigError("The HMMScan Parser class is not sure if you know what you are doing. You told it that you wanted to "
                              "parse HMM hits from the program %s, but this class doesn't know how to handle those." % (self.program))

        return col_names, col_mapping

    def get_col_info_for_CONTIG_context(self):
        """Get column names and types for CONTIG context

        See class docstring for details of the fields for AA sequence search, and DNA sequence search.
        """
        # 'hmm_target', 'hmm_acc', 'query_id', 'query_acc', 'hmm_from', 'hmm_to', 'alignment_from', 'alignment_to', 'envelope_from', 'envelope_to', 'seq_len', 'strand', 'e_value', 'score', 'bias',]
        col_names = ['gene_name', 'gene_hmm_id', 'contig_name', 'f', 'hmm_from', 'hmm_to', 'alignment_from', 'alignment_to', 'envelope_from', 'envelope_to', 'f', 'f', 'e_value', 'f', 'f']
        col_mapping = [str, str, str, str, str, str, int, int, int, int, str, str, float, str, str]

        return col_names, col_mapping

    def get_col_info_for_DOMAIN_context(self):
        """Get column names and types for DOMAIN context

        See class docstring for details of the fields

        Returns the (names, types) pair produced by unzipping the table below.
        """
        col_info = [
            ('gene_callers_id', int),  # target name
            ('f', str),  # accession
            ('gene_length', int),  # tlen
            ('hmm_name', str),  # query name
            ('hmm_id', str),  # accession
            ('hmm_length', int),  # qlen
            ('evalue', float),  # E-value (full sequence)
            ('bitscore', float),  # score (full sequence)
            ('bias', float),  # bias (full sequence)
            ('match_num', int),  # # (this domain)
            ('num_matches', int),  # of (this domain)
            ('dom_c_evalue', float),  # c-Evalue (this domain)
            ('dom_i_evalue', float),  # i-Evalue (this domain)
            ('dom_bitscore', str),  # score (this domain)
            ('dom_bias', float),  # bias (this domain)
            ('hmm_start', int),  # from (hmm coord)
            ('hmm_stop', int),  # to (hmm coord)
            ('gene_start', int),  # from (ali coord)
            ('gene_stop', int),  # to (ali coord)
            ('f', str),  # from (env coord)
            ('f', str),  # to (env coord)
            ('mean_post_prob', float),  # acc
        ]

        return list(zip(*col_info))

    def get_search_results(self, noise_cutoff_dict=None, return_bitscore_dict=False):
        """Goes through the hits provided by `hmmscan` and generates an annotation dictionary with the relevant information about each hit.

        This function makes sure only hits with a high enough bit score make it into the annotation dictionary.

        Parameters
        ==========
        noise_cutoff_dict : dictionary
            dictionary of noise cutoff terms; see setup_ko_dict in kofam.py for an example
        return_bitscore_dict : boolean
            if True, this function will also return a dictionary of bitscores for each hit

        Returns
        =======
        annotations_dict : dictionary
            dictionary of annotations, one annotation per HMM hit
        bitscore_dict : dictionary
            dictionary of bitscore information, one entry per HMM hit, including full and domain-level bitscore.
            only returned if return_bitscore_dict is True, and only applies to GENE context.
        """
        annotations_dict = {}
        bit_score_info_dict = {}

        # this is the stuff we are going to try to fill with this:
        # search_table_structure = ['entry_id', 'source', 'alphabet', 'contig', 'gene_callers_id' 'gene_name', 'gene_hmm_id', 'e_value']

        entry_id = 0
        num_hits_removed = 0  # a counter for the number of hits we don't add to the annotation dictionary
        for hit in list(self.dicts['hits'].values()):
            entry = None
            bit_score_info_dict_entry = None

            if self.context == 'GENE':
                # Here we only add the hit to the annotations_dict if the appropriate bit score is above the
                # threshold set in noise_cutoff_dict (which is indexed by profile name (aka gene_name in the hits dict)
                if noise_cutoff_dict and hit['gene_name'] in noise_cutoff_dict.keys():
                    hmm_entry_name = hit['gene_name']
                    score_type = noise_cutoff_dict[hmm_entry_name]['score_type']
                    threshold = noise_cutoff_dict[hmm_entry_name]['threshold']
                    keep = True
                    if score_type == 'full':
                        if hit['bit_score'] < float(threshold):
                            keep = False
                    elif score_type == 'domain':
                        if hit['dom_bit_score'] < float(threshold):
                            keep = False
                    else:
                        # unknown score_type: warn and keep the hit regardless
                        self.run.warning("Oh dear. The HMM profile %s has a strange score_type value: %s. The only accepted values "
                                         "for this type are 'full' or 'domain', so anvi'o cannot parse the hits to this profile. All hits "
                                         "will be kept regardless of bit score. You have been warned." % (hit['gene_name'], score_type))

                    if keep:
                        entry = {'entry_id': entry_id,
                                 'gene_name': hit['gene_name'],
                                 'gene_hmm_id': hit['gene_hmm_id'],
                                 'gene_callers_id': hit['gene_callers_id'],
                                 'e_value': hit['e_value']}
                        if return_bitscore_dict:
                            bit_score_info_dict_entry = {'entry_id': entry_id,
                                                         'gene_name': hit['gene_name'],
                                                         'gene_hmm_id': hit['gene_hmm_id'],
                                                         'gene_callers_id': hit['gene_callers_id'],
                                                         'e_value': hit['e_value'],
                                                         'bit_score': hit['bit_score'],
                                                         'domain_bit_score': hit['dom_bit_score']}
                    else:
                        num_hits_removed += 1

                elif noise_cutoff_dict and hit['gene_name'] not in noise_cutoff_dict.keys():
                    # this should never happen, in an ideal world where everything is filled with butterflies and happiness
                    self.run.warning("Hmm. While parsing your HMM hits, it seems the HMM profile %s was not found in the noise cutoff dictionary. "
                                     "This should probably not ever happen, and you should contact a developer as soon as possible to figure out what "
                                     "is going on. But for now, anvi'o is going to keep all hits to this profile. Consider those hits with a grain of salt, "
                                     "as not all of them may be good." % hit['gene_name'])
                    entry = {'entry_id': entry_id,
                             'gene_name': hit['gene_name'],
                             'gene_hmm_id': hit['gene_hmm_id'],
                             'gene_callers_id': hit['gene_callers_id'],
                             'e_value': hit['e_value']}
                    if return_bitscore_dict:
                        bit_score_info_dict_entry = {'entry_id': entry_id,
                                                     'gene_name': hit['gene_name'],
                                                     'gene_hmm_id': hit['gene_hmm_id'],
                                                     'gene_callers_id': hit['gene_callers_id'],
                                                     'e_value': hit['e_value'],
                                                     'bit_score': hit['bit_score'],
                                                     'domain_bit_score': hit['dom_bit_score']}

                else:
                    # no noise cutoffs provided: keep every hit
                    entry = {'entry_id': entry_id,
                             'gene_name': hit['gene_name'],
                             'gene_hmm_id': hit['gene_hmm_id'],
                             'gene_callers_id': hit['gene_callers_id'],
                             'e_value': hit['e_value']}
                    if return_bitscore_dict:
                        bit_score_info_dict_entry = {'entry_id': entry_id,
                                                     'gene_name': hit['gene_name'],
                                                     'gene_hmm_id': hit['gene_hmm_id'],
                                                     'gene_callers_id': hit['gene_callers_id'],
                                                     'e_value': hit['e_value'],
                                                     'bit_score': hit['bit_score'],
                                                     'domain_bit_score': hit['dom_bit_score']}

            elif self.context == 'CONTIG' and (self.alphabet == 'DNA' or self.alphabet == 'RNA'):
                entry = {'entry_id': entry_id,
                         'gene_name': hit['gene_name'],
                         'gene_hmm_id': hit['gene_hmm_id'],
                         'contig_name': hit['contig_name'],
                         'start': hit['alignment_from'],
                         'stop': hit['alignment_to'],
                         'e_value': hit['e_value']}
            else:
                raise ConfigError("Anvi'o does not know how to parse %s:%s" % (self.alphabet, self.context))

            if entry:
                # NOTE(review): entry_id is incremented *before* being used as
                # the dict key, so keys start at 1 and are offset by one from
                # the 'entry_id' field stored inside each entry — confirm this
                # is what downstream consumers expect.
                entry_id += 1
                annotations_dict[entry_id] = entry
                if return_bitscore_dict and bit_score_info_dict_entry:
                    bit_score_info_dict[entry_id] = bit_score_info_dict_entry

        self.run.info("Number of weak hits removed", num_hits_removed)
        self.run.info("Number of hits in annotation dict ", len(annotations_dict.keys()))

        if return_bitscore_dict:
            return annotations_dict, bit_score_info_dict

        return annotations_dict
| gpl-3.0 |
drufat/vispy | vispy/testing/__init__.py | 21 | 2415 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
"""
Testing
=======
This module provides functions useful for running tests in vispy.
Tests can be run in a few ways:
* From Python, you can import ``vispy`` and do ``vispy.test()``.
* From the source root, you can do ``make test`` which wraps to
a call to ``python make test``.
There are various different testing "modes", including:
* "full": run all tests.
* any backend name (e.g., "glfw"): run application/GL tests using a
specific backend.
* "nobackend": run tests that do not require a backend.
* "examples": run repo examples to check for errors and warnings.
* "flake": check style errors.
Examples get automatically tested unless they have a special comment toward
the top ``# vispy: testskip``. Examples that should be tested should be
formatted so that 1) a ``Canvas`` class is defined, or a ``canvas`` class
is instantiated; and 2) the ``app.run()`` call is protected by a check
if ``__name__ == "__main__"``. This makes it so that the event loop is not
started when running examples in the test suite -- the test suite instead
manually updates the canvas (using ``app.process_events()``) for under one
second to ensure that things like timer events are processed.
For examples on how to test various bits of functionality (e.g., application
functionality, or drawing things with OpenGL), it's best to look at existing
examples in the test suite.
The code base gets automatically tested by Travis-CI (Linux) and AppVeyor
(Windows) on Python 2.6, 2.7, 3.4. There are multiple testing modes that
use e.g. full dependencies, minimal dependencies, etc. See ``.travis.yml``
to determine what automatic tests are run.
"""
from ._testing import (SkipTest, requires_application, requires_ipython, # noqa
requires_img_lib, # noqa
has_backend, requires_pyopengl, # noqa
requires_scipy, has_matplotlib, # noqa
save_testing_image, TestingCanvas, has_pyopengl, # noqa
run_tests_if_main,
assert_is, assert_in, assert_not_in, assert_equal,
assert_not_equal, assert_raises, assert_true, # noqa
raises) # noqa
from ._runners import test # noqa
| bsd-3-clause |
oddt/oddt | oddt/pandas.py | 2 | 19996 | """ Pandas extension for chemical analysis """
from __future__ import absolute_import
from collections import deque
from six import BytesIO, StringIO, text_type
import pandas as pd
import oddt
pd.set_option("display.max_colwidth", 999999)
def _mol_reader(fmt='sdf',
                filepath_or_buffer=None,
                usecols=None,
                molecule_column='mol',
                molecule_name_column='mol_name',
                smiles_column=None,
                skip_bad_mols=False,
                chunksize=None,
                **kwargs):
    """Universal reading function for private use.

    This is a generator: it yields `ChemDataFrame` chunks of `chunksize`
    molecules, or a single frame with everything when `chunksize` is None.

    .. versionadded:: 0.3

    Parameters
    ----------
    fmt : string
        The format of molecular file

    filepath_or_buffer : string or None
        File path

    usecols : list or None, optional (default=None)
        A list of columns to read from file. If None then all available
        fields are read.

    molecule_column : string or None, optional (default='mol')
        Name of molecule column. If None the molecules will be skipped and
        the reading will be sped up significantly.

    molecule_name_column : string or None, optional (default='mol_name')
        Column name which will contain molecules' title/name. Column is
        skipped when set to None.

    smiles_column : string or None, optional (default=None)
        Column name containing molecules' SMILES, by default it is disabled.

    skip_bad_mols : bool, optional (default=False)
        Switch to skip empty (bad) molecules. Useful for RDKit, which returns
        None if molecule can not sanitize.

    chunksize : int or None, optional (default=None)
        Size of chunk to return. If set to None whole set is returned.

    Yields
    ------
    chunk :
        A `ChemDataFrame` containing `chunksize` molecules.
    """
    # capture options for reader; these are consumed here so they are not
    # forwarded to the ChemDataFrame constructor below
    reader_kwargs = {}
    if 'opt' in kwargs:
        reader_kwargs['opt'] = kwargs.pop('opt')
    if 'sanitize' in kwargs:
        reader_kwargs['sanitize'] = kwargs.pop('sanitize')

    # when you dont read molecules you can skip parsing them
    # (toolkit-specific flags: OpenBabel's 'P' option for SDF, or disabling
    # RDKit sanitization)
    if molecule_column is None:
        if oddt.toolkit.backend == 'ob' and fmt == 'sdf':
            if 'opt' in reader_kwargs:
                reader_kwargs['opt']['P'] = None
            else:
                reader_kwargs['opt'] = {'P': None}
        elif oddt.toolkit.backend == 'rdk':
            reader_kwargs['sanitize'] = False

    chunk = []
    for n, mol in enumerate(oddt.toolkit.readfile(fmt, filepath_or_buffer,
                                                  **reader_kwargs)):
        if skip_bad_mols and mol is None:
            continue  # add warning with number of skipped molecules

        # collect per-molecule data fields, optionally restricted to usecols
        if usecols is None:
            mol_data = mol.data.to_dict()
        else:
            mol_data = dict((k, mol.data[k]) for k in usecols)

        if molecule_column:
            mol_data[molecule_column] = mol
        if molecule_name_column:
            mol_data[molecule_name_column] = mol.title
        if smiles_column:
            mol_data[smiles_column] = mol.smiles

        chunk.append(mol_data)

        # emit a full chunk; note `n` counts read molecules including skipped
        # ones, so with skip_bad_mols chunks may be slightly short
        if chunksize and (n + 1) % chunksize == 0:
            chunk_frm = ChemDataFrame(chunk, **kwargs)
            chunk_frm._molecule_column = molecule_column
            yield chunk_frm
            chunk = []

    # trailing partial chunk, or the single whole-set frame (always yielded
    # when chunksize is None, even if the file was empty)
    if chunk or chunksize is None:
        chunk_frm = ChemDataFrame(chunk, **kwargs)
        chunk_frm._molecule_column = molecule_column
        yield chunk_frm
def _mol_writer(data,
fmt='sdf',
filepath_or_buffer=None,
update_properties=True,
molecule_column=None,
columns=None):
"""Universal writing function for private use.
.. versionadded:: 0.3
Parameters
----------
fmt : string
The format of molecular file
filepath_or_buffer : string or None
File path
update_properties : bool, optional (default=True)
Switch to update properties from the DataFrames to the molecules
while writting.
molecule_column : string or None, optional (default='mol')
Name of molecule column. If None the molecules will be skipped.
columns : list or None, optional (default=None)
A list of columns to write to file. If None then all available
fields are written.
"""
if filepath_or_buffer is None:
out = StringIO()
elif hasattr(filepath_or_buffer, 'write'):
out = filepath_or_buffer
else:
out = oddt.toolkit.Outputfile(fmt, filepath_or_buffer, overwrite=True)
if isinstance(data, pd.DataFrame):
molecule_column = molecule_column or data._molecule_column
for ix, row in data.iterrows():
mol = row[molecule_column].clone
if update_properties:
new_data = row.to_dict()
del new_data[molecule_column]
mol.data.update(new_data)
if columns:
for k in mol.data.keys():
if k not in columns:
del mol.data[k]
if filepath_or_buffer is None or hasattr(filepath_or_buffer, 'write'):
out.write(mol.write(fmt))
else:
out.write(mol)
elif isinstance(data, pd.Series):
for mol in data:
if filepath_or_buffer is None or hasattr(filepath_or_buffer, 'write'):
out.write(mol.write(fmt))
else:
out.write(mol)
if filepath_or_buffer is None:
return out.getvalue()
elif not hasattr(filepath_or_buffer, 'write'): # dont close foreign buffer
out.close()
def read_csv(*args, **kwargs):
    """Thin wrapper around ``pd.read_csv`` that can additionally build a
    molecule column by parsing a SMILES column.

    TODO: Support Chunks

    Extra keyword arguments
    -----------------------
    smiles_to_molecule : string or None (default=None)
        Name of a column holding SMILES strings; when given, molecules parsed
        from it are stored under `molecule_column`.
    molecule_column : string (default='mol')
        Name of the column that receives the parsed molecules.

    All remaining arguments are forwarded to ``pd.read_csv``.
    """
    smiles_col = kwargs.pop('smiles_to_molecule', None)
    mol_col = kwargs.pop('molecule_column', 'mol')

    frame = pd.read_csv(*args, **kwargs)
    if smiles_col is not None:
        frame[mol_col] = frame[smiles_col].map(
            lambda smi: oddt.toolkit.readstring('smi', smi))
    return frame
def read_sdf(filepath_or_buffer=None,
             usecols=None,
             molecule_column='mol',
             molecule_name_column='mol_name',
             smiles_column=None,
             skip_bad_mols=False,
             chunksize=None,
             **kwargs):
    """Read an SDF/MDL multi-molecular file into a ChemDataFrame.

    .. versionadded:: 0.3

    Parameters
    ----------
    filepath_or_buffer : string or None
        File path

    usecols : list or None, optional (default=None)
        Columns to read from the file; None reads every available field.

    molecule_column : string or None, optional (default='mol')
        Name of molecule column. If None the molecules will be skipped and
        the reading will be sped up significantly.

    molecule_name_column : string or None, optional (default='mol_name')
        Column name which will contain molecules' title/name. Column is
        skipped when set to None.

    smiles_column : string or None, optional (default=None)
        Column name containing molecules' SMILES, by default it is disabled.

    skip_bad_mols : bool, optional (default=False)
        Switch to skip empty (bad) molecules. Useful for RDKit, which returns
        None if molecule can not sanitize.

    chunksize : int or None, optional (default=None)
        Size of chunk to return. If set to None whole set is returned.

    Returns
    -------
    result :
        A `ChemDataFrame` with all molecules when `chunksize` is None,
        otherwise a generator of `ChemDataFrame` chunks.
    """
    reader = _mol_reader(fmt='sdf',
                         filepath_or_buffer=filepath_or_buffer,
                         usecols=usecols,
                         molecule_column=molecule_column,
                         molecule_name_column=molecule_name_column,
                         smiles_column=smiles_column,
                         skip_bad_mols=skip_bad_mols,
                         chunksize=chunksize,
                         **kwargs)
    # Without chunking the generator yields exactly one complete frame;
    # drain it and hand that frame back.
    return reader if chunksize else deque(reader, maxlen=1).pop()
def read_mol2(filepath_or_buffer=None,
              usecols=None,
              molecule_column='mol',
              molecule_name_column='mol_name',
              smiles_column=None,
              skip_bad_mols=False,
              chunksize=None,
              **kwargs):
    """Read a Mol2 multi-molecular file into a ChemDataFrame. UCSF Dock 6
    style comments (`#### var_name: value` before a molecule block) are
    supported.

    .. versionadded:: 0.3

    Parameters
    ----------
    filepath_or_buffer : string or None
        File path

    usecols : list or None, optional (default=None)
        Columns to read from the file; None reads every available field.

    molecule_column : string or None, optional (default='mol')
        Name of molecule column. If None the molecules will be skipped and
        the reading will be sped up significantly.

    molecule_name_column : string or None, optional (default='mol_name')
        Column name which will contain molecules' title/name. Column is
        skipped when set to None.

    smiles_column : string or None, optional (default=None)
        Column name containing molecules' SMILES, by default it is disabled.

    skip_bad_mols : bool, optional (default=False)
        Switch to skip empty (bad) molecules. Useful for RDKit, which returns
        None if molecule can not sanitize.

    chunksize : int or None, optional (default=None)
        Size of chunk to return. If set to None whole set is returned.

    Returns
    -------
    result :
        A `ChemDataFrame` with all molecules when `chunksize` is None,
        otherwise a generator of `ChemDataFrame` chunks.
    """
    reader = _mol_reader(fmt='mol2',
                         filepath_or_buffer=filepath_or_buffer,
                         usecols=usecols,
                         molecule_column=molecule_column,
                         molecule_name_column=molecule_name_column,
                         smiles_column=smiles_column,
                         skip_bad_mols=skip_bad_mols,
                         chunksize=chunksize,
                         **kwargs)
    # Without chunking the generator yields exactly one complete frame;
    # drain it and hand that frame back.
    return reader if chunksize else deque(reader, maxlen=1).pop()
class ChemSeries(pd.Series):
    """Pandas Series modified to adapt `oddt.toolkit.Molecule` objects and apply
    molecular methods easily.

    .. versionadded:: 0.3
    """

    # NOTE(review): the operator overloads below probe `self[0]`, which
    # assumes the series is non-empty and has a 0 label — confirm callers
    # never hand in an empty/re-indexed series.
    def __le__(self, other):
        """ Substructure searching.
        `chemseries < mol`: are molecules in series substructures of a `mol`

        NOTE(review): this is `__le__` (<=) although the docstring shows `<`;
        the `<` operator is not overloaded here.
        """
        if (isinstance(other, oddt.toolkit.Molecule) and
                isinstance(self[0], oddt.toolkit.Molecule)):
            # each molecule's SMILES becomes a SMARTS query run against `other`
            return self.map(lambda x: oddt.toolkit.Smarts(x.smiles).match(other))
        else:
            return super(ChemSeries, self).__le__(other)

    def __ge__(self, other):
        """ Substructure searching.
        `chemseries > mol`: is `mol` a substructure of molecules in series
        """
        if (isinstance(other, oddt.toolkit.Molecule) and
                isinstance(self[0], oddt.toolkit.Molecule)):
            # compile the query once, then match it against every molecule
            smarts = oddt.toolkit.Smarts(other.smiles)
            return self.map(lambda x: smarts.match(x))
        else:
            return super(ChemSeries, self).__ge__(other)

    def __or__(self, other):
        """ Tanimoto coefficient """
        if (isinstance(self[0], oddt.toolkit.Fingerprint) and
                isinstance(other, oddt.toolkit.Fingerprint)):
            return self.map(lambda x: x | other)
        else:
            return super(ChemSeries, self).__or__(other)

    def calcfp(self, *args, **kwargs):
        """Helper function to map FP calculation through the series"""
        assert(isinstance(self[0], oddt.toolkit.Molecule))
        return self.map(lambda x: x.calcfp(*args, **kwargs))

    def to_smiles(self, filepath_or_buffer=None):
        """Write the series of molecules in SMILES format."""
        return _mol_writer(self, fmt='smi', filepath_or_buffer=filepath_or_buffer)

    def to_sdf(self, filepath_or_buffer=None):
        """Write the series of molecules in SDF format."""
        return _mol_writer(self, fmt='sdf', filepath_or_buffer=filepath_or_buffer)

    def to_mol2(self, filepath_or_buffer=None):
        """Write the series of molecules in Mol2 format."""
        return _mol_writer(self, fmt='mol2', filepath_or_buffer=filepath_or_buffer)

    @property
    def _constructor(self):
        """ Force new class to be used as constructor """
        return ChemSeries

    @property
    def _constructor_expanddim(self):
        """ Force new class to be used as constructor when expanding dims """
        return ChemDataFrame
class ChemDataFrame(pd.DataFrame):
"""Chemical DataFrame object, which contains molecules column of
`oddt.toolkit.Molecule` objects. Rich display of moleucles (2D) is available
in iPython Notebook. Additional `to_sdf` and `to_mol2` methods make writing
to molecular formats easy.
.. versionadded:: 0.3
Notes
-----
Thanks to: http://blog.snapdragon.cc/2015/05/05/subclass-pandas-dataframe-to-save-custom-attributes/
"""
_metadata = ['_molecule_column']
_molecule_column = None
def to_sdf(self,
filepath_or_buffer=None,
update_properties=True,
molecule_column=None,
columns=None):
"""Write DataFrame to SDF file.
.. versionadded:: 0.3
Parameters
----------
filepath_or_buffer : string or None
File path
update_properties : bool, optional (default=True)
Switch to update properties from the DataFrames to the molecules
while writting.
molecule_column : string or None, optional (default='mol')
Name of molecule column. If None the molecules will be skipped.
columns : list or None, optional (default=None)
A list of columns to write to file. If None then all available
fields are written.
"""
molecule_column = molecule_column or self._molecule_column
return _mol_writer(self,
filepath_or_buffer=filepath_or_buffer,
update_properties=update_properties,
fmt='sdf',
molecule_column=molecule_column,
columns=columns)
def to_mol2(self,
filepath_or_buffer=None,
update_properties=True,
molecule_column='mol',
columns=None):
"""Write DataFrame to Mol2 file.
.. versionadded:: 0.3
Parameters
----------
filepath_or_buffer : string or None
File path
update_properties : bool, optional (default=True)
Switch to update properties from the DataFrames to the molecules
while writting.
molecule_column : string or None, optional (default='mol')
Name of molecule column. If None the molecules will be skipped.
columns : list or None, optional (default=None)
A list of columns to write to file. If None then all available
fields are written.
"""
molecule_column = molecule_column or self._molecule_column
return _mol_writer(self,
fmt='mol2',
filepath_or_buffer=filepath_or_buffer,
update_properties=update_properties,
molecule_column=molecule_column,
columns=columns)
def to_html(self, *args, **kwargs):
"""Patched rendering in HTML - don't escape HTML inside the cells.
Docs are copied from parent
"""
kwargs['escape'] = False
return super(ChemDataFrame, self).to_html(*args, **kwargs)
def to_csv(self, *args, **kwargs):
""" Docs are copied from parent """
if self._molecule_column and ('columns' not in kwargs or
kwargs['columns'] is None or
self._molecule_column in kwargs['columns']):
frm_copy = self.copy(deep=True)
smi = frm_copy[self._molecule_column].map(lambda x: x.smiles)
frm_copy[self._molecule_column] = smi
return super(ChemDataFrame, frm_copy).to_csv(*args, **kwargs)
else:
return super(ChemDataFrame, self).to_csv(*args, **kwargs)
def to_excel(self, *args, **kwargs):
""" Docs are copied from parent """
if 'columns' in kwargs:
columns = kwargs['columns']
else:
columns = self.columns.tolist()
if 'molecule_column' in kwargs:
molecule_column = kwargs['molecule_column']
else:
molecule_column = self._molecule_column
molecule_column_idx = columns.index(molecule_column)
if 'index' not in kwargs or ('index' in kwargs and kwargs['index']):
molecule_column_idx += 1
size = kwargs.pop('size') if 'size' in kwargs else (200, 200)
excel_writer = args[0]
if isinstance(excel_writer, str):
excel_writer = pd.ExcelWriter(excel_writer, engine='xlsxwriter')
assert excel_writer.engine == 'xlsxwriter'
frm_copy = self.copy(deep=True)
smi = frm_copy[molecule_column].map(lambda x: x.smiles)
frm_copy[molecule_column] = smi
super(ChemDataFrame, frm_copy).to_excel(excel_writer, *args[1:], **kwargs)
sheet = excel_writer.sheets['Sheet1'] # TODO: Get appropriate sheet name
sheet.set_column(molecule_column_idx, molecule_column_idx,
width=size[1] / 6.)
for i, mol in enumerate(self[molecule_column]):
if mol is None:
continue
img = BytesIO()
png = mol.clone.write('png', size=size)
if isinstance(png, text_type):
png = png.encode('utf-8', errors='surrogateescape')
img.write(png)
sheet.write_string(i + 1, molecule_column_idx, "")
sheet.insert_image(i + 1,
molecule_column_idx,
'dummy',
{'image_data': img,
'positioning': 2,
'x_offset': 1,
'y_offset': 1})
sheet.set_row(i + 1, height=size[0])
excel_writer.save()
@property
def _constructor(self):
""" Force new class to be usead as constructor """
return ChemDataFrame
@property
def _constructor_sliced(self):
    """Return the class used as constructor when slicing to 1-D."""
    return ChemSeries
@property
def _constructor_expanddim(self):
    """Return the class used as constructor when expanding dimensions."""
    return ChemPanel
# Copy some docstrings from the upstream pandas classes so that help() on the
# overridden writer methods shows the full pandas documentation.
for method in ['to_html', 'to_csv', 'to_excel']:
    try:
        getattr(ChemDataFrame, method).__doc__ = getattr(pd.DataFrame, method).__doc__
    except AttributeError:  # Python 2: unbound methods expose the function via __func__
        getattr(ChemDataFrame, method).__func__.__doc__ = getattr(pd.DataFrame, method).__func__.__doc__
class ChemPanel(pd.Panel):
    """Modified `pandas.Panel` to adopt higher dimension data than
    `ChemDataFrame`. Main purpose is to store molecular fingerprints in one
    column and keep 2D numpy array underneath.

    .. versionadded:: 0.3
    """
    # NOTE(review): pandas.Panel was removed in pandas 1.0 -- confirm the
    # supported pandas version range for this class.

    _metadata = ['_molecule_column']

    # Name of the column holding molecule objects (None until assigned).
    _molecule_column = None

    @property
    def _constructor(self):
        """Return this subclass so pandas operations preserve ChemPanel."""
        return ChemPanel

    @property
    def _constructor_sliced(self):
        """Return the class used as constructor when slicing to 2-D."""
        return ChemDataFrame
| bsd-3-clause |
SciTools/iris | lib/iris/tests/unit/plot/test_points.py | 5 | 2370 | # Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""Unit tests for the `iris.plot.points` function."""
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
import numpy as np
from iris.tests.stock import simple_2d
from iris.tests.unit.plot import TestGraphicStringCoord, MixinCoords
if tests.MPL_AVAILABLE:
import iris.plot as iplt
@tests.skip_plot
class TestStringCoordPlot(TestGraphicStringCoord):
    """Exercise `iplt.points` against a cube carrying a string coordinate."""

    def test_yaxis_labels(self):
        # String coordinate on the y axis: labels come from the bounds.
        iplt.points(self.cube, coords=("bar", "str_coord"))
        self.assertBoundsTickLabels("yaxis")

    def test_xaxis_labels(self):
        # Same check with the string coordinate on the x axis.
        iplt.points(self.cube, coords=("str_coord", "bar"))
        self.assertBoundsTickLabels("xaxis")

    def test_xaxis_labels_with_axes(self):
        import matplotlib.pyplot as plt

        figure = plt.figure()
        axes = figure.add_subplot(111)
        axes.set_xlim(0, 3)
        iplt.points(self.cube, coords=("str_coord", "bar"), axes=axes)
        plt.close(figure)
        self.assertPointsTickLabels("xaxis", axes)

    def test_yaxis_labels_with_axes(self):
        import matplotlib.pyplot as plt

        figure = plt.figure()
        axes = figure.add_subplot(111)
        axes.set_ylim(0, 3)
        iplt.points(self.cube, coords=("bar", "str_coord"), axes=axes)
        plt.close(figure)
        self.assertPointsTickLabels("yaxis", axes)

    def test_geoaxes_exception(self):
        # A plain (non-geo) axes cannot host a lat/lon cube plot.
        import matplotlib.pyplot as plt

        figure = plt.figure()
        axes = figure.add_subplot(111)
        self.assertRaises(TypeError, iplt.points, self.lat_lon_cube, axes=axes)
        plt.close(figure)
@tests.skip_plot
class TestCoords(tests.IrisTest, MixinCoords):
    # MixinCoords supplies the shared coordinate-handling tests; setUp only
    # wires up the fixture attributes the mixin reads by name.
    def setUp(self):
        # We have a 2d cube with dimensionality (bar: 3; foo: 4)
        self.cube = simple_2d(with_bounds=False)
        self.foo = self.cube.coord("foo").points
        self.foo_index = np.arange(self.foo.size)
        self.bar = self.cube.coord("bar").points
        self.bar_index = np.arange(self.bar.size)
        # iplt.points passes no data/transposed-data arrays, hence None.
        self.data = None
        self.dataT = None
        # Patch pyplot.scatter so the mixin can assert on call arguments
        # without actually rendering anything.
        self.mpl_patch = self.patch("matplotlib.pyplot.scatter")
        self.draw_func = iplt.points
# Standard unittest entry point for running this module directly.
if __name__ == "__main__":
    tests.main()
| lgpl-3.0 |
Adai0808/scikit-learn | examples/classification/plot_lda_qda.py | 164 | 4806 | """
====================================================================
Linear and Quadratic Discriminant Analysis with confidence ellipsoid
====================================================================
Plot the confidence ellipsoids of each class and decision boundary
"""
print(__doc__)
from scipy import linalg
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib import colors
from sklearn.lda import LDA
from sklearn.qda import QDA
###############################################################################
# colormap
# Red-to-blue diverging map used to shade the class-probability background:
# class-0 regions render red, class-1 regions blue.
cmap = colors.LinearSegmentedColormap(
    'red_blue_classes',
    {'red': [(0, 1, 1), (1, 0.7, 0.7)],
     'green': [(0, 0.7, 0.7), (1, 0.7, 0.7)],
     'blue': [(0, 0.7, 0.7), (1, 1, 1)]})
# NOTE(review): plt.cm.register_cmap is deprecated in modern matplotlib
# (matplotlib.colormaps.register replaces it) -- fine for the matplotlib
# versions this historical example targets.
plt.cm.register_cmap(cmap=cmap)
###############################################################################
# generate datasets
def dataset_fixed_cov():
    '''Generate 2 Gaussians samples with the same covariance matrix'''
    n_samples, n_features = 300, 2
    np.random.seed(0)
    # Shared mixing matrix: both classes get the same covariance structure.
    mixing = np.array([[0., -0.23], [0.83, .23]])
    class_a = np.dot(np.random.randn(n_samples, n_features), mixing)
    class_b = np.dot(np.random.randn(n_samples, n_features), mixing) + np.array([1, 1])
    X = np.concatenate((class_a, class_b))
    y = np.concatenate((np.zeros(n_samples), np.ones(n_samples)))
    return X, y
def dataset_cov():
    '''Generate 2 Gaussians samples with different covariance matrices'''
    n_samples, n_features = 300, 2
    np.random.seed(0)
    # Class B uses the transposed mixing matrix, giving it a different
    # covariance from class A.
    mixing = np.array([[0., -1.], [2.5, .7]]) * 2.
    class_a = np.dot(np.random.randn(n_samples, n_features), mixing)
    class_b = np.dot(np.random.randn(n_samples, n_features), mixing.T) + np.array([1, 4])
    X = np.concatenate((class_a, class_b))
    y = np.concatenate((np.zeros(n_samples), np.ones(n_samples)))
    return X, y
###############################################################################
# plot functions
def plot_data(lda, X, y, y_pred, fig_index):
    """Scatter one dataset/classifier pair into subplot *fig_index*.

    Correct predictions are drawn as dots, errors as darker small markers;
    the background is shaded by the classifier's class-1 probability, the
    decision boundary is drawn at p=0.5, and class means are marked.
    Returns the subplot so callers can overlay covariance ellipses.
    """
    splot = plt.subplot(2, 2, fig_index)
    # Titles/labels only on the outer edge of the 2x2 grid.
    if fig_index == 1:
        plt.title('Linear Discriminant Analysis')
        plt.ylabel('Data with fixed covariance')
    elif fig_index == 2:
        plt.title('Quadratic Discriminant Analysis')
    elif fig_index == 3:
        plt.ylabel('Data with varying covariances')

    tp = (y == y_pred)  # True Positive
    tp0, tp1 = tp[y == 0], tp[y == 1]
    X0, X1 = X[y == 0], X[y == 1]
    X0_tp, X0_fp = X0[tp0], X0[~tp0]
    X1_tp, X1_fp = X1[tp1], X1[~tp1]
    # NOTE(review): xmin/xmax/ymin/ymax are computed but never used below.
    xmin, xmax = X[:, 0].min(), X[:, 0].max()
    ymin, ymax = X[:, 1].min(), X[:, 1].max()

    # class 0: dots
    plt.plot(X0_tp[:, 0], X0_tp[:, 1], 'o', color='red')
    plt.plot(X0_fp[:, 0], X0_fp[:, 1], '.', color='#990000')  # dark red

    # class 1: dots
    plt.plot(X1_tp[:, 0], X1_tp[:, 1], 'o', color='blue')
    plt.plot(X1_fp[:, 0], X1_fp[:, 1], '.', color='#000099')  # dark blue

    # class 0 and 1 : areas
    nx, ny = 200, 100
    # Use the axis limits established by the scatter plots above.
    x_min, x_max = plt.xlim()
    y_min, y_max = plt.ylim()
    xx, yy = np.meshgrid(np.linspace(x_min, x_max, nx),
                         np.linspace(y_min, y_max, ny))
    # Probability of class 1 over the grid drives the background shading.
    Z = lda.predict_proba(np.c_[xx.ravel(), yy.ravel()])
    Z = Z[:, 1].reshape(xx.shape)
    plt.pcolormesh(xx, yy, Z, cmap='red_blue_classes',
                   norm=colors.Normalize(0., 1.))
    # Decision boundary: the p=0.5 iso-contour.
    plt.contour(xx, yy, Z, [0.5], linewidths=2., colors='k')

    # means
    plt.plot(lda.means_[0][0], lda.means_[0][1],
             'o', color='black', markersize=10)
    plt.plot(lda.means_[1][0], lda.means_[1][1],
             'o', color='black', markersize=10)
    return splot
def plot_ellipse(splot, mean, cov, color):
    """Overlay a filled 2-sigma ellipse for a 2D Gaussian on *splot*."""
    eigvals, eigvecs = linalg.eigh(cov)
    # First eigenvector (normalized) gives the ellipse orientation.
    direction = eigvecs[0] / linalg.norm(eigvecs[0])
    angle = np.arctan(direction[1] / direction[0])
    angle = 180 * angle / np.pi  # convert to degrees
    # filled Gaussian at 2 standard deviation
    ellipse = mpl.patches.Ellipse(mean,
                                  2 * eigvals[0] ** 0.5,
                                  2 * eigvals[1] ** 0.5,
                                  180 + angle,
                                  color=color)
    ellipse.set_clip_box(splot.bbox)
    ellipse.set_alpha(0.5)
    splot.add_artist(ellipse)
    splot.set_xticks(())
    splot.set_yticks(())
def plot_lda_cov(lda, splot):
    """Draw the single shared LDA covariance ellipse around each class mean."""
    plot_ellipse(splot, lda.means_[0], lda.covariance_, 'red')
    plot_ellipse(splot, lda.means_[1], lda.covariance_, 'blue')
def plot_qda_cov(qda, splot):
    """Draw each class's own QDA covariance ellipse around its mean."""
    plot_ellipse(splot, qda.means_[0], qda.covariances_[0], 'red')
    plot_ellipse(splot, qda.means_[1], qda.covariances_[1], 'blue')
###############################################################################
# Fit LDA and QDA on both datasets and draw the four comparison panels:
# rows = datasets (shared vs varying covariance), columns = LDA vs QDA.
for i, (X, y) in enumerate([dataset_fixed_cov(), dataset_cov()]):
    # LDA
    lda = LDA(solver="svd", store_covariance=True)
    y_pred = lda.fit(X, y).predict(X)
    splot = plot_data(lda, X, y, y_pred, fig_index=2 * i + 1)
    plot_lda_cov(lda, splot)
    plt.axis('tight')

    # QDA
    qda = QDA()
    # store_covariances=True keeps the per-class covariances needed for
    # plot_qda_cov's ellipses.
    y_pred = qda.fit(X, y, store_covariances=True).predict(X)
    splot = plot_data(qda, X, y, y_pred, fig_index=2 * i + 2)
    plot_qda_cov(qda, splot)
    plt.axis('tight')
plt.suptitle('LDA vs QDA')
plt.show()
| bsd-3-clause |
goldmedal/spark | python/pyspark/sql/pandas/functions.py | 2 | 27749 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import functools
import sys
import warnings
from pyspark import since
from pyspark.rdd import PythonEvalType
from pyspark.sql.pandas.typehints import infer_eval_type
from pyspark.sql.pandas.utils import require_minimum_pandas_version, require_minimum_pyarrow_version
from pyspark.sql.types import DataType
from pyspark.sql.udf import _create_udf
from pyspark.util import _get_argspec
class PandasUDFType(object):
    """Pandas UDF Types. See :meth:`pyspark.sql.functions.pandas_udf`.

    Each attribute aliases the corresponding ``PythonEvalType`` constant so
    users can name the UDF flavour without importing the internal enum.
    """
    # One-or-more Series in, one Series out (same length).
    SCALAR = PythonEvalType.SQL_SCALAR_PANDAS_UDF

    # Iterator of Series in, iterator of Series out.
    SCALAR_ITER = PythonEvalType.SQL_SCALAR_PANDAS_ITER_UDF

    # Whole-group DataFrame in, DataFrame out (groupby().applyInPandas).
    GROUPED_MAP = PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF

    # Series in, scalar out; used as a grouped or window aggregation.
    GROUPED_AGG = PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF
@since(2.3)
def pandas_udf(f=None, returnType=None, functionType=None):
    """
    Creates a pandas user defined function (a.k.a. vectorized user defined function).

    Pandas UDFs are user defined functions that are executed by Spark using Arrow to transfer
    data and Pandas to work with the data, which allows vectorized operations. A Pandas UDF
    is defined using the `pandas_udf` as a decorator or to wrap the function, and no
    additional configuration is required. A Pandas UDF behaves as a regular PySpark function
    API in general.

    :param f: user-defined function. A python function if used as a standalone function
    :param returnType: the return type of the user-defined function. The value can be either a
        :class:`pyspark.sql.types.DataType` object or a DDL-formatted type string.
    :param functionType: an enum value in :class:`pyspark.sql.functions.PandasUDFType`.
        Default: SCALAR.

        .. note:: This parameter exists for compatibility. Using Python type hints is encouraged.

    In order to use this API, customarily the below are imported:

    >>> import pandas as pd
    >>> from pyspark.sql.functions import pandas_udf

    From Spark 3.0 with Python 3.6+, `Python type hints <https://www.python.org/dev/peps/pep-0484>`_
    detect the function types as below:

    >>> @pandas_udf(IntegerType())
    ... def slen(s: pd.Series) -> pd.Series:
    ...     return s.str.len()

    Prior to Spark 3.0, the pandas UDF used `functionType` to decide the execution type as below:

    >>> from pyspark.sql.functions import PandasUDFType
    >>> from pyspark.sql.types import IntegerType
    >>> @pandas_udf(IntegerType(), PandasUDFType.SCALAR)
    ... def slen(s):
    ...     return s.str.len()

    It is preferred to specify type hints for the pandas UDF instead of specifying pandas UDF
    type via `functionType` which will be deprecated in the future releases.

    Note that the type hint should use `pandas.Series` in all cases but there is one variant
    that `pandas.DataFrame` should be used for its input or output type hint instead when the input
    or output column is of :class:`pyspark.sql.types.StructType`. The following example shows
    a Pandas UDF which takes long column, string column and struct column, and outputs a struct
    column. It requires the function to specify the type hints of `pandas.Series` and
    `pandas.DataFrame` as below:

    >>> @pandas_udf("col1 string, col2 long")
    ... def func(s1: pd.Series, s2: pd.Series, s3: pd.DataFrame) -> pd.DataFrame:
    ...     s3['col2'] = s1 + s2.str.len()
    ...     return s3
    ...
    >>> # Create a Spark DataFrame that has three columns including a struct column.
    ... df = spark.createDataFrame(
    ...     [[1, "a string", ("a nested string",)]],
    ...     "long_col long, string_col string, struct_col struct<col1:string>")
    >>> df.printSchema()
    root
    |-- long_column: long (nullable = true)
    |-- string_column: string (nullable = true)
    |-- struct_column: struct (nullable = true)
    |    |-- col1: string (nullable = true)
    >>> df.select(func("long_col", "string_col", "struct_col")).printSchema()
    |-- func(long_col, string_col, struct_col): struct (nullable = true)
    |    |-- col1: string (nullable = true)
    |    |-- col2: long (nullable = true)

    In the following sections, it describes the combinations of the supported type hints. For
    simplicity, `pandas.DataFrame` variant is omitted.

    * Series to Series
        `pandas.Series`, ... -> `pandas.Series`

        The function takes one or more `pandas.Series` and outputs one `pandas.Series`.
        The output of the function should always be of the same length as the input.

        >>> @pandas_udf("string")
        ... def to_upper(s: pd.Series) -> pd.Series:
        ...     return s.str.upper()
        ...
        >>> df = spark.createDataFrame([("John Doe",)], ("name",))
        >>> df.select(to_upper("name")).show()
        +--------------+
        |to_upper(name)|
        +--------------+
        |      JOHN DOE|
        +--------------+

        >>> @pandas_udf("first string, last string")
        ... def split_expand(s: pd.Series) -> pd.DataFrame:
        ...     return s.str.split(expand=True)
        ...
        >>> df = spark.createDataFrame([("John Doe",)], ("name",))
        >>> df.select(split_expand("name")).show()
        +------------------+
        |split_expand(name)|
        +------------------+
        |       [John, Doe]|
        +------------------+

        .. note:: The length of the input is not that of the whole input column, but is the
            length of an internal batch used for each call to the function.

    * Iterator of Series to Iterator of Series
        `Iterator[pandas.Series]` -> `Iterator[pandas.Series]`

        The function takes an iterator of `pandas.Series` and outputs an iterator of
        `pandas.Series`. In this case, the created pandas UDF instance requires one input
        column when this is called as a PySpark column. The output of each series from
        the function should always be of the same length as the input.

        It is useful when the UDF execution
        requires initializing some states although internally it works identically as
        Series to Series case. The pseudocode below illustrates the example.

        .. highlight:: python
        .. code-block:: python

            @pandas_udf("long")
            def calculate(iterator: Iterator[pd.Series]) -> Iterator[pd.Series]:
                # Do some expensive initialization with a state
                state = very_expensive_initialization()
                for x in iterator:
                    # Use that state for whole iterator.
                    yield calculate_with_state(x, state)

            df.select(calculate("value")).show()

        >>> from typing import Iterator
        >>> @pandas_udf("long")
        ... def plus_one(iterator: Iterator[pd.Series]) -> Iterator[pd.Series]:
        ...     for s in iterator:
        ...         yield s + 1
        ...
        >>> df = spark.createDataFrame(pd.DataFrame([1, 2, 3], columns=["v"]))
        >>> df.select(plus_one(df.v)).show()
        +-----------+
        |plus_one(v)|
        +-----------+
        |          2|
        |          3|
        |          4|
        +-----------+

        .. note:: The length of each series is the length of a batch internally used.

    * Iterator of Multiple Series to Iterator of Series
        `Iterator[Tuple[pandas.Series, ...]]` -> `Iterator[pandas.Series]`

        The function takes an iterator of a tuple of multiple `pandas.Series` and outputs an
        iterator of `pandas.Series`. In this case, the created pandas UDF instance requires
        input columns as many as the series when this is called as a PySpark column.
        It works identically as Iterator of Series to Iterator of Series case except
        the parameter difference. The output of each series from the function should always
        be of the same length as the input.

        >>> from typing import Iterator, Tuple
        >>> from pyspark.sql.functions import struct, col
        >>> @pandas_udf("long")
        ... def multiply(iterator: Iterator[Tuple[pd.Series, pd.DataFrame]]) -> Iterator[pd.Series]:
        ...     for s1, df in iterator:
        ...         yield s1 * df.v
        ...
        >>> df = spark.createDataFrame(pd.DataFrame([1, 2, 3], columns=["v"]))
        >>> df.withColumn('output', multiply(col("v"), struct(col("v")))).show()
        +---+------+
        |  v|output|
        +---+------+
        |  1|     1|
        |  2|     4|
        |  3|     9|
        +---+------+

        .. note:: The length of each series is the length of a batch internally used.

    * Series to Scalar
        `pandas.Series`, ... -> `Any`

        The function takes `pandas.Series` and returns a scalar value. The `returnType`
        should be a primitive data type, and the returned scalar can be either a python primitive
        type, e.g., int or float or a numpy data type, e.g., numpy.int64 or numpy.float64.
        `Any` should ideally be a specific scalar type accordingly.

        >>> @pandas_udf("double")
        ... def mean_udf(v: pd.Series) -> float:
        ...     return v.mean()
        ...
        >>> df = spark.createDataFrame(
        ...     [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)], ("id", "v"))
        >>> df.groupby("id").agg(mean_udf(df['v'])).show()
        +---+-----------+
        | id|mean_udf(v)|
        +---+-----------+
        |  1|        1.5|
        |  2|        6.0|
        +---+-----------+

        This UDF can also be used as window functions as below:

        >>> from pyspark.sql import Window
        >>> @pandas_udf("double")
        ... def mean_udf(v: pd.Series) -> float:
        ...     return v.mean()
        ...
        >>> df = spark.createDataFrame(
        ...     [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)], ("id", "v"))
        >>> w = Window.partitionBy('id').orderBy('v').rowsBetween(-1, 0)
        >>> df.withColumn('mean_v', mean_udf("v").over(w)).show()
        +---+----+------+
        | id|   v|mean_v|
        +---+----+------+
        |  1| 1.0|   1.0|
        |  1| 2.0|   1.5|
        |  2| 3.0|   3.0|
        |  2| 5.0|   4.0|
        |  2|10.0|   7.5|
        +---+----+------+

        .. note:: For performance reasons, the input series to window functions are not copied.
            Therefore, mutating the input series is not allowed and will cause incorrect results.
            For the same reason, users should also not rely on the index of the input series.

        .. seealso:: :meth:`pyspark.sql.GroupedData.agg` and :class:`pyspark.sql.Window`

    .. note:: The user-defined functions do not support conditional expressions or short circuiting
        in boolean expressions and it ends up with being executed all internally. If the functions
        can fail on special rows, the workaround is to incorporate the condition into the functions.

    .. note:: The user-defined functions do not take keyword arguments on the calling side.

    .. note:: The data type of returned `pandas.Series` from the user-defined functions should be
        matched with defined `returnType` (see :meth:`types.to_arrow_type` and
        :meth:`types.from_arrow_type`). When there is mismatch between them, Spark might do
        conversion on returned data. The conversion is not guaranteed to be correct and results
        should be checked for accuracy by users.

    .. note:: Currently,
        :class:`pyspark.sql.types.MapType`,
        :class:`pyspark.sql.types.ArrayType` of :class:`pyspark.sql.types.TimestampType` and
        nested :class:`pyspark.sql.types.StructType`
        are currently not supported as output types.

    .. seealso:: :meth:`pyspark.sql.DataFrame.mapInPandas`
    .. seealso:: :meth:`pyspark.sql.GroupedData.applyInPandas`
    .. seealso:: :meth:`pyspark.sql.PandasCogroupedOps.applyInPandas`
    .. seealso:: :meth:`pyspark.sql.UDFRegistration.register`
    """
    # The following table shows most of Pandas data and SQL type conversions in Pandas UDFs that
    # are not yet visible to the user. Some of behaviors are buggy and might be changed in the near
    # future. The table might have to be eventually documented externally.
    # Please see SPARK-28132's PR to see the codes in order to generate the table below.
    #
    # (The full, very wide SQL-type x Pandas-value conversion matrix is omitted
    # here for readability; regenerate it with the snippet from the SPARK-28132
    # PR when it needs updating.)
    #
    # Note: DDL formatted string is used for 'SQL Type' for simplicity. This string can be
    # used in `returnType`.
    # Note: The values inside of the table are generated by `repr`.
    # Note: Python 3.7.3, Pandas 0.24.2 and PyArrow 0.13.0 are used.
    # Note: Timezone is KST.
    # Note: 'X' means it throws an exception during the conversion.

    # Fail fast if the required pandas/pyarrow versions are not importable.
    require_minimum_pandas_version()
    require_minimum_pyarrow_version()

    # decorator @pandas_udf(returnType, functionType)
    is_decorator = f is None or isinstance(f, (str, DataType))

    if is_decorator:
        # If DataType has been passed as a positional argument
        # for decorator use it as a returnType
        return_type = f or returnType

        if functionType is not None:
            # @pandas_udf(dataType, functionType=functionType)
            # @pandas_udf(returnType=dataType, functionType=functionType)
            eval_type = functionType
        elif returnType is not None and isinstance(returnType, int):
            # @pandas_udf(dataType, functionType)
            eval_type = returnType
        else:
            # @pandas_udf(dataType) or @pandas_udf(returnType=dataType)
            eval_type = None
    else:
        return_type = returnType

        if functionType is not None:
            eval_type = functionType
        else:
            eval_type = None

    if return_type is None:
        raise ValueError("Invalid return type: returnType can not be None")

    if eval_type not in [PythonEvalType.SQL_SCALAR_PANDAS_UDF,
                         PythonEvalType.SQL_SCALAR_PANDAS_ITER_UDF,
                         PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF,
                         PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF,
                         PythonEvalType.SQL_MAP_PANDAS_ITER_UDF,
                         PythonEvalType.SQL_COGROUPED_MAP_PANDAS_UDF,
                         None]:  # None means it should infer the type from type hints.

        raise ValueError("Invalid function type: "
                         "functionType must be one the values from PandasUDFType")

    if is_decorator:
        # Decorator usage: defer UDF creation until the function is supplied.
        return functools.partial(_create_pandas_udf, returnType=return_type, evalType=eval_type)
    else:
        return _create_pandas_udf(f=f, returnType=return_type, evalType=eval_type)
def _create_pandas_udf(f, returnType, evalType):
    """Validate *f*'s signature against *evalType* and build the UDF.

    When *evalType* is None (pure type-hint usage), it is inferred from the
    function's annotations; otherwise the legacy functionType path is used.
    Raises ``ValueError`` when the argument count does not match the UDF
    flavour's contract.
    """
    argspec = _get_argspec(f)

    # pandas UDF by type hints.
    if sys.version_info >= (3, 6):
        from inspect import signature

        if evalType in [PythonEvalType.SQL_SCALAR_PANDAS_UDF,
                        PythonEvalType.SQL_SCALAR_PANDAS_ITER_UDF,
                        PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF]:
            # Explicit PandasUDFType for these flavours still works but is
            # discouraged in favour of type hints.
            warnings.warn(
                "In Python 3.6+ and Spark 3.0+, it is preferred to specify type hints for "
                "pandas UDF instead of specifying pandas UDF type which will be deprecated "
                "in the future releases. See SPARK-28264 for more details.", UserWarning)
        elif len(argspec.annotations) > 0:
            # No explicit flavour and the function is annotated: infer the
            # eval type from the type hints.
            evalType = infer_eval_type(signature(f))
            assert evalType is not None

    if evalType is None:
        # Set default is scalar UDF.
        evalType = PythonEvalType.SQL_SCALAR_PANDAS_UDF

    if (evalType == PythonEvalType.SQL_SCALAR_PANDAS_UDF or
            evalType == PythonEvalType.SQL_SCALAR_PANDAS_ITER_UDF) and \
            len(argspec.args) == 0 and \
            argspec.varargs is None:
        raise ValueError(
            "Invalid function: 0-arg pandas_udfs are not supported. "
            "Instead, create a 1-arg pandas_udf and ignore the arg in your function."
        )

    if evalType == PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF \
            and len(argspec.args) not in (1, 2):
        raise ValueError(
            "Invalid function: pandas_udf with function type GROUPED_MAP or "
            "the function in groupby.applyInPandas "
            "must take either one argument (data) or two arguments (key, data).")

    if evalType == PythonEvalType.SQL_COGROUPED_MAP_PANDAS_UDF \
            and len(argspec.args) not in (2, 3):
        raise ValueError(
            "Invalid function: the function in cogroup.applyInPandas "
            "must take either two arguments (left, right) "
            "or three arguments (key, left, right).")

    return _create_udf(f, returnType, evalType)
| apache-2.0 |
cdegroc/scikit-learn | sklearn/neighbors/graph.py | 14 | 2839 | """Nearest Neighbors graph functions"""
# Author: Jake Vanderplas <vanderplas@astro.washington.edu>
#
# License: BSD, (C) INRIA, University of Amsterdam
from .base import KNeighborsMixin, RadiusNeighborsMixin
from .unsupervised import NearestNeighbors
def kneighbors_graph(X, n_neighbors, mode='connectivity'):
    """Compute the (weighted) graph of k-Neighbors for points in X.

    Parameters
    ----------
    X : array-like or BallTree, shape = [n_samples, n_features]
        Sample data, as a numpy array or a precomputed :class:`BallTree`.

    n_neighbors : int
        Number of neighbors for each sample.

    mode : {'connectivity', 'distance'}, optional
        'connectivity' returns a 0/1 connectivity matrix; 'distance'
        weights each edge by the Euclidean distance between points.

    Returns
    -------
    A : sparse matrix in CSR format, shape = [n_samples, n_samples]
        A[i, j] is assigned the weight of edge that connects i to j.

    Examples
    --------
    >>> X = [[0], [3], [1]]
    >>> from sklearn.neighbors import kneighbors_graph
    >>> A = kneighbors_graph(X, 2)
    >>> A.todense()
    matrix([[ 1.,  0.,  1.],
            [ 0.,  1.,  1.],
            [ 1.,  0.,  1.]])

    See also
    --------
    radius_neighbors_graph
    """
    # Fit an estimator on the fly unless the caller already supplied an
    # object implementing the k-neighbors API.
    if isinstance(X, KNeighborsMixin):
        estimator = X
    else:
        estimator = NearestNeighbors(n_neighbors).fit(X)
    return estimator.kneighbors_graph(estimator._fit_X, n_neighbors, mode=mode)
def radius_neighbors_graph(X, radius, mode='connectivity'):
    """Compute the (weighted) graph of Neighbors for points in X.

    Neighborhoods are restricted to points at a distance lower than
    *radius*.

    Parameters
    ----------
    X : array-like or BallTree, shape = [n_samples, n_features]
        Sample data, as a numpy array or a precomputed :class:`BallTree`.

    radius : float
        Radius of neighborhoods.

    mode : {'connectivity', 'distance'}, optional
        'connectivity' returns a 0/1 connectivity matrix; 'distance'
        weights each edge by the Euclidean distance between points.

    Returns
    -------
    A : sparse matrix in CSR format, shape = [n_samples, n_samples]
        A[i, j] is assigned the weight of edge that connects i to j.

    Examples
    --------
    >>> X = [[0], [3], [1]]
    >>> from sklearn.neighbors import radius_neighbors_graph
    >>> A = radius_neighbors_graph(X, 1.5)
    >>> A.todense()
    matrix([[ 1.,  0.,  1.],
            [ 0.,  1.,  0.],
            [ 1.,  0.,  1.]])

    See also
    --------
    kneighbors_graph
    """
    # Fit an estimator on the fly unless the caller already supplied an
    # object implementing the radius-neighbors API.
    if isinstance(X, RadiusNeighborsMixin):
        estimator = X
    else:
        estimator = NearestNeighbors(radius=radius).fit(X)
    return estimator.radius_neighbors_graph(estimator._fit_X, radius, mode)
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.