repo_name stringlengths 6 67 | path stringlengths 5 185 | copies stringlengths 1 3 | size stringlengths 4 6 | content stringlengths 1.02k 962k | license stringclasses 15 values |
|---|---|---|---|---|---|
rahuldhote/scikit-learn | sklearn/decomposition/__init__.py | 147 | 1421 | """
The :mod:`sklearn.decomposition` module includes matrix decomposition
algorithms, including among others PCA, NMF or ICA. Most of the algorithms of
this module can be regarded as dimensionality reduction techniques.
"""
from .nmf import NMF, ProjectedGradientNMF
from .pca import PCA, RandomizedPCA
from .incremental_pca import IncrementalPCA
from .kernel_pca import KernelPCA
from .sparse_pca import SparsePCA, MiniBatchSparsePCA
from .truncated_svd import TruncatedSVD
from .fastica_ import FastICA, fastica
from .dict_learning import (dict_learning, dict_learning_online, sparse_encode,
DictionaryLearning, MiniBatchDictionaryLearning,
SparseCoder)
from .factor_analysis import FactorAnalysis
from ..utils.extmath import randomized_svd
from .online_lda import LatentDirichletAllocation
# Public API of sklearn.decomposition: names exported by
# ``from sklearn.decomposition import *``.
__all__ = ['DictionaryLearning',
           'FastICA',
           'IncrementalPCA',
           'KernelPCA',
           'MiniBatchDictionaryLearning',
           'MiniBatchSparsePCA',
           'NMF',
           'PCA',
           'ProjectedGradientNMF',
           'RandomizedPCA',
           'SparseCoder',
           'SparsePCA',
           'dict_learning',
           'dict_learning_online',
           'fastica',
           'randomized_svd',
           'sparse_encode',
           'FactorAnalysis',
           'TruncatedSVD',
           'LatentDirichletAllocation']
| bsd-3-clause |
ThomasMiconi/nupic.research | htmresearch/support/sp_paper_utils.py | 6 | 11432 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import copy
import os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib as mpl
from htmresearch.frameworks.sp_paper.sp_metrics import (
calculateInputOverlapMat, percentOverlap
)
from nupic.bindings.math import GetNTAReal
realDType = GetNTAReal()
uintType = "uint32"
def plotPermInfo(permInfo):
  """Plot synapse permanence statistics over time, one metric per subplot.

  :param permInfo: dict with per-timestep sequences under the keys
      'numConnectedSyn', 'numNonConnectedSyn', 'avgPermConnectedSyn'
      and 'avgPermNonConnectedSyn'
  """
  fig, ax = plt.subplots(5, 1, sharex=True)
  ax[0].plot(permInfo['numConnectedSyn'])
  ax[0].set_title('connected syn #')
  ax[1].plot(permInfo['numNonConnectedSyn'])
  # Bug fix: this title belongs to the second subplot; previously it was
  # written to ax[0] again, overwriting 'connected syn #'.
  ax[1].set_title('non-connected syn #')
  ax[2].plot(permInfo['avgPermConnectedSyn'])
  ax[2].set_title('perm connected')
  ax[3].plot(permInfo['avgPermNonConnectedSyn'])
  ax[3].set_title('perm unconnected')
def plotAccuracyVsNoise(noiseLevelList, predictionAccuracy):
  """Plot prediction accuracy as a function of input noise level.

  :param noiseLevelList: sequence of noise levels (x-axis)
  :param predictionAccuracy: sequence of accuracies in [0, 1], same length
      as noiseLevelList (y-axis)
  """
  plt.figure()
  plt.plot(noiseLevelList, predictionAccuracy, '-o')
  # Fixed y-range so accuracy curves from different runs are comparable.
  plt.ylim([0, 1.05])
  plt.xlabel('Noise level')
  plt.ylabel('Prediction Accuracy')
def plotSPstatsOverTime(metrics, fileName=None):
  """Plot five spatial-pooler metrics over training epochs, stacked vertically.

  NOTE: mutates ``metrics`` in place -- the first entry of 'stability',
  'numNewSyn' and 'numRemoveSyn' is blanked to NaN because those metrics
  are undefined before the first epoch comparison.

  :param metrics: dict of per-epoch metric sequences ('stability', 'entropy',
      'maxEntropy', 'noiseRobustness', 'numNewSyn', 'numRemoveSyn')
  :param fileName: if given, the figure is saved to this path
  :return: the array of matplotlib Axes
  """
  fig, axs = plt.subplots(nrows=5, ncols=1, sharex=True)
  metrics['stability'][0] = float('nan')
  metrics['numNewSyn'][0] = float('nan')
  metrics['numRemoveSyn'][0] = float('nan')
  axs[0].plot(metrics['stability'])
  axs[0].set_ylabel('Stability')
  axs[1].plot(metrics['entropy'])
  # Draw the theoretical maximum entropy as a flat dashed reference line
  # (median used to smooth out per-epoch jitter).
  maxEntropy = metrics['maxEntropy']
  maxEntropy = np.ones(len(maxEntropy)) * np.median(maxEntropy)
  axs[1].plot(maxEntropy, 'k--')
  axs[1].set_ylabel('Entropy (bits)')
  # Noise robustness is optional; skip the curve (but keep the label) if empty.
  if len(metrics['noiseRobustness']) > 0:
    axs[2].plot(metrics['noiseRobustness'])
  axs[2].set_ylabel('Noise Robustness')
  axs[3].plot(metrics['numNewSyn'])
  axs[3].set_ylabel('Synapses Formation')
  axs[4].plot(metrics['numRemoveSyn'])
  axs[4].set_ylabel('Synapse Removal')
  axs[4].set_xlim([0, len(metrics['numRemoveSyn'])])
  axs[4].set_xlabel('epochs')
  if fileName is not None:
    plt.savefig(fileName)
  return axs
def plotReceptiveFields2D(sp, Nx, Ny):
  """Show the 2D receptive fields of 16 randomly chosen SP columns.

  :param sp: spatial pooler instance (must expose getColumnDimensions and
      getConnectedSynapses)
  :param Nx: input width; Nx * Ny must equal the SP input size
  :param Ny: input height
  """
  inputSize = Nx * Ny
  numColumns = np.product(sp.getColumnDimensions())
  nrows = 4
  ncols = 4
  fig, ax = plt.subplots(nrows, ncols)
  for r in range(nrows):
    for c in range(ncols):
      # Sample a column at random (with replacement across subplots).
      colID = np.random.randint(numColumns)
      connectedSynapses = np.zeros((inputSize,), dtype=uintType)
      sp.getConnectedSynapses(colID, connectedSynapses)
      receptiveField = np.reshape(connectedSynapses, (Nx, Ny))
      # 1-RF inverts the colors so connected synapses render black on white.
      ax[r, c].imshow(1-receptiveField, interpolation="nearest", cmap='gray')
      # ax[r, c].set_title('col {}'.format(colID))
      ax[r, c].set_xticks([])
      ax[r, c].set_yticks([])
def plotReceptiveFields(sp, nDim1=8, nDim2=8):
  """
  Plot 2D receptive fields for 16 randomly selected columns.

  :param sp: spatial pooler instance (must expose getColumnDimensions and
      getConnectedSynapses)
  :param nDim1: input height; nDim1 * nDim2 must equal the SP input size
  :param nDim2: input width
  """
  columnNumber = np.product(sp.getColumnDimensions())
  fig, ax = plt.subplots(nrows=4, ncols=4)
  for rowI in range(4):
    for colI in range(4):
      # Sample a column at random (with replacement across subplots).
      col = np.random.randint(columnNumber)
      connectedSynapses = np.zeros((nDim1*nDim2,), dtype=uintType)
      sp.getConnectedSynapses(col, connectedSynapses)
      receptiveField = connectedSynapses.reshape((nDim1, nDim2))
      ax[rowI, colI].imshow(receptiveField, cmap='gray')
      ax[rowI, colI].set_title("col: {}".format(col))
def plotReceptiveFieldCenter(RFcenters, connectedCounts, inputDims,
                             minConnection=None, maxConnection=None):
  """Scatter-plot receptive field centers, colored by connected synapse count.

  :param RFcenters: array of shape (numColumns, 2) with (x, y) RF centers
  :param connectedCounts: per-column connected-synapse counts (color values)
  :param inputDims: (nX, nY) pair used only to set the axis limits
  :param minConnection: lower bound of the color scale (default: data minimum)
  :param maxConnection: upper bound of the color scale (default: data maximum)
  :return: the matplotlib figure
  """
  nX, nY = inputDims
  # matplotlib.cm is already imported at module level as `cm`; the redundant
  # function-local import has been removed.
  cmap = cm.get_cmap('jet')
  if minConnection is None:
    minConnection = np.min(connectedCounts)
  if maxConnection is None:
    maxConnection = np.max(connectedCounts)
  fig = plt.figure()
  sc = plt.scatter(RFcenters[:, 0], RFcenters[:, 1],
                   vmin=minConnection, vmax=maxConnection,
                   c=connectedCounts, cmap=cmap)
  plt.colorbar(sc)
  plt.axis('equal')
  # Pad the limits by one unit so edge markers are not clipped.
  plt.xlim([-1, nX + 1])
  plt.ylim([-1, nY + 1])
  return fig
def plotBoostTrace(sp, inputVectors, columnIndex):
  """
  Plot boost factor and (min) active duty cycle traces for a selected column.

  Note that learning is ON for the SP here: every call to sp.compute()
  updates the pooler's boost factors and duty cycles as a side effect.

  :param sp: sp instance
  :param inputVectors: input data, shape (numInputVector, inputSize)
  :param columnIndex: index of the column whose traces are plotted
  """
  numInputVector, inputSize = inputVectors.shape
  columnNumber = np.prod(sp.getColumnDimensions())
  # One column per timestep; traces are recorded for ALL columns but only
  # columnIndex is plotted below.
  boostFactorsTrace = np.zeros((columnNumber, numInputVector))
  activeDutyCycleTrace = np.zeros((columnNumber, numInputVector))
  minActiveDutyCycleTrace = np.zeros((columnNumber, numInputVector))
  for i in range(numInputVector):
    outputColumns = np.zeros(sp.getColumnDimensions(), dtype=uintType)
    # Deep-copy so sp.compute cannot mutate the caller's input data.
    inputVector = copy.deepcopy(inputVectors[i][:])
    sp.compute(inputVector, True, outputColumns)
    boostFactors = np.zeros((columnNumber, ), dtype=realDType)
    sp.getBoostFactors(boostFactors)
    boostFactorsTrace[:, i] = boostFactors
    activeDutyCycle = np.zeros((columnNumber, ), dtype=realDType)
    sp.getActiveDutyCycles(activeDutyCycle)
    activeDutyCycleTrace[:, i] = activeDutyCycle
    minActiveDutyCycle = np.zeros((columnNumber, ), dtype=realDType)
    sp.getMinActiveDutyCycles(minActiveDutyCycle)
    minActiveDutyCycleTrace[:, i] = minActiveDutyCycle
  plt.figure()
  plt.subplot(2, 1, 1)
  plt.plot(boostFactorsTrace[columnIndex, :])
  plt.ylabel('Boost Factor')
  plt.subplot(2, 1, 2)
  plt.plot(activeDutyCycleTrace[columnIndex, :])
  plt.plot(minActiveDutyCycleTrace[columnIndex, :])
  plt.xlabel(' Time ')
  plt.ylabel('Active Duty Cycle')
def analyzeReceptiveFieldSparseInputs(inputVectors, sp):
  """Visualize how strongly each SP column's receptive field overlaps the
  input vectors.

  Produces two figures: the average per-column overlap profile (inputs
  sorted from best- to worst-matching), and a heatmap of the raw overlap
  matrix for the first 100 columns.

  :param inputVectors: input data, shape (numInputVector, inputSize)
  :param sp: spatial pooler instance
  """
  numColumns = np.product(sp.getColumnDimensions())
  overlapMat = calculateInputOverlapMat(inputVectors, sp)
  # Sort each column's overlaps so columns can be averaged by rank.
  sortedOverlapMat = np.zeros(overlapMat.shape)
  for c in range(numColumns):
    sortedOverlapMat[c, :] = np.sort(overlapMat[c, :])
  # flipud after mean => descending order (best-matching inputs first).
  avgSortedOverlaps = np.flipud(np.mean(sortedOverlapMat, 0))
  plt.figure()
  plt.plot(avgSortedOverlaps, '-o')
  plt.xlabel('sorted input vector #')
  plt.ylabel('percent overlap')
  plt.figure()
  plt.imshow(overlapMat[:100, :], interpolation="nearest", cmap="magma")
  plt.xlabel("Input Vector #")
  plt.ylabel("SP Column #")
  plt.colorbar()
  plt.title('percent overlap')
def analyzeReceptiveFieldCorrelatedInputs(
    inputVectors, sp, params, inputVectors1, inputVectors2):
  """Analyze receptive field overlap for a two-sensor (concatenated) input.

  The input space is split into two halves of inputSize/2 each; the overlap
  of every column's connected synapses with each sensor's input vectors is
  computed and plotted separately.

  :param inputVectors: concatenated input data (numInputVector, inputSize)
  :param sp: spatial pooler instance
  :param params: experiment parameter dict ('numInputVectorPerSensor',
      'numActiveInputBits', 'inputSize')
  :param inputVectors1: inputs for sensor 1
  :param inputVectors2: inputs for sensor 2
  """
  columnNumber = np.prod(sp.getColumnDimensions())
  numInputVector, inputSize = inputVectors.shape
  # NOTE(review): numInputVector1/2 and w are computed but never used below.
  numInputVector1 = params['numInputVectorPerSensor']
  numInputVector2 = params['numInputVectorPerSensor']
  w = params['numActiveInputBits']
  inputSize1 = int(params['inputSize']/2)
  inputSize2 = int(params['inputSize']/2)
  connectedCounts = np.zeros((columnNumber,), dtype=uintType)
  sp.getConnectedCounts(connectedCounts)
  numColumns = np.product(sp.getColumnDimensions())
  overlapMat1 = np.zeros((numColumns, inputVectors1.shape[0]))
  overlapMat2 = np.zeros((numColumns, inputVectors2.shape[0]))
  # NOTE(review): numColumns and (numInputVector, inputSize) are recomputed
  # here with identical values; harmless but redundant.
  numColumns = np.product(sp.getColumnDimensions())
  numInputVector, inputSize = inputVectors.shape
  for c in range(numColumns):
    connectedSynapses = np.zeros((inputSize,), dtype=uintType)
    sp.getConnectedSynapses(c, connectedSynapses)
    for i in range(inputVectors1.shape[0]):
      overlapMat1[c, i] = percentOverlap(connectedSynapses[:inputSize1],
                                         inputVectors1[i, :inputSize1])
    for i in range(inputVectors2.shape[0]):
      # NOTE(review): the synapses use the SECOND half [inputSize1:] but the
      # input uses the FIRST half [:inputSize2]; this only lines up if
      # inputVectors2 stores sensor-2 data in its first inputSize2 bits --
      # verify against the caller.
      overlapMat2[c, i] = percentOverlap(connectedSynapses[inputSize1:],
                                         inputVectors2[i, :inputSize2])
  sortedOverlapMat1 = np.zeros(overlapMat1.shape)
  sortedOverlapMat2 = np.zeros(overlapMat2.shape)
  for c in range(numColumns):
    sortedOverlapMat1[c, :] = np.sort(overlapMat1[c, :])
    sortedOverlapMat2[c, :] = np.sort(overlapMat2[c, :])
  fig, ax = plt.subplots(nrows=2, ncols=2)
  ax[0, 0].plot(np.mean(sortedOverlapMat1, 0), '-o')
  ax[0, 1].plot(np.mean(sortedOverlapMat2, 0), '-o')
  fig, ax = plt.subplots(nrows=1, ncols=2)
  ax[0].imshow(overlapMat1[:100, :], interpolation="nearest", cmap="magma")
  ax[0].set_xlabel('# Input 1')
  ax[0].set_ylabel('SP Column #')
  ax[1].imshow(overlapMat2[:100, :], interpolation="nearest", cmap="magma")
  ax[1].set_xlabel('# Input 2')
  ax[1].set_ylabel('SP Column #')
def runSPOnBatch(sp, inputVectors, learn, sdrOrders=None, verbose=0):
  """Feed a batch of input vectors through the spatial pooler.

  :param sp: spatial pooler instance
  :param inputVectors: input data, shape (numInputVector, inputSize)
  :param learn: whether SP learning is enabled during compute()
  :param sdrOrders: optional permutation of row indices giving the
      presentation order (default: natural order)
  :param verbose: if > 0, print progress every 100 vectors
  :return: (outputColumns, avgBoostFactors) where outputColumns has shape
      (numInputVector, numColumns) and avgBoostFactors is the mean boost
      factor per column over the batch (all ones when learn is False)
  """
  numInputVector, inputSize = inputVectors.shape
  numColumns = np.prod(sp.getColumnDimensions())
  if sdrOrders is None:
    sdrOrders = range(numInputVector)
  outputColumns = np.zeros((numInputVector, numColumns), dtype=uintType)
  if learn:
    avgBoostFactors = np.zeros((numColumns,), dtype=realDType)
  else:
    # Without learning, boost factors are not queried; report neutral 1s.
    avgBoostFactors = np.ones((numColumns,), dtype=realDType)
  for i in range(numInputVector):
    # compute() writes the active columns into the row view of outputColumns.
    sp.compute(inputVectors[sdrOrders[i]][:], learn, outputColumns[sdrOrders[i]][:])
    if learn:
      boostFactors = np.zeros((numColumns,), dtype=realDType)
      sp.getBoostFactors(boostFactors)
      avgBoostFactors += boostFactors
    if verbose > 0:
      if i % 100 == 0:
        print "{} % finished".format(100 * float(i) / float(numInputVector))
  if learn:
    avgBoostFactors = avgBoostFactors/numInputVector
  return outputColumns, avgBoostFactors
def createDirectories(expName):
  """Create the per-experiment result and figure output directories.

  All directories are created relative to the current working directory,
  under ./results/ and ./figures/. Missing intermediate directories are
  created too; already-existing directories are left untouched.

  :param expName: experiment name used as the leaf directory name
  """
  subdirTemplates = [
      './results/traces/{}/',
      './results/InputCoverage/{}/',
      './results/classification/{}/',
      './results/input_output_overlap/{}/',
      './figures/InputCoverage/{}/',
      './figures/exampleRFs/{}/',
      './figures/ResponseToTestInputs/{}/',
      './figures/RFcenters/{}/',
      './figures/avgInputs/{}/',
      './figures/inputOverlaps/{}/',
  ]
  for template in subdirTemplates:
    path = template.format(expName)
    try:
      # Create unconditionally instead of exists-then-create, which is racy
      # when several experiment processes start at the same time.
      os.makedirs(path)
    except OSError:
      # Only swallow the error if the directory really exists now.
      if not os.path.isdir(path):
        raise
def getConnectedSyns(sp):
  """Return the connected-synapse matrix of the spatial pooler.

  :param sp: spatial pooler instance
  :return: float32 array of shape (numColumns, numInputs); entry (c, i)
      is nonzero iff column c has a connected synapse to input bit i
  """
  numInputs = sp.getNumInputs()
  numColumns = np.prod(sp.getColumnDimensions())
  connectedSyns = np.zeros((numColumns, numInputs), dtype=uintType)
  for columnIndex in range(numColumns):
    # getConnectedSynapses fills the provided row buffer in place.
    sp.getConnectedSynapses(columnIndex, connectedSyns[columnIndex, :])
  # Cast to float so callers can use the matrix directly in linear algebra.
  connectedSyns = connectedSyns.astype('float32')
  return connectedSyns
chrsrds/scikit-learn | examples/semi_supervised/plot_label_propagation_versus_svm_iris.py | 21 | 2391 | """
=====================================================================
Decision boundary of label propagation versus SVM on the Iris dataset
=====================================================================
Comparison for decision boundary generated on iris dataset
between Label Propagation and SVM.
This demonstrates Label Propagation learning a good boundary
even with a small amount of labeled data.
"""
print(__doc__)
# Authors: Clay Woolam <clay@woolam.org>
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn import svm
from sklearn.semi_supervised import label_propagation
# Fixed seed so the labeled/unlabeled split is reproducible across runs.
rng = np.random.RandomState(0)

iris = datasets.load_iris()
# Only the first two features, so the decision boundary can be drawn in 2D.
X = iris.data[:, :2]
y = iris.target

# step size in the mesh
h = .02

# Randomly drop labels: -1 marks a sample as unlabeled for LabelSpreading.
y_30 = np.copy(y)
y_30[rng.rand(len(y)) < 0.3] = -1
y_50 = np.copy(y)
y_50[rng.rand(len(y)) < 0.5] = -1

# we create an instance of SVM and fit out data. We do not scale our
# data since we want to plot the support vectors
ls30 = (label_propagation.LabelSpreading().fit(X, y_30), y_30)
ls50 = (label_propagation.LabelSpreading().fit(X, y_50), y_50)
ls100 = (label_propagation.LabelSpreading().fit(X, y), y)
rbf_svc = (svm.SVC(kernel='rbf', gamma=.5).fit(X, y), y)

# create a mesh to plot in (one unit of padding around the data range)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                     np.arange(y_min, y_max, h))

# title for the plots
titles = ['Label Spreading 30% data',
          'Label Spreading 50% data',
          'Label Spreading 100% data',
          'SVC with rbf kernel']

# -1 (unlabeled) renders white; the three iris classes get distinct colors.
color_map = {-1: (1, 1, 1), 0: (0, 0, .9), 1: (1, 0, 0), 2: (.8, .6, 0)}

for i, (clf, y_train) in enumerate((ls30, ls50, ls100, rbf_svc)):
    # Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
    plt.subplot(2, 2, i + 1)
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])

    # Put the result into a color plot
    Z = Z.reshape(xx.shape)
    plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
    plt.axis('off')

    # Plot also the training points
    colors = [color_map[y] for y in y_train]
    plt.scatter(X[:, 0], X[:, 1], c=colors, edgecolors='black')
    plt.title(titles[i])

plt.suptitle("Unlabeled points are colored white", y=0.1)
plt.show()
| bsd-3-clause |
rudhir-upretee/Sumo_With_Netsim | tools/visualization/mpl_dump_twoAgainst.py | 3 | 7534 | #!/usr/bin/env python
"""
@file mpl_dump_twoAgainst.py
@author Daniel Krajzewicz
@author Michael Behrisch
@date 2007-10-25
@version $Id: mpl_dump_twoAgainst.py 11671 2012-01-07 20:14:30Z behrisch $
This script reads two dump files and plots one of the values
stored therein as an x-/y- plot.
matplotlib has to be installed for this purpose
SUMO, Simulation of Urban MObility; see http://sumo.sourceforge.net/
Copyright (C) 2008-2012 DLR (http://www.dlr.de/) and contributors
All rights reserved
"""
from matplotlib import rcParams
from pylab import *
import os, string, sys, StringIO
import math
from optparse import OptionParser
from xml.sax import saxutils, make_parser, handler
def toHex(val):
    """Converts the given value (0-255) into its two-digit hexadecimal
    representation, e.g. 255 -> "ff"."""
    # Renamed the lookup table from `hex` to avoid shadowing the built-in hex().
    digits = "0123456789abcdef"
    return digits[int(val/16)] + digits[int(val - int(val/16)*16)]
def toColor(val):
    """Converts the given value (0-1) into a grayscale color definition as
    parseable by matplotlib, e.g. 0.0 -> "#000000"."""
    gray = 255. * val
    channel = toHex(gray)
    # Same value on all three channels yields a gray tone.
    return "#" + channel + channel + channel
def updateMinMax(min, max, value):
    """Return the (min, max) pair updated with `value`.

    Either bound may be None (uninitialised), in which case `value` becomes
    that bound. NOTE: the parameter names shadow the built-ins min()/max();
    they are kept unchanged for backward compatibility with callers.
    """
    # Compare to None with `is`, not `==` (PEP 8; `== None` can be hijacked
    # by operator overloading).
    if min is None or min > value:
        min = value
    if max is None or max < value:
        max = value
    return (min, max)
class WeightsReader(handler.ContentHandler):
    """SAX handler that reads a SUMO dump file, accumulating the tracked
    attribute value per (interval begin time, edge id)."""

    def __init__(self, value):
        """value: name of the edge attribute to accumulate (e.g. 'speed')."""
        self._id = ''
        self._edge2value = {}
        self._edge2no = {}
        self._value = value

    def startElement(self, name, attrs):
        if name == 'interval':
            # A new measurement interval opens a fresh per-edge accumulator.
            self._time = int(attrs['begin'])
            self._edge2value[self._time] = {}
        if name == 'edge':
            self._id = attrs['id']
            # Accumulate: repeated <edge> entries for the same id are summed.
            previous = self._edge2value[self._time].setdefault(self._id, 0.)
            self._edge2value[self._time][self._id] = previous + float(attrs[self._value])
# initialise
# initialise command-line option parsing
optParser = OptionParser()
optParser.add_option("-v", "--verbose", action="store_true", dest="verbose",
                     default=False, help="tell me what you are doing")
# i/o
optParser.add_option("-1", "--dump1", dest="dump1",
                     help="First dump (mandatory)", metavar="FILE")
optParser.add_option("-2", "--dump2", dest="dump2",
                     help="Second dump (mandatory)", metavar="FILE")
optParser.add_option("-o", "--output", dest="output",
                     help="Name of the image to generate", metavar="FILE")
optParser.add_option("--size", dest="size", type="string", default="",
                     help="defines the output size")
# processing
optParser.add_option("--value", dest="value",
                     type="string", default="speed", help="which value shall be used")
optParser.add_option("-s", "--show", action="store_true", dest="show",
                     default=False, help="shows plot after generating it")
optParser.add_option("-j", "--join", action="store_true", dest="join",
                     default=False, help="aggregates each edge's values")
optParser.add_option("-C", "--time-coloring", action="store_true", dest="time_coloring",
                     default=False, help="colors the points by the time")
# axes/legend (each takes a comma-separated "begin,end,step,fontsize" string)
optParser.add_option("--xticks", dest="xticks", type="string", default="",
                     help="defines ticks on x-axis")
optParser.add_option("--yticks", dest="yticks", type="string", default="",
                     help="defines ticks on y-axis")
optParser.add_option("--xlim", dest="xlim", type="string", default="",
                     help="defines x-axis range")
optParser.add_option("--ylim", dest="ylim", type="string", default="",
                     help="defines y-axis range")
# parse options
(options, args) = optParser.parse_args()

# check set options: without either --show or --output the run would be useless
if not options.show and not options.output:
    print "Neither show (--show) not write (--output <FILE>)? Exiting..."
    exit()

# A single SAX parser is reused for both dump files.
parser = make_parser()
# read dump1
if options.verbose:
    print "Reading dump1..."
weights1 = WeightsReader(options.value)
parser.setContentHandler(weights1)
parser.parse(options.dump1)
# read dump2
if options.verbose:
    print "Reading dump2..."
weights2 = WeightsReader(options.value)
parser.setContentHandler(weights2)
parser.parse(options.dump2)
# plot: collect the x/y point values from both dumps
if options.verbose:
    print "Processing data..."
# set figure size
if not options.show:
    rcParams['backend'] = 'Agg'
if options.size:
    f = figure(figsize=(options.size.split(",")))
else:
    f = figure()
xs = []
ys = []
# compute values and color(s)
c = 'k'
# NOTE(review): `min`/`max` shadow the Python built-ins for the rest of
# this script; kept for compatibility with updateMinMax() below.
min = None
max = None
if options.join:
    # Aggregate each edge's values over all intervals, then plot one
    # (dump1-average, dump2-average) point per edge.
    values1 = {}
    values2 = {}
    nos1 = {}
    nos2 = {}
    for t in weights1._edge2value:
        for edge in weights1._edge2value[t]:
            if edge not in values1:
                nos1[edge] = 0
                values1[edge] = 0
            nos1[edge] = nos1[edge] + 1
            values1[edge] = values1[edge] + weights1._edge2value[t][edge]
        if t in weights2._edge2value:
            for edge in weights2._edge2value[t]:
                if edge not in values2:
                    nos2[edge] = 0
                    values2[edge] = 0
                nos2[edge] = nos2[edge] + 1
                values2[edge] = values2[edge] + weights2._edge2value[t][edge]
    # Only edges present in BOTH dumps contribute a point.
    for edge in values1:
        if edge in values2:
            xs.append(values1[edge] / nos1[edge])
            ys.append(values2[edge] / nos2[edge])
            (min, max) = updateMinMax(min, max, values1[edge] / nos1[edge])
            (min, max) = updateMinMax(min, max, values2[edge] / nos2[edge])
else:
    # One point per (interval, edge) pair shared by both dumps.
    if options.time_coloring:
        c = []
    for t in weights1._edge2value:
        if options.time_coloring:
            # One point series per interval; earlier times render darker
            # (time of day mapped into [0.2, 1.0] and inverted).
            xs.append([])
            ys.append([])
            cc = 1. - ((float(t) / 86400.) * .8 + .2)
            c.append(toColor(cc))
            for edge in weights1._edge2value[t]:
                if t in weights2._edge2value and edge in weights2._edge2value[t]:
                    xs[-1].append(weights1._edge2value[t][edge])
                    ys[-1].append(weights2._edge2value[t][edge])
                    (min, max) = updateMinMax(min, max, weights1._edge2value[t][edge])
                    (min, max) = updateMinMax(min, max, weights2._edge2value[t][edge])
        else:
            for edge in weights1._edge2value[t]:
                if t in weights2._edge2value and edge in weights2._edge2value[t]:
                    xs.append(weights1._edge2value[t][edge])
                    ys.append(weights2._edge2value[t][edge])
                    (min, max) = updateMinMax(min, max, weights1._edge2value[t][edge])
                    (min, max) = updateMinMax(min, max, weights2._edge2value[t][edge])
# plot
print "data range: " + str(min) + " - " + str(max)
if options.verbose:
print "Plotting..."
if options.time_coloring and iterable(c):
for i in range(0, len(c)):
plot(xs[i], ys[i], '.', color=c[i], mfc=c[i])
else:
plot(xs, ys, ',', color=c)
# set axes
if options.xticks!="":
(xb, xe, xd, xs) = options.xticks.split(",")
xticks(arange(xb, xe, xd), size = xs)
if options.yticks!="":
(yb, ye, yd, ys) = options.yticks.split(",")
yticks(arange(yb, ye, yd), size = ys)
if options.xlim!="":
(xb, xe) = options.xlim.split(",")
xlim(int(xb), int(xe))
else:
xlim(min, max)
if options.ylim!="":
(yb, ye) = options.ylim.split(",")
ylim(int(yb), int(ye))
else:
ylim(min, max)
# show/save
if options.show:
show()
if options.output:
savefig(options.output);
| gpl-3.0 |
rhyolight/nupic | src/nupic/algorithms/monitor_mixin/plot.py | 20 | 5229 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Plot class used in monitor mixin framework.
"""
import os
try:
# We import in here to avoid creating a matplotlib dependency in nupic.
import matplotlib.pyplot as plt
import matplotlib.cm as cm
except ImportError:
# Suppress this optional dependency on matplotlib. NOTE we don't log this,
# because python logging implicitly adds the StreamHandler to root logger when
# calling `logging.debug`, etc., which may undermine an application's logging
# configuration.
plt = None
cm = None
class Plot(object):
  """A matplotlib figure owned by a monitor mixin, to which graphs,
  histograms and 2D arrays can be added as subplots.

  If matplotlib is not installed (see the guarded import above), ``plt``
  and ``cm`` are None and instantiating this class will fail.
  """

  def __init__(self, monitor, title, show=True):
    """
    @param monitor (MonitorMixinBase) Monitor Mixin instance that generated
                   this plot
    @param title   (string) Plot title
    @param show    (bool) if True, switch matplotlib to interactive mode and
                   display the figure immediately; drawing calls then update
                   it live
    """
    self._monitor = monitor
    self._title = title
    self._fig = self._initFigure()
    self._show = show
    if self._show:
      plt.ion()
      plt.show()

  def _initFigure(self):
    # Create the backing figure with the (possibly prefixed) title on top.
    fig = plt.figure()
    fig.suptitle(self._prettyPrintTitle())
    return fig

  def _prettyPrintTitle(self):
    # Prefix the title with the monitor's name, when it has one.
    if self._monitor.mmName is not None:
      return "[{0}] {1}".format(self._monitor.mmName, self._title)
    return self._title

  def addGraph(self, data, position=111, xlabel=None, ylabel=None):
    """ Adds a graph to the plot's figure.

    @param data See matplotlib.Axes.plot documentation.
    @param position A 3-digit number. The first two digits define a 2D grid
           where subplots may be added. The final digit specifies the nth grid
           location for the added subplot
    @param xlabel text to be displayed on the x-axis
    @param ylabel text to be displayed on the y-axis
    """
    ax = self._addBase(position, xlabel=xlabel, ylabel=ylabel)
    ax.plot(data)
    plt.draw()

  def addHistogram(self, data, position=111, xlabel=None, ylabel=None,
                   bins=None):
    """ Adds a histogram to the plot's figure.

    @param data See matplotlib.Axes.hist documentation.
    @param position A 3-digit number. The first two digits define a 2D grid
           where subplots may be added. The final digit specifies the nth grid
           location for the added subplot
    @param xlabel text to be displayed on the x-axis
    @param ylabel text to be displayed on the y-axis
    @param bins bin specification forwarded to matplotlib.Axes.hist
    """
    ax = self._addBase(position, xlabel=xlabel, ylabel=ylabel)
    ax.hist(data, bins=bins, color="green", alpha=0.8)
    plt.draw()

  def add2DArray(self, data, position=111, xlabel=None, ylabel=None, cmap=None,
                 aspect="auto", interpolation="nearest", name=None):
    """ Adds an image to the plot's figure.

    @param data a 2D array. See matplotlib.Axes.imshow documentation.
    @param position A 3-digit number. The first two digits define a 2D grid
           where subplots may be added. The final digit specifies the nth grid
           location for the added subplot
    @param xlabel text to be displayed on the x-axis
    @param ylabel text to be displayed on the y-axis
    @param cmap color map used in the rendering (default: grayscale)
    @param aspect how aspect ratio is handled during resize
    @param interpolation interpolation method
    @param name if given, the figure is also saved as log/<name>.png
           (the log/ directory is created on demand)
    """
    if cmap is None:
      # The default colormodel is an ugly blue-red model.
      cmap = cm.Greys
    ax = self._addBase(position, xlabel=xlabel, ylabel=ylabel)
    ax.imshow(data, cmap=cmap, aspect=aspect, interpolation=interpolation)
    if self._show:
      plt.draw()
    if name is not None:
      if not os.path.exists("log"):
        os.mkdir("log")
      plt.savefig("log/{name}.png".format(name=name), bbox_inches="tight",
                  figsize=(8, 6), dpi=400)

  def _addBase(self, position, xlabel=None, ylabel=None):
    """ Adds a subplot to the plot's figure at specified position.

    @param position A 3-digit number. The first two digits define a 2D grid
           where subplots may be added. The final digit specifies the nth grid
           location for the added subplot
    @param xlabel text to be displayed on the x-axis
    @param ylabel text to be displayed on the y-axis
    @returns (matplotlib.Axes) Axes instance
    """
    ax = self._fig.add_subplot(position)
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    return ax
| agpl-3.0 |
shanzhenren/ClusType | src/algorithm.py | 1 | 10630 | from collections import defaultdict
from operator import itemgetter
from math import log, sqrt
import random as rn
import time
from numpy import * # install numpy
from scipy import * # install scipy
from numpy.linalg import norm
import numpy.linalg as npl
from scipy.sparse import *
import scipy.sparse.linalg as spsl
from sklearn.preprocessing import normalize ### install from http://scikit-learn.org/stable/
def create_matrix(size_row, size_col):
    """Return an empty (all-zero) CSR sparse matrix of the given shape."""
    shape = (size_row, size_col)
    return csr_matrix(shape)
def create_dense_matrix(size_row, size_col):
    """Return a dense numpy matrix of zeros with the given shape."""
    buf = zeros((size_row, size_col))
    return mat(buf)
def set_Y(train_mid, seedMention_tid_score, mid_mention, size_row, size_col):
    """Build the sparse seed-label matrix Y for the training mentions.

    Each seeded mention gets a 1.0 in its type column; NIL seeds (type index
    size_col - 1) are down-sampled to at most 5x the number of target-type
    seeds. NOTE: uses random.sample, so the result is non-deterministic
    unless the RNG is seeded by the caller.

    :param train_mid: iterable of mention ids in the training split
    :param seedMention_tid_score: mention -> (type id, score) ground truth
    :param mid_mention: mention id -> mention surface string
    :param size_row: number of rows of Y (total number of mentions)
    :param size_col: number of type columns; the last column is NIL
    :return: csr_matrix Y of shape (size_row, size_col)
    """
    row = []
    col = []
    val = []
    num_NIL = 0
    num_target = 0
    NIL_set = set()
    for mid in train_mid:
        # in training set
        mention = mid_mention[mid]
        if mention in seedMention_tid_score:
            # in ground truth
            tid = seedMention_tid_score[mention][0]
            score = seedMention_tid_score[mention][1]
            if tid == (size_col - 1):
                # NIL: collected first, then down-sampled below
                num_NIL += 1
                # NIL_set.add((mid, tid, score))
                NIL_set.add((mid, tid, 1.0))
            else:
                num_target += 1
                row.append(mid)
                col.append(tid)
                # val.append(score)
                val.append(1.0)
    if num_target < 1:
        print 'No target type entity seeded!!!!'
    ### random sample NIL examples
    # neg_size = num_NIL
    neg_size = min(num_NIL, 5*num_target)
    # neg_size = int(min(num_NIL, num_target/(size_col-1.0)))
    neg_example = rn.sample(NIL_set, neg_size)
    for entry in neg_example:
        row.append(entry[0])
        col.append(entry[1])
        val.append(entry[2])
    Y = coo_matrix((val, (row, col)), shape = (size_row, size_col)).tocsr()
    # print Y.nnz, '#ground truth mentions in Y'
    print 'Percent Seeded Mention:', (Y.nnz+0.0)/len(mid_mention) * 100, '% of', len(mid_mention), \
        ', #target/All = ', num_target/(Y.nnz+0.0) * 100
    return Y
def update_Y_closed_form(S_M, Y, Y0, Theta, PiC, gamma, mu):
    """Closed-form block update of Y, one candidate (column of PiC) at a time.

    For each candidate j, the rows of Y belonging to that candidate are
    solved exactly: Y_j = ((1+gamma+mu)I - gamma*S_M_j)^-1 (Theta_j + mu*Y0_j).
    NOTE: mutates and returns the passed-in Y.

    :param S_M: mention-mention similarity matrix (sparse)
    :param Y: current label matrix, updated in place
    :param Y0: seed label matrix
    :param Theta: current consensus matrix (PiC*C + PiL*PL + PiR*PR)
    :param PiC: mention-to-candidate indicator matrix
    :param gamma, mu: regularization weights
    :return: the updated Y
    """
    # row = []
    # col = []
    # val = []
    for j in range(PiC.shape[1]):
        # for each candidate j, slicing to get submatrix
        mid_list = PiC[:, j].nonzero()[0].tolist()
        Y0_j = Y0[mid_list, :]
        Theta_j = Theta[mid_list, :]
        S_M_j = S_M[mid_list, :][:, mid_list]
        # Guard against densifying a submatrix too large to fit in memory;
        # candidates above the threshold are silently skipped.
        if S_M_j.shape[0] * S_M_j.shape[1] < 2520800000:
            # transform to dense matrix and solve the linear system exactly
            tmp = ((1+gamma+mu)*identity(len(mid_list)) - gamma*S_M_j).todense()
            Y_j = npl.inv(tmp) * (Theta_j + mu*Y0_j)
            Y[mid_list, :] = Y_j
        # # sparse
        # Yc = spsl.inv((1+gamma+mu)*identity(len(mid_list)) - gamma*S_M_j) * (Theta_j + mu*Y0_j)
        # Yc = spsl.spsolve( ((1+gamma+mu)*identity(len(mid_list)) - gamma*S_M_j), (Theta_j + mu*Y0_j) )
        # row_idx, col_idx = Yc.nonzero()
        # for i in range(len(row_idx)):
        #     mid = mid_list[row_idx[i]]
        #     row.append(mid)
        #     col.append(col_idx[i])
        #     val.append(Yc[row_idx[i], col_idx[i]])
        if j % 1000 == 0:
            print 'candidate', j
    # Y = coo_matrix((val, (row, col)), shape = Y0.shape).tocsr()
    return Y
def inverse_matrix(X):
    """Replace every stored (nonzero) entry of the sparse matrix X with its
    reciprocal, in place, and return X.

    NOTE(review): this is an ELEMENTWISE reciprocal, not a matrix inverse;
    the two coincide only for diagonal matrices -- presumably the Pi^T*Pi
    matrices it is applied to are diagonal; verify at the call sites.
    """
    reciprocal = 1 / X.data
    X.data[:] = reciprocal
    return X
def clustype_appx(S_L, S_R, S_M, PiC, PiL, PiR, Y0, lambda_O, gamma, mu, T, ITER, K):
    """Run the ClusType joint optimization (approximate, no clustering).

    Alternates closed-form updates of Y, C, PL and PR for a fixed number of
    iterations, printing the objective every 10 iterations.

    :param S_L, S_R: candidate-to-left/right relation-phrase similarity
    :param S_M: mention-mention similarity
    :param PiC, PiL, PiR: mention-to-candidate/left/right indicator matrices
    :param Y0: seed label matrix
    :param lambda_O, gamma, mu: regularization weights
    :param T: number of types (label columns)
    :param ITER: number of iterations (no convergence test in this variant)
    :param K: unused in this variant (kept for interface compatibility)
    :return: (Y, C, PL, PR)
    """
    PiLL = PiL.T*PiL
    PiRR = PiR.T*PiR
    ### initialization #############################################################
    m = PiC.shape[0]
    n, l = S_L.shape
    C = create_dense_matrix(n, T)
    PL = create_dense_matrix(l, T)
    PR = create_dense_matrix(l, T)
    Y = Y0.copy()
    Theta = PiC*C + PiL*PL + PiR*PR
    obj = trace(2*C.T*C + PL.T*PL + PR.T*PR - 2*C.T*S_L*PL - 2*C.T*S_R*PR) + \
        lambda_O * (norm(Y-Theta,ord='fro')**2 + mu*norm(Y-Y0,ord='fro')**2 + gamma*trace(Y.T*Y-Y.T*S_M*Y))
    ### Start algorithm #############################################################
    for i in range(ITER):
        lambda4 = 1+gamma+mu
        # Block-coordinate updates; each is the closed-form minimizer of the
        # objective with the other variables held fixed.
        Y = 1/lambda4 * (gamma*S_M*Y + Theta + mu*Y0)
        C = 1/(2+lambda_O) * ( S_L*PL + S_R*PR + lambda_O*PiC.T*(Y-PiL*PL-PiR*PR) )
        # NOTE(review): inverse_matrix is an elementwise reciprocal; this is
        # only a true inverse if (I + lambda_O*Pi^T*Pi) is diagonal -- verify.
        PL = inverse_matrix(identity(PiL.shape[1]) + lambda_O*PiLL) * (S_L.T*C + lambda_O*PiL.T*(Y-PiC*C-PiR*PR))
        PR = inverse_matrix(identity(PiR.shape[1]) + lambda_O*PiRR) * (S_R.T*C + lambda_O*PiR.T*(Y-PiC*C-PiL*PL))
        obj_old = obj
        Theta = PiC*C + PiL*PL + PiR*PR
        obj = trace(2*C.T*C + PL.T*PL + PR.T*PR - 2*C.T*S_L*PL - 2*C.T*S_R*PR) + \
            lambda_O * (norm(Y-Theta,ord='fro')**2 + mu*norm(Y-Y0,ord='fro')**2 + gamma*trace(Y.T*Y-Y.T*S_M*Y))
        if (i+1) % 10 == 0:
            print 'iter', i+1, 'obj: ', obj, 'rel obj change: ', (obj_old-obj)/obj_old
    # Y = PiC*C
    # Y = PiL*PL + PiR*PR
    Y = PiC*C + PiL*PL + PiR*PR
    return (Y, C, PL, PR)
def clustype_noClus_inner(S_L, S_R, S_M, PiC, PiL, PiR, Y0, lambda_O, gamma, mu, T, ITER, tol, C, PL, PR, Y):
    """Inner loop of ClusType (no clustering), warm-started from C/PL/PR/Y.

    Same block-coordinate updates as clustype_appx, but with a relative
    objective-change convergence test (tol) and caller-supplied initial
    values for the factor matrices.

    :param tol: relative objective change below which iteration stops
    :param C, PL, PR, Y: warm-start values (not mutated; reassigned locally)
    :return: (Y, C, PL, PR) at convergence or after ITER iterations
    """
    PiLL = PiL.T*PiL
    PiRR = PiR.T*PiR
    ### initialization #############################################################
    m = PiC.shape[0]
    n, l = S_L.shape
    Theta = PiC*C + PiL*PL + PiR*PR
    obj = trace(2*C.T*C + PL.T*PL + PR.T*PR - 2*C.T*S_L*PL - 2*C.T*S_R*PR) + \
        lambda_O * (norm(Y-Theta,ord='fro')**2 + mu*norm(Y-Y0,ord='fro')**2 + gamma*trace(Y.T*Y-Y.T*S_M*Y))
    ### Start algorithm #############################################################
    for i in range(ITER):
        lambda4 = 1+gamma+mu
        Y = 1/lambda4 * (gamma*S_M*Y + Theta + mu*Y0)
        C = 1/(2+lambda_O) * ( S_L*PL + S_R*PR + lambda_O*PiC.T*(Y-PiL*PL-PiR*PR) )
        # NOTE(review): inverse_matrix is an elementwise reciprocal; see its
        # docstring -- valid only for diagonal matrices.
        PL = inverse_matrix(identity(PiL.shape[1]) + lambda_O*PiLL) * (S_L.T*C + lambda_O*PiL.T*(Y-PiC*C-PiR*PR))
        PR = inverse_matrix(identity(PiR.shape[1]) + lambda_O*PiRR) * (S_R.T*C + lambda_O*PiR.T*(Y-PiC*C-PiL*PL))
        obj_old = obj
        Theta = PiC*C + PiL*PL + PiR*PR
        obj = trace(2*C.T*C + PL.T*PL + PR.T*PR - 2*C.T*S_L*PL - 2*C.T*S_R*PR) + \
            lambda_O * (norm(Y-Theta,ord='fro')**2 + mu*norm(Y-Y0,ord='fro')**2 + gamma*trace(Y.T*Y-Y.T*S_M*Y))
        rel = abs(obj_old - obj)/obj_old
        if (i+1) % 10 == 0:
            print '\tClusType_noClus_inner Iter', i+1, 'obj: ', obj, 'rel obj change: ', (obj_old-obj)/obj_old
        if rel < tol:
            print '  ClusType_noClus_inner Converges!'
            Y = PiC*C + PiL*PL + PiR*PR
            return (Y, C, PL, PR)
    # Y = PiC*C
    # Y = PiL*PL + PiR*PR
    Y = PiC*C + PiL*PL + PiR*PR
    print '  ClusType_noClus_inner Reach MaxIter!'
    return (Y, C, PL, PR)
def clustype_noClus_PiLR(S_L, S_R, S_M, PiC, PiL, PiR, Y0, lambda_O, gamma, mu, T, ITER):
    """Ablation of the ClusType solver where the consensus Theta keeps
    only the relation-phrase terms PiL*PL + PiR*PR (the candidate term
    PiC*C is dropped), so the C update is unregularized.

    Runs a fixed number of sweeps (no convergence test) and returns only
    the consensus label matrix Y.
    """
    ### pre-compuatation #############################################################
    m = PiC.shape[0]
    n, l = S_L.shape
    PiLL = PiL.T*PiL # l-by-l
    PiRR = PiR.T*PiR # l-by-l

    ### initialization #############################################################
    # Cold start: all factors begin at zero, Y at the seed labels.
    C = create_dense_matrix(n, T)
    PL = create_dense_matrix(l, T)
    PR = create_dense_matrix(l, T)
    Y = Y0.copy()
    theta = PiL*PL + PiR*PR
    obj = trace(2*C.T*C + PL.T*PL + PR.T*PR - 2*C.T*S_L*PL - 2*C.T*S_R*PR) + \
        lambda_O*(norm(Y-theta,ord='fro')**2 + mu*norm(Y-Y0,ord='fro')**2 + gamma*trace(Y.T*Y-Y.T*S_M*Y))

    ### Start algorithm #############################################################
    for i in range(ITER):
        lambda4 = 1+gamma+mu
        Y = 1/lambda4 * (gamma*S_M*Y + theta + mu*Y0)
        # C no longer appears in Theta, hence the simpler 1/2 factor.
        C = 1/2.0 * ( S_L*PL + S_R*PR )
        PL = inverse_matrix(identity(PiL.shape[1]) + lambda_O*PiLL) * lambda_O*PiL.T*(Y-PiR*PR)
        PR = inverse_matrix(identity(PiR.shape[1]) + lambda_O*PiRR) * lambda_O*PiR.T*(Y-PiL*PL)

        obj_old = obj
        theta = PiL*PL + PiR*PR
        obj = trace(2*C.T*C + PL.T*PL + PR.T*PR - 2*C.T*S_L*PL - 2*C.T*S_R*PR) + \
            lambda_O * (norm(Y-theta,ord='fro')**2 + mu*norm(Y-Y0,ord='fro')**2 + gamma*trace(Y.T*Y-Y.T*S_M*Y))
        if (i+1) % 10 == 0:
            print 'iter', i+1, 'obj: ', obj, 'rel obj change: ', (obj_old-obj)/obj_old

    Y = PiL*PL + PiR*PR
    return Y
def clustype_noClus_PiC(S_L, S_R, S_M, PiC, PiL, PiR, Y0, lambda_O, gamma, mu, T, ITER):
    """Ablation of the ClusType solver where the consensus is modeled
    only through the candidate term PiC*C (relation-phrase factors PL/PR
    are updated but do not enter Theta).

    Runs a fixed number of sweeps (no convergence test) and returns only
    the consensus label matrix Y = PiC*C.
    """
    ### initialization #############################################################
    m = PiC.shape[0]
    n, l = S_L.shape
    # Cold start: all factors begin at zero, Y at the seed labels.
    C = create_dense_matrix(n, T)
    PL = create_dense_matrix(l, T)
    PR = create_dense_matrix(l, T)
    Y = Y0.copy()
    obj = trace(2*C.T*C + PL.T*PL + PR.T*PR - 2*C.T*S_L*PL - 2*C.T*S_R*PR) + \
        lambda_O * (norm(Y-PiC*C,ord='fro')**2 + mu*norm(Y-Y0,ord='fro')**2 + gamma*trace(Y.T*Y-Y.T*S_M*Y))

    ### Start algorithm #############################################################
    for i in range(ITER):
        lambda4 = 1+gamma+mu
        Y = 1/lambda4 * (gamma*S_M*Y + PiC*C + mu*Y0)
        C = 1/(2+lambda_O) * ( S_L*PL + S_R*PR + lambda_O*PiC.T*Y )
        # PL/PR reduce to plain propagation since they are absent from Theta.
        PL = S_L.T*C
        PR = S_R.T*C

        obj_old = obj
        obj = trace(2*C.T*C + PL.T*PL + PR.T*PR - 2*C.T*S_L*PL - 2*C.T*S_R*PR) + \
            lambda_O * (norm(Y-PiC*C,ord='fro')**2 + mu*norm(Y-Y0,ord='fro')**2 + gamma*trace(Y.T*Y-Y.T*S_M*Y))
        if (i+1) % 10 == 0:
            print 'iter', i+1, 'obj: ', obj, 'rel obj change: ', (obj_old-obj)/obj_old

    Y = PiC*C
    return Y
def clustype_onlycandidate(S_L, S_R, PiC, PiL, PiR, Y0, T, ITER):
    """Candidate-only baseline: propagate scores through the left/right
    relation graphs while anchoring C to the seed projection
    C0 = PiC.T*Y0 with weight `u`.

    Runs a fixed number of sweeps and returns (Y, C, PL, PR) with
    Y = PiC*C.
    """
    ### pre-compuatation #############################################################
    # Anchoring weight toward the seed scores C0.
    u = 0.5 # u=0.5

    ### initialization #############################################################
    m = PiC.shape[0]
    n, l = S_L.shape
    C0 = PiC.T * Y0
    C = C0.copy()
    PL = create_dense_matrix(l, T)
    PR = create_dense_matrix(l, T)
    # NOTE(review): Theta is computed but never used below.
    Theta = PiC*C + PiL*PL + PiR*PR
    obj = trace((2+u)*C.T*C + PL.T*PL + PR.T*PR - 2*C.T*S_L*PL - 2*C.T*S_R*PR - 2*u*C.T*C0 + u*C0.T*C0)

    ### Start algorithm #############################################################
    for i in range(ITER):
        # Closed-form updates; the u*C0 term pulls C back toward the seeds.
        C = 1/(2+u) * (S_L*PL + S_R*PR + u*C0)
        PL = S_L.T*C
        PR = S_R.T*C

        obj_old = obj
        obj = trace((2+u)*C.T*C + PL.T*PL + PR.T*PR - 2*C.T*S_L*PL - 2*C.T*S_R*PR - 2*u*C.T*C0 + u*C0.T*C0)
        if (i+1) % 10 == 0:
            print 'ClusType_Cand Iter', i+1, 'obj: ', obj, 'rel obj change: ', (obj_old-obj)/obj_old

    Y = PiC*C
    return (Y, C, PL, PR)
| gpl-3.0 |
neale/softmax | softmax_regression.py | 1 | 7400 | #!/usr/bin/env python
import sys
import time
import numpy as np
import random
import matplotlib.pyplot as plt
from collections import defaultdict
cost = 0.0
lrate = .1
grad = np.array([])
lambda_factor = 0.0
nclasses = 2
def vector_to_collumn(vec):
    """Return *vec* as a NumPy array copy.

    NOTE(review): the original implementation called ``A.reshape(len(A), 1)``
    and discarded the result (``reshape`` is not in-place), so despite its
    name this function has always returned a 1-D array for 1-D input.
    Callers such as ``vector_to_row`` depend on that behavior, so it is
    preserved here; the redundant element-by-element copy loop and the
    dead reshape call were removed.
    """
    return np.array(vec)
def vector_to_row(vec):
    """Return *vec* as a transposed NumPy array copy.

    For 1-D input the transpose is a no-op, so this simply yields a 1-D
    copy of the data (matching the behavior of composing
    ``vector_to_collumn`` with ``transpose`` as the original code did).
    """
    return np.array(vec).transpose()
def rows_collumns(mat):
    """Return the (row_count, column_count) pair of a nested sequence."""
    n_rows = len(mat)
    n_cols = len(mat[0])
    return (n_rows, n_cols)
def vector_to_matrix(vec):
    """Transpose a 2-D nested sequence into a float64 ndarray.

    Equivalent to the original element-wise double loop
    (``mat[i][j] = vec[j][i]`` into an ``np.zeros`` target) but
    vectorized: one C-level transpose instead of an O(rows*cols)
    Python loop. ``copy()`` keeps the original's fresh-array semantics
    rather than returning a view of the input.
    """
    return np.asarray(vec, dtype=np.float64).T.copy()
def update_costFunction_gradient(mat_x, row, weights, lambda_factor):
    """Compute the regularized softmax cost and gradient.

    Results are published through the module-level globals ``cost`` and
    ``grad``, which the training loop in ``softmax`` reads afterwards.

    Parameters
    ----------
    mat_x : ndarray, shape (nfeatures, nsamples)
        Design matrix, one sample per column.
    row : sequence of int, length nsamples
        Class label of each sample.
    weights : array-like, shape (nclasses, nfeatures)
        Current parameter matrix.
    lambda_factor : float
        L2 regularization strength.
    """
    # BUG FIX: the original assigned `cost` and `grad` as locals, so the
    # module-level values consumed by softmax() were never updated.
    global cost, grad

    nsamples = len(mat_x[0])
    theta = np.asarray(weights)
    # Number of classes derived from the weights themselves: removes the
    # dependency on the module-level `nclasses` and generalizes to any
    # class count.
    num_classes = theta.shape[0]

    # Numerically stable softmax over classes (rows); broadcasting
    # replaces the original np.tile copies.
    M = np.dot(theta, mat_x)
    M = M - np.amax(M, axis=0)
    M = np.exp(M)
    M = M / np.sum(M, axis=0)

    # One-hot ground-truth matrix (num_classes x nsamples).
    groundTruth = np.zeros(shape=(num_classes, nsamples))
    for i in range(len(row)):
        groundTruth[int(row[i])][i] = 1

    # Cross-entropy plus L2 penalty.
    cost = -np.sum(groundTruth * np.log(M)) / nsamples
    cost += (lambda_factor / 2.0) * np.sum(theta ** 2)

    # Gradient of the regularized cost.
    grad = -np.dot(groundTruth - M, mat_x.transpose()) / nsamples
    grad += lambda_factor * theta
def calculate(mat_x, weights):
    """Predict the class label for each sample (column) of *mat_x*.

    Returns a float array of class indices, one per column, matching the
    dtype of the original ``np.zeros`` result buffer.

    Improvements over the original:
    - broadcasting replaces the np.tile copies and removes the dependency
      on the module-level `nclasses` (works for any class count);
    - ``np.argmax`` replaces the O(classes*samples) Python scan that used
      the Python-2-only ``sys.maxint`` sentinel (argmax also returns the
      first maximum on ties, matching the original `>` comparison).
    """
    theta = np.asarray(weights)
    scores = np.dot(theta, mat_x)
    # Numerically stable log-softmax over classes (rows).
    scores = scores - np.amax(scores, axis=0)
    log_prob = scores - np.log(np.sum(np.exp(scores), axis=0))
    return np.argmax(log_prob, axis=0).astype(float)
def softmax(vecX, vecY, testX, testY):
    """Train a softmax-regression classifier on (vecX, vecY) and print
    accuracy on (testX, testY).

    NOTE(review): several defects in this routine, flagged inline below:
    the local `grad` shadows the module-level gradient, the gradient
    check discards its results, and the accuracy print uses Python 2
    integer division.
    """
    nsamples = len(vecX)
    nfeatures = len(vecX[0])
    # change vecX and vecY into matrix or vector
    #print "################ INTERMEDIARY STATS ####################"
    #print "type of vecX:", type(vecX)
    #print "type of vecY:", type(vecY)
    #print "length of vecX:", len(vecX), '\n'
    #print "length of vecY:", len(vecY), '\n'
    y = vector_to_row(vecY)
    x = vector_to_matrix(vecX)

    # Random init of weights in [-init_epsilon, +init_epsilon].
    init_epsilon = 0.12
    weights = np.zeros(shape=(nclasses, nfeatures))
    # NOTE(review): `list` here shadows the builtin.
    weights = [[random.uniform(0, 1) for num in list] for list in weights]
    weights = np.dot(weights,2 * init_epsilon)
    weights -= init_epsilon
    # NOTE(review): this creates a LOCAL `grad` of zeros that shadows the
    # module-level gradient; `weights -= lrate * grad` below therefore
    # never changes the weights.
    grad = np.zeros(shape=(nclasses, nfeatures))

    # Gradient Checking (remember to disable this part after you're sure the
    # cost function and dJ function are correct)
    update_costFunction_gradient(x, y, weights, lambda_factor);
    dJ = np.matrix(grad);
    dJ = np.asarray(grad)
    print "\ntest!!!!\n"
    epsilon = 1e-4
    for i in range(len(weights)):
        for j in range(len(weights[0])):
            memo = weights[i][j]
            weights[i][j] = memo + epsilon;
            update_costFunction_gradient(x, y, weights, lambda_factor);
            value1 = cost;
            weights[i][j] = memo - epsilon;
            update_costFunction_gradient(x, y, weights, lambda_factor);
            value2 = cost;
            # NOTE(review): the finite-difference estimate `tp` is computed
            # but never compared against dJ — the check is a no-op.
            tp = (value1 - value2) / (2 * epsilon)
            weights[i][j] = memo;

    # Gradient-descent loop with a simple cost-plateau stopping rule.
    converge = 0
    lastcost = 0.0
    while converge < 5000:
        update_costFunction_gradient(x, y, weights, lambda_factor)
        weights -= lrate * grad
        if abs((cost - lastcost)) <= 5e-6 and converge > 0:
            break
        lastcost = cost
        converge += 1

    print "########### result ############\n"
    yT = vector_to_row(testY)
    xT = vector_to_matrix(testX)
    res = calculate(xT, weights)
    error = yT - res
    correct = len(error)
    for i in range(len(error)):
        if error[i] != 0:
            correct -= 1
    # NOTE(review): under Python 2, correct/len(error) is integer division,
    # so the printed accuracy is always 0 or 1.
    print "correct: {}, total: {}, accuracy: {}".format(correct,len(error),(correct/len(error)))
if __name__ == "__main__":
    # Data files: whitespace-separated numbers; X files hold `numofX`
    # features per sample, Y files one label per sample.
    file1 = "trainX.txt"
    file2 = "trainY.txt"
    file3 = "testX.txt"
    file4 = "testY.txt"
    #np.set_printoptions(precision=3)

    # train with file 1
    with open(file1, "r+") as f1:
        numofX = 30
        counter = 0
        array = []
        vecX = [[]]
        tpdouble = 0
        try:
            for line in f1:
                line_nums = line.split()
                for num in line_nums:
                    # NOTE(review): eval() on file contents is unsafe for
                    # untrusted input; float(num) would suffice.
                    array.append(eval(num))
        except:
            print('f1) Ignoring: malformed line: "{}"'.format(line))
        # Regroup the flat number stream into rows of numofX features.
        # NOTE(review): counter/numofX relies on Python 2 integer division;
        # under Python 3 this is a float index and breaks.
        for tpdouble in array:
            if counter/numofX >= len(vecX):
                tpvec = []
                vecX.append(tpvec)
            vecX[counter/numofX].append(tpdouble)
            counter += 1
        f1.close()

    # train with file 2
    with open(file2, "r+") as f2 :
        vecY, array = [], []
        try:
            for line in f2:
                line_nums = line.split()
                for num in line_nums:
                    array.append(eval(num))
        except:
            print('f2) Ignoring: malformed line: "{}"'.format(line))
        for tpdouble in array:
            vecY.append(tpdouble)
        f2.close()

    # Sanity checks: all rows the same width, one label per sample.
    for i in range(1, len(vecX)):
        if len(vecX[i]) != len(vecX[i - 1]):
            sys.exit(0)
    assert len(vecX) == len(vecY)
    if len(vecX) != len(vecY):
        sys.exit(0)

    #test against file 3
    with open(file3, "r") as f3:
        vecTX, array = [[]], []
        counter = 0
        try:
            for line in f3:
                line_nums = line.split()
                for num in line_nums:
                    array.append(eval(num))
        except:
            print('f3) Ignoring: malformed line: "{}"'.format(line))
        for tpdouble in array:
            if counter/numofX >= len(vecTX):
                # NOTE(review): BUG — this clobbers the loop variable
                # `tpdouble` with [] and appends the stale `tpvec` left over
                # from the f1 loop (compare the correct f1 version above).
                tpdouble = []
                vecTX.append(tpvec)
            vecTX[counter/numofX].append(tpdouble)
            counter += 1
        f3.close()

    # test against file 4
    with open(file4, "r") as f4:
        vecTY, array = [], []
        try:
            for line in f4:
                line_nums = line.split()
                for num in line_nums:
                    array.append(eval(num))
        except:
            print('f4) Ignoring: malformed line: "{}"'.format(line))
        for tpdouble in array:
            vecTY.append(tpdouble)
        f4.close()

    # Time the training + evaluation run.
    start = time.clock()
    softmax(vecX, vecY, vecTX, vecTY)
    end = time.clock()
    sys.exit(0)
| unlicense |
OFAI/million-post-corpus | experiments/src/evaluate_lstm.py | 1 | 13644 | import math
import multiprocessing
import os
import warnings
from gensim.models.word2vec import Word2Vec
import numpy
import tensorflow as tf
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from sklearn.exceptions import UndefinedMetricWarning
from sklearn.metrics import precision_score, recall_score, f1_score
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.preprocessing import OneHotEncoder
from customlogging import logger
from preprocessing import normalize, micro_tokenize
import conf
class LSTMModel(object):
    """TensorFlow-1.x graph for a single-layer LSTM text classifier.

    Embeds token-id sequences with pre-trained word2vec weights, runs a
    dropout-wrapped LSTM, takes the output at each sequence's last real
    timestep and classifies it through one fully-connected softmax layer.
    Exposes `minimize` (Adam step), `cross_entropy`, `prediction`,
    `error` and `init_op` for the training loop in `evaluate`.
    """

    def __init__(self, emb, num_classes):
        # Inputs: fixed-size batch of token-id rows, one-hot targets, and
        # the true (unpadded) length of every sequence.
        self.data = tf.placeholder(tf.int32,
            [conf.LSTM_BATCHSIZE, conf.LSTM_MAXPOSTLEN])
        self.target = tf.placeholder(tf.float32,
            [conf.LSTM_BATCHSIZE, num_classes])
        self.lengths = tf.placeholder(tf.int32, [conf.LSTM_BATCHSIZE])
        # Keep-probabilities; fed as 1.0 at inference time.
        self.dropout_lstm = tf.placeholder(tf.float32)
        self.dropout_fully = tf.placeholder(tf.float32)

        # Embedding lookup on the CPU, initialized from word2vec weights.
        with tf.device('/cpu:0'), tf.name_scope("embedding"):
            self.W_emb = tf.Variable(emb.wv.syn0, name="W")
            self.embedded = tf.nn.embedding_lookup(self.W_emb, self.data)

        self.cell = tf.contrib.rnn.LSTMCell(conf.LSTM_HIDDEN,
            state_is_tuple=True)
        # dropout for LSTM cell
        self.cell = tf.contrib.rnn.DropoutWrapper(cell=self.cell,
            output_keep_prob=self.dropout_lstm)
        # add sequence_length to dynamic_rnn
        self.val, self.state = tf.nn.dynamic_rnn(self.cell, self.embedded,
            dtype=tf.float32, sequence_length=self.lengths)

        # Gather the LSTM output at index (length - 1) of every sequence,
        # i.e. the last non-padding timestep of each batch row.
        out_size = int(self.val.get_shape()[2])
        index = tf.range(0, conf.LSTM_BATCHSIZE)
        index = index * conf.LSTM_MAXPOSTLEN + self.lengths - 1
        flat = tf.reshape(self.val, [-1, out_size])
        self.last = tf.gather(flat, index)

        # dropout for fully-connected layer
        self.last_drop = tf.nn.dropout(self.last, self.dropout_fully)

        # Final fully-connected softmax layer.
        self.weight = tf.Variable(tf.truncated_normal(
            [conf.LSTM_HIDDEN, int(self.target.get_shape()[1])]))
        self.bias = tf.Variable(
            tf.constant(0.1, shape=[self.target.get_shape()[1]]))
        self.prediction = tf.nn.softmax(
            tf.matmul(self.last_drop, self.weight) + self.bias)

        # Clipping inside the log avoids NaN from log(0).
        self.cross_entropy = -tf.reduce_sum(
            self.target * tf.log(tf.clip_by_value(self.prediction, 1e-10, 1.0)))
        self.optimizer = tf.train.AdamOptimizer(
            learning_rate=conf.LSTM_LEARNINGRATE)
        self.minimize = self.optimizer.minimize(self.cross_entropy)

        # Fraction of misclassified rows in the batch.
        self.mistakes = tf.not_equal(
            tf.argmax(self.target, 1), tf.argmax(self.prediction, 1))
        self.error = tf.reduce_mean(tf.cast(self.mistakes, tf.float32))
        self.init_op = tf.global_variables_initializer()
def plot_losses_f1s(losses, f1s_train,
                    precisions_vali, recalls_vali, f1s_vali,
                    precisions_test, recalls_test, f1s_test,
                    plotfile):
    """Render a four-panel training summary and save it to *plotfile*.

    Panels: training loss, training F1, validation precision/recall/F1,
    test precision/recall/F1. A vertical marker on every panel indicates
    the epoch with the highest validation F1.
    """
    fig, axes = plt.subplots(1, 4, figsize=(15, 5), dpi=100)

    axes[0].plot(losses)
    axes[1].plot(f1s_train)

    # The validation and test panels share the same three-line layout.
    metric_sets = [(precisions_vali, recalls_vali, f1s_vali),
                   (precisions_test, recalls_test, f1s_test)]
    for ax, (prec, rec, f1) in zip(axes[2:], metric_sets):
        ax.plot(prec, color='red', label='Precision')
        ax.plot(rec, color='green', label='Recall')
        ax.plot(f1, color='blue', label='F1')
        ax.legend(loc='best', fontsize='small')

    # indicate epoch with highest validation F1
    best_epoch = numpy.argmax(f1s_vali)
    titles = ['Loss on Training Data',
              'F1 on Training Data',
              'Evaluation on Validation Data',
              'Evaluation on Test Data']
    for ax, title in zip(axes, titles):
        ax.axvline(best_epoch, color='#ffe100')
        ax.set_title(title)
        ax.set_xlabel('Epoch')
        ax.grid()

    fig.tight_layout()
    fig.savefig(plotfile, dpi=100)
    plt.close()
def preprocess(txt):
    """Normalize and tokenize a post; never return an empty token list.

    Zero-length sequences can crash training (tf.gather), so a dummy
    token is substituted for empty posts.
    """
    tokens = micro_tokenize(normalize(txt))
    if not tokens:
        tokens = [ 'asdfasdf' ]
    return tokens
def stratified_batch_generator(X_orig, y_orig, lengths_orig, batchsize):
    """Yield shuffled (X, y, lengths) batches preserving the class ratio.

    The inputs are copied and shuffled with one shared permutation; each
    batch then draws class-0 and class-1 rows in the same proportion as
    the full data set (y is one-hot with class 0 in column 0). A final
    short batch sweeps up all remaining samples of both classes.
    """
    data = numpy.copy(X_orig)
    labels = numpy.copy(y_orig)
    seq_lens = numpy.copy(lengths_orig)

    perm = numpy.random.permutation(numpy.arange(len(labels)))
    data = data[perm]
    labels = labels[perm]
    seq_lens = seq_lens[perm]

    neg_idx = numpy.where(labels[:, 0] == 1)[0]
    pos_idx = numpy.where(labels[:, 1] == 1)[0]

    # Per-batch quota for each class, matching the global ratio.
    neg_fraction = labels[:, 0].sum() / len(labels)
    neg_per_batch = int(round(neg_fraction * batchsize))
    pos_per_batch = batchsize - neg_per_batch

    neg_pos = 0
    pos_pos = 0
    while neg_pos < len(neg_idx) and pos_pos < len(pos_idx):
        neg_stop = min(neg_pos + neg_per_batch, len(neg_idx))
        pos_stop = min(pos_pos + pos_per_batch, len(pos_idx))
        # Short final batch: take every remaining sample of both classes.
        if (neg_stop - neg_pos) + (pos_stop - pos_pos) < batchsize:
            neg_stop = len(neg_idx)
            pos_stop = len(pos_idx)
        chosen = numpy.concatenate((neg_idx[neg_pos:neg_stop],
                                    pos_idx[pos_pos:pos_stop]))
        chosen = numpy.sort(chosen)
        yield (data[chosen], labels[chosen], seq_lens[chosen])
        neg_pos = neg_stop
        pos_pos = pos_stop
def evaluate(cat, fold, txt_train, txt_test, y_train, y_test):
    """Train and evaluate the LSTM classifier for one category and fold.

    Pipeline: tokenize the raw posts in parallel, map tokens to word2vec
    embedding ids (with zero-padding/truncation to LSTM_MAXPOSTLEN),
    one-hot encode the labels, split off 20% of training data for
    validation, train for LSTM_EPOCHS with ratio-preserving batches, and
    record train/validation/test metrics per epoch. A diagnostic plot is
    saved and the test-set predictions of the epoch with the best
    validation F1 are returned ("early stopping" without stopping).
    """
    # Tokenize train/test posts in a worker pool.
    pool = multiprocessing.Pool()
    wordlists_train = pool.map(preprocess, txt_train)
    wordlists_test = pool.map(preprocess, txt_test)
    pool.close()
    pool.join()

    emb = Word2Vec.load(os.path.join(conf.W2V_DIR, 'model'))
    # add point at orign for unknown words
    emb.wv.syn0 = numpy.vstack((emb.wv.syn0,
        numpy.zeros(emb.wv.syn0.shape[1], dtype=numpy.float32)))

    # train data: replace words with embedding IDs, zero-padding and truncation
    X = numpy.zeros((len(y_train), conf.LSTM_MAXPOSTLEN), dtype=numpy.int32)
    X_lengths = numpy.zeros((len(y_train)))
    for i, words in enumerate(wordlists_train):
        X_lengths[i] = len(words)
        for j, w in enumerate(words):
            if j >= conf.LSTM_MAXPOSTLEN:
                break
            if w in emb:
                X[i,j] = emb.vocab[w].index
            else:
                # out-of-vocabulary words map to the all-zero row added above
                X[i,j] = len(emb.vocab)

    # test data: replace words with embedding IDs, zero-padding and truncation
    test_X = numpy.zeros((len(y_test), conf.LSTM_MAXPOSTLEN), dtype=numpy.int32)
    test_lengths = numpy.zeros((len(y_test)))
    for i, words in enumerate(wordlists_test):
        test_lengths[i] = len(words)
        for j, w in enumerate(words):
            if j >= conf.LSTM_MAXPOSTLEN:
                break
            if w in emb:
                test_X[i,j] = emb.vocab[w].index
            else:
                test_X[i,j] = len(emb.vocab)

    # one-hot encode y
    enc = OneHotEncoder()
    y = enc.fit_transform(y_train.reshape(-1,1)).todense()
    test_y = enc.transform(y_test.reshape(-1,1)).todense()

    # split training data 80/20 into training and validation data for early
    # stopping
    splitter = StratifiedShuffleSplit(n_splits=1, test_size=0.2,
        random_state=conf.SEED)
    train_i, vali_i = next(splitter.split(X, y_train))
    X_vali = X[vali_i,:]
    y_vali = y[vali_i,:]
    vali_lengths = X_lengths[vali_i]
    X = X[train_i,:]
    y = y[train_i,:]
    X_lengths = X_lengths[train_i]

    # Seed numpy and TF for (partial) reproducibility before graph build.
    numpy.random.seed(conf.SEED)
    tf.set_random_seed(conf.SEED)
    model = LSTMModel(emb, y.shape[1])
    # The following, in combination with
    #     export CUDA_VISIBLE_DEVICES=""
    # in the shell disables all parallelism, which leads to reproducible results
    # but takes a very long time to complete
    # sess = tf.Session(config=tf.ConfigProto(
    #     inter_op_parallelism_threads=1
    #     intra_op_parallelism_threads=1))
    sess = tf.Session()
    sess.run(model.init_op)

    no_of_batches = math.ceil(len(X) / conf.LSTM_BATCHSIZE)
    # Per-epoch metric histories.
    losses = []
    f1s_train = []
    precisions_vali = []
    recalls_vali = []
    f1s_vali = []
    precisions_test = []
    recalls_test = []
    f1s_test = []
    best_vali_f1 = -1.0
    best_y_pred = []
    for i in range(conf.LSTM_EPOCHS):
        ptr = 0
        totalloss = 0.0
        predictions = []
        true = []
        batch_gen = stratified_batch_generator(X, y, X_lengths,
            conf.LSTM_BATCHSIZE)
        for inp, out, leng in batch_gen:
            # The graph has a fixed batch size; pad a short final batch
            # with zero rows and strip them from pred/true again below.
            extra = conf.LSTM_BATCHSIZE - len(inp)
            if extra > 0:
                inp = numpy.vstack((inp, numpy.zeros((extra, inp.shape[1]))))
                out = numpy.vstack((out, numpy.zeros((extra, out.shape[1]))))
                leng = numpy.concatenate((leng, numpy.zeros(extra)))
            _, loss, pred = sess.run(
                [
                    model.minimize,
                    model.cross_entropy,
                    model.prediction
                ],
                {
                    model.data: inp,
                    model.target: out,
                    model.lengths: leng,
                    model.dropout_lstm: conf.LSTM_DROPOUT_LSTM,
                    model.dropout_fully: conf.LSTM_DROPOUT_FULLY,
                }
            )
            pred = list(numpy.argmax(pred, axis=1))
            true.extend(out)
            if extra > 0:
                pred = pred[:-extra]
                true = true[:-extra]
            predictions.extend(pred)
            totalloss += loss
        losses.append(totalloss)
        true = numpy.argmax(true, axis=1)
        # F1 can be undefined when a class is never predicted; ignore the warning.
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', category=UndefinedMetricWarning)
            f1s_train.append(f1_score(predictions, true))

        # validation set F1
        predictions = []
        ptr2 = 0
        for j in range(math.ceil(len(X_vali) / conf.LSTM_BATCHSIZE)):
            inp2 = X_vali[ptr2:ptr2+conf.LSTM_BATCHSIZE]
            leng = vali_lengths[ptr2:ptr2+conf.LSTM_BATCHSIZE]
            extra = conf.LSTM_BATCHSIZE - len(inp2)
            if extra > 0:
                inp2 = numpy.vstack((inp2, numpy.zeros((extra, inp2.shape[1]))))
                leng = numpy.concatenate((leng, numpy.zeros(extra)))
            ptr2 += conf.LSTM_BATCHSIZE
            # Inference only: dropout keep-probabilities are 1.0.
            pred = sess.run(model.prediction,
                {
                    model.data: inp2,
                    model.lengths: leng,
                    model.dropout_lstm: 1.0,
                    model.dropout_fully: 1.0,
                }
            )
            pred = list(numpy.argmax(pred, axis=1))
            if extra > 0:
                pred = pred[:-extra]
            predictions.extend(pred)
        true = numpy.argmax(y_vali, axis=1)
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', category=UndefinedMetricWarning)
            precisions_vali.append(precision_score(predictions, true))
            recalls_vali.append(recall_score(predictions, true))
            f1s_vali.append(f1_score(predictions, true))

        # test set F1
        predictions = []
        ptr2 = 0
        for j in range(math.ceil(len(test_X) / conf.LSTM_BATCHSIZE)):
            inp2 = test_X[ptr2:ptr2+conf.LSTM_BATCHSIZE]
            leng = test_lengths[ptr2:ptr2+conf.LSTM_BATCHSIZE]
            extra = conf.LSTM_BATCHSIZE - len(inp2)
            if extra > 0:
                inp2 = numpy.vstack((inp2, numpy.zeros((extra, inp2.shape[1]))))
                leng = numpy.concatenate((leng, numpy.zeros(extra)))
            ptr2 += conf.LSTM_BATCHSIZE
            pred = sess.run(model.prediction,
                {
                    model.data: inp2,
                    model.lengths: leng,
                    model.dropout_lstm: 1.0,
                    model.dropout_fully: 1.0,
                }
            )
            pred = list(numpy.argmax(pred, axis=1))
            if extra > 0:
                pred = pred[:-extra]
            predictions.extend(pred)
        true = numpy.argmax(test_y, axis=1)
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', category=UndefinedMetricWarning)
            precisions_test.append(precision_score(predictions, true))
            recalls_test.append(recall_score(predictions, true))
            f1s_test.append(f1_score(predictions, true))

        # "early stopping" (not really stopping)
        if f1s_vali[-1] > best_vali_f1:
            best_y_pred = predictions
            best_vali_f1 = f1s_vali[-1]
            logger.debug('New best Validation F1: %f', best_vali_f1)
        logger.debug('Epoch %3d of %3d, total loss = %.4f, ' +
            'F1_train = %.4f, F1_test = %.4f',
            i + 1, conf.LSTM_EPOCHS, totalloss, f1s_train[-1], f1s_test[-1])

    # Persist the per-epoch diagnostic plot.
    if not os.path.exists(conf.LSTM_PLOTDIR):
        os.mkdir(conf.LSTM_PLOTDIR)
    plotfile = os.path.join(conf.LSTM_PLOTDIR,
        'plot_%s_%d.png' % (cat, fold))
    plot_losses_f1s(
        losses, f1s_train,
        precisions_vali, recalls_vali, f1s_vali,
        precisions_test, recalls_test, f1s_test,
        plotfile
    )

    # Tear down the TF graph so the next fold starts clean.
    sess.close()
    del model
    tf.reset_default_graph()
    return best_y_pred
| mit |
agutieda/QuantEcon.py | quantecon/estspec.py | 7 | 4856 | """
Filename: estspec.py
Authors: Thomas Sargent, John Stachurski
Functions for working with periodograms of scalar data.
"""
from __future__ import division, print_function
import numpy as np
from numpy.fft import fft
from pandas import ols, Series
def smooth(x, window_len=7, window='hanning'):
    """
    Smooth the data in x using convolution with a window of requested
    size and type.

    Parameters
    ----------
    x : array_like(float)
        A flat NumPy array containing the data to smooth
    window_len : scalar(int), optional
        An odd integer giving the length of the window.  Defaults to 7.
    window : string
        A string giving the window type. Possible values are 'flat',
        'hanning', 'hamming', 'bartlett' or 'blackman'

    Returns
    -------
    array_like(float)
        The smoothed values

    Notes
    -----
    Application of the smoothing window at the top and bottom of x is
    done by reflecting x around these points to extend it sufficiently
    in each direction.
    """
    if len(x) < window_len:
        raise ValueError("Input vector length must be >= window length.")

    if window_len < 3:
        raise ValueError("Window length must be at least 3.")

    if window_len % 2 == 0:
        # An even window has no center point; bump to the next odd length.
        window_len += 1
        print("Window length reset to {}".format(window_len))

    window_makers = {'hanning': np.hanning,
                     'hamming': np.hamming,
                     'bartlett': np.bartlett,
                     'blackman': np.blackman,
                     'flat': np.ones  # moving average
                     }

    # === Reflect x around x[0] and x[-1] prior to convolution === #
    half = int(window_len / 2)
    head = x[:half]     # First half-window of elements
    tail = x[-half:]    # Last half-window of elements
    padded = np.concatenate((head[::-1], x, tail[::-1]))

    # === Select window values === #
    maker = window_makers.get(window)
    if maker is None:
        msg = "Unrecognized window type '{}'".format(window)
        print(msg + " Defaulting to hanning")
        maker = window_makers['hanning']
    w = maker(window_len)

    return np.convolve(w / w.sum(), padded, mode='valid')
def periodogram(x, window=None, window_len=7):
    """
    Computes the periodogram

    .. math::

        I(w) = (1 / n) | sum_{t=0}^{n-1} x_t e^{itw} |^2

    at the Fourier frequences w_j := 2 pi j / n, j = 0, ..., n - 1,
    using the fast Fourier transform.  Only the frequences w_j in [0,
    pi] and corresponding values I(w_j) are returned.  If a window type
    is given then smoothing is performed.

    Parameters
    ----------
    x : array_like(float)
        A flat NumPy array containing the data to smooth
    window_len : scalar(int), optional(default=7)
        An odd integer giving the length of the window.  Defaults to 7.
    window : string
        A string giving the window type. Possible values are 'flat',
        'hanning', 'hamming', 'bartlett' or 'blackman'

    Returns
    -------
    w : array_like(float)
        Fourier frequences at which periodogram is evaluated
    I_w : array_like(float)
        Values of periodogram at the Fourier frequences

    """
    n = len(x)
    freqs = 2 * np.pi * np.arange(n) / n        # Fourier frequencies
    intensities = np.abs(fft(x)) ** 2 / n
    half = int(n / 2) + 1                       # keep only values on [0, pi]
    w, I_w = freqs[:half], intensities[:half]
    if window:
        I_w = smooth(I_w, window_len=window_len, window=window)
    return w, I_w
def ar_periodogram(x, window='hanning', window_len=7):
    """
    Compute periodogram from data x, using prewhitening, smoothing and
    recoloring.  The data is fitted to an AR(1) model for prewhitening,
    and the residuals are used to compute a first-pass periodogram with
    smoothing.  The fitted coefficients are then used for recoloring.

    Parameters
    ----------
    x : array_like(float)
        A flat NumPy array containing the data to smooth
    window_len : scalar(int), optional
        An odd integer giving the length of the window.  Defaults to 7.
    window : string
        A string giving the window type. Possible values are 'flat',
        'hanning', 'hamming', 'bartlett' or 'blackman'

    Returns
    -------
    w : array_like(float)
        Fourier frequences at which periodogram is evaluated
    I_w : array_like(float)
        Values of periodogram at the Fourier frequences

    """
    # === run regression === #
    # BUG FIX: pandas.ols was deprecated in pandas 0.20 and later removed,
    # so the original `from pandas import ols` path no longer works.
    # Plain least squares gives identical coefficients and residuals
    # (the former nw_lags=1 option only affected standard errors, which
    # were never used here).
    x_current, x_lagged = x[1:], x[:-1]  # x_t and x_{t-1}
    A = np.column_stack((np.ones(len(x_lagged)), x_lagged))  # intercept + slope
    beta = np.linalg.lstsq(A, x_current, rcond=None)[0]
    e_hat = x_current - np.dot(A, beta)  # regression residuals
    phi = beta[1]                        # AR(1) coefficient

    # === compute periodogram on residuals === #
    w, I_w = periodogram(e_hat, window=window, window_len=window_len)

    # === recolor and return === #
    I_w = I_w / np.abs(1 - phi * np.exp(1j * w)) ** 2

    return w, I_w
| bsd-3-clause |
jakereimer/pipeline | python/pipeline/notify.py | 5 | 1997 | import datajoint as dj
from datajoint.jobs import key_hash
from . import experiment
schema = dj.schema('pipeline_notification', locals())
# Decorator for notification functions. Ignores exceptions.
def ignore_exceptions(f):
    """Decorator for notification functions: swallows any exception raised
    by the wrapped callable (printing it) and returns None in that case.

    Notifications are best-effort; a Slack hiccup must never crash the
    pipeline, hence the deliberately broad handler.
    """
    import functools

    # functools.wraps preserves the wrapped function's name/docstring,
    # which the original plain wrapper lost.
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except Exception as e:
            print('Ignored exception:', e)
    return wrapper
@schema
class SlackConnection(dj.Manual):
    # Lookup of Slack workspaces: maps a slack domain to the bot API key
    # used to open a connection for posting notifications.
    definition = """
    # slack domain and api key for notification
    domain : varchar(128) # slack domain
    ---
    api_key : varchar(128) # api key for bot connection
    """
@schema
class SlackUser(dj.Manual):
    # Maps a pipeline person to their Slack handle and workspace connection.
    definition = """
    # information for user notification
    -> experiment.Person
    ---
    slack_user : varchar(128) # user on slack
    -> SlackConnection
    """

    def notify(self, message=None, file=None, file_title=None, file_comment=None,
               channel=None):
        """Send a Slack DM to this user (and optionally also post to
        *channel*). A text message is posted when *message* is non-empty;
        a file upload is posted when *file* is given. Silently does
        nothing if the restricted relation is empty (unknown user).
        """
        if self: # user exists
            # Local import keeps `slacker` an optional dependency for
            # pipelines that never notify.
            from slacker import Slacker
            api_key, user = (self * SlackConnection()).fetch1('api_key', 'slack_user')
            s = Slacker(api_key, timeout=60)
            # Always DM the user; append the extra channel when requested.
            channels = ['@' + user]
            if channel is not None:
                channels.append(channel)
            for ch in channels:
                if message: # None or ''
                    s.chat.post_message(ch, message, as_user=True)
                if file is not None:
                    s.files.upload(file_=file, channels=ch, title=file_title,
                                   initial_comment=file_comment)
def temporary_image(array, key):
    """Render *array* as a grayscale matrix plot and save it under /tmp.

    The filename is derived from the datajoint key hash, so repeated
    calls for the same key overwrite the same file. Returns the path of
    the saved PNG (suitable for SlackUser.notify file uploads).
    """
    import matplotlib
    # Force a non-interactive backend; this runs on headless workers.
    matplotlib.rcParams['backend'] = 'Agg'
    import matplotlib.pyplot as plt
    import seaborn as sns
    with sns.axes_style('white'):
        plt.matshow(array, cmap='gray')
        plt.axis('off')
        filename = '/tmp/' + key_hash(key) + '.png'
        plt.savefig(filename)
    # Undo seaborn's global rcParams changes.
    sns.reset_orig()
    return filename
| lgpl-3.0 |
shusenl/scikit-learn | sklearn/tests/test_common.py | 70 | 7717 | """
General tests for all estimators in sklearn.
"""
# Authors: Andreas Mueller <amueller@ais.uni-bonn.de>
# Gael Varoquaux gael.varoquaux@normalesup.org
# License: BSD 3 clause
from __future__ import print_function
import os
import warnings
import sys
import pkgutil
from sklearn.externals.six import PY3
from sklearn.utils.testing import assert_false, clean_warning_registry
from sklearn.utils.testing import all_estimators
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import ignore_warnings
import sklearn
from sklearn.cluster.bicluster import BiclusterMixin
from sklearn.linear_model.base import LinearClassifierMixin
from sklearn.utils.estimator_checks import (
_yield_all_checks,
CROSS_DECOMPOSITION,
check_parameters_default_constructible,
check_class_weight_balanced_linear_classifier,
check_transformer_n_iter,
check_non_transformer_estimators_n_iter,
check_get_params_invariance,
check_fit2d_predict1d,
check_fit1d_1sample)
def test_all_estimator_no_base_class():
    """Abstract base classes must not leak out of all_estimators()."""
    for name, Estimator in all_estimators():
        is_base = name.lower().startswith('base')
        msg = ("Base estimators such as {0} should not be included"
               " in all_estimators").format(name)
        assert_false(is_base, msg=msg)
def test_all_estimators():
    """Yield a default-constructibility check for every estimator.

    Also sanity-checks that estimator introspection actually found
    something (guards against a silently broken registry).
    """
    estimators = all_estimators(include_meta_estimators=True)

    assert_greater(len(estimators), 0)

    for name, Estimator in estimators:
        # Some estimators cannot be sensibly constructed with defaults;
        # the check itself knows how to handle those cases.
        yield check_parameters_default_constructible, name, Estimator
def test_non_meta_estimators():
    """Yield the full battery of input-validation checks per estimator.

    Bicluster estimators and private names are excluded.
    """
    for name, Estimator in all_estimators():
        if issubclass(Estimator, BiclusterMixin):
            continue
        if name.startswith("_"):
            continue
        for check in _yield_all_checks(name, Estimator):
            yield check, name, Estimator
def test_configure():
    # Smoke test the 'configure' step of setup, this tests all the
    # 'configure' functions in the setup.pys in the scikit
    cwd = os.getcwd()
    setup_path = os.path.abspath(os.path.join(sklearn.__path__[0], '..'))
    setup_filename = os.path.join(setup_path, 'setup.py')
    # Running from an installed package (no setup.py alongside the
    # sources): nothing to smoke-test.
    if not os.path.exists(setup_filename):
        return
    try:
        os.chdir(setup_path)
        old_argv = sys.argv
        # Run the equivalent of `python setup.py config`.
        sys.argv = ['setup.py', 'config']
        clean_warning_registry()
        with warnings.catch_warnings():
            # The configuration spits out warnings when not finding
            # Blas/Atlas development headers
            warnings.simplefilter('ignore', UserWarning)
            if PY3:
                with open('setup.py') as f:
                    exec(f.read(), dict(__name__='__main__'))
            else:
                execfile('setup.py', dict(__name__='__main__'))
    finally:
        # Always restore argv and the working directory, even on failure.
        sys.argv = old_argv
        os.chdir(cwd)
def test_class_weight_balanced_linear_classifiers():
    classifiers = all_estimators(type_filter='classifier')

    clean_warning_registry()
    with warnings.catch_warnings(record=True):
        # Keep only linear classifiers that accept class_weight;
        # instantiating each estimator may emit deprecation warnings,
        # hence the recording catch_warnings context.
        linear_classifiers = [
            (name, clazz)
            for name, clazz in classifiers
            if 'class_weight' in clazz().get_params().keys()
            and issubclass(clazz, LinearClassifierMixin)]

    for name, Classifier in linear_classifiers:
        if name == "LogisticRegressionCV":
            # Contrary to RidgeClassifierCV, LogisticRegressionCV use actual
            # CV folds and fit a model for each CV iteration before averaging
            # the coef. Therefore it is expected to not behave exactly as the
            # other linear model.
            continue
        yield check_class_weight_balanced_linear_classifier, name, Classifier
@ignore_warnings
def test_import_all_consistency():
    """Every name advertised in a package's __all__ must actually resolve
    in that package's namespace (test modules are skipped)."""
    walker = pkgutil.walk_packages(path=sklearn.__path__, prefix='sklearn.',
                                   onerror=lambda _: None)
    modnames = [name for _, name, _ in walker]
    for modname in modnames + ['sklearn']:
        if ".tests." in modname:
            continue
        package = __import__(modname, fromlist="dummy")
        for name in getattr(package, '__all__', ()):
            if getattr(package, name, None) is None:
                raise AttributeError(
                    "Module '{0}' has no attribute '{1}'".format(
                        modname, name))
def test_root_import_all_completeness():
    """Every public top-level submodule must be listed in sklearn.__all__."""
    EXCEPTIONS = ('utils', 'tests', 'base', 'setup')
    for _, modname, _ in pkgutil.walk_packages(path=sklearn.__path__,
                                               onerror=lambda _: None):
        is_private = '.' in modname or modname.startswith('_')
        if is_private or modname in EXCEPTIONS:
            continue
        assert_in(modname, sklearn.__all__)
def test_non_transformer_estimators_n_iter():
    # Test that all estimators of type which are non-transformer
    # and which have an attribute of max_iter, return the attribute
    # of n_iter atleast 1.
    for est_type in ['regressor', 'classifier', 'cluster']:
        regressors = all_estimators(type_filter=est_type)
        for name, Estimator in regressors:
            # LassoLars stops early for the default alpha=1.0 for
            # the iris dataset.
            if name == 'LassoLars':
                estimator = Estimator(alpha=0.)
            else:
                estimator = Estimator()
            if hasattr(estimator, "max_iter"):
                # These models are dependent on external solvers like
                # libsvm and accessing the iter parameter is non-trivial.
                if name in (['Ridge', 'SVR', 'NuSVR', 'NuSVC',
                             'RidgeClassifier', 'SVC', 'RandomizedLasso',
                             'LogisticRegressionCV']):
                    continue

                # Tested in test_transformer_n_iter below
                elif (name in CROSS_DECOMPOSITION or
                      name in ['LinearSVC', 'LogisticRegression']):
                    continue

                else:
                    # Multitask models related to ENet cannot handle
                    # if y is mono-output.
                    yield (check_non_transformer_estimators_n_iter,
                           name, estimator, 'Multi' in name)
def test_transformer_n_iter():
    """Yield an ``n_iter_`` check for every transformer exposing
    ``max_iter``."""
    # These estimators depend on external solvers, so accessing the
    # iteration count is non-trivial; skip them.
    external_solver = ['Isomap', 'KernelPCA', 'LocallyLinearEmbedding',
                       'RandomizedLasso', 'LogisticRegressionCV']
    for name, Estimator in all_estimators(type_filter='transformer'):
        estimator = Estimator()
        if name not in external_solver and hasattr(estimator, "max_iter"):
            yield check_transformer_n_iter, name, estimator
def test_get_params_invariance():
    """Check that get_params(deep=False) is a subset of
    get_params(deep=True).

    Regression test for issue #4465.
    """
    for name, Estimator in all_estimators(include_meta_estimators=False,
                                          include_other=True):
        if hasattr(Estimator, 'get_params'):
            yield check_get_params_invariance, name, Estimator
| bsd-3-clause |
JaviMerino/bart | bart/common/Utils.py | 1 | 5498 | # Copyright 2015-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Utility functions for sheye"""
import trappy
import numpy as np
# pylint fails to recognize numpy members.
# pylint: disable=no-member
def init_run(trace):
    """Return a :mod:`trappy.run.Run` built from *trace*.

    :param trace: Path to a trace file, or an already constructed
        Run object (returned unchanged)
    :type trace: str, :mod:`trappy.run.Run`

    :raises ValueError: if *trace* is neither a path nor a Run
    """
    if isinstance(trace, trappy.Run):
        return trace
    if isinstance(trace, basestring):
        return trappy.Run(trace)

    raise ValueError("Invalid trace Object")
def select_window(series, window):
    """Return the slice of *series* whose index falls inside *window*.

    :param series: Input Time Series data
    :type series: :mod:`pandas.Series`

    :param window: A (start, stop) tuple; both ends are inclusive.
        A falsy window selects the whole series.
    :type window: tuple
    """
    if not window:
        return series

    start, stop = window
    index = series.index
    return series[(index >= start) & (index <= stop)]
def area_under_curve(series, sign=None, method="trapz", step="post"):
    """Return the area under the time series curve (Integral)

    :param series: The time series to be integrated
    :type series: :mod:`pandas.Series`

    :param sign: Clip the data for the area in positive
        or negative regions. Can have two values

        - `"+"`
        - `"-"`
    :type sign: str

    :param method: The method for area calculation. This can
        be any of the integration methods supported in `numpy`
        or `rect`
    :type method: str

    :param step: The step behaviour for `rect` method
    :type step: str

    *Rectangular Method*

        - Step: Post

            Consider the following time series data

            .. code::

                2            *----*----*----+
                             |              |
                1            |              *----*----+
                             |
                0  *----*----+
                   0    1    2    3    4    5    6    7

            .. code::

                import pandas as pd
                a = [0, 0, 2, 2, 2, 1, 1]
                s = pd.Series(a)

            The area under the curve is:

            .. math::

                \\sum_{k=0}^{N-1} (x_{k+1} - {x_k}) \\times f(x_k) \\\\
                (2 \\times 3) + (1 \\times 2) = 8

        - Step: Pre

            .. code::

                2      +----*----*----*
                       |              |
                1      |              +----*----*----+
                       |
                0 *----*
                  0    1    2    3    4    5    6    7

            .. code::

                import pandas as pd
                a = [0, 0, 2, 2, 2, 1, 1]
                s = pd.Series(a)

            The area under the curve is:

            .. math::

                \\sum_{k=1}^{N} (x_k - x_{k-1}) \\times f(x_k) \\\\
                (2 \\times 3) + (1 \\times 3) = 9
    """
    if sign == "+":
        series = series.clip(lower=0)
    elif sign == "-":
        # BUG FIX: this branch previously tested ``sign == "="`` and was
        # therefore unreachable for the documented "-" value.
        series = series.clip(upper=0)

    series = series.dropna()

    if method == "rect":
        if step == "post":
            # Left Riemann sum: each value holds until the next sample.
            values = series.values[:-1]
        elif step == "pre":
            # Right Riemann sum: each value holds since the previous sample.
            values = series.values[1:]
        else:
            raise ValueError("Invalid Value for step: {}".format(step))
        return (values * np.diff(series.index)).sum()

    # NumPy >= 2.0 renamed ``trapz`` to ``trapezoid``; keep the documented
    # default working on both old and new versions.
    if method == "trapz" and not hasattr(np, "trapz"):
        method = "trapezoid"

    if hasattr(np, method):
        np_integ_method = getattr(np, method)
        # BUG FIX: the result was previously computed but never returned,
        # so every numpy-based method (including the default) yielded None.
        return np_integ_method(series.values, series.index)
    else:
        raise ValueError("Invalid method: {}".format(method))
def interval_sum(series, value=None):
    """A function that returns the sum of the
    intervals where the value of series is equal to
    the expected value. Consider the following time
    series data

    ====== =======
     Time   Value
    ====== =======
      1      0
      2      0
      3      1
      4      1
      5      1
      6      1
      7      0
      8      1
      9      0
      10     1
      11     1
    ====== =======

    1 occurs contiguously between the following indices
    the series:

        - 3 to 6
        - 10 to 11

    There for `interval_sum` for the value 1 is

    .. math::

        (6 - 3) + (11 - 10) = 4

    :param series: The time series data
    :type series: :mod:`pandas.Series`

    :param value: The value to checked for in the series. If the
        value is None, the truth value of the elements in the
        series will be used
    :type value: element
    """
    index = series.index
    array = series.values

    # Positions at which the value changes, plus the final sample: these
    # are the (inclusive) end positions of every run of equal values.
    time_splits = np.append(np.where(np.diff(array) != 0), len(array) - 1)

    prev = 0
    time = 0
    for split in time_splits:
        # Value of the run ending at ``split`` (a run is constant, so the
        # sample at its end position is representative).
        first_val = series[index[split]]
        # NOTE(review): a falsy ``value`` (e.g. 0, not just None) falls back
        # to truth-testing the run's value — confirm that is intended.
        check = (first_val == value) if value else first_val
        # ``prev == split`` means the run is a single sample and spans no
        # time, so it contributes nothing to the total.
        if check and prev != split:
            time += index[split] - index[prev]
        prev = split + 1
    return time
| apache-2.0 |
softwaresaved/SSINetworkGraphics | Fellows/Python/home_inst_map.py | 1 | 1379 | #!/usr/bin/python2.7
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
import numpy as np
# Plot the home institutions of eight fellows as labelled points on an
# orthographic world map.
#
# set up orthographic map projection with
# perspective of satellite looking down at 50N, 100W.
# use low resolution coastlines.
# don't plot features that are smaller than 10 square km (area_thresh).
# NOTE: the local name 'map' shadows the Python builtin of the same name.
map = Basemap(projection='ortho', lat_0 = 50, lon_0 = -100,
              resolution = 'l', area_thresh = 10.)
# draw coastlines, country boundaries, fill continents.
map.drawcoastlines()
map.drawcountries()
map.fillcontinents(color = 'coral')
# draw the edge of the map projection region (the projection limb)
map.drawmapboundary()
# draw lat/lon grid lines every 30 degrees.
map.drawmeridians(np.arange(0, 360, 30))
map.drawparallels(np.arange(-90, 90, 30))
# lat/lon coordinates of eight home institutions.
lats = [51.5119,51.3796,56.3367,52.2053,54.0103,51.4247,51.5248,51.4960]
lons = [-0.11610,-2.32800,-2.82600,0.11720,-2.78560,-0.56690,-0.13360,-0.17640]
cities=['London, UK','Bath, UK','St Andrews, UK','Cambridge, UK',
        'Lancaster, UK','London, UK','London, UK','London, UK']
# compute the native map projection coordinates for cities.
x,y = map(lons,lats)
# plot filled circles at the locations of the cities.
map.plot(x,y,'bo')
# plot the names of those eight cities, slightly offset from each marker.
for name,xpt,ypt in zip(cities,x,y):
    plt.text(xpt+5000,ypt+5000,name)
plt.show()
| bsd-3-clause |
nmayorov/scikit-learn | sklearn/linear_model/stochastic_gradient.py | 34 | 50761 | # Authors: Peter Prettenhofer <peter.prettenhofer@gmail.com> (main author)
# Mathieu Blondel (partial_fit support)
#
# License: BSD 3 clause
"""Classification and regression using Stochastic Gradient Descent (SGD)."""
import numpy as np
from abc import ABCMeta, abstractmethod
from ..externals.joblib import Parallel, delayed
from .base import LinearClassifierMixin, SparseCoefMixin
from .base import make_dataset
from ..base import BaseEstimator, RegressorMixin
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import (check_array, check_random_state, check_X_y,
deprecated)
from ..utils.extmath import safe_sparse_dot
from ..utils.multiclass import _check_partial_fit_first_call
from ..utils.validation import check_is_fitted
from ..externals import six
from .sgd_fast import plain_sgd, average_sgd
from ..utils.fixes import astype
from ..utils import compute_class_weight
from .sgd_fast import Hinge
from .sgd_fast import SquaredHinge
from .sgd_fast import Log
from .sgd_fast import ModifiedHuber
from .sgd_fast import SquaredLoss
from .sgd_fast import Huber
from .sgd_fast import EpsilonInsensitive
from .sgd_fast import SquaredEpsilonInsensitive
# Integer codes for the learning-rate schedules understood by the Cython
# routines in ``sgd_fast``.
LEARNING_RATE_TYPES = {"constant": 1, "optimal": 2, "invscaling": 3,
                       "pa1": 4, "pa2": 5}

# Integer codes for the penalty (regularization) types in ``sgd_fast``.
PENALTY_TYPES = {"none": 0, "l2": 2, "l1": 1, "elasticnet": 3}

# Default value of ``epsilon`` parameter.
DEFAULT_EPSILON = 0.1
class BaseSGD(six.with_metaclass(ABCMeta, BaseEstimator, SparseCoefMixin)):
    """Base class for SGD classification and regression."""

    def __init__(self, loss, penalty='l2', alpha=0.0001, C=1.0,
                 l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
                 verbose=0, epsilon=0.1, random_state=None,
                 learning_rate="optimal", eta0=0.0, power_t=0.5,
                 warm_start=False, average=False):
        # Store all hyper-parameters verbatim; they are validated below by
        # ``_validate_params``.
        self.loss = loss
        self.penalty = penalty
        self.learning_rate = learning_rate
        self.epsilon = epsilon
        self.alpha = alpha
        self.C = C
        self.l1_ratio = l1_ratio
        self.fit_intercept = fit_intercept
        self.n_iter = n_iter
        self.shuffle = shuffle
        self.random_state = random_state
        self.verbose = verbose
        self.eta0 = eta0
        self.power_t = power_t
        self.warm_start = warm_start
        self.average = average
        self._validate_params()

        # Parameter arrays are allocated lazily on the first fit/partial_fit.
        self.coef_ = None
        if self.average > 0:
            # Averaged SGD keeps both the plain ("standard") parameters and
            # their running averages; also allocated lazily.
            self.standard_coef_ = None
            self.average_coef_ = None
        # iteration count for learning rate schedule
        # must not be int (e.g. if ``learning_rate=='optimal'``)
        self.t_ = None

    def set_params(self, *args, **kwargs):
        """Set hyper-parameters and re-run parameter validation."""
        super(BaseSGD, self).set_params(*args, **kwargs)
        self._validate_params()
        return self

    @abstractmethod
    def fit(self, X, y):
        """Fit model."""

    def _validate_params(self):
        """Validate input params. """
        if not isinstance(self.shuffle, bool):
            raise ValueError("shuffle must be either True or False")
        if self.n_iter <= 0:
            raise ValueError("n_iter must be > zero")
        if not (0.0 <= self.l1_ratio <= 1.0):
            raise ValueError("l1_ratio must be in [0, 1]")
        if self.alpha < 0.0:
            raise ValueError("alpha must be >= 0")
        if self.learning_rate in ("constant", "invscaling"):
            if self.eta0 <= 0.0:
                raise ValueError("eta0 must be > 0")
        if self.learning_rate == "optimal" and self.alpha == 0:
            raise ValueError("alpha must be > 0 since "
                             "learning_rate is 'optimal'. alpha is used "
                             "to compute the optimal learning rate.")

        # raises ValueError if not registered
        self._get_penalty_type(self.penalty)
        self._get_learning_rate_type(self.learning_rate)

        if self.loss not in self.loss_functions:
            raise ValueError("The loss %s is not supported. " % self.loss)

    def _get_loss_function(self, loss):
        """Get concrete ``LossFunction`` object for str ``loss``. """
        try:
            loss_ = self.loss_functions[loss]
            loss_class, args = loss_[0], loss_[1:]
            # The epsilon-parameterized losses are built with the current
            # ``self.epsilon`` rather than the table's default.
            if loss in ('huber', 'epsilon_insensitive',
                        'squared_epsilon_insensitive'):
                args = (self.epsilon, )
            return loss_class(*args)
        except KeyError:
            raise ValueError("The loss %s is not supported. " % loss)

    def _get_learning_rate_type(self, learning_rate):
        """Map a learning-rate schedule name to its integer code."""
        try:
            return LEARNING_RATE_TYPES[learning_rate]
        except KeyError:
            raise ValueError("learning rate %s "
                             "is not supported. " % learning_rate)

    def _get_penalty_type(self, penalty):
        """Map a penalty name (case-insensitive) to its integer code."""
        penalty = str(penalty).lower()
        try:
            return PENALTY_TYPES[penalty]
        except KeyError:
            raise ValueError("Penalty %s is not supported. " % penalty)

    def _validate_sample_weight(self, sample_weight, n_samples):
        """Set the sample weight array."""
        if sample_weight is None:
            # uniform sample weights
            sample_weight = np.ones(n_samples, dtype=np.float64, order='C')
        else:
            # user-provided array
            sample_weight = np.asarray(sample_weight, dtype=np.float64,
                                       order="C")
        if sample_weight.shape[0] != n_samples:
            raise ValueError("Shapes of X and sample_weight do not match.")
        return sample_weight

    def _allocate_parameter_mem(self, n_classes, n_features, coef_init=None,
                                intercept_init=None):
        """Allocate mem for parameters; initialize if provided."""
        if n_classes > 2:
            # allocate coef_ for multi-class
            if coef_init is not None:
                coef_init = np.asarray(coef_init, order="C")
                if coef_init.shape != (n_classes, n_features):
                    raise ValueError("Provided ``coef_`` does not match "
                                     "dataset. ")
                self.coef_ = coef_init
            else:
                self.coef_ = np.zeros((n_classes, n_features),
                                      dtype=np.float64, order="C")

            # allocate intercept_ for multi-class
            if intercept_init is not None:
                intercept_init = np.asarray(intercept_init, order="C")
                if intercept_init.shape != (n_classes, ):
                    raise ValueError("Provided intercept_init "
                                     "does not match dataset.")
                self.intercept_ = intercept_init
            else:
                self.intercept_ = np.zeros(n_classes, dtype=np.float64,
                                           order="C")
        else:
            # allocate coef_ for binary problem
            if coef_init is not None:
                coef_init = np.asarray(coef_init, dtype=np.float64,
                                       order="C")
                coef_init = coef_init.ravel()
                if coef_init.shape != (n_features,):
                    raise ValueError("Provided coef_init does not "
                                     "match dataset.")
                self.coef_ = coef_init
            else:
                self.coef_ = np.zeros(n_features,
                                      dtype=np.float64,
                                      order="C")

            # allocate intercept_ for binary problem
            if intercept_init is not None:
                intercept_init = np.asarray(intercept_init, dtype=np.float64)
                if intercept_init.shape != (1,) and intercept_init.shape != ():
                    raise ValueError("Provided intercept_init "
                                     "does not match dataset.")
                self.intercept_ = intercept_init.reshape(1,)
            else:
                self.intercept_ = np.zeros(1, dtype=np.float64, order="C")

        # initialize average parameters
        if self.average > 0:
            # The running averages mirror the shapes of the plain parameters.
            self.standard_coef_ = self.coef_
            self.standard_intercept_ = self.intercept_
            self.average_coef_ = np.zeros(self.coef_.shape,
                                          dtype=np.float64,
                                          order="C")
            self.average_intercept_ = np.zeros(self.standard_intercept_.shape,
                                               dtype=np.float64,
                                               order="C")
def _prepare_fit_binary(est, y, i):
    """Initialization for fit_binary.

    Builds the +1/-1 target vector for the i'th class and selects the
    slices of the estimator's parameter arrays that the binary fit will
    update.

    Returns y, coef, intercept (plus the running-average parameters, which
    are None/0 unless ``est.average`` is enabled).
    """
    # +1 for samples of class i, -1 for everything else.
    y_i = np.ones(y.shape, dtype=np.float64, order="C")
    y_i[y != est.classes_[i]] = -1.0

    average_coef = None
    average_intercept = 0

    if len(est.classes_) == 2:
        # Binary problem: a single coefficient row / intercept is used.
        if est.average:
            coef = est.standard_coef_.ravel()
            intercept = est.standard_intercept_[0]
            average_coef = est.average_coef_.ravel()
            average_intercept = est.average_intercept_[0]
        else:
            coef = est.coef_.ravel()
            intercept = est.intercept_[0]
    else:
        # Multi-class (OvA): pick the row belonging to class i.
        if est.average:
            coef = est.standard_coef_[i]
            intercept = est.standard_intercept_[i]
            average_coef = est.average_coef_[i]
            average_intercept = est.average_intercept_[i]
        else:
            coef = est.coef_[i]
            intercept = est.intercept_[i]

    return y_i, coef, intercept, average_coef, average_intercept
def fit_binary(est, i, X, y, alpha, C, learning_rate, n_iter,
               pos_weight, neg_weight, sample_weight):
    """Fit a single binary classifier.

    The i'th class is considered the "positive" class.

    Returns the fitted (coef, intercept) pair.  When ``est.average`` is
    enabled, the estimator's stored average intercept for class ``i`` is
    also updated as a side effect.
    """
    # if average is not true, average_coef, and average_intercept will be
    # unused
    y_i, coef, intercept, average_coef, average_intercept = \
        _prepare_fit_binary(est, y, i)
    assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0]
    dataset, intercept_decay = make_dataset(X, y_i, sample_weight)

    # Translate the string hyper-parameters into the integer codes expected
    # by the Cython routines.
    penalty_type = est._get_penalty_type(est.penalty)
    learning_rate_type = est._get_learning_rate_type(learning_rate)

    # XXX should have random_state_!
    random_state = check_random_state(est.random_state)
    # numpy mtrand expects a C long which is a signed 32 bit integer under
    # Windows
    seed = random_state.randint(0, np.iinfo(np.int32).max)

    if not est.average:
        # Plain SGD: the Cython routine returns the fitted parameters.
        return plain_sgd(coef, intercept, est.loss_function,
                         penalty_type, alpha, C, est.l1_ratio,
                         dataset, n_iter, int(est.fit_intercept),
                         int(est.verbose), int(est.shuffle), seed,
                         pos_weight, neg_weight,
                         learning_rate_type, est.eta0,
                         est.power_t, est.t_, intercept_decay)
    else:
        # Averaged SGD: track both the plain and the averaged parameters.
        standard_coef, standard_intercept, average_coef, \
            average_intercept = average_sgd(coef, intercept, average_coef,
                                            average_intercept,
                                            est.loss_function, penalty_type,
                                            alpha, C, est.l1_ratio, dataset,
                                            n_iter, int(est.fit_intercept),
                                            int(est.verbose), int(est.shuffle),
                                            seed, pos_weight, neg_weight,
                                            learning_rate_type, est.eta0,
                                            est.power_t, est.t_,
                                            intercept_decay,
                                            est.average)

        # Store the averaged intercept back on the estimator; the averaged
        # coefficients live in ``est.average_coef_`` (written in place).
        if len(est.classes_) == 2:
            est.average_intercept_[0] = average_intercept
        else:
            est.average_intercept_[i] = average_intercept

        return standard_coef, standard_intercept
class BaseSGDClassifier(six.with_metaclass(ABCMeta, BaseSGD,
                                           LinearClassifierMixin)):
    """Shared fitting machinery for SGD-based linear classifiers."""

    # Mapping from loss name to (loss class, default constructor args).
    loss_functions = {
        "hinge": (Hinge, 1.0),
        "squared_hinge": (SquaredHinge, 1.0),
        "perceptron": (Hinge, 0.0),
        "log": (Log, ),
        "modified_huber": (ModifiedHuber, ),
        "squared_loss": (SquaredLoss, ),
        "huber": (Huber, DEFAULT_EPSILON),
        "epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
        "squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
                                        DEFAULT_EPSILON),
    }

    @abstractmethod
    def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
                 fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
                 epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
                 learning_rate="optimal", eta0=0.0, power_t=0.5,
                 class_weight=None, warm_start=False, average=False):
        super(BaseSGDClassifier, self).__init__(loss=loss, penalty=penalty,
                                                alpha=alpha, l1_ratio=l1_ratio,
                                                fit_intercept=fit_intercept,
                                                n_iter=n_iter, shuffle=shuffle,
                                                verbose=verbose,
                                                epsilon=epsilon,
                                                random_state=random_state,
                                                learning_rate=learning_rate,
                                                eta0=eta0, power_t=power_t,
                                                warm_start=warm_start,
                                                average=average)
        self.class_weight = class_weight
        self.classes_ = None
        self.n_jobs = int(n_jobs)

    def _partial_fit(self, X, y, alpha, C,
                     loss, learning_rate, n_iter,
                     classes, sample_weight,
                     coef_init, intercept_init):
        """Validate inputs and run ``n_iter`` epochs of SGD, dispatching to
        the binary or one-vs-all multi-class procedure."""
        X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
        n_samples, n_features = X.shape

        self._validate_params()
        _check_partial_fit_first_call(self, classes)
        n_classes = self.classes_.shape[0]

        # Allocate datastructures from input arguments
        self._expanded_class_weight = compute_class_weight(self.class_weight,
                                                           self.classes_, y)
        sample_weight = self._validate_sample_weight(sample_weight, n_samples)

        if self.coef_ is None or coef_init is not None:
            self._allocate_parameter_mem(n_classes, n_features,
                                         coef_init, intercept_init)
        elif n_features != self.coef_.shape[-1]:
            raise ValueError("Number of features %d does not match previous "
                             "data %d." % (n_features, self.coef_.shape[-1]))

        self.loss_function = self._get_loss_function(loss)
        if self.t_ is None:
            self.t_ = 1.0

        # delegate to concrete training procedure
        if n_classes > 2:
            self._fit_multiclass(X, y, alpha=alpha, C=C,
                                 learning_rate=learning_rate,
                                 sample_weight=sample_weight, n_iter=n_iter)
        elif n_classes == 2:
            self._fit_binary(X, y, alpha=alpha, C=C,
                             learning_rate=learning_rate,
                             sample_weight=sample_weight, n_iter=n_iter)
        else:
            raise ValueError("The number of class labels must be "
                             "greater than one.")

        return self

    def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
             intercept_init=None, sample_weight=None):
        """Full (non-incremental) fit: resets state unless warm-starting,
        then delegates to ``_partial_fit`` for ``self.n_iter`` epochs."""
        # Reset classes_ so _check_partial_fit_first_call treats this as a
        # fresh fit.
        if hasattr(self, "classes_"):
            self.classes_ = None

        X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
        n_samples, n_features = X.shape

        # labels can be encoded as float, int, or string literals
        # np.unique sorts in asc order; largest class id is positive class
        classes = np.unique(y)

        if self.warm_start and self.coef_ is not None:
            # Continue from the previous solution unless explicit inits are
            # given.
            if coef_init is None:
                coef_init = self.coef_
            if intercept_init is None:
                intercept_init = self.intercept_
        else:
            self.coef_ = None
            self.intercept_ = None

        if self.average > 0:
            self.standard_coef_ = self.coef_
            self.standard_intercept_ = self.intercept_
            self.average_coef_ = None
            self.average_intercept_ = None

        # Clear iteration count for multiple call to fit.
        self.t_ = None

        self._partial_fit(X, y, alpha, C, loss, learning_rate, self.n_iter,
                          classes, sample_weight, coef_init, intercept_init)

        return self

    def _fit_binary(self, X, y, alpha, C, sample_weight,
                    learning_rate, n_iter):
        """Fit a binary classifier on X and y. """
        coef, intercept = fit_binary(self, 1, X, y, alpha, C,
                                     learning_rate, n_iter,
                                     self._expanded_class_weight[1],
                                     self._expanded_class_weight[0],
                                     sample_weight)

        self.t_ += n_iter * X.shape[0]

        # need to be 2d
        if self.average > 0:
            # Expose either the averaged or the plain coefficients depending
            # on whether averaging has started (self.average <= t_ - 1).
            if self.average <= self.t_ - 1:
                self.coef_ = self.average_coef_.reshape(1, -1)
                self.intercept_ = self.average_intercept_
            else:
                self.coef_ = self.standard_coef_.reshape(1, -1)
                self.standard_intercept_ = np.atleast_1d(intercept)
                self.intercept_ = self.standard_intercept_
        else:
            self.coef_ = coef.reshape(1, -1)
            # intercept is a float, need to convert it to an array of length 1
            self.intercept_ = np.atleast_1d(intercept)

    def _fit_multiclass(self, X, y, alpha, C, learning_rate,
                        sample_weight, n_iter):
        """Fit a multi-class classifier by combining binary classifiers

        Each binary classifier predicts one class versus all others. This
        strategy is called OVA: One Versus All.
        """
        # Use joblib to fit OvA in parallel.
        result = Parallel(n_jobs=self.n_jobs, backend="threading",
                          verbose=self.verbose)(
            delayed(fit_binary)(self, i, X, y, alpha, C, learning_rate,
                                n_iter, self._expanded_class_weight[i], 1.,
                                sample_weight)
            for i in range(len(self.classes_)))

        for i, (_, intercept) in enumerate(result):
            self.intercept_[i] = intercept

        self.t_ += n_iter * X.shape[0]

        if self.average > 0:
            if self.average <= self.t_ - 1.0:
                self.coef_ = self.average_coef_
                self.intercept_ = self.average_intercept_
            else:
                self.coef_ = self.standard_coef_
                self.standard_intercept_ = np.atleast_1d(self.intercept_)
                self.intercept_ = self.standard_intercept_

    def partial_fit(self, X, y, classes=None, sample_weight=None):
        """Fit linear model with Stochastic Gradient Descent.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Subset of the training data

        y : numpy array, shape (n_samples,)
            Subset of the target values

        classes : array, shape (n_classes,)
            Classes across all calls to partial_fit.
            Can be obtained by via `np.unique(y_all)`, where y_all is the
            target vector of the entire dataset.
            This argument is required for the first call to partial_fit
            and can be omitted in the subsequent calls.
            Note that y doesn't need to contain all labels in `classes`.

        sample_weight : array-like, shape (n_samples,), optional
            Weights applied to individual samples.
            If not provided, uniform weights are assumed.

        Returns
        -------
        self : returns an instance of self.
        """
        # Balanced class weights need the full-dataset class frequencies,
        # which are unknowable in incremental fitting.
        if self.class_weight in ['balanced', 'auto']:
            raise ValueError("class_weight '{0}' is not supported for "
                             "partial_fit. In order to use 'balanced' weights,"
                             " use compute_class_weight('{0}', classes, y). "
                             "In place of y you can us a large enough sample "
                             "of the full training set target to properly "
                             "estimate the class frequency distributions. "
                             "Pass the resulting weights as the class_weight "
                             "parameter.".format(self.class_weight))
        return self._partial_fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss,
                                 learning_rate=self.learning_rate, n_iter=1,
                                 classes=classes, sample_weight=sample_weight,
                                 coef_init=None, intercept_init=None)

    def fit(self, X, y, coef_init=None, intercept_init=None,
            sample_weight=None):
        """Fit linear model with Stochastic Gradient Descent.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training data

        y : numpy array, shape (n_samples,)
            Target values

        coef_init : array, shape (n_classes, n_features)
            The initial coefficients to warm-start the optimization.

        intercept_init : array, shape (n_classes,)
            The initial intercept to warm-start the optimization.

        sample_weight : array-like, shape (n_samples,), optional
            Weights applied to individual samples.
            If not provided, uniform weights are assumed. These weights will
            be multiplied with class_weight (passed through the
            constructor) if class_weight is specified

        Returns
        -------
        self : returns an instance of self.
        """
        return self._fit(X, y, alpha=self.alpha, C=1.0,
                         loss=self.loss, learning_rate=self.learning_rate,
                         coef_init=coef_init, intercept_init=intercept_init,
                         sample_weight=sample_weight)
class SGDClassifier(BaseSGDClassifier, _LearntSelectorMixin):
    """Linear classifiers (SVM, logistic regression, a.o.) with SGD training.

    This estimator implements regularized linear models with stochastic
    gradient descent (SGD) learning: the gradient of the loss is estimated
    each sample at a time and the model is updated along the way with a
    decreasing strength schedule (aka learning rate). SGD allows minibatch
    (online/out-of-core) learning, see the partial_fit method.
    For best results using the default learning rate schedule, the data should
    have zero mean and unit variance.

    This implementation works with data represented as dense or sparse arrays
    of floating point values for the features. The model it fits can be
    controlled with the loss parameter; by default, it fits a linear support
    vector machine (SVM).

    The regularizer is a penalty added to the loss function that shrinks model
    parameters towards the zero vector using either the squared euclidean norm
    L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
    parameter update crosses the 0.0 value because of the regularizer, the
    update is truncated to 0.0 to allow for learning sparse models and achieve
    online feature selection.

    Read more in the :ref:`User Guide <sgd>`.

    Parameters
    ----------
    loss : str, 'hinge', 'log', 'modified_huber', 'squared_hinge',\
                'perceptron', or a regression loss: 'squared_loss', 'huber',\
                'epsilon_insensitive', or 'squared_epsilon_insensitive'
        The loss function to be used. Defaults to 'hinge', which gives a
        linear SVM.
        The 'log' loss gives logistic regression, a probabilistic classifier.
        'modified_huber' is another smooth loss that brings tolerance to
        outliers as well as probability estimates.
        'squared_hinge' is like hinge but is quadratically penalized.
        'perceptron' is the linear loss used by the perceptron algorithm.
        The other losses are designed for regression but can be useful in
        classification as well; see SGDRegressor for a description.

    penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
        The penalty (aka regularization term) to be used. Defaults to 'l2'
        which is the standard regularizer for linear SVM models. 'l1' and
        'elasticnet' might bring sparsity to the model (feature selection)
        not achievable with 'l2'.

    alpha : float
        Constant that multiplies the regularization term. Defaults to 0.0001
        Also used to compute learning_rate when set to 'optimal'.

    l1_ratio : float
        The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
        l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
        Defaults to 0.15.

    fit_intercept : bool
        Whether the intercept should be estimated or not. If False, the
        data is assumed to be already centered. Defaults to True.

    n_iter : int, optional
        The number of passes over the training data (aka epochs). The number
        of iterations is set to 1 if using partial_fit.
        Defaults to 5.

    shuffle : bool, optional
        Whether or not the training data should be shuffled after each epoch.
        Defaults to True.

    random_state : int seed, RandomState instance, or None (default)
        The seed of the pseudo random number generator to use when
        shuffling the data.

    verbose : integer, optional
        The verbosity level

    epsilon : float
        Epsilon in the epsilon-insensitive loss functions; only if `loss` is
        'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
        For 'huber', determines the threshold at which it becomes less
        important to get the prediction exactly right.
        For epsilon-insensitive, any differences between the current prediction
        and the correct label are ignored if they are less than this threshold.

    n_jobs : integer, optional
        The number of CPUs to use to do the OVA (One Versus All, for
        multi-class problems) computation. -1 means 'all CPUs'. Defaults
        to 1.

    learning_rate : string, optional
        The learning rate schedule:
        constant: eta = eta0
        optimal: eta = 1.0 / (alpha * (t + t0)) [default]
        invscaling: eta = eta0 / pow(t, power_t)
        where t0 is chosen by a heuristic proposed by Leon Bottou.

    eta0 : double
        The initial learning rate for the 'constant' or 'invscaling'
        schedules. The default value is 0.0 as eta0 is not used by the
        default schedule 'optimal'.

    power_t : double
        The exponent for inverse scaling learning rate [default 0.5].

    class_weight : dict, {class_label: weight} or "balanced" or None, optional
        Preset for the class_weight fit parameter.

        Weights associated with classes. If not given, all classes
        are supposed to have weight one.

        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``

    warm_start : bool, optional
        When set to True, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.

    average : bool or int, optional
        When set to True, computes the averaged SGD weights and stores the
        result in the ``coef_`` attribute. If set to an int greater than 1,
        averaging will begin once the total number of samples seen reaches
        average. So average=10 will begin averaging after seeing 10 samples.

    Attributes
    ----------
    coef_ : array, shape (1, n_features) if n_classes == 2 else (n_classes,\
            n_features)
        Weights assigned to the features.

    intercept_ : array, shape (1,) if n_classes == 2 else (n_classes,)
        Constants in decision function.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn import linear_model
    >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
    >>> Y = np.array([1, 1, 2, 2])
    >>> clf = linear_model.SGDClassifier()
    >>> clf.fit(X, Y)
    ... #doctest: +NORMALIZE_WHITESPACE
    SGDClassifier(alpha=0.0001, average=False, class_weight=None, epsilon=0.1,
            eta0=0.0, fit_intercept=True, l1_ratio=0.15,
            learning_rate='optimal', loss='hinge', n_iter=5, n_jobs=1,
            penalty='l2', power_t=0.5, random_state=None, shuffle=True,
            verbose=0, warm_start=False)
    >>> print(clf.predict([[-0.8, -1]]))
    [1]

    See also
    --------
    LinearSVC, LogisticRegression, Perceptron

    """

    def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
                 fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
                 epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
                 learning_rate="optimal", eta0=0.0, power_t=0.5,
                 class_weight=None, warm_start=False, average=False):
        super(SGDClassifier, self).__init__(
            loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
            fit_intercept=fit_intercept, n_iter=n_iter, shuffle=shuffle,
            verbose=verbose, epsilon=epsilon, n_jobs=n_jobs,
            random_state=random_state, learning_rate=learning_rate, eta0=eta0,
            power_t=power_t, class_weight=class_weight, warm_start=warm_start,
            average=average)

    def _check_proba(self):
        """Raise unless the model is fitted with a probabilistic loss."""
        check_is_fitted(self, "t_")

        if self.loss not in ("log", "modified_huber"):
            raise AttributeError("probability estimates are not available for"
                                 " loss=%r" % self.loss)

    @property
    def predict_proba(self):
        """Probability estimates.

        This method is only available for log loss and modified Huber loss.

        Multiclass probability estimates are derived from binary (one-vs.-rest)
        estimates by simple normalization, as recommended by Zadrozny and
        Elkan.

        Binary probability estimates for loss="modified_huber" are given by
        (clip(decision_function(X), -1, 1) + 1) / 2.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)

        Returns
        -------
        array, shape (n_samples, n_classes)
            Returns the probability of the sample for each class in the model,
            where classes are ordered as they are in `self.classes_`.

        References
        ----------
        Zadrozny and Elkan, "Transforming classifier scores into multiclass
        probability estimates", SIGKDD'02,
        http://www.research.ibm.com/people/z/zadrozny/kdd2002-Transf.pdf

        The justification for the formula in the loss="modified_huber"
        case is in the appendix B in:
        http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf
        """
        # Property so that an informative AttributeError is raised when the
        # loss does not support probabilities.
        self._check_proba()
        return self._predict_proba

    def _predict_proba(self, X):
        if self.loss == "log":
            return self._predict_proba_lr(X)

        elif self.loss == "modified_huber":
            binary = (len(self.classes_) == 2)
            scores = self.decision_function(X)

            if binary:
                prob2 = np.ones((scores.shape[0], 2))
                prob = prob2[:, 1]
            else:
                prob = scores

            # prob = (clip(scores, -1, 1) + 1) / 2, computed in place.
            np.clip(scores, -1, 1, prob)
            prob += 1.
            prob /= 2.

            if binary:
                prob2[:, 0] -= prob
                prob = prob2
            else:
                # the above might assign zero to all classes, which doesn't
                # normalize neatly; work around this to produce uniform
                # probabilities
                prob_sum = prob.sum(axis=1)
                all_zero = (prob_sum == 0)
                if np.any(all_zero):
                    prob[all_zero, :] = 1
                    prob_sum[all_zero] = len(self.classes_)

                # normalize
                prob /= prob_sum.reshape((prob.shape[0], -1))

            return prob

        else:
            raise NotImplementedError("predict_(log_)proba only supported when"
                                      " loss='log' or loss='modified_huber' "
                                      "(%r given)" % self.loss)

    @property
    def predict_log_proba(self):
        """Log of probability estimates.

        This method is only available for log loss and modified Huber loss.

        When loss="modified_huber", probability estimates may be hard zeros
        and ones, so taking the logarithm is not possible.

        See ``predict_proba`` for details.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)

        Returns
        -------
        T : array-like, shape (n_samples, n_classes)
            Returns the log-probability of the sample for each class in the
            model, where classes are ordered as they are in
            `self.classes_`.
        """
        self._check_proba()
        return self._predict_log_proba

    def _predict_log_proba(self, X):
        # Element-wise log of predict_proba; may emit -inf for hard 0s.
        return np.log(self.predict_proba(X))
class BaseSGDRegressor(BaseSGD, RegressorMixin):
    """Shared implementation for SGD-based regressors.

    Maps each supported loss name to its Cython loss class (plus an optional
    default ``epsilon`` argument for the epsilon-parameterized losses).
    """
    loss_functions = {
        "squared_loss": (SquaredLoss, ),
        "huber": (Huber, DEFAULT_EPSILON),
        "epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
        "squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
                                        DEFAULT_EPSILON),
    }
    @abstractmethod
    def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
                 l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
                 verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
                 learning_rate="invscaling", eta0=0.01, power_t=0.25,
                 warm_start=False, average=False):
        # Abstract: concrete subclasses (e.g. SGDRegressor) forward all
        # hyper-parameters here; BaseSGD just stores them.
        super(BaseSGDRegressor, self).__init__(loss=loss, penalty=penalty,
                                               alpha=alpha, l1_ratio=l1_ratio,
                                               fit_intercept=fit_intercept,
                                               n_iter=n_iter, shuffle=shuffle,
                                               verbose=verbose,
                                               epsilon=epsilon,
                                               random_state=random_state,
                                               learning_rate=learning_rate,
                                               eta0=eta0, power_t=power_t,
                                               warm_start=warm_start,
                                               average=average)
    def _partial_fit(self, X, y, alpha, C, loss, learning_rate,
                     n_iter, sample_weight,
                     coef_init, intercept_init):
        """Validate inputs, (re)allocate parameters and run the SGD loop."""
        X, y = check_X_y(X, y, "csr", copy=False, order='C', dtype=np.float64)
        y = astype(y, np.float64, copy=False)
        n_samples, n_features = X.shape
        self._validate_params()
        # Allocate datastructures from input arguments
        sample_weight = self._validate_sample_weight(sample_weight, n_samples)
        if self.coef_ is None:
            # First call: regression has a single output, hence n_classes=1.
            self._allocate_parameter_mem(1, n_features,
                                         coef_init, intercept_init)
        elif n_features != self.coef_.shape[-1]:
            raise ValueError("Number of features %d does not match previous "
                             "data %d." % (n_features, self.coef_.shape[-1]))
        if self.average > 0 and self.average_coef_ is None:
            # Lazily allocate the averaged-SGD accumulators.
            self.average_coef_ = np.zeros(n_features,
                                          dtype=np.float64,
                                          order="C")
            self.average_intercept_ = np.zeros(1,
                                               dtype=np.float64,
                                               order="C")
        self._fit_regressor(X, y, alpha, C, loss, learning_rate,
                            sample_weight, n_iter)
        return self
    def partial_fit(self, X, y, sample_weight=None):
        """Fit linear model with Stochastic Gradient Descent.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Subset of training data
        y : numpy array of shape (n_samples,)
            Subset of target values
        sample_weight : array-like, shape (n_samples,), optional
            Weights applied to individual samples.
            If not provided, uniform weights are assumed.
        Returns
        -------
        self : returns an instance of self.
        """
        # Incremental learning: exactly one pass (n_iter=1) over this batch.
        return self._partial_fit(X, y, self.alpha, C=1.0,
                                 loss=self.loss,
                                 learning_rate=self.learning_rate, n_iter=1,
                                 sample_weight=sample_weight,
                                 coef_init=None, intercept_init=None)
    def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
             intercept_init=None, sample_weight=None):
        """Full fit: honour warm_start, reset state, then delegate to _partial_fit."""
        if self.warm_start and self.coef_ is not None:
            # Reuse the previous solution as the starting point.
            if coef_init is None:
                coef_init = self.coef_
            if intercept_init is None:
                intercept_init = self.intercept_
        else:
            self.coef_ = None
            self.intercept_ = None
        if self.average > 0:
            self.standard_intercept_ = self.intercept_
            self.standard_coef_ = self.coef_
            self.average_coef_ = None
            self.average_intercept_ = None
        # Clear iteration count for multiple call to fit.
        self.t_ = None
        return self._partial_fit(X, y, alpha, C, loss, learning_rate,
                                 self.n_iter, sample_weight,
                                 coef_init, intercept_init)
    def fit(self, X, y, coef_init=None, intercept_init=None,
            sample_weight=None):
        """Fit linear model with Stochastic Gradient Descent.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training data
        y : numpy array, shape (n_samples,)
            Target values
        coef_init : array, shape (n_features,)
            The initial coefficients to warm-start the optimization.
        intercept_init : array, shape (1,)
            The initial intercept to warm-start the optimization.
        sample_weight : array-like, shape (n_samples,), optional
            Weights applied to individual samples (1. for unweighted).
        Returns
        -------
        self : returns an instance of self.
        """
        return self._fit(X, y, alpha=self.alpha, C=1.0,
                         loss=self.loss, learning_rate=self.learning_rate,
                         coef_init=coef_init,
                         intercept_init=intercept_init,
                         sample_weight=sample_weight)
    @deprecated(" and will be removed in 0.19.")
    def decision_function(self, X):
        """Predict using the linear model
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Returns
        -------
        array, shape (n_samples,)
           Predicted target values per element in X.
        """
        # Deprecated public alias for _decision_function (kept for BC).
        return self._decision_function(X)
    def _decision_function(self, X):
        """Predict using the linear model
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Returns
        -------
        array, shape (n_samples,)
           Predicted target values per element in X.
        """
        check_is_fitted(self, ["t_", "coef_", "intercept_"], all_or_any=all)
        X = check_array(X, accept_sparse='csr')
        # X @ coef_.T + intercept_, sparse-safe.
        scores = safe_sparse_dot(X, self.coef_.T,
                                 dense_output=True) + self.intercept_
        return scores.ravel()
    def predict(self, X):
        """Predict using the linear model
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Returns
        -------
        array, shape (n_samples,)
           Predicted target values per element in X.
        """
        return self._decision_function(X)
    def _fit_regressor(self, X, y, alpha, C, loss, learning_rate,
                       sample_weight, n_iter):
        """Run the Cython SGD loop (plain or averaged) and store the result."""
        dataset, intercept_decay = make_dataset(X, y, sample_weight)
        loss_function = self._get_loss_function(loss)
        penalty_type = self._get_penalty_type(self.penalty)
        learning_rate_type = self._get_learning_rate_type(learning_rate)
        if self.t_ is None:
            self.t_ = 1.0
        random_state = check_random_state(self.random_state)
        # numpy mtrand expects a C long which is a signed 32 bit integer under
        # Windows
        seed = random_state.randint(0, np.iinfo(np.int32).max)
        if self.average > 0:
            self.standard_coef_, self.standard_intercept_, \
                self.average_coef_, self.average_intercept_ =\
                average_sgd(self.standard_coef_,
                            self.standard_intercept_[0],
                            self.average_coef_,
                            self.average_intercept_[0],
                            loss_function,
                            penalty_type,
                            alpha, C,
                            self.l1_ratio,
                            dataset,
                            n_iter,
                            int(self.fit_intercept),
                            int(self.verbose),
                            int(self.shuffle),
                            seed,
                            1.0, 1.0,
                            learning_rate_type,
                            self.eta0, self.power_t, self.t_,
                            intercept_decay, self.average)
            self.average_intercept_ = np.atleast_1d(self.average_intercept_)
            self.standard_intercept_ = np.atleast_1d(self.standard_intercept_)
            self.t_ += n_iter * X.shape[0]
            # Expose the averaged weights only once enough samples have been
            # seen (when average is an int threshold); otherwise keep the
            # plain SGD weights.
            if self.average <= self.t_ - 1.0:
                self.coef_ = self.average_coef_
                self.intercept_ = self.average_intercept_
            else:
                self.coef_ = self.standard_coef_
                self.intercept_ = self.standard_intercept_
        else:
            self.coef_, self.intercept_ = \
                plain_sgd(self.coef_,
                          self.intercept_[0],
                          loss_function,
                          penalty_type,
                          alpha, C,
                          self.l1_ratio,
                          dataset,
                          n_iter,
                          int(self.fit_intercept),
                          int(self.verbose),
                          int(self.shuffle),
                          seed,
                          1.0, 1.0,
                          learning_rate_type,
                          self.eta0, self.power_t, self.t_,
                          intercept_decay)
            self.t_ += n_iter * X.shape[0]
            self.intercept_ = np.atleast_1d(self.intercept_)
class SGDRegressor(BaseSGDRegressor, _LearntSelectorMixin):
    """Linear model fitted by minimizing a regularized empirical loss with SGD
    SGD stands for Stochastic Gradient Descent: the gradient of the loss is
    estimated each sample at a time and the model is updated along the way with
    a decreasing strength schedule (aka learning rate).
    The regularizer is a penalty added to the loss function that shrinks model
    parameters towards the zero vector using either the squared euclidean norm
    L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
    parameter update crosses the 0.0 value because of the regularizer, the
    update is truncated to 0.0 to allow for learning sparse models and achieve
    online feature selection.
    This implementation works with data represented as dense numpy arrays of
    floating point values for the features.
    Read more in the :ref:`User Guide <sgd>`.
    Parameters
    ----------
    loss : str, 'squared_loss', 'huber', 'epsilon_insensitive', \
                or 'squared_epsilon_insensitive'
        The loss function to be used. Defaults to 'squared_loss' which refers
        to the ordinary least squares fit. 'huber' modifies 'squared_loss' to
        focus less on getting outliers correct by switching from squared to
        linear loss past a distance of epsilon. 'epsilon_insensitive' ignores
        errors less than epsilon and is linear past that; this is the loss
        function used in SVR. 'squared_epsilon_insensitive' is the same but
        becomes squared loss past a tolerance of epsilon.
    penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
        The penalty (aka regularization term) to be used. Defaults to 'l2'
        which is the standard regularizer for linear SVM models. 'l1' and
        'elasticnet' might bring sparsity to the model (feature selection)
        not achievable with 'l2'.
    alpha : float
        Constant that multiplies the regularization term. Defaults to 0.0001
        Also used to compute learning_rate when set to 'optimal'.
    l1_ratio : float
        The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
        l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
        Defaults to 0.15.
    fit_intercept : bool
        Whether the intercept should be estimated or not. If False, the
        data is assumed to be already centered. Defaults to True.
    n_iter : int, optional
        The number of passes over the training data (aka epochs). The number
        of iterations is set to 1 if using partial_fit.
        Defaults to 5.
    shuffle : bool, optional
        Whether or not the training data should be shuffled after each epoch.
        Defaults to True.
    random_state : int seed, RandomState instance, or None (default)
        The seed of the pseudo random number generator to use when
        shuffling the data.
    verbose : integer, optional
        The verbosity level.
    epsilon : float
        Epsilon in the epsilon-insensitive loss functions; only if `loss` is
        'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
        For 'huber', determines the threshold at which it becomes less
        important to get the prediction exactly right.
        For epsilon-insensitive, any differences between the current prediction
        and the correct label are ignored if they are less than this threshold.
    learning_rate : string, optional
        The learning rate:
        constant: eta = eta0
        optimal: eta = 1.0/(alpha * t)
        invscaling: eta = eta0 / pow(t, power_t) [default]
    eta0 : double, optional
        The initial learning rate [default 0.01].
    power_t : double, optional
        The exponent for inverse scaling learning rate [default 0.25].
    warm_start : bool, optional
        When set to True, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.
    average : bool or int, optional
        When set to True, computes the averaged SGD weights and stores the
        result in the ``coef_`` attribute. If set to an int greater than 1,
        averaging will begin once the total number of samples seen reaches
        average. So ``average=10 will`` begin averaging after seeing 10
        samples.
    Attributes
    ----------
    coef_ : array, shape (n_features,)
        Weights assigned to the features.
    intercept_ : array, shape (1,)
        The intercept term.
    average_coef_ : array, shape (n_features,)
        Averaged weights assigned to the features.
    average_intercept_ : array, shape (1,)
        The averaged intercept term.
    Examples
    --------
    >>> import numpy as np
    >>> from sklearn import linear_model
    >>> n_samples, n_features = 10, 5
    >>> np.random.seed(0)
    >>> y = np.random.randn(n_samples)
    >>> X = np.random.randn(n_samples, n_features)
    >>> clf = linear_model.SGDRegressor()
    >>> clf.fit(X, y)
    ... #doctest: +NORMALIZE_WHITESPACE
    SGDRegressor(alpha=0.0001, average=False, epsilon=0.1, eta0=0.01,
           fit_intercept=True, l1_ratio=0.15, learning_rate='invscaling',
           loss='squared_loss', n_iter=5, penalty='l2', power_t=0.25,
           random_state=None, shuffle=True, verbose=0, warm_start=False)
    See also
    --------
    Ridge, ElasticNet, Lasso, SVR
    """
    # All hyper-parameters are forwarded verbatim to BaseSGDRegressor; this
    # concrete subclass only makes the abstract base instantiable (and mixes
    # in _LearntSelectorMixin for transform-based feature selection).
    def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
                 l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
                 verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
                 learning_rate="invscaling", eta0=0.01, power_t=0.25,
                 warm_start=False, average=False):
        super(SGDRegressor, self).__init__(loss=loss, penalty=penalty,
                                           alpha=alpha, l1_ratio=l1_ratio,
                                           fit_intercept=fit_intercept,
                                           n_iter=n_iter, shuffle=shuffle,
                                           verbose=verbose,
                                           epsilon=epsilon,
                                           random_state=random_state,
                                           learning_rate=learning_rate,
                                           eta0=eta0, power_t=power_t,
                                           warm_start=warm_start,
                                           average=average)
| bsd-3-clause |
andrewnc/scikit-learn | sklearn/cluster/tests/test_spectral.py | 262 | 7954 | """Testing for Spectral Clustering methods"""
from sklearn.externals.six.moves import cPickle
dumps, loads = cPickle.dumps, cPickle.loads
import numpy as np
from scipy import sparse
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_warns_message
from sklearn.cluster import SpectralClustering, spectral_clustering
from sklearn.cluster.spectral import spectral_embedding
from sklearn.cluster.spectral import discretize
from sklearn.metrics import pairwise_distances
from sklearn.metrics import adjusted_rand_score
from sklearn.metrics.pairwise import kernel_metrics, rbf_kernel
from sklearn.datasets.samples_generator import make_blobs
def test_spectral_clustering():
    # Block-structured affinity matrix: samples 0-2 and 3-6 form two groups
    # joined only by weak (0.2) links, so the expected 2-way split is obvious.
    S = np.array([[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
                  [1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
                  [1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
                  [0.2, 0.2, 0.2, 1.0, 1.0, 1.0, 1.0],
                  [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
                  [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
                  [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]])
    # Exercise every combination of eigen solver, label-assignment strategy,
    # and dense vs. sparse affinity input.
    for eigen_solver in ('arpack', 'lobpcg'):
        for assign_labels in ('kmeans', 'discretize'):
            for mat in (S, sparse.csr_matrix(S)):
                model = SpectralClustering(random_state=0, n_clusters=2,
                                           affinity='precomputed',
                                           eigen_solver=eigen_solver,
                                           assign_labels=assign_labels
                                           ).fit(mat)
                labels = model.labels_
                # Cluster ids are arbitrary: canonicalize so sample 0 is in
                # cluster 1 before comparing.
                if labels[0] == 0:
                    labels = 1 - labels
                assert_array_equal(labels, [1, 1, 1, 0, 0, 0, 0])
                # The fitted model must survive a pickle round-trip intact.
                model_copy = loads(dumps(model))
                assert_equal(model_copy.n_clusters, model.n_clusters)
                assert_equal(model_copy.eigen_solver, model.eigen_solver)
                assert_array_equal(model_copy.labels_, model.labels_)
def test_spectral_amg_mode():
    # Test the amg mode of SpectralClustering
    centers = np.array([
        [0., 0., 0.],
        [10., 10., 10.],
        [20., 20., 20.],
    ])
    X, true_labels = make_blobs(n_samples=100, centers=centers,
                                cluster_std=1., random_state=42)
    D = pairwise_distances(X)  # Distance matrix
    S = np.max(D) - D  # Similarity matrix
    S = sparse.coo_matrix(S)
    # The 'amg' eigen solver needs the optional pyamg package; probe for it.
    try:
        from pyamg import smoothed_aggregation_solver
        amg_loaded = True
    except ImportError:
        amg_loaded = False
    if amg_loaded:
        labels = spectral_clustering(S, n_clusters=len(centers),
                                     random_state=0, eigen_solver="amg")
        # We don't care too much that it's good, just that it *worked*.
        # There does have to be some lower limit on the performance though.
        assert_greater(np.mean(labels == true_labels), .3)
    else:
        # Without pyamg, requesting the amg solver must raise ValueError.
        assert_raises(ValueError, spectral_embedding, S,
                      n_components=len(centers),
                      random_state=0, eigen_solver="amg")
def test_spectral_unknown_mode():
    # Test that SpectralClustering fails with an unknown mode set.
    centers = np.array([
        [0., 0., 0.],
        [10., 10., 10.],
        [20., 20., 20.],
    ])
    X, true_labels = make_blobs(n_samples=100, centers=centers,
                                cluster_std=1., random_state=42)
    D = pairwise_distances(X)  # Distance matrix
    S = np.max(D) - D  # Similarity matrix
    S = sparse.coo_matrix(S)
    # An unrecognized eigen_solver name must raise ValueError.
    assert_raises(ValueError, spectral_clustering, S, n_clusters=2,
                  random_state=0, eigen_solver="<unknown>")
def test_spectral_unknown_assign_labels():
    # Test that SpectralClustering fails with an unknown assign_labels set.
    centers = np.array([
        [0., 0., 0.],
        [10., 10., 10.],
        [20., 20., 20.],
    ])
    X, true_labels = make_blobs(n_samples=100, centers=centers,
                                cluster_std=1., random_state=42)
    D = pairwise_distances(X)  # Distance matrix
    S = np.max(D) - D  # Similarity matrix
    S = sparse.coo_matrix(S)
    # An unrecognized assign_labels strategy must raise ValueError.
    assert_raises(ValueError, spectral_clustering, S, n_clusters=2,
                  random_state=0, assign_labels="<unknown>")
def test_spectral_clustering_sparse():
    # Two tight, well-separated blobs so the clustering must be perfect.
    X, y = make_blobs(n_samples=20, random_state=0,
                      centers=[[1, 1], [-1, -1]], cluster_std=0.01)
    # Build an RBF affinity, threshold the tiny entries to obtain an
    # actually-sparse matrix, and feed it in as 'precomputed'.
    S = rbf_kernel(X, gamma=1)
    S = np.maximum(S - 1e-4, 0)
    S = sparse.coo_matrix(S)
    labels = SpectralClustering(random_state=0, n_clusters=2,
                                affinity='precomputed').fit(S).labels_
    # ARI == 1 means the recovered labels match y up to permutation.
    assert_equal(adjusted_rand_score(y, labels), 1)
def test_affinities():
    # Note: in the following, random_state has been selected to have
    # a dataset that yields a stable eigen decomposition both when built
    # on OSX and Linux
    X, y = make_blobs(n_samples=20, random_state=0,
                      centers=[[1, 1], [-1, -1]], cluster_std=0.01
                      )
    # nearest neighbors affinity
    sp = SpectralClustering(n_clusters=2, affinity='nearest_neighbors',
                            random_state=0)
    # The kNN graph of this tiny dataset is disconnected, so fitting must
    # emit the "not fully connected" warning but still cluster perfectly.
    assert_warns_message(UserWarning, 'not fully connected', sp.fit, X)
    assert_equal(adjusted_rand_score(y, sp.labels_), 1)
    # Default (rbf) affinity with a custom gamma.
    sp = SpectralClustering(n_clusters=2, gamma=2, random_state=0)
    labels = sp.fit(X).labels_
    assert_equal(adjusted_rand_score(y, labels), 1)
    X = check_random_state(10).rand(10, 5) * 10
    kernels_available = kernel_metrics()
    # Smoke-test every named kernel: only the output shape is checked.
    for kern in kernels_available:
        # Additive chi^2 gives a negative similarity matrix which
        # doesn't make sense for spectral clustering
        if kern != 'additive_chi2':
            sp = SpectralClustering(n_clusters=2, affinity=kern,
                                    random_state=0)
            labels = sp.fit(X).labels_
            assert_equal((X.shape[0],), labels.shape)
    # A callable affinity is also accepted.
    sp = SpectralClustering(n_clusters=2, affinity=lambda x, y: 1,
                            random_state=0)
    labels = sp.fit(X).labels_
    assert_equal((X.shape[0],), labels.shape)
    def histogram(x, y, **kwargs):
        # Histogram kernel implemented as a callable.
        assert_equal(kwargs, {})    # no kernel_params that we didn't ask for
        return np.minimum(x, y).sum()
    sp = SpectralClustering(n_clusters=2, affinity=histogram, random_state=0)
    labels = sp.fit(X).labels_
    assert_equal((X.shape[0],), labels.shape)
    # raise error on unknown affinity
    sp = SpectralClustering(n_clusters=2, affinity='<unknown>')
    assert_raises(ValueError, sp.fit, X)
def test_discretize(seed=8):
    # Test the discretize using a noise assignment matrix
    random_state = np.random.RandomState(seed)
    for n_samples in [50, 100, 150, 500]:
        for n_class in range(2, 10):
            # random class labels in [0, n_class] inclusive.
            # randint's upper bound is exclusive, hence n_class + 1; the
            # previously used random_integers (inclusive bounds) was
            # deprecated in NumPy 1.11 and removed in 1.17.
            y_true = random_state.randint(0, n_class + 1, n_samples)
            # np.float was removed in NumPy 1.24; the builtin float is the
            # documented replacement.
            y_true = np.array(y_true, float)
            # noise class assignment matrix
            y_indicator = sparse.coo_matrix((np.ones(n_samples),
                                             (np.arange(n_samples),
                                              y_true)),
                                            shape=(n_samples,
                                                   n_class + 1))
            y_true_noisy = (y_indicator.toarray()
                            + 0.1 * random_state.randn(n_samples,
                                                       n_class + 1))
            y_pred = discretize(y_true_noisy, random_state)
            # Discretization should recover the labels almost perfectly.
            assert_greater(adjusted_rand_score(y_true, y_pred), 0.8)
| bsd-3-clause |
don-willingham/Sonoff-Tasmota | tools/serial-plotter.py | 2 | 5157 | #!/usr/bin/env python3
"""
serial-plotter.py - for Tasmota
Copyright (C) 2020 Christian Baars
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Requirements:
- Python
- pip3 install matplotlib pyserial
- for Windows: Full python install including tkinter
- a Tasmota driver that plots
Instructions:
expects serial data in the format:
'PLOT: graphnumber value'
graph (1-4)
integer value
Code snippet example: (last value will be ignored)
AddLog_P2(LOG_LEVEL_INFO, PSTR("PLOT: %u, %u, %u,"),button_index+1, _value, Button.touch_hits[button_index]);
Usage:
./serial-plotter.py --port /dev/PORT --baud BAUD (or change defaults in the script)
set output in tasmota, e.g.; TouchCal 1..4 (via Textbox)
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib.widgets import TextBox
import time
import serial
import argparse
import sys
print("Python version")
print (sys.version)
#default values
port = '/dev/cu.SLAB_USBtoUART'
baud = 115200
#command line input
parser = argparse.ArgumentParser()
parser.add_argument("--port", "-p", help="change serial port, default: " + port)
parser.add_argument("--baud", "-b", help="change baud rate, default: " + str(baud))
args = parser.parse_args()
if args.port:
print("change serial port to %s" % args.port)
port = args.port
if args.baud:
print("change baud rate to %s" % args.baud)
baud = args.baud
#time range
dt = 0.01
t = np.arange(0.0, 100, dt)
#lists for the data
xs = [0] #counting up x
ys = [[0],[0],[0],[0]] #4 fixed graphs for now
max_y = 1
# min_y = 0
fig = plt.figure('Tasmota Serial Plotter')
ax = fig.add_subplot(111, autoscale_on=True, xlim=(0, 200), ylim=(0, 20)) #fixed x scale for now, y will adapt
ax.grid()
line1, = ax.plot([], [], color = "r", label='G 1')
line2, = ax.plot([], [], color = "g", label='G 2')
line3, = ax.plot([], [], color = "b", label='G 3')
line4, = ax.plot([], [], color = "y", label='G 4')
time_template = 'time = %.1fs'
time_text = ax.text(0.05, 0.9, '', transform=ax.transAxes)
ser = serial.Serial()
ser.port = port
ser.baudrate = baud
ser.timeout = 0 #return immediately
try:
ser.open()
except:
print("Could not connect to serial with settings: " + str(ser.port) + ' at ' + str(ser.baudrate) + 'baud')
print("port available?")
exit()
if ser.is_open==True:
print("Serial Plotter started ...:")
plt.title('connected to ' + str(ser.port) + ' at ' + str(ser.baudrate) + 'baud')
else:
print("Could not connect to serial: " + str(ser.port) + ' at ' + str(ser.baudrate) + 'baud')
plt.title('NOT connected to ' + str(ser.port) + ' at ' + str(ser.baudrate) + 'baud')
def init():
    """Reset all four graph lines and the time read-out (FuncAnimation init)."""
    for graph_line in (line1, line2, line3, line4):
        graph_line.set_data([], [])
    time_text.set_text('')
    # FuncAnimation expects the list of artists that will be redrawn.
    return [line1, line2, line3, line4, time_text]
def parse_line(data_line):
    """Extract ``(graph, value)`` from a serial line of the form
    ``'... PLOT: g, v, ...'``.

    The marker is only searched from index 10 onwards (the line starts with
    the ``str()``-wrapped bytes prefix and other framing characters).
    Returns ``(0, 0)`` when the marker is missing or the payload is not two
    comma-separated integers, so a bad line is simply ignored by the caller.
    """
    marker_pos = data_line.find("PLOT:", 10)
    if marker_pos < 0:
        # print("wrong format")
        return 0, 0
    # +6 skips "PLOT:" (5 chars) and the following space.
    payload = data_line[marker_pos + 6:]
    fields = payload.split(',')
    # Narrowed from a bare ``except:`` — only the expected parse failures
    # (non-numeric text, fewer than two fields) fall back to (0, 0);
    # programming errors are no longer silently swallowed.
    try:
        return int(fields[0]), int(fields[1])
    except (ValueError, IndexError):
        return 0, 0
def update(num, line1, line2):
    """FuncAnimation callback: read one serial line and advance the plot.

    Mutates the module-level sample buffers ``xs``/``ys`` and rescales the
    y axis whenever a new maximum is seen.
    """
    global xs, ys, max_y
    time_text.set_text(time_template % (num*dt) )
    receive_data = str(ser.readline())  #string
    g, v = parse_line(receive_data)
    if (g in range(1,5)):
        # print(v,g)
        if v>max_y:
            # New peak: remember it and grow the y axis with 20% headroom.
            max_y = v
            print(max_y)
            ax.set_ylim([0, max_y * 1.2])
        # Every graph repeats its last value; only graph ``g`` gets the
        # freshly received sample.
        idx = 0
        for y in ys:
            y.append(y[-1])
            if idx == g-1:
                y[-1] = v
            idx = idx +1
        xs.append(xs[-1]+1)
        if len(ys[0])>200:
            # Keep a sliding window of 200 samples: drop the oldest y value
            # of each graph and the just-appended x so the axis stays fixed.
            xs.pop()
            for y in ys:
                y.pop(0)
    line1.set_data(xs, ys[0])
    line2.set_data(xs, ys[1])
    line3.set_data(xs, ys[2])
    line4.set_data(xs, ys[3])
    return [line1,line2,line3,line4, time_text]
def handle_close(evt):
    """matplotlib close_event handler: release the serial port on window close."""
    print('Closing serial connection')
    ser.close()
    print('Closed serial plotter')
def submit(text):
    """TextBox on_submit handler: send the typed command over serial (newline-terminated)."""
    print (text)
    ser.write(text.encode() + "\n".encode())
ani = animation.FuncAnimation(fig, update, None, fargs=[line1, line2],
interval=10, blit=True, init_func=init)
ax.set_xlabel('Last 200 Samples')
ax.set_ylabel('Values')
plt.subplots_adjust(bottom=0.25)
ax.legend(loc='lower right', ncol=2)
fig.canvas.mpl_connect('close_event', handle_close)
axbox = plt.axes([0.15, 0.05, 0.7, 0.075])
text_box = TextBox(axbox, 'Send:', initial='')
text_box.on_submit(submit)
if ser.is_open==True:
plt.show()
| gpl-3.0 |
ThiagoGarciaAlves/intellij-community | python/helpers/pydev/pydevd.py | 3 | 69138 | '''
Entry point module (keep at root):
This module starts the debugger.
'''
import sys
if sys.version_info[:2] < (2, 6):
raise RuntimeError('The PyDev.Debugger requires Python 2.6 onwards to be run. If you need to use an older Python version, use an older version of the debugger.')
import atexit
import os
import traceback
from _pydevd_bundle.pydevd_constants import IS_JYTH_LESS25, IS_PY3K, IS_PY34_OR_GREATER, IS_PYCHARM, get_thread_id, \
dict_keys, dict_iter_items, DebugInfoHolder, PYTHON_SUSPEND, STATE_SUSPEND, STATE_RUN, get_frame, xrange, \
clear_cached_thread_id, INTERACTIVE_MODE_AVAILABLE, SHOW_DEBUG_INFO_ENV
from _pydev_bundle import fix_getpass
from _pydev_bundle import pydev_imports, pydev_log
from _pydev_bundle._pydev_filesystem_encoding import getfilesystemencoding
from _pydev_bundle.pydev_is_thread_alive import is_thread_alive
from _pydev_imps._pydev_saved_modules import threading
from _pydev_imps._pydev_saved_modules import time
from _pydev_imps._pydev_saved_modules import thread
from _pydevd_bundle import pydevd_io, pydevd_vm_type, pydevd_tracing
from _pydevd_bundle import pydevd_utils
from _pydevd_bundle import pydevd_vars
from _pydevd_bundle.pydevd_additional_thread_info import PyDBAdditionalThreadInfo
from _pydevd_bundle.pydevd_breakpoints import ExceptionBreakpoint, update_exception_hook
from _pydevd_bundle.pydevd_comm import CMD_SET_BREAK, CMD_SET_NEXT_STATEMENT, CMD_STEP_INTO, CMD_STEP_OVER, \
CMD_STEP_RETURN, CMD_STEP_INTO_MY_CODE, CMD_THREAD_SUSPEND, CMD_RUN_TO_LINE, \
CMD_ADD_EXCEPTION_BREAK, CMD_SMART_STEP_INTO, InternalConsoleExec, NetCommandFactory, \
PyDBDaemonThread, _queue, ReaderThread, GetGlobalDebugger, get_global_debugger, \
set_global_debugger, WriterThread, pydevd_find_thread_by_id, pydevd_log, \
start_client, start_server, InternalGetBreakpointException, InternalSendCurrExceptionTrace, \
InternalSendCurrExceptionTraceProceeded
from _pydevd_bundle.pydevd_custom_frames import CustomFramesContainer, custom_frames_container_init
from _pydevd_bundle.pydevd_frame_utils import add_exception_to_frame
from _pydevd_bundle.pydevd_kill_all_pydevd_threads import kill_all_pydev_threads
from _pydevd_bundle.pydevd_trace_dispatch import trace_dispatch as _trace_dispatch, global_cache_skips, global_cache_frame_skips, show_tracing_warning
from _pydevd_frame_eval.pydevd_frame_eval_main import frame_eval_func, stop_frame_eval, enable_cache_frames_without_breaks, \
dummy_trace_dispatch, show_frame_eval_warning
from _pydevd_bundle.pydevd_utils import save_main_module
from pydevd_concurrency_analyser.pydevd_concurrency_logger import ThreadingLogger, AsyncioLogger, send_message, cur_time
from pydevd_concurrency_analyser.pydevd_thread_wrappers import wrap_threads
from pydevd_file_utils import get_fullname
__version_info__ = (1, 1, 1)
__version_info_str__ = []
for v in __version_info__:
__version_info_str__.append(str(v))
__version__ = '.'.join(__version_info_str__)
#IMPORTANT: pydevd_constants must be the 1st thing defined because it'll keep a reference to the original sys._getframe
SUPPORT_PLUGINS = not IS_JYTH_LESS25
PluginManager = None
if SUPPORT_PLUGINS:
from _pydevd_bundle.pydevd_plugin_utils import PluginManager
threadingEnumerate = threading.enumerate
threadingCurrentThread = threading.currentThread
try:
'dummy'.encode('utf-8') # Added because otherwise Jython 2.2.1 wasn't finding the encoding (if it wasn't loaded in the main thread).
except:
pass
connected = False
bufferStdOutToServer = False
bufferStdErrToServer = False
remote = False
forked = False
file_system_encoding = getfilesystemencoding()
#=======================================================================================================================
# PyDBCommandThread
#=======================================================================================================================
class PyDBCommandThread(PyDBDaemonThread):
    """Daemon thread that periodically drains the debugger's internal command queue."""
    def __init__(self, py_db):
        PyDBDaemonThread.__init__(self)
        # Shared event used to wake this thread up when new commands arrive.
        self._py_db_command_thread_event = py_db._py_db_command_thread_event
        self.py_db = py_db
        self.setName('pydevd.CommandThread')
    def _on_run(self):
        # Delay the actual start (~5s in 0.5s steps), checking for an early
        # kill request between sleeps.
        for i in xrange(1, 10):
            time.sleep(0.5) #this one will only start later on (because otherwise we may not have any non-daemon threads
            if self.killReceived:
                return
        if self.pydev_do_not_trace:
            self.py_db.SetTrace(None) # no debugging on this thread
        try:
            while not self.killReceived:
                try:
                    self.py_db.process_internal_commands()
                except:
                    pydevd_log(0, 'Finishing debug communication...(2)')
                # Sleep until either woken by a new command or 0.5s elapses.
                self._py_db_command_thread_event.clear()
                self._py_db_command_thread_event.wait(0.5)
        except:
            pydev_log.debug(sys.exc_info()[0])
            #only got this error in interpreter shutdown
            #pydevd_log(0, 'Finishing debug communication...(3)')
#=======================================================================================================================
# CheckOutputThread
# Non-daemonic thread guaranties that all data is written even if program is finished
#=======================================================================================================================
class CheckOutputThread(PyDBDaemonThread):
    """Non-daemonic watchdog: keeps flushing redirected output and finishes the
    debug session once no user threads remain and all data has been sent.
    Being non-daemonic guarantees all data is written even if the program ends.
    """
    def __init__(self, py_db):
        PyDBDaemonThread.__init__(self)
        self.py_db = py_db
        self.setName('pydevd.CheckAliveThread')
        self.daemon = False
        # Register ourselves so PyDB can reach the output checker.
        py_db.output_checker = self
    def _on_run(self):
        if self.pydev_do_not_trace:
            disable_tracing = True
            if pydevd_vm_type.get_vm_type() == pydevd_vm_type.PydevdVmType.JYTHON and sys.hexversion <= 0x020201f0:
                # don't run untraced threads if we're in jython 2.2.1 or lower
                # jython bug: if we start a thread and another thread changes the tracing facility
                # it affects other threads (it's not set only for the thread but globally)
                # Bug: http://sourceforge.net/tracker/index.php?func=detail&aid=1870039&group_id=12867&atid=112867
                disable_tracing = False
            if disable_tracing:
                pydevd_tracing.SetTrace(None) # no debugging on this thread
        while not self.killReceived:
            time.sleep(0.3)
            # Shut down only when no user threads are alive AND the writer
            # queue is empty AND no redirected output is pending.
            if not self.py_db.has_threads_alive() and self.py_db.writer.empty() \
                    and not has_data_to_redirect():
                try:
                    pydev_log.debug("No alive threads, finishing debug session")
                    self.py_db.finish_debugging_session()
                    kill_all_pydev_threads()
                except:
                    traceback.print_exc()
                self.killReceived = True
            self.py_db.check_output_redirect()
    def do_kill_pydev_thread(self):
        self.killReceived = True
#=======================================================================================================================
# PyDB
#=======================================================================================================================
class PyDB:
""" Main debugging class
Lots of stuff going on here:
PyDB starts two threads on startup that connect to remote debugger (RDB)
The threads continuously read & write commands to RDB.
PyDB communicates with these threads through command queues.
Every RDB command is processed by calling process_net_command.
Every PyDB net command is sent to the net by posting NetCommand to WriterThread queue
Some commands need to be executed on the right thread (suspend/resume & friends)
These are placed on the internal command queue.
"""
    def __init__(self):
        """Initialize the debugger state and register it as the global debugger."""
        set_global_debugger(self)
        pydevd_tracing.replace_sys_set_trace_func()
        self.reader = None
        self.writer = None
        self.output_checker = None
        self.quitting = None
        self.cmd_factory = NetCommandFactory()
        self._cmd_queue = {} # the hash of Queues. Key is thread id, value is thread
        self.breakpoints = {}
        self.file_to_id_to_line_breakpoint = {}
        self.file_to_id_to_plugin_breakpoint = {}
        # Note: breakpoints dict should not be mutated: a copy should be created
        # and later it should be assigned back (to prevent concurrency issues).
        self.break_on_uncaught_exceptions = {}
        self.break_on_caught_exceptions = {}
        self.ready_to_run = False
        self._main_lock = thread.allocate_lock()
        self._lock_running_thread_ids = thread.allocate_lock()
        # Event used to wake PyDBCommandThread when new commands are queued.
        self._py_db_command_thread_event = threading.Event()
        CustomFramesContainer._py_db_command_thread_event = self._py_db_command_thread_event
        self._finish_debugging_session = False
        self._termination_event_set = False
        self.signature_factory = None
        self.SetTrace = pydevd_tracing.SetTrace
        self.break_on_exceptions_thrown_in_same_context = False
        self.ignore_exceptions_thrown_in_lines_with_ignore_exception = True
        # Suspend debugger even if breakpoint condition raises an exception
        SUSPEND_ON_BREAKPOINT_EXCEPTION = True
        self.suspend_on_breakpoint_exception = SUSPEND_ON_BREAKPOINT_EXCEPTION
        # By default user can step into properties getter/setter/deleter methods
        self.disable_property_trace = False
        self.disable_property_getter_trace = False
        self.disable_property_setter_trace = False
        self.disable_property_deleter_trace = False
        #this is a dict of thread ids pointing to thread ids. Whenever a command is passed to the java end that
        #acknowledges that a thread was created, the thread id should be passed here -- and if at some time we do not
        #find that thread alive anymore, we must remove it from this list and make the java side know that the thread
        #was killed.
        self._running_thread_ids = {}
        self._set_breakpoints_with_id = False
        # This attribute holds the file-> lines which have an @IgnoreException.
        self.filename_to_lines_where_exceptions_are_ignored = {}
        #working with plugins (lazily initialized)
        self.plugin = None
        self.has_plugin_line_breaks = False
        self.has_plugin_exception_breaks = False
        self.thread_analyser = None
        self.asyncio_analyser = None
        # matplotlib support in debugger and debug console
        self.mpl_in_use = False
        self.mpl_hooks_in_debug_console = False
        self.mpl_modules_for_patching = {}
        self._filename_to_not_in_scope = {}
        self.first_breakpoint_reached = False
        # Step/library filtering configuration, read from the environment.
        self.is_filter_enabled = pydevd_utils.is_filter_enabled()
        self.is_filter_libraries = pydevd_utils.is_filter_libraries()
        self.show_return_values = False
        self.remove_return_values_flag = False
        # this flag disables frame evaluation even if it's available
        self.do_not_use_frame_eval = False
def get_plugin_lazy_init(self):
    """Return the plugin manager, creating it lazily on first access.

    Returns None when plugin support is not available.
    """
    plugin = self.plugin
    if plugin is None and SUPPORT_PLUGINS:
        plugin = PluginManager(self)
        self.plugin = plugin
    return plugin
def not_in_scope(self, filename):
    # True when *filename* lies outside the configured project roots
    # (i.e. it is library/framework code rather than user code).
    return pydevd_utils.not_in_project_roots(filename)
def is_ignored_by_filters(self, filename):
    # True when *filename* matches the user-configured stepping filters.
    return pydevd_utils.is_ignored_by_filter(filename)
def first_appearance_in_scope(self, trace):
    """Return True when only the first traceback frame is in project scope.

    I.e. the exception surfaced in user code and every deeper frame of
    *trace* belongs to code outside the project roots.
    """
    if trace is None:
        return False
    if self.not_in_scope(trace.tb_frame.f_code.co_filename):
        return False
    tb = trace.tb_next
    while tb is not None:
        if not self.not_in_scope(tb.tb_frame.f_code.co_filename):
            return False
        tb = tb.tb_next
    return True
def has_threads_alive(self):
    """Return True while at least one non-daemon user thread is still alive."""
    for t in threadingEnumerate():
        if getattr(t, 'is_pydev_daemon_thread', False):
            # Pydev daemon threads never keep the program alive. Jython 2.5rc4
            # has a bug where a thread created with thread.start_new_thread is
            # not marked as daemon, so the explicit flag is checked first.
            # See: https://github.com/fabioz/PyDev.Debugger/issues/11
            continue
        if isinstance(t, PyDBDaemonThread):
            pydev_log.error_once(
                'Error in debugger: Found PyDBDaemonThread not marked with is_pydev_daemon_thread=True.\n')
        if not is_thread_alive(t):
            continue
        if hasattr(t, "__pydevd_main_thread") or not t.isDaemon():
            return True
    return False
def finish_debugging_session(self):
    """Flag the debugging session as finished so worker loops can exit."""
    self._finish_debugging_session = True
def initialize_network(self, sock):
    """Spawn the reader/writer threads that service *sock*."""
    try:
        # Infinite timeout from now on; Jython sockets may not support this.
        sock.settimeout(None)
    except:
        pass
    writer = WriterThread(sock)
    reader = ReaderThread(sock)
    self.writer = writer
    self.reader = reader
    writer.start()
    reader.start()
    time.sleep(0.1)  # give both threads a moment to come up
def connect(self, host, port):
    """Open the IDE connection: client mode when *host* is given, else listen on *port*."""
    sock = start_client(host, port) if host else start_server(port)
    self.initialize_network(sock)
def get_internal_queue(self, thread_id):
    """Return (creating on demand) the internal command queue for *thread_id*.

    Custom-frame ids look like '__frame__:...|<real_thread_id>'; only the
    trailing real thread id is used as the key.
    """
    if thread_id.startswith('__frame__'):
        thread_id = thread_id[thread_id.rfind('|') + 1:]
    queue = self._cmd_queue.get(thread_id)
    if queue is None:
        queue = self._cmd_queue.setdefault(thread_id, _queue.Queue())  # @UndefinedVariable
    return queue
def post_internal_command(self, int_cmd, thread_id):
    """Queue *int_cmd* for one thread, or for every thread when thread_id == '*'."""
    if thread_id == "*":
        for t in threadingEnumerate():
            self.get_internal_queue(get_thread_id(t)).put(int_cmd)
    else:
        self.get_internal_queue(thread_id).put(int_cmd)
def check_output_redirect(self):
    """Flush any buffered stdout/stderr to the IDE when redirection is on.

    bufferStdOutToServer/bufferStdErrToServer are module globals set when
    the session is configured to mirror output to the server.
    """
    global bufferStdOutToServer
    global bufferStdErrToServer
    if bufferStdOutToServer:
        init_stdout_redirect()
        self.check_output(sys.stdoutBuf, 1)  #@UndefinedVariable
    if bufferStdErrToServer:
        init_stderr_redirect()
        self.check_output(sys.stderrBuf, 2)  #@UndefinedVariable
def check_output(self, out, outCtx):
    '''Forward any buffered output to the debug server.

    @param out: sys.stdout or sys.stderr (buffer-backed redirector)
    @param outCtx: 1 for stdout, 2 for stderr (selects display color)
    '''
    try:
        pending = out.getvalue()
        if pending:
            self.cmd_factory.make_io_message(pending, outCtx, self)
    except:
        traceback.print_exc()
def init_matplotlib_in_debug_console(self):
    """Register import hooks so matplotlib gets patched when the debug console imports it."""
    from _pydev_bundle.pydev_import_hook import import_hook_manager
    for module_name in dict_keys(self.mpl_modules_for_patching):
        activate = self.mpl_modules_for_patching.pop(module_name)
        import_hook_manager.add_module_name(module_name, activate)
def init_matplotlib_support(self):
    """Prepare the debugger to cooperate with matplotlib's GUI event loop."""
    # prepare debugger for integration with matplotlib GUI event loop
    from pydev_ipython.matplotlibtools import activate_matplotlib, activate_pylab, activate_pyplot, do_enable_gui
    # enable_gui_function in activate_matplotlib should be called in main thread. Unlike integrated console,
    # in the debug console we have no interpreter instance with exec_queue, but we run this code in the main
    # thread and can call it directly.
    class _MatplotlibHelper:
        # Oscillating flag shared by return_control() below.
        _return_control_osc = False

    def return_control():
        # Some of the input hooks (e.g. Qt4Agg) check return control without doing
        # a single operation, so we don't return True on every
        # call when the debug hook is in place to allow the GUI to run
        _MatplotlibHelper._return_control_osc = not _MatplotlibHelper._return_control_osc
        return _MatplotlibHelper._return_control_osc

    from pydev_ipython.inputhook import set_return_control_callback
    set_return_control_callback(return_control)

    # Map of module name -> activation callback, consumed lazily when the
    # module is actually imported by user code.
    self.mpl_modules_for_patching = {"matplotlib": lambda: activate_matplotlib(do_enable_gui),
                                     "matplotlib.pyplot": activate_pyplot,
                                     "pylab": activate_pylab }
def _activate_mpl_if_needed(self):
    # Activate the pending matplotlib patches for any module that user code
    # has imported since the last check.
    if len(self.mpl_modules_for_patching) > 0:
        for module_name in dict_keys(self.mpl_modules_for_patching):
            if module_name in sys.modules:
                activate = self.mpl_modules_for_patching.pop(module_name)
                activate()
                self.mpl_in_use = True
def _call_mpl_hook(self):
    # Run one iteration of the matplotlib GUI input hook, swallowing errors
    # so a misbehaving backend cannot break the debugger loop.
    try:
        from pydev_ipython.inputhook import get_inputhook
        hook = get_inputhook()
        if hook:
            hook()
    except:
        pass
def suspend_all_other_threads(self, thread_suspended_at_bp):
    """Suspend every traced user thread except the one already stopped at the breakpoint."""
    all_threads = threadingEnumerate()
    for t in all_threads:
        if getattr(t, 'is_pydev_daemon_thread', False):
            pass  # I.e.: skip the DummyThreads created from pydev daemon threads
        elif hasattr(t, 'pydev_do_not_trace'):
            pass  # skip some other threads, i.e. ipython history saving thread from debug console
        else:
            if t is thread_suspended_at_bp:
                continue
            additional_info = None
            try:
                additional_info = t.additional_info
            except AttributeError:
                pass  # that's ok, no info currently set
            if additional_info is not None:
                # Re-install tracing on the thread's live frames so the
                # suspend below is actually honored.
                for frame in additional_info.iter_frames(t):
                    self.set_trace_for_frame_and_parents(frame, overwrite_prev_trace=True)
                    del frame

                self.set_suspend(t, CMD_THREAD_SUSPEND)
            else:
                sys.stderr.write("Can't suspend thread: %s\n" % (t,))
def process_internal_commands(self):
    '''This function processes internal commands.

    Refreshes the set of known-alive threads (notifying the IDE of new and
    dead ones), then drains each thread's internal command queue, executing
    the commands that may run on the current thread and re-queuing the rest.
    Guarded by _main_lock; the thread-id bookkeeping is additionally guarded
    by _lock_running_thread_ids.
    '''
    self._main_lock.acquire()
    try:
        self.check_output_redirect()

        curr_thread_id = get_thread_id(threadingCurrentThread())
        program_threads_alive = {}
        all_threads = threadingEnumerate()
        program_threads_dead = []
        self._lock_running_thread_ids.acquire()
        try:
            for t in all_threads:
                if getattr(t, 'is_pydev_daemon_thread', False):
                    pass  # I.e.: skip the DummyThreads created from pydev daemon threads
                elif isinstance(t, PyDBDaemonThread):
                    pydev_log.error_once('Error in debugger: Found PyDBDaemonThread not marked with is_pydev_daemon_thread=True.\n')
                elif is_thread_alive(t):
                    if not self._running_thread_ids:
                        # Fix multiprocessing debug with breakpoints in both main and child processes
                        # (https://youtrack.jetbrains.com/issue/PY-17092) When the new process is created, the main
                        # thread in the new process already has the attribute 'pydevd_id', so the new thread doesn't
                        # get new id with its process number and the debugger loses access to both threads.
                        # Therefore we should update thread_id for every main thread in the new process.
                        # TODO: Investigate: should we do this for all threads in threading.enumerate()?
                        # (i.e.: if a fork happens on Linux, this seems likely).
                        old_thread_id = get_thread_id(t)
                        if old_thread_id != 'console_main':
                            # The console_main is a special thread id used in the console and its id should never be reset
                            # (otherwise we may no longer be able to get its variables -- see: https://www.brainwy.com/tracker/PyDev/776).
                            clear_cached_thread_id(t)
                            clear_cached_thread_id(threadingCurrentThread())

                        thread_id = get_thread_id(t)
                        curr_thread_id = get_thread_id(threadingCurrentThread())
                        if pydevd_vars.has_additional_frames_by_id(old_thread_id):
                            # Migrate custom frames registered under the old id.
                            frames_by_id = pydevd_vars.get_additional_frames_by_id(old_thread_id)
                            pydevd_vars.add_additional_frame_by_id(thread_id, frames_by_id)
                    else:
                        thread_id = get_thread_id(t)
                    program_threads_alive[thread_id] = t

                    if thread_id not in self._running_thread_ids:
                        if not hasattr(t, 'additional_info'):
                            # see http://sourceforge.net/tracker/index.php?func=detail&aid=1955428&group_id=85796&atid=577329
                            # Let's create the additional info right away!
                            t.additional_info = PyDBAdditionalThreadInfo()
                        self._running_thread_ids[thread_id] = t
                        self.writer.add_command(self.cmd_factory.make_thread_created_message(t))

                    queue = self.get_internal_queue(thread_id)
                    cmdsToReadd = []  # some commands must be processed by the thread itself... if that's the case,
                                      # we will re-add the commands to the queue after executing.
                    try:
                        while True:
                            int_cmd = queue.get(False)

                            if not self.mpl_hooks_in_debug_console and isinstance(int_cmd, InternalConsoleExec):
                                # add import hooks for matplotlib patches if only debug console was started
                                try:
                                    self.init_matplotlib_in_debug_console()
                                    self.mpl_in_use = True
                                except:
                                    pydevd_log(2, "Matplotlib support in debug console failed", traceback.format_exc())
                                self.mpl_hooks_in_debug_console = True

                            if int_cmd.can_be_executed_by(curr_thread_id):
                                pydevd_log(2, "processing internal command ", str(int_cmd))
                                int_cmd.do_it(self)
                            else:
                                pydevd_log(2, "NOT processing internal command ", str(int_cmd))
                                cmdsToReadd.append(int_cmd)

                    except _queue.Empty:  #@UndefinedVariable
                        # Queue drained; put back the commands owned by other threads.
                        for int_cmd in cmdsToReadd:
                            queue.put(int_cmd)
                        # this is how we exit

            thread_ids = list(self._running_thread_ids.keys())
            for tId in thread_ids:
                if tId not in program_threads_alive:
                    program_threads_dead.append(tId)
        finally:
            self._lock_running_thread_ids.release()

        for tId in program_threads_dead:
            try:
                self._process_thread_not_alive(tId)
            except:
                sys.stderr.write('Error iterating through %s (%s) - %s\n' % (
                    program_threads_alive, program_threads_alive.__class__, dir(program_threads_alive)))
                raise

        if len(program_threads_alive) == 0:
            # No user threads left: shut the session and the daemon threads down.
            self.finish_debugging_session()
            for t in all_threads:
                if hasattr(t, 'do_kill_pydev_thread'):
                    t.do_kill_pydev_thread()
    finally:
        self._main_lock.release()
def disable_tracing_while_running_if_frame_eval(self):
    # With frame evaluation active, tracing is not needed while running:
    # route the trace function to the no-op dispatcher.
    pydevd_tracing.settrace_while_running_if_frame_eval(self, self.dummy_trace_dispatch)
def enable_tracing_in_frames_while_running_if_frame_eval(self):
    # Re-enable the real trace dispatcher (needed e.g. for stepping).
    pydevd_tracing.settrace_while_running_if_frame_eval(self, self.trace_dispatch)
def set_tracing_for_untraced_contexts_if_not_frame_eval(self, ignore_frame=None, overwrite_prev_trace=False):
    """Like set_tracing_for_untraced_contexts, but a no-op when frame evaluation is active."""
    if self.frame_eval_func is None:
        self.set_tracing_for_untraced_contexts(ignore_frame, overwrite_prev_trace)
def set_tracing_for_untraced_contexts(self, ignore_frame=None, overwrite_prev_trace=False):
    """Install the trace dispatcher on the live frames of all existing threads.

    No-op when frame evaluation is active (tracing is then unnecessary).
    *ignore_frame* is skipped (typically the caller's own frame).
    """
    # Enable the tracing for existing threads (because there may be frames being executed that
    # are currently untraced).
    if self.frame_eval_func is not None:
        return
    threads = threadingEnumerate()
    try:
        for t in threads:
            if getattr(t, 'is_pydev_daemon_thread', False):
                continue
            # TODO: optimize so that we only actually add that tracing if it's in
            # the new breakpoint context.
            additional_info = None
            try:
                additional_info = t.additional_info
            except AttributeError:
                pass  # that's ok, no info currently set
            if additional_info is not None:
                for frame in additional_info.iter_frames(t):
                    if frame is not ignore_frame:
                        self.set_trace_for_frame_and_parents(frame, overwrite_prev_trace=overwrite_prev_trace)
    finally:
        # Drop all frame references so we don't keep frames (and their
        # locals) alive longer than necessary.
        frame = None
        t = None
        threads = None
        additional_info = None
def consolidate_breakpoints(self, file, id_to_breakpoint, breakpoints):
    """Rebuild breakpoints[file] as a line -> breakpoint mapping and drop stale skip caches."""
    breakpoints[file] = dict(
        (bp.line, bp) for _bp_id, bp in dict_iter_items(id_to_breakpoint))
    # Any cached "nothing to trace here" decision may now be wrong.
    global_cache_skips.clear()
    global_cache_frame_skips.clear()
def add_break_on_exception(
    self,
    exception,
    condition,
    expression,
    notify_always,
    notify_on_terminate,
    notify_on_first_raise_only,
    ignore_libraries=False
):
    """Register an exception breakpoint.

    Returns the created ExceptionBreakpoint, or None when the exception
    class could not be imported.
    """
    try:
        eb = ExceptionBreakpoint(
            exception,
            condition,
            expression,
            notify_always,
            notify_on_terminate,
            notify_on_first_raise_only,
            ignore_libraries
        )
    except ImportError:
        pydev_log.error("Error unable to add break on exception for: %s (exception could not be imported)\n" % (exception,))
        return None

    if eb.notify_on_terminate:
        # Copy-and-swap: never mutate the dict in place so concurrent
        # readers always see a consistent mapping.
        cp = self.break_on_uncaught_exceptions.copy()
        cp[exception] = eb
        if DebugInfoHolder.DEBUG_TRACE_BREAKPOINTS > 0:
            pydev_log.error("Exceptions to hook on terminate: %s\n" % (cp,))
        self.break_on_uncaught_exceptions = cp

    if eb.notify_always:
        cp = self.break_on_caught_exceptions.copy()
        cp[exception] = eb
        if DebugInfoHolder.DEBUG_TRACE_BREAKPOINTS > 0:
            pydev_log.error("Exceptions to hook always: %s\n" % (cp,))
        self.break_on_caught_exceptions = cp

    return eb
def update_after_exceptions_added(self, added):
    """Refresh the exception hooks for newly added breakpoints (each hook at most once)."""
    refreshed_uncaught = False
    refreshed_caught = False
    for eb in added:
        if eb.notify_on_terminate and not refreshed_uncaught:
            refreshed_uncaught = True
            update_exception_hook(self)
        if eb.notify_always and not refreshed_caught:
            refreshed_caught = True
            self.set_tracing_for_untraced_contexts_if_not_frame_eval()
def _process_thread_not_alive(self, threadId):
    """Forget a dead thread and notify the IDE that it was killed."""
    self._lock_running_thread_ids.acquire()
    try:
        thread = self._running_thread_ids.pop(threadId, None)
        if thread is None:
            return
        # Mark the kill as notified exactly once.
        if not thread.additional_info.pydev_notify_kill:
            thread.additional_info.pydev_notify_kill = True
    finally:
        self._lock_running_thread_ids.release()

    self.writer.add_command(self.cmd_factory.make_thread_killed_message(threadId))
def set_suspend(self, thread, stop_reason):
    """Mark *thread* as suspended and record why it stopped."""
    info = thread.additional_info
    info.suspend_type = PYTHON_SUSPEND
    info.pydev_state = STATE_SUSPEND
    thread.stop_reason = stop_reason
    # If a conditional breakpoint raised during evaluation, forward the
    # details to the IDE (when configured to suspend in that case).
    if stop_reason == CMD_SET_BREAK and self.suspend_on_breakpoint_exception:
        self._send_breakpoint_condition_exception(thread)
def _send_breakpoint_condition_exception(self, thread):
    """Post breakpoint-condition-evaluation exception details to the IDE."""
    thread_id = get_thread_id(thread)
    exc_info = thread.additional_info.conditional_breakpoint_exception
    # Expected shape: (exception_type, stacktrace).
    if exc_info and len(exc_info) == 2:
        exc_type, stacktrace = exc_info
        int_cmd = InternalGetBreakpointException(thread_id, exc_type, stacktrace)
        # Clear the stored details so the same error is not reported twice.
        thread.additional_info.conditional_breakpoint_exception = None
        self.post_internal_command(int_cmd, thread_id)
def send_caught_exception_stack(self, thread, arg, curr_frame_id):
    """Report a caught exception (and where we stopped) to the IDE.

    *arg* is the (exception type, description, traceback object) triple.
    """
    tid = get_thread_id(thread)
    self.post_internal_command(InternalSendCurrExceptionTrace(tid, arg, curr_frame_id), tid)
def send_caught_exception_stack_proceeded(self, thread):
    """Tell the IDE that *thread* resumed and no longer shows an exception trace."""
    tid = get_thread_id(thread)
    self.post_internal_command(InternalSendCurrExceptionTraceProceeded(tid), tid)
    # Process immediately so the notification is not delayed.
    self.process_internal_commands()
def send_process_created_message(self):
    """Notify the IDE that a new process has been created."""
    self.writer.add_command(self.cmd_factory.make_process_created_message())
def set_next_statement(self, frame, event, func_name, next_line):
    """Try to move the program counter of *frame* to *next_line*.

    Only possible on 'line'/'exception' events and only inside the frame at
    the bottom of the stack. Returns (stop, old_line, response_msg).
    """
    stop = False
    response_msg = ""
    old_line = frame.f_lineno
    if event in ('line', 'exception'):
        # If we're already in the correct context, we have to stop it now, because we can act only on
        # line events -- if a return was the next statement it wouldn't work (so, we have this code
        # repeated at pydevd_frame).
        curr_func_name = frame.f_code.co_name
        if curr_func_name in ('?', '<module>'):
            # the global context is represented by an empty name
            curr_func_name = ''
        if curr_func_name == func_name:
            if frame.f_lineno == next_line:
                stop = True
            else:
                # f_lineno may only be assigned while a local trace function
                # is installed; install one temporarily if needed.
                if frame.f_trace is None:
                    frame.f_trace = self.trace_dispatch
                frame.f_lineno = next_line
                frame.f_trace = None
                stop = True
        else:
            response_msg = "jump is available only within the bottom frame"
    return stop, old_line, response_msg
def cancel_async_evaluation(self, thread_id, frame_id):
    """Ask the async-evaluation daemon thread for (thread_id, frame_id) to stop."""
    self._main_lock.acquire()
    try:
        for t in threadingEnumerate():
            if getattr(t, 'is_pydev_daemon_thread', False) and hasattr(t, 'cancel_event') \
                    and t.thread_id == thread_id and t.frame_id == frame_id:
                t.cancel_event.set()
    except:
        # Best effort: a failure to cancel must never break the caller.
        pass
    finally:
        self._main_lock.release()
def do_wait_suspend(self, thread, frame, event, arg, suspend_type="trace", send_suspend_message=True):  #@UnusedVariable
    """ busy waits until the thread state changes to RUN
    it expects thread's state as attributes of the thread.
    Upon running, processes any outstanding Stepping commands.
    """
    self.process_internal_commands()

    if send_suspend_message:
        # Tell the IDE this thread is suspended and why.
        message = thread.additional_info.pydev_message
        cmd = self.cmd_factory.make_thread_suspend_message(get_thread_id(thread), frame, thread.stop_reason, message, suspend_type)
        self.writer.add_command(cmd)

    CustomFramesContainer.custom_frames_lock.acquire()  # @UndefinedVariable
    try:
        from_this_thread = []

        for frame_id, custom_frame in dict_iter_items(CustomFramesContainer.custom_frames):
            if custom_frame.thread_id == thread.ident:
                # print >> sys.stderr, 'Frame created: ', frame_id
                self.writer.add_command(self.cmd_factory.make_custom_frame_created_message(frame_id, custom_frame.name))
                self.writer.add_command(self.cmd_factory.make_thread_suspend_message(frame_id, custom_frame.frame, CMD_THREAD_SUSPEND, "", suspend_type))

            from_this_thread.append(frame_id)
    finally:
        CustomFramesContainer.custom_frames_lock.release()  # @UndefinedVariable

    info = thread.additional_info
    if info.pydev_state == STATE_SUSPEND and not self._finish_debugging_session:
        # before every stop check if matplotlib modules were imported inside script code
        self._activate_mpl_if_needed()

    # Busy-wait until the IDE resumes this thread (or the session ends).
    while info.pydev_state == STATE_SUSPEND and not self._finish_debugging_session:
        if self.mpl_in_use:
            # call input hooks if only matplotlib is in use
            self._call_mpl_hook()

        self.process_internal_commands()
        time.sleep(0.01)

    self.cancel_async_evaluation(get_thread_id(thread), str(id(frame)))

    # process any stepping instructions
    if info.pydev_step_cmd == CMD_STEP_INTO or info.pydev_step_cmd == CMD_STEP_INTO_MY_CODE:
        info.pydev_step_stop = None
        info.pydev_smart_step_stop = None

    elif info.pydev_step_cmd == CMD_STEP_OVER:
        info.pydev_step_stop = frame
        info.pydev_smart_step_stop = None
        self.set_trace_for_frame_and_parents(frame)

    elif info.pydev_step_cmd == CMD_SMART_STEP_INTO:
        self.set_trace_for_frame_and_parents(frame)
        info.pydev_step_stop = None
        info.pydev_smart_step_stop = frame

    elif info.pydev_step_cmd == CMD_RUN_TO_LINE or info.pydev_step_cmd == CMD_SET_NEXT_STATEMENT:
        self.set_trace_for_frame_and_parents(frame)
        stop = False
        response_msg = ""
        old_line = frame.f_lineno
        if not IS_PYCHARM:
            stop, _, response_msg = self.set_next_statement(frame, event, info.pydev_func_name, info.pydev_next_line)
            if stop:
                # Re-suspend at the new line (recursive wait).
                info.pydev_state = STATE_SUSPEND
                self.do_wait_suspend(thread, frame, event, arg, "trace")
                return
        else:
            try:
                stop, old_line, response_msg = self.set_next_statement(frame, event, info.pydev_func_name, info.pydev_next_line)
            except ValueError as e:
                response_msg = "%s" % e
            finally:
                # Always report the jump status back to the IDE.
                seq = info.pydev_message
                cmd = self.cmd_factory.make_set_next_stmnt_status_message(seq, stop, response_msg)
                self.writer.add_command(cmd)
                info.pydev_message = ''
            if stop:
                info.pydev_state = STATE_RUN
                # `f_line` should be assigned within a tracing function, so, we can't assign it here
                # for the frame evaluation debugger. For tracing debugger it will be assigned, but we should
                # revert the previous value, because both debuggers should behave the same way
                try:
                    self.set_next_statement(frame, event, info.pydev_func_name, old_line)
                except:
                    pass
            else:
                info.pydev_step_cmd = -1
                info.pydev_state = STATE_SUSPEND
                thread.stop_reason = CMD_THREAD_SUSPEND
                # return to the suspend state and wait for other command
                self.do_wait_suspend(thread, frame, event, arg, "trace", send_suspend_message=False)
                return

    elif info.pydev_step_cmd == CMD_STEP_RETURN:
        back_frame = frame.f_back
        if back_frame is not None:
            # steps back to the same frame (in a return call it will stop in the 'back frame' for the user)
            info.pydev_step_stop = frame
            self.set_trace_for_frame_and_parents(frame)
        else:
            # No back frame?!? -- this happens in jython when we have some frame created from an awt event
            # (the previous frame would be the awt event, but this doesn't make part of 'jython', only 'java')
            # so, if we're doing a step return in this situation, it's the same as just making it run
            info.pydev_step_stop = None
            info.pydev_step_cmd = -1
            info.pydev_state = STATE_RUN

    if self.frame_eval_func is not None and info.pydev_state == STATE_RUN:
        if info.pydev_step_cmd == -1:
            # Plain resume: hand frames back to the no-op dispatcher.
            if not self.do_not_use_frame_eval:
                self.SetTrace(self.dummy_trace_dispatch)
                self.set_trace_for_frame_and_parents(frame, overwrite_prev_trace=True, dispatch_func=dummy_trace_dispatch)
        else:
            self.set_trace_for_frame_and_parents(frame, overwrite_prev_trace=True)
            # enable old tracing function for stepping
            self.SetTrace(self.trace_dispatch)

    del frame
    cmd = self.cmd_factory.make_thread_run_message(get_thread_id(thread), info.pydev_step_cmd)
    self.writer.add_command(cmd)

    CustomFramesContainer.custom_frames_lock.acquire()  # @UndefinedVariable
    try:
        # The ones that remained on last_running must now be removed.
        for frame_id in from_this_thread:
            # print >> sys.stderr, 'Removing created frame: ', frame_id
            self.writer.add_command(self.cmd_factory.make_thread_killed_message(frame_id))
    finally:
        CustomFramesContainer.custom_frames_lock.release()  # @UndefinedVariable
def handle_post_mortem_stop(self, thread, frame, frames_byid, exception):
    """Suspend *thread* on *frame* for post-mortem inspection of *exception*."""
    pydev_log.debug("We are stopping in post-mortem\n")
    thread_id = get_thread_id(thread)
    # Register the traceback frames so the IDE can inspect variables in them.
    pydevd_vars.add_additional_frame_by_id(thread_id, frames_byid)
    try:
        add_exception_to_frame(frame, exception)
        self.set_suspend(thread, CMD_ADD_EXCEPTION_BREAK)
        self.do_wait_suspend(thread, frame, 'exception', None, "trace")
    except:
        pydev_log.error("We've got an error while stopping in post-mortem: %s\n" % sys.exc_info()[0])
    finally:
        # Always unregister, even when the suspend itself failed.
        pydevd_vars.remove_additional_frame_by_id(thread_id)
def set_trace_for_frame_and_parents(self, frame, also_add_to_passed_frame=True, overwrite_prev_trace=False, dispatch_func=None):
    """Install *dispatch_func* (default: self.trace_dispatch) on *frame* and all of its parents.

    When *also_add_to_passed_frame* is false, only the parent frames are touched.
    """
    if dispatch_func is None:
        dispatch_func = self.trace_dispatch
    current = frame if also_add_to_passed_frame else frame.f_back
    while current is not None:
        self.update_trace(current, dispatch_func, overwrite_prev_trace)
        current = current.f_back
    # Drop frame references eagerly.
    del frame
    del current
def update_trace(self, frame, dispatch_func, overwrite_prev):
    """Attach *dispatch_func* as the local trace function of *frame*.

    An existing tracer is replaced only when *overwrite_prev* is true,
    except that a leftover 'trace_exception' tracer (a Python 2 bound
    method) is reset back to its owner's trace_dispatch.
    """
    existing = frame.f_trace
    if existing is None or overwrite_prev:
        frame.f_trace = dispatch_func
    else:
        try:
            # If it's the trace_exception, go back to the frame trace dispatch!
            if existing.im_func.__name__ == 'trace_exception':
                frame.f_trace = existing.im_self.trace_dispatch
        except AttributeError:
            # Not a Python 2 bound method -- leave the tracer untouched.
            pass
        frame = frame.f_back
    del frame
def prepare_to_run(self):
    ''' Shared code to prepare debugging by installing traces and registering threads '''
    if self.signature_factory is not None or self.thread_analyser is not None:
        # we need all data to be sent to IDE even after program finishes
        CheckOutputThread(self).start()
        # turn off frame evaluation for concurrency visualization
        self.frame_eval_func = None

    self.patch_threads()
    pydevd_tracing.SetTrace(self.trace_dispatch, self.frame_eval_func, self.dummy_trace_dispatch)
    # There is no need to set tracing function if frame evaluation is available. Moreover, there is no need to patch thread
    # functions, because frame evaluation function is set to all threads by default.

    PyDBCommandThread(self).start()
    if show_tracing_warning or show_frame_eval_warning:
        # Let the IDE surface that the Cython speedups / frame-eval mode
        # could not be used.
        cmd = self.cmd_factory.make_show_cython_warning_message()
        self.writer.add_command(cmd)
def patch_threads(self):
    """Make future threads traced: hook threading.settrace and patch thread-creation APIs."""
    try:
        # not available in jython!
        import threading
        threading.settrace(self.trace_dispatch)  # for all future threads
    except:
        pass

    from _pydev_bundle.pydev_monkey import patch_thread_modules
    patch_thread_modules()
def run(self, file, globals=None, locals=None, is_module=False, set_trace=True):
    """Launch the user's program under the debugger.

    *file* is a script path, a directory containing __main__.py, or (when
    *is_module* is true) a 'module[:entry_point]' spec. Returns the globals
    dict the program ran with.
    """
    module_name = None
    if is_module:
        # Split an optional ':entry_point' suffix and resolve the module to
        # its source file.
        file, _, entry_point_fn = file.partition(':')
        module_name = file
        filename = get_fullname(file)
        if filename is None:
            sys.stderr.write("No module named %s\n" % file)
            return
        else:
            file = filename

    if os.path.isdir(file):
        new_target = os.path.join(file, '__main__.py')
        if os.path.isfile(new_target):
            file = new_target

    if globals is None:
        m = save_main_module(file, 'pydevd')
        globals = m.__dict__
        try:
            globals['__builtins__'] = __builtins__
        except NameError:
            pass  # Not there on Jython...

    if locals is None:
        locals = globals

    if set_trace:
        # Predefined (writable) attributes: __name__ is the module's name;
        # __doc__ is the module's documentation string, or None if unavailable;
        # __file__ is the pathname of the file from which the module was loaded,
        # if it was loaded from a file. The __file__ attribute is not present for
        # C modules that are statically linked into the interpreter; for extension modules
        # loaded dynamically from a shared library, it is the pathname of the shared library file.

        # I think this is an ugly hack, bug it works (seems to) for the bug that says that sys.path should be the same in
        # debug and run.
        # NOTE(review): `m` is only bound when globals was None above; calling
        # run(..., globals=..., set_trace=True) would raise NameError here -- confirm
        # callers never do that.
        if m.__file__.startswith(sys.path[0]):
            # print >> sys.stderr, 'Deleting: ', sys.path[0]
            del sys.path[0]

        if not is_module:
            # now, the local directory has to be added to the pythonpath
            # sys.path.insert(0, os.getcwd())
            # Changed: it's not the local directory, but the directory of the file launched
            # The file being run must be in the pythonpath (even if it was not before)
            sys.path.insert(0, os.path.split(file)[0])

        while not self.ready_to_run:
            time.sleep(0.1)  # busy wait until we receive run command

    if self.break_on_caught_exceptions or (self.plugin and self.plugin.has_exception_breaks()) or self.signature_factory:
        # disable frame evaluation if there are exception breakpoints with 'On raise' activation policy
        # or if there are plugin exception breakpoints or if collecting run-time types is enabled
        self.frame_eval_func = None

    # call prepare_to_run when we already have all information about breakpoints
    self.prepare_to_run()

    if self.thread_analyser is not None:
        wrap_threads()
        t = threadingCurrentThread()
        self.thread_analyser.set_start_time(cur_time())
        send_message("threading_event", 0, t.getName(), get_thread_id(t), "thread", "start", file, 1, None, parent=get_thread_id(t))

    if self.asyncio_analyser is not None:
        # we don't have main thread in asyncio graph, so we should add a fake event
        send_message("asyncio_event", 0, "Task", "Task", "thread", "stop", file, 1, frame=None, parent=None)

    try:
        if INTERACTIVE_MODE_AVAILABLE:
            self.init_matplotlib_support()
    except:
        sys.stderr.write("Matplotlib support in debugger failed\n")
        traceback.print_exc()

    if hasattr(sys, 'exc_clear'):
        # we should clean exception information in Python 2, before user's code execution
        sys.exc_clear()

    if not is_module:
        pydev_imports.execfile(file, globals, locals)  # execute the script
    else:
        # treat ':' as a seperator between module and entry point function
        # if there is no entry point we run we same as with -m switch. Otherwise we perform
        # an import and execute the entry point
        if entry_point_fn:
            mod = __import__(module_name, level=0, fromlist=[entry_point_fn], globals=globals, locals=locals)
            func = getattr(mod, entry_point_fn)
            func()
        else:
            # Run with the -m switch
            import runpy
            if hasattr(runpy, '_run_module_as_main'):
                # Newer versions of Python actually use this when the -m switch is used.
                if sys.version_info[:2] <= (2, 6):
                    runpy._run_module_as_main(module_name, set_argv0=False)
                else:
                    runpy._run_module_as_main(module_name, alter_argv=False)
            else:
                runpy.run_module(module_name)
    return globals
def exiting(self):
    """Flush remaining output and send the exit notification to the IDE."""
    sys.stdout.flush()
    sys.stderr.flush()
    self.check_output_redirect()
    self.writer.add_command(self.cmd_factory.make_exit_message())
def wait_for_commands(self, globals):
    """Serve IDE commands forever against a synthetic 'Console' frame over *globals*.

    Used after the program finished (or in console mode); never returns.
    """
    self._activate_mpl_if_needed()

    thread = threading.currentThread()
    from _pydevd_bundle import pydevd_frame_utils
    # Fake frame whose locals/globals are the given globals dict, so the IDE
    # can evaluate expressions against it.
    frame = pydevd_frame_utils.Frame(None, -1, pydevd_frame_utils.FCode("Console",
                                     os.path.abspath(os.path.dirname(__file__))), globals, globals)
    thread_id = get_thread_id(thread)
    from _pydevd_bundle import pydevd_vars
    pydevd_vars.add_additional_frame_by_id(thread_id, {id(frame): frame})

    cmd = self.cmd_factory.make_show_console_message(thread_id, frame)
    self.writer.add_command(cmd)

    while True:
        if self.mpl_in_use:
            # call input hooks if only matplotlib is in use
            self._call_mpl_hook()
        self.process_internal_commands()
        time.sleep(0.01)
# Class-level aliases: expose the dispatch helpers (presumably imported from
# the speedup/frame-eval modules near the top of the file -- confirm there) as
# attributes, so instances can use self.trace_dispatch etc. and individual
# instances/tests can override them.
trace_dispatch = _trace_dispatch
frame_eval_func = frame_eval_func
dummy_trace_dispatch = dummy_trace_dispatch
enable_cache_frames_without_breaks = enable_cache_frames_without_breaks
def set_debug(setup):
    """Turn on verbose debugger diagnostics in the given setup dict."""
    setup.update({
        'DEBUG_RECORD_SOCKET_READS': True,
        'DEBUG_TRACE_BREAKPOINTS': 1,
        'DEBUG_TRACE_LEVEL': 3,
    })
def enable_qt_support(qt_support_mode):
    # Monkey-patch the selected Qt binding so Qt event-loop threads are
    # debuggable. NOTE(review): the set of valid modes is defined by
    # pydev_monkey_qt.patch_qt -- confirm there.
    from _pydev_bundle import pydev_monkey_qt
    pydev_monkey_qt.patch_qt(qt_support_mode)
def usage(doExit=0):
    """Print the command-line usage; exit the process when *doExit* is truthy."""
    out = sys.stdout
    out.write('Usage:\n')
    out.write('pydevd.py --port N [(--client hostname) | --server] --file executable [file_options]\n')
    if doExit:
        sys.exit(0)
def init_stdout_redirect():
    # Idempotently install the stdout tee: output keeps going to the real
    # stdout and is additionally captured in sys.stdoutBuf for the IDE.
    if not getattr(sys, 'stdoutBuf', None):
        sys.stdoutBuf = pydevd_io.IOBuf()
        sys.stdout_original = sys.stdout
        sys.stdout = pydevd_io.IORedirector(sys.stdout, sys.stdoutBuf)  #@UndefinedVariable
def init_stderr_redirect():
    # Idempotently install the stderr tee: output keeps going to the real
    # stderr and is additionally captured in sys.stderrBuf for the IDE.
    if not getattr(sys, 'stderrBuf', None):
        sys.stderrBuf = pydevd_io.IOBuf()
        sys.stderr_original = sys.stderr
        sys.stderr = pydevd_io.IORedirector(sys.stderr, sys.stderrBuf)  #@UndefinedVariable
def has_data_to_redirect():
    """Return True when either redirect buffer currently holds unsent output."""
    for attr in ('stdoutBuf', 'stderrBuf'):
        buf = getattr(sys, attr, None)
        if buf and not buf.empty():
            return True
    return False
#=======================================================================================================================
# settrace
#=======================================================================================================================
def settrace(
    host=None,
    stdoutToServer=False,
    stderrToServer=False,
    port=5678,
    suspend=True,
    trace_only_current_thread=False,
    overwrite_prev_trace=False,
    patch_multiprocessing=False,
):
    '''Sets the tracing function with the pydev debug function and initializes needed facilities.

    @param host: the user may specify another host, if the debug server is not in the same machine (default is the local
        host)

    @param stdoutToServer: when this is true, the stdout is passed to the debug server

    @param stderrToServer: when this is true, the stderr is passed to the debug server
        so that they are printed in its console and not in this process console.

    @param port: specifies which port to use for communicating with the server (note that the server must be started
        in the same port). @note: currently it's hard-coded at 5678 in the client

    @param suspend: whether a breakpoint should be emulated as soon as this function is called.

    @param trace_only_current_thread: determines if only the current thread will be traced or all current and future
        threads will also have the tracing enabled.

    @param overwrite_prev_trace: if True we'll reset the frame.f_trace of frames which are already being traced

    @param patch_multiprocessing: if True we'll patch the functions which create new processes so that launched
        processes are debugged.
    '''
    # Serialize concurrent settrace() calls: connecting twice at the same
    # time would race on the module-level connection state.
    _set_trace_lock.acquire()
    try:
        _locked_settrace(
            host,
            stdoutToServer,
            stderrToServer,
            port,
            suspend,
            trace_only_current_thread,
            overwrite_prev_trace,
            patch_multiprocessing,
        )
    finally:
        _set_trace_lock.release()
_set_trace_lock = thread.allocate_lock()
def _locked_settrace(
    host,
    stdoutToServer,
    stderrToServer,
    port,
    suspend,
    trace_only_current_thread,
    overwrite_prev_trace,
    patch_multiprocessing,
    ):
    # Body of settrace(); must be called with _set_trace_lock held.
    if patch_multiprocessing:
        try:
            from _pydev_bundle import pydev_monkey
        except:
            pass
        else:
            pydev_monkey.patch_new_process_functions()

    global connected
    global bufferStdOutToServer
    global bufferStdErrToServer

    if not connected:
        # First attach in this process: create the debugger and connect out.
        pydevd_vm_type.setup_type()

        if SetupHolder.setup is None:
            setup = {
                'client': host,  # dispatch expects client to be set to the host address when server is False
                'server': False,
                'port': int(port),
                'multiprocess': patch_multiprocessing,
            }
            SetupHolder.setup = setup

        debugger = PyDB()
        debugger.connect(host, port)  # Note: connect can raise error.

        # Mark connected only if it actually succeeded.
        connected = True
        bufferStdOutToServer = stdoutToServer
        bufferStdErrToServer = stderrToServer

        if bufferStdOutToServer:
            init_stdout_redirect()

        if bufferStdErrToServer:
            init_stderr_redirect()

        patch_stdin(debugger)

        # Start tracing from the caller's frame upwards.
        debugger.set_trace_for_frame_and_parents(get_frame(), False, overwrite_prev_trace=overwrite_prev_trace)

        CustomFramesContainer.custom_frames_lock.acquire()  # @UndefinedVariable
        try:
            for _frameId, custom_frame in dict_iter_items(CustomFramesContainer.custom_frames):
                debugger.set_trace_for_frame_and_parents(custom_frame.frame, False)
        finally:
            CustomFramesContainer.custom_frames_lock.release()  # @UndefinedVariable

        t = threadingCurrentThread()
        try:
            additional_info = t.additional_info
        except AttributeError:
            additional_info = PyDBAdditionalThreadInfo()
            t.additional_info = additional_info

        while not debugger.ready_to_run:
            time.sleep(0.1)  # busy wait until we receive run command

        global forked
        frame_eval_for_tracing = debugger.frame_eval_func
        if frame_eval_func is not None and not forked:
            # Disable frame evaluation for Remote Debug Server
            frame_eval_for_tracing = None

        # note that we do that through pydevd_tracing.SetTrace so that the tracing
        # is not warned to the user!
        pydevd_tracing.SetTrace(debugger.trace_dispatch, frame_eval_for_tracing, debugger.dummy_trace_dispatch)

        if not trace_only_current_thread:
            # Trace future threads?
            debugger.patch_threads()

            # As this is the first connection, also set tracing for any untraced threads
            debugger.set_tracing_for_untraced_contexts(ignore_frame=get_frame(), overwrite_prev_trace=overwrite_prev_trace)

        # Stop the tracing as the last thing before the actual shutdown for a clean exit.
        atexit.register(stoptrace)

        PyDBCommandThread(debugger).start()
        CheckOutputThread(debugger).start()

        #Suspend as the last thing after all tracing is in place.
        if suspend:
            debugger.set_suspend(t, CMD_THREAD_SUSPEND)
    else:
        # ok, we're already in debug mode, with all set, so, let's just set the break
        debugger = get_global_debugger()

        debugger.set_trace_for_frame_and_parents(get_frame(), False)

        t = threadingCurrentThread()
        try:
            additional_info = t.additional_info
        except AttributeError:
            additional_info = PyDBAdditionalThreadInfo()
            t.additional_info = additional_info

        pydevd_tracing.SetTrace(debugger.trace_dispatch, debugger.frame_eval_func, debugger.dummy_trace_dispatch)

        if not trace_only_current_thread:
            # Trace future threads?
            debugger.patch_threads()

        if suspend:
            debugger.set_suspend(t, CMD_THREAD_SUSPEND)
def stoptrace():
    """Undo settrace(): restore tracing hooks, detach and kill debugger threads."""
    global connected
    if connected:
        pydevd_tracing.restore_sys_set_trace_func()
        sys.settrace(None)
        try:
            #not available in jython!
            threading.settrace(None)  # for all future threads
        except:
            pass

        from _pydev_bundle.pydev_monkey import undo_patch_thread_modules
        undo_patch_thread_modules()

        debugger = get_global_debugger()

        if debugger:
            # Replace the dispatch function with a no-op on every live frame so
            # already-traced code stops reporting to the (now gone) server.
            debugger.set_trace_for_frame_and_parents(
                get_frame(), also_add_to_passed_frame=True, overwrite_prev_trace=True, dispatch_func=lambda *args:None)
            debugger.exiting()

            kill_all_pydev_threads()

        connected = False
class Dispatcher(object):
    """Short-lived client used in multiprocess mode to ask the IDE for a
    dedicated debug port for this (child) process.

    connect() blocks until the port arrives: the reader runs inline in the
    calling thread (see DispatchReader), after which self.port holds the
    assigned port.
    """
    def __init__(self):
        self.port = None

    def connect(self, host, port):
        self.host = host
        self.port = port
        self.client = start_client(self.host, self.port)
        self.reader = DispatchReader(self)
        self.reader.pydev_do_not_trace = False #we run reader in the same thread so we don't want to loose tracing
        self.reader.run()

    def close(self):
        # Best-effort shutdown: the reader may already be gone.
        try:
            self.reader.do_kill_pydev_thread()
        except :
            pass
class DispatchReader(ReaderThread):
    """ReaderThread variant run synchronously just to receive the port
    assignment (command id 99) from the IDE."""
    def __init__(self, dispatcher):
        self.dispatcher = dispatcher
        ReaderThread.__init__(self, self.dispatcher.client)

    def _on_run(self):
        # We run inline in the caller's thread, so don't mark it as a pydev
        # daemon thread (it must remain traceable afterwards).
        dummy_thread = threading.currentThread()
        dummy_thread.is_pydev_daemon_thread = False
        return ReaderThread._on_run(self)

    def handle_except(self):
        ReaderThread.handle_except(self)

    def process_command(self, cmd_id, seq, text):
        if cmd_id == 99:
            # Command 99 carries the port reserved for this child process;
            # store it and stop reading.
            self.dispatcher.port = int(text)
            self.killReceived = True
# How child processes obtain their debug port in multiprocess runs.
DISPATCH_APPROACH_NEW_CONNECTION = 1  # Used by PyDev
DISPATCH_APPROACH_EXISTING_CONNECTION = 2  # Used by PyCharm
DISPATCH_APPROACH = DISPATCH_APPROACH_NEW_CONNECTION
def dispatch():
    """Return the (host, port) a new process should use to attach.

    Under DISPATCH_APPROACH_EXISTING_CONNECTION (PyCharm) a Dispatcher asks
    the IDE for a fresh port; otherwise the configured port is reused as-is.
    """
    setup = SetupHolder.setup
    host, port = setup['client'], setup['port']
    if DISPATCH_APPROACH != DISPATCH_APPROACH_EXISTING_CONNECTION:
        # PyDev flavor: every new process reuses the configured endpoint.
        return host, port
    dispatcher = Dispatcher()
    try:
        dispatcher.connect(host, port)
        port = dispatcher.port
    finally:
        dispatcher.close()
    return host, port
def settrace_forked():
    '''
    When creating a fork from a process in the debugger, we need to reset the whole debugger environment!
    '''
    # Ask the IDE (or reuse the configured endpoint) for where to attach.
    host, port = dispatch()

    from _pydevd_bundle import pydevd_tracing
    pydevd_tracing.restore_sys_set_trace_func()

    if port is not None:
        # Reset the globals inherited from the parent process so settrace()
        # performs a full re-attach.
        global connected
        connected = False
        global forked
        forked = True

        # Custom frames belong to the parent's threads, not ours.
        custom_frames_container_init()

        settrace(
            host,
            port=port,
            suspend=False,
            trace_only_current_thread=False,
            overwrite_prev_trace=True,
            patch_multiprocessing=True,
        )
#=======================================================================================================================
# SetupHolder
#=======================================================================================================================
class SetupHolder:
    # Process-wide parsed command-line setup dict; populated by main() when
    # launched from the command line, or by _locked_settrace() on attach.
    setup = None
def apply_debugger_options(setup_options):
    """Apply optional debugger features requested on the command line.

    :type setup_options: dict[str, bool]
    :param setup_options: parsed options; missing keys fall back to the
        defaults below ('save-signatures' off, no Qt support).
    """
    default_options = {'save-signatures': False, 'qt-support': ''}
    default_options.update(setup_options)
    setup_options = default_options

    # Use the canonical accessor for consistency with settrace()/stoptrace()
    # above (GetGlobalDebugger is the legacy CamelCase alias of it).
    debugger = get_global_debugger()
    if setup_options['save-signatures']:
        if pydevd_vm_type.get_vm_type() == pydevd_vm_type.PydevdVmType.JYTHON:
            sys.stderr.write("Collecting run-time type information is not supported for Jython\n")
        else:
            # Only import it if we're going to use it!
            from _pydevd_bundle.pydevd_signature import SignatureFactory

            debugger.signature_factory = SignatureFactory()

    if setup_options['qt-support']:
        enable_qt_support(setup_options['qt-support'])
def patch_stdin(debugger):
    # Replace sys.stdin with a wrapper that notifies the debugger about
    # console input requests while delegating actual reads to the original.
    from _pydev_bundle.pydev_console_utils import DebugConsoleStdIn
    orig_stdin = sys.stdin
    sys.stdin = DebugConsoleStdIn(debugger, orig_stdin)
# Dispatch on_debugger_modules_loaded here, after all primary debugger modules are loaded
from _pydevd_bundle.pydevd_extension_api import DebuggerEventHandler
from _pydevd_bundle import pydevd_extension_utils

# Let third-party extension plugins react to the debugger becoming available.
for handler in pydevd_extension_utils.extensions_of_type(DebuggerEventHandler):
    handler.on_debugger_modules_loaded(debugger_version=__version__)
#=======================================================================================================================
# main
#=======================================================================================================================
def main():
    """Entry point when pydevd is launched from the command line.

    Parses argv, optionally enables multiprocess dispatching and Google App
    Engine integration, connects to the IDE and runs the target file/module
    under the debugger.
    """
    # parse the command line. --file is our last argument that is required
    try:
        from _pydevd_bundle.pydevd_command_line_handling import process_command_line
        setup = process_command_line(sys.argv)
        SetupHolder.setup = setup
    except ValueError:
        traceback.print_exc()
        usage(1)

    if setup['print-in-debugger-startup']:
        try:
            pid = ' (pid: %s)' % os.getpid()
        except:
            pid = ''
        sys.stderr.write("pydev debugger: starting%s\n" % pid)

    fix_getpass.fix_getpass()

    pydev_log.debug("Executing file %s" % setup['file'])
    pydev_log.debug("arguments: %s"% str(sys.argv))

    pydevd_vm_type.setup_type(setup.get('vm_type', None))

    # Verbose diagnostics can be forced via the environment; command-line
    # values (if present in setup) still win over the holder's defaults.
    if SHOW_DEBUG_INFO_ENV:
        set_debug(setup)

    DebugInfoHolder.DEBUG_RECORD_SOCKET_READS = setup.get('DEBUG_RECORD_SOCKET_READS', DebugInfoHolder.DEBUG_RECORD_SOCKET_READS)
    DebugInfoHolder.DEBUG_TRACE_BREAKPOINTS = setup.get('DEBUG_TRACE_BREAKPOINTS', DebugInfoHolder.DEBUG_TRACE_BREAKPOINTS)
    DebugInfoHolder.DEBUG_TRACE_LEVEL = setup.get('DEBUG_TRACE_LEVEL', DebugInfoHolder.DEBUG_TRACE_LEVEL)

    port = setup['port']
    host = setup['client']
    f = setup['file']
    fix_app_engine_debug = False

    debugger = PyDB()

    # Multiprocess handling: PyDev and PyCharm use different dispatching
    # strategies (see DISPATCH_APPROACH_* above).
    try:
        from _pydev_bundle import pydev_monkey
    except:
        pass #Not usable on jython 2.1
    else:
        if setup['multiprocess']: # PyDev
            pydev_monkey.patch_new_process_functions()

        elif setup['multiproc']: # PyCharm
            pydev_log.debug("Started in multiproc mode\n")
            global DISPATCH_APPROACH
            DISPATCH_APPROACH = DISPATCH_APPROACH_EXISTING_CONNECTION

            # Ask the IDE for the port this process should actually use.
            dispatcher = Dispatcher()
            try:
                dispatcher.connect(host, port)
                if dispatcher.port is not None:
                    port = dispatcher.port
                    pydev_log.debug("Received port %d\n" %port)
                    pydev_log.info("pydev debugger: process %d is connecting\n"% os.getpid())

                    try:
                        pydev_monkey.patch_new_process_functions()
                    except:
                        pydev_log.error("Error patching process functions\n")
                        traceback.print_exc()
                else:
                    pydev_log.error("pydev debugger: couldn't get port for new debug process\n")
            finally:
                dispatcher.close()
        else:
            pydev_log.info("pydev debugger: starting\n")

            try:
                pydev_monkey.patch_new_process_functions_with_warning()
            except:
                pydev_log.error("Error patching process functions\n")
                traceback.print_exc()

            # Only do this patching if we're not running with multiprocess turned on.
            if f.find('dev_appserver.py') != -1:
                if os.path.basename(f).startswith('dev_appserver.py'):
                    appserver_dir = os.path.dirname(f)
                    version_file = os.path.join(appserver_dir, 'VERSION')
                    if os.path.exists(version_file):
                        # Parse the App Engine SDK VERSION file; the special
                        # startup integration only applies from SDK 1.7 on.
                        try:
                            stream = open(version_file, 'r')
                            try:
                                for line in stream.read().splitlines():
                                    line = line.strip()
                                    if line.startswith('release:'):
                                        line = line[8:].strip()
                                        version = line.replace('"', '')
                                        version = version.split('.')
                                        if int(version[0]) > 1:
                                            fix_app_engine_debug = True

                                        elif int(version[0]) == 1:
                                            if int(version[1]) >= 7:
                                                # Only fix from 1.7 onwards
                                                fix_app_engine_debug = True
                                        break
                            finally:
                                stream.close()
                        except:
                            traceback.print_exc()

    try:
        # In the default run (i.e.: run directly on debug mode), we try to patch stackless as soon as possible
        # on a run where we have a remote debug, we may have to be more careful because patching stackless means
        # that if the user already had a stackless.set_schedule_callback installed, he'd loose it and would need
        # to call it again (because stackless provides no way of getting the last function which was registered
        # in set_schedule_callback).
        #
        # So, ideally, if there's an application using stackless and the application wants to use the remote debugger
        # and benefit from stackless debugging, the application itself must call:
        #
        # import pydevd_stackless
        # pydevd_stackless.patch_stackless()
        #
        # itself to be able to benefit from seeing the tasklets created before the remote debugger is attached.
        from _pydevd_bundle import pydevd_stackless
        pydevd_stackless.patch_stackless()
    except:
        # It's ok not having stackless there...
        try:
            sys.exc_clear() # the exception information should be cleaned in Python 2
        except:
            pass

    is_module = setup['module']
    patch_stdin(debugger)

    if fix_app_engine_debug:
        # Relaunch through dev_appserver with a startup script that re-enters
        # pydevd inside the App Engine child process.
        sys.stderr.write("pydev debugger: google app engine integration enabled\n")
        curr_dir = os.path.dirname(__file__)
        app_engine_startup_file = os.path.join(curr_dir, 'pydev_app_engine_debug_startup.py')

        sys.argv.insert(1, '--python_startup_script=' + app_engine_startup_file)
        import json
        setup['pydevd'] = __file__
        sys.argv.insert(2, '--python_startup_args=%s' % json.dumps(setup),)
        sys.argv.insert(3, '--automatic_restart=no')
        sys.argv.insert(4, '--max_module_instances=1')

        # Run the dev_appserver
        debugger.run(setup['file'], None, None, is_module, set_trace=False)
    else:
        if setup['save-threading']:
            debugger.thread_analyser = ThreadingLogger()
        if setup['save-asyncio']:
            if IS_PY34_OR_GREATER:
                debugger.asyncio_analyser = AsyncioLogger()

        apply_debugger_options(setup)

        try:
            debugger.connect(host, port)
        except:
            sys.stderr.write("Could not connect to %s: %s\n" % (host, port))
            traceback.print_exc()
            sys.exit(1)

        global connected
        connected = True # Mark that we're connected when started from inside ide.

        globals = debugger.run(setup['file'], None, None, is_module)

        if setup['cmd-line']:
            debugger.wait_for_commands(globals)
if __name__ == '__main__':
    # pydevd launched directly from the command line.
    main()
| apache-2.0 |
rcharp/toyota-flask | venv/lib/python2.7/site-packages/numpy/lib/polynomial.py | 35 | 37641 | """
Functions to operate on polynomials.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['poly', 'roots', 'polyint', 'polyder', 'polyadd',
'polysub', 'polymul', 'polydiv', 'polyval', 'poly1d',
'polyfit', 'RankWarning']
import re
import warnings
import numpy.core.numeric as NX
from numpy.core import isscalar, abs, finfo, atleast_1d, hstack, dot
from numpy.lib.twodim_base import diag, vander
from numpy.lib.function_base import trim_zeros, sort_complex
from numpy.lib.type_check import iscomplex, real, imag
from numpy.linalg import eigvals, lstsq, inv
class RankWarning(UserWarning):
    """
    Issued by `polyfit` when the Vandermonde matrix is rank deficient.

    For more information, a way to suppress the warning, and an example of
    `RankWarning` being issued, see `polyfit`.

    """
    # Marker class only; no behavior beyond UserWarning.
    pass
def poly(seq_of_zeros):
    """
    Find the coefficients of a polynomial with the given sequence of roots.

    The leading coefficient of the returned polynomial is always 1; multiple
    roots must appear in the sequence as many times as their multiplicity.
    A square array is treated as a matrix and the coefficients of its
    characteristic polynomial are returned.

    Parameters
    ----------
    seq_of_zeros : array_like, shape (N,) or (N, N)
        A sequence of polynomial roots, or a square array/matrix.

    Returns
    -------
    c : ndarray
        1D array of coefficients from highest to lowest degree:
        ``c[0] * x**(N) + c[1] * x**(N-1) + ... + c[N-1] * x + c[N]``
        where ``c[0]`` always equals 1.

    Raises
    ------
    ValueError
        If input is neither 1-D nor a non-empty square 2-D array.

    See Also
    --------
    polyval, roots, polyfit, poly1d

    Examples
    --------
    >>> np.poly((0, 0, 0)) # Multiple root example
    array([1, 0, 0, 0])

    >>> np.poly((-1./2, 0, 1./2))
    array([ 1.  ,  0.  , -0.25,  0.  ])
    """
    seq_of_zeros = atleast_1d(seq_of_zeros)
    shape = seq_of_zeros.shape
    if len(shape) == 2 and shape[0] == shape[1] and shape[0] != 0:
        # Square matrix: its eigenvalues are the characteristic roots.
        seq_of_zeros = eigvals(seq_of_zeros)
    elif len(shape) != 1:
        raise ValueError("input must be 1d or non-empty square 2d array.")

    if len(seq_of_zeros) == 0:
        # No roots given: the zero-degree monic polynomial.
        return 1.0

    # Multiply out (x - r0)(x - r1)... one linear factor at a time.
    coeffs = [1]
    for zero in seq_of_zeros:
        coeffs = NX.convolve(coeffs, [1, -zero], mode='full')

    if issubclass(coeffs.dtype.type, NX.complexfloating):
        # If the complex roots all occur in conjugate pairs, the product is
        # actually real — drop the (numerically zero) imaginary part.
        all_roots = NX.asarray(seq_of_zeros, complex)
        pos_roots = sort_complex(NX.compress(all_roots.imag > 0, all_roots))
        neg_roots = NX.conjugate(sort_complex(
            NX.compress(all_roots.imag < 0, all_roots)))
        if (len(pos_roots) == len(neg_roots) and
                NX.alltrue(neg_roots == pos_roots)):
            coeffs = coeffs.real.copy()

    return coeffs
def roots(p):
    """
    Return the roots of a polynomial with coefficients given in p.

    For `p` of length n+1 the polynomial is
    ``p[0] * x**n + p[1] * x**(n-1) + ... + p[n-1]*x + p[n]``.

    Parameters
    ----------
    p : array_like
        Rank-1 array of polynomial coefficients.

    Returns
    -------
    out : ndarray
        An array containing the complex roots of the polynomial.

    Raises
    ------
    ValueError
        When `p` cannot be converted to a rank-1 array.

    See also
    --------
    poly, polyval, polyfit, poly1d

    Notes
    -----
    The roots are computed as the eigenvalues of the companion matrix.

    Examples
    --------
    >>> coeff = [3.2, 2, 1]
    >>> np.roots(coeff)
    array([-0.3125+0.46351241j, -0.3125-0.46351241j])
    """
    p = atleast_1d(p)
    if len(p.shape) != 1:
        raise ValueError("Input must be a rank-1 array.")

    # Indices of non-zero coefficients; an all-zero polynomial has no roots.
    non_zero = NX.nonzero(NX.ravel(p))[0]
    if len(non_zero) == 0:
        return NX.array([])

    # Each trailing zero coefficient contributes one root at x = 0.
    trailing_zeros = len(p) - non_zero[-1] - 1

    # Strip leading/trailing zeros and promote to floating point if needed.
    p = p[int(non_zero[0]):int(non_zero[-1]) + 1]
    if not issubclass(p.dtype.type, (NX.floating, NX.complexfloating)):
        p = p.astype(float)

    degree = len(p) - 1
    if degree >= 1:
        # Build the companion matrix; its eigenvalues are the roots.
        companion = diag(NX.ones((degree - 1,), p.dtype), -1)
        companion[0, :] = -p[1:] / p[0]
        found = eigvals(companion)
    else:
        found = NX.array([])

    # Tack the zero roots removed earlier onto the back of the array.
    return hstack((found, NX.zeros(trailing_zeros, found.dtype)))
def polyint(p, m=1, k=None):
    """
    Return an antiderivative (indefinite integral) of a polynomial.

    The order-`m` antiderivative ``P`` satisfies ``d^m/dx^m P(x) = p(x)``
    and is determined up to `m - 1` integration constants `k`, with
    ``P**(j)(0) = k[m - j - 1]``.

    Parameters
    ----------
    p : array_like or poly1d
        Polynomial to integrate (coefficients from highest to lowest degree).
    m : int, optional
        Order of the antiderivative (default: 1). Must be non-negative.
    k : None, list of `m` scalars, or scalar, optional
        Integration constants, given highest-order first. ``None`` (default)
        means all zero; for ``m == 1`` a single scalar is accepted.

    Returns
    -------
    poly1d or ndarray
        The antiderivative; a `poly1d` is returned when `p` is one.

    Raises
    ------
    ValueError
        If `m` is negative, or `k` has fewer than `m` entries.

    See Also
    --------
    polyder : derivative of a polynomial
    poly1d.integ : equivalent method

    Examples
    --------
    >>> p = np.poly1d([1,1,1])
    >>> np.polyder(np.polyint(p)) == p
    True
    """
    m = int(m)
    if m < 0:
        raise ValueError("Order of integral must be positive (see polyder)")

    # Normalize the integration constants into a rank-1 array of length >= m.
    if k is None:
        k = NX.zeros(m, float)
    k = atleast_1d(k)
    if len(k) == 1 and m > 1:
        k = k[0] * NX.ones(m, float)
    if len(k) < m:
        raise ValueError(
            "k must be a scalar or a rank-1 array of length 1 or >m.")

    was_poly1d = isinstance(p, poly1d)
    p = NX.asarray(p)
    if m == 0:
        return poly1d(p) if was_poly1d else p

    # One integration step: divide each coefficient by its new exponent and
    # append the constant k[0]; then recurse for the remaining m-1 steps.
    # (__truediv__ keeps this working for object and integer arrays.)
    stepped = NX.concatenate((p.__truediv__(NX.arange(len(p), 0, -1)), [k[0]]))
    result = polyint(stepped, m - 1, k=k[1:])
    return poly1d(result) if was_poly1d else result
def polyder(p, m=1):
    """
    Return the derivative of the specified order of a polynomial.

    Parameters
    ----------
    p : poly1d or sequence
        Polynomial to differentiate (coefficients, highest degree first).
    m : int, optional
        Order of differentiation (default: 1). Must be non-negative.

    Returns
    -------
    der : poly1d or ndarray
        The derivative; a `poly1d` is returned when `p` is one.

    Raises
    ------
    ValueError
        If `m` is negative.

    See Also
    --------
    polyint : Anti-derivative of a polynomial.
    poly1d : Class for one-dimensional polynomials.

    Examples
    --------
    >>> np.polyder(np.poly1d([1, 1, 1, 1]))
    poly1d([3, 2, 1])
    """
    m = int(m)
    if m < 0:
        raise ValueError("Order of derivative must be positive (see polyint)")

    was_poly1d = isinstance(p, poly1d)
    p = NX.asarray(p)

    # First derivative: multiply each coefficient by its exponent and drop
    # the constant term.
    degree = len(p) - 1
    first = p[:-1] * NX.arange(degree, 0, -1)

    if m == 0:
        result = p
    else:
        # Higher orders via recursion on the first derivative.
        result = polyder(first, m - 1)

    return poly1d(result) if was_poly1d else result
def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
    """
    Least squares polynomial fit.

    Fit a polynomial ``p(x) = p[0] * x**deg + ... + p[deg]`` of degree `deg`
    to points ``(x, y)``, returning the coefficient vector that minimizes
    the squared error.

    Parameters
    ----------
    x : array_like, shape (M,)
        x-coordinates of the M sample points ``(x[i], y[i])``.
    y : array_like, shape (M,) or (M, K)
        y-coordinates; a 2-D array fits one dataset per column, all sharing
        the same x-coordinates.
    deg : int
        Degree of the fitting polynomial.
    rcond : float, optional
        Relative condition-number cutoff for the fit; singular values
        smaller than this (relative to the largest) are ignored. Defaults
        to ``len(x) * eps`` for the data's float type.
    full : bool, optional
        When True, diagnostic information from the singular value
        decomposition is returned along with the coefficients.
    w : array_like, shape (M,), optional
        Weights to apply to the y-coordinates of the sample points.
    cov : bool, optional
        When True (and `full` is False), also return the covariance matrix
        of the coefficient estimates.

    Returns
    -------
    p : ndarray, shape (deg+1,) or (deg+1, K)
        Polynomial coefficients, highest power first. For 2-D `y`, the
        coefficients for the `k`-th data set are in ``p[:,k]``.
    residuals, rank, singular_values, rcond
        Present only if ``full`` is True; see `numpy.linalg.lstsq`.
    V : ndarray, shape (M,M) or (M,M,K)
        Present only if ``full`` is False and ``cov`` is True: covariance
        matrix of the coefficient estimates (diagonal holds the variances).

    Warns
    -----
    RankWarning
        The rank of the coefficient matrix in the least-squares fit is
        deficient; only raised when ``full`` is False.

    See Also
    --------
    polyval : Computes polynomial values.
    linalg.lstsq : Computes a least-squares fit.

    Notes
    -----
    The coefficient matrix is a Vandermonde matrix; badly centered or
    high-degree fits are inherently ill conditioned — consider shifting `x`
    by its mean or lowering the degree when `RankWarning` is issued.

    Examples
    --------
    >>> x = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0])
    >>> y = np.array([0.0, 0.8, 0.9, 0.1, -0.8, -1.0])
    >>> np.polyfit(x, y, 3)
    array([ 0.08703704, -0.81349206,  1.69312169, -0.03968254])
    """
    order = int(deg) + 1
    x = NX.asarray(x) + 0.0
    y = NX.asarray(y) + 0.0

    # Validate arguments.
    if deg < 0:
        raise ValueError("expected deg >= 0")
    if x.ndim != 1:
        raise TypeError("expected 1D vector for x")
    if x.size == 0:
        raise TypeError("expected non-empty vector for x")
    if y.ndim < 1 or y.ndim > 2:
        raise TypeError("expected 1D or 2D array for y")
    if x.shape[0] != y.shape[0]:
        raise TypeError("expected x and y to have same length")

    if rcond is None:
        rcond = len(x) * finfo(x.dtype).eps

    # Least-squares system: Vandermonde matrix of x against the samples.
    lhs = vander(x, order)
    rhs = y

    # Apply weighting, if requested.
    if w is not None:
        w = NX.asarray(w) + 0.0
        if w.ndim != 1:
            raise TypeError("expected a 1-d array for weights")
        if w.shape[0] != y.shape[0]:
            raise TypeError("expected w and y to have the same length")
        lhs *= w[:, NX.newaxis]
        if rhs.ndim == 2:
            rhs *= w[:, NX.newaxis]
        else:
            rhs *= w

    # Scale the columns to unit norm to improve conditioning, solve, then
    # undo the scaling on the coefficients.
    scale = NX.sqrt((lhs * lhs).sum(axis=0))
    lhs /= scale
    c, resids, rank, s = lstsq(lhs, rhs, rcond)
    c = (c.T / scale).T  # broadcast scale coefficients

    # Rank reduction indicates an ill-conditioned matrix; warn unless the
    # caller asked for full diagnostics.
    if rank != order and not full:
        warnings.warn("Polyfit may be poorly conditioned", RankWarning)

    if full:
        return c, resids, rank, s, rcond
    if cov:
        Vbase = inv(dot(lhs.T, lhs))
        Vbase /= NX.outer(scale, scale)
        # Some literature ignores the extra -2.0 factor in the denominator,
        # but it matches the covariance of a multivariate Student-T (implied
        # by a Bayesian uncertainty analysis) and is slightly more
        # conservative.
        fac = resids / (len(x) - order - 2.0)
        if y.ndim == 1:
            return c, Vbase * fac
        return c, Vbase[:, :, NX.newaxis] * fac
    return c
def polyval(p, x):
    """
    Evaluate a polynomial at specific values.

    For `p` of length N this computes
    ``p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1]``.
    A sequence `x` yields the value at each element; a poly1d `x` yields the
    composite polynomial ``p(x(t))``.

    Parameters
    ----------
    p : array_like or poly1d object
        1D array of polynomial coefficients from highest degree down to the
        constant term (zeros included), or a poly1d instance.
    x : array_like or poly1d object
        A number, 1D array of numbers, or poly1d at which to evaluate `p`.

    Returns
    -------
    values : ndarray or poly1d
        Result with the same flavor as `x`: array_like in, array out;
        poly1d in, poly1d out (the composition of the two polynomials).

    See Also
    --------
    poly1d: A polynomial class.

    Notes
    -----
    Evaluation uses Horner's scheme; for polynomials of high degree the
    values may be inaccurate due to rounding errors. Use carefully.

    Examples
    --------
    >>> np.polyval([3,0,1], 5)  # 3 * 5**2 + 0 * 5**1 + 1
    76
    """
    p = NX.asarray(p)
    # Seed the accumulator: composition with a poly1d must start from the
    # scalar 0 so the result grows into a poly1d; otherwise match x's shape.
    if isinstance(x, poly1d):
        acc = 0
    else:
        x = NX.asarray(x)
        acc = NX.zeros_like(x)
    # Horner's scheme.
    for coeff in p:
        acc = acc * x + coeff
    return acc
def polyadd(a1, a2):
    """
    Find the sum of two polynomials.

    Parameters
    ----------
    a1, a2 : array_like or poly1d object
        Input polynomials, as poly1d objects or 1D coefficient sequences
        ordered from highest to lowest degree.

    Returns
    -------
    out : ndarray or poly1d object
        The sum of the inputs.  A poly1d if either input was one,
        otherwise a 1D coefficient array (highest degree first).

    See Also
    --------
    poly1d, poly, polyder, polydiv, polyfit, polyint, polysub, polyval
    """
    wrap = isinstance(a1, poly1d) or isinstance(a2, poly1d)
    c1 = atleast_1d(a1)
    c2 = atleast_1d(a2)
    # Left-pad the shorter coefficient array with zeros so both span the
    # same degree, then add element-wise.
    pad = len(c2) - len(c1)
    if pad > 0:
        c1 = NX.concatenate((NX.zeros(pad, c1.dtype), c1))
    elif pad < 0:
        c2 = NX.concatenate((NX.zeros(-pad, c2.dtype), c2))
    result = c1 + c2
    return poly1d(result) if wrap else result
def polysub(a1, a2):
    """
    Difference (subtraction) of two polynomials.

    Given two polynomials `a1` and `a2`, returns ``a1 - a2``.

    Parameters
    ----------
    a1, a2 : array_like or poly1d
        Minuend and subtrahend, as poly1d objects or 1D coefficient
        sequences (highest degree first).

    Returns
    -------
    out : ndarray or poly1d
        Coefficients of the difference polynomial; a poly1d if either
        input was one.

    See Also
    --------
    polyval, polydiv, polymul, polyadd
    """
    wrap = isinstance(a1, poly1d) or isinstance(a2, poly1d)
    c1 = atleast_1d(a1)
    c2 = atleast_1d(a2)
    # Zero-pad the shorter operand on the left so both cover the same
    # degree before subtracting element-wise.
    pad = len(c2) - len(c1)
    if pad > 0:
        c1 = NX.concatenate((NX.zeros(pad, c1.dtype), c1))
    elif pad < 0:
        c2 = NX.concatenate((NX.zeros(-pad, c2.dtype), c2))
    result = c1 - c2
    return poly1d(result) if wrap else result
def polymul(a1, a2):
    """
    Find the product of two polynomials.

    Parameters
    ----------
    a1, a2 : array_like or poly1d object
        Input polynomials, as poly1d objects or 1D coefficient sequences
        (highest degree first).

    Returns
    -------
    out : ndarray or poly1d object
        Coefficients of the product; a poly1d if either input was one.

    See Also
    --------
    poly1d, convolve
    """
    wrap = isinstance(a1, poly1d) or isinstance(a2, poly1d)
    # Polynomial multiplication is exactly discrete convolution of the
    # coefficient sequences; poly1d() normalizes both operands first.
    product = NX.convolve(poly1d(a1), poly1d(a2))
    return poly1d(product) if wrap else product
def polydiv(u, v):
    """
    Returns the quotient and remainder of polynomial division.

    The input arrays are the coefficients (including any coefficients
    equal to zero) of the "numerator" (dividend) and "denominator"
    (divisor) polynomials, respectively.

    Parameters
    ----------
    u : array_like or poly1d
        Dividend polynomial's coefficients.
    v : array_like or poly1d
        Divisor polynomial's coefficients.

    Returns
    -------
    q : ndarray or poly1d
        Coefficients, including those equal to zero, of the quotient.
    r : ndarray or poly1d
        Coefficients, including those equal to zero, of the remainder.
        Both are poly1d objects when either input was a poly1d.

    Examples
    --------
    >>> polydiv([3.0, 5.0, 2.0], [2.0, 1.0])
    (array([ 1.5 ,  1.75]), array([ 0.25]))
    """
    # BUG FIX: the second isinstance() previously tested `u` again
    # instead of `v`, so dividing an array by a poly1d divisor returned
    # plain arrays rather than poly1d objects.
    truepoly = (isinstance(u, poly1d) or isinstance(v, poly1d))
    u = atleast_1d(u) + 0.0  # + 0.0 forces float arithmetic
    v = atleast_1d(v) + 0.0
    # w has the common type
    w = u[0] + v[0]
    m = len(u) - 1
    n = len(v) - 1
    scale = 1. / v[0]
    q = NX.zeros((max(m - n + 1, 1),), w.dtype)
    r = u.copy()
    # Synthetic long division: eliminate the leading coefficient of the
    # running remainder at each step and record the quotient term.
    for k in range(0, m - n + 1):
        d = scale * r[k]
        q[k] = d
        r[k:k + n + 1] -= d * v
    # Strip (numerically) zero leading coefficients from the remainder,
    # always keeping at least one coefficient.
    while NX.allclose(r[0], 0, rtol=1e-14) and (r.shape[-1] > 1):
        r = r[1:]
    if truepoly:
        return poly1d(q), poly1d(r)
    return q, r
# Matches a '**<digits>' exponent marker produced by poly1d.__str__ so
# that it can be re-rendered as a raised superscript by _raise_power.
_poly_mat = re.compile(r"[*][*]([0-9]*)")
def _raise_power(astr, wrap=70):
    # Rewrite 'c x**N' substrings of `astr` into a two-line rendering in
    # which the exponent N sits on the line above the base, e.g.
    #    2
    # 1 x + 2 x + 3
    # `wrap` bounds the printed width before a fresh pair of lines is
    # started.
    n = 0
    line1 = ''  # upper line: accumulates exponents
    line2 = ''  # lower line: accumulates the base expression
    output = ' '
    while True:
        mat = _poly_mat.search(astr, n)
        if mat is None:
            break
        span = mat.span()
        power = mat.groups()[0]
        partstr = astr[n:span[0]]
        n = span[1]
        # Pad both lines so exponent and base stay column-aligned.
        toadd2 = partstr + ' '*(len(power)-1)
        toadd1 = ' '*(len(partstr)-1) + power
        if ((len(line2) + len(toadd2) > wrap) or
            (len(line1) + len(toadd1) > wrap)):
            # Width exceeded: flush the current pair and start new lines.
            output += line1 + "\n" + line2 + "\n "
            line1 = toadd1
            line2 = toadd2
        else:
            line2 += partstr + ' '*(len(power)-1)
            line1 += ' '*(len(partstr)-1) + power
    output += line1 + "\n" + line2
    return output + astr[n:]
class poly1d(object):
    """
    A one-dimensional polynomial class.

    A convenience class, used to encapsulate "natural" operations on
    polynomials so that said operations may take on their customary
    form in code (see Examples).

    Parameters
    ----------
    c_or_r : array_like
        The polynomial's coefficients, in decreasing powers, or if
        the value of the second parameter is True, the polynomial's
        roots (values where the polynomial evaluates to 0). For example,
        ``poly1d([1, 2, 3])`` returns an object that represents
        :math:`x^2 + 2x + 3`, whereas ``poly1d([1, 2, 3], True)`` returns
        one that represents :math:`(x-1)(x-2)(x-3) = x^3 - 6x^2 + 11x -6`.
    r : bool, optional
        If True, `c_or_r` specifies the polynomial's roots; the default
        is False.
    variable : str, optional
        Changes the variable used when printing `p` from `x` to `variable`
        (see Examples).

    Examples
    --------
    Construct the polynomial :math:`x^2 + 2x + 3`:

    >>> p = np.poly1d([1, 2, 3])

    Evaluate the polynomial at :math:`x = 0.5`:

    >>> p(0.5)
    4.25

    Find the roots:

    >>> p.r
    array([-1.+1.41421356j, -1.-1.41421356j])
    >>> p(p.r)
    array([ -4.44089210e-16+0.j, -4.44089210e-16+0.j])

    These numbers in the previous line represent (0, 0) to machine precision

    Show the coefficients:

    >>> p.c
    array([1, 2, 3])

    Display the order (the leading zero-coefficients are removed):

    >>> p.order
    2

    Show the coefficient of the k-th power in the polynomial
    (which is equivalent to ``p.c[-(i+1)]``):

    >>> p[1]
    2

    Polynomials can be added, subtracted, multiplied, and divided
    (returns quotient and remainder):

    >>> p * p
    poly1d([ 1,  4, 10, 12,  9])
    >>> (p**3 + 4) / p
    (poly1d([  1.,   4.,  10.,  12.,   9.]), poly1d([ 4.]))

    ``asarray(p)`` gives the coefficient array, so polynomials can be
    used in all functions that accept arrays:

    >>> p**2  # square of polynomial
    poly1d([ 1,  4, 10, 12,  9])
    >>> np.square(p)  # square of individual coefficients
    array([1, 4, 9])

    The variable used in the string representation of `p` can be modified,
    using the `variable` parameter:

    >>> p = np.poly1d([1, 2, 3], variable='z')

    Construct a polynomial from its roots:

    >>> np.poly1d([1, 2], True)
    poly1d([ 1, -3,  2])

    This is the same polynomial as obtained by:

    >>> np.poly1d([1, -1]) * np.poly1d([1, -2])
    poly1d([ 1, -3,  2])
    """
    # Class-level placeholders only: instances store the real values
    # directly in their __dict__, because __setattr__ below rejects
    # ordinary attribute assignment.
    coeffs = None
    order = None
    variable = None
    # Instances are mutable via __setitem__ and define __eq__, so they
    # must be unhashable.
    __hash__ = None
    def __init__(self, c_or_r, r=0, variable=None):
        # Copy constructor: clone another poly1d's state wholesale,
        # optionally overriding the display variable.
        if isinstance(c_or_r, poly1d):
            for key in c_or_r.__dict__.keys():
                self.__dict__[key] = c_or_r.__dict__[key]
            if variable is not None:
                self.__dict__['variable'] = variable
            return
        if r:
            # `c_or_r` holds roots: expand to coefficients first.
            c_or_r = poly(c_or_r)
        c_or_r = atleast_1d(c_or_r)
        if len(c_or_r.shape) > 1:
            raise ValueError("Polynomial must be 1d only.")
        # Drop leading zero coefficients ('f' = trim from the front);
        # an all-zero input collapses to the zero polynomial [0.].
        c_or_r = trim_zeros(c_or_r, trim='f')
        if len(c_or_r) == 0:
            c_or_r = NX.array([0.])
        # Assign through __dict__ to bypass the blocked __setattr__.
        self.__dict__['coeffs'] = c_or_r
        self.__dict__['order'] = len(c_or_r) - 1
        if variable is None:
            variable = 'x'
        self.__dict__['variable'] = variable
    def __array__(self, t=None):
        # Lets NumPy functions treat the polynomial as its coefficient
        # array (optionally cast to dtype `t`).
        if t:
            return NX.asarray(self.coeffs, t)
        else:
            return NX.asarray(self.coeffs)
    def __repr__(self):
        vals = repr(self.coeffs)
        # Strip the surrounding 'array(' ... ')' of the ndarray repr.
        vals = vals[6:-1]
        return "poly1d(%s)" % vals
    def __len__(self):
        # NOTE: length is the polynomial *order*, not the number of
        # coefficients (which is order + 1).
        return self.order
    def __str__(self):
        # Render e.g. poly1d([1, 2, 3]) as '1 x**2 + 2 x + 3', then let
        # _raise_power lift the exponents onto a separate line.
        thestr = "0"
        var = self.variable
        # Remove leading zeros
        coeffs = self.coeffs[NX.logical_or.accumulate(self.coeffs != 0)]
        N = len(coeffs)-1
        def fmt_float(q):
            # Compact float formatting: 4 significant digits, with a
            # trailing '.0000' shortened away.
            s = '%.4g' % q
            if s.endswith('.0000'):
                s = s[:-5]
            return s
        for k in range(len(coeffs)):
            if not iscomplex(coeffs[k]):
                coefstr = fmt_float(real(coeffs[k]))
            elif real(coeffs[k]) == 0:
                coefstr = '%sj' % fmt_float(imag(coeffs[k]))
            else:
                coefstr = '(%s + %sj)' % (fmt_float(real(coeffs[k])),
                                          fmt_float(imag(coeffs[k])))
            power = (N-k)
            if power == 0:
                if coefstr != '0':
                    newstr = '%s' % (coefstr,)
                else:
                    if k == 0:
                        newstr = '0'
                    else:
                        newstr = ''
            elif power == 1:
                if coefstr == '0':
                    newstr = ''
                elif coefstr == 'b':
                    # NOTE(review): fmt_float never returns 'b'; this
                    # branch looks like leftover sentinel handling.
                    newstr = var
                else:
                    newstr = '%s %s' % (coefstr, var)
            else:
                if coefstr == '0':
                    newstr = ''
                elif coefstr == 'b':
                    # NOTE(review): same apparently-dead 'b' sentinel.
                    newstr = '%s**%d' % (var, power,)
                else:
                    newstr = '%s %s**%d' % (coefstr, var, power)
            if k > 0:
                if newstr != '':
                    if newstr.startswith('-'):
                        thestr = "%s - %s" % (thestr, newstr[1:])
                    else:
                        thestr = "%s + %s" % (thestr, newstr)
            else:
                thestr = newstr
        return _raise_power(thestr)
    def __call__(self, val):
        # p(val): evaluate the polynomial (or compose, if val is poly1d).
        return polyval(self.coeffs, val)
    def __neg__(self):
        return poly1d(-self.coeffs)
    def __pos__(self):
        return self
    def __mul__(self, other):
        # Scalar: scale coefficients; polynomial: convolve coefficients.
        if isscalar(other):
            return poly1d(self.coeffs * other)
        else:
            other = poly1d(other)
            return poly1d(polymul(self.coeffs, other.coeffs))
    def __rmul__(self, other):
        if isscalar(other):
            return poly1d(other * self.coeffs)
        else:
            other = poly1d(other)
            return poly1d(polymul(self.coeffs, other.coeffs))
    def __add__(self, other):
        other = poly1d(other)
        return poly1d(polyadd(self.coeffs, other.coeffs))
    def __radd__(self, other):
        other = poly1d(other)
        return poly1d(polyadd(self.coeffs, other.coeffs))
    def __pow__(self, val):
        # Only non-negative integer exponents make sense for
        # coefficient arrays.
        if not isscalar(val) or int(val) != val or val < 0:
            raise ValueError("Power to non-negative integers only.")
        res = [1]
        for _ in range(val):
            res = polymul(self.coeffs, res)
        return poly1d(res)
    def __sub__(self, other):
        other = poly1d(other)
        return poly1d(polysub(self.coeffs, other.coeffs))
    def __rsub__(self, other):
        other = poly1d(other)
        return poly1d(polysub(other.coeffs, self.coeffs))
    def __div__(self, other):
        # Scalar: scale coefficients; polynomial: long division,
        # returning a (quotient, remainder) pair.
        if isscalar(other):
            return poly1d(self.coeffs/other)
        else:
            other = poly1d(other)
            return polydiv(self, other)
    __truediv__ = __div__
    def __rdiv__(self, other):
        if isscalar(other):
            return poly1d(other/self.coeffs)
        else:
            other = poly1d(other)
            return polydiv(other, self)
    __rtruediv__ = __rdiv__
    def __eq__(self, other):
        # NOTE(review): assumes `other` is a poly1d (or at least has a
        # .coeffs array); comparing to anything else raises
        # AttributeError rather than returning NotImplemented.
        if self.coeffs.shape != other.coeffs.shape:
            return False
        return (self.coeffs == other.coeffs).all()
    def __ne__(self, other):
        return not self.__eq__(other)
    def __setattr__(self, key, val):
        # Attributes are read-only; mutate coefficients via p[k] = val.
        raise ValueError("Attributes cannot be changed this way.")
    def __getattr__(self, key):
        # Convenience aliases: p.r/p.roots, p.c/p.coef/p.coefficients,
        # p.o; anything else falls through to the instance __dict__.
        if key in ['r', 'roots']:
            return roots(self.coeffs)
        elif key in ['c', 'coef', 'coefficients']:
            return self.coeffs
        elif key in ['o']:
            return self.order
        else:
            try:
                return self.__dict__[key]
            except KeyError:
                raise AttributeError(
                    "'%s' has no attribute '%s'" % (self.__class__, key))
    def __getitem__(self, val):
        # p[k] is the coefficient of x**k; out-of-range powers read as 0.
        ind = self.order - val
        if val > self.order:
            return 0
        if val < 0:
            return 0
        return self.coeffs[ind]
    def __setitem__(self, key, val):
        # Set the coefficient of x**key, growing the coefficient array
        # (zero-padded on the left) if key exceeds the current order.
        ind = self.order - key
        if key < 0:
            raise ValueError("Does not support negative powers.")
        if key > self.order:
            zr = NX.zeros(key-self.order, self.coeffs.dtype)
            self.__dict__['coeffs'] = NX.concatenate((zr, self.coeffs))
            self.__dict__['order'] = key
            ind = 0
        self.__dict__['coeffs'][ind] = val
        return
    def __iter__(self):
        # Iterates over coefficients, highest degree first.
        return iter(self.coeffs)
    def integ(self, m=1, k=0):
        """
        Return an antiderivative (indefinite integral) of this polynomial.

        Refer to `polyint` for full documentation.

        See Also
        --------
        polyint : equivalent function
        """
        return poly1d(polyint(self.coeffs, m=m, k=k))
    def deriv(self, m=1):
        """
        Return a derivative of this polynomial.

        Refer to `polyder` for full documentation.

        See Also
        --------
        polyder : equivalent function
        """
        return poly1d(polyder(self.coeffs, m=m))
# Stuff to do on module import
# Always emit RankWarning (raised by polyfit on ill-conditioned fits)
# instead of Python's default warn-once-per-location behavior.
warnings.simplefilter('always', RankWarning)
| apache-2.0 |
pravsripad/mne-python | mne/io/edf/tests/test_edf.py | 4 | 21326 | # -*- coding: utf-8 -*-
# Authors: Teon Brooks <teon.brooks@gmail.com>
# Martin Billinger <martin.billinger@tugraz.at>
# Alan Leggitt <alan.leggitt@ucsf.edu>
# Alexandre Barachant <alexandre.barachant@gmail.com>
# Stefan Appelhoff <stefan.appelhoff@mailbox.org>
# Joan Massich <mailsik@gmail.com>
#
# License: BSD (3-clause)
from functools import partial
import os.path as op
import inspect
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_equal, assert_allclose)
from scipy.io import loadmat
import pytest
from mne import pick_types, Annotations
from mne.datasets import testing
from mne.fixes import nullcontext
from mne.utils import requires_pandas
from mne.io import read_raw_edf, read_raw_bdf, read_raw_fif, edf, read_raw_gdf
from mne.io.tests.test_raw import _test_raw_reader
from mne.io.edf.edf import (_get_edf_default_event_id, _read_annotations_edf,
_read_ch, _parse_prefilter_string, _edf_str,
_read_edf_header, _read_header)
from mne.io.pick import channel_indices_by_type, get_channel_type_constants
from mne.annotations import events_from_annotations, read_annotations
td_mark = testing._pytest_mark()
FILE = inspect.getfile(inspect.currentframe())
data_dir = op.join(op.dirname(op.abspath(FILE)), 'data')
montage_path = op.join(data_dir, 'biosemi.hpts') # XXX: missing reader
bdf_path = op.join(data_dir, 'test.bdf')
edf_path = op.join(data_dir, 'test.edf')
duplicate_channel_labels_path = op.join(data_dir,
'duplicate_channel_labels.edf')
edf_uneven_path = op.join(data_dir, 'test_uneven_samp.edf')
bdf_eeglab_path = op.join(data_dir, 'test_bdf_eeglab.mat')
edf_eeglab_path = op.join(data_dir, 'test_edf_eeglab.mat')
edf_uneven_eeglab_path = op.join(data_dir, 'test_uneven_samp.mat')
edf_stim_channel_path = op.join(data_dir, 'test_edf_stim_channel.edf')
edf_txt_stim_channel_path = op.join(data_dir, 'test_edf_stim_channel.txt')
data_path = testing.data_path(download=False)
edf_stim_resamp_path = op.join(data_path, 'EDF', 'test_edf_stim_resamp.edf')
edf_overlap_annot_path = op.join(data_path, 'EDF',
'test_edf_overlapping_annotations.edf')
edf_reduced = op.join(data_path, 'EDF', 'test_reduced.edf')
bdf_stim_channel_path = op.join(data_path, 'BDF', 'test_bdf_stim_channel.bdf')
bdf_multiple_annotations_path = op.join(data_path, 'BDF',
'multiple_annotation_chans.bdf')
test_generator_bdf = op.join(data_path, 'BDF', 'test_generator_2.bdf')
test_generator_edf = op.join(data_path, 'EDF', 'test_generator_2.edf')
edf_annot_sub_s_path = op.join(data_path, 'EDF', 'subsecond_starttime.edf')
eog = ['REOG', 'LEOG', 'IEOG']
misc = ['EXG1', 'EXG5', 'EXG8', 'M1', 'M2']
def test_orig_units():
    """Test exposure of original channel units."""
    raw = read_raw_edf(edf_path, preload=True)
    # Test original units
    orig_units = raw._orig_units
    # Every channel must have an entry in the original-units mapping.
    assert len(orig_units) == len(raw.ch_names)
    # 'uV' from the file header is normalized to the SI micro sign.
    assert orig_units['A1'] == 'µV'  # formerly 'uV' edit by _check_orig_units
def test_subject_info(tmpdir):
    """Test exposure of original channel units."""
    raw = read_raw_edf(edf_path)
    assert raw.info['subject_info'] is None  # XXX this is arguably a bug
    # The parsed header keeps the subject fields even though they are
    # not (yet) propagated into raw.info.
    edf_info = raw._raw_extras[0]
    assert edf_info['subject_info'] is not None
    want = {'id': 'X', 'sex': 'X', 'birthday': 'X', 'name': 'X'}
    for key, val in want.items():
        assert edf_info['subject_info'][key] == val, key
    # Round-trip through FIF: subject_info is still lost.
    fname = tmpdir.join('test_raw.fif')
    raw.save(fname)
    raw = read_raw_fif(fname)
    assert raw.info['subject_info'] is None  # XXX should eventually round-trip
def test_bdf_data():
    """Test reading raw bdf files."""
    # XXX BDF data for these is around 0.01 when it should be in the uV range,
    # probably some bug
    test_scaling = False
    # With and without a montage, the reader must expose all 71 channels.
    raw_py = _test_raw_reader(read_raw_bdf, input_fname=bdf_path,
                              eog=eog, misc=misc,
                              exclude=['M2', 'IEOG'],
                              test_scaling=test_scaling,
                              )
    assert len(raw_py.ch_names) == 71
    raw_py = _test_raw_reader(read_raw_bdf, input_fname=bdf_path,
                              montage='biosemi64', eog=eog, misc=misc,
                              exclude=['M2', 'IEOG'],
                              test_scaling=test_scaling)
    assert len(raw_py.ch_names) == 71
    assert 'RawEDF' in repr(raw_py)
    picks = pick_types(raw_py.info, meg=False, eeg=True, exclude='bads')
    data_py, _ = raw_py[picks]
    # this .mat was generated using the EEG Lab Biosemi Reader
    raw_eeglab = loadmat(bdf_eeglab_path)
    raw_eeglab = raw_eeglab['data'] * 1e-6  # data are stored in microvolts
    data_eeglab = raw_eeglab[picks]
    # bdf saved as a single, resolution to seven decimal points in matlab
    assert_array_almost_equal(data_py, data_eeglab, 8)
    # Manually checking that float coordinates are imported
    assert (raw_py.info['chs'][0]['loc']).any()
    assert (raw_py.info['chs'][25]['loc']).any()
    assert (raw_py.info['chs'][63]['loc']).any()
@testing.requires_testing_data
def test_bdf_crop_save_stim_channel(tmpdir):
    """Test EDF with various sampling rates."""
    raw = read_raw_bdf(bdf_stim_channel_path)
    # Saving a time-cropped copy must work with a stim channel present.
    raw.save(tmpdir.join('test-raw.fif'), tmin=1.2, tmax=4.0, overwrite=True)
@testing.requires_testing_data
@pytest.mark.parametrize('fname', [
    edf_reduced,
    edf_overlap_annot_path,
])
@pytest.mark.parametrize('stim_channel', (None, False, 'auto'))
def test_edf_others(fname, stim_channel):
    """Test EDF with various sampling rates and overlapping annotations."""
    # Smoke test via the generic raw-reader test for every combination
    # of file and stim_channel handling.
    _test_raw_reader(
        read_raw_edf, input_fname=fname, stim_channel=stim_channel,
        verbose='error')
def test_edf_data_broken(tmpdir):
    """Test edf files."""
    raw = _test_raw_reader(read_raw_edf, input_fname=edf_path,
                           exclude=['Ergo-Left', 'H10'], verbose='error')
    raw_py = read_raw_edf(edf_path)
    data = raw_py.get_data()
    # Excluding two channels must shrink the channel list accordingly.
    assert_equal(len(raw.ch_names) + 2, len(raw_py.ch_names))
    # Test with number of records not in header (-1).
    broken_fname = op.join(tmpdir, 'broken.edf')
    with open(edf_path, 'rb') as fid_in:
        fid_in.seek(0, 2)
        n_bytes = fid_in.tell()
        fid_in.seek(0, 0)
        rbytes = fid_in.read()
    with open(broken_fname, 'wb') as fid_out:
        # Byte offset 236 holds the number-of-records field; write -1
        # and truncate the data so the reader must infer the count.
        fid_out.write(rbytes[:236])
        fid_out.write(b'-1      ')
        fid_out.write(rbytes[244:244 + int(n_bytes * 0.4)])
    with pytest.warns(RuntimeWarning,
                      match='records .* not match the file size'):
        raw = read_raw_edf(broken_fname, preload=True)
        read_raw_edf(broken_fname, exclude=raw.ch_names[:132], preload=True)
    # Test with \x00's in the data
    with open(broken_fname, 'wb') as fid_out:
        fid_out.write(rbytes[:184])
        assert rbytes[184:192] == b'36096   '
        # NUL bytes in place of padding spaces must still parse.
        fid_out.write(rbytes[184:192].replace(b' ', b'\x00'))
        fid_out.write(rbytes[192:])
    raw_py = read_raw_edf(broken_fname)
    data_new = raw_py.get_data()
    assert_allclose(data, data_new)
def test_duplicate_channel_labels_edf():
    """Test reading edf file with duplicate channel names."""
    # Duplicated labels are de-duplicated with '-0', '-1', ... suffixes.
    EXPECTED_CHANNEL_NAMES = ['EEG F1-Ref-0', 'EEG F2-Ref', 'EEG F1-Ref-1']
    with pytest.warns(RuntimeWarning, match='Channel names are not unique'):
        raw = read_raw_edf(duplicate_channel_labels_path, preload=False)
    assert raw.ch_names == EXPECTED_CHANNEL_NAMES
def test_parse_annotation(tmpdir):
    """Test parsing the tal channel."""
    # test the parser
    # Raw TAL (time-stamped annotation list) bytes as they appear in an
    # EDF+ annotation channel: '+onset[\x15duration]\x14description\x14'.
    annot = (b'+180\x14Lights off\x14Close door\x14\x00\x00\x00\x00\x00'
             b'+180\x14Lights off\x14\x00\x00\x00\x00\x00\x00\x00\x00'
             b'+180\x14Close door\x14\x00\x00\x00\x00\x00\x00\x00\x00'
             b'+3.14\x1504.20\x14nothing\x14\x00\x00\x00\x00'
             b'+1800.2\x1525.5\x14Apnea\x14\x00\x00\x00\x00\x00\x00\x00'
             b'+123\x14\x14\x00\x00\x00\x00\x00\x00\x00')
    annot_file = tmpdir.join('annotations.txt')
    annot_file.write(annot)
    # Re-pack consecutive byte pairs as little-endian int16 samples, the
    # way the annotation bytes are stored in a 2-byte EDF data channel.
    annot = [a for a in bytes(annot)]
    annot[1::2] = [a * 256 for a in annot[1::2]]
    tal_channel_A = np.array(list(map(sum, zip(annot[0::2], annot[1::2]))),
                             dtype=np.int64)
    with open(str(annot_file), 'rb') as fid:
        # ch_data = np.fromfile(fid, dtype='<i2', count=len(annot))
        tal_channel_B = _read_ch(fid, subtype='EDF', dtype='<i2',
                                 samp=(len(annot) - 1) // 2,
                                 dtype_byte='This_parameter_is_not_used')
    want_onset, want_duration, want_description = zip(
        *[[180., 0., 'Lights off'], [180., 0., 'Close door'],
          [180., 0., 'Lights off'], [180., 0., 'Close door'],
          [3.14, 4.2, 'nothing'], [1800.2, 25.5, 'Apnea']])
    # Both the hand-packed and the file-read channel must parse the same.
    for tal_channel in [tal_channel_A, tal_channel_B]:
        onset, duration, description = _read_annotations_edf([tal_channel])
        assert_allclose(onset, want_onset)
        assert_allclose(duration, want_duration)
        assert description == want_description
def test_find_events_backward_compatibility():
    """Test if events are detected correctly in a typical MNE workflow."""
    # Each row is [sample, previous value, event id].
    EXPECTED_EVENTS = [[68, 0, 2],
                       [199, 0, 2],
                       [1024, 0, 3],
                       [1280, 0, 2]]
    # test an actual file
    raw = read_raw_edf(edf_path, preload=True)
    event_id = _get_edf_default_event_id(raw.annotations.description)
    # 'start' is bookkeeping only, not a stimulus event.
    event_id.pop('start')
    events_from_EFA, _ = events_from_annotations(raw, event_id=event_id,
                                                 use_rounding=False)
    assert_array_equal(events_from_EFA, EXPECTED_EVENTS)
@requires_pandas
@pytest.mark.parametrize('fname', [edf_path, bdf_path])
def test_to_data_frame(fname):
    """Test EDF/BDF Raw Pandas exporter."""
    # Pick the matching reader from the file extension.
    ext = op.splitext(fname)[1].lstrip('.').lower()
    if ext == 'edf':
        raw = read_raw_edf(fname, preload=True, verbose='error')
    elif ext == 'bdf':
        raw = read_raw_bdf(fname, preload=True, verbose='error')
    _, times = raw[0, :10]
    df = raw.to_data_frame(index='time')
    assert (df.columns == raw.ch_names).all()
    # Time index is expressed in milliseconds.
    assert_array_equal(np.round(times * 1e3), df.index.values[:10])
    df = raw.to_data_frame(index=None, scalings={'eeg': 1e13})
    assert 'time' in df.columns
    assert_array_equal(df.values[:, 1], raw._data[0] * 1e13)
def test_read_raw_edf_stim_channel_input_parameters():
    """Test edf raw reader deprecation."""
    _MSG = "`read_raw_edf` is not supposed to trigger a deprecation warning"
    with pytest.warns(None) as recwarn:
        read_raw_edf(edf_path)
    assert all([w.category != DeprecationWarning for w in recwarn.list]), _MSG
    # Annotation channels may never be requested as stim channels.
    for invalid_stim_parameter in ['EDF Annotations', 'BDF Annotations']:
        with pytest.raises(ValueError,
                           match="stim channel is not supported"):
            read_raw_edf(edf_path, stim_channel=invalid_stim_parameter)
def _assert_annotations_equal(a, b):
assert_array_equal(a.onset, b.onset)
assert_array_equal(a.duration, b.duration)
assert_array_equal(a.description, b.description)
assert a.orig_time == b.orig_time
def test_read_annot(tmpdir):
    """Test parsing the tal channel."""
    # Fix: previously EXPECTED_ANNOTATIONS was first bound to a plain
    # list of [onset, duration, description] rows and then immediately
    # rebound to the Annotations object below, making the list literal
    # dead code; it has been removed.
    EXPECTED_ONSET = [180.0, 180.0, 180.0, 180.0, 3.14, 1800.2]
    EXPECTED_DURATION = [0, 0, 0, 0, 4.2, 25.5]
    EXPECTED_DESC = ['Lights off', 'Close door', 'Lights off', 'Close door',
                     'nothing', 'Apnea']
    EXPECTED_ANNOTATIONS = Annotations(onset=EXPECTED_ONSET,
                                       duration=EXPECTED_DURATION,
                                       description=EXPECTED_DESC,
                                       orig_time=None)
    # Raw TAL bytes as stored in an EDF+ annotation channel.
    annot = (b'+180\x14Lights off\x14Close door\x14\x00\x00\x00\x00\x00'
             b'+180\x14Lights off\x14\x00\x00\x00\x00\x00\x00\x00\x00'
             b'+180\x14Close door\x14\x00\x00\x00\x00\x00\x00\x00\x00'
             b'+3.14\x1504.20\x14nothing\x14\x00\x00\x00\x00'
             b'+1800.2\x1525.5\x14Apnea\x14\x00\x00\x00\x00\x00\x00\x00'
             b'+123\x14\x14\x00\x00\x00\x00\x00\x00\x00')
    annot_file = tmpdir.join('annotations.txt')
    annot_file.write(annot)
    # Parse directly from a file name.
    onset, duration, desc = _read_annotations_edf(annotations=str(annot_file))
    annotation = Annotations(onset=onset, duration=duration, description=desc,
                             orig_time=None)
    _assert_annotations_equal(annotation, EXPECTED_ANNOTATIONS)
    # Now test when reading from buffer of data
    with open(str(annot_file), 'rb') as fid:
        ch_data = np.fromfile(fid, dtype='<i2', count=len(annot))
    onset, duration, desc = _read_annotations_edf([ch_data])
    annotation = Annotations(onset=onset, duration=duration, description=desc,
                             orig_time=None)
    _assert_annotations_equal(annotation, EXPECTED_ANNOTATIONS)
@testing.requires_testing_data
@pytest.mark.parametrize('fname', [test_generator_edf, test_generator_bdf])
def test_read_annotations(fname, recwarn):
    """Test IO of annotations from edf and bdf files via regexp."""
    # Both generator files contain exactly two annotations.
    annot = read_annotations(fname)
    assert len(annot.onset) == 2
def test_edf_prefilter_parse():
    """Test prefilter strings from header are parsed correctly."""
    # Basic single-channel string.
    prefilter_basic = ["HP: 0Hz LP: 0Hz"]
    highpass, lowpass = _parse_prefilter_string(prefilter_basic)
    assert_array_equal(highpass, ["0"])
    assert_array_equal(lowpass, ["0"])
    # Identical filter settings repeated over several channels.
    prefilter_normal_multi_ch = ["HP: 1Hz LP: 30Hz"] * 10
    highpass, lowpass = _parse_prefilter_string(prefilter_normal_multi_ch)
    assert_array_equal(highpass, ["1"] * 10)
    assert_array_equal(lowpass, ["30"] * 10)
    # An unfiltered (empty-string) channel contributes no entries.
    prefilter_unfiltered_ch = prefilter_normal_multi_ch + [""]
    highpass, lowpass = _parse_prefilter_string(prefilter_unfiltered_ch)
    assert_array_equal(highpass, ["1"] * 10)
    assert_array_equal(lowpass, ["30"] * 10)
    # Format from the EDF specification docs, including a notch field.
    prefilter_edf_specs_doc = ["HP:0.1Hz LP:75Hz N:50Hz"]
    highpass, lowpass = _parse_prefilter_string(prefilter_edf_specs_doc)
    assert_array_equal(highpass, ["0.1"])
    assert_array_equal(lowpass, ["75"])
@testing.requires_testing_data
@pytest.mark.parametrize('fname', [test_generator_edf, test_generator_bdf])
def test_load_generator(fname, recwarn):
    """Test IO of annotations from edf and bdf files with raw info."""
    # Dispatch on the file extension.
    ext = op.splitext(fname)[1][1:].lower()
    if ext == 'edf':
        raw = read_raw_edf(fname)
    elif ext == 'bdf':
        raw = read_raw_bdf(fname)
    assert len(raw.annotations.onset) == 2
    # All channels should have been assigned a single (EEG) type.
    found_types = [k for k, v in
                   channel_indices_by_type(raw.info, picks=None).items()
                   if v]
    assert len(found_types) == 1
    events, event_id = events_from_annotations(raw)
    ch_names = ['squarewave', 'ramp', 'pulse', 'ECG', 'noise', 'sine 1 Hz',
                'sine 8 Hz', 'sine 8.5 Hz', 'sine 15 Hz', 'sine 17 Hz',
                'sine 50 Hz']
    assert raw.get_data().shape == (11, 120000)
    assert raw.ch_names == ch_names
    assert event_id == {'RECORD START': 2, 'REC STOP': 1}
    assert_array_equal(events, [[0, 0, 2], [120000, 0, 1]])
@pytest.mark.parametrize('EXPECTED, test_input', [
    pytest.param({'stAtUs': 'stim', 'tRigGer': 'stim', 'sine 1 Hz': 'eeg'},
                 'auto', id='auto'),
    pytest.param({'stAtUs': 'eeg', 'tRigGer': 'eeg', 'sine 1 Hz': 'eeg'},
                 None, id='None'),
    pytest.param({'stAtUs': 'eeg', 'tRigGer': 'eeg', 'sine 1 Hz': 'stim'},
                 'sine 1 Hz', id='single string'),
    pytest.param({'stAtUs': 'eeg', 'tRigGer': 'eeg', 'sine 1 Hz': 'stim'},
                 2, id='single int'),
    pytest.param({'stAtUs': 'eeg', 'tRigGer': 'eeg', 'sine 1 Hz': 'stim'},
                 -1, id='single int (revers indexing)'),
    pytest.param({'stAtUs': 'stim', 'tRigGer': 'stim', 'sine 1 Hz': 'eeg'},
                 [0, 1], id='int list')])
def test_edf_stim_ch_pick_up(test_input, EXPECTED):
    """Test stim_channel."""
    # This is fragile for EEG/EEG-CSD, so just omit csd
    KIND_DICT = get_channel_type_constants()
    # Reverse lookup from FIFF channel-kind constants back to type names.
    TYPE_LUT = {v['kind']: k for k, v in KIND_DICT.items() if k not in
                ('csd', 'chpi')}  # chpi not needed, and unhashable (a list)
    fname = op.join(data_dir, 'test_stim_channel.edf')
    raw = read_raw_edf(fname, stim_channel=test_input)
    ch_types = {ch['ch_name']: TYPE_LUT[ch['kind']] for ch in raw.info['chs']}
    assert ch_types == EXPECTED
@testing.requires_testing_data
def test_bdf_multiple_annotation_channels():
    """Test BDF with multiple annotation channels."""
    # Annotations spread over several channels must all be collected.
    raw = read_raw_bdf(bdf_multiple_annotations_path)
    assert len(raw.annotations) == 10
    descriptions = np.array(['signal_start', 'EEG-check#1', 'TestStim#1',
                             'TestStim#2', 'TestStim#3', 'TestStim#4',
                             'TestStim#5', 'TestStim#6', 'TestStim#7',
                             'Ligths-Off#1'], dtype='<U12')
    assert_array_equal(descriptions, raw.annotations.description)
@testing.requires_testing_data
def test_edf_lowpass_zero():
    """Test if a lowpass filter of 0Hz is mapped to the Nyquist frequency."""
    raw = read_raw_edf(edf_stim_resamp_path)
    # Long channel names (>15 characters) must not be truncated.
    assert raw.ch_names[100] == 'EEG LDAMT_01-REF'
    assert len(raw.ch_names[100]) > 15
    # A 0 Hz lowpass in the header means "unfiltered": map to Nyquist.
    assert_allclose(raw.info["lowpass"], raw.info["sfreq"] / 2)
@testing.requires_testing_data
def test_edf_annot_sub_s_onset():
    """Test reading of sub-second annotation onsets."""
    # Onsets with fractional-second precision must survive parsing.
    raw = read_raw_edf(edf_annot_sub_s_path)
    assert_allclose(raw.annotations.onset, [1.951172, 3.492188])
def test_invalid_date(tmpdir):
    """Test handling of invalid date in EDF header."""
    with open(edf_path, 'rb') as f:  # read valid test file
        edf = bytearray(f.read())
    # original date in header is 29.04.14 (2014-04-29) at pos 168:176
    # but we also use Startdate if available,
    # which starts at byte 88 and is b'Startdate 29-APR-2014 X X X'
    # create invalid date 29.02.14 (2014 is not a leap year)
    # one wrong: no warning
    # Only the textual Startdate field is corrupted; the numeric date at
    # 168:176 is still valid, so the reader can recover silently.
    edf[101:104] = b'FEB'
    assert edf[172] == ord('4')
    fname = op.join(str(tmpdir), "temp.edf")
    with open(fname, "wb") as f:
        f.write(edf)
    read_raw_edf(fname)
    # other wrong: no warning
    edf[101:104] = b'APR'
    edf[172] = ord('2')
    with open(fname, "wb") as f:
        f.write(edf)
    read_raw_edf(fname)
    # both wrong: warning
    edf[101:104] = b'FEB'
    edf[172] = ord('2')
    with open(fname, "wb") as f:
        f.write(edf)
    with pytest.warns(RuntimeWarning, match='Invalid date'):
        read_raw_edf(fname)
    # another invalid date 29.00.14 (0 is not a month)
    assert edf[101:104] == b'FEB'
    edf[172] = ord('0')
    with open(fname, "wb") as f:
        f.write(edf)
    with pytest.warns(RuntimeWarning, match='Invalid date'):
        read_raw_edf(fname)
def test_empty_chars():
    """Test blank char support."""
    # NUL bytes used as padding in numeric header fields must be ignored.
    assert int(_edf_str(b'1819\x00 ')) == 1819
def _hp_lp_rev(*args, **kwargs):
    # Monkeypatch stand-in for _read_edf_header that deliberately swaps
    # the parsed highpass/lowpass values, simulating EDF headers whose
    # filter fields were written in reversed order (gh-8584).
    out, orig_units = _read_edf_header(*args, **kwargs)
    out['lowpass'], out['highpass'] = out['highpass'], out['lowpass']
    # this will happen for test_edf_stim_resamp.edf
    if len(out['lowpass']) and out['lowpass'][0] == '0.000' and \
            len(out['highpass']) and out['highpass'][0] == '0.0':
        out['highpass'][0] = '10.0'
    return out, orig_units
@pytest.mark.filterwarnings('ignore:.*too long.*:RuntimeWarning')
@pytest.mark.parametrize('fname, lo, hi, warns', [
    (edf_path, 256, 0, False),
    (edf_uneven_path, 50, 0, False),
    (edf_stim_channel_path, 64, 0, False),
    pytest.param(edf_overlap_annot_path, 64, 0, False, marks=td_mark),
    pytest.param(edf_reduced, 256, 0, False, marks=td_mark),
    pytest.param(test_generator_edf, 100, 0, False, marks=td_mark),
    pytest.param(edf_stim_resamp_path, 256, 0, True, marks=td_mark),
])
def test_hp_lp_reversed(fname, lo, hi, warns, monkeypatch):
    """Test HP/LP reversed (gh-8584)."""
    fname = str(fname)
    # Sanity-check the unpatched reader first.
    raw = read_raw_edf(fname)
    assert raw.info['lowpass'] == lo
    assert raw.info['highpass'] == hi
    # Force a header parser that returns swapped filter fields; the
    # reader is expected to detect and correct the inversion.
    monkeypatch.setattr(edf.edf, '_read_edf_header', _hp_lp_rev)
    if warns:
        ctx = pytest.warns(RuntimeWarning, match='greater than lowpass')
        new_lo, new_hi = raw.info['sfreq'] / 2., 0.
    else:
        ctx = nullcontext()
        new_lo, new_hi = lo, hi
    with ctx:
        raw = read_raw_edf(fname)
    assert raw.info['lowpass'] == new_lo
    assert raw.info['highpass'] == new_hi
def test_degenerate():
    """Test checking of some bad inputs."""
    # Every reader must reject the plain-text file with a clear error.
    for func in (read_raw_edf, read_raw_bdf, read_raw_gdf,
                 partial(_read_header, exclude=())):
        with pytest.raises(NotImplementedError, match='Only.*txt.*'):
            func(edf_txt_stim_channel_path)
| bsd-3-clause |
datapythonista/pandas | pandas/tests/frame/methods/test_is_homogeneous_dtype.py | 4 | 1422 | import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas import (
Categorical,
DataFrame,
)
# _is_homogeneous_type always returns True for ArrayManager
# so skip the whole module under that data-manager configuration.
pytestmark = td.skip_array_manager_invalid_test
@pytest.mark.parametrize(
    "frame, expected",
    [
        # empty frame
        (DataFrame(), True),
        # two columns with the same dtype
        (DataFrame({"A": [1, 2], "B": [1, 2]}), True),
        # two object-dtype columns holding different Python types
        (
            DataFrame(
                {
                    "A": np.array([1, 2], dtype=object),
                    "B": np.array(["a", "b"], dtype=object),
                }
            ),
            True,
        ),
        # two extension-dtype columns with identical dtypes
        (
            DataFrame({"A": Categorical(["a", "b"]), "B": Categorical(["a", "b"])}),
            True,
        ),
        # int vs. float
        (DataFrame({"A": [1, 2], "B": [1.0, 2.0]}), False),
        # same kind, different itemsize
        (
            DataFrame(
                {
                    "A": np.array([1, 2], dtype=np.int32),
                    "B": np.array([1, 2], dtype=np.int64),
                }
            ),
            False,
        ),
        # extension dtypes that differ (different categories)
        (
            DataFrame({"A": Categorical(["a", "b"]), "B": Categorical(["b", "c"])}),
            False,
        ),
    ],
)
def test_is_homogeneous_type(frame, expected):
    # _is_homogeneous_type is True iff every column shares a single dtype.
    assert frame._is_homogeneous_type is expected
| bsd-3-clause |
krez13/scikit-learn | sklearn/ensemble/weight_boosting.py | 23 | 40739 | """Weight Boosting
This module contains weight boosting estimators for both classification and
regression.
The module structure is the following:
- The ``BaseWeightBoosting`` base class implements a common ``fit`` method
for all the estimators in the module. Regression and classification
only differ from each other in the loss function that is optimized.
- ``AdaBoostClassifier`` implements adaptive boosting (AdaBoost-SAMME) for
classification problems.
- ``AdaBoostRegressor`` implements adaptive boosting (AdaBoost.R2) for
regression problems.
"""
# Authors: Noel Dawe <noel@dawe.me>
# Gilles Louppe <g.louppe@gmail.com>
# Hamzeh Alsalhi <ha258@cornell.edu>
# Arnaud Joly <arnaud.v.joly@gmail.com>
#
# Licence: BSD 3 clause
from abc import ABCMeta, abstractmethod
import numpy as np
from numpy.core.umath_tests import inner1d
from .base import BaseEnsemble
from ..base import ClassifierMixin, RegressorMixin, is_regressor
from ..externals import six
from ..externals.six.moves import zip
from ..externals.six.moves import xrange as range
from .forest import BaseForest
from ..tree import DecisionTreeClassifier, DecisionTreeRegressor
from ..tree.tree import BaseDecisionTree
from ..tree._tree import DTYPE
from ..utils import check_array, check_X_y, check_random_state
from ..metrics import accuracy_score, r2_score
from sklearn.utils.validation import has_fit_parameter, check_is_fitted
__all__ = [
'AdaBoostClassifier',
'AdaBoostRegressor',
]
class BaseWeightBoosting(six.with_metaclass(ABCMeta, BaseEnsemble)):
"""Base class for AdaBoost estimators.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator=None,
n_estimators=50,
estimator_params=tuple(),
learning_rate=1.,
random_state=None):
super(BaseWeightBoosting, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params)
self.learning_rate = learning_rate
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
"""Build a boosted classifier/regressor from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR. The dtype is
forced to DTYPE from tree._tree if the base classifier of this
ensemble weighted boosting classifier is a tree or forest.
y : array-like of shape = [n_samples]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
1 / n_samples.
Returns
-------
self : object
Returns self.
"""
# Check parameters
if self.learning_rate <= 0:
raise ValueError("learning_rate must be greater than zero")
if (self.base_estimator is None or
isinstance(self.base_estimator, (BaseDecisionTree,
BaseForest))):
dtype = DTYPE
accept_sparse = 'csc'
else:
dtype = None
accept_sparse = ['csr', 'csc']
X, y = check_X_y(X, y, accept_sparse=accept_sparse, dtype=dtype,
y_numeric=is_regressor(self))
if sample_weight is None:
# Initialize weights to 1 / n_samples
sample_weight = np.empty(X.shape[0], dtype=np.float64)
sample_weight[:] = 1. / X.shape[0]
else:
# Normalize existing weights
sample_weight = sample_weight / sample_weight.sum(dtype=np.float64)
# Check that the sample weights sum is positive
if sample_weight.sum() <= 0:
raise ValueError(
"Attempting to fit with a non-positive "
"weighted number of samples.")
# Check parameters
self._validate_estimator()
# Clear any previous fit results
self.estimators_ = []
self.estimator_weights_ = np.zeros(self.n_estimators, dtype=np.float64)
self.estimator_errors_ = np.ones(self.n_estimators, dtype=np.float64)
for iboost in range(self.n_estimators):
# Boosting step
sample_weight, estimator_weight, estimator_error = self._boost(
iboost,
X, y,
sample_weight)
# Early termination
if sample_weight is None:
break
self.estimator_weights_[iboost] = estimator_weight
self.estimator_errors_[iboost] = estimator_error
# Stop if error is zero
if estimator_error == 0:
break
sample_weight_sum = np.sum(sample_weight)
# Stop if the sum of sample weights has become non-positive
if sample_weight_sum <= 0:
break
if iboost < self.n_estimators - 1:
# Normalize
sample_weight /= sample_weight_sum
return self
@abstractmethod
def _boost(self, iboost, X, y, sample_weight):
"""Implement a single boost.
Warning: This method needs to be overriden by subclasses.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
error : float
The classification error for the current boost.
If None then boosting has terminated early.
"""
pass
def staged_score(self, X, y, sample_weight=None):
    """Return staged scores for X, y.

    This generator method yields the ensemble score after each iteration of
    boosting and therefore allows monitoring, such as to determine the
    score on a test set after each boost.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape = [n_samples, n_features]
        The training input samples. Sparse matrix can be CSC, CSR, COO,
        DOK, or LIL. DOK and LIL are converted to CSR.
    y : array-like, shape = [n_samples]
        Labels for X.
    sample_weight : array-like, shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    z : float
    """
    # Classifiers are scored by accuracy, regressors by R^2; the choice
    # cannot change while iterating, so pick the scorer once up front.
    scorer = (accuracy_score if isinstance(self, ClassifierMixin)
              else r2_score)
    for y_pred in self.staged_predict(X):
        yield scorer(y, y_pred, sample_weight=sample_weight)
@property
def feature_importances_(self):
    """Return the feature importances (the higher, the more important the
    feature).

    Returns
    -------
    feature_importances_ : array, shape = [n_features]

    Raises
    ------
    ValueError
        If the ensemble has not been fitted yet.
    AttributeError
        If ``base_estimator`` does not expose ``feature_importances_``.
    """
    # Use getattr with a default so that an unfitted estimator (where the
    # ``estimators_`` attribute does not exist yet) raises the intended
    # ValueError instead of a bare AttributeError. The falsy check also
    # covers the explicit ``None`` and empty-list cases the original
    # condition tested.
    if not getattr(self, "estimators_", None):
        raise ValueError("Estimator not fitted, "
                         "call `fit` before `feature_importances_`.")
    try:
        norm = self.estimator_weights_.sum()
        # Weighted average of the per-estimator importances, normalized by
        # the total estimator weight.
        return (sum(weight * clf.feature_importances_ for weight, clf
                    in zip(self.estimator_weights_, self.estimators_))
                / norm)
    except AttributeError:
        raise AttributeError(
            "Unable to compute feature importances "
            "since base_estimator does not have a "
            "feature_importances_ attribute")
def _validate_X_predict(self, X):
    """Ensure that X is in the proper format"""
    base = self.base_estimator
    if base is None or isinstance(base, (BaseDecisionTree, BaseForest)):
        # Tree-based learners need the tree module's DTYPE and CSR input.
        return check_array(X, accept_sparse='csr', dtype=DTYPE)
    # Arbitrary base estimators: accept any common sparse format, keep dtype.
    return check_array(X, accept_sparse=['csr', 'csc', 'coo'])
def _samme_proba(estimator, n_classes, X):
"""Calculate algorithm 4, step 2, equation c) of Zhu et al [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
proba = estimator.predict_proba(X)
# Displace zero probabilities so the log is defined.
# Also fix negative elements which may occur with
# negative sample weights.
proba[proba < np.finfo(proba.dtype).eps] = np.finfo(proba.dtype).eps
log_proba = np.log(proba)
return (n_classes - 1) * (log_proba - (1. / n_classes)
* log_proba.sum(axis=1)[:, np.newaxis])
class AdaBoostClassifier(BaseWeightBoosting, ClassifierMixin):
"""An AdaBoost classifier.
An AdaBoost [1] classifier is a meta-estimator that begins by fitting a
classifier on the original dataset and then fits additional copies of the
classifier on the same dataset but where the weights of incorrectly
classified instances are adjusted such that subsequent classifiers focus
more on difficult cases.
This class implements the algorithm known as AdaBoost-SAMME [2].
Read more in the :ref:`User Guide <adaboost>`.
Parameters
----------
base_estimator : object, optional (default=DecisionTreeClassifier)
The base estimator from which the boosted ensemble is built.
Support for sample weighting is required, as well as proper `classes_`
and `n_classes_` attributes.
n_estimators : integer, optional (default=50)
The maximum number of estimators at which boosting is terminated.
In case of perfect fit, the learning procedure is stopped early.
learning_rate : float, optional (default=1.)
Learning rate shrinks the contribution of each classifier by
``learning_rate``. There is a trade-off between ``learning_rate`` and
``n_estimators``.
algorithm : {'SAMME', 'SAMME.R'}, optional (default='SAMME.R')
If 'SAMME.R' then use the SAMME.R real boosting algorithm.
``base_estimator`` must support calculation of class probabilities.
If 'SAMME' then use the SAMME discrete boosting algorithm.
The SAMME.R algorithm typically converges faster than SAMME,
achieving a lower test error with fewer boosting iterations.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
estimators_ : list of classifiers
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes]
The classes labels.
n_classes_ : int
The number of classes.
estimator_weights_ : array of floats
Weights for each estimator in the boosted ensemble.
estimator_errors_ : array of floats
Classification error for each estimator in the boosted
ensemble.
feature_importances_ : array of shape = [n_features]
The feature importances if supported by the ``base_estimator``.
See also
--------
AdaBoostRegressor, GradientBoostingClassifier, DecisionTreeClassifier
References
----------
.. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
on-Line Learning and an Application to Boosting", 1995.
.. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
def __init__(self,
base_estimator=None,
n_estimators=50,
learning_rate=1.,
algorithm='SAMME.R',
random_state=None):
super(AdaBoostClassifier, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
learning_rate=learning_rate,
random_state=random_state)
self.algorithm = algorithm
def fit(self, X, y, sample_weight=None):
    """Build a boosted classifier from the training set (X, y).

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape = [n_samples, n_features]
        The training input samples. Sparse matrix can be CSC, CSR, COO,
        DOK, or LIL. DOK and LIL are converted to CSR.
    y : array-like of shape = [n_samples]
        The target values (class labels).
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights. If None, the sample weights are initialized to
        ``1 / n_samples``.

    Returns
    -------
    self : object
        Returns self.
    """
    # Reject unknown boosting variants before any expensive work happens.
    supported = ('SAMME', 'SAMME.R')
    if self.algorithm not in supported:
        raise ValueError("algorithm %s is not supported" % self.algorithm)
    # The shared base class implements the actual boosting loop.
    return super(AdaBoostClassifier, self).fit(X, y, sample_weight)
def _validate_estimator(self):
"""Check the estimator and set the base_estimator_ attribute."""
super(AdaBoostClassifier, self)._validate_estimator(
default=DecisionTreeClassifier(max_depth=1))
# SAMME-R requires predict_proba-enabled base estimators
if self.algorithm == 'SAMME.R':
if not hasattr(self.base_estimator_, 'predict_proba'):
raise TypeError(
"AdaBoostClassifier with algorithm='SAMME.R' requires "
"that the weak learner supports the calculation of class "
"probabilities with a predict_proba method.\n"
"Please change the base estimator or set "
"algorithm='SAMME' instead.")
if not has_fit_parameter(self.base_estimator_, "sample_weight"):
raise ValueError("%s doesn't support sample_weight."
% self.base_estimator_.__class__.__name__)
def _boost(self, iboost, X, y, sample_weight):
"""Implement a single boost.
Perform a single boost according to the real multi-class SAMME.R
algorithm or to the discrete SAMME algorithm and return the updated
sample weights.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
estimator_error : float
The classification error for the current boost.
If None then boosting has terminated early.
"""
if self.algorithm == 'SAMME.R':
return self._boost_real(iboost, X, y, sample_weight)
else: # elif self.algorithm == "SAMME":
return self._boost_discrete(iboost, X, y, sample_weight)
def _boost_real(self, iboost, X, y, sample_weight):
"""Implement a single boost using the SAMME.R real algorithm."""
estimator = self._make_estimator()
try:
estimator.set_params(random_state=self.random_state)
except ValueError:
pass
estimator.fit(X, y, sample_weight=sample_weight)
y_predict_proba = estimator.predict_proba(X)
if iboost == 0:
self.classes_ = getattr(estimator, 'classes_', None)
self.n_classes_ = len(self.classes_)
y_predict = self.classes_.take(np.argmax(y_predict_proba, axis=1),
axis=0)
# Instances incorrectly classified
incorrect = y_predict != y
# Error fraction
estimator_error = np.mean(
np.average(incorrect, weights=sample_weight, axis=0))
# Stop if classification is perfect
if estimator_error <= 0:
return sample_weight, 1., 0.
# Construct y coding as described in Zhu et al [2]:
#
# y_k = 1 if c == k else -1 / (K - 1)
#
# where K == n_classes_ and c, k in [0, K) are indices along the second
# axis of the y coding with c being the index corresponding to the true
# class label.
n_classes = self.n_classes_
classes = self.classes_
y_codes = np.array([-1. / (n_classes - 1), 1.])
y_coding = y_codes.take(classes == y[:, np.newaxis])
# Displace zero probabilities so the log is defined.
# Also fix negative elements which may occur with
# negative sample weights.
proba = y_predict_proba # alias for readability
proba[proba < np.finfo(proba.dtype).eps] = np.finfo(proba.dtype).eps
# Boost weight using multi-class AdaBoost SAMME.R alg
estimator_weight = (-1. * self.learning_rate
* (((n_classes - 1.) / n_classes) *
inner1d(y_coding, np.log(y_predict_proba))))
# Only boost the weights if it will fit again
if not iboost == self.n_estimators - 1:
# Only boost positive weights
sample_weight *= np.exp(estimator_weight *
((sample_weight > 0) |
(estimator_weight < 0)))
return sample_weight, 1., estimator_error
def _boost_discrete(self, iboost, X, y, sample_weight):
"""Implement a single boost using the SAMME discrete algorithm."""
estimator = self._make_estimator()
try:
estimator.set_params(random_state=self.random_state)
except ValueError:
pass
estimator.fit(X, y, sample_weight=sample_weight)
y_predict = estimator.predict(X)
if iboost == 0:
self.classes_ = getattr(estimator, 'classes_', None)
self.n_classes_ = len(self.classes_)
# Instances incorrectly classified
incorrect = y_predict != y
# Error fraction
estimator_error = np.mean(
np.average(incorrect, weights=sample_weight, axis=0))
# Stop if classification is perfect
if estimator_error <= 0:
return sample_weight, 1., 0.
n_classes = self.n_classes_
# Stop if the error is at least as bad as random guessing
if estimator_error >= 1. - (1. / n_classes):
self.estimators_.pop(-1)
if len(self.estimators_) == 0:
raise ValueError('BaseClassifier in AdaBoostClassifier '
'ensemble is worse than random, ensemble '
'can not be fit.')
return None, None, None
# Boost weight using multi-class AdaBoost SAMME alg
estimator_weight = self.learning_rate * (
np.log((1. - estimator_error) / estimator_error) +
np.log(n_classes - 1.))
# Only boost the weights if I will fit again
if not iboost == self.n_estimators - 1:
# Only boost positive weights
sample_weight *= np.exp(estimator_weight * incorrect *
((sample_weight > 0) |
(estimator_weight < 0)))
return sample_weight, estimator_weight, estimator_error
def predict(self, X):
    """Predict classes for X.

    The predicted class of an input sample is computed as the weighted mean
    prediction of the classifiers in the ensemble.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape = [n_samples, n_features]
        The training input samples. Sparse matrix can be CSC, CSR, COO,
        DOK, or LIL. DOK and LIL are converted to CSR.

    Returns
    -------
    y : array of shape = [n_samples]
        The predicted classes.
    """
    scores = self.decision_function(X)
    if self.n_classes_ == 2:
        # Binary case: decision_function returns a 1-D signed score where
        # a positive value selects the second class.
        chosen = (scores > 0).astype(int)
    else:
        chosen = np.argmax(scores, axis=1)
    return self.classes_.take(chosen, axis=0)
def staged_predict(self, X):
"""Return staged predictions for X.
The predicted class of an input sample is computed as the weighted mean
prediction of the classifiers in the ensemble.
This generator method yields the ensemble prediction after each
iteration of boosting and therefore allows monitoring, such as to
determine the prediction on a test set after each boost.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array, shape = [n_samples]
The predicted classes.
"""
n_classes = self.n_classes_
classes = self.classes_
if n_classes == 2:
for pred in self.staged_decision_function(X):
yield np.array(classes.take(pred > 0, axis=0))
else:
for pred in self.staged_decision_function(X):
yield np.array(classes.take(
np.argmax(pred, axis=1), axis=0))
def decision_function(self, X):
    """Compute the decision function of ``X``.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape = [n_samples, n_features]
        The training input samples. Sparse matrix can be CSC, CSR, COO,
        DOK, or LIL. DOK and LIL are converted to CSR.

    Returns
    -------
    score : array, shape = [n_samples, k]
        The decision function of the input samples. The order of
        outputs is the same of that of the `classes_` attribute.
        Binary classification is a special cases with ``k == 1``,
        otherwise ``k==n_classes``. For binary classification,
        values closer to -1 or 1 mean more like the first or second
        class in ``classes_``, respectively.
    """
    check_is_fitted(self, "n_classes_")
    X = self._validate_X_predict(X)
    n_classes = self.n_classes_
    # Column vector of class labels, broadcast against per-estimator
    # predictions below.
    classes = self.classes_[:, np.newaxis]
    pred = None
    if self.algorithm == 'SAMME.R':
        # The weights are all 1. for SAMME.R
        pred = sum(_samme_proba(estimator, n_classes, X)
                   for estimator in self.estimators_)
    else:  # self.algorithm == "SAMME"
        # SAMME: each estimator votes with a one-hot class indicator,
        # scaled by its estimator weight.
        pred = sum((estimator.predict(X) == classes).T * w
                   for estimator, w in zip(self.estimators_,
                                           self.estimator_weights_))
    # Normalize by the total estimator weight (for SAMME.R this divides
    # the summed per-estimator contributions by the number of estimators,
    # since all weights are 1).
    pred /= self.estimator_weights_.sum()
    if n_classes == 2:
        # Binary special case: collapse the two symmetric columns into a
        # single signed score (negative -> classes_[0], positive ->
        # classes_[1]).
        pred[:, 0] *= -1
        return pred.sum(axis=1)
    return pred
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each boosting iteration.
This method allows monitoring (i.e. determine error on testing set)
after each boosting iteration.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of
outputs is the same of that of the `classes_` attribute.
Binary classification is a special cases with ``k == 1``,
otherwise ``k==n_classes``. For binary classification,
values closer to -1 or 1 mean more like the first or second
class in ``classes_``, respectively.
"""
check_is_fitted(self, "n_classes_")
X = self._validate_X_predict(X)
n_classes = self.n_classes_
classes = self.classes_[:, np.newaxis]
pred = None
norm = 0.
for weight, estimator in zip(self.estimator_weights_,
self.estimators_):
norm += weight
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
current_pred = _samme_proba(estimator, n_classes, X)
else: # elif self.algorithm == "SAMME":
current_pred = estimator.predict(X)
current_pred = (current_pred == classes).T * weight
if pred is None:
pred = current_pred
else:
pred += current_pred
if n_classes == 2:
tmp_pred = np.copy(pred)
tmp_pred[:, 0] *= -1
yield (tmp_pred / norm).sum(axis=1)
else:
yield pred / norm
def predict_proba(self, X):
"""Predict class probabilities for X.
The predicted class probabilities of an input sample is computed as
the weighted mean predicted class probabilities of the classifiers
in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
p : array of shape = [n_samples]
The class probabilities of the input samples. The order of
outputs is the same of that of the `classes_` attribute.
"""
check_is_fitted(self, "n_classes_")
n_classes = self.n_classes_
X = self._validate_X_predict(X)
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
proba = sum(_samme_proba(estimator, n_classes, X)
for estimator in self.estimators_)
else: # self.algorithm == "SAMME"
proba = sum(estimator.predict_proba(X) * w
for estimator, w in zip(self.estimators_,
self.estimator_weights_))
proba /= self.estimator_weights_.sum()
proba = np.exp((1. / (n_classes - 1)) * proba)
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
def staged_predict_proba(self, X):
"""Predict class probabilities for X.
The predicted class probabilities of an input sample is computed as
the weighted mean predicted class probabilities of the classifiers
in the ensemble.
This generator method yields the ensemble predicted class probabilities
after each iteration of boosting and therefore allows monitoring, such
as to determine the predicted class probabilities on a test set after
each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
p : generator of array, shape = [n_samples]
The class probabilities of the input samples. The order of
outputs is the same of that of the `classes_` attribute.
"""
X = self._validate_X_predict(X)
n_classes = self.n_classes_
proba = None
norm = 0.
for weight, estimator in zip(self.estimator_weights_,
self.estimators_):
norm += weight
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
current_proba = _samme_proba(estimator, n_classes, X)
else: # elif self.algorithm == "SAMME":
current_proba = estimator.predict_proba(X) * weight
if proba is None:
proba = current_proba
else:
proba += current_proba
real_proba = np.exp((1. / (n_classes - 1)) * (proba / norm))
normalizer = real_proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
real_proba /= normalizer
yield real_proba
def predict_log_proba(self, X):
    """Predict class log-probabilities for X.

    The predicted class log-probabilities of an input sample is computed as
    the weighted mean predicted class log-probabilities of the classifiers
    in the ensemble.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape = [n_samples, n_features]
        The training input samples. Sparse matrix can be CSC, CSR, COO,
        DOK, or LIL. DOK and LIL are converted to CSR.

    Returns
    -------
    p : array of shape = [n_samples]
        The class probabilities of the input samples. The order of
        outputs is the same of that of the `classes_` attribute.
    """
    # Element-wise log of the ensemble-averaged probabilities.
    proba = self.predict_proba(X)
    return np.log(proba)
class AdaBoostRegressor(BaseWeightBoosting, RegressorMixin):
"""An AdaBoost regressor.
An AdaBoost [1] regressor is a meta-estimator that begins by fitting a
regressor on the original dataset and then fits additional copies of the
regressor on the same dataset but where the weights of instances are
adjusted according to the error of the current prediction. As such,
subsequent regressors focus more on difficult cases.
This class implements the algorithm known as AdaBoost.R2 [2].
Read more in the :ref:`User Guide <adaboost>`.
Parameters
----------
base_estimator : object, optional (default=DecisionTreeRegressor)
The base estimator from which the boosted ensemble is built.
Support for sample weighting is required.
n_estimators : integer, optional (default=50)
The maximum number of estimators at which boosting is terminated.
In case of perfect fit, the learning procedure is stopped early.
learning_rate : float, optional (default=1.)
Learning rate shrinks the contribution of each regressor by
``learning_rate``. There is a trade-off between ``learning_rate`` and
``n_estimators``.
loss : {'linear', 'square', 'exponential'}, optional (default='linear')
The loss function to use when updating the weights after each
boosting iteration.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
estimators_ : list of classifiers
The collection of fitted sub-estimators.
estimator_weights_ : array of floats
Weights for each estimator in the boosted ensemble.
estimator_errors_ : array of floats
Regression error for each estimator in the boosted ensemble.
feature_importances_ : array of shape = [n_features]
The feature importances if supported by the ``base_estimator``.
See also
--------
AdaBoostClassifier, GradientBoostingRegressor, DecisionTreeRegressor
References
----------
.. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
on-Line Learning and an Application to Boosting", 1995.
.. [2] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
"""
def __init__(self,
             base_estimator=None,
             n_estimators=50,
             learning_rate=1.,
             loss='linear',
             random_state=None):
    super(AdaBoostRegressor, self).__init__(
        base_estimator=base_estimator,
        n_estimators=n_estimators,
        learning_rate=learning_rate,
        random_state=random_state)
    # Loss used by AdaBoost.R2 to turn prediction errors into sample
    # weights ('linear', 'square' or 'exponential'); validated in ``fit``.
    self.loss = loss
    # NOTE: ``self.random_state`` is already stored by the base-class
    # __init__ above; the former duplicate assignment here was removed.
def fit(self, X, y, sample_weight=None):
    """Build a boosted regressor from the training set (X, y).

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape = [n_samples, n_features]
        The training input samples. Sparse matrix can be CSC, CSR, COO,
        DOK, or LIL. DOK and LIL are converted to CSR.
    y : array-like of shape = [n_samples]
        The target values (real numbers).
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights. If None, the sample weights are initialized to
        1 / n_samples.

    Returns
    -------
    self : object
        Returns self.
    """
    # Reject unsupported loss functions before doing any real work.
    allowed_losses = ('linear', 'square', 'exponential')
    if self.loss not in allowed_losses:
        raise ValueError(
            "loss must be 'linear', 'square', or 'exponential'")
    # The shared base class implements the actual boosting loop.
    return super(AdaBoostRegressor, self).fit(X, y, sample_weight)
def _validate_estimator(self):
"""Check the estimator and set the base_estimator_ attribute."""
super(AdaBoostRegressor, self)._validate_estimator(
default=DecisionTreeRegressor(max_depth=3))
def _boost(self, iboost, X, y, sample_weight):
"""Implement a single boost for regression
Perform a single boost according to the AdaBoost.R2 algorithm and
return the updated sample weights.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
estimator_error : float
The regression error for the current boost.
If None then boosting has terminated early.
"""
estimator = self._make_estimator()
try:
estimator.set_params(random_state=self.random_state)
except ValueError:
pass
generator = check_random_state(self.random_state)
# Weighted sampling of the training set with replacement
# For NumPy >= 1.7.0 use np.random.choice
cdf = sample_weight.cumsum()
cdf /= cdf[-1]
uniform_samples = generator.random_sample(X.shape[0])
bootstrap_idx = cdf.searchsorted(uniform_samples, side='right')
# searchsorted returns a scalar
bootstrap_idx = np.array(bootstrap_idx, copy=False)
# Fit on the bootstrapped sample and obtain a prediction
# for all samples in the training set
estimator.fit(X[bootstrap_idx], y[bootstrap_idx])
y_predict = estimator.predict(X)
error_vect = np.abs(y_predict - y)
error_max = error_vect.max()
if error_max != 0.:
error_vect /= error_max
if self.loss == 'square':
error_vect **= 2
elif self.loss == 'exponential':
error_vect = 1. - np.exp(- error_vect)
# Calculate the average loss
estimator_error = (sample_weight * error_vect).sum()
if estimator_error <= 0:
# Stop if fit is perfect
return sample_weight, 1., 0.
elif estimator_error >= 0.5:
# Discard current estimator only if it isn't the only one
if len(self.estimators_) > 1:
self.estimators_.pop(-1)
return None, None, None
beta = estimator_error / (1. - estimator_error)
# Boost weight using AdaBoost.R2 alg
estimator_weight = self.learning_rate * np.log(1. / beta)
if not iboost == self.n_estimators - 1:
sample_weight *= np.power(
beta,
(1. - error_vect) * self.learning_rate)
return sample_weight, estimator_weight, estimator_error
def _get_median_predict(self, X, limit):
    """Weighted-median prediction over the first ``limit`` estimators."""
    # One row per sample, one column per estimator.
    preds = np.array([est.predict(X)
                      for est in self.estimators_[:limit]]).T
    # Walk each row's predictions in ascending order and accumulate the
    # corresponding estimator weights; the weighted median is the first
    # prediction whose cumulative weight reaches half the total.
    order = np.argsort(preds, axis=1)
    cdf = self.estimator_weights_[order].cumsum(axis=1)
    half_total = 0.5 * cdf[:, -1][:, np.newaxis]
    median_pos = (cdf >= half_total).argmax(axis=1)
    rows = np.arange(X.shape[0])
    median_cols = order[rows, median_pos]
    return preds[rows, median_cols]
def predict(self, X):
"""Predict regression value for X.
The predicted regression value of an input sample is computed
as the weighted median prediction of the classifiers in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
y : array of shape = [n_samples]
The predicted regression values.
"""
check_is_fitted(self, "estimator_weights_")
X = self._validate_X_predict(X)
return self._get_median_predict(X, len(self.estimators_))
def staged_predict(self, X):
    """Return staged predictions for X.

    The predicted regression value of an input sample is computed
    as the weighted median prediction of the classifiers in the ensemble.

    This generator method yields the ensemble prediction after each
    iteration of boosting and therefore allows monitoring, such as to
    determine the prediction on a test set after each boost.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape = [n_samples, n_features]
        The training input samples. Sparse matrix can be CSC, CSR, COO,
        DOK, or LIL. DOK and LIL are converted to CSR.

    Returns
    -------
    y : generator of array, shape = [n_samples]
        The predicted regression values.
    """
    check_is_fitted(self, "estimator_weights_")
    X = self._validate_X_predict(X)
    # Yield the weighted-median prediction using the first i estimators,
    # for i = 1 .. n_fitted (enumerate starts at 1 here).
    for i, _ in enumerate(self.estimators_, 1):
        yield self._get_median_predict(X, limit=i)
| bsd-3-clause |
StefReck/Km3-Autoencoder | scripts/convergence_diagnosis.py | 1 | 13188 | # -*- coding: utf-8 -*-
"""
Create layer output histograms of the last layers of a network, while training.
"""
#import matplotlib
#matplotlib.use('Agg')
from keras.layers import Activation, Input, Lambda, Dropout, Dense, Flatten, Conv3D, MaxPooling3D, UpSampling3D,BatchNormalization, ZeroPadding3D, Conv3DTranspose, AveragePooling3D
from keras.models import load_model, Model
from keras import backend as K
import matplotlib.pyplot as plt
import numpy as np
import h5py
from keras import optimizers, initializers
from matplotlib.backends.backend_pdf import PdfPages
from util.run_cnn import generate_batches_from_hdf5_file, encode_targets
# Batch counts after which histogram plots are generated (batch 0 is
# always plotted automatically before training starts).
plot_after_how_many_batches=[1,]#[1,2,3,10,15,20,25,30,40,50,60,100,200,300,400,500,1000,2000,3000,4000,5000] #0 is automatically plotted
#Which event(s) should be taken from the test file to make the histogramms
which_events = [0,1,2]#range(0,100)
name_of_plots="vgg_3_autoencoder_eps_epoch10_convergence_analysis" #added will be : _withBN.pdf
# Toggle between local (laptop) paths and cluster paths.
laptop=True
if laptop == True:
    #autoencoder:
    model_name_and_path="Daten/xzt/trained_vgg_3_eps_autoencoder_epoch10.h5"
    #Data to produce from:
    data = "Daten/xzt/JTE_KM3Sim_gseagen_elec-CC_3-100GeV-1_1E6-1bin-3_0gspec_ORCA115_9m_2016_100_xzt.h5"
    zero_center = "Daten/xzt/train_muon-CC_and_elec-CC_each_240_xzt_shuffled.h5_zero_center_mean.npy"
    # Locally the train file doubles as the test file.
    test_data=data
else:
    #autoencoder:
    model_name_and_path="/home/woody/capn/mppi013h/Km3-Autoencoder/models/vgg_3_eps/trained_vgg_3_eps_autoencoder_epoch10.h5"
    #Data to produce from:
    #for xzt
    data_path = "/home/woody/capn/mppi033h/Data/ORCA_JTE_NEMOWATER/h5_input_projections_3-100GeV/4dTo3d/h5/xzt/concatenated/"
    train_data = "train_muon-CC_and_elec-CC_each_240_xzt_shuffled.h5"
    test_data = "test_muon-CC_and_elec-CC_each_60_xzt_shuffled.h5"
    zero_center_data = "train_muon-CC_and_elec-CC_each_240_xzt_shuffled.h5_zero_center_mean.npy"
    data=data_path+train_data
    zero_center=data_path+zero_center_data
    test_data=data_path+test_data
def model_setup(autoencoder_model):
    """Load a trained autoencoder and build two classifier heads on its
    frozen encoder: one without and one with batch normalization before
    the dense layers. Returns (model_noBN, model_withBN), both compiled.
    """
    autoencoder = load_model(autoencoder_model)
    #setup the vgg_3 model for convergence analysis
    def conv_block(inp, filters, kernel_size, padding, trainable, channel_axis, strides=(1,1,1), dropout=0.0, ac_reg_penalty=0):
        # Conv3D -> BN -> ReLU (-> optional Dropout) building block.
        # NOTE(review): `regularizers` is not among the visible imports of
        # this file — presumably `from keras import regularizers` exists in
        # the original module; confirm. Also `is not 0` compares identity,
        # not equality (works for small CPython ints, but is fragile).
        regular = regularizers.l2(ac_reg_penalty) if ac_reg_penalty is not 0 else None
        x = Conv3D(filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, kernel_initializer='he_normal', use_bias=False, trainable=trainable, activity_regularizer=regular)(inp)
        x = BatchNormalization(axis=channel_axis, trainable=trainable)(x)
        x = Activation('relu', trainable=trainable)(x)
        if dropout > 0.0: x = Dropout(dropout)(x)
        return x
    def zero_center_and_normalize(x):
        # Per-sample standardization of the flattened encoder output.
        x-=K.mean(x, axis=1, keepdims=True)
        x=x/K.std(x, axis=1, keepdims=True)
        return x
    def setup_vgg_3(autoencoder, with_batchnorm):
        #832k params
        # All conv/BN layers are frozen (trainable=False); only the dense
        # head added below is trained.
        train=False
        channel_axis = 1 if K.image_data_format() == "channels_first" else -1
        inputs = Input(shape=(11,18,50,1))
        x=conv_block(inputs, filters=32, kernel_size=(3,3,3), padding="same", trainable=train, channel_axis=channel_axis) #11x18x50
        x=conv_block(x, filters=32, kernel_size=(3,3,3), padding="same", trainable=train, channel_axis=channel_axis) #11x18x50
        x = AveragePooling3D((1, 1, 2), padding='valid')(x) #11x18x25
        x=conv_block(x, filters=32, kernel_size=(3,3,3), padding="same", trainable=train, channel_axis=channel_axis) #11x18x25
        x = ZeroPadding3D(((0,1),(0,0),(0,1)))(x) #12,18,26
        x=conv_block(x, filters=32, kernel_size=(3,3,3), padding="valid", trainable=train, channel_axis=channel_axis) #10x16x24
        x = AveragePooling3D((2, 2, 2), padding='valid')(x) #5x8x12
        x=conv_block(x, filters=64, kernel_size=(3,3,3), padding="same", trainable=train, channel_axis=channel_axis) #5x8x12
        x=conv_block(x, filters=64, kernel_size=(3,3,3), padding="same", trainable=train, channel_axis=channel_axis) #5x8x12
        x = ZeroPadding3D(((0,1),(0,0),(0,0)))(x) #6x8x12
        x=conv_block(x, filters=64, kernel_size=(3,3,3), padding="valid", trainable=train, channel_axis=channel_axis) #4x6x10
        encoded = AveragePooling3D((2, 2, 2), padding='valid')(x) #2x3x5
        # Copy the trained encoder weights layer-by-layer from the
        # autoencoder (relies on identical layer ordering up to `encoded`).
        encoder= Model(inputs=inputs, outputs=encoded)
        for i,layer in enumerate(encoder.layers):
            layer.set_weights(autoencoder.layers[i].get_weights())
        x = Flatten()(encoded)
        x = Lambda( zero_center_and_normalize )(x)
        if with_batchnorm == True:
            x = BatchNormalization(axis=channel_axis)(x) #
        x = Dense(256, activation='relu', kernel_initializer='he_normal', bias_initializer=initializers.constant(0.0))(x) #init: std 0.032 = sqrt(2/1920), mean=0
        x = Dense(16, activation='relu', kernel_initializer='he_normal', bias_initializer=initializers.constant(0.0))(x)
        outputs = Dense(2, activation='softmax', kernel_initializer='he_normal', bias_initializer=initializers.constant(0.0))(x)
        model = Model(inputs=inputs, outputs=outputs)
        return model
    adam = optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    model_noBN = setup_vgg_3(autoencoder, with_batchnorm=False)
    model_noBN.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])
    model_withBN = setup_vgg_3(autoencoder, with_batchnorm=True)
    model_withBN.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])
    return model_noBN, model_withBN
def check_for_dead_relus(model, how_many_batches, data_for_generator):
    #Check for some samples if there are Relus that are never firing in the last 3 layers
    # Returns, per processed batch, the fraction of neurons in each of the
    # last three layers that output exactly 0 for every sample seen so far.
    inp = model.input                                           # input placeholder
    outputs = [layer.output for layer in model.layers[-3:]]     # layer outputs
    functors = [K.function([inp]+ [K.learning_phase()], [out]) for out in outputs]
    generator = generate_batches_from_hdf5_file(filepath=data_for_generator, batchsize=32, n_bins=(11,18,50,1), class_type=(2, 'up_down'), is_autoencoder=False)
    output=[]
    # Zero-counters per neuron; sizes match the dense head (256, 16, 2).
    stats_of_every_neuron = [np.zeros((256)),np.zeros((16)),np.zeros((2))]
    for batch_no in range(1,1+how_many_batches):
        print("Starting batch",batch_no, "...")
        total_number_of_samples = 32*batch_no
        x,y = next(generator)
        # Evaluate in test mode (learning_phase=0).
        layer_outs = [func([x, 0]) for func in functors] #3,1,(layerout)
        for which_layer,layer in enumerate(layer_outs):
            layer=layer[0]
            for neuron_no in range(len(layer[-1])):
                output_from_a_single_neuron = layer[:,neuron_no] #shape: batchsize
                how_often_was_0_given = np.sum(output_from_a_single_neuron == 0)
                stats_of_every_neuron[which_layer][neuron_no]+=how_often_was_0_given
        # A neuron is "dead" so far if it returned 0 for every sample seen.
        temp_out=[]
        for layer in stats_of_every_neuron:
            temp_out.append(np.sum((layer==total_number_of_samples))/len(layer))
        #print(temp_out)
        output.append(temp_out)
    return output
def make_histogramms_of_4_layers(centered_hists, layer_no_array, model_1, suptitle, title_array):
    #histograms of outputs of 4 layers in a 2x2 plot
    # NOTE(review): reads the module-level global `correct_output` (set in the
    # top-level script below) for coloring the final-layer histogram — confirm
    # it is defined before this is called.
    def get_out_from_layer(layer_no, model, centered_hists):
        # Evaluate a single layer's output in test mode (learning_phase=0).
        get_layer_output = K.function([model.layers[0].input, K.learning_phase()], [model.layers[layer_no].output])
        layer_output = get_layer_output([centered_hists,0])[0]
        return layer_output
    fig = plt.figure(figsize=(8,8))
    for i,layer_no in enumerate(layer_no_array):
        if layer_no==-1:
            #Color the bars of the final layer red or green, depending on whether the prediction is right or not
            enc_feat=get_out_from_layer(layer_no, model_1, centered_hists) #shape: batchsize,2
            prediction=enc_feat>0.5
            sample_is_correct = np.equal(prediction[:,0], correct_output[:,0]) #shape: batchsize
            correct_output_values = enc_feat[sample_is_correct,:]
            wrong_output_values = enc_feat[np.invert(sample_is_correct),:]
            plt.subplot(221+i)
            plt.title(title_array[i])
            plt.hist([correct_output_values, wrong_output_values], 50, stacked=True, color=["green","red"], range=(0.5,1.0))
        else:
            enc_feat=get_out_from_layer(layer_no, model_1, centered_hists)
            subax = plt.subplot(221+i)
            plt.title(title_array[i])
            data_to_plot=enc_feat.flatten()
            # For dense layers (-3, -2): drop exact zeros from the histogram
            # unless everything is zero, and annotate the zero fraction.
            # NOTE(review): `size is not 0` is an identity check on an int;
            # it happens to work in CPython but `size != 0` is the intent.
            if layer_no != -4 and data_to_plot[data_to_plot!=0].size is not 0:
                number_of_zeros=np.sum(data_to_plot==0)
                fraction_of_zeros=str(100*number_of_zeros/(len(data_to_plot))).split(".")[0]
                data_to_plot=data_to_plot[data_to_plot!=0] #Remove 0s unless only 0s present
                plt.text(0.98, 0.98,"Zero: "+str(number_of_zeros)+" ("+fraction_of_zeros+"%)", horizontalalignment='right', verticalalignment='top', transform=subax.transAxes)
            plt.hist(data_to_plot, 100)
    plt.suptitle(suptitle)
    #plt.tight_layout()
    return fig
def generate_plots(model_noBN, model_withBN, centered_hists, suptitles=["Layer outputs without batch normalization", "Layer outputs with batch normalization"]):
    """Build the 2x2 layer-output histogram figures for both models.

    Returns a pair of matplotlib figures: (figure for the model without
    batch norm, figure for the model with batch norm).
    """
    # Last four layers, in order: frozen encoder output (or BN layer for
    # the with-BN model), then the three dense layers.
    layer_slots = [-4, -3, -2, -1]
    dense_titles = ["First dense layer", "Second dense layer", "Third dense layer"]
    titles_no_bn = ["Output from frozen encoder"] + dense_titles
    titles_with_bn = ["Batch normalization"] + dense_titles
    fig_no_bn = make_histogramms_of_4_layers(centered_hists, layer_slots, model_noBN, suptitle=suptitles[0], title_array=titles_no_bn)
    fig_with_bn = make_histogramms_of_4_layers(centered_hists, layer_slots, model_withBN, suptitle=suptitles[1], title_array=titles_with_bn)
    return fig_no_bn, fig_with_bn
#Generate 0-centered histogramms:
file=h5py.File(test_data , 'r')
zero_center_image = np.load(zero_center)
# event_track: [event_id, particle_type, energy, isCC, bjorkeny, dir_x/y/z, time]
labels = file["y"][which_events]
hists = file["x"][which_events]
#Get some hists from the file
# Add a trailing channel dimension: (n, 11, 18, 50) -> (n, 11, 18, 50, 1).
hists=hists.reshape((hists.shape+(1,))).astype(np.float32)
#0 center them
centered_hists = np.subtract(hists, zero_center_image)
# One-hot up/down truth labels for the selected events.
correct_output = np.zeros((len(which_events), 2), dtype=np.float32)
# encode the labels such that they are all within the same range (and filter the ones we don't want for now)
for c, y_val in enumerate(labels):  # Could be vectorized with numba, or use dataflow from tensorpack
    correct_output[c] = encode_targets(y_val, class_type=(2, 'up_down')) #01 if dirz>0, 10 else
model_noBN, model_withBN = model_setup(autoencoder_model=model_name_and_path)
# NOTE(review): bare `raise()` is a debug leftover that deliberately aborts
# the script here — everything below is dead code until it is removed.
raise()
def convergence_analysis(model_noBN, model_withBN, centered_hists, plot_after_how_many_batches, data_for_generator):
    """Train both models batch-by-batch and collect layer-output histogram
    figures at the batch counts listed in `plot_after_how_many_batches`.

    Returns (history_noBN, history_withBN, figures) where each figures entry
    is [batch_count, figure_without_BN, figure_with_BN].
    """
    history_noBN=[]
    history_withBN=[]
    figures=[]
    # Snapshot before any training (batch 0).
    fig1, fig2 = generate_plots(model_noBN, model_withBN, centered_hists, suptitles=["Layer outputs without batch normalization (0 batches)", "Layer outputs with batch normalization (0 batches)"])
    figures.append([0,fig1,fig2])
    plt.close("all")
    generator = generate_batches_from_hdf5_file(filepath=data_for_generator, batchsize=32, n_bins=(11,18,50,1), class_type=(2, 'up_down'), is_autoencoder=False)
    i=0
    while i < max(plot_after_how_many_batches):
        x,y = next(generator)
        # Both models are trained on the exact same batches.
        temp_hist_noBN = model_noBN.train_on_batch(x, y)
        temp_hist_withBN = model_withBN.train_on_batch(x, y)
        i+=1
        if i in plot_after_how_many_batches:
            history_noBN.append(temp_hist_noBN)
            history_withBN.append(temp_hist_withBN)
            print("Generating plot after ", i," batches...")
            fig1, fig2 = generate_plots(model_noBN, model_withBN, centered_hists, suptitles=["Layer outputs without batch normalization ("+str(i)+" batches)", "Layer outputs with batch normalization ("+str(i)+" batches)"])
            figures.append([i,fig1,fig2])
            plt.close("all")
    return history_noBN, history_withBN, figures
def save_to_pdf(figures, name_of_plots="Test"):
    """Write the collected figures to two PDFs: one with all no-BN figures
    (<name>_noBN.pdf) and one with all with-BN figures (<name>_withBN.pdf).

    `figures` entries are [batch_count, figure_without_BN, figure_with_BN].
    """
    with PdfPages(name_of_plots+ "_noBN.pdf") as pp_noBN:
        with PdfPages(name_of_plots+ "_withBN.pdf") as pp_withBN:
            for tupel in figures:
                pp_noBN.savefig(tupel[1])
                pp_withBN.savefig(tupel[2])
# NOTE(review): the block structure of this script was reconstructed from a
# whitespace-mangled source; these statements are read as module level —
# confirm against the original file.
plt.close("all")
# Dead-ReLU check before training ...
print ("Dead relus (NoBN)", check_for_dead_relus(model_noBN, 100, data)[-1])
print ("Dead relus (WithBN)", check_for_dead_relus(model_withBN, 100, data)[-1])
#figures: [ int number of batches this plot was made after, figure no BN, figure w BN]
history_noBN, history_withBN, figures = convergence_analysis(model_noBN, model_withBN, centered_hists, plot_after_how_many_batches, data)
#print("No BN:", history_noBN)
#print("With BN:", history_withBN)
save_to_pdf(figures, name_of_plots)
# ... and the same dead-ReLU check after training.
print ("Dead relus (NoBN)", check_for_dead_relus(model_noBN, 100, data)[-1])
print ("Dead relus (WithBN)", check_for_dead_relus(model_withBN, 100, data)[-1])
| mit |
Aerolyzer/Aerolyzer | horizon.py | 1 | 4599 | import sys
import cv2
import os
import numpy as np
from matplotlib import pyplot as plt
def sigm(x):
    """Logistic sigmoid, 1 / (1 + exp(-x)); works elementwise on arrays."""
    denom = 1 + np.exp(-x)
    return 1 / denom
def is_sky(img):
    # Classify whether the upper part of a BGR image contains sky, using a
    # hand-trained 2-layer neural net over per-channel histogram peaks.
    # NOTE(review): this is Python 2 code — the many `/` divisions on ints
    # (e.g. `dimy / 200`, `img.shape[0] / 2`) rely on floor division and the
    # `print` syntax elsewhere in this file confirms Py2; porting to Py3
    # would require `//`.
    # Frozen weights of the trained network: syn0 is the 6->15 input layer,
    # syn1 the 15->1 output layer.
    syn0 = np.array([[0.6106635051820115, -1.2018987127529588, -10.344605820189082, 1.1911213385074928, -6.818421664371254, 0.7888012143578024, 0.1930026599192343, 2.3468732267729644, -0.8629627172245428, -4.855127665505846, -8.782456796605247, -6.495787542595586, -1.42453153150294, -0.91145196348796, -0.34523737705411006], [-1.3963274415314406, -1.4612339780784143, -2.9000212540397685, -3.9905541370795463, -3.4490261869089287, -4.30542395055999, -2.6069427860345145, 7.201038210239841, -2.205826668689026, -2.493364425571145, -1.9813891706545306, -2.235792731073901, -7.475941696773453, -2.68683663270719, 4.173252030927632], [-0.5585916670209942, 0.3126863684210608, 2.142283443670229, 0.6422582372446218, 0.8699959804142926, 1.2677877625877656, 0.697665181045127, -4.116900256696914, 0.8735456225659666, -0.842712533453469, 1.1200739327640843, -0.703797233889045, 3.3491098693459187, 1.1383933429060538, -1.1608021413621255], [-0.0272945986039962, 1.3810803094898392, -0.3000751044667501, 0.530598483693932, -0.25230337237162953, 1.227322205409595, 0.7475404385595492, -4.708759516668004, 1.5170799948290143, -1.309427991379729, 0.13045771401578515, -1.2421270434590852, 5.141812566546993, 1.7478932634716013, -1.230678486397662], [-1.5471106279095554, -2.524731157065115, 1.0015792402542971, -3.649008251507766, -0.43193380458921354, -3.64779032623984, -1.2585955585366164, 7.075627752142407, -2.3434697661076553, -0.17324616725164094, 0.012324380796953634, 0.1201495802730507, -6.468182569926108, -1.0450745719122267, 3.1541002784637886], [0.5316498085997584, 1.8187154828158774, 0.6800840386512677, 3.154341773471645, -0.633596948312113, 2.770528037922082, 0.22043514814321089, -7.246507554283216, 1.3361606503168058, -1.8011391721619912, -0.7156002807301286, -0.37783520885870486, 6.373115811402003, 0.22971478266471973, -2.857966397739584]])
    syn1 = np.array([[5.177044095570317],
                     [6.5898220063556],
                     [-20.881638524287233],
                     [8.880383432994854],
                     [-14.676726398416983],
                     [9.192745916291782],
                     [5.80497325212264],
                     [-16.424434027307676],
                     [6.820380663953862],
                     [-9.664844259044122],
                     [-17.73177812938899],
                     [-11.809681114121691],
                     [14.747050641950713],
                     [6.009983025197835],
                     [-9.571035518824162]])
    # Mask selecting the top half of the image (NOTE: masked_img below is
    # computed but never used).
    mask = np.zeros(img.shape[:2], np.uint8)
    mask[0:(img.shape[0] / 2), 0:img.shape[1]] = 255
    masked_img = cv2.bitwise_and(img, img, mask = mask)
    # Create histograms with 16 bins in range 0-255
    color = ('b', 'g', 'r')
    b, g, r = cv2.split(img)
    dimy, dimx = img.shape[:2]
    # Scan rows in the middle 2/3 of the image for the largest drop in mean
    # intensity between consecutive sampled rows — taken as the horizon.
    largest = [0, 0]
    it = dimy / 200 #iterations = total number of rows(pixels) / 200
    for i in range(dimy / 6, (dimy / 6) * 5, it): #only looking at the middle half of the image
        ravg = (sum(r[i]) / float(len(r[i])))
        gavg = (sum(g[i]) / float(len(g[i])))
        bavg = (sum(b[i]) / float(len(b[i])))
        avg = (ravg + gavg + bavg) / 3
        pravg = (sum(r[i - it]) / float(len(r[i - it])))
        pgavg = (sum(g[i - it]) / float(len(g[i - it])))
        pbavg = (sum(b[i - it]) / float(len(b[i - it])))
        pavg = (pravg + pgavg + pbavg) / 3
        diff = pavg - avg
        if diff > largest[0]: #only getting the largest intensity drop.
            largest = [diff,i-(it/2)]
    sky = img[0:largest[1], 0:dimx]#cropping out landscape
    h1 = sky[0:(sky.shape[0] / 2), 0:dimx]#top half of sky
    h2 = sky[(sky.shape[0] / 2):(sky.shape[0]), 0:dimx]#bottom half
    # For each sky half: per-channel histogram, and the dominant intensity
    # (argmax over bins 6..249, skipping near-black and near-white ends).
    mask1 = np.zeros(h1.shape[:2], np.uint8)
    mask1[0:(h1.shape[0] / 2), 0:h1.shape[1]] = 255
    hist1 = [0,0,0]
    hist2 = [0,0,0]
    max1 = [0,0,0]
    max2 = [0,0,0]
    for i,col in enumerate(color):
        hist1[i] = cv2.calcHist([h1], [i], mask1, [255], [0, 255])
        max1[i] = np.argmax(hist1[i][6:250])
    mask2 = np.zeros(h2.shape[:2], np.uint8)
    mask2[0:(h2.shape[0] / 2), 0:h2.shape[1]] = 255
    for j,col in enumerate(color):
        hist2[j] = cv2.calcHist([h2], [j], mask2, [255], [0, 255])
        max2[j] = np.argmax(hist2[j][6:250])
    # Feature vector: the six dominant-intensity positions, scaled to [0,1].
    X = np.array([float(max1[0])/255., float(max1[1])/255., float(max1[2])/255., float(max2[0])/255., float(max2[1])/255., float(max2[2])/255.])
    # Forward pass through the frozen 2-layer net.
    l1dup = sigm(np.dot(X,syn0))
    l2dup = sigm(np.dot(l1dup,syn1))
    if float(l2dup) >= 0.5:
        return True
    return False
| apache-2.0 |
zooniverse/aggregation | experimental/dkMeansPaper/dk1.py | 2 | 3219 | #!/usr/bin/env python
__author__ = 'greghines'
import numpy as np
import os
import pymongo
import sys
import cPickle as pickle
import bisect
import csv
import matplotlib.pyplot as plt
import random
import math
import urllib
import matplotlib.cbook as cbook
from scipy.stats.stats import pearsonr
from scipy.stats import beta
def index(a, x):
    """Locate the leftmost value exactly equal to x; raise ValueError if absent."""
    pos = bisect.bisect_left(a, x)
    if pos == len(a) or a[pos] != x:
        raise ValueError
    return pos
# Python 2 script (uses `print` statements below).
# Select the clustering-code path depending on which machine this runs on.
if os.path.exists("/home/ggdhines"):
    sys.path.append("/home/ggdhines/PycharmProjects/reduction/experimental/clusteringAlg")
    sys.path.append("/home/ggdhines/PycharmProjects/reduction/experimental/classifier")
else:
    sys.path.append("/home/greg/github/reduction/experimental/clusteringAlg")
    sys.path.append("/home/greg/github/reduction/experimental/classifier")
#from divisiveDBSCAN import DivisiveDBSCAN
from divisiveDBSCAN_multi import DivisiveDBSCAN
from divisiveKmeans import DivisiveKmeans
from divisiveKmeans_2 import DivisiveKmeans_2
from kMeans import KMeans
#from kMedoids import KMedoids
#from agglomerativeClustering import agglomerativeClustering
from quadTree import Node
if os.path.exists("/home/ggdhines"):
    base_directory = "/home/ggdhines"
else:
    base_directory = "/home/greg"
# NOTE(review): MongoClient is constructed twice; the first assignment is a
# harmless leftover.
client = pymongo.MongoClient()
client = pymongo.MongoClient()
db = client['penguin_2014-10-22']
collection = db["penguin_classifications"]
collection2 = db["penguin_subjects"]
count = 0
# Iterate over Penguin Watch subjects from the BAILa2014a site (first 100).
for subject_index,subject in enumerate(collection2.find({"metadata.path":{"$regex" : ".*BAILa2014a.*"}})):
    path = subject["metadata"]["path"]
    #print path
    if not("BAILa2014a" in path):
        continue
    if count == 100:
        break
    print count
    count += 1
    user_markings = []
    user_ips = []
    big_list = []
    zooniverse_id = subject["zooniverse_id"]
    # Collect every user's adult/chick click coordinates for this subject,
    # skipping exact duplicates from the same IP.
    for r in collection.find({"subjects" : {"$elemMatch": {"zooniverse_id":zooniverse_id}}}):
        ip = r["user_ip"]
        n = 0
        xy_list = []
        try:
            if isinstance(r["annotations"][1]["value"],dict):
                for marking in r["annotations"][1]["value"].values():
                    if marking["value"] in ["adult","chick"]:
                        x,y = (float(marking["x"]),float(marking["y"]))
                        if (x,y,ip) in big_list:
                            print "--"
                            continue
                        big_list.append((x,y,ip))
                        user_markings.append((x,y))
                        user_ips.append(ip)
        except KeyError:
            print r["annotations"]
    # Cluster the markings with divisive k-means (alternatives left commented).
    user_identified_condors,clusters,users = DivisiveKmeans(1).fit2(user_markings,user_ips,debug=True)
    #user_identified_condors,clusters,users = DivisiveKmeans_2(1).fit2(user_markings,user_ips,debug=True)
    #user_identified_condors,clusters,users = KMedoids(1).fit2(user_markings,user_ips,debug=True)
    #user_identified_condors = agglomerativeClustering(zip(user_markings,user_ips))
    # Also feed the points into a quad tree and run the Ward traversal.
    quadRoot = Node(0,0,1000,750)
    for (m,u) in zip(user_markings,user_ips):
        quadRoot.__add_point__((m,u))
    quadRoot.__ward_traverse__()
    # NOTE(review): unconditional break — only the first matching subject is
    # actually processed; presumably a debugging leftover.
    break
| apache-2.0 |
bgris/ODL_bgris | lib/python3.5/site-packages/scipy/stats/_binned_statistic.py | 28 | 25272 | from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from scipy._lib.six import callable, xrange
from collections import namedtuple
__all__ = ['binned_statistic',
'binned_statistic_2d',
'binned_statistic_dd']
# Result triple returned by binned_statistic.
BinnedStatisticResult = namedtuple('BinnedStatisticResult',
                                   ('statistic', 'bin_edges', 'binnumber'))
def binned_statistic(x, values, statistic='mean',
                     bins=10, range=None):
    """Compute a binned statistic for one or more sets of data.

    A generalization of a histogram: the sample `x` is divided into bins
    and the chosen statistic of `values` is computed within each bin
    (instead of merely counting points).

    Parameters
    ----------
    x : (N,) array_like
        A sequence of values to be binned.
    values : (N,) array_like or list of (N,) array_like
        The data on which the statistic will be computed; must have the
        same shape as `x`, or be a set of such sequences (each computed
        independently).
    statistic : string or callable, optional
        The statistic to compute (default is 'mean'). One of 'mean',
        'median', 'count', 'sum', or a callable mapping a 1D array of
        values to a single numerical statistic.
    bins : int or sequence of scalars, optional
        If an int, the number of equal-width bins in the given range
        (10 by default). If a sequence, the bin edges, including the
        rightmost edge, allowing for non-uniform bin widths.
    range : (float, float) or [(float, float)], optional
        The lower and upper range of the bins; defaults to
        ``(x.min(), x.max())``. Values outside the range are ignored.

    Returns
    -------
    statistic : array
        The values of the selected statistic in each bin.
    bin_edges : array of dtype float
        The bin edges (``length(statistic) + 1``).
    binnumber : 1-D ndarray of ints
        Indices of the bins (corresponding to `bin_edges`) in which each
        value of `x` belongs. Same length as `values`.

    See Also
    --------
    numpy.digitize, numpy.histogram, binned_statistic_2d, binned_statistic_dd

    Notes
    -----
    All but the last (righthand-most) bin is half-open.

    .. versionadded:: 0.11.0
    """
    # An int bin count has no len(); treat it as "one dimension, auto bins".
    try:
        n_bin_args = len(bins)
    except TypeError:
        n_bin_args = 1

    if n_bin_args != 1:
        # Explicit edges: wrap as the single-dimension edge list expected
        # by binned_statistic_dd.
        bins = [np.asarray(bins, float)]

    # Promote a single (min, max) pair to the per-dimension list form.
    if range is not None and len(range) == 2:
        range = [range]

    medians, edges, binnumbers = binned_statistic_dd(
        [x], values, statistic, bins, range)

    return BinnedStatisticResult(medians, edges[0], binnumbers)
# Result quadruple returned by binned_statistic_2d.
BinnedStatistic2dResult = namedtuple('BinnedStatistic2dResult',
                                     ('statistic', 'x_edge', 'y_edge',
                                      'binnumber'))
def binned_statistic_2d(x, y, values, statistic='mean',
                        bins=10, range=None, expand_binnumbers=False):
    """Compute a bidimensional binned statistic for one or more sets of data.

    A generalization of histogram2d: the (x, y) sample is divided into
    two-dimensional bins and the chosen statistic of `values` is computed
    within each bin.

    Parameters
    ----------
    x : (N,) array_like
        A sequence of values to be binned along the first dimension.
    y : (N,) array_like
        A sequence of values to be binned along the second dimension.
    values : (N,) array_like or list of (N,) array_like
        The data on which the statistic will be computed; same shape as
        `x`, or a list of such sequences (each computed independently).
    statistic : string or callable, optional
        The statistic to compute (default is 'mean'). One of 'mean',
        'median', 'count', 'sum', or a callable mapping a 1D array of
        values to a single numerical statistic.
    bins : int or [int, int] or array_like or [array, array], optional
        The bin specification: a single count for both dimensions, a
        count per dimension, shared bin edges, or edges per dimension.
    range : (2,2) array_like, optional
        The leftmost and rightmost edges of the bins along each dimension
        (if not specified explicitly in `bins`):
        [[xmin, xmax], [ymin, ymax]].
    expand_binnumbers : bool, optional
        If False (default), the returned `binnumber` is a shape (N,)
        array of linearized bin indices; if True, it is a shape (2,N)
        ndarray with the bin numbers per dimension.

        .. versionadded:: 0.17.0

    Returns
    -------
    statistic : (nx, ny) ndarray
        The values of the selected statistic in each two-dimensional bin.
    x_edge : (nx + 1) ndarray
        The bin edges along the first dimension.
    y_edge : (ny + 1) ndarray
        The bin edges along the second dimension.
    binnumber : (N,) array of ints or (2,N) ndarray of ints
        Bin placement of each sample; representation depends on
        `expand_binnumbers`.

    See Also
    --------
    numpy.digitize, numpy.histogram2d, binned_statistic, binned_statistic_dd

    Notes
    -----
    All but the last (righthand-most) bin is half-open.

    .. versionadded:: 0.11.0
    """
    # This code is based on np.histogram2d
    # An int bin count has no len(); treat it as one shared specification.
    try:
        n_bin_args = len(bins)
    except TypeError:
        n_bin_args = 1

    if n_bin_args not in (1, 2):
        # A single sequence of edges is shared by both dimensions.
        xedges = yedges = np.asarray(bins, float)
        bins = [xedges, yedges]

    medians, edges, binnumbers = binned_statistic_dd(
        [x, y], values, statistic, bins, range,
        expand_binnumbers=expand_binnumbers)

    return BinnedStatistic2dResult(medians, edges[0], edges[1], binnumbers)
# Result triple returned by binned_statistic_dd.
BinnedStatisticddResult = namedtuple('BinnedStatisticddResult',
                                     ('statistic', 'bin_edges',
                                      'binnumber'))
def binned_statistic_dd(sample, values, statistic='mean',
bins=10, range=None, expand_binnumbers=False):
"""
Compute a multidimensional binned statistic for a set of data.
This is a generalization of a histogramdd function. A histogram divides
the space into bins, and returns the count of the number of points in
each bin. This function allows the computation of the sum, mean, median,
or other statistic of the values within each bin.
Parameters
----------
sample : array_like
Data to histogram passed as a sequence of D arrays of length N, or
as an (N,D) array.
values : (N,) array_like or list of (N,) array_like
The data on which the statistic will be computed. This must be
the same shape as `x`, or a list of sequences - each with the same
shape as `x`. If `values` is such a list, the statistic will be
computed on each independently.
statistic : string or callable, optional
The statistic to compute (default is 'mean').
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this returns an error.
bins : sequence or int, optional
The bin specification must be in one of the following forms:
* A sequence of arrays describing the bin edges along each dimension.
* The number of bins for each dimension (nx, ny, ... = bins).
* The number of bins for all dimensions (nx = ny = ... = bins).
range : sequence, optional
A sequence of lower and upper bin edges to be used if the edges are
not given explicitely in `bins`. Defaults to the minimum and maximum
values along each dimension.
expand_binnumbers : bool, optional
'False' (default): the returned `binnumber` is a shape (N,) array of
linearized bin indices.
'True': the returned `binnumber` is 'unraveled' into a shape (D,N)
ndarray, where each row gives the bin numbers in the corresponding
dimension.
See the `binnumber` returned value, and the `Examples` section of
`binned_statistic_2d`.
.. versionadded:: 0.17.0
Returns
-------
statistic : ndarray, shape(nx1, nx2, nx3,...)
The values of the selected statistic in each two-dimensional bin.
bin_edges : list of ndarrays
A list of D arrays describing the (nxi + 1) bin edges for each
dimension.
binnumber : (N,) array of ints or (D,N) ndarray of ints
This assigns to each element of `sample` an integer that represents the
bin in which this observation falls. The representation depends on the
`expand_binnumbers` argument. See `Notes` for details.
See Also
--------
numpy.digitize, numpy.histogramdd, binned_statistic, binned_statistic_2d
Notes
-----
Binedges:
All but the last (righthand-most) bin is half-open in each dimension. In
other words, if `bins` is ``[1, 2, 3, 4]``, then the first bin is
``[1, 2)`` (including 1, but excluding 2) and the second ``[2, 3)``. The
last bin, however, is ``[3, 4]``, which *includes* 4.
`binnumber`:
This returned argument assigns to each element of `sample` an integer that
represents the bin in which it belongs. The representation depends on the
`expand_binnumbers` argument. If 'False' (default): The returned
`binnumber` is a shape (N,) array of linearized indices mapping each
element of `sample` to its corresponding bin (using row-major ordering).
If 'True': The returned `binnumber` is a shape (D,N) ndarray where
each row indicates bin placements for each dimension respectively. In each
dimension, a binnumber of `i` means the corresponding value is between
(bin_edges[D][i-1], bin_edges[D][i]), for each dimension 'D'.
.. versionadded:: 0.11.0
"""
known_stats = ['mean', 'median', 'count', 'sum', 'std']
if not callable(statistic) and statistic not in known_stats:
raise ValueError('invalid statistic %r' % (statistic,))
# `Ndim` is the number of dimensions (e.g. `2` for `binned_statistic_2d`)
# `Dlen` is the length of elements along each dimension.
# This code is based on np.histogramdd
try:
# `sample` is an ND-array.
Dlen, Ndim = sample.shape
except (AttributeError, ValueError):
# `sample` is a sequence of 1D arrays.
sample = np.atleast_2d(sample).T
Dlen, Ndim = sample.shape
# Store initial shape of `values` to preserve it in the output
values = np.asarray(values)
input_shape = list(values.shape)
# Make sure that `values` is 2D to iterate over rows
values = np.atleast_2d(values)
Vdim, Vlen = values.shape
# Make sure `values` match `sample`
if(statistic != 'count' and Vlen != Dlen):
raise AttributeError('The number of `values` elements must match the '
'length of each `sample` dimension.')
nbin = np.empty(Ndim, int) # Number of bins in each dimension
edges = Ndim * [None] # Bin edges for each dim (will be 2D array)
dedges = Ndim * [None] # Spacing between edges (will be 2D array)
try:
M = len(bins)
if M != Ndim:
raise AttributeError('The dimension of bins must be equal '
'to the dimension of the sample x.')
except TypeError:
bins = Ndim * [bins]
# Select range for each dimension
# Used only if number of bins is given.
if range is None:
smin = np.atleast_1d(np.array(sample.min(axis=0), float))
smax = np.atleast_1d(np.array(sample.max(axis=0), float))
else:
smin = np.zeros(Ndim)
smax = np.zeros(Ndim)
for i in xrange(Ndim):
smin[i], smax[i] = range[i]
# Make sure the bins have a finite width.
for i in xrange(len(smin)):
if smin[i] == smax[i]:
smin[i] = smin[i] - .5
smax[i] = smax[i] + .5
# Create edge arrays
for i in xrange(Ndim):
if np.isscalar(bins[i]):
nbin[i] = bins[i] + 2 # +2 for outlier bins
edges[i] = np.linspace(smin[i], smax[i], nbin[i] - 1)
else:
edges[i] = np.asarray(bins[i], float)
nbin[i] = len(edges[i]) + 1 # +1 for outlier bins
dedges[i] = np.diff(edges[i])
nbin = np.asarray(nbin)
# Compute the bin number each sample falls into, in each dimension
sampBin = {}
for i in xrange(Ndim):
sampBin[i] = np.digitize(sample[:, i], edges[i])
# Using `digitize`, values that fall on an edge are put in the right bin.
# For the rightmost bin, we want values equal to the right
# edge to be counted in the last bin, and not as an outlier.
for i in xrange(Ndim):
# Find the rounding precision
decimal = int(-np.log10(dedges[i].min())) + 6
# Find which points are on the rightmost edge.
on_edge = np.where(np.around(sample[:, i], decimal) ==
np.around(edges[i][-1], decimal))[0]
# Shift these points one bin to the left.
sampBin[i][on_edge] -= 1
# Compute the sample indices in the flattened statistic matrix.
ni = nbin.argsort()
# `binnumbers` is which bin (in linearized `Ndim` space) each sample goes
binnumbers = np.zeros(Dlen, int)
for i in xrange(0, Ndim - 1):
binnumbers += sampBin[ni[i]] * nbin[ni[i + 1:]].prod()
binnumbers += sampBin[ni[-1]]
result = np.empty([Vdim, nbin.prod()], float)
if statistic == 'mean':
result.fill(np.nan)
flatcount = np.bincount(binnumbers, None)
a = flatcount.nonzero()
for vv in xrange(Vdim):
flatsum = np.bincount(binnumbers, values[vv])
result[vv, a] = flatsum[a] / flatcount[a]
elif statistic == 'std':
result.fill(0)
flatcount = np.bincount(binnumbers, None)
a = flatcount.nonzero()
for vv in xrange(Vdim):
flatsum = np.bincount(binnumbers, values[vv])
flatsum2 = np.bincount(binnumbers, values[vv] ** 2)
result[vv, a] = np.sqrt(flatsum2[a] / flatcount[a] -
(flatsum[a] / flatcount[a]) ** 2)
elif statistic == 'count':
result.fill(0)
flatcount = np.bincount(binnumbers, None)
a = np.arange(len(flatcount))
result[:, a] = flatcount[np.newaxis, :]
elif statistic == 'sum':
result.fill(0)
for vv in xrange(Vdim):
flatsum = np.bincount(binnumbers, values[vv])
a = np.arange(len(flatsum))
result[vv, a] = flatsum
elif statistic == 'median':
result.fill(np.nan)
for i in np.unique(binnumbers):
for vv in xrange(Vdim):
result[vv, i] = np.median(values[vv, binnumbers == i])
elif callable(statistic):
with warnings.catch_warnings():
# Numpy generates a warnings for mean/std/... with empty list
warnings.filterwarnings('ignore', category=RuntimeWarning)
old = np.seterr(invalid='ignore')
try:
null = statistic([])
except:
null = np.nan
np.seterr(**old)
result.fill(null)
for i in np.unique(binnumbers):
for vv in xrange(Vdim):
result[vv, i] = statistic(values[vv, binnumbers == i])
# Shape into a proper matrix
result = result.reshape(np.append(Vdim, np.sort(nbin)))
for i in xrange(nbin.size):
j = ni.argsort()[i]
# Accomodate the extra `Vdim` dimension-zero with `+1`
result = result.swapaxes(i+1, j+1)
ni[i], ni[j] = ni[j], ni[i]
# Remove outliers (indices 0 and -1 for each bin-dimension).
core = [slice(None)] + Ndim * [slice(1, -1)]
result = result[core]
# Unravel binnumbers into an ndarray, each row the bins for each dimension
if(expand_binnumbers and Ndim > 1):
binnumbers = np.asarray(np.unravel_index(binnumbers, nbin))
if np.any(result.shape[1:] != nbin - 2):
raise RuntimeError('Internal Shape Error')
# Reshape to have output (`reulst`) match input (`values`) shape
result = result.reshape(input_shape[:-1] + list(nbin-2))
return BinnedStatisticddResult(result, edges, binnumbers)
| gpl-3.0 |
idaholab/raven | tests/framework/Samplers/Categorical/StringVars/proj_second.py | 2 | 1667 | import numpy as np
def run(self, Input):
    """Evaluate the projectile model chosen by ``self.mode``.

    Reads ``self.v0``, ``self.y0`` and ``self.mode`` from the RAVEN input
    container, then stores height/time/restart arrays back on it.
    ``Input`` is accepted for interface compatibility and unused.
    """
    angle = 45. * np.pi / 180.        # fixed 45-degree launch, in radians
    sample_times = np.linspace(0, 2, 5)
    if self.mode == 'stepper':
        heights = stepper(self.v0, self.y0, angle, sample_times)
    elif self.mode == 'analytic':
        heights = analytic(self.v0, self.y0, angle, sample_times)
    else:
        raise IOError('Unrecognized mode:', self.mode)
    self.y = np.atleast_1d(heights)
    self.t = np.atleast_1d(sample_times)
    self.restartID = np.array([2] * len(sample_times))
def analytic(v0, y0, ang, times):
    """Closed-form projectile height at each time in *times*.

    *v0* and *y0* are sequences whose first entry is the launch speed and
    initial height; *ang* is the launch angle in radians.
    """
    # Initial vertical velocity component.
    vy_launch = v0[0] * np.sin(ang)
    # y(t) = y0 + vy0*t - (1/2) g t^2 with g = 9.8
    return [y0[0] + vy_launch * t - 0.5 * 9.8 * t * t for t in times]
def stepper(v0, y0, ang, times):
    """Explicit-Euler integration of projectile height over *times*.

    *v0* is the (scalar) launch speed, *y0* a sequence whose first entry is
    the initial height, *ang* the launch angle in radians.  Assumes *times*
    is uniformly spaced; returns the height recorded at each step.
    """
    dt = times[1] - times[0]
    # Velocity components at launch; vx stays constant (no drag).
    vx = v0 * np.cos(ang)
    vy = v0 * np.sin(ang)
    x, y = 0.0, y0[0]
    heights = []
    for _ in times:
        # Record position, then advance velocity and position one step.
        heights.append(y)
        vy -= 9.8 * dt
        x += vx * dt
        y += vy * dt
    return heights
class data:
    """Minimal stand-in for the RAVEN sampled-input object used by run()."""

    def __init__(self, v0, y0, mode):
        # Sampled inputs consumed by run().
        self.v0 = v0
        self.y0 = y0
        self.mode = mode
        # Outputs; run() fills these in.
        self.y = None
        self.t = None
# Stand-alone demo: run both integration modes and plot the trajectories.
if __name__=='__main__':
    import matplotlib.pyplot as plt
    # initialize
    y0 = 1.0
    v0 = 15.0
    # test
    rdata = {}
    for mode in ['stepper','analytic']:
        # set up input class
        dat = data(v0,y0,mode)
        rdata[mode] = dat
        # run
        # NOTE(review): run() dispatches to analytic()/stepper(), which index
        # y0[0] (and v0[0] in analytic), but plain scalars are passed here;
        # confirm the intended input types before relying on this demo.
        run(dat,None)
        # plot
        plt.plot(dat.t,dat.y,'-o',label=mode)
    plt.legend(loc=0)
    plt.xlabel('time')
    plt.ylabel('height')
    plt.show()
| apache-2.0 |
pavanramkumar/pyglmnet | examples/plot_tikhonov.py | 1 | 8672 | # -*- coding: utf-8 -*-
"""
========================
Tikhonov Regularization
========================
Tikhonov regularization is a generalized form of L2-regularization. It allows
us to articulate our prior knowledge about correlations between
different predictors with a multivariate Gaussian prior. Here, we demonstrate
how pyglmnet's Tikhonov regularizer can be used to estimate spatiotemporal
receptive fields (RFs) from neural data.
Neurons in many brain areas, including the frontal eye fields (FEF) have RFs,
defined as regions in the visual field where visual stimuli are most likely
to result in spiking activity.
These spatial RFs need not be static, they can vary in time in a
systematic way. We want to characterize how such spatiotemporal RFs (STRFs)
remap from one fixation to the next. Remapping is a phenomenon where
the RF of a neuron shifts to process visual information from the subsequent
fixation, prior to the onset of the saccade. The dynamics of this shift
from the "current" to the "future" RF is an active area of research.
With Tikhonov regularization, we can specify a prior covariance matrix
to articulate our belief that parameters encoding neighboring points
in space and time are correlated.
The unpublished data are courtesy of Daniel Wood and Mark Segraves,
Department of Neurobiology, Northwestern University.
"""
########################################################
# Author: Pavan Ramkumar <pavan.ramkumar@gmail.com>
# License: MIT
########################################################
# Imports
import os.path as op
import numpy as np
import pandas as pd
from pyglmnet import GLMCV
from spykes.ml.strf import STRF
import matplotlib.pyplot as plt
from tempfile import TemporaryDirectory
########################################################
# Download and fetch data files
from pyglmnet.datasets import fetch_tikhonov_data

with TemporaryDirectory(prefix="tmp_glm-tools") as temp_dir:
    dpath = fetch_tikhonov_data(dpath=temp_dir)
    fixations_df = pd.read_csv(op.join(dpath, 'fixations.csv'))
    # The probes table was read twice in the original; reading it once is
    # sufficient and avoids a redundant disk pass.
    probes_df = pd.read_csv(op.join(dpath, 'probes.csv'))
    spikes_df = pd.read_csv(op.join(dpath, 'spiketimes.csv'))

# Flatten the single-column spike-time table into a 1D array.
spiketimes = np.squeeze(spikes_df.values)
########################################################
# Design spatial basis functions
# 36 Gaussian bumps tile the 50x50 px patch; 7 raised cosines tile time.
n_spatial_basis = 36
n_temporal_basis = 7
strf_model = STRF(patch_size=50, sigma=5,
                  n_spatial_basis=n_spatial_basis,
                  n_temporal_basis=n_temporal_basis)

spatial_basis = strf_model.make_gaussian_basis()
strf_model.visualize_gaussian_basis(spatial_basis)

########################################################
# Design temporal basis functions
# Raised cosines centered every 25 ms across the [-100, 100] ms window.
time_points = np.linspace(-100., 100., 10)
centers = [-75., -50., -25., 0, 25., 50., 75.]
temporal_basis = strf_model.make_raised_cosine_temporal_basis(
    time_points=time_points,
    centers=centers,
    widths=10. * np.ones(7))
plt.plot(time_points, temporal_basis)
plt.show()
########################################################
# Design parameters
# Spatial extent (patch side length in pixels)
n_shape = 50
n_features = n_spatial_basis
# Window of interest around each fixation (presumably milliseconds -- TODO
# confirm units against the recording metadata)
window = [-100, 100]
# Bin size
binsize = 20
# Zero pad bins (half the window, used for boundary-safe convolution below)
n_zero_bins = int(np.floor((window[1] - window[0]) / binsize / 2))

########################################################
# Build design matrix
bin_template = np.arange(window[0], window[1] + binsize, binsize)
n_bins = len(bin_template) - 1

probetimes = probes_df['t_probe'].values

# Accumulators, grown row-wise per fixation.
spatial_features = np.zeros((0, n_features))
spike_counts = np.zeros((0,))
fixation_id = np.zeros((0,))

# For each fixation (first 1000 only)
for fx in fixations_df.index[:1000]:
    # Fixation time
    fixation_time = fixations_df.loc[fx]['t_fix_f']

    this_fixation_spatial_features = np.zeros((n_bins, n_spatial_basis))
    # NOTE(review): this_fixation_spikecounts is assigned but never used;
    # the spike counts actually come from np.histogram below.
    this_fixation_spikecounts = np.zeros(n_bins)
    # Encode trial and fixation number into one float id (trial + 0.01*fix).
    unique_fixation_id = fixations_df.loc[fx]['trialNum_f']
    unique_fixation_id += 0.01 * fixations_df.loc[fx]['fixNum_f']
    this_fixation_id = unique_fixation_id * np.ones(n_bins)

    # Look for probes in window of interest relative to fixation
    probe_ids = np.searchsorted(probetimes,
                                [fixation_time + window[0] + 0.1,
                                 fixation_time + window[1] - 0.1])
    # For each such probe
    for probe_id in range(probe_ids[0], probe_ids[1]):
        # Check if probe lies within spatial region of interest
        fix_row = fixations_df.loc[fx]['y_curFix_f']
        fix_col = fixations_df.loc[fx]['x_curFix_f']
        probe_row = probes_df.loc[probe_id]['y_probe']
        probe_col = probes_df.loc[probe_id]['x_probe']
        if ((probe_row - fix_row) > -n_shape / 2 and
                (probe_row - fix_row) < n_shape / 2 and
                (probe_col - fix_col) > -n_shape / 2 and
                (probe_col - fix_col) < n_shape / 2):
            # Get probe timestamp relative to fixation
            probe_time = probes_df.loc[probe_id]['t_probe']
            probe_bin = np.where(bin_template < (probe_time - fixation_time))
            probe_bin = probe_bin[0][-1]
            # Define a one-hot image at the probe's fixation-relative location
            img = np.zeros(shape=(n_shape, n_shape))
            row = int(-np.round(probe_row - fix_row) + n_shape / 2 - 1)
            col = int(np.round(probe_col - fix_col) + n_shape / 2 - 1)
            img[row, col] = 1
            # Project the probe image onto the Gaussian spatial basis
            basis_projection = strf_model.project_to_spatial_basis(
                img, spatial_basis)
            this_fixation_spatial_features[probe_bin, :] = basis_projection

    # Count spikes in window of interest relative to fixation
    bins = fixation_time + bin_template
    searchsorted_idx = np.searchsorted(spiketimes,
                                       [fixation_time + window[0],
                                        fixation_time + window[1]])
    this_fixation_spike_counts = np.histogram(
        spiketimes[searchsorted_idx[0]:searchsorted_idx[1]], bins)[0]

    # Accumulate this fixation's rows onto the global design matrix
    fixation_id = np.concatenate((fixation_id, this_fixation_id), axis=0)
    spatial_features = np.concatenate((spatial_features,
                                       this_fixation_spatial_features), axis=0)
    spike_counts = np.concatenate((spike_counts,
                                   this_fixation_spike_counts), axis=0)

# Zero pad so the temporal convolution does not wrap across fixations;
# padded rows are tagged -999 and dropped afterwards.
spatial_features = np.concatenate((
    spatial_features, np.zeros((n_zero_bins, n_spatial_basis))))
fixation_id = np.concatenate((fixation_id, -999. * np.ones(n_zero_bins)))

# Convolve with temporal basis
features = strf_model.convolve_with_temporal_basis(spatial_features,
                                                   temporal_basis)

# Remove zeropad rows
features = features[fixation_id != -999.]
########################################################
# Visualize the distribution of spike counts
plt.hist(spike_counts, 10)
plt.show()

########################################################
# Plot a few rows of the design matrix
plt.imshow(features[30:150, :], interpolation='none')
plt.show()

#################################################################
# Design prior covariance matrix for Tikhonov regularization:
# nearby basis functions in space and time are assumed correlated.
prior_cov = strf_model.design_prior_covariance(
    sigma_temporal=3.,
    sigma_spatial=5.)
plt.imshow(prior_cov, cmap='Greys', interpolation='none')
plt.colorbar()
plt.show()
# NOTE(review): the expression below has no effect (result discarded);
# presumably left over from interactive inspection.
np.shape(prior_cov)

########################################################
# Fit models
from sklearn.model_selection import train_test_split  # noqa
Xtrain, Xtest, Ytrain, Ytest = train_test_split(
    features, spike_counts,
    test_size=0.2,
    random_state=42)

########################################################
# Convert the prior covariance into a Tikhonov matrix for the GLM penalty.
from pyglmnet import utils  # noqa
n_samples = Xtrain.shape[0]
Tau = utils.tikhonov_from_prior(prior_cov, n_samples)

glm = GLMCV(distr='poisson', alpha=0., Tau=Tau, score_metric='pseudo_R2', cv=3)
glm.fit(Xtrain, Ytrain)
print("train score: %f" % glm.score(Xtrain, Ytrain))
print("test score: %f" % glm.score(Xtest, Ytest))
weights = glm.beta_

########################################################
# Visualize one reconstructed spatial RF per temporal basis function.
for time_bin_ in range(n_temporal_basis):
    # Weights are interleaved: stride by n_temporal_basis to pick the
    # spatial coefficients belonging to this temporal basis.
    RF = strf_model.make_image_from_spatial_basis(
        spatial_basis,
        weights[range(time_bin_, n_spatial_basis * n_temporal_basis,
                      n_temporal_basis)])
    plt.subplot(1, n_temporal_basis, time_bin_ + 1)
    plt.imshow(RF, cmap='Blues', interpolation='none')
    titletext = str(centers[time_bin_])
    plt.title(titletext)
    plt.axis('off')
plt.show()
| mit |
arielmakestuff/loadlimit | test/unit/stat/test_timeseries.py | 1 | 6054 | # -*- coding: utf-8 -*-
# test/unit/stat/test_timeseries.py
# Copyright (C) 2016 authors and contributors (see AUTHORS file)
#
# This module is released under the MIT License.
"""Test timeseries()"""
# ============================================================================
# Imports
# ============================================================================
# Stdlib imports
import asyncio
from functools import partial
# Third-party imports
from pandas import DataFrame, to_timedelta
import pytest
from sqlalchemy import create_engine
# Local imports
import loadlimit.channel as channel
from loadlimit.core import BaseLoop
import loadlimit.result as result
from loadlimit.result import SQLTimeSeries, TimeSeries
import loadlimit.stat as stat
from loadlimit.stat import (CountStore, flushtosql, flushtosql_shutdown,
SendTimeData)
from loadlimit.util import aiter
# ============================================================================
# Fixtures
# ============================================================================
# Apply these fixtures to every test in this module: the global shutdown and
# timedata channels are swapped for per-test fakes so tests stay isolated.
pytestmark = pytest.mark.usefixtures('fake_shutdown_channel',
                                     'fake_timedata_channel')
# ============================================================================
# Tests
# ============================================================================
def test_return_two_df(testloop):
    """Timeseries generates 2 non-empty dataframes (response and rate)."""
    measure = CountStore()
    results = TimeSeries(countstore=measure)

    # Coroutine whose latency is recorded by the countstore under 'churn'
    @measure(name='churn')
    async def churn(i):
        """Do nothing"""
        await asyncio.sleep(0)

    async def run():
        """Drive 500 measured calls, then signal application shutdown"""
        async for i in aiter(range(500)):
            await churn(i)
        await channel.shutdown.send(0)

    # Setup SendTimeData: flushwait=0 pushes measurements immediately
    send = SendTimeData(measure, flushwait=to_timedelta(0, unit='s'),
                        channel=stat.timedata)

    # Add cleanup callbacks to the shutdown channel
    channel.shutdown(send.shutdown)
    channel.shutdown(stat.timedata.shutdown)

    # Run all the tasks
    with BaseLoop() as main:

        # Schedule SendTimeData coro
        asyncio.ensure_future(send())

        # Start every event, and ignore events that don't have any tasks
        stat.timedata.open()
        stat.timedata.start(asyncfunc=False, statsdict=results.statsdict)

        asyncio.ensure_future(run())
        main.start()

    # Calling the TimeSeries object produces the result dataframes
    ret = results()
    assert len(ret) == 2
    assert all(isinstance(r, DataFrame) and not r.empty for r in ret)
    # Debugging aid kept from the original (intentionally commented out):
    # import pandas as pd
    # pd.set_option('display.max_columns', 500)
    # df_response, df_rate = ret
    # print(df_response)
    # print('-----------------------')
    # print(df_rate)
@pytest.mark.parametrize('num', [1000])
def test_sqltimeseries(testloop, num):
    """SQL-backed timeseries flushes to the DB and still yields 2 dataframes."""
    measure = CountStore()

    # In-memory sqlite keeps the test self-contained and fast
    engine = create_engine('sqlite://')

    timeseries = SQLTimeSeries(sqlengine=engine, countstore=measure)

    # Coroutine whose latency is recorded under 'churn'
    @measure(name='churn')
    async def churn(i):
        """Do nothing"""
        await asyncio.sleep(0)

    async def run():
        """Drive `num` measured calls, then signal application shutdown"""
        async for i in aiter(range(num)):
            await churn(i)
        await channel.shutdown.send(0)

    # Setup SendTimeData: flushwait=0 pushes measurements immediately
    send = SendTimeData(measure, flushwait=to_timedelta(0, unit='s'),
                        channel=stat.timedata)

    # Add cleanup callbacks to the shutdown channel; the flushtosql_shutdown
    # partial drains any remaining in-memory data into the DB on shutdown
    channel.shutdown(send.shutdown)
    channel.shutdown(partial(flushtosql_shutdown,
                             statsdict=timeseries.statsdict, sqlengine=engine))
    channel.shutdown(stat.timedata.shutdown)

    # Add flushtosql to timedata event so data is periodically persisted
    stat.timedata(flushtosql)

    # Run all the tasks
    with BaseLoop() as main:

        # Schedule SendTimeData coro
        asyncio.ensure_future(send())

        # Start every event, and ignore events that don't have any tasks;
        # flushlimit=500 forces at least one mid-run flush for num=1000
        stat.timedata.open()
        stat.timedata.start(asyncfunc=False, statsdict=timeseries.statsdict,
                            flushlimit=500, sqlengine=engine)

        asyncio.ensure_future(run())
        main.start()

    # Everything must have been flushed out of the in-memory statsdict
    assert timeseries.statsdict.numdata == 0

    results = timeseries()
    assert len(results) == 2
    assert all(isinstance(r, DataFrame) and not r.empty for r in results)
    # Debugging aid kept from the original (intentionally commented out):
    # import pandas as pd
    # pd.set_option('display.max_columns', 500)
    # df_response, df_rate = results
    # print(df_response)
    # print('-----------------------')
    # print(df_rate)
# ============================================================================
# Test calculate (no data)
# ============================================================================
def test_calculate_nodata(statsdict):
    """Set results for a key to None if no data was recorded for it."""
    measure = CountStore()
    key = '42'
    calc = result.TimeSeries(statsdict=statsdict, countstore=measure)
    # Enter the context manually (no __exit__): only initialization is
    # needed before calling calculate() directly with empty data.
    calc.__enter__()
    calc.calculate(key, [], [], [])
    vals = calc.vals
    assert vals
    # Empty inputs must yield explicit None results, not empty frames
    assert vals.response_result[key] is None
    assert vals.rate_result[key] is None
# ============================================================================
# Test export
# ============================================================================
def test_export_nodata(monkeypatch, statsdict):
    """Do not call exportdf() if there are no results."""
    measure = CountStore()
    calc = TimeSeries(statsdict=statsdict, countstore=measure)

    # Flag flipped by the patched exportdf so we can detect any call
    called = False

    def fake_exportdf(self, df, name, export_type, exportdir):
        nonlocal called
        called = True

    monkeypatch.setattr(TimeSeries, 'exportdf', fake_exportdf)

    # Entering/exiting with no recorded data leaves both results as None
    with calc:
        pass
    results = calc.vals.results
    assert len(results) == 2
    assert results == (None, None)

    # export() must short-circuit and never reach exportdf()
    calc.export('EXPORTTYPE', 'EXPORTDIR')
    assert called is False
# ============================================================================
#
# ============================================================================
| mit |
caisq/tensorflow | tensorflow/examples/learn/multiple_gpu.py | 39 | 3957 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of using Estimator with multiple GPUs to distribute one model.
This example only runs if you have multiple GPUs to assign to.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import datasets
from sklearn import metrics
from sklearn import model_selection
import tensorflow as tf
X_FEATURE = 'x' # Name of the input feature.
def my_model(features, labels, mode):
  """DNN with three hidden layers, and dropout of 0.1 probability.

  Note: If you want to run this example with multiple GPUs, Cuda Toolkit 7.0
  and CUDNN 6.5 V2 from NVIDIA need to be installed beforehand.

  Args:
    features: Dict of input `Tensor`.
    labels: Label `Tensor`.
    mode: One of `ModeKeys`.

  Returns:
    `EstimatorSpec` for the requested mode (predict, train, or eval).
  """
  # Create three fully connected layers respectively of size 10, 20, and 10
  # with each layer having a dropout probability of 0.1.
  # The hidden stack is pinned to GPU 1, the head and losses to GPU 2.
  net = features[X_FEATURE]
  with tf.device('/device:GPU:1'):
    for units in [10, 20, 10]:
      net = tf.layers.dense(net, units=units, activation=tf.nn.relu)
      net = tf.layers.dropout(net, rate=0.1)

  with tf.device('/device:GPU:2'):
    # Compute logits (1 per class).
    logits = tf.layers.dense(net, 3, activation=None)

    # Compute predictions.
    predicted_classes = tf.argmax(logits, 1)
    if mode == tf.estimator.ModeKeys.PREDICT:
      predictions = {
          'class': predicted_classes,
          'prob': tf.nn.softmax(logits)
      }
      return tf.estimator.EstimatorSpec(mode, predictions=predictions)

    # Compute loss.
    loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)

    # Create training op.
    if mode == tf.estimator.ModeKeys.TRAIN:
      optimizer = tf.train.AdagradOptimizer(learning_rate=0.1)
      train_op = optimizer.minimize(
          loss, global_step=tf.train.get_global_step())
      return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)

    # Compute evaluation metrics.
    eval_metric_ops = {
        'accuracy': tf.metrics.accuracy(
            labels=labels, predictions=predicted_classes)
    }
    return tf.estimator.EstimatorSpec(
        mode, loss=loss, eval_metric_ops=eval_metric_ops)
def main(unused_argv):
  """Train, predict and evaluate the multi-GPU DNN on the iris dataset."""
  iris = datasets.load_iris()
  x_train, x_test, y_train, y_test = model_selection.train_test_split(
      iris.data, iris.target, test_size=0.2, random_state=42)

  classifier = tf.estimator.Estimator(model_fn=my_model)

  # Train.
  train_input_fn = tf.estimator.inputs.numpy_input_fn(
      x={X_FEATURE: x_train}, y=y_train, num_epochs=None, shuffle=True)
  classifier.train(input_fn=train_input_fn, steps=100)

  # Predict.
  test_input_fn = tf.estimator.inputs.numpy_input_fn(
      x={X_FEATURE: x_test}, y=y_test, num_epochs=1, shuffle=False)
  predictions = classifier.predict(input_fn=test_input_fn)
  y_predicted = np.array(list(p['class'] for p in predictions))
  y_predicted = y_predicted.reshape(np.array(y_test).shape)

  # Score with sklearn.
  score = metrics.accuracy_score(y_test, y_predicted)
  print('Accuracy (sklearn): {0:f}'.format(score))

  # Score with tensorflow (should agree with the sklearn number above).
  scores = classifier.evaluate(input_fn=test_input_fn)
  print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy']))


if __name__ == '__main__':
  tf.app.run()
| apache-2.0 |
lpryszcz/bin | sam2hist.py | 1 | 2337 | #!/usr/bin/env python
desc="""Report histogram for given insert size data
"""
epilog="""Author:
l.p.pryszcz+git@gmail.com
Barcelona, 18/10/2012
"""
import argparse, os, sys
from datetime import datetime
def plot(isizes, outfn):
    """Draw a 50-bin histogram of insert sizes.

    Saves the figure to *outfn* when given; otherwise shows it
    interactively.  The plot title uses the basename of *outfn*.
    """
    import matplotlib.pyplot as plt
    sample_name = os.path.basename(outfn).split(".")[0]
    # the histogram of the data
    plt.hist(isizes, 50, normed=0, facecolor='g', alpha=0.75)
    plt.xlabel('Insert size')
    plt.ylabel('Occurencies')
    plt.title('Histogram of insert size [%s]' % sample_name)
    plt.grid(True)
    if outfn:
        plt.savefig(outfn)
    else:
        plt.show()
def sam2hist(handle, outfn, colnumb, verbose):
    """Collect insert sizes from one column of a SAM stream and plot them.

    Parameters:
      handle  - iterable of tab-separated SAM lines (e.g. samtools view output)
      outfn   - output figure filename; empty string shows the plot instead
      colnumb - 0-based index of the column holding the insert size
      verbose - if True, warn on stderr about unparseable lines
    """
    isizes = []
    for l in handle:
        fields = l.split('\t')
        # Only skip lines that are too short (IndexError) or whose field is
        # not an integer (ValueError); the original bare `except` silently
        # swallowed every error, including real bugs.
        try:
            isizes.append(int(fields[colnumb]))
        except (IndexError, ValueError):
            if verbose:
                sys.stderr.write("Warning: Cannot read isize in column %s for line: %s\n" % (colnumb, str(fields)))
    # plot
    plot(isizes, outfn)
def main():
    """Parse command-line options and run the insert-size histogram."""
    usage = "usage: samtools view -f35 BAM [region] | %(prog)s [options]"
    parser = argparse.ArgumentParser(usage=usage, description=desc, epilog=epilog)

    parser.add_argument("-v", dest="verbose", default=False, action="store_true")
    parser.add_argument('--version', action='version', version='1.0')
    # ``type=file`` relied on the Python 2 ``file`` builtin and breaks on
    # Python 3; argparse.FileType('r') works on both.
    parser.add_argument("-i", dest="input", default=sys.stdin,
                        type=argparse.FileType('r'),
                        help="input sam file [%(default)s]")
    parser.add_argument("-c", dest="column", default=8, type=int,
                        help="column number 0-based [%(default)s]")
    parser.add_argument("-o", dest="outfn", default="", type=str,
                        help="output fname [%(default)s]")

    o = parser.parse_args()
    if o.verbose:
        sys.stderr.write("Options: %s\n" % str(o))

    # create output directory for the figure if it does not exist yet
    outdir = os.path.dirname(o.outfn)
    if outdir and not os.path.isdir(outdir):
        os.makedirs(outdir)

    sam2hist(o.input, o.outfn, o.column, o.verbose)
# Script entry point: run main() and report wall-clock time on stderr.
if __name__=='__main__':
    t0 = datetime.now()
    main()
    dt = datetime.now()-t0
    sys.stderr.write( "#Time elapsed: %s\n" % dt )
kazemakase/scikit-learn | examples/linear_model/plot_ols.py | 220 | 1940 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Linear Regression Example
=========================================================
This example uses the only the first feature of the `diabetes` dataset, in
order to illustrate a two-dimensional plot of this regression technique. The
straight line can be seen in the plot, showing how linear regression attempts
to draw a straight line that will best minimize the residual sum of squares
between the observed responses in the dataset, and the responses predicted by
the linear approximation.
The coefficients, the residual sum of squares and the variance score are also
calculated.
"""
print(__doc__)


# Code source: Jaques Grobler
# License: BSD 3 clause


import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model

# Load the diabetes dataset
diabetes = datasets.load_diabetes()


# Use only one feature
diabetes_X = diabetes.data[:, np.newaxis, 2]

# Split the data into training/testing sets
diabetes_X_train = diabetes_X[:-20]
diabetes_X_test = diabetes_X[-20:]

# Split the targets into training/testing sets
diabetes_y_train = diabetes.target[:-20]
diabetes_y_test = diabetes.target[-20:]

# Create linear regression object
regr = linear_model.LinearRegression()

# Train the model using the training sets
regr.fit(diabetes_X_train, diabetes_y_train)

# The coefficients
print('Coefficients: \n', regr.coef_)
# np.mean of the squared residuals is the *mean* squared error; the old
# label "Residual sum of squares" described a sum, not a mean.
print("Mean squared error: %.2f"
      % np.mean((regr.predict(diabetes_X_test) - diabetes_y_test) ** 2))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % regr.score(diabetes_X_test, diabetes_y_test))

# Plot outputs: test points and the fitted regression line
plt.scatter(diabetes_X_test, diabetes_y_test, color='black')
plt.plot(diabetes_X_test, regr.predict(diabetes_X_test), color='blue',
         linewidth=3)

plt.xticks(())
plt.yticks(())

plt.show()
| bsd-3-clause |
justincassidy/scikit-learn | examples/applications/face_recognition.py | 191 | 5513 | """
===================================================
Faces recognition example using eigenfaces and SVMs
===================================================
The dataset used in this example is a preprocessed excerpt of the
"Labeled Faces in the Wild", aka LFW_:
http://vis-www.cs.umass.edu/lfw/lfw-funneled.tgz (233MB)
.. _LFW: http://vis-www.cs.umass.edu/lfw/
Expected results for the top 5 most represented people in the dataset::
precision recall f1-score support
Ariel Sharon 0.67 0.92 0.77 13
Colin Powell 0.75 0.78 0.76 60
Donald Rumsfeld 0.78 0.67 0.72 27
George W Bush 0.86 0.86 0.86 146
Gerhard Schroeder 0.76 0.76 0.76 25
Hugo Chavez 0.67 0.67 0.67 15
Tony Blair 0.81 0.69 0.75 36
avg / total 0.80 0.80 0.80 322
"""
from __future__ import print_function
from time import time
import logging
import matplotlib.pyplot as plt
from sklearn.cross_validation import train_test_split
from sklearn.datasets import fetch_lfw_people
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.decomposition import RandomizedPCA
from sklearn.svm import SVC
print(__doc__)
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
###############################################################################
# Download the data, if not already on disk and load it as numpy arrays
# Keep only people with at least 70 images; resize=0.4 shrinks each image
# to 40 % of its original size to speed up the PCA below.
lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)
# introspect the images arrays to find the shapes (for plotting)
n_samples, h, w = lfw_people.images.shape
# for machine learning we use the 2 data directly (as relative pixel
# positions info is ignored by this model)
X = lfw_people.data
n_features = X.shape[1]
# the label to predict is the id of the person
y = lfw_people.target
target_names = lfw_people.target_names
n_classes = target_names.shape[0]
print("Total dataset size:")
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
print("n_classes: %d" % n_classes)
###############################################################################
# Split into a training set and a test set using a stratified k fold
# split into a training and testing set
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.25, random_state=42)
###############################################################################
# Compute a PCA (eigenfaces) on the face dataset (treated as unlabeled
# dataset): unsupervised feature extraction / dimensionality reduction
n_components = 150
print("Extracting the top %d eigenfaces from %d faces"
      % (n_components, X_train.shape[0]))
t0 = time()
# NOTE(review): RandomizedPCA is the pre-0.18 scikit-learn API; newer
# releases spell this PCA(svd_solver='randomized').
pca = RandomizedPCA(n_components=n_components, whiten=True).fit(X_train)
print("done in %0.3fs" % (time() - t0))
# Reshape each principal component back to image dimensions for display.
eigenfaces = pca.components_.reshape((n_components, h, w))
print("Projecting the input data on the eigenfaces orthonormal basis")
t0 = time()
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)
print("done in %0.3fs" % (time() - t0))
###############################################################################
# Train a SVM classification model
print("Fitting the classifier to the training set")
t0 = time()
# Grid over the regularization strength C and the RBF kernel width gamma.
param_grid = {'C': [1e3, 5e3, 1e4, 5e4, 1e5],
              'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1], }
clf = GridSearchCV(SVC(kernel='rbf', class_weight='balanced'), param_grid)
clf = clf.fit(X_train_pca, y_train)
print("done in %0.3fs" % (time() - t0))
print("Best estimator found by grid search:")
print(clf.best_estimator_)
###############################################################################
# Quantitative evaluation of the model quality on the test set
print("Predicting people's names on the test set")
t0 = time()
y_pred = clf.predict(X_test_pca)
print("done in %0.3fs" % (time() - t0))
print(classification_report(y_test, y_pred, target_names=target_names))
print(confusion_matrix(y_test, y_pred, labels=range(n_classes)))
def plot_gallery(images, titles, h, w, n_row=3, n_col=4):
    """Render a grid of n_row x n_col grayscale portraits with captions.

    Each entry of *images* is a flat pixel vector that is reshaped to
    (h, w) before display; *titles* supplies one caption per cell.
    """
    n_cells = n_row * n_col
    plt.figure(figsize=(1.8 * n_col, 2.4 * n_row))
    plt.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)
    for cell in range(n_cells):
        plt.subplot(n_row, n_col, cell + 1)
        plt.imshow(images[cell].reshape((h, w)), cmap=plt.cm.gray)
        plt.title(titles[cell], size=12)
        # Empty tick tuples hide the pixel-coordinate axes.
        plt.xticks(())
        plt.yticks(())
# plot the result of the prediction on a portion of the test set
def title(y_pred, y_test, target_names, i):
    """Two-line caption comparing predicted vs. true class name for sample i.

    Only the surname (last whitespace-separated token of the full name)
    is shown, to keep captions short.
    """
    def surname(label_idx):
        return target_names[label_idx].rsplit(' ', 1)[-1]
    return 'predicted: %s\ntrue: %s' % (surname(y_pred[i]), surname(y_test[i]))
# Build one "predicted vs. true" caption per test sample, then show the
# first gallery page (plot_gallery displays n_row*n_col = 12 cells).
prediction_titles = [title(y_pred, y_test, target_names, i)
                     for i in range(y_pred.shape[0])]
plot_gallery(X_test, prediction_titles, h, w)
# plot the gallery of the most significative eigenfaces
eigenface_titles = ["eigenface %d" % i for i in range(eigenfaces.shape[0])]
plot_gallery(eigenfaces, eigenface_titles, h, w)
plt.show()
| bsd-3-clause |
JasonKessler/scattertext | demo_category_frequencies.py | 1 | 1662 | import pandas as pd
import scattertext as st
'''
Sample genre frequencies from the Corpus of Contemporary American English via
https://www.wordfrequency.info/100k_compare_to_60k_etc.asp .
We'll examine the difference between spoken and fiction, and just consider the top 1000
words in the sample.
'''
# Load the genre-frequency sample, keep only the SPOKEN and FICTION
# columns, and restrict to the 1000 most frequent lemmas.
df = (pd.read_excel('https://www.wordfrequency.info/files/genres_sample.xls')
      .dropna()
      .set_index('lemma')[['SPOKEN', 'FICTION']]
      .iloc[:1000])
term_cat_freq = st.TermCategoryFrequencies(df)
html = st.produce_scattertext_explorer(
    term_cat_freq,
    category='SPOKEN',
    category_name='Spoken',
    not_category_name='Fiction',
)
fn = 'demo_category_frequencies.html'
# Use a context manager so the output handle is flushed and closed
# deterministically (the original open(fn, 'wb').write(...) leaked the
# file object and relied on garbage collection to close it).
with open(fn, 'wb') as out:
    out.write(html.encode('utf-8'))
print('Open ./' + fn + ' in Chrome or Firefox.')
import requests, zipfile, io
# Fetch a small sample of raw COCA documents so the second plot can link
# terms back to example texts.
coca_sample_url = 'http://corpus.byu.edu/cocatext/samples/text.zip'
zip_file = zipfile.ZipFile(io.BytesIO(requests.get(coca_sample_url).content))
# Two spoken-genre ('w_spok*') and two fiction-genre ('w_fic*') documents
# from the sample archive.
document_df = pd.DataFrame(
    [{'text': zip_file.open(fn).read().decode('utf-8'),
      'category': 'SPOKEN'}
     for fn in zip_file.filelist if fn.filename.startswith('w_spok')][:2]
    + [{'text': zip_file.open(fn).read().decode('utf-8'),
        'category': 'FICTION'}
       for fn in zip_file.filelist if fn.filename.startswith('w_fic')][:2])
doc_term_cat_freq = st.TermCategoryFrequencies(df, document_category_df=document_df)
html = st.produce_scattertext_explorer(
    doc_term_cat_freq,
    category='SPOKEN',
    category_name='Spoken',
    not_category_name='Fiction',
)
fn = 'demo_category_frequencies_sample_docs.html'
# Context manager closes the output handle deterministically (the
# original open(fn, 'wb').write(...) leaked the file object).
with open(fn, 'wb') as out:
    out.write(html.encode('utf-8'))
print('Open ./' + fn + ' in Chrome or Firefox.')
| apache-2.0 |
MurphysLab/ADAblock | Data_Sort_and_Plot_Script.py | 1 | 70948 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Data_Sort_and_Plot_Script.py
Script created by Jeffrey N. Murphy (2015); Email: jnmurphy@ualberta.ca
Provided without any warranty.
The directory locations will need to be modified, depending on the location of the input file.
The input for this script is the file produced by Data_Amalgamation_Script.py
"""
# Directory where generated plot image files will be saved.
Save_Directory = "C:\\Users\\Jeffrey\\Plot Files" #Where plot files will be saved
# Amalgamated input produced by Data_Amalgamation_Script.py.
CSV_file = "C:\\Users\\Jeffrey\\Summary Files\\Summary_2.csv"
#CSV_file = "/home/jeffrey/Summary Files/Summary_2.csv" #tux
# Everything up to the file name; reused below as the plot output directory.
CSV_directory = CSV_file[0:CSV_file.find("Summary_2.csv")]
output_filename = "Summary_5.csv" # cleaned copy of the parsed input data
import os
# Path-separator constants; only os.path.sep is authoritative.
windows = "\\"
# NOTE(review): the POSIX separator is "/", not "//". This constant is
# not referenced below, but fix it before relying on it.
linux = "//"
filesep = os.path.sep
import csv
import numpy as np
dataset = []       # placeholder accumulator (not referenced below)
dataset_index = -1 # placeholder cursor (not referenced below)
# Subset of column headings used by an earlier version of the script; kept
# for reference only.
labels_old = [
"Image_Title.String","Version","Defect_Density_um",
"Total_Area_nm","correlation_length","opa_Hermans",
"LER_width_avg","LER_sigma_avg","wfft_Period_px",
"wfft_Period_nm","nm_per_pixel"
]
# Full ordered list of column headings expected in Summary_2.csv; matched
# against the CSV header row below to build data_label_index.
labels = ["Image_Number", "Image_Title.String", "Output_Folder", "Date", "Time", "Version", "nm_per_pixel", "Width_initial", "Height_initial", "Crop_1", "wfft_Period_px", "wfft_Period_nm", "Smoothing", "Smoothing_radius", "Crop_2", "Width_final", "Height_final", "Threshold.String", "Threshold", "Threshold.Auto-Local.String", "Up_Thresh", "Low_Thresh", "PA.pos_nPixels.1", "PA.pos_mean.1", "PA.pos_min.1", "PA.pos_max.1", "PA.pos_std.1", "PA.neg_nPixels.1", "PA.neg_mean.1", "PA.neg_min.1", "PA.neg_max.1", "PA.neg_std.1", "PA.nPositive.1", "PA.nNegative.1", "PA.nTotal.1", "PA.pos_nPixels.2", "PA.pos_mean.2", "PA.pos_min.2", "PA.pos_max.2", "PA.pos_std.2", "PA.neg_nPixels.2", "PA.neg_mean.2", "PA.neg_min.2", "PA.neg_max.2", "PA.neg_std.2", "PA.nPositive.2", "PA.nNegative.2", "PA.nTotal.2", "PA.SET.large_drops", "PA.SET.min_area", "PA.SET.wlsq_iterations", "PA.WLSQ.positive.0", "PA.WLSQ.positive.1", "PA.WLSQ.positive.2", "PA.WLSQ.positive.3", "PA.WLSQ.positive.4", "PA.WLSQ.positive.5", "PA.WLSQ.positive.6", "PA.WLSQ.negative.0", "PA.WLSQ.negative.1", "PA.WLSQ.negative.2", "PA.WLSQ.negative.3", "PA.WLSQ.negative.4", "PA.WLSQ.negative.5", "PA.WLSQ.negative.6", "PA.Width.positive", "PA.Width.negative", "PA.Width.proportion", "PA.Blobs.p.i.count", "PA.Blobs.p.i.area", "PA.Blobs.p.f.area", "PA.Blobs.p.f.count", "PA.Blobs.n.i.count", "PA.Blobs.n.i.area", "PA.Blobs.n.f.area", "PA.Blobs.n.f.count", "dot_max_area_pos", "pos_edge_dot_count", "pos_dot_count", "dot_max_area_neg", "neg_edge_dot_count", "neg_dot_count", "line_min_area_pos", "pos_edge_line_count", "pos_line_count", "line_min_area_neg", "neg_edge_line_count", "neg_line_count", "pos_min_area", "neg_min_area", "pos_mDist", "neg_mDist", "pos_t_defects.L", "pos_j_defects.L", "neg_t_defects.L", "neg_j_defects.L", "Skel.Coverage.Metric.pos", "Skel.Coverage.Metric.neg", "Correlation.Phase", "opa_factor", "opa_set_ds", "opa_set_ds_points", "opa_Hermans", "Array_Length_initial", "Array_Length_downsampled", "correlation_length", 
"correlation_length_linear", "opa_bar_R_squared", "loop_count", "P.LER_sigma_avg", "N.LER_sigma_avg", "LER_length_total_px", "LER_N_width_avg", "LER_P_width_avg", "LER_width_avg", "P.E.sigma.avg", "P.E.sigma.sigma", "P.E.sigma.min", "P.E.sigma.max", "P.E.sigma.count", "P.E.avg.avg", "P.E.avg.sigma", "P.E.avg.min", "P.E.avg.max", "P.E.avg.count", "P.E.count.avg", "P.E.count.sigma", "P.E.count.min", "P.E.count.max", "P.E.count.count", "P.E.sum.avg", "P.E.sum.sigma", "P.E.sum.min", "P.E.sum.max", "P.E.sum.count", "P.E.min.avg", "P.E.min.sigma", "P.E.min.min", "P.E.min.max", "P.E.min.count", "P.E.max.avg", "P.E.max.sigma", "P.E.max.min", "P.E.max.max", "P.E.max.count", "P.W.sigma.avg", "P.W.sigma.sigma", "P.W.sigma.min", "P.W.sigma.max", "P.W.sigma.count", "P.W.avg.avg", "P.W.avg.sigma", "P.W.avg.min", "P.W.avg.max", "P.W.avg.count", "P.W.count.avg", "P.W.count.sigma", "P.W.count.min", "P.W.count.max", "P.W.count.count", "P.W.sum.avg", "P.W.sum.sigma", "P.W.sum.min", "P.W.sum.max", "P.W.sum.count", "P.W.min.avg", "P.W.min.sigma", "P.W.min.min", "P.W.min.max", "P.W.min.count", "P.W.max.avg", "P.W.max.sigma", "P.W.max.min", "P.W.max.max", "P.W.max.count", "N.E.sigma.avg", "N.E.sigma.sigma", "N.E.sigma.min", "N.E.sigma.max", "N.E.sigma.count", "N.E.avg.avg", "N.E.avg.sigma", "N.E.avg.min", "N.E.avg.max", "N.E.avg.count", "N.E.count.avg", "N.E.count.sigma", "N.E.count.min", "N.E.count.max", "N.E.count.count", "N.E.sum.avg", "N.E.sum.sigma", "N.E.sum.min", "N.E.sum.max", "N.E.sum.count", "N.E.min.avg", "N.E.min.sigma", "N.E.min.min", "N.E.min.max", "N.E.min.count", "N.E.max.avg", "N.E.max.sigma", "N.E.max.min", "N.E.max.max", "N.E.max.count", "N.W.sigma.avg", "N.W.sigma.sigma", "N.W.sigma.min", "N.W.sigma.max", "N.W.sigma.count", "N.W.avg.avg", "N.W.avg.sigma", "N.W.avg.min", "N.W.avg.max", "N.W.avg.count", "N.W.count.avg", "N.W.count.sigma", "N.W.count.min", "N.W.count.max", "N.W.count.count", "N.W.sum.avg", "N.W.sum.sigma", "N.W.sum.min", "N.W.sum.max", "N.W.sum.count", 
"N.W.min.avg", "N.W.min.sigma", "N.W.min.min", "N.W.min.max", "N.W.min.count", "N.W.max.avg", "N.W.max.sigma", "N.W.max.min", "N.W.max.max", "N.W.max.count", "V.E.sigma.avg", "V.E.sigma.sigma", "V.E.sigma.min", "V.E.sigma.max", "V.E.sigma.count", "V.E.avg.avg", "V.E.avg.sigma", "V.E.avg.min", "V.E.avg.max", "V.E.avg.count", "V.E.count.avg", "V.E.count.sigma", "V.E.count.min", "V.E.count.max", "V.E.count.count", "V.E.sum.avg", "V.E.sum.sigma", "V.E.sum.min", "V.E.sum.max", "V.E.sum.count", "V.E.min.avg", "V.E.min.sigma", "V.E.min.min", "V.E.min.max", "V.E.min.count", "V.E.max.avg", "V.E.max.sigma", "V.E.max.min", "V.E.max.max", "V.E.max.count", "V.W.sigma.avg", "V.W.sigma.sigma", "V.W.sigma.min", "V.W.sigma.max", "V.W.sigma.count", "V.W.avg.avg", "V.W.avg.sigma", "V.W.avg.min", "V.W.avg.max", "V.W.avg.count", "V.W.count.avg", "V.W.count.sigma", "V.W.count.min", "V.W.count.max", "V.W.count.count", "V.W.sum.avg", "V.W.sum.sigma", "V.W.sum.min", "V.W.sum.max", "V.W.sum.count", "V.W.min.avg", "V.W.min.sigma", "V.W.min.min", "V.W.min.max", "V.W.min.count", "V.W.max.avg", "V.W.max.sigma", "V.W.max.min", "V.W.max.max", "V.W.max.count", "Skel.Dist.avg", "Skel.Dist.sigma", "Skel.Dist.min", "Skel.Dist.max", "Skel.Dist.count", "DRAW_edge_limit", "DRAW_jn_radius", "edge_limit", "PTE", "NTE", "PT", "NT", "PDE", "NDE", "PD", "ND", "PJ3", "NJ3", "PJ4", "NJ4", "PJx", "NJx", "Ptot", "Ntot", "Total_Defects", "Total_Area_px", "Total_Area_nm", "Defect_Density_nm", "Defect_Density_um"]
def clean_number(s):
    """Return True when *s* can be treated as a number.

    Non-string inputs (values already converted to float) are accepted
    outright.  For strings, a single leading '-' and any double quotes
    are stripped, then the remainder must be digits with at most one
    decimal point.  An empty string (or a string that is empty after
    stripping) now returns False; the original implementation raised
    IndexError on '' because it indexed s[0] unconditionally.
    """
    if not isinstance(s, str):
        return True
    # Allow one leading minus sign (but a bare '-' is not a number).
    if s.startswith('-') and len(s) > 1:
        s = s[1:]
    s = s.replace('"', '')
    if not s:
        return False
    # Permit at most one decimal point.
    return s.replace('.', '', 1).isdigit()
def clean_my_data(x_series,y_series):
    """Row-filter two parallel series: drop every index position at which
    either series still holds an (unparsed) string value."""
    kept_x, kept_y = [], []
    for idx in range(len(x_series)):
        if type(x_series[idx]) is str or type(y_series[idx]) is str:
            continue
        kept_x.append(x_series[idx])
        kept_y.append(y_series[idx])
    return kept_x, kept_y
def clean_my_data_3(x_series,y_series,z_series):
    """Row-filter three parallel series: drop every index position at
    which any of the three still holds an (unparsed) string value."""
    kept_x, kept_y, kept_z = [], [], []
    for idx in range(len(x_series)):
        if (type(x_series[idx]) is str or type(y_series[idx]) is str
                or type(z_series[idx]) is str):
            continue
        kept_x.append(x_series[idx])
        kept_y.append(y_series[idx])
        kept_z.append(z_series[idx])
    return kept_x, kept_y, kept_z
def clean_my_data_4(x_series,y_series,z_series, a_series):
    """Row-filter four parallel series: drop every index position at
    which any series still holds an (unparsed) string value."""
    columns = (x_series, y_series, z_series, a_series)
    outputs = tuple([] for _ in columns)
    for idx in range(len(x_series)):
        # any() short-circuits left-to-right, matching the original
        # chained `and` of not-a-string tests.
        if any(type(col[idx]) is str for col in columns):
            continue
        for out, col in zip(outputs, columns):
            out.append(col[idx])
    return outputs
def clean_my_data_5(x_series,y_series,z_series, a_series, b_series):
    """Row-filter five parallel series: drop every index position at
    which any series still holds an (unparsed) string value."""
    columns = (x_series, y_series, z_series, a_series, b_series)
    outputs = tuple([] for _ in columns)
    for idx in range(len(x_series)):
        # any() short-circuits left-to-right, matching the original
        # chained `and` of not-a-string tests.
        if any(type(col[idx]) is str for col in columns):
            continue
        for out, col in zip(outputs, columns):
            out.append(col[idx])
    return outputs
# Read the tab-separated summary file, un-escape doubled quotes, convert
# numeric-looking cells to float, and re-export as comma-separated output.
# NOTE(review): "rb"/"wb" modes with csv.reader/csv.writer are Python 2
# usage; on Python 3 open in text mode with newline=''.
rfile = open(CSV_file,"rb")
reader = csv.reader(rfile)
temporary_data = []
for row in reader:
    # Each physical row arrives as one comma-parsed cell; the real
    # delimiter is a tab, so split manually.
    temp = row[0].split("\t")
    for i in range(0,len(temp)):
        temp[i] = temp[i].replace('"""', '"')
        temp[i] = temp[i].replace('""', '"')
        if clean_number(temp[i]):
            temp[i] = float(temp[i].replace('"',''))
    #print(clean_number(temp[1]))
    #print(temp[1])
    #print("\n")
    temporary_data.append(temp)
rfile.close()
ofile = open(CSV_directory + output_filename, "wb")
writer = csv.writer(ofile, delimiter=',', quotechar="'", quoting=csv.QUOTE_MINIMAL) #NONNUMERIC) #quoting=csv.QUOTE_NONE)
for i in range(0,len(temporary_data)):
    data = []
    for j in range(0,len(temporary_data[i])):
        data.append(temporary_data[i][j])
    #print("\n")
    writer.writerow(temporary_data[i])
    #writer.writerow(data)
ofile.close()
print("DATA EXPORTED")
"""
Assembling Data
"""
''' Get BCP List '''
# Group rows by block-copolymer (BCP) name: the first whitespace-separated
# token of each row's image title (column 0).
bcp_set = []
bcp_legend = []
bcp_row = []
bcp_datasets = []
data_labels = labels #["nm_per_pixel", "wfft_Period_px", "wfft_Period_nm", "V.W.sigma.avg", "V.W.avg.avg","V.E.sigma.avg","V.E.avg.avg","V.W.avg.sigma"]
data_label_index = []
for i in range(1,len(temporary_data)):
    # 1: skip the first line with column headings
    bcp = temporary_data[i][0].split(' ')
    if bcp[0] not in bcp_set:
        bcp_set.append(bcp[0])
        bcp_datasets.append([])
        bcp_row.append([])
    #for k in range(0,len(data_labels)):
    #    bcp_datasets[len(bcp_datasets)-1].append([])
bcp_set.sort()
# Legend strings: swap '-' and '_' so names render as e.g. "PS.b-PMMA".
for i in range(0,len(bcp_set)):
    bcp_legend.append(bcp_set[i].replace("-",".").replace("_","-"))
print(bcp_set)
print(bcp_datasets)
''' Find all of the relevant rows for each BCP set
No need to re-confirm that it matches the BCP '''
for i in range(0,len(bcp_set)):
    values = []
    for row in range(1,len(temporary_data)):
        # 1: skip the first line with column headings
        bcp = temporary_data[row][0].split(' ')
        if bcp[0] == bcp_set[i]:
            values.append(row)
    bcp_row[i] = values
    print(bcp_row[i])
'''Create data_label_index to avoid need to match each column'''
# Map each expected heading in data_labels to its column index in the
# CSV header row (row 0), stripping quote characters before comparing.
for k in range(0,len(data_labels)):
    #print(data_labels[k])
    for i in range(0,len(temporary_data[0])):
        #print(temporary_data[0][i])
        if data_labels[k] == temporary_data[0][i].replace('"',''):
            data_label_index.append(i)
print(data_label_index)
## This would be going across the columns... why not go down the columns?
# for i in range(1,len(temporary_data)):
#     # 1: skip the first line with column headings
#     bcp = temporary_data[i][0].split(' ')
#     for j in range(0,len(bcp_set)):
#         if bcp[0] == bcp_set[j]:
#             J = j
"""Collect Values and Attach as a list"""
# bcp_datasets[i][m] becomes the list of values of column data_labels[m]
# over all rows belonging to BCP i.
for i in range(0,len(bcp_set)):
    #print(bcp_set[i])
    for m in range(0,len(data_labels)):
        values = []
        for k in range(0,len(bcp_row[i])):
            values.append(temporary_data[bcp_row[i][k]][data_label_index[m]])
        bcp_datasets[i].append(values)
        #print(data_labels[m])
        #print(values)
"""
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ PLOTS BELOW HERE @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
"""
"""
Plotting Data
"""
import matplotlib.pyplot as plt
# http://matplotlib.org/api/pyplot_api.html
# One marker/color/size per BCP group (indexed by position in bcp_set).
marker_style = ["o","v","s","p","D"] #A list of several possible data markers
marker_color = ["SteelBlue","SeaGreen","GoldenRod","OrangeRed","FireBrick"]
marker_size = [9,10,9,11,9]
#axis_font = {'fontname':'Droid Sans', 'size':'18', 'color':'black', 'weight':'normal'}
font = {'family' : 'sans-serif',
        'weight' : 'bold',
        'size' : 27.5,
        'sans-serif' : 'Arial'}
plt.rc('font', **font)
axes_style = {}
#http://stackoverflow.com/questions/3899980/how-to-change-the-font-size-on-a-matplotlib-plot
#http://matplotlib.org/users/customizing.html
# set tick width
plt.rcParams['axes.linewidth'] = 4
#X-axis
plt.rcParams['xtick.major.size'] = 12
plt.rcParams['xtick.major.width'] = 3
plt.rcParams['xtick.minor.size'] = 6
plt.rcParams['xtick.minor.width'] = 2
#Y-axis
plt.rcParams['ytick.major.size'] = 12
plt.rcParams['ytick.major.width'] = 3
plt.rcParams['ytick.minor.size'] = 6
plt.rcParams['ytick.minor.width'] = 2
#http://stackoverflow.com/questions/14705904/matplotlib-ticks-thickness
plotlegend = 0 #Set to 1 in order for legends to be turned on.
"""
PLOT 1:
X: wfft_Period_nm
Y: V.E.sigma.avg / V.W.avg.avg
"""
# ---- Plot 1: normalized line-edge roughness (edge sigma / mean width)
# versus FFT-derived pattern period, one marker series per BCP. ----
plot_file_1 = "1_LineEdge_StDevNorm_vs_Period"
x_axis_label = "Period (nm)"
y_axis_label = "Line Edge: St.Dev/Mean.Width (nm/nm)"
x_points = []
y_points = []
''' Need to ensure that these labels are included in data_labels
Could just set data_labels = labels, but that slows down re-compiling the data'''
a_label = "wfft_Period_nm"
b_label = "V.E.sigma.avg"
c_label = "V.W.avg.avg"
d_label = "nm_per_pixel"
a_data = []
b_data = []
c_data = []
d_data = []
# Resolve each heading to its CSV column index.  NOTE: if a label is
# missing, the corresponding i_* stays unbound and the prints below
# raise NameError (see the "put some error" TODO).
for i in range(0,len(data_labels)):
    if a_label == data_labels[i]:
        i_a = data_label_index[i]
    if b_label == data_labels[i]:
        i_b = data_label_index[i]
    if c_label == data_labels[i]:
        i_c = data_label_index[i]
    if d_label == data_labels[i]:
        i_d = data_label_index[i]
print(data_labels)
print("i_a: " + str(i_a))
print("i_b: " + str(i_b))
print("i_c: " + str(i_c))
print("i_d: " + str(i_d))
## put some error in here if it's still -1,-1
#marker_style = ["s","v","o","p","D"] #A list of several possible data markers
#marker_color = ["red","green","blue","cyan","magenta"]
#marker_size = [9,10,9,11,9]
#colors = itertools.cycle(['b', 'g', 'r', 'c', 'm', 'y', 'k'])
## Simple case; no clean-up / filtering required
# Gather the four columns per BCP group (d / nm_per_pixel is collected
# here but not used by this particular plot).
for k in range(0,len(bcp_set)):
    a_values = []
    b_values = []
    c_values = []
    d_values = []
    for i in range(0,len(bcp_row[k])):
        #print(bcp_row[k])
        #print(ix)
        #print(iy)
        #a_values.append(float(temporary_data[bcp_row[k][i]][i_a]))
        #b_values.append(float(temporary_data[bcp_row[k][i]][i_b]))
        #c_values.append(float(temporary_data[bcp_row[k][i]][i_c]))
        #d_values.append(float(temporary_data[bcp_row[k][i]][i_d]))
        a_values.append(temporary_data[bcp_row[k][i]][i_a])
        b_values.append(temporary_data[bcp_row[k][i]][i_b])
        c_values.append(temporary_data[bcp_row[k][i]][i_c])
        d_values.append(temporary_data[bcp_row[k][i]][i_d])
    a_data.append(a_values)
    b_data.append(b_values)
    c_data.append(c_values)
    d_data.append(d_values)
y_boxplot = []
x_boxplot = []
# Drop rows with unparsed (string) cells, then plot sigma/mean per group.
for k in range(0,len(bcp_set)):
    x_points , b_points , c_points = clean_my_data_3(a_data[k],b_data[k],c_data[k])
    y_points = []
    for i in range(0,len(x_points)):
        x_points[i] = float(x_points[i])
        y_points.append( float(b_points[i]) / float(c_points[i]) )
    y_boxplot.append(y_points)
    x_boxplot.append(x_points)
    #print(x_points)
    #print(y_points)
    plt.plot(x_points, y_points, marker_style[k], linestyle="none", markersize=marker_size[k], label=bcp_legend[k], color=marker_color[k], alpha=0.75)
#axis labels
axis_font = {'fontname':'Arial', 'size':'28', 'color':'black', 'weight':'bold'}
plt.xlabel(x_axis_label, **axis_font)
#####plt.ylabel(y_axis_label, **axis_font)
if plotlegend: plt.legend(loc='best', numpoints=1)
plt.ylim([0,0.25])
plt.savefig(CSV_directory + plot_file_1 + '.png', bbox_inches='tight')
plt.savefig(CSV_directory + plot_file_1 + '.svg')
#plt.show()
plt.cla()
plt.clf()
"""BOX PLOT"""
#convert x values to displacements
# Jitter: re-center each group's x values around its box position (i+1),
# scaled down by bp_scale_x, so points overlay their own box.
bp_scale_x = 5
for i in range(0,len(x_boxplot)):
    x_avg = sum(x_boxplot[i])/len(x_boxplot[i])
    print(x_avg)
    for j in range(0,len(x_boxplot[i])):
        x_boxplot[i][j] = (x_boxplot[i][j] - x_avg)/bp_scale_x + i + 1
#print(y_boxplot)
#print(x_boxplot)
plt.boxplot(y_boxplot)
plt.ylim([0,0.25])
for k in range(0,len(y_boxplot)):
    plt.plot(x_boxplot[k],y_boxplot[k], marker_style[k], linestyle="none", markersize=marker_size[k]/2, label=bcp_legend[k], color=marker_color[k], alpha=0.75)
"""Boxplot Labels, etc..."""
if plotlegend: plt.legend(loc='best', numpoints=1)
x_axis_label_bp = "Grouped by Polymer"
plt.xlabel(x_axis_label_bp, **axis_font)
#####plt.ylabel(y_axis_label, **axis_font)
plt.savefig(CSV_directory + "BP" + plot_file_1 + '.png', bbox_inches='tight')
plt.savefig(CSV_directory + "BP" + plot_file_1 + '.svg')
#plt.show()
plt.cla()
plt.clf()
"""
PLOT 2:
X: wfft_Period_nm
Y: V.W.sigma.avg / V.W.avg.avg
"""
# ---- Plot 2: normalized line-width roughness (width sigma / mean width)
# versus pattern period; same structure as Plot 1 with b_label swapped. ----
plot_file_2 = "2_LineWidth_StDevNorm_vs_Period"
x_axis_label = "Period (nm)"
y_axis_label = "Line Width: St.Dev/Mean (nm/nm)"
x_points = []
y_points = []
''' Need to ensure that these labels are included in data_labels
Could just set data_labels = labels, but that slows down re-compiling the data'''
a_label = "wfft_Period_nm"
b_label = "V.W.sigma.avg"
c_label = "V.W.avg.avg"
d_label = "nm_per_pixel"
a_data = []
b_data = []
c_data = []
d_data = []
for i in range(0,len(data_labels)):
    if a_label == data_labels[i]:
        i_a = data_label_index[i]
    if b_label == data_labels[i]:
        i_b = data_label_index[i]
    if c_label == data_labels[i]:
        i_c = data_label_index[i]
    if d_label == data_labels[i]:
        i_d = data_label_index[i]
print(data_labels)
print("i_a: " + str(i_a))
print("i_b: " + str(i_b))
print("i_c: " + str(i_c))
print("i_d: " + str(i_d))
## put some error in here if it's still -1,-1
#marker_style = ["s","v","o","p","D"] #A list of several possible data markers
#marker_color = ["red","green","blue","cyan","magenta"]
#marker_size = [9,10,9,11,9]
#colors = itertools.cycle(['b', 'g', 'r', 'c', 'm', 'y', 'k'])
## Simple case; no clean-up / filtering required
# Gather the four columns per BCP group (d is unused in this plot).
for k in range(0,len(bcp_set)):
    a_values = []
    b_values = []
    c_values = []
    d_values = []
    for i in range(0,len(bcp_row[k])):
        #print(bcp_row[k])
        #print(ix)
        #print(iy)
        #a_values.append(float(temporary_data[bcp_row[k][i]][i_a]))
        #b_values.append(float(temporary_data[bcp_row[k][i]][i_b]))
        #c_values.append(float(temporary_data[bcp_row[k][i]][i_c]))
        #d_values.append(float(temporary_data[bcp_row[k][i]][i_d]))
        a_values.append(temporary_data[bcp_row[k][i]][i_a])
        b_values.append(temporary_data[bcp_row[k][i]][i_b])
        c_values.append(temporary_data[bcp_row[k][i]][i_c])
        d_values.append(temporary_data[bcp_row[k][i]][i_d])
    a_data.append(a_values)
    b_data.append(b_values)
    c_data.append(c_values)
    d_data.append(d_values)
y_boxplot = []
x_boxplot = []
for k in range(0,len(bcp_set)):
    x_points , b_points , c_points = clean_my_data_3(a_data[k],b_data[k],c_data[k])
    y_points = []
    for i in range(0,len(x_points)):
        x_points[i] = float(x_points[i])
        y_points.append( float(b_points[i]) / float(c_points[i]) )
    y_boxplot.append(y_points)
    x_boxplot.append(x_points)
    #print(x_points)
    #print(y_points)
    plt.plot(x_points, y_points, marker_style[k], linestyle="none", markersize=marker_size[k], label=bcp_legend[k], color=marker_color[k], alpha=0.75)
#axis labels
axis_font = {'fontname':'Arial', 'size':'28', 'color':'black', 'weight':'bold'}
plt.xlabel(x_axis_label, **axis_font)
#####plt.ylabel(y_axis_label, **axis_font)
if plotlegend: plt.legend(loc='best', numpoints=1)
plt.ylim([0,0.25])
plt.savefig(CSV_directory + plot_file_2 + '.png', bbox_inches='tight')
plt.savefig(CSV_directory + plot_file_2 + '.svg')
#plt.show()
plt.cla()
plt.clf()
"""BOX PLOT"""
#convert x values to displacements
# Jitter x values around each group's box position (see Plot 1).
bp_scale_x = 5
for i in range(0,len(x_boxplot)):
    x_avg = sum(x_boxplot[i])/len(x_boxplot[i])
    print(x_avg)
    for j in range(0,len(x_boxplot[i])):
        x_boxplot[i][j] = (x_boxplot[i][j] - x_avg)/bp_scale_x + i + 1
#print(y_boxplot)
#print(x_boxplot)
plt.boxplot(y_boxplot)
plt.ylim([0,0.25])
for k in range(0,len(y_boxplot)):
    plt.plot(x_boxplot[k],y_boxplot[k], marker_style[k], linestyle="none", markersize=marker_size[k]/2, label=bcp_legend[k], color=marker_color[k], alpha=0.75)
"""Boxplot Labels, etc..."""
if plotlegend: plt.legend(loc='best', numpoints=1)
x_axis_label_bp = "Grouped by Polymer"
plt.xlabel(x_axis_label_bp, **axis_font)
#####plt.ylabel(y_axis_label, **axis_font)
plt.savefig(CSV_directory + "BP" + plot_file_2 + '.png', bbox_inches='tight')
plt.savefig(CSV_directory + "BP" + plot_file_2 + '.svg')
#plt.show()
plt.cla()
plt.clf()
"""
PLOT 3:
X: wfft_Period_nm
Y: V.W.avg.avg +/- V.W.avg.sigma
"""
# ---- Plot 3: mean line width (converted from pixels to nm via
# nm_per_pixel) with error bars of +/- one sigma, versus period. ----
plot_file_3 = "3_LineWidth_vs_Period"
x_axis_label = "Period (nm)"
y_axis_label = "Line Width (nm)"
x_points = []
y_points = []
''' Need to ensure that these labels are included in data_labels
Could just set data_labels = labels, but that slows down re-compiling the data'''
a_label = "wfft_Period_nm"
b_label = "V.W.avg.avg"
c_label = "V.W.avg.sigma"
d_label = "nm_per_pixel"
a_data = []
b_data = []
c_data = []
d_data = []
for i in range(0,len(data_labels)):
    if a_label == data_labels[i]:
        i_a = data_label_index[i]
    if b_label == data_labels[i]:
        i_b = data_label_index[i]
    if c_label == data_labels[i]:
        i_c = data_label_index[i]
    if d_label == data_labels[i]:
        i_d = data_label_index[i]
print(data_labels)
print("i_a: " + str(i_a))
print("i_b: " + str(i_b))
print("i_c: " + str(i_c))
print("i_d: " + str(i_d))
## put some error in here if it's still -1,-1
#marker_style = ["s","v","o","p","D"] #A list of several possible data markers
#marker_color = ["red","green","blue","cyan","magenta"]
#marker_size = [9,10,9,11,9]
#colors = itertools.cycle(['b', 'g', 'r', 'c', 'm', 'y', 'k'])
## Simple case; no clean-up / filtering required
for k in range(0,len(bcp_set)):
    a_values = []
    b_values = []
    c_values = []
    d_values = []
    for i in range(0,len(bcp_row[k])):
        #print(bcp_row[k])
        #print(ix)
        #print(iy)
        #a_values.append(float(temporary_data[bcp_row[k][i]][i_a]))
        #b_values.append(float(temporary_data[bcp_row[k][i]][i_b]))
        #c_values.append(float(temporary_data[bcp_row[k][i]][i_c]))
        #d_values.append(float(temporary_data[bcp_row[k][i]][i_d]))
        a_values.append(temporary_data[bcp_row[k][i]][i_a])
        b_values.append(temporary_data[bcp_row[k][i]][i_b])
        c_values.append(temporary_data[bcp_row[k][i]][i_c])
        d_values.append(temporary_data[bcp_row[k][i]][i_d])
    a_data.append(a_values)
    b_data.append(b_values)
    c_data.append(c_values)
    d_data.append(d_values)
y_boxplot = []
x_boxplot = []
# y = mean width * nm_per_pixel; yerr = sigma * nm_per_pixel (px -> nm).
for k in range(0,len(bcp_set)):
    x_points , b_points , c_points, d_points = clean_my_data_4(a_data[k],b_data[k],c_data[k],d_data[k])
    y_points = []
    yerr_points = []
    for i in range(0,len(x_points)):
        x_points[i] = float(x_points[i])
        y_points.append( float(b_points[i]) * float(d_points[i]) )
        yerr_points.append( float(c_points[i]) * float(d_points[i]) )
    y_boxplot.append(y_points)
    x_boxplot.append(x_points)
    #print(x_points)
    #print(y_points)
    plt.plot(x_points, y_points, marker_style[k], linestyle="none", markersize=marker_size[k], label=bcp_legend[k], color=marker_color[k], alpha=0.75)
    plt.errorbar(x_points,y_points,yerr=yerr_points, linestyle="none", color=marker_color[k])
#axis labels
axis_font = {'fontname':'Arial', 'size':'28', 'color':'black', 'weight':'bold'}
plt.xlabel(x_axis_label, **axis_font)
#####plt.ylabel(y_axis_label, **axis_font)
if plotlegend: plt.legend(loc='best', numpoints=1)
plt.ylim([0,20])
plt.savefig(CSV_directory + plot_file_3 + '.png', bbox_inches='tight')
plt.savefig(CSV_directory + plot_file_3 + '.svg')
#plt.show()
plt.cla()
plt.clf()
"""BOX PLOT"""
#convert x values to displacements
# Jitter x values around each group's box position (see Plot 1).
bp_scale_x = 5
for i in range(0,len(x_boxplot)):
    x_avg = sum(x_boxplot[i])/len(x_boxplot[i])
    print(x_avg)
    for j in range(0,len(x_boxplot[i])):
        x_boxplot[i][j] = (x_boxplot[i][j] - x_avg)/bp_scale_x + i + 1
#print(y_boxplot)
#print(x_boxplot)
plt.boxplot(y_boxplot)
plt.ylim([0,20])
for k in range(0,len(y_boxplot)):
    plt.plot(x_boxplot[k],y_boxplot[k], marker_style[k], linestyle="none", markersize=marker_size[k]/2, label=bcp_legend[k], color=marker_color[k], alpha=0.75)
"""Boxplot Labels, etc..."""
if plotlegend: plt.legend(loc='best', numpoints=1)
x_axis_label_bp = "Grouped by Polymer"
plt.xlabel(x_axis_label_bp, **axis_font)
#####plt.ylabel(y_axis_label, **axis_font)
plt.savefig(CSV_directory + "BP" + plot_file_3 + '.png', bbox_inches='tight')
plt.savefig(CSV_directory + "BP" + plot_file_3 + '.svg')
#plt.show()
plt.cla()
plt.clf()
"""
PLOT 4:
X: nm_per_pixel
Y: opa_Hermans
"""
# ---- Plot 4: Herman's 2D order parameter versus image resolution. ----
# NOTE(review): the plotted x value is nm_per_pixel / wfft_Period_nm
# (i.e. 1 / period-in-pixels), not raw nm/pixel as the axis label
# suggests -- confirm intent.
plot_file_4 = "4_Hermans_vs_Resolution"
x_axis_label = "Resolution (nm/pixel)"
y_axis_label = "Herman's 2D Order Parameter (unitless)"
x_points = []
y_points = []
''' Need to ensure that these labels are included in data_labels
Could just set data_labels = labels, but that slows down re-compiling the data'''
a_label = "opa_Hermans"
b_label = "nm_per_pixel"
c_label = "wfft_Period_nm"
d_label = "wfft_Period_px"
a_data = []
b_data = []
c_data = []
d_data = []
for i in range(0,len(data_labels)):
    if a_label == data_labels[i]:
        i_a = data_label_index[i]
    if b_label == data_labels[i]:
        i_b = data_label_index[i]
    if c_label == data_labels[i]:
        i_c = data_label_index[i]
    if d_label == data_labels[i]:
        i_d = data_label_index[i]
print(data_labels)
print("i_a: " + str(i_a))
print("i_b: " + str(i_b))
print("i_c: " + str(i_c))
print("i_d: " + str(i_d))
## put some error in here if it's still -1,-1
#marker_style = ["s","v","o","p","D"] #A list of several possible data markers
#marker_color = ["red","green","blue","cyan","magenta"]
#marker_size = [9,10,9,11,9]
#colors = itertools.cycle(['b', 'g', 'r', 'c', 'm', 'y', 'k'])
## Simple case; no clean-up / filtering required
# Gather the four columns per BCP group (d / wfft_Period_px is collected
# but not used by this plot).
for k in range(0,len(bcp_set)):
    a_values = []
    b_values = []
    c_values = []
    d_values = []
    for i in range(0,len(bcp_row[k])):
        #print(bcp_row[k])
        #print(ix)
        #print(iy)
        #a_values.append(float(temporary_data[bcp_row[k][i]][i_a]))
        #b_values.append(float(temporary_data[bcp_row[k][i]][i_b]))
        #c_values.append(float(temporary_data[bcp_row[k][i]][i_c]))
        #d_values.append(float(temporary_data[bcp_row[k][i]][i_d]))
        a_values.append(temporary_data[bcp_row[k][i]][i_a])
        b_values.append(temporary_data[bcp_row[k][i]][i_b])
        c_values.append(temporary_data[bcp_row[k][i]][i_c])
        d_values.append(temporary_data[bcp_row[k][i]][i_d])
    a_data.append(a_values)
    b_data.append(b_values)
    c_data.append(c_values)
    d_data.append(d_values)
y_boxplot = []
x_boxplot = []
for k in range(0,len(bcp_set)):
    y_points , b_points, c_points = clean_my_data_3(a_data[k],b_data[k], c_data[k])
    x_points = []
    # yerr_points = []
    for i in range(0,len(y_points)):
        #x_points[i] = float(x_points[i])
        x_points.append( float(b_points[i]) / float(c_points[i]) )
        #yerr_points.append( float(c_points[i]) * float(d_points[i]) )
    y_boxplot.append(y_points)
    x_boxplot.append(x_points)
    #print(x_points)
    #print(y_points)
    plt.plot(x_points, y_points, marker_style[k], linestyle="none", markersize=marker_size[k], label=bcp_legend[k], color=marker_color[k], alpha=0.75)
#axis labels
axis_font = {'fontname':'Arial', 'size':'28', 'color':'black', 'weight':'bold'}
plt.xlabel(x_axis_label, **axis_font)
#####plt.ylabel(y_axis_label, **axis_font)
if plotlegend: plt.legend(loc='best', numpoints=1)
plt.ylim([0,1])
plt.savefig(CSV_directory + plot_file_4 + '.png', bbox_inches='tight')
plt.savefig(CSV_directory + plot_file_4 + '.svg')
#plt.show()
plt.cla()
plt.clf()
"""
PLOT 5:
X: total wire length measured
Y: opa_Hermans
"""
plot_file_5 = "5_Hermans_vs_Total_Length_nm"
x_axis_label = "Total Length of Lines (um)"
y_axis_label = "Herman's 2D Order Parameter (unitless)"
x_points = []
y_points = []
''' Need to ensure that these labels are included in data_labels
Could just set data_labels = labels, but that slows down re-compiling the data'''
a_label = "opa_Hermans"
b_label = "nm_per_pixel"
c_label = "wfft_Period_nm"
d_label = "Width_initial"
e_label = "Height_initial"
a_data = []
b_data = []
c_data = []
d_data = []
e_data = []
for i in range(0,len(data_labels)):
if a_label == data_labels[i]:
i_a = data_label_index[i]
if b_label == data_labels[i]:
i_b = data_label_index[i]
if c_label == data_labels[i]:
i_c = data_label_index[i]
if d_label == data_labels[i]:
i_d = data_label_index[i]
if e_label == data_labels[i]:
i_e = data_label_index[i]
print(data_labels)
print("i_a: " + str(i_a))
print("i_b: " + str(i_b))
print("i_c: " + str(i_c))
print("i_d: " + str(i_d))
print("i_e: " + str(i_e))
## put some error in here if it's still -1,-1
#marker_style = ["s","v","o","p","D"] #A list of several possible data markers
#marker_color = ["red","green","blue","cyan","magenta"]
#marker_size = [9,10,9,11,9]
#colors = itertools.cycle(['b', 'g', 'r', 'c', 'm', 'y', 'k'])
## Simple case; no clean-up / filtering required
for k in range(0,len(bcp_set)):
a_values = []
b_values = []
c_values = []
d_values = []
e_values = []
for i in range(0,len(bcp_row[k])):
#print(bcp_row[k])
#print(ix)
#print(iy)
#a_values.append(float(temporary_data[bcp_row[k][i]][i_a]))
#b_values.append(float(temporary_data[bcp_row[k][i]][i_b]))
#c_values.append(float(temporary_data[bcp_row[k][i]][i_c]))
#d_values.append(float(temporary_data[bcp_row[k][i]][i_d]))
a_values.append(temporary_data[bcp_row[k][i]][i_a])
b_values.append(temporary_data[bcp_row[k][i]][i_b])
c_values.append(temporary_data[bcp_row[k][i]][i_c])
d_values.append(temporary_data[bcp_row[k][i]][i_d])
e_values.append(temporary_data[bcp_row[k][i]][i_e])
a_data.append(a_values)
b_data.append(b_values)
c_data.append(c_values)
d_data.append(d_values)
e_data.append(e_values)
y_boxplot = []
x_boxplot = []
for k in range(0,len(bcp_set)):
y_points , b_points, c_points, d_points, e_points = clean_my_data_5(a_data[k], b_data[k], c_data[k], d_data[k], e_data[k])
x_points = []
# a_label = "opa_Hermans"
# b_label = "nm_per_pixel"
# c_label = "wfft_Period_nm"
# d_label = "Width_initial"
# e_label = "Height_initial"
for i in range(0,len(y_points)):
#x_points[i] = float(x_points[i])
x_points.append( float(d_points[i]) * float(e_points[i]) * float(b_points[i])* float(b_points[i]) / float(c_points[i]) /1000 )
#yerr_points.append( float(c_points[i]) * float(d_points[i]) )
y_boxplot.append(y_points)
x_boxplot.append(x_points)
#print(x_points)
#print(y_points)
plt.plot(x_points, y_points, marker_style[k], linestyle="none", markersize=marker_size[k], label=bcp_legend[k], color=marker_color[k], alpha=0.75)
#axis labels
axis_font = {'fontname':'Arial', 'size':'28', 'color':'black', 'weight':'bold'}
plt.xlabel(x_axis_label, **axis_font)
#####plt.ylabel(y_axis_label, **axis_font)
if plotlegend: plt.legend(loc='best', numpoints=1)
plt.ylim([0,1])
plt.xscale('log')
plt.xlim([1,10000])
plt.savefig(CSV_directory + plot_file_5 + '.png', bbox_inches='tight')
plt.savefig(CSV_directory + plot_file_5 + '.svg')
#plt.show()
plt.cla()
plt.clf()
"""
PLOT 6:
X: total wire length measured
Y: correlation_length
"""
plot_file_6 = "6_Correlation_nm_vs_Total_Length_nm"
x_axis_label = "Total Length of Lines (um)"
y_axis_label = "Correlation Length (nm)"
x_points = []
y_points = []
''' Need to ensure that these labels are included in data_labels
Could just set data_labels = labels, but that slows down re-compiling the data'''
a_label = "correlation_length"
b_label = "nm_per_pixel"
c_label = "wfft_Period_nm"
d_label = "Width_initial"
e_label = "Height_initial"
a_data = []
b_data = []
c_data = []
d_data = []
e_data = []
for i in range(0,len(data_labels)):
if a_label == data_labels[i]:
i_a = data_label_index[i]
if b_label == data_labels[i]:
i_b = data_label_index[i]
if c_label == data_labels[i]:
i_c = data_label_index[i]
if d_label == data_labels[i]:
i_d = data_label_index[i]
if e_label == data_labels[i]:
i_e = data_label_index[i]
print(data_labels)
print("i_a: " + str(i_a))
print("i_b: " + str(i_b))
print("i_c: " + str(i_c))
print("i_d: " + str(i_d))
print("i_e: " + str(i_e))
## put some error in here if it's still -1,-1
#marker_style = ["s","v","o","p","D"] #A list of several possible data markers
#marker_color = ["red","green","blue","cyan","magenta"]
#marker_size = [9,10,9,11,9]
#colors = itertools.cycle(['b', 'g', 'r', 'c', 'm', 'y', 'k'])
## Simple case; no clean-up / filtering required
for k in range(0,len(bcp_set)):
a_values = []
b_values = []
c_values = []
d_values = []
e_values = []
for i in range(0,len(bcp_row[k])):
#print(bcp_row[k])
#print(ix)
#print(iy)
#a_values.append(float(temporary_data[bcp_row[k][i]][i_a]))
#b_values.append(float(temporary_data[bcp_row[k][i]][i_b]))
#c_values.append(float(temporary_data[bcp_row[k][i]][i_c]))
#d_values.append(float(temporary_data[bcp_row[k][i]][i_d]))
a_values.append(temporary_data[bcp_row[k][i]][i_a])
b_values.append(temporary_data[bcp_row[k][i]][i_b])
c_values.append(temporary_data[bcp_row[k][i]][i_c])
d_values.append(temporary_data[bcp_row[k][i]][i_d])
e_values.append(temporary_data[bcp_row[k][i]][i_e])
a_data.append(a_values)
b_data.append(b_values)
c_data.append(c_values)
d_data.append(d_values)
e_data.append(e_values)
y_boxplot = []
x_boxplot = []
for k in range(0,len(bcp_set)):
a_points , b_points, c_points, d_points, e_points = clean_my_data_5(a_data[k], b_data[k], c_data[k], d_data[k], e_data[k])
x_points = []
y_points = []
# a_label = "opa_Hermans"
# b_label = "nm_per_pixel"
# c_label = "wfft_Period_nm"
# d_label = "Width_initial"
# e_label = "Height_initial"
for i in range(0,len(a_points)):
#x_points[i] = float(x_points[i])
x_points.append( float(d_points[i]) * float(e_points[i]) * float(b_points[i])* float(b_points[i]) / float(c_points[i]) /1000 )
y_points.append(float(a_points[i])*float(b_points[i]))
#yerr_points.append( float(c_points[i]) * float(d_points[i]) )
y_boxplot.append(y_points)
x_boxplot.append(x_points)
#print(x_points)
#print(y_points)
plt.plot(x_points, y_points, marker_style[k], linestyle="none", markersize=marker_size[k], label=bcp_legend[k], color=marker_color[k], alpha=0.75)
#axis labels
axis_font = {'fontname':'Arial', 'size':'28', 'color':'black', 'weight':'bold'}
plt.xlabel(x_axis_label, **axis_font)
#####plt.ylabel(y_axis_label, **axis_font)
if plotlegend: plt.legend(loc='best', numpoints=1)
plt.yscale('log')
plt.ylim([10,2000])
plt.xscale('log')
plt.xlim([1,1000])
plt.savefig(CSV_directory + plot_file_6 + '.png', bbox_inches='tight')
plt.savefig(CSV_directory + plot_file_6 + '.svg')
#plt.show()
plt.cla()
plt.clf()
"""
PLOT 7:
X: Image Area (um^2)
Y: correlation_length
"""
plot_file_7 = "7_Correlation_nm_vs_ImageArea_um2"
x_axis_label = "Image Area (um )" #insert superscript-2 later
y_axis_label = "Correlation Length (nm)"
x_points = []
y_points = []
''' Need to ensure that these labels are included in data_labels
Could just set data_labels = labels, but that slows down re-compiling the data'''
a_label = "correlation_length"
b_label = "nm_per_pixel"
c_label = "wfft_Period_nm"
d_label = "Width_initial"
e_label = "Height_initial"
a_data = []
b_data = []
c_data = []
d_data = []
e_data = []
for i in range(0,len(data_labels)):
if a_label == data_labels[i]:
i_a = data_label_index[i]
if b_label == data_labels[i]:
i_b = data_label_index[i]
if c_label == data_labels[i]:
i_c = data_label_index[i]
if d_label == data_labels[i]:
i_d = data_label_index[i]
if e_label == data_labels[i]:
i_e = data_label_index[i]
print(data_labels)
print("i_a: " + str(i_a))
print("i_b: " + str(i_b))
print("i_c: " + str(i_c))
print("i_d: " + str(i_d))
print("i_e: " + str(i_e))
## put some error in here if it's still -1,-1
#marker_style = ["s","v","o","p","D"] #A list of several possible data markers
#marker_color = ["red","green","blue","cyan","magenta"]
#marker_size = [9,10,9,11,9]
#colors = itertools.cycle(['b', 'g', 'r', 'c', 'm', 'y', 'k'])
## Simple case; no clean-up / filtering required
for k in range(0,len(bcp_set)):
a_values = []
b_values = []
c_values = []
d_values = []
e_values = []
for i in range(0,len(bcp_row[k])):
#print(bcp_row[k])
#print(ix)
#print(iy)
#a_values.append(float(temporary_data[bcp_row[k][i]][i_a]))
#b_values.append(float(temporary_data[bcp_row[k][i]][i_b]))
#c_values.append(float(temporary_data[bcp_row[k][i]][i_c]))
#d_values.append(float(temporary_data[bcp_row[k][i]][i_d]))
a_values.append(temporary_data[bcp_row[k][i]][i_a])
b_values.append(temporary_data[bcp_row[k][i]][i_b])
c_values.append(temporary_data[bcp_row[k][i]][i_c])
d_values.append(temporary_data[bcp_row[k][i]][i_d])
e_values.append(temporary_data[bcp_row[k][i]][i_e])
a_data.append(a_values)
b_data.append(b_values)
c_data.append(c_values)
d_data.append(d_values)
e_data.append(e_values)
y_boxplot = []
x_boxplot = []
for k in range(0,len(bcp_set)):
a_points , b_points, c_points, d_points, e_points = clean_my_data_5(a_data[k], b_data[k], c_data[k], d_data[k], e_data[k])
x_points = []
y_points = []
# a_label = "opa_Hermans"
# b_label = "nm_per_pixel"
# c_label = "wfft_Period_nm"
# d_label = "Width_initial"
# e_label = "Height_initial"
for i in range(0,len(a_points)):
#x_points[i] = float(x_points[i])
y_points.append(float(a_points[i])*float(b_points[i]))
x_points.append( float(d_points[i]) * float(e_points[i]) * float(b_points[i])* float(b_points[i]) / 1000000 )
#yerr_points.append( float(c_points[i]) * float(d_points[i]) )
y_boxplot.append(y_points)
x_boxplot.append(x_points)
#print(x_points)
#print(y_points)
plt.plot(x_points, y_points, marker_style[k], linestyle="none", markersize=marker_size[k], label=bcp_legend[k], color=marker_color[k], alpha=0.75)
#axis labels
axis_font = {'fontname':'Arial', 'size':'28', 'color':'black', 'weight':'bold'}
plt.xlabel(x_axis_label, **axis_font)
#####plt.ylabel(y_axis_label, **axis_font)
if plotlegend: plt.legend(loc='best', numpoints=1)
plt.yscale('log')
plt.ylim([10,2000])
plt.xscale('log')
#plt.xlim([1,1000])
plt.savefig(CSV_directory + plot_file_7 + '.png', bbox_inches='tight')
plt.savefig(CSV_directory + plot_file_7 + '.svg')
#plt.show()
plt.cla()
plt.clf()
"""
PLOT 8:
X: Image Area (um^2)
Y: Defect Density
"""
plot_file_8 = "8_DefectDensity_vs_ImageArea_um2"
x_axis_label = "Image Area (um )" #insert superscript 2 later
y_axis_label = "Defect Pair Density (um )" #insert superscript -2 later
x_points = []
y_points = []
''' Need to ensure that these labels are included in data_labels
Could just set data_labels = labels, but that slows down re-compiling the data'''
a_label = "Defect_Density_um"
b_label = "nm_per_pixel"
c_label = "wfft_Period_nm"
d_label = "Width_initial"
e_label = "Height_initial"
a_data = []
b_data = []
c_data = []
d_data = []
e_data = []
for i in range(0,len(data_labels)):
if a_label == data_labels[i]:
i_a = data_label_index[i]
if b_label == data_labels[i]:
i_b = data_label_index[i]
if c_label == data_labels[i]:
i_c = data_label_index[i]
if d_label == data_labels[i]:
i_d = data_label_index[i]
if e_label == data_labels[i]:
i_e = data_label_index[i]
print(data_labels)
print("i_a: " + str(i_a))
print("i_b: " + str(i_b))
print("i_c: " + str(i_c))
print("i_d: " + str(i_d))
print("i_e: " + str(i_e))
## put some error in here if it's still -1,-1
#marker_style = ["s","v","o","p","D"] #A list of several possible data markers
#marker_color = ["red","green","blue","cyan","magenta"]
#marker_size = [9,10,9,11,9]
#colors = itertools.cycle(['b', 'g', 'r', 'c', 'm', 'y', 'k'])
## Simple case; no clean-up / filtering required
for k in range(0,len(bcp_set)):
a_values = []
b_values = []
c_values = []
d_values = []
e_values = []
for i in range(0,len(bcp_row[k])):
#print(bcp_row[k])
#print(ix)
#print(iy)
#a_values.append(float(temporary_data[bcp_row[k][i]][i_a]))
#b_values.append(float(temporary_data[bcp_row[k][i]][i_b]))
#c_values.append(float(temporary_data[bcp_row[k][i]][i_c]))
#d_values.append(float(temporary_data[bcp_row[k][i]][i_d]))
a_values.append(temporary_data[bcp_row[k][i]][i_a])
b_values.append(temporary_data[bcp_row[k][i]][i_b])
c_values.append(temporary_data[bcp_row[k][i]][i_c])
d_values.append(temporary_data[bcp_row[k][i]][i_d])
e_values.append(temporary_data[bcp_row[k][i]][i_e])
a_data.append(a_values)
b_data.append(b_values)
c_data.append(c_values)
d_data.append(d_values)
e_data.append(e_values)
y_boxplot = []
x_boxplot = []
for k in range(0,len(bcp_set)):
y_points , b_points, c_points, d_points, e_points = clean_my_data_5(a_data[k], b_data[k], c_data[k], d_data[k], e_data[k])
x_points = []
# a_label = "opa_Hermans"
# b_label = "nm_per_pixel"
# c_label = "wfft_Period_nm"
# d_label = "Width_initial"
# e_label = "Height_initial"
for i in range(0,len(y_points)):
#x_points[i] = float(x_points[i])
stagger = (100.0 + 20.0*( float(k+1) - float(len(bcp_set))/2.0 ) / ( float(len(bcp_set))/2.0 ))/100.0
print(stagger)
x_points.append( float(d_points[i]) * float(e_points[i]) * float(b_points[i])* float(b_points[i]) / 1000000 * stagger)
#yerr_points.append( float(c_points[i]) * float(d_points[i]) )
y_boxplot.append(y_points)
x_boxplot.append(x_points)
#print(x_points)
#print(y_points)
plt.plot(x_points, y_points, marker_style[k], linestyle="none", markersize=marker_size[k], label=bcp_legend[k], color=marker_color[k], alpha=0.75)
#axis labels
axis_font = {'fontname':'Arial', 'size':'28', 'color':'black', 'weight':'bold'}
plt.xlabel(x_axis_label, **axis_font)
#####plt.ylabel(y_axis_label, **axis_font)
if plotlegend: plt.legend(loc='best', numpoints=1)
plt.yscale('log')
plt.ylim([10,3000])
plt.xscale('log')
#plt.xlim([1,1000])
plt.savefig(CSV_directory + plot_file_8 + '.png', bbox_inches='tight')
plt.savefig(CSV_directory + plot_file_8 + '.svg')
#plt.show()
plt.cla()
plt.clf()
"""
PLOT 9: (Averaged)
X: Image Area (um^2)
Y: Defect Density
"""
plot_file_9 = "9_DefectDensity_vs_ImageArea_um2"
x_axis_label = "Image Area (um )" #insert superscript 2 later
y_axis_label = "Defect Pair Density (um )" #insert superscript -2 later
x_points = []
y_points = []
''' Need to ensure that these labels are included in data_labels
Could just set data_labels = labels, but that slows down re-compiling the data'''
a_label = "Defect_Density_um"
b_label = "nm_per_pixel"
c_label = "wfft_Period_nm"
d_label = "Width_initial"
e_label = "Height_initial"
a_data = []
b_data = []
c_data = []
d_data = []
e_data = []
for i in range(0,len(data_labels)):
if a_label == data_labels[i]:
i_a = data_label_index[i]
if b_label == data_labels[i]:
i_b = data_label_index[i]
if c_label == data_labels[i]:
i_c = data_label_index[i]
if d_label == data_labels[i]:
i_d = data_label_index[i]
if e_label == data_labels[i]:
i_e = data_label_index[i]
print(data_labels)
print("i_a: " + str(i_a))
print("i_b: " + str(i_b))
print("i_c: " + str(i_c))
print("i_d: " + str(i_d))
print("i_e: " + str(i_e))
## put some error in here if it's still -1,-1
#marker_style = ["s","v","o","p","D"] #A list of several possible data markers
#marker_color = ["red","green","blue","cyan","magenta"]
#marker_size = [9,10,9,11,9]
#colors = itertools.cycle(['b', 'g', 'r', 'c', 'm', 'y', 'k'])
## Simple case; no clean-up / filtering required
for k in range(0,len(bcp_set)):
a_values = []
b_values = []
c_values = []
d_values = []
e_values = []
for i in range(0,len(bcp_row[k])):
#print(bcp_row[k])
#print(ix)
#print(iy)
#a_values.append(float(temporary_data[bcp_row[k][i]][i_a]))
#b_values.append(float(temporary_data[bcp_row[k][i]][i_b]))
#c_values.append(float(temporary_data[bcp_row[k][i]][i_c]))
#d_values.append(float(temporary_data[bcp_row[k][i]][i_d]))
a_values.append(temporary_data[bcp_row[k][i]][i_a])
b_values.append(temporary_data[bcp_row[k][i]][i_b])
c_values.append(temporary_data[bcp_row[k][i]][i_c])
d_values.append(temporary_data[bcp_row[k][i]][i_d])
e_values.append(temporary_data[bcp_row[k][i]][i_e])
a_data.append(a_values)
b_data.append(b_values)
c_data.append(c_values)
d_data.append(d_values)
e_data.append(e_values)
y_boxplot = []
x_boxplot = []
yerr_points = []
for k in range(0,len(bcp_set)):
y_points , b_points, c_points, d_points, e_points = clean_my_data_5(a_data[k], b_data[k], c_data[k], d_data[k], e_data[k])
x_points = []
# a_label = "opa_Hermans"
# b_label = "nm_per_pixel"
# c_label = "wfft_Period_nm"
# d_label = "Width_initial"
# e_label = "Height_initial"
for i in range(0,len(y_points)):
#x_points[i] = float(x_points[i])
stagger = (100.0 + 20.0*( float(k+1) - float(len(bcp_set))/2.0 ) / ( float(len(bcp_set))/2.0 ))/100.0
#print(stagger)
x_points.append( float(d_points[i]) * float(e_points[i]) * float(b_points[i])* float(b_points[i]) / 1000000 * stagger)
#yerr_points.append( float(c_points[i]) * float(d_points[i]) )
# GROUP & AVERAGE
x_new = []
for i in range(0,len(x_points)):
count = 0
for j in range(0,len(x_points)):
if x_points[i] == x_points[j]:
count += 1
if x_points[i] not in x_new and count > 1:
x_new.append(x_points[i])
y_new = []
yerr_new_neg = [] #for log plots
yerr_new = []
for j in range(0,len(x_new)):
y_temp = []
for i in range(0,len(x_points)):
if x_points[i] == x_new[j]:
y_temp.append(y_points[i])
y_new.append(np.mean(y_temp))
yerr_new.append(np.std(y_temp))
if np.mean(y_temp) - np.std(y_temp) <= 0:
y_neg_std = np.mean(y_temp) - 0.01
else:
y_neg_std = np.std(y_temp)
yerr_new_neg.append(y_neg_std)
x_points = x_new
y_points = y_new
yerr_points.append(yerr_new)
y_boxplot.append(y_points)
x_boxplot.append(x_points)
#print(x_points)
#print(y_points)
plt.plot(x_points, y_points, marker_style[k], linestyle="none", markersize=marker_size[k], label=bcp_legend[k], color=marker_color[k], alpha=0.75)
plt.errorbar(x_points,y_points,yerr=[yerr_new_neg, yerr_new], linestyle="none", color=marker_color[k])
print(bcp_legend[k])
print(x_points)
print(yerr_points[k])
#axis labels
axis_font = {'fontname':'Arial', 'size':'28', 'color':'black', 'weight':'bold'}
plt.xlabel(x_axis_label, **axis_font)
#####plt.ylabel(y_axis_label, **axis_font)
if plotlegend: plt.legend(loc='best', numpoints=1)
plt.yscale('log')
plt.ylim([10,3000])
plt.xscale('log')
#plt.xlim([1,1000])
plt.savefig(CSV_directory + plot_file_9 + '.png', bbox_inches='tight')
plt.savefig(CSV_directory + plot_file_9 + '.svg')
#plt.show()
plt.cla()
plt.clf()
"""
PLOT 10: (Averaged)
X: Image Area (um^2)
Y: Defect Density
"""
plot_file_10 = "10_DefectDensity_vs_ImageArea_um2"
x_axis_label = "Image Area (um )" #insert superscript 2 later
y_axis_label = "Defect Pair Density (um )" #insert superscript -2 later
x_points = []
y_points = []
''' Need to ensure that these labels are included in data_labels
Could just set data_labels = labels, but that slows down re-compiling the data'''
a_label = "Defect_Density_um"
b_label = "nm_per_pixel"
c_label = "wfft_Period_nm"
d_label = "Width_initial"
e_label = "Height_initial"
a_data = []
b_data = []
c_data = []
d_data = []
e_data = []
for i in range(0,len(data_labels)):
if a_label == data_labels[i]:
i_a = data_label_index[i]
if b_label == data_labels[i]:
i_b = data_label_index[i]
if c_label == data_labels[i]:
i_c = data_label_index[i]
if d_label == data_labels[i]:
i_d = data_label_index[i]
if e_label == data_labels[i]:
i_e = data_label_index[i]
print(data_labels)
print("i_a: " + str(i_a))
print("i_b: " + str(i_b))
print("i_c: " + str(i_c))
print("i_d: " + str(i_d))
print("i_e: " + str(i_e))
## put some error in here if it's still -1,-1
#marker_style = ["s","v","o","p","D"] #A list of several possible data markers
#marker_color = ["red","green","blue","cyan","magenta"]
#marker_size = [9,10,9,11,9]
#colors = itertools.cycle(['b', 'g', 'r', 'c', 'm', 'y', 'k'])
## Simple case; no clean-up / filtering required
for k in range(0,len(bcp_set)):
a_values = []
b_values = []
c_values = []
d_values = []
e_values = []
for i in range(0,len(bcp_row[k])):
#print(bcp_row[k])
#print(ix)
#print(iy)
#a_values.append(float(temporary_data[bcp_row[k][i]][i_a]))
#b_values.append(float(temporary_data[bcp_row[k][i]][i_b]))
#c_values.append(float(temporary_data[bcp_row[k][i]][i_c]))
#d_values.append(float(temporary_data[bcp_row[k][i]][i_d]))
a_values.append(temporary_data[bcp_row[k][i]][i_a])
b_values.append(temporary_data[bcp_row[k][i]][i_b])
c_values.append(temporary_data[bcp_row[k][i]][i_c])
d_values.append(temporary_data[bcp_row[k][i]][i_d])
e_values.append(temporary_data[bcp_row[k][i]][i_e])
a_data.append(a_values)
b_data.append(b_values)
c_data.append(c_values)
d_data.append(d_values)
e_data.append(e_values)
y_boxplot = []
x_boxplot = []
yerr_points = []
for k in range(0,len(bcp_set)):
y_points , b_points, c_points, d_points, e_points = clean_my_data_5(a_data[k], b_data[k], c_data[k], d_data[k], e_data[k])
x_points = []
# a_label = "opa_Hermans"
# b_label = "nm_per_pixel"
# c_label = "wfft_Period_nm"
# d_label = "Width_initial"
# e_label = "Height_initial"
for i in range(0,len(y_points)):
#x_points[i] = float(x_points[i])
stagger = (100.0 + 20.0*( float(k+1) - float(len(bcp_set))/2.0 ) / ( float(len(bcp_set))/2.0 ))/100.0
#print(stagger)
x_points.append( float(d_points[i]) * float(e_points[i]) * float(b_points[i])* float(b_points[i]) / 1000000 * stagger)
#yerr_points.append( float(c_points[i]) * float(d_points[i]) )
# GROUP & AVERAGE
x_new = []
for i in range(0,len(x_points)):
count = 0
for j in range(0,len(x_points)):
if x_points[i] == x_points[j]:
count += 1
if x_points[i] not in x_new and count > 1:
x_new.append(x_points[i])
x_new.sort()
y_new = []
yerr_new_neg = [] #for log plots
yerr_new = []
for j in range(0,len(x_new)):
y_temp = []
for i in range(0,len(x_points)):
if x_points[i] == x_new[j]:
y_temp.append(y_points[i])
y_new.append(np.mean(y_temp))
yerr_new.append(np.std(y_temp))
if np.mean(y_temp) - np.std(y_temp) <= 0:
y_neg_std = np.mean(y_temp) - 0.01
else:
y_neg_std = np.std(y_temp)
yerr_new_neg.append(y_neg_std)
#x_points = x_new
#y_points = y_new
yerr_points.append(yerr_new)
y_boxplot.append(y_points)
x_boxplot.append(x_points)
#print(x_points)
#print(y_points)
plt.plot(x_points, y_points, marker_style[k], linestyle="none", markersize=marker_size[k], label=bcp_legend[k], color=marker_color[k], alpha=0.2)
plt.plot(x_new, y_new, marker_style[k], linestyle="none", markersize=marker_size[k], label=bcp_legend[k], color=marker_color[k], alpha=0.75)
plt.errorbar(x_new,y_new,yerr=[yerr_new_neg, yerr_new], linestyle="--", color=marker_color[k])
print(bcp_legend[k])
print(x_points)
print(yerr_points[k])
#axis labels
# Finish and save PLOT 10: log-log axes, legend anchored top-right.
axis_font = {'fontname':'Arial', 'size':'28', 'color':'black', 'weight':'bold'}
plt.xlabel(x_axis_label, **axis_font)
#####plt.ylabel(y_axis_label, **axis_font)
# BUGFIX: 'top right' is not a valid matplotlib legend location string and
# raises ValueError on modern matplotlib; the valid spelling is 'upper right'.
if plotlegend: plt.legend(loc='upper right', numpoints=1, bbox_to_anchor=(1, 1))
plt.yscale('log')
plt.ylim([10,3000])
plt.xscale('log')
#plt.xlim([1,1000])
# Save raster and vector copies next to the source CSVs.
plt.savefig(CSV_directory + plot_file_10 + '.png', bbox_inches='tight')
plt.savefig(CSV_directory + plot_file_10 + '.svg')
#plt.show()
# Clear axes and figure so the next plot starts from a clean state.
plt.cla()
plt.clf()
"""
PLOT 11: (Averaged)
X: Image Area (um^2)
Y: Correlation Length
Description: Averaged with y-error bars, overlaid on semi-transparent raw datapoints.
"""
plot_file_11 = "11_CorrelationLength_vs_ImageArea_um2"
x_axis_label = "Image Area (um^2)" #insert superscript 2 later
y_axis_label = "Correlation Length (nm)" #insert superscript -2 later
x_points = []
y_points = []
''' Need to ensure that these labels are included in data_labels
Could just set data_labels = labels, but that slows down re-compiling the data'''
a_label = "correlation_length"
b_label = "nm_per_pixel"
c_label = "wfft_Period_nm"
d_label = "Width_initial"
e_label = "Height_initial"
a_data = []
b_data = []
c_data = []
d_data = []
e_data = []
for i in range(0,len(data_labels)):
if a_label == data_labels[i]:
i_a = data_label_index[i]
if b_label == data_labels[i]:
i_b = data_label_index[i]
if c_label == data_labels[i]:
i_c = data_label_index[i]
if d_label == data_labels[i]:
i_d = data_label_index[i]
if e_label == data_labels[i]:
i_e = data_label_index[i]
print(data_labels)
print("i_a: " + str(i_a))
print("i_b: " + str(i_b))
print("i_c: " + str(i_c))
print("i_d: " + str(i_d))
print("i_e: " + str(i_e))
## put some error in here if it's still -1,-1
#marker_style = ["s","v","o","p","D"] #A list of several possible data markers
#marker_color = ["red","green","blue","cyan","magenta"]
#marker_size = [9,10,9,11,9]
#colors = itertools.cycle(['b', 'g', 'r', 'c', 'm', 'y', 'k'])
## Simple case; no clean-up / filtering required
for k in range(0,len(bcp_set)):
a_values = []
b_values = []
c_values = []
d_values = []
e_values = []
for i in range(0,len(bcp_row[k])):
#print(bcp_row[k])
#print(ix)
#print(iy)
#a_values.append(float(temporary_data[bcp_row[k][i]][i_a]))
#b_values.append(float(temporary_data[bcp_row[k][i]][i_b]))
#c_values.append(float(temporary_data[bcp_row[k][i]][i_c]))
#d_values.append(float(temporary_data[bcp_row[k][i]][i_d]))
a_values.append(temporary_data[bcp_row[k][i]][i_a])
b_values.append(temporary_data[bcp_row[k][i]][i_b])
c_values.append(temporary_data[bcp_row[k][i]][i_c])
d_values.append(temporary_data[bcp_row[k][i]][i_d])
e_values.append(temporary_data[bcp_row[k][i]][i_e])
a_data.append(a_values)
b_data.append(b_values)
c_data.append(c_values)
d_data.append(d_values)
e_data.append(e_values)
y_boxplot = []
x_boxplot = []
yerr_points = []
for k in range(0,len(bcp_set)):
a_points , b_points, c_points, d_points, e_points = clean_my_data_5(a_data[k], b_data[k], c_data[k], d_data[k], e_data[k])
x_points = []
y_points = []
# a_label = "opa_Hermans"
# b_label = "nm_per_pixel"
# c_label = "wfft_Period_nm"
# d_label = "Width_initial"
# e_label = "Height_initial"
for i in range(0,len(a_points)):
#x_points[i] = float(x_points[i])
stagger = (100.0 + 20.0*( float(k+1) - float(len(bcp_set))/2.0 ) / ( float(len(bcp_set))/2.0 ))/100.0
#print(stagger)
y_points.append(float(a_points[i])*float(b_points[i]))
x_points.append( float(d_points[i]) * float(e_points[i]) * float(b_points[i])* float(b_points[i]) / 1000000 * stagger)
#yerr_points.append( float(c_points[i]) * float(d_points[i]) )
# GROUP & AVERAGE
x_new = []
for i in range(0,len(x_points)):
count = 0
for j in range(0,len(x_points)):
if x_points[i] == x_points[j]:
count += 1
if x_points[i] not in x_new and count > 1:
x_new.append(x_points[i])
x_new.sort()
y_new = []
yerr_new_neg = [] #for log plots
yerr_new = []
for j in range(0,len(x_new)):
y_temp = []
for i in range(0,len(x_points)):
if x_points[i] == x_new[j]:
y_temp.append(y_points[i])
y_new.append(np.mean(y_temp))
yerr_new.append(np.std(y_temp))
if np.mean(y_temp) - np.std(y_temp) <= 0:
y_neg_std = np.mean(y_temp) - 0.01
else:
y_neg_std = np.std(y_temp)
yerr_new_neg.append(y_neg_std)
#x_points = x_new
#y_points = y_new
yerr_points.append(yerr_new)
y_boxplot.append(y_points)
x_boxplot.append(x_points)
#print(x_points)
#print(y_points)
plt.plot(x_points, y_points, marker_style[k], linestyle="none", markersize=marker_size[k], label=bcp_legend[k], color=marker_color[k], alpha=0.2)
plt.plot(x_new, y_new, marker_style[k], linestyle="none", markersize=marker_size[k], label=bcp_legend[k], color=marker_color[k], alpha=0.75)
plt.errorbar(x_new,y_new,yerr=[yerr_new_neg, yerr_new], linestyle="--", color=marker_color[k])
print(bcp_legend[k])
print(x_points)
print(yerr_points[k])
#axis labels
# Finish and save PLOT 11: log-log axes, legend anchored top-right.
axis_font = {'fontname':'Arial', 'size':'28', 'color':'black', 'weight':'bold'}
plt.xlabel(x_axis_label, **axis_font)
#####plt.ylabel(y_axis_label, **axis_font)
# BUGFIX: 'top right' is not a valid matplotlib legend location string and
# raises ValueError on modern matplotlib; the valid spelling is 'upper right'.
if plotlegend: plt.legend(loc='upper right', numpoints=1, bbox_to_anchor=(1, 1))
plt.yscale('log')
plt.ylim([10,3000])
plt.xscale('log')
#plt.xlim([1,1000])
# Save raster and vector copies next to the source CSVs.
plt.savefig(CSV_directory + plot_file_11 + '.png', bbox_inches='tight')
plt.savefig(CSV_directory + plot_file_11 + '.svg')
#plt.show()
# Clear axes and figure so the next plot starts from a clean state.
plt.cla()
plt.clf()
"""
PLOT 12: (Averaged)
X: Image Area (um^2)
Y: LER
Description: Averaged with y-error bars, overlaid on semi-transparent raw datapoints.
"""
plot_file_12 = "12_LineEdge_StDevNorm_vs_ImageArea_um2"
x_axis_label = "Image Area (um^2)" #insert superscript 2 later
y_axis_label = "Line Edge: St.Dev/Mean.Width (nm/nm)" #insert superscript -2 later
x_points = []
y_points = []
''' Need to ensure that these labels are included in data_labels
Could just set data_labels = labels, but that slows down re-compiling the data'''
a_label = "V.E.sigma.avg"
b_label = "nm_per_pixel"
c_label = "V.W.avg.avg"
d_label = "Width_initial"
e_label = "Height_initial"
a_data = []
b_data = []
c_data = []
d_data = []
e_data = []
for i in range(0,len(data_labels)):
if a_label == data_labels[i]:
i_a = data_label_index[i]
if b_label == data_labels[i]:
i_b = data_label_index[i]
if c_label == data_labels[i]:
i_c = data_label_index[i]
if d_label == data_labels[i]:
i_d = data_label_index[i]
if e_label == data_labels[i]:
i_e = data_label_index[i]
# Report the resolved column indices; a label missing from data_labels
# would leave its index at the initial sentinel and show up here.
print(data_labels)
print("i_a: " + str(i_a))
print("i_b: " + str(i_b))
print("i_c: " + str(i_c))
print("i_d: " + str(i_d))
print("i_e: " + str(i_e))
## put some error in here if it's still -1,-1
#marker_style = ["s","v","o","p","D"] #A list of several possible data markers
#marker_color = ["red","green","blue","cyan","magenta"]
#marker_size = [9,10,9,11,9]
#colors = itertools.cycle(['b', 'g', 'r', 'c', 'm', 'y', 'k'])
## Simple case; no clean-up / filtering required
# Gather the five raw columns for every sample row of each BCP set.
for k in range(0,len(bcp_set)):
    a_values = []
    b_values = []
    c_values = []
    d_values = []
    e_values = []
    for i in range(0,len(bcp_row[k])):
        #print(bcp_row[k])
        #print(ix)
        #print(iy)
        #a_values.append(float(temporary_data[bcp_row[k][i]][i_a]))
        #b_values.append(float(temporary_data[bcp_row[k][i]][i_b]))
        #c_values.append(float(temporary_data[bcp_row[k][i]][i_c]))
        #d_values.append(float(temporary_data[bcp_row[k][i]][i_d]))
        a_values.append(temporary_data[bcp_row[k][i]][i_a])
        b_values.append(temporary_data[bcp_row[k][i]][i_b])
        c_values.append(temporary_data[bcp_row[k][i]][i_c])
        d_values.append(temporary_data[bcp_row[k][i]][i_d])
        e_values.append(temporary_data[bcp_row[k][i]][i_e])
    a_data.append(a_values)
    b_data.append(b_values)
    c_data.append(c_values)
    d_data.append(d_values)
    e_data.append(e_values)
# Per-BCP-set scatter + grouped/averaged overlay with error bars.
y_boxplot = []
x_boxplot = []
yerr_points = []
for k in range(0,len(bcp_set)):
    # Drop rows where any of the five columns is missing/unparseable.
    a_points , b_points, c_points, d_points, e_points = clean_my_data_5(a_data[k], b_data[k], c_data[k], d_data[k], e_data[k])
    x_points = []
    y_points = []
    # a_label = "opa_Hermans"
    # b_label = "nm_per_pixel"
    # c_label = "wfft_Period_nm"
    # d_label = "Width_initial"
    # e_label = "Height_initial"
    for i in range(0,len(a_points)):
        # y = a / c; x = image area in um^2 (width*height in px, scaled by
        # nm/px twice, /1e6 for nm^2 -> um^2), staggered per set so the
        # overlaid series do not sit exactly on top of each other.
        y_points.append( float(a_points[i]) / float(c_points[i]) )
        #x_points[i] = float(x_points[i])
        stagger = (100.0 + 20.0*( float(k+1) - float(len(bcp_set))/2.0 ) / ( float(len(bcp_set))/2.0 ))/100.0
        #print(stagger)
        x_points.append( float(d_points[i]) * float(e_points[i]) * float(b_points[i])* float(b_points[i]) / 1000000 * stagger)
        #yerr_points.append( float(c_points[i]) * float(d_points[i]) )
    # GROUP & AVERAGE
    # Collect x values that occur more than once; only those get averaged.
    x_new = []
    for i in range(0,len(x_points)):
        count = 0
        for j in range(0,len(x_points)):
            if x_points[i] == x_points[j]:
                count += 1
        if x_points[i] not in x_new and count > 1:
            x_new.append(x_points[i])
    x_new.sort()
    y_new = []
    yerr_new_neg = [] #for log plots
    yerr_new = []
    for j in range(0,len(x_new)):
        y_temp = []
        for i in range(0,len(x_points)):
            if x_points[i] == x_new[j]:
                y_temp.append(y_points[i])
        y_new.append(np.mean(y_temp))
        yerr_new.append(np.std(y_temp))
        # Clamp the lower error bar so mean - err stays positive (needed if
        # the y axis is ever switched to log scale).
        if np.mean(y_temp) - np.std(y_temp) <= 0:
            y_neg_std = np.mean(y_temp) - 0.01
        else:
            y_neg_std = np.std(y_temp)
        yerr_new_neg.append(y_neg_std)
    #x_points = x_new
    #y_points = y_new
    yerr_points.append(yerr_new)
    y_boxplot.append(y_points)
    x_boxplot.append(x_points)
    #print(x_points)
    #print(y_points)
    # Faint raw points, opaque averaged points, dashed error bars.
    plt.plot(x_points, y_points, marker_style[k], linestyle="none", markersize=marker_size[k], label=bcp_legend[k], color=marker_color[k], alpha=0.2)
    plt.plot(x_new, y_new, marker_style[k], linestyle="none", markersize=marker_size[k], label=bcp_legend[k], color=marker_color[k], alpha=0.75)
    plt.errorbar(x_new,y_new,yerr=[yerr_new_neg, yerr_new], linestyle="--", color=marker_color[k])
    print(bcp_legend[k])
    print(x_points)
    print(yerr_points[k])
#axis labels
axis_font = {'fontname':'Arial', 'size':'28', 'color':'black', 'weight':'bold'}
plt.xlabel(x_axis_label, **axis_font)
#####plt.ylabel(y_axis_label, **axis_font)
# NOTE(review): 'top right' is not a valid matplotlib legend loc ('upper
# right' is) -- matplotlib warns and falls back to 'best'; verify intent.
if plotlegend: plt.legend(loc='top right', numpoints=1, bbox_to_anchor=(1, 1))
#plt.yscale('log')
plt.ylim([0.0,0.25])
plt.xscale('log')
#plt.xlim([1,1000])
plt.savefig(CSV_directory + plot_file_12 + '.png', bbox_inches='tight')
plt.savefig(CSV_directory + plot_file_12 + '.svg')
#plt.show()
plt.cla()
plt.clf()
"""
PLOT 13: (Averaged)
X: Image Area (um^2)
Y: LWR/W
Description: Averaged with y-error bars, overlaid on semi-transparent raw datapoints.
"""
plot_file_13 = "13_LineWidth_StDevNorm_vs_ImageArea_um2"
x_axis_label = "Image Area (um^2)" #insert superscript 2 later
y_axis_label = "Line Edge: St.Dev/Mean.Width (nm/nm)" #insert superscript -2 later
x_points = []
y_points = []
''' Need to ensure that these labels are included in data_labels
Could just set data_labels = labels, but that slows down re-compiling the data'''
a_label = "V.W.sigma.avg"
b_label = "nm_per_pixel"
c_label = "V.W.avg.avg"
d_label = "Width_initial"
e_label = "Height_initial"
a_data = []
b_data = []
c_data = []
d_data = []
e_data = []
for i in range(0,len(data_labels)):
if a_label == data_labels[i]:
i_a = data_label_index[i]
if b_label == data_labels[i]:
i_b = data_label_index[i]
if c_label == data_labels[i]:
i_c = data_label_index[i]
if d_label == data_labels[i]:
i_d = data_label_index[i]
if e_label == data_labels[i]:
i_e = data_label_index[i]
print(data_labels)
print("i_a: " + str(i_a))
print("i_b: " + str(i_b))
print("i_c: " + str(i_c))
print("i_d: " + str(i_d))
print("i_e: " + str(i_e))
## put some error in here if it's still -1,-1
#marker_style = ["s","v","o","p","D"] #A list of several possible data markers
#marker_color = ["red","green","blue","cyan","magenta"]
#marker_size = [9,10,9,11,9]
#colors = itertools.cycle(['b', 'g', 'r', 'c', 'm', 'y', 'k'])
## Simple case; no clean-up / filtering required
for k in range(0,len(bcp_set)):
a_values = []
b_values = []
c_values = []
d_values = []
e_values = []
for i in range(0,len(bcp_row[k])):
#print(bcp_row[k])
#print(ix)
#print(iy)
#a_values.append(float(temporary_data[bcp_row[k][i]][i_a]))
#b_values.append(float(temporary_data[bcp_row[k][i]][i_b]))
#c_values.append(float(temporary_data[bcp_row[k][i]][i_c]))
#d_values.append(float(temporary_data[bcp_row[k][i]][i_d]))
a_values.append(temporary_data[bcp_row[k][i]][i_a])
b_values.append(temporary_data[bcp_row[k][i]][i_b])
c_values.append(temporary_data[bcp_row[k][i]][i_c])
d_values.append(temporary_data[bcp_row[k][i]][i_d])
e_values.append(temporary_data[bcp_row[k][i]][i_e])
a_data.append(a_values)
b_data.append(b_values)
c_data.append(c_values)
d_data.append(d_values)
e_data.append(e_values)
y_boxplot = []
x_boxplot = []
yerr_points = []
for k in range(0,len(bcp_set)):
a_points , b_points, c_points, d_points, e_points = clean_my_data_5(a_data[k], b_data[k], c_data[k], d_data[k], e_data[k])
x_points = []
y_points = []
# a_label = "opa_Hermans"
# b_label = "nm_per_pixel"
# c_label = "wfft_Period_nm"
# d_label = "Width_initial"
# e_label = "Height_initial"
for i in range(0,len(a_points)):
y_points.append( float(a_points[i]) / float(c_points[i]) )
#x_points[i] = float(x_points[i])
stagger = (100.0 + 20.0*( float(k+1) - float(len(bcp_set))/2.0 ) / ( float(len(bcp_set))/2.0 ))/100.0
#print(stagger)
x_points.append( float(d_points[i]) * float(e_points[i]) * float(b_points[i])* float(b_points[i]) / 1000000 * stagger)
#yerr_points.append( float(c_points[i]) * float(d_points[i]) )
# GROUP & AVERAGE
x_new = []
for i in range(0,len(x_points)):
count = 0
for j in range(0,len(x_points)):
if x_points[i] == x_points[j]:
count += 1
if x_points[i] not in x_new and count > 1:
x_new.append(x_points[i])
x_new.sort()
y_new = []
yerr_new_neg = [] #for log plots
yerr_new = []
for j in range(0,len(x_new)):
y_temp = []
for i in range(0,len(x_points)):
if x_points[i] == x_new[j]:
y_temp.append(y_points[i])
y_new.append(np.mean(y_temp))
yerr_new.append(np.std(y_temp))
if np.mean(y_temp) - np.std(y_temp) <= 0:
y_neg_std = np.mean(y_temp) - 0.01
else:
y_neg_std = np.std(y_temp)
yerr_new_neg.append(y_neg_std)
#x_points = x_new
#y_points = y_new
yerr_points.append(yerr_new)
y_boxplot.append(y_points)
x_boxplot.append(x_points)
#print(x_points)
#print(y_points)
plt.plot(x_points, y_points, marker_style[k], linestyle="none", markersize=marker_size[k], label=bcp_legend[k], color=marker_color[k], alpha=0.2)
plt.plot(x_new, y_new, marker_style[k], linestyle="none", markersize=marker_size[k], label=bcp_legend[k], color=marker_color[k], alpha=0.75)
plt.errorbar(x_new,y_new,yerr=[yerr_new_neg, yerr_new], linestyle="--", color=marker_color[k])
print(bcp_legend[k])
print(x_points)
print(yerr_points[k])
#axis labels
axis_font = {'fontname':'Arial', 'size':'28', 'color':'black', 'weight':'bold'}
plt.xlabel(x_axis_label, **axis_font)
#####plt.ylabel(y_axis_label, **axis_font)
if plotlegend: plt.legend(loc='top right', numpoints=1, bbox_to_anchor=(1, 1))
#plt.yscale('log')
plt.ylim([0.0,0.25])
plt.xscale('log')
#plt.xlim([1,1000])
plt.savefig(CSV_directory + plot_file_13 + '.png', bbox_inches='tight')
plt.savefig(CSV_directory + plot_file_13 + '.svg')
#plt.show()
plt.cla()
plt.clf()
| mit |
matthiaskoenig/sbmlutils | src/sbmlutils/manipulation/interpolation.py | 1 | 13801 | """Create SBML/antimony files for interpolation of datasets.
https://github.com/allyhume/SBMLDataTools
https://github.com/allyhume/SBMLDataTools.git
TODO: fix composition with existing models
TODO: support coupling with existing models via comp
The functionality is very useful, but only if this can be applied to existing
models in a simple manner.
"""
import logging
from pathlib import Path
from typing import Any, List, Optional, Tuple, Union
import libsbml
import pandas as pd
from sbmlutils.io.sbml import write_sbml
from sbmlutils.validation import validate_doc
logger = logging.getLogger(__name__)
notes = libsbml.XMLNode.convertStringToXMLNode(
"""
<body xmlns='http://www.w3.org/1999/xhtml'>
<h1>Data interpolator</h1>
<h2>Description</h2>
<p>This is a SBML submodel for interpolation of spreadsheet data.</p>
<div class="dc:publisher">This file has been produced by
<a href="https://livermetabolism.com/contact.html" title="Matthias Koenig" target="_blank">Matthias Koenig</a>.
</div>
<h2>Terms of use</h2>
<div class="dc:rightsHolder">Copyright © 2016-2020 sbmlutils.</div>
<div class="dc:license">
<p>Redistribution and use of any part of this model, with or without modification, are permitted provided that
the following conditions are met:
<ol>
<li>Redistributions of this SBML file must retain the above copyright notice, this list of conditions
and the following disclaimer.</li>
<li>Redistributions in a different form must reproduce the above copyright notice, this list of
conditions and the following disclaimer in the documentation and/or other materials provided
with the distribution.</li>
</ol>
This model is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.</p>
</div>
</body>
"""
)
# available interpolation methods
INTERPOLATION_CONSTANT = "constant"
INTERPOLATION_LINEAR = "linear"
INTERPOLATION_CUBIC_SPLINE = "cubic spline"


class Interpolator:
    """Interpolator class handles the interpolation of given data series.

    Two data series and the type of interpolation are provided; ``formula``
    renders the interpolation as an SBML piecewise formula string in which
    the independent variable is ``time``.
    """

    def __init__(
        self,
        x: pd.Series,
        y: pd.Series,
        z: Optional[pd.Series] = None,
        method: str = INTERPOLATION_CONSTANT,
    ):
        """Store the data series and the interpolation method.

        :param x: independent variable (typically time), ascending
        :param y: dependent variable to interpolate
        :param z: optional additional series (only its name is used)
        :param method: one of INTERPOLATION_CONSTANT, INTERPOLATION_LINEAR,
            INTERPOLATION_CUBIC_SPLINE
        """
        self.x: pd.Series = x
        self.y: pd.Series = y
        self.z: Optional[pd.Series] = z
        self.method = method

    def __str__(self) -> str:
        """Convert to string."""
        s = (
            "--------------------------\n"
            "Interpolator<{}>\n"
            "--------------------------\n"
            "{}\n"
            "{}\n"
            "formula:\n {}\n".format(self.method, self.x, self.y, self.formula())
        )
        return s

    @property
    def xid(self) -> str:
        """X id (name of the independent series)."""
        return str(self.x.name)

    @property
    def yid(self) -> str:
        """Y id (name of the dependent series)."""
        return str(self.y.name)

    @property
    def zid(self) -> str:
        """Z id (name of the optional z series; requires z to be set)."""
        return str(self.z.name)

    def formula(self) -> str:
        """Get piecewise formula string for the selected method.

        :raises ValueError: if ``self.method`` is not a supported method
        """
        # FIX: compare strings by value ('=='), not identity ('is').  The
        # identity check only worked by accident of CPython string interning
        # and fails for equal method strings constructed at runtime.
        if self.method == INTERPOLATION_CONSTANT:
            return Interpolator._formula_constant(self.x, self.y)
        elif self.method == INTERPOLATION_LINEAR:
            return Interpolator._formula_linear(self.x, self.y)
        elif self.method == INTERPOLATION_CUBIC_SPLINE:
            return Interpolator._formula_cubic_spline(self.x, self.y)
        # FIX: previously an unsupported method fell through and raised an
        # opaque UnboundLocalError; raise a descriptive error instead.
        raise ValueError(
            "Unsupported interpolation method: {!r}".format(self.method)
        )

    @staticmethod
    def _formula_cubic_spline(x: pd.Series, y: pd.Series) -> str:
        """Get formula for the cubic spline.

        This is more complicated and requires the coefficients
        from the spline interpolation.
        """
        # calculate spline coefficients
        coeffs: List[Tuple[float]] = Interpolator._natural_spline_coeffs(x, y)

        # create piecewise terms: one cubic per interval [x_k, x_{k+1}]
        items: List[str] = []
        for k in range(len(x) - 1):
            x1 = x.iloc[k]
            x2 = x.iloc[k + 1]
            (a, b, c, d) = coeffs[k]  # type: ignore
            formula = f"{d}*(time-{x1})^3 + {c}*(time-{x1})^2 + {b}*(time-{x1}) + {a}"  # type: ignore
            condition = f"time >= {x1} && time <= {x2}"
            s = f"{formula}, {condition}"
            items.append(s)
        # otherwise (outside the data range)
        items.append("0.0")
        return "piecewise({})".format(", ".join(items))

    @staticmethod
    def _natural_spline_coeffs(X: pd.Series, Y: pd.Series) -> List[Tuple[float]]:
        """Calculate natural spline coefficients.

        Calculation of coefficients for
            di*(x - xi)^3 + ci*(x - xi)^2 + bi*(x - xi) + ai
        for x in [xi, xi+1]

        Natural splines use a fixed second derivative, such that S''(x0)=S''(xn)=0,
        whereas clamped splines use fixed bounding conditions for S(x) at x0 and xn.
        A tri-diagonal matrix is constructed which can be efficiently solved.

        Equations and derivation from:
            https://jayemmcee.wordpress.com/cubic-splines/
            http://pastebin.com/EUs31Hvh
        """
        np1 = len(X)
        n = np1 - 1
        # FIX: use positional access (iloc) so non-default pandas indices
        # (e.g. after DataFrame.sort_values) cannot break label lookups;
        # identical results for the default RangeIndex.
        xs = list(X.iloc[:])
        a = list(Y.iloc[:])
        b = [0.0] * n
        d = [0.0] * n
        h = [xs[i + 1] - xs[i] for i in range(n)]
        alpha = [0.0] * n
        for i in range(1, n):
            alpha[i] = 3 / h[i] * (a[i + 1] - a[i]) - 3 / h[i - 1] * (a[i] - a[i - 1])
        c = [0.0] * np1
        L = [0.0] * np1
        u = [0.0] * np1
        z = [0.0] * np1
        # forward sweep of the tri-diagonal solve
        L[0] = 1.0
        u[0] = z[0] = 0.0
        for i in range(1, n):
            L[i] = 2 * (xs[i + 1] - xs[i - 1]) - h[i - 1] * u[i - 1]
            u[i] = h[i] / L[i]
            z[i] = (alpha[i] - h[i - 1] * z[i - 1]) / L[i]
        L[n] = 1.0
        z[n] = c[n] = 0.0
        # back substitution
        for j in range(n - 1, -1, -1):
            c[j] = z[j] - u[j] * c[j + 1]
            b[j] = (a[j + 1] - a[j]) / h[j] - (h[j] * (c[j + 1] + 2 * c[j])) / 3
            d[j] = (c[j + 1] - c[j]) / (3 * h[j])
        # store coefficients
        coeffs: List[Tuple[float]] = []
        for i in range(n):
            coeffs.append((a[i], b[i], c[i], d[i]))  # type: ignore
        return coeffs

    @staticmethod
    def _formula_linear(col1: pd.Series, col2: pd.Series) -> str:
        """Linear interpolation between data points.

        Returns the piecewise formula string for the linear interpolation.
        """
        items = []
        for k in range(len(col1) - 1):
            x1 = col1.iloc[k]
            x2 = col1.iloc[k + 1]
            y1 = col2.iloc[k]
            y2 = col2.iloc[k + 1]
            m = (y2 - y1) / (x2 - x1)  # slope of the segment
            formula = "{} + {}*(time-{})".format(y1, m, x1)
            condition = "time >= {} && time < {}".format(x1, x2)
            s = "{}, {}".format(formula, condition)
            items.append(s)
        # last value after last time
        s = "{}, time >= {}".format(col2.iloc[len(col1) - 1], col1.iloc[len(col1) - 1])
        items.append(s)
        # otherwise (before first time)
        items.append("0.0")
        return "piecewise({})".format(", ".join(items))

    @staticmethod
    def _formula_constant(col1: pd.Series, col2: pd.Series) -> str:
        """Define constant value between data points.

        Returns the piecewise formula string for the constant interpolation.

        piecewise x1, y1, [x2, y2, ][...][z]
        A piecewise function: if (y1), x1. Otherwise, if (y2), x2, etc. Otherwise, z.
        """
        items = []
        # first value before first time
        s = "{}, time < {}".format(col2.iloc[0], col1.iloc[0])
        items.append(s)
        # intermediate values
        for k in range(len(col1) - 1):
            condition = "time >= {} && time < {}".format(col1.iloc[k], col1.iloc[k + 1])
            formula = "{}".format(col2.iloc[k])
            s = "{}, {}".format(formula, condition)
            items.append(s)
        # last value after last time
        s = "{}, time >= {}".format(col2.iloc[len(col1) - 1], col1.iloc[len(col1) - 1])
        items.append(s)
        # otherwise
        items.append("0.0")
        return "piecewise({})".format(", ".join(items))
class Interpolation:
    """Create SBML which interpolates the given data.

    The second to last components are interpolated against the first component
    (typically time), one Interpolator per dependent column.
    """

    def __init__(self, data: pd.DataFrame, method: str = "linear"):
        # doc/model are populated lazily by _create_sbml()
        self.doc: libsbml.SBMLDocument = None
        self.model: libsbml.Model = None
        self.data: pd.DataFrame = data
        self.method: str = method
        self.interpolators: List[Interpolator] = []
        self.validate_data()

    def validate_data(self) -> None:
        """Validate the input data.

        * The data is expected to have at least 2 columns.
        * The data is expected to have at least three data rows.
        * The first column should be in ascending order.

        Issues are reported as warnings; an unsorted first column is fixed
        in place by sorting the data.
        """
        # more than 1 column required
        if len(self.data.columns) < 2:
            logger.warning(
                "Interpolation data has <2 columns. At least 2 columns required."
            )
        # at least 3 rows required
        if len(self.data) < 3:
            logger.warning("Interpolation data <3 rows. At least 3 rows required.")

        # first column has to be ascending (times)
        def is_sorted(df: pd.DataFrame, colname: str) -> bool:
            return bool(pd.Index(df[colname]).is_monotonic)

        if not is_sorted(self.data, colname=self.data.columns[0]):
            logger.warning("First column should contain ascending values.")
            # NOTE(review): sort_values keeps the original index; downstream
            # positional access (iloc) is unaffected -- confirm for any new
            # label-based access.
            self.data = self.data.sort_values(by=self.data.columns[0])

    @staticmethod
    def from_csv(
        csv_file: Union[Path, str], method: str = "linear", sep: str = ","
    ) -> "Interpolation":
        """Interpolation object from csv file."""
        data: pd.DataFrame = pd.read_csv(csv_file, sep=sep)
        return Interpolation(data=data, method=method)

    @staticmethod
    def from_tsv(tsv_file: Union[Path, str], method: str = "linear") -> "Interpolation":
        """Interpolate object from tsv file."""
        return Interpolation.from_csv(csv_file=tsv_file, method=method, sep="\t")

    # --- SBML & Interpolation --------------------
    def write_sbml_to_file(self, sbml_out: Path) -> None:
        """Write the SBML file.

        :param sbml_out: Path to SBML file
        :return:
        """
        self._create_sbml()
        write_sbml(doc=self.doc, filepath=sbml_out)

    def write_sbml_to_string(self) -> str:
        """Write the SBML file.

        :return: SBML str
        """
        self._create_sbml()
        return write_sbml(self.doc, filepath=None)

    def _create_sbml(self) -> None:
        """Create the SBMLDocument.

        Builds the model, adds one interpolator (parameter + assignment
        rule) per dependent column, then validates the document.
        """
        self._init_sbml_model()
        self.interpolators = Interpolation.create_interpolators(self.data, self.method)
        for interpolator in self.interpolators:
            Interpolation.add_interpolator_to_model(interpolator, self.model)
        # validation of SBML document (units are not enforced)
        validate_doc(self.doc, units_consistency=False)

    def _init_sbml_model(self) -> None:
        """Create and initialize the SBML model (L3V1 with comp package)."""
        # FIXME: support arbitrary levels and versions
        sbmlns = libsbml.SBMLNamespaces(3, 1)
        sbmlns.addPackageNamespace("comp", 1)
        doc: libsbml.SBMLDocument = libsbml.SBMLDocument(sbmlns)
        doc.setPackageRequired("comp", True)
        self.doc = doc
        model: libsbml.Model = doc.createModel()
        model.setNotes(notes)  # module-level notes XHTML
        model.setId(f"Interpolation_{self.method}")
        model.setName(f"Interpolation_{self.method}")
        self.model = model

    @staticmethod
    def create_interpolators(data: pd.DataFrame, method: str) -> List[Interpolator]:
        """Create all interpolators for the given data set.

        The columns 1, ... (Ncol-1) are interpolated against
        column 0.
        """
        interpolators: List[Interpolator] = []
        columns = data.columns
        time = data[columns[0]]
        for k in range(1, len(columns)):
            interpolator = Interpolator(x=time, y=data[columns[k]], method=method)
            interpolators.append(interpolator)
        return interpolators

    @staticmethod
    def add_interpolator_to_model(
        interpolator: "Interpolator", model: libsbml.Model
    ) -> None:
        """Add interpolator to model.

        The parameters, formulas and rules have to be added to the SBML model.
        Any existing parameter or assignment rule with the same id is removed
        first, so repeated calls replace rather than duplicate.

        :param interpolator:
        :param model: Model
        :return:
        """
        # create parameter
        pid = interpolator.yid
        # if parameter exists remove it
        if model.getParameter(pid):
            logger.warning(
                "Model contains parameter: {}. Parameter is removed.".format(pid)
            )
            model.removeParameter(pid)
        # if assignment rule exists remove it (break after removal: at most
        # one assignment rule per variable is legal in SBML)
        for rule in model.getListOfRules():
            if rule.isAssignment():
                if rule.getVariable() == pid:
                    model.removeRule(rule)
                    break
        p = model.createParameter()
        p.setId(pid)
        p.setName(pid)
        p.setConstant(False)
        # create rule assigning the piecewise formula to the parameter
        rule = model.createAssignmentRule()
        rule.setVariable(pid)
        formula = interpolator.formula()
        ast_node = libsbml.parseL3FormulaWithModel(formula, model)
        if ast_node is None:
            # parse failure: log and leave the rule without math
            logger.warning(libsbml.getLastParseL3Error())
        else:
            rule.setMath(ast_node)
        # TODO: add ports for connection with other model
| lgpl-3.0 |
fabianp/scikit-learn | doc/tutorial/text_analytics/solutions/exercise_02_sentiment.py | 254 | 2795 | """Build a sentiment analysis / polarity model
Sentiment analysis can be casted as a binary text classification problem,
that is fitting a linear classifier on features extracted from the text
of the user messages so as to guess wether the opinion of the author is
positive or negative.
In this examples we will use a movie review dataset.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
if __name__ == "__main__":
# NOTE: we put the following in a 'if __name__ == "__main__"' protected
# block to be able to use a multi-core grid search that also works under
# Windows, see: http://docs.python.org/library/multiprocessing.html#windows
# The multiprocessing module is used as the backend of joblib.Parallel
# that is used when n_jobs != 1 in GridSearchCV
# the training data folder must be passed as first argument
movie_reviews_data_folder = sys.argv[1]
dataset = load_files(movie_reviews_data_folder, shuffle=False)
print("n_samples: %d" % len(dataset.data))
# split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.25, random_state=None)
# TASK: Build a vectorizer / classifier pipeline that filters out tokens
# that are too rare or too frequent
pipeline = Pipeline([
('vect', TfidfVectorizer(min_df=3, max_df=0.95)),
('clf', LinearSVC(C=1000)),
])
# TASK: Build a grid search to find out whether unigrams or bigrams are
# more useful.
# Fit the pipeline on the training set using grid search for the parameters
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
}
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1)
grid_search.fit(docs_train, y_train)
# TASK: print the cross-validated scores for the each parameters set
# explored by the grid search
print(grid_search.grid_scores_)
# TASK: Predict the outcome on the testing set and store it in a variable
# named y_predicted
y_predicted = grid_search.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Print and plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
# import matplotlib.pyplot as plt
# plt.matshow(cm)
# plt.show()
| bsd-3-clause |
uci-cbcl/tree-hmm | treehmm/__init__.py | 2 | 65101 | #!/usr/bin/env python
"""
Variational Bayes method to solve phylgoenetic HMM for histone modifications
Need to:
* Preprocess
** load each dataset
** call significant sites for each dataset (vs. one control dataset)
** save out resulting histogrammed data
* Learn
** Init parameters randomly
** E step: optimize each q_{ij} for fixed \Theta
** M step: optimize \Theta for fixed Q
for a complete hg19, we have:
T = 15,478,482
I = 9
K = 15
L = 9
\Theta is:
e = K * 2**L
\theta = K**2 * K
\alpha = K * K
\beta = K * K
\gamma = K
X = I * T * L * 1 byte for bool => 2050 MB RAM
for mf:
Q = I * T * K * 4 bytes for float64 =>
15181614 * 9 * 15 * (4 bytes) / 1e6 = 8198 MB RAM
\therefore should be okay for 12GB RAM
for poc:
\Theta = T * K * K * 4 bytes => 30 GB RAM
Q = I * T * K * 4 bytes => 24 GB RAM
Q_pairs = I * T * K * K * 4 bytes => :(
Chromosome 1:
T = 1246254
=> Q = .9 GB
=> Q_pairs = 8.9 GB
=> X = .1 GB
"""
import argparse
import sys
import operator
import glob
import urllib
import os
import hashlib
import multiprocessing
import time
import cPickle as pickle
import copy
import re
import tarfile
from cStringIO import StringIO
import time
import random
try:
import pysam
except ImportError:
print 'pysam not installed. Cannot convert data'
import scipy as sp
from scipy.stats import poisson
import scipy.io
import scipy.signal
try:
import matplotlib
matplotlib.use('Agg', warn=False)
#matplotlib.rc('text', usetex=True)
#matplotlib.rc('ps', usedistiller='xpdf')
from matplotlib import pyplot
allow_plots = True
except ImportError:
allow_plots = False
print 'matplotlib not installed. Cannot plot!'
sp.seterr(all='raise')
sp.seterr(under='print')
#sp.random.seed([5])
from treehmm.static import valid_species, valid_marks, mark_avail, phylogeny, inference_types, float_type
from treehmm import vb_mf
from treehmm import vb_prodc
#import loopy_bp
from treehmm import loopy_bp
from treehmm import clique_hmm
#import concatenate_hmm
from treehmm import vb_gmtkExact_continuous as vb_gmtkExact
from treehmm import vb_independent
from treehmm.plot import plot_params, plot_data, plot_Q, plot_energy, plot_energy_comparison
from treehmm.do_parallel import do_parallel_inference
def main(argv=sys.argv[1:]):
    """run a variational EM algorithm"""
    # parse arguments, then call convert_data or do_inference
    parser = make_parser()
    args = parser.parse_args(argv)
    # mark_avail may be absent for some subcommands, or given as a path to a
    # saved numpy array; normalize to an array here.
    if not hasattr(args, 'mark_avail'):
        args.mark_avail = mark_avail
    elif isinstance(args.mark_avail, basestring):
        args.mark_avail = sp.load(args.mark_avail)
    if args.func == do_inference:
        # allow patterns on the command line
        # NOTE(review): eval() of command-line strings (phylogeny, range_k)
        # is only safe for trusted input.
        global phylogeny
        args.phylogeny = eval(args.phylogeny)
        phylogeny = args.phylogeny
        # expand glob patterns for the observation matrix files
        all_obs = []
        for obs_pattern in args.observe_matrix:
            obs_files = glob.glob(obs_pattern)
            if len(obs_files) == 0:
                parser.error('No files matched the pattern %s' % obs_pattern)
            all_obs.extend(obs_files)
        args.observe_matrix = all_obs
        if args.approx == 'gmtk':
            # gmtk processes all observation files itself in a single run;
            # init args from the first file, then restore the full list
            args.subtask = False
            obs_mat = args.observe_matrix
            args.observe_matrix = args.observe_matrix[0]
            init_args_for_inference(args)
            args.observe_matrix = obs_mat
            del args.func  # drop the dispatch function before handing off
            vb_gmtkExact.mark_avail = args.mark_avail
            vb_gmtkExact.run_gmtk_lineagehmm(args)
            return
        if len(args.observe_matrix) > 1:
            # several observation files => fan out one job per file
            print 'parallel inference on %s jobs' % len(args.observe_matrix)
            args.func = do_parallel_inference
        else:
            args.subtask = False
            args.observe_matrix = args.observe_matrix[0]
            args.observe = os.path.split(args.observe_matrix)[1]
            args.func = do_inference
        if args.range_k is not None:
            # rerun inference for every K in the given range expression
            out_template = args.out_dir + "_rangeK"
            for args.K in eval(args.range_k):
                print 'trying K=', args.K
                args.out_dir = out_template
                args.func(args)
            return
    args.func(args)  # do inference, downloading, or data conversion
def do_inference(args):
    """Perform the type of inference specified in args"""
    # set up
    if args.quiet_mode:
        # NOTE(review): the '%s' placeholder is never filled in, so the log
        # file is literally named 'log_%s.log' -- probably meant to include
        # args.observe or similar; confirm intent.
        sys.stdout = open('log_%s.log' , 'a')
    init_args_for_inference(args)
    print 'done making args'
    args.out_dir = args.out_dir.format(timestamp=time.strftime('%x_%X').replace('/','-'), **args.__dict__)
    try:
        print 'making', args.out_dir
        os.makedirs(args.out_dir)
    except OSError:
        # output directory already exists
        pass

    if not args.subtask:
        args.iteration = '0_initial'
        plot_params(args)
        if args.plot_iter >= 2:
            plot_data(args)

    # outer EM loop: alternate E steps (update Q) and M steps (update params)
    for i in xrange(1, args.max_iterations+1):
        if not args.subtask:
            args.iteration = i
        print 'iteration', i
        # run a few times rather than checking free energy
        for j in xrange(1, args.max_E_iter+1 if args.approx != 'clique' else 2):
            args.update_q_func(args)
            if args.approx != 'loopy':
                f = args.free_energy_func(args)
                print 'free energy after %s E steps' % j, f
                try:
                    # stop the E steps once the relative change is below
                    # epsilon_e; the first iteration has no previous value
                    print abs(args.last_free_energy - f) / args.last_free_energy
                    if abs(abs(args.last_free_energy - f) / args.last_free_energy) < args.epsilon_e:
                        args.last_free_energy = f
                        break
                    args.last_free_energy = f
                except:  # no previous free energy
                    args.last_free_energy = f
            else:
                # loopy BP uses message convergence instead of free energy
                print 'loopy %s E steps' % j
                if loopy_bp.bp_check_convergence(args):
                    args.last_free_energy = f = abs(args.free_energy_func(args))
                    break
        print '# saving Q distribution'
        if args.continuous_observations:
            # report the current Gaussian emission parameters
            for k in range(args.K):
                print 'means[%s,:] = ' % k, args.means[k,:]
                print 'stdev[%s,:] = ' % k, sp.sqrt(args.variances[k,:])
        if args.save_Q >= 2:
            for p in args.Q_to_save:
                sp.save(os.path.join(args.out_dir,
                                     args.out_params.format(param=p, **args.__dict__)),
                        args.__dict__[p])
        if args.subtask:
            # save the weights without renormalizing; the parallel driver
            # combines the per-job weights and renormalizes globally
            print 'saving weights for parameters'
            args.update_param_func(args, renormalize=False)
            #plot_Q(args)
            args.free_energy.append(args.last_free_energy)
            for p in args.params_to_save:
                sp.save(os.path.join(args.out_dir, args.out_params.format(param=p, **args.__dict__)),
                        args.__dict__[p])
            break
        else:
            # optimize parameters with new Q
            args.update_param_func(args)
            f = args.free_energy_func(args)
            try:
                if args.approx != 'clique':
                    # stop EM once the relative free-energy change < epsilon
                    print abs(args.last_free_energy - f) / args.last_free_energy
                    if abs(abs(args.last_free_energy - f) / args.last_free_energy) < args.epsilon:
                        args.last_free_energy = f
                        break
                    args.last_free_energy = f
            except:  # no previous free energy
                args.last_free_energy = f
            #args.last_free_energy = args.free_energy_func(args)
            args.free_energy.append(args.last_free_energy)
            print 'free energy after M-step', args.free_energy[-1]

        # save current parameter state
        for p in args.params_to_save:
            sp.save(os.path.join(args.out_dir,
                                 args.out_params.format(param=p, **args.__dict__)),
                    args.__dict__[p])
        if args.plot_iter != 0 and i % args.plot_iter == 0:
            plot_params(args)
            plot_energy(args)
            if args.plot_iter >= 2:
                plot_Q(args)
        #import ipdb; ipdb.set_trace()

        if args.compare_inf is not None:
            # rerun the E step under other inference approximations with the
            # current parameters, recording each free energy for comparison
            args.log_obs_mat = sp.zeros((args.I,args.T,args.K), dtype=float_type)
            vb_mf.make_log_obs_matrix(args)
            if 'mf' in args.compare_inf:
                tmpargs = copy.deepcopy(args)
                tmpargs.Q = vb_mf.mf_random_q(args.I,args.T,args.K)
                print 'comparing '
                for j in xrange(1, args.max_E_iter+1):
                    vb_mf.mf_update_q(tmpargs)
                    if vb_mf.mf_check_convergence(tmpargs):
                        break
                print 'mf convergence after %s iterations' % j
                e = vb_mf.mf_free_energy(tmpargs)
                args.cmp_energy['mf'].append(e)
            if 'poc' in args.compare_inf:
                tmpargs = copy.deepcopy(args)
                if args.approx != 'poc':
                    # NOTE(review): this call passes args.emit_probs while the
                    # prodc_initialize_qs call in init_args_for_inference does
                    # not -- verify against the vb_prodc signature.
                    tmpargs.Q, tmpargs.Q_pairs = vb_prodc.prodc_initialize_qs(args.theta, args.alpha, args.beta,
                                                                              args.gamma, args.emit_probs, args.X, args.log_obs_mat)
                for j in xrange(1, args.max_E_iter+1):
                    vb_prodc.prodc_update_q(tmpargs)
                    if vb_mf.mf_check_convergence(tmpargs):
                        break
                print 'poc convergence after %s iterations' % j
                e = vb_prodc.prodc_free_energy(tmpargs)
                args.cmp_energy['poc'].append(e)
                #sp.io.savemat(os.path.join(args.out_dir, 'Artfdata_poc_inferred_params_K{K}_{T}.mat'.format(K=args.K, T=args.max_bins)), dict(alpha = args.alpha, theta=args.theta, beta=args.beta, gamma=args.gamma, emit_probs=args.emit_probs))
            if 'pot' in args.compare_inf:
                #del args.cmp_energy['pot']
                pass
            if 'concat' in args.compare_inf:
                #del args.cmp_energy['concat']
                pass
            if 'clique' in args.compare_inf:
                tmpargs = copy.deepcopy(args)
                if args.approx != 'clique':
                    clique_hmm.clique_init_args(tmpargs)
                for j in xrange(1):
                    clique_hmm.clique_update_q(tmpargs)
                    # clique inference is exact; store negative likelihood so
                    # it is comparable to the (minimized) free energies
                    e = clique_hmm.clique_likelihood(tmpargs)
                    args.cmp_energy['clique'].append(-e)
            if 'loopy' in args.compare_inf:
                tmpargs = copy.deepcopy(args)
                if args.approx != 'loopy':
                    tmpargs.lmds, tmpargs.pis = loopy_bp.bp_initialize_msg(args.I, args.T, args.K, args.vert_children)
                for j in xrange(1, args.max_E_iter+1):
                    loopy_bp.bp_update_msg_new(tmpargs)
                    if loopy_bp.bp_check_convergence(tmpargs):
                        break
                print 'loopy convergence after %s iterations' % j
                #e = loopy_bp.bp_bethe_free_energy(tmpargs)
                e = loopy_bp.bp_mf_free_energy(tmpargs)
                args.cmp_energy['loopy'].append(e)
            if args.plot_iter != 0:
                plot_energy_comparison(args)

    # save the final parameters and free energy to disk
    print 'done iteration'
    if args.save_Q >= 1:
        for p in args.Q_to_save:
            sp.save(os.path.join(args.out_dir,
                                 args.out_params.format(param=p, **args.__dict__)),
                    args.__dict__[p])
    for p in args.params_to_save:
        sp.save(os.path.join(args.out_dir, args.out_params.format(param=p, **args.__dict__)),
                args.__dict__[p])
    #pickle.dump(args, os.path.join(args.out_dir, args.out_params.format(param='args', **args.__dict__)))
    print 'done savin'
    if not args.subtask and args.plot_iter != 0:
        plot_energy(args)
        plot_params(args)
        plot_Q(args)
    #scipy.io.savemat('poc_inferred_params_K{K}_{T}.mat'.format(K=args.K, T=args.max_bins), dict(alpha = args.alpha, theta=args.theta, beta=args.beta, gamma=args.gamma, emit_probs=args.emit_probs))
def init_args_for_inference(args):
    """Initialize args with inference variables according to learning method.

    Mutates *args* in place:
      * loads the observation matrix X and records its dimensions
        (args.I species, args.T bins, args.L marks), optionally truncated
        by --max_bins / --max_species;
      * chooses discrete vs continuous emission handling based on X's dtype;
      * initializes parameters (warm start, parallel-driver, or random);
      * builds the phylogenetic tree via make_tree();
      * binds the per-method update/free-energy/convergence callables for
        the chosen args.approx.
    """
    # read in the datafiles to X array
    print '# loading observations'
    X = sp.load(args.observe_matrix)
    # optionally truncate the data for faster experiments
    if args.max_bins is not None:
        X = X[:, :args.max_bins, :]
    if args.max_species is not None:
        X = X[:args.max_species, :, :]
    args.X = X
    args.I, args.T, args.L = X.shape
    # int8 data is the binarized (discrete) encoding produced by convert_data;
    # anything else is treated as continuous (Gaussian emissions)
    if args.X.dtype != scipy.int8:
        args.continuous_observations = True
        print 'Inference for continuous observations'
        args.X = X.astype(float_type)
        #args.means = sp.rand(args.K, args.L)
        #args.variances = sp.rand(args.K, args.L)
        args.means, args.variances = initialize_mean_variance(args)
    else:
        args.continuous_observations = False
        print 'Inference for discrete observations'
    # single-species runs encode the true species index in the filename as '.i<N>.'
    match = re.search(r'\.i(\d+)\.', args.observe_matrix)
    args.real_species_i = int(match.groups()[0]) if match and args.I == 1 else None
    args.free_energy = []
    make_tree(args)
    # which arrays get written out at save time
    args.Q_to_save = ['Q']
    #if args.approx == 'poc':
    #    args.Q_to_save += ['Q_pairs']
    #elif args.approx == 'clique':
    #    args.Q_to_save += ['clq_Q', 'clq_Q_pairs']
    args.params_to_save = ['free_energy', 'alpha', 'gamma', 'last_free_energy']
    if True:  #args.approx not in ['clique', 'concat']:
        args.params_to_save += ['theta', 'beta']
    if args.continuous_observations:
        args.params_to_save += ['means', 'variances']
    else:
        args.params_to_save += ['emit_probs', 'emit_sum']
    if args.compare_inf is not None:
        if 'all' in args.compare_inf:
            args.compare_inf = inference_types
        # energies per compared method; 'pot' and 'concat' are unimplemented
        args.cmp_energy = dict((inf, []) for inf in args.compare_inf if inf not in ['pot', 'concat'])
        args.params_to_save += ['cmp_energy']
    if args.warm_start:  # need to load params
        print '# loading previous params for warm start from %s' % args.warm_start
        tmpargs = copy.deepcopy(args)
        tmpargs.out_dir = args.warm_start
        #tmpargs.observe = 'all.npy'
        args.free_energy, args.theta, args.alpha, args.beta, args.gamma, args.emit_probs, args.emit_sum = load_params(tmpargs)
        try:
            args.free_energy = list(args.free_energy)
        except TypeError:  # no previous free energy
            args.free_energy = []
        print 'done'
    elif args.subtask:  # params in args already
        print '# using previous params from parallel driver'
    else:
        print '# generating random parameters'
        (args.theta, args.alpha, args.beta, args.gamma, args.emit_probs) = \
                random_params(args.I, args.K, args.L, args.separate_theta)
        if args.continuous_observations:
            # continuous runs use means/variances instead of emission probabilities
            del args.emit_probs
    # bind the method-specific callables used by the EM driver
    if args.approx == 'mf':  # mean-field approximation
        if not args.subtask or args.iteration == 0:
            args.Q = vb_mf.mf_random_q(args.I,args.T,args.K)
        #else:
        #    q_path = os.path.join(args.out_dir, args.out_params.format(param='Q', **args.__dict__))
        #    print 'loading previous Q from %s' % q_path
        #    args.Q = sp.load(q_path)
        args.log_obs_mat = sp.zeros((args.I,args.T,args.K), dtype=float_type)
        vb_mf.make_log_obs_matrix(args)
        args.update_q_func = vb_mf.mf_update_q
        args.update_param_func = vb_mf.mf_update_params
        args.free_energy_func = vb_mf.mf_free_energy
        args.converged_func = vb_mf.mf_check_convergence
    elif args.approx == 'poc':  # product-of-chains approximation
        # separate-theta support lives in a drop-in replacement module
        if not args.separate_theta:
            import vb_prodc
        else:
            import vb_prodc_sepTheta as vb_prodc
        args.log_obs_mat = sp.zeros((args.I,args.T,args.K), dtype=float_type)
        if args.continuous_observations:
            vb_mf.make_log_obs_matrix_gaussian(args)
        else:
            vb_mf.make_log_obs_matrix(args)
        if not args.subtask or args.iteration == 0:
            print '# generating Qs'
            args.Q, args.Q_pairs = vb_prodc.prodc_initialize_qs(args.theta, args.alpha, args.beta,
                                                                args.gamma, args.X, args.log_obs_mat)
        #else:
        #    q_path = os.path.join(args.out_dir, args.out_params.format(param='Q', **args.__dict__))
        #    print 'loading previous Q from %s' % q_path
        #    args.Q = sp.load(q_path)
        #    args.Q_pairs = sp.load(os.path.join(args.out_dir, args.out_params.format(param='Q_pairs', **args.__dict__)))
        args.update_q_func = vb_prodc.prodc_update_q
        args.update_param_func = vb_prodc.prodc_update_params
        args.free_energy_func = vb_prodc.prodc_free_energy
        args.converged_func = vb_mf.mf_check_convergence
    elif args.approx == 'indep':  # completely independent chains
        args.log_obs_mat = sp.zeros((args.I, args.T, args.K), dtype=float_type)
        if args.continuous_observations:
            vb_mf.make_log_obs_matrix_gaussian(args)
        else:
            vb_mf.make_log_obs_matrix(args)
        if not args.subtask or args.iteration == 0:
            print '# generating Qs'
            args.Q = sp.zeros((args.I, args.T, args.K), dtype=float_type)
            args.Q_pairs = sp.zeros((args.I, args.T, args.K, args.K), dtype=float_type)
            vb_independent.independent_update_qs(args)
        #else:
        #    q_path = os.path.join(args.out_dir, args.out_params.format(param='Q', **args.__dict__))
        #    print 'loading previous Q from %s' % q_path
        #    args.Q = sp.load(q_path)
        #    args.Q_pairs = sp.load(os.path.join(args.out_dir, args.out_params.format(param='Q_pairs', **args.__dict__)))
        args.update_q_func = vb_independent.independent_update_qs
        args.update_param_func = vb_independent.independent_update_params
        args.free_energy_func = vb_independent.independent_free_energy
        args.converged_func = vb_mf.mf_check_convergence
    elif args.approx == 'pot':  # product-of-trees approximation
        raise NotImplementedError("Product of Trees is not implemented yet!")
    elif args.approx == 'clique':
        if args.separate_theta:
            raise RuntimeError('separate_theta not implemented yet for clique')
        print 'making cliqued Q'
        args.Q = sp.zeros((args.I, args.T, args.K), dtype=float_type)
        clique_hmm.clique_init_args(args)
        args.update_q_func = clique_hmm.clique_update_q
        args.update_param_func = clique_hmm.clique_update_params
        args.free_energy_func = clique_hmm.clique_likelihood
        args.converged_func = vb_mf.mf_check_convergence
    elif args.approx == 'concat':
        raise NotImplementedError("Concatenated HMM is not implemented yet!")
    elif args.approx == 'loopy':
        if args.separate_theta:
            raise RuntimeError('separate_theta not implemented yet for clique')
        if not args.subtask or args.iteration == 0:
            args.Q = vb_mf.mf_random_q(args.I, args.T, args.K)
        #else:
        #    q_path = os.path.join(args.out_dir, args.out_params.format(param='Q', **args.__dict__))
        #    print 'loading previous Q from %s' % q_path
        #    args.Q = sp.load(q_path)
        # loopy belief propagation messages (lambdas and pis)
        args.lmds, args.pis = loopy_bp.bp_initialize_msg(args)
        args.log_obs_mat = sp.zeros((args.I,args.T,args.K), dtype=float_type)
        vb_mf.make_log_obs_matrix(args)
        args.update_q_func = loopy_bp.bp_update_msg_new
        args.update_param_func = loopy_bp.bp_update_params_new
        #args.free_energy_func = loopy_bp.bp_bethe_free_energy
        args.free_energy_func = loopy_bp.bp_mf_free_energy
        args.converged_func = loopy_bp.bp_check_convergence
    elif args.approx == 'gmtk':
        # inference is delegated to GMTK externally; nothing to bind here
        pass
    else:
        raise RuntimeError('%s not recognized as valid inference method!' % args.approx)
def distance(x1, x2):
    """Return the summed elementwise magnitude of (x1 - x2).

    Computes sqrt((x1-x2)^2) elementwise and sums the result, i.e. the L1
    (Manhattan) distance between the arrays.  NOTE(review): despite the
    kmeans++-style callers, this is not the Euclidean norm -- confirm intent
    before changing.
    """
    diff = x1 - x2
    return scipy.sqrt(diff * diff).sum()
def initialize_mean_variance(args):
    """Initialize the current mean and variance values semi-intelligently.

    Inspired by the kmeans++ algorithm: iteratively choose new centers from the data
    by weighted sampling, favoring points that are distant from those already chosen.

    Reads args.X (I x T x L), args.K, and args.pseudocount; returns a
    (means, variances) pair of (K x L) arrays.
    """
    # flatten species and bins into one axis: each row is an L-vector observation
    X = args.X.reshape(args.X.shape[0] * args.X.shape[1], args.X.shape[2])
    # kmeans++ inspired choice
    centers = [random.choice(X)]
    min_dists = scipy.array([distance(centers[-1], x) for x in X])
    for l in range(1, args.K):
        # sample the next center with probability proportional to dist^2
        weights = min_dists * min_dists
        new_center = weighted_sample(zip(weights, X), 1).next()  # Python 2 generator protocol
        centers.append(new_center)
        # NOTE(review): relies on old scipy re-exporting numpy's elementwise
        # minimum as scipy.fmin (removed in modern scipy) -- confirm version
        min_dists = scipy.fmin(min_dists, scipy.array([distance(centers[-1], x) for x in X]))
    means = scipy.array(centers)
    # for the variance, get the variance of the data in this cluster
    variances = []
    for c in centers:
        # assign x to center c when its min distance equals distance(c, x);
        # NOTE(review): exact float equality -- fragile if distances tie/round
        idxs = tuple(i for i, (x, m) in enumerate(zip(X, min_dists)) if distance(c, x) == m)
        v = scipy.var(X[idxs, :], axis=0)
        variances.append(v)
    variances = scipy.array(variances) + args.pseudocount
    #import pdb; pdb.set_trace()
    #for k in range(args.K):
    #    print sp.sqrt(variances[k,:])
    # floor the variances to keep the Gaussian emissions well-conditioned
    variances[variances < .1] = .1
    return means, variances
def weighted_sample(items, n):
    """Yield n random picks from (weight, value) pairs, proportional to weight.

    One-pass cookbook recipe: each round draws a random amount of the remaining
    total weight, then walks forward through *items* consuming weights in place
    until the draw lands inside some item's remaining weight.  Picks therefore
    come out in item order.  NOTE(review): *items* must be an indexable
    sequence of pairs with non-negative weights -- not validated here.
    """
    total = float(sum(w for w, v in items))
    i = 0
    w, v = items[0]
    while n:
        # portion of the remaining total weight covered by this pick
        x = total * (1 - random.random() ** (1.0 / n))
        total -= x
        # advance until the draw falls within item i's remaining weight
        while x > w:
            x -= w
            i += 1
            w, v = items[i]
        w -= x  # consume part of item i's weight for subsequent picks
        yield v
        n -= 1
def make_parser():
    """Make a parser for variational inference.

    Builds an argparse parser with four subcommands -- convert, split, infer,
    and q_to_bed.  Each subcommand stores its handler in 'func' via
    set_defaults, so the caller can dispatch with args.func(args).
    """
    parser = argparse.ArgumentParser()
    tasks_parser = parser.add_subparsers()

    # parameters for converting datasets from BAM to observation matrix
    convert_parser = tasks_parser.add_parser('convert', help='Convert BAM reads'
                                             ' into a matrix of observations')
    convert_parser.add_argument('--download_first', action='store_true',
                                help='Download the raw sequence data from UCSC,'
                                ' then convert it.')
    convert_parser.add_argument('--base_url', default='http://hgdownload.cse.ucsc.edu/goldenPath/hg19/encodeDCC/wgEncodeBroadHistone/%s',
                                help='When downloading, string-format the template into this url.')
    convert_parser.add_argument('--species', nargs='+', default=valid_species,
                                help='The set of species with observations. By '
                                'default, use all species: %(default)s')
    convert_parser.add_argument('--marks', nargs='+', default=valid_marks,
                                help='The set of marks with observations. By '
                                'default, use all histone marks: %(default)s')
    convert_parser.add_argument('--windowsize', type=int, default=200,
                                help='histogram bin size used in conversion')
    convert_parser.add_argument('--chromosomes', nargs='+', default='all',
                                help='which chromosomes to convert. By default,'
                                ' convert all autosomes')
    convert_parser.add_argument('--min_reads', type=float, default=.5,
                                help='The minimum number of reads for a region to be included. default: %(default)s')
    convert_parser.add_argument('--min_size', type=int, default=25,
                                help='The minimum length (in bins) to include a chunk. default: %(default)s')
    convert_parser.add_argument('--max_pvalue', type=float, default=1e-4,
                                help='p-value threshold to consider the read count'
                                ' significant, using a local poisson rate defined by'
                                ' the control data')
    convert_parser.add_argument('--outfile', default='observations.{chrom}.npy',
                                help='Where to save the binarized reads')
    #convert_parser.add_argument('--bam_template', help='bam file template.',
    #                            default='wgEncodeBroadHistone{species}{mark}StdAlnRep*.bam')
    convert_parser.add_argument('--bam_template', help='bam file template. default: %(default)s',
                                default='wgEncode*{species}{mark}StdAlnRep{repnum}.bam')
    convert_parser.set_defaults(func=convert_data)

    # # to trim off telomeric regions
    # trim_parser = tasks_parser.add_parser('trim', help='trim off regions without'
    #                                       'any observations in them')
    # trim_parser.add_argument('observe_matrix', nargs='+',
    #                          help='Files to be trimmed (converted from bam'
    #                          ' using "%(prog)s convert" command).')
    # trim_parser.set_defaults(func=trim_data)

    # to split a converted dataset into pieces
    split_parser = tasks_parser.add_parser('split', help='split observations '
                                           'into smaller pieces, retaining only regions with '
                                           'a smoothed minimum read count.')
    split_parser.add_argument('observe_matrix', nargs='+',
                              help='Files containing observed data (converted from bam'
                              ' using "%(prog)s convert" command). If multiple files '
                              'are specified, each is treated as its own chain but '
                              'the parameters are shared across all chains')
    split_parser.add_argument('start_positions', help='start_positions.pkl file generated during `convert` step.')
    #split_parser.add_argument('--chunksize', type=int, default=100000,
    #                          help='the number of bins per chunk. default: %(default)s')
    split_parser.add_argument('--min_reads', type=float, default=.5,
                              help='The minimum number of reads for a region to be included. default: %(default)s')
    split_parser.add_argument('--gauss_window_size', type=int, default=200,
                              help='The size of the gaussian smoothing window. default: %(default)s')
    split_parser.add_argument('--min_size', type=int, default=25,
                              help='The minimum length (in bins) to include a chunk. default: %(default)s')
    split_parser.set_defaults(func=split_data)

    # parameters for learning and inference with converted observations
    infer_parser = tasks_parser.add_parser('infer')
    infer_parser.add_argument('K', type=int, help='The number of hidden states'
                              ' to infer')
    infer_parser.add_argument('observe_matrix', nargs='+',
                              help='Files containing observed data (converted from bam'
                              ' using "%(prog)s convert" command). If multiple files '
                              'are specified, each is treated as its own chain but '
                              'the parameters are shared across all chains')
    infer_parser.add_argument('--approx', choices=inference_types,
                              default='mf',
                              help='Which approximation to make in inference')
    infer_parser.add_argument('--out_params', default='{approx}_{param}_{observe}',
                              help='Where to save final parameters')
    infer_parser.add_argument('--epsilon', type=float, default=1e-4,
                              help='Convergence criteria: change in Free energy'
                              ' during M step must be < epsilon')
    infer_parser.add_argument('--epsilon_e', type=float, default=1e-3,
                              help='Convergence criteria: change in Free energy'
                              ' during E step must be < epsilon')
    infer_parser.add_argument('--max_iterations', type=int, default=50,
                              help='Maximum number of EM steps before stopping')
    infer_parser.add_argument('--max_E_iter', type=int, default=10,
                              help='Maximum number of E steps per M step')
    infer_parser.add_argument('--max_bins', default=None, type=int,
                              help='Restrict the total number of bins (T)')
    infer_parser.add_argument('--max_species', default=None, type=int,
                              help='Restrict the total number of species (I)')
    infer_parser.add_argument('--pseudocount', type=float_type, default=1e-6,
                              help='pseudocount to add to each parameter matrix')
    infer_parser.add_argument('--plot_iter', type=int, default=1,
                              help='draw a plot per *plot_iter* iterations.'
                              '0 => plot only at the end. Default is %(default)s')
    infer_parser.add_argument('--out_dir', type=str, default='{run_name}_out/{approx}/I{I}_K{K}_T{T}_{timestamp}',
                              help='Output parameters and plots in this directory'
                              ' (default: %(default)s')
    infer_parser.add_argument('--run_name', type=str, default='infer',
                              help='name of current run type (default: %(default)s')
    infer_parser.add_argument('--num_processes', type=int, default=None,
                              help='Maximum number of processes to use '
                              'simultaneously (default: all)')
    infer_parser.add_argument('--warm_start', type=str, default=None,
                              help="Resume iterations using parameters and Q's "
                              "from a previous run. Q's that are not found are "
                              "regenerated")
    infer_parser.add_argument('--compare_inf', nargs='+', type=str, default=None, choices=inference_types + ['all'],
                              help="While learning using --approx method, "
                              "compare the inferred hidden states and energies "
                              "from these inference methods.")
    infer_parser.add_argument('--range_k', type=str, default=None,
                              help="perform inference over a range of K values. Argument is passed as range(*arg*)")
    infer_parser.add_argument('--save_Q', type=int, choices=[0,1,2,3], default=1,
                              help="Whether to save the inferred marginals for hidden variables. 0 => no saving, 1 => save at end, 2 => save at each iteration. 3 => for parallel jobs, reconstruct the chromsomal Q distribution at each iteration. Default: %(default)s")
    infer_parser.add_argument('--quiet_mode', action='store_true', help="Turn off printing for this run")
    infer_parser.add_argument('--run_local', action='store_true', help="Force parallel jobs to run on the local computer, even when SGE is available")
    infer_parser.add_argument('--separate_theta', action='store_true', help='use a separate theta matrix for each node of the tree (only works for GMTK)')
    infer_parser.add_argument('--mark_avail', help='npy matrix of available marks',
                              default=mark_avail)
    infer_parser.add_argument('--phylogeny', help='the phylogeny connecting each species, as a python dictionary with children for keys and parents for values. Note: this does not have to be a singly-rooted or even a bifurcating phylogeny! You may specify multiple trees, chains, stars, etc, but should not have loops in the phylogeny.',
                              default=str(phylogeny))
    infer_parser.add_argument('--chunksize', help='The number of chunks (for convert+split data) or chromosomes (for convert only) to submit to each runner. When running on SGE, you should set this number relatively high (in 100s?) since each job has a very slow startup time. When running locally, this is the number of chunks each subprocess will handle at a time.',
                              default=1)
    infer_parser.set_defaults(func=do_inference)

    # convert saved Q marginals into genome-browser BED tracks
    bed_parser = tasks_parser.add_parser('q_to_bed')
    bed_parser.add_argument('q_root_dir', help='Root directory for the Q outputs to convert. '
                            'Should look something like: infer_out/mf/<timestamp>/')
    bed_parser.add_argument('start_positions', help='the pickled offsets generated by `tree-hmm convert` or `tree-hmm split`.',)
    bed_parser.add_argument('--bed_template', help='template for bed output files. Default: %(default)s',
                            default='treehmm_states.{species}.state{state}.bed')
    bed_parser.add_argument('--save_probs', action='store_true', help='Instead of saving the most likely state for each bin, record the probability of being in that state at each position. NOTE: this will greatly increase the BED file size!')
    bed_parser.set_defaults(func=q_to_bed)
    return parser
def q_to_bed(args):
    """Convert saved Q marginals (*_Q_*.npy under args.q_root_dir) to BED tracks.

    Each Q array has shape (I species, T bins, K states).  For every bin,
    either the argmax state (default) or all K state probabilities
    (--save_probs) are appended to a per-(species, state) BED file named by
    args.bed_template.  Genomic coordinates come from the start_positions
    pickle written by the convert/split steps.
    """
    # BUG FIX: the pickle file handle was previously leaked (open() inline)
    with open(args.start_positions) as fh:
        attrs = pickle.load(fh)
    windowsize = attrs['windowsize']
    start_positions = attrs['start_positions']
    valid_species = attrs['valid_species']
    valid_marks = attrs['valid_marks']  # loaded for parity with other commands; unused here
    outfiles = {}
    try:
        for f in glob.glob(os.path.join(args.q_root_dir, '*_Q_*.npy')):
            Q = scipy.load(f)
            obs = f.split('_Q_')[1]  # chunk filename suffix, keys start_positions
            I, T, K = Q.shape
            chrom, bin_offset = start_positions[obs]
            if not args.save_probs:
                best_states = Q.argmax(axis=2)
            for i in range(I):
                for t in range(T):
                    # with --save_probs emit every state's probability for this
                    # bin; otherwise only the single most likely state
                    if args.save_probs:
                        states = range(K)
                    else:
                        states = [best_states[i, t]]
                    for k in states:
                        bedline = '\t'.join([chrom, str((bin_offset + t) * windowsize),
                                             str((bin_offset + t + 1) * windowsize),
                                             '{species}.state{k}'.format(species=valid_species[i], k=k),
                                             str(Q[i, t, k]), '+']) + '\n'
                        # open each (species, state) BED file lazily, once
                        if (i, k) not in outfiles:
                            outfiles[(i, k)] = open(args.bed_template.format(species=valid_species[i], state=k), 'w')
                        outfiles[(i, k)].write(bedline)
    finally:
        # BUG FIX: output files were never closed, so buffered BED lines could
        # be lost and descriptors leaked
        for outfile in outfiles.values():
            outfile.close()
def load_params(args):
    """Load previously-saved parameter arrays from args.out_dir.

    Filenames are produced by formatting args.out_params (e.g.
    '{approx}_{param}_{observe}') with each parameter name plus the rest of
    args' attributes.  When args.separate_theta is set but the stored theta is
    a single (K,K,K) array, it is replicated across all I-1 non-root nodes.

    Returns (free_energy, theta, alpha, beta, gamma, emit_probs, emit_sum).
    """
    def _load(param):
        # resolve the on-disk filename for one named parameter and load it
        path = os.path.join(args.out_dir,
                            args.out_params.format(param=param, **args.__dict__))
        return sp.load(path)

    free_energy = _load('free_energy')
    theta = _load('theta')
    if len(theta.shape) == 3 and args.separate_theta:
        # stored theta is shared; tile one copy per non-root tree node
        expanded = sp.zeros((args.I - 1, args.K, args.K, args.K), dtype=float_type)
        for i in range(args.I - 1):
            expanded[i, :, :, :] = theta
        theta = expanded
    alpha = _load('alpha')
    beta = _load('beta')
    gamma = _load('gamma')
    emit_probs = _load('emit_probs')
    emit_sum = _load('emit_sum')
    return free_energy, theta, alpha, beta, gamma, emit_probs, emit_sum
def make_tree(args):
    """build a tree from the vertical parents specified in args

    Translates args.phylogeny (child species name -> parent species name) into
    index form for the first args.I species, then publishes two structures both
    as module globals and on args:
      * vert_parent: array where vert_parent[c] is c's parent index
        (the root, index 0, gets the out-of-range sentinel I);
      * vert_children: dict mapping each parent index to an int32 array of
        its child indices.
    """
    I = args.I
    # define the tree structure
    #tree_by_parents = {0:sp.inf, 1:0, 2:0} # 3 species, 2 with one parent
    #tree_by_parents = {0:sp.inf, 1:0} # 3 species, 2 with one parent
    #tree_by_parents = dict((args.species.index(k), args.species.index(v)) for k, v in phylogeny.items())
    # keep only edges whose endpoints are both within the first I species
    tree_by_parents = dict((valid_species.index(k), valid_species.index(v))
                           for k, v in args.phylogeny.items() if
                           valid_species.index(k) in xrange(I) and
                           valid_species.index(v) in xrange(I))
    # species 0 is the root; its "parent" is the inf sentinel
    tree_by_parents[0] = sp.inf  #'Null'
    print tree_by_parents.keys()

    # [inf, parent(1), parent(2), ...]
    global vert_parent
    #I = max(tree_by_parents) + 1
    vert_parent = sp.array([tree_by_parents[c] if c > 0 else I for c in
                            xrange(I)], dtype=sp.int8)  # error if 0's parent is accessed
    args.vert_parent = vert_parent
    print 'vert_parent', vert_parent
    # args.vert_parent = tree_by_parents

    # {inf:0, 0:[1,2], 1:[children(1)], ...}
    global vert_children
    vert_children = dict((pa, []) for pa in
                         tree_by_parents.keys())  # + tree_by_parents.values())
    # invert the child->parent mapping, avoiding duplicate child entries
    for pa in tree_by_parents.values():
        for ch in tree_by_parents.keys():
            if tree_by_parents[ch] == pa:
                if pa not in vert_children:
                    vert_children[pa] = []
                if ch not in vert_children[pa]:
                    vert_children[pa].append(ch)
    # drop the sentinel parent entry created for the root
    del vert_children[sp.inf]
    for pa in vert_children:
        vert_children[pa] = sp.array(vert_children[pa], dtype=sp.int32)
    args.vert_children = vert_children

    # vert_children = sp.ones(I, dtype = 'object')
    # for pa in range(I):
    #     vert_children[pa] = []
    #     for child, parent in tree_by_parents.items():
    #         if pa == parent:
    #             vert_children[pa].append(child)
    # print vert_children
    # args.vert_children = vert_children
def random_params(I, K, L, separate_theta):
    """Create and normalize random parameters for inference.

    Draws theta, alpha, beta, gamma, and emit_probs uniformly at random, then
    normalizes the transition parameters in place via vb_mf.normalize_trans
    (emit_probs is returned as drawn).  With separate_theta, theta carries a
    leading axis of I-1 per-node matrices.
    """
    #sp.random.seed([5])
    if separate_theta:
        theta_shape = (I - 1, K, K, K)
    else:
        theta_shape = (K, K, K)
    theta = sp.rand(*theta_shape).astype(float_type)
    alpha = sp.rand(K, K).astype(float_type)
    beta = sp.rand(K, K).astype(float_type)
    gamma = sp.rand(K).astype(float_type)
    emit_probs = sp.rand(K, L).astype(float_type)
    vb_mf.normalize_trans(theta, alpha, beta, gamma)
    return theta, alpha, beta, gamma, emit_probs
# def trim_data(args):
# """Trim regions without any observations from the start and end of the
# obervation matrices
# """
# for f in args.observe_matrix:
# print '# trimming ', f, 'start is ',
# X = sp.load(f).astype(sp.int8)
# S = X.cumsum(axis=0).cumsum(axis=2) # any species has any observation
# for start_t in xrange(X.shape[1]):
# if S[-1, start_t, -1] > 0:
# break
# for end_t in xrange(X.shape[1] - 1, -1, -1):
# if S[-1, end_t, -1] > 0:
# break
# tmpX = X[:, start_t:end_t, :]
# print start_t
# sp.save(os.path.splitext(f)[0] + '.trimmed', tmpX)
def split_data(args):
"""Split the given observation matrices into smaller chunks"""
sizes = []
total_size = 0
covered_size = 0
attrs = pickle.load(open(args.start_positions))
valid_species = attrs['valid_species']
valid_marks = attrs['valid_marks']
windowsize = attrs['windowsize']
old_starts = attrs['start_positions']
start_positions = {}
for f in args.observe_matrix:
print '# splitting ', f
chrom = old_starts[os.path.split(f)[1]][0]
X = sp.load(f).astype(sp.int8)
total_size += X.shape[1]
#start_ts = xrange(0, X.shape[1], args.chunksize)
#end_ts = xrange(args.chunksize, X.shape[1] + args.chunksize, args.chunksize)
density = X.sum(axis=0).sum(axis=1) # sumation over I, then L
#from ipdb import set_trace; set_trace()
gk = _gauss_kernel(args.gauss_window_size)
smoothed_density = scipy.signal.convolve(density, gk, mode='same')
regions_to_keep = smoothed_density >= args.min_reads
# find the regions where a transition is made from no reads to reads, and reads to no reads
start_ts = sp.where(sp.diff(regions_to_keep.astype(sp.int8)) > 0)[0]
end_ts = sp.where(sp.diff(regions_to_keep.astype(sp.int8)) < 0)[0]
cur_regions = [r for r in zip(start_ts, end_ts) if r[1] - r[0] >= args.min_size]
sizes.extend([end_t - start_t for start_t, end_t in cur_regions])
print 'saving %s regions' % len(sizes)
for chunknum, (start_t, end_t) in enumerate(cur_regions):
covered_size += end_t - start_t
tmpX = X[:, start_t:end_t, :]
name = os.path.splitext(f)[0] + '.chunk%s.npy' % chunknum
sp.save(name, tmpX)
fname = os.path.split(name)[1]
start_positions[fname] = (chrom, start_t)
print '# plotting size distribution'
pyplot.figure()
pyplot.figtext(.5,.01,'%s regions; %s bins total; %s bins covered; coverage = %.3f' % (len(sizes),total_size, covered_size, covered_size / float(total_size)), ha='center')
pyplot.hist(sizes, bins=100)
pyplot.title('chunk sizes for all chroms, min_reads %s, min_size %s, gauss_window_size %s' % (args.min_reads, args.min_size, args.gauss_window_size))
pyplot.savefig('chunk_sizes.minreads%s.minsize%s.windowsize%s.png' % (args.min_reads, args.min_size, args.gauss_window_size))
with open('start_positions_split.pkl', 'w') as outfile:
attrs = dict(windowsize=windowsize, start_positions=start_positions,
valid_species=valid_species, valid_marks=valid_marks)
pickle.dump(attrs, outfile, -1)
# --min_reads .5 --min_size 25 --window_size 200;
# def extract_local_features(args):
# """extract some local features from the given data, saving an X array with extra dimensions"""
# sizes = []
# total_size = 0
# covered_size = 0
# start_positions = {}
# for f in args.observe_matrix:
# print '# features on ', f
# X = sp.load(f).astype(sp.int8)
# total_size += X.shape[1]
# #start_ts = xrange(0, X.shape[1], args.chunksize)
# #end_ts = xrange(args.chunksize, X.shape[1] + args.chunksize, args.chunksize)
# density = X.sum(axis=0).sum(axis=1) # summation over I, then L
# #from ipdb import set_trace; set_trace()
# gk = _gauss_kernel(args.window_size)
# smoothed_density = scipy.signal.convolve(density, gk, mode='same')
# regions_to_keep = smoothed_density >= args.min_reads
# # find the regions where a transition is made from no reads to reads, and reads to no reads
# start_ts = sp.where(sp.diff(regions_to_keep.astype(sp.int8)) > 0)[0]
# end_ts = sp.where(sp.diff(regions_to_keep.astype(sp.int8)) < 0)[0]
# cur_regions = [r for r in zip(start_ts, end_ts) if r[1] - r[0] >= args.min_size]
# sizes.extend([end_t - start_t for start_t, end_t in cur_regions])
# print 'saving %s regions' % len(sizes)
# for chunknum, (start_t, end_t) in enumerate(cur_regions):
# covered_size += end_t - start_t
# tmpX = X[:, start_t:end_t, :]
# name = os.path.splitext(f)[0] + '.chunk%s.npy' % chunknum
# sp.save(name, tmpX)
# start_positions[name] = start_t
# print '# plotting size distribution'
# pyplot.figure()
# pyplot.figtext(.5,.01,'%s regions; %s bins total; %s bins covered; coverage = %.3f' % (len(sizes),total_size, covered_size, covered_size / float(total_size)), ha='center')
# pyplot.hist(sizes, bins=100)
# pyplot.title('chunk sizes for all chroms, min_reads %s, min_size %s, window_size %s' % (args.min_reads, args.min_size, args.window_size))
# pyplot.savefig('chunk_sizes.minreads%s.minsize%s.windowsize%s.png' % (args.min_reads, args.min_size, args.window_size))
# pickle.dump(start_positions, open('start_positions.pkl', 'w'))
# # --min_reads .5 --min_size 25 --window_size 200;
# def convert_data_continuous_features_and_split(args):
# """histogram both treatment and control data as specified by args
# This saves the complete X matrix
# This version doesn't binarize the data, smooths out the read signal (gaussian convolution)
# and adds derivative information
# """
# if args.download_first:
# download_data(args)
# I = len(args.species)
# L = len(args.marks)
# final_data = None
# total_size = 0
# covered_size = 0
# start_positions = {}
# # make sure all the data is present...
# for species in args.species:
# for mark in args.marks:
# d_files = [f for f in glob.glob(args.bam_template.format(
# species=species, mark=mark))]
# if len(d_files) == 0:
# print("No histone data for species %s mark %s Expected: %s" %
# (species, mark, args.bam_template.format(
# species=species, mark=mark)))
# for i, species in enumerate(args.species):
# for l, mark in enumerate(args.marks):
# d_obs = {}
# d_files = [f for f in glob.glob(args.bam_template.format(
# species=species, mark=mark))]
# if len(d_files) == 0:
# args.mark_avail[i, l] = 0
# else:
# args.mark_avail[i, l] = 1
# for mark_file in d_files:
# read_counts = histogram_reads(mark_file, args.windowsize, args.chromosomes)
# # d_obs.append(histogram_reads(mark_file, args.windowsize,
# # args.chromosomes))
# for
# d_obs = reduce(operator.add, d_obs) # add all replicants together
# #print 'before per million:', d_obs.sum()
# #d_obs /= (d_obs.sum() / 1e7) # convert to reads mapping per ten million
# # convert to a binary array with global poisson
# #genome_rate = d_obs / (d_obs.sum() / 1e6)
# if final_data is None:
# final_data = sp.zeros((I, len(d_obs), L), dtype=sp.float32)
# final_data[i, :, l] = d_obs
# total_size = final_data.shape[1]
# regions_to_keep = (final_data[:, :, tuple(range(L))].sum(axis=0).sum(axis=1) >= args.min_reads).astype(sp.int8)
# # find the regions where a transition is made from no reads to reads, and reads to no reads
# start_ts = sp.where(sp.diff(regions_to_keep) > 0)[0]
# end_ts = sp.where(sp.diff(regions_to_keep) < 0)[0]
# cur_regions = [r for r in zip(start_ts, end_ts) if r[1] - r[0] >= args.min_size]
# sizes = [end_t - start_t for start_t, end_t in cur_regions]
# print 'saving %s regions' % len(sizes)
# tarout = tarfile.open(args.outfile + '.tar.gz', 'w:gz')
# for chunknum, (start_t, end_t) in enumerate(cur_regions):
# covered_size += end_t - start_t
# tmpX = final_data[:, start_t:end_t, :]
# print 'adding chunk', chunknum, 'of', len(cur_regions)
# s = StringIO()
# sp.save(s, tmpX)
# name = args.outfile + '.chunk%s.npy' % chunknum
# info = tarfile.TarInfo(name)
# info.size = s.tell(); info.mtime = time.time()
# s.seek(0)
# tarout.addfile(info, s)
# start_positions[name] = start_t
# print '# plotting size distribution'
# pyplot.figure()
# pyplot.figtext(.5,.01,'%s regions; %s bins total; %s bins covered; coverage = %.3f' % (len(sizes),total_size, covered_size, covered_size / float(total_size)), ha='center')
# pyplot.hist(sizes, bins=100)
# pyplot.title('chunk sizes for all chroms, min_reads %s, min_size %s, windowsize %s' % (args.min_reads, args.min_size, args.windowsize))
# pyplot.savefig('chunk_sizes.minreads%s.minsize%s.windowsize%s.png' % (args.min_reads, args.min_size, args.windowsize))
# s = StringIO()
# pyplot.savefig(s)
# info = tarfile.TarInfo('chunk_sizes.minreads%s.minsize%s.windowsize%s.png' % (args.min_reads, args.min_size, args.windowsize))
# info.size = s.tell(); info.mtime = time.time()
# s.seek(0)
# tarout.addfile(info, s)
# s = StringIO()
# pickle.dump(start_positions, s)
# info = tarfile.TarInfo('start_positions.pkl')
# info.size = s.tell(); info.mtime = time.time()
# s.seek(0)
# tarout.addfile(info, s)
# pickle.dump(start_positions, open('start_positions.pkl', 'w'))
# # --min_reads .5 --min_size 25 --window_size 200;
# s = StringIO()
# sp.save(s, args.mark_avail)
# info = tarfile.TarInfo('available_marks.npy')
# info.size = s.tell(); info.mtime = time.time()
# s.seek(0)
# tarout.addfile(info, s)
# pickle.dump(start_positions, open('start_positions.pkl', 'w'))
# # --min_reads .5 --min_size 25 --window_size 200;
# tarout.close()
# print "output file:", args.outfile
# print 'available marks:', args.mark_avail
# #with open(args.outfile, 'wb') as outfile:
# # sp.save(outfile, final_data)
# with open(args.outfile + '.available_marks', 'wb') as outfile:
# sp.save(outfile, args.mark_avail)
# def convert_data_continuous_features_and_split_old(args):
# """histogram both treatment and control data as specified by args
# This saves the complete X matrix
# This version doesn't binarize the data, smooths out the read signal (gaussian convolution)
# and adds derivative information
# """
# if args.download_first:
# download_data(args)
# I = len(args.species)
# L = len(args.marks)
# final_data = None
# total_size = 0
# covered_size = 0
# start_positions = {}
# # make sure all the data is present...
# for species in args.species:
# for mark in args.marks:
# d_files = [f for f in glob.glob(args.bam_template.format(
# species=species, mark=mark))]
# if len(d_files) == 0:
# print("No histone data for species %s mark %s Expected: %s" %
# (species, mark, args.bam_template.format(
# species=species, mark=mark)))
# for i, species in enumerate(args.species):
# for l, mark in enumerate(args.marks):
# l = l * 3
# d_obs = []
# d_files = [f for f in glob.glob(args.bam_template.format(
# species=species, mark=mark))]
# if len(d_files) == 0:
# args.mark_avail[i, l] = 0
# args.mark_avail[i, l+1] = 0
# args.mark_avail[i, l+2] = 0
# else:
# args.mark_avail[i, l] = 1
# args.mark_avail[i, l+1] = 1
# args.mark_avail[i, l+2] = 1
# for mark_file in d_files:
# try:
# d_obs.append(histogram_reads(mark_file, args.windowsize,
# args.chromosomes))
# except ValueError as e:
# print e.message
# print d_obs[-1].sum()
# print d_obs[-1].shape
# d_obs = reduce(operator.add, d_obs) # add all replicants together
# #print 'before per million:', d_obs.sum()
# #d_obs /= (d_obs.sum() / 1e7) # convert to reads mapping per ten million
# # convert to a binary array with global poisson
# genome_rate = d_obs / (d_obs.sum() / 1e6)
# if final_data is None:
# final_data = sp.zeros((I, len(d_obs), L * 3), dtype=sp.float32)
# asinh_obs = sp.log(genome_rate + sp.sqrt(genome_rate * genome_rate + 1))
# gk = _gauss_kernel(3)
# smoothed_obs = scipy.signal.convolve(asinh_obs, gk, mode='same')
# smooth_deriv = sp.gradient(smoothed_obs)
# smooth_deriv2 = sp.gradient(smooth_deriv)
# final_data[i, :, l] = smoothed_obs
# final_data[i, :, l + 1] = smooth_deriv
# final_data[i, :, l + 2] = smooth_deriv2
# total_size = final_data.shape[1]
# regions_to_keep = (final_data[:, :, tuple(range(0, L * 3, 3))].sum(axis=0).sum(axis=1) >= args.min_reads).astype(sp.int8)
# # find the regions where a transition is made from no reads to reads, and reads to no reads
# start_ts = sp.where(sp.diff(regions_to_keep) > 0)[0]
# end_ts = sp.where(sp.diff(regions_to_keep) < 0)[0]
# cur_regions = [r for r in zip(start_ts, end_ts) if r[1] - r[0] >= args.min_size]
# sizes = [end_t - start_t for start_t, end_t in cur_regions]
# print 'saving %s regions' % len(sizes)
# tarout = tarfile.open(args.outfile + '.tar.gz', 'w:gz')
# for chunknum, (start_t, end_t) in enumerate(cur_regions):
# covered_size += end_t - start_t
# tmpX = final_data[:, start_t:end_t, :]
# print 'adding chunk', chunknum, 'of', len(cur_regions)
# s = StringIO()
# sp.save(s, tmpX)
# name = args.outfile + '.chunk%s.npy' % chunknum
# info = tarfile.TarInfo(name)
# info.size = s.tell(); info.mtime = time.time()
# s.seek(0)
# tarout.addfile(info, s)
# start_positions[name] = start_t
# print '# plotting size distribution'
# pyplot.figure()
# pyplot.figtext(.5,.01,'%s regions; %s bins total; %s bins covered; coverage = %.3f' % (len(sizes),total_size, covered_size, covered_size / float(total_size)), ha='center')
# pyplot.hist(sizes, bins=100)
# pyplot.title('chunk sizes for all chroms, min_reads %s, min_size %s, windowsize %s' % (args.min_reads, args.min_size, args.windowsize))
# pyplot.savefig('chunk_sizes.minreads%s.minsize%s.windowsize%s.png' % (args.min_reads, args.min_size, args.windowsize))
# s = StringIO()
# pyplot.savefig(s)
# info = tarfile.TarInfo('chunk_sizes.minreads%s.minsize%s.windowsize%s.png' % (args.min_reads, args.min_size, args.windowsize))
# info.size = s.tell(); info.mtime = time.time()
# s.seek(0)
# tarout.addfile(info, s)
# s = StringIO()
# pickle.dump(start_positions, s)
# info = tarfile.TarInfo('start_positions.pkl')
# info.size = s.tell(); info.mtime = time.time()
# s.seek(0)
# tarout.addfile(info, s)
# pickle.dump(start_positions, open('start_positions.pkl', 'w'))
# # --min_reads .5 --min_size 25 --window_size 200;
# s = StringIO()
# sp.save(s, args.mark_avail)
# info = tarfile.TarInfo('available_marks.npy')
# info.size = s.tell(); info.mtime = time.time()
# s.seek(0)
# tarout.addfile(info, s)
# pickle.dump(start_positions, open('start_positions.pkl', 'w'))
# # --min_reads .5 --min_size 25 --window_size 200;
# tarout.close()
# print "output file:", args.outfile
# print 'available marks:', args.mark_avail
# #with open(args.outfile, 'wb') as outfile:
# # sp.save(outfile, final_data)
# with open(args.outfile + '.available_marks', 'wb') as outfile:
# sp.save(outfile, args.mark_avail)
def _gauss_kernel(winsize):
x = sp.mgrid[-int(winsize):int(winsize)+1]
g = sp.exp(-(x**2/float(winsize)))
return g / g.sum()
def convert_data(args):
    """histogram both treatment and control data as specified by args
    This saves the complete X matrix

    NOTE: Python 2 module (print statements, builtin `reduce`).  Builds, per
    chromosome, an int8 tensor of shape (num_species, num_bins, num_marks)
    where 1 marks a window whose read count is Poisson-significant versus the
    genome-wide rate, then saves one .npy per chromosome plus a pickle of
    bookkeeping metadata.
    """
    # Optionally fetch missing BAM files before converting.
    if args.download_first:
        download_data(args)
    I = len(args.species)   # first axis of the per-chromosome output tensor
    L = len(args.marks)     # last axis of the per-chromosome output tensor
    final_data = None       # chrom -> int8 array (I, n_bins, L); created lazily
    start_positions = {}    # output basename -> (chrom, start offset)
    # make sure all the data is present...
    for species in args.species:
        for mark in args.marks:
            d_files = [f for f in glob.glob(args.bam_template.format(
                species=species, mark=mark, repnum='*'))]
            if len(d_files) == 0:
                raise RuntimeError("No histone data for species %s mark %s Expected: %s" %
                                   (species, mark, args.bam_template.format(
                                       species=species, mark=mark, repnum='*')))
    for i, species in enumerate(args.species):
        for l, mark in enumerate(args.marks):
            d_obs = {}  # chrom -> list of per-replicate count vectors
            d_files = [f for f in glob.glob(args.bam_template.format(
                species=species, mark=mark, repnum='*'))]
            if len(d_files) == 0:
                # unreachable in practice: the presence check above raises first
                pass
            else:
                for mark_file in d_files:
                    read_counts = histogram_reads(mark_file, args.windowsize, args.chromosomes)
                    # d_obs.append(histogram_reads(mark_file, args.windowsize,
                    #                              args.chromosomes))
                    for chrom in read_counts:
                        d_obs.setdefault(chrom, []).append(read_counts[chrom])
                for chrom in d_obs:
                    d_obs[chrom] = reduce(operator.add, d_obs[chrom])  # add all replicants together
                # print 'before per million:', d_obs.sum()
                # d_obs /= (d_obs.sum() / 1e7) # convert to reads mapping per ten million
                # convert to a binary array with global poisson
                # Genome-wide Poisson rate for this (species, mark): total
                # reads over total bins, across all chromosomes.
                num_reads = sum(x.sum() for x in d_obs.values())
                num_bins = float(sum(len(x) for x in d_obs.values()))
                genome_rate = num_reads / num_bins
                print 'after per million', num_reads, num_bins, genome_rate
                if final_data is None:
                    final_data = {}
                for chrom in d_obs:
                    # Binarize counts against the genome-wide rate.
                    d_obs[chrom] = call_significant_sites(d_obs[chrom], genome_rate, args.max_pvalue)
                    if chrom not in final_data:
                        final_data[chrom] = sp.zeros((I, len(d_obs[chrom]), L), dtype=sp.int8)
                    final_data[chrom][i, :, l] = d_obs[chrom]
    # Write one .npy per chromosome; args.outfile is a {chrom} template.
    for chrom in final_data:
        start_positions[os.path.split(args.outfile.format(chrom=chrom))[1]] = (chrom, 0)
        print "output file:", args.outfile.format(chrom=chrom)
        with open(args.outfile.format(chrom=chrom), 'wb') as outfile:
            sp.save(outfile, final_data[chrom])
    # NOTE(review): valid_species / valid_marks are not defined in this
    # function nor visible in this part of the module -- presumably module
    # globals set elsewhere; verify before refactoring.
    with open('start_positions.pkl', 'w') as outfile:
        pickle.dump(dict(windowsize=args.windowsize, valid_species=valid_species,
                         valid_marks=valid_marks, start_positions=start_positions),
                    outfile, -1)
def download_data(args):
    """Download any missing histone modification data from UCSC and check md5s.

    NOTE(review): despite the docstring, the md5 verification below is
    commented out, so partially-downloaded files are NOT detected -- they are
    skipped as "already downloaded".  Confirm whether that is intentional.
    """
    # md5sum.txt lines are "<md5> <filename>"; build filename -> md5.
    md5s = urllib.urlopen(args.base_url % 'md5sum.txt').read().strip().split('\n')
    md5s = dict(reversed(l.strip().split()) for l in md5s)
    for species in args.species:
        for mark in args.marks:
            # Try replicate numbers 0..9; only names listed in md5sum.txt
            # are fetched.
            for rep in range(10):
                fname = args.bam_template.format(species=species,
                                                 mark=mark, repnum=rep)
                if fname not in md5s:
                    continue
                if os.path.exists(fname):
                    #m = hashlib.md5(open(fname, 'rb').read()).hexdigest()
                    #if m != md5s[fname]:  # destroy if md5 doesn't match
                    #    print 'removing incomplete file: %s' % fname
                    #    print m, md5s[fname]
                    #    os.unlink(fname)
                    #else:
                    print 'skipping already downloaded %s' % fname
                    continue
                # Stream the file in 80 KB chunks to avoid loading whole BAMs
                # into memory.  (Python 2 urllib API.)
                with open(fname, 'wb') as outfile:
                    try:
                        print 'downloading %s' % fname
                        page = urllib.urlopen(args.base_url % fname)
                        while True:
                            data = page.read(81920)
                            if not data:
                                break
                            outfile.write(data)
                    except RuntimeError as e:
                        print 'Skipping...', e.message
def histogram_reads(bam_file, windowsize, chromosomes='all', exclude_chroms=['chrM', 'chrY', 'chrX'],
                    skip_qc_fail=True):
    """Histogram the counts along bam_file, resulting in a vector.
    This will concatenate all chromosomes, together, so to get the
    counts for a particular chromosome, pass it as a list, a la
    >>> histogram_reads(my_bam_file, chromosomes=['chr1'])

    Returns a dict mapping chromosome name -> float array of per-window read
    counts (window width = `windowsize` bases).

    NOTE(review): `exclude_chroms` is a mutable default argument.  It is
    never mutated here so behavior is correct, but it is a latent hazard.
    """
    print 'histogramming', bam_file
    reads_bam = pysam.Samfile(bam_file, 'rb')
    # get the chromosome name and lengths for our subset
    if chromosomes == 'all':
        chromosomes = filter(lambda c: c not in exclude_chroms,
                             reads_bam.references)
        chrom_set = set(chromosomes)
        chrom_lengths = {c : reads_bam.lengths[reads_bam.references.index(c)]
                         for c in chromosomes}
    else:
        chromosomes = filter(lambda c: c not in exclude_chroms,
                             chromosomes)
        chrom_lengths = {c : reads_bam.lengths[reads_bam.references.index(c)]
                         for c in chromosomes if c not in
                         exclude_chroms}
        chrom_set = set(chromosomes)
    # # offset of each chromosome into concatenated chrom bins
    # chrom_ends = list(((sp.array(chrom_lengths) // windowsize) + 1).cumsum())
    # chrom_starts = dict(zip(chromosomes, [0] + chrom_ends[:-1]))
    read_counts = {}
    # create the histogram: 1 x sum(lengths) array
    # read_counts = sp.zeros(chrom_ends[-1], dtype=float_type)
    # count the reads in the input
    # NOTE(review): float_type is not defined in this function -- presumably
    # a module-level alias (e.g. sp.float64); verify.
    for read in reads_bam:
        if skip_qc_fail and (read.is_qcfail or read.is_unmapped or read.is_secondary or
                             read.is_duplicate or read.mapq == 0):
            continue  # filter out non-mapping reads
        chrom = reads_bam.references[read.tid]
        if chrom in chrom_set:  # chrom requested?
            # offset = chrom_starts[chrom]
            offset = 0
            if read.is_paired:
                if read.is_proper_pair:
                    # add at the middle of the mates
                    # NOTE: `bin` shadows the builtin; `/ 2` is integer
                    # division under Python 2 for int operands.
                    bin = offset + ((read.pos + read.mpos +
                                     read.rlen) / 2) // windowsize
                    # read_counts[min(chrom_ends[-1] - 1, bin)] += 1.
                    if chrom not in read_counts:
                        read_counts[chrom] = sp.zeros(chrom_lengths[chrom] // windowsize, dtype=float_type)
                    read_counts[chrom][min(chrom_lengths[chrom] // windowsize - 1, bin)] += 1.
                # NOTE(review): improperly-paired reads fall through both
                # branches and are silently dropped -- confirm intended.
            else:
                # add at the middle of the fragment
                # (single-end read: assume ~200bp fragments, so pos + 100)
                bin = offset + (read.pos + 100) // windowsize
                if chrom not in read_counts:
                    read_counts[chrom] = sp.zeros(chrom_lengths[chrom] // windowsize, dtype=float_type)
                read_counts[chrom][min(chrom_lengths[chrom] // windowsize - 1, bin)] += 1.
    return read_counts
def call_significant_sites(fg_counts, bg_counts, max_pvalue):
    """binarize fg_counts (significant=1) using bg_counts as a local poisson
    rate. the poisson survival must be < sig_level.

    Returns a boolean array the same shape as fg_counts.

    NOTE(review): despite the docstring, `bg_counts` is used as a single
    (scalar) expected rate here -- the caller passes the genome-wide rate --
    and the threshold parameter is named `max_pvalue`, not `sig_level`.
    """
    print 'most reads in a bin:', fg_counts.max(), 'poisson expected rate:', bg_counts
    # Debug aid: show which raw counts 0..19 would be called significant.
    print 'read count vs binary present:' , {i: poisson.sf(i, bg_counts) < max_pvalue for i in range(20)}
    # Poisson survival function = P(X > k); significant when below max_pvalue.
    return poisson.sf(fg_counts, bg_counts) < max_pvalue
# Script entry point; `main` is defined elsewhere in this module.
if __name__ == '__main__':
    main()
| bsd-3-clause |
joernhees/scikit-learn | examples/plot_compare_reduction.py | 45 | 4959 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
=================================================================
Selecting dimensionality reduction with Pipeline and GridSearchCV
=================================================================
This example constructs a pipeline that does dimensionality
reduction followed by prediction with a support vector
classifier. It demonstrates the use of ``GridSearchCV`` and
``Pipeline`` to optimize over different classes of estimators in a
single CV run -- unsupervised ``PCA`` and ``NMF`` dimensionality
reductions are compared to univariate feature selection during
the grid search.
Additionally, ``Pipeline`` can be instantiated with the ``memory``
argument to memoize the transformers within the pipeline, avoiding to fit
again the same transformers over and over.
Note that the use of ``memory`` to enable caching becomes interesting when the
fitting of a transformer is costly.
"""
###############################################################################
# Illustration of ``Pipeline`` and ``GridSearchCV``
###############################################################################
# This section illustrates the use of a ``Pipeline`` with
# ``GridSearchCV``
# Authors: Robert McGibbon, Joel Nothman, Guillaume Lemaitre
from __future__ import print_function, division
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.decomposition import PCA, NMF
from sklearn.feature_selection import SelectKBest, chi2
print(__doc__)

# Pipeline: unsupervised dimensionality reduction followed by a linear SVM.
# The 'reduce_dim' step is swapped out via param_grid below.
pipe = Pipeline([
    ('reduce_dim', PCA()),
    ('classify', LinearSVC())
])

N_FEATURES_OPTIONS = [2, 4, 8]
C_OPTIONS = [1, 10, 100, 1000]
# Two sub-grids: decomposition methods (PCA/NMF) use `n_components`,
# univariate selection (SelectKBest) uses `k`.
param_grid = [
    {
        'reduce_dim': [PCA(iterated_power=7), NMF()],
        'reduce_dim__n_components': N_FEATURES_OPTIONS,
        'classify__C': C_OPTIONS
    },
    {
        'reduce_dim': [SelectKBest(chi2)],
        'reduce_dim__k': N_FEATURES_OPTIONS,
        'classify__C': C_OPTIONS
    },
]
reducer_labels = ['PCA', 'NMF', 'KBest(chi2)']

grid = GridSearchCV(pipe, cv=3, n_jobs=1, param_grid=param_grid)
digits = load_digits()
grid.fit(digits.data, digits.target)

mean_scores = np.array(grid.cv_results_['mean_test_score'])
# scores are in the order of param_grid iteration, which is alphabetical
mean_scores = mean_scores.reshape(len(C_OPTIONS), -1, len(N_FEATURES_OPTIONS))
# select score for best C
mean_scores = mean_scores.max(axis=0)
# One group of bars per feature count, one bar per reducer, plus a gap.
bar_offsets = (np.arange(len(N_FEATURES_OPTIONS)) *
               (len(reducer_labels) + 1) + .5)

plt.figure()
COLORS = 'bgrcmyk'
for i, (label, reducer_scores) in enumerate(zip(reducer_labels, mean_scores)):
    plt.bar(bar_offsets + i, reducer_scores, label=label, color=COLORS[i])

plt.title("Comparing feature reduction techniques")
plt.xlabel('Reduced number of features')
plt.xticks(bar_offsets + len(reducer_labels) / 2, N_FEATURES_OPTIONS)
plt.ylabel('Digit classification accuracy')
plt.ylim((0, 1))
plt.legend(loc='upper left')
###############################################################################
# Caching transformers within a ``Pipeline``
###############################################################################
# It is sometimes worthwhile storing the state of a specific transformer
# since it could be used again. Using a pipeline in ``GridSearchCV`` triggers
# such situations. Therefore, we use the argument ``memory`` to enable caching.
#
# .. warning::
# Note that this example is, however, only an illustration since for this
# specific case fitting PCA is not necessarily slower than loading the
# cache. Hence, use the ``memory`` constructor parameter when the fitting
# of a transformer is costly.
from tempfile import mkdtemp
from shutil import rmtree
from sklearn.externals.joblib import Memory

# Create a temporary folder to store the transformers of the pipeline
cachedir = mkdtemp()
# NOTE(review): newer standalone joblib renamed `cachedir` to `location`;
# this example targets the vendored sklearn.externals.joblib API.
memory = Memory(cachedir=cachedir, verbose=10)
cached_pipe = Pipeline([('reduce_dim', PCA()),
                        ('classify', LinearSVC())],
                       memory=memory)

# This time, a cached pipeline will be used within the grid search
grid = GridSearchCV(cached_pipe, cv=3, n_jobs=1, param_grid=param_grid)
digits = load_digits()
grid.fit(digits.data, digits.target)

# Delete the temporary cache before exiting
rmtree(cachedir)

###############################################################################
# The ``PCA`` fitting is only computed at the evaluation of the first
# configuration of the ``C`` parameter of the ``LinearSVC`` classifier. The
# other configurations of ``C`` will trigger the loading of the cached ``PCA``
# estimator data, leading to save processing time. Therefore, the use of
# caching the pipeline using ``memory`` is highly beneficial when fitting
# a transformer is costly.
plt.show()
| bsd-3-clause |
ChanChiChoi/scikit-learn | examples/applications/plot_prediction_latency.py | 234 | 11277 | """
==================
Prediction Latency
==================
This is an example showing the prediction latency of various scikit-learn
estimators.
The goal is to measure the latency one can expect when doing predictions
either in bulk or atomic (i.e. one by one) mode.
The plots represent the distribution of the prediction latency as a boxplot.
"""
# Authors: Eustache Diemert <eustache@diemert.fr>
# License: BSD 3 clause
from __future__ import print_function
from collections import defaultdict
import time
import gc
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import scoreatpercentile
from sklearn.datasets.samples_generator import make_regression
from sklearn.ensemble.forest import RandomForestRegressor
from sklearn.linear_model.ridge import Ridge
from sklearn.linear_model.stochastic_gradient import SGDRegressor
from sklearn.svm.classes import SVR
def _not_in_sphinx():
# Hack to detect whether we are running by the sphinx builder
return '__file__' in globals()
def atomic_benchmark_estimator(estimator, X_test, verbose=False):
    """Measure runtime prediction of each instance.

    Parameters
    ----------
    estimator : fitted estimator supporting ``predict()``
    X_test : array of shape (n_instances, n_features)
        Test inputs; each row is predicted individually.
    verbose : bool
        When True, print min / median / max of the measured runtimes.

    Returns
    -------
    runtimes : np.ndarray of shape (n_instances,)
        Wall-clock seconds taken to predict each instance one at a time.
    """
    n_instances = X_test.shape[0]
    # `np.float` was removed in NumPy 1.24; the builtin `float` is the
    # equivalent (C double / float64) and works on all NumPy versions.
    runtimes = np.zeros(n_instances, dtype=float)
    for i in range(n_instances):
        instance = X_test[i, :]
        start = time.time()
        estimator.predict(instance)
        runtimes[i] = time.time() - start
    if verbose:
        print("atomic_benchmark runtimes:", min(runtimes), scoreatpercentile(
            runtimes, 50), max(runtimes))
    return runtimes
def bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats, verbose):
    """Measure runtime prediction of the whole input.

    Parameters
    ----------
    estimator : fitted estimator supporting ``predict()``
    X_test : array of shape (n_instances, n_features)
        Test inputs predicted as one batch per repeat.
    n_bulk_repeats : int
        How many times the whole-batch prediction is timed.
    verbose : bool
        When True, print min / median / max of the measured runtimes.

    Returns
    -------
    runtimes : np.ndarray of shape (n_bulk_repeats,)
        Per-instance seconds (each batch time divided by n_instances).
    """
    n_instances = X_test.shape[0]
    # `np.float` was removed in NumPy 1.24; the builtin `float` is float64.
    runtimes = np.zeros(n_bulk_repeats, dtype=float)
    for i in range(n_bulk_repeats):
        start = time.time()
        estimator.predict(X_test)
        runtimes[i] = time.time() - start
    # Normalize batch time to per-instance cost.  Vectorized division
    # replaces the original map/lambda round-trip through a Python list
    # (same float64 values, one C-level operation).
    runtimes = runtimes / float(n_instances)
    if verbose:
        print("bulk_benchmark runtimes:", min(runtimes), scoreatpercentile(
            runtimes, 50), max(runtimes))
    return runtimes
def benchmark_estimator(estimator, X_test, n_bulk_repeats=30, verbose=False):
    """
    Measure runtimes of prediction in both atomic and bulk mode.

    Parameters
    ----------
    estimator : already trained estimator supporting `predict()`
    X_test : test input
    n_bulk_repeats : how many times to repeat when evaluating bulk mode

    Returns
    -------
    atomic_runtimes, bulk_runtimes : a pair of `np.array` which contain the
    runtimes in seconds.
    """
    one_by_one = atomic_benchmark_estimator(estimator, X_test, verbose)
    whole_batch = bulk_benchmark_estimator(estimator, X_test,
                                           n_bulk_repeats, verbose)
    return one_by_one, whole_batch
def generate_dataset(n_train, n_test, n_features, noise=0.1, verbose=False):
    """Generate a regression dataset with the given parameters.

    Returns standardized (X_train, y_train, X_test, y_test); features and
    targets are centered/scaled using *training* statistics only.
    """
    if verbose:
        print("generating dataset...")
    X, y, coef = make_regression(n_samples=n_train + n_test,
                                 n_features=n_features, noise=noise, coef=True)
    X_train = X[:n_train]
    y_train = y[:n_train]
    X_test = X[n_train:]
    y_test = y[n_train:]
    # Deterministically shuffle the training split.
    # NOTE: reseeds NumPy's *global* RNG, which affects later random calls.
    idx = np.arange(n_train)
    np.random.seed(13)
    np.random.shuffle(idx)
    X_train = X_train[idx]
    y_train = y_train[idx]
    # Standardize features with train mean/std (applied to test as well).
    std = X_train.std(axis=0)
    mean = X_train.mean(axis=0)
    X_train = (X_train - mean) / std
    X_test = (X_test - mean) / std
    # Standardize targets the same way.
    std = y_train.std(axis=0)
    mean = y_train.mean(axis=0)
    y_train = (y_train - mean) / std
    y_test = (y_test - mean) / std
    # Collect garbage now so later timing loops start from a clean heap.
    gc.collect()
    if verbose:
        print("ok")
    return X_train, y_train, X_test, y_test
def boxplot_runtimes(runtimes, pred_type, configuration):
    """
    Plot a new `Figure` with boxplots of prediction runtimes.

    Parameters
    ----------
    runtimes : list of `np.array` of latencies in micro-seconds
    cls_names : list of estimator class names that generated the runtimes
    pred_type : 'bulk' or 'atomic'

    NOTE(review): the docstring documents `cls_names` but the signature takes
    `configuration` (labels are derived from it below) -- doc drift.
    """
    fig, ax1 = plt.subplots(figsize=(10, 6))
    bp = plt.boxplot(runtimes, )
    # Build one "Name\n(complexity)" label per benchmarked estimator.
    cls_infos = ['%s\n(%d %s)' % (estimator_conf['name'],
                                  estimator_conf['complexity_computer'](
                                      estimator_conf['instance']),
                                  estimator_conf['complexity_label']) for
                 estimator_conf in configuration['estimators']]
    plt.setp(ax1, xticklabels=cls_infos)
    plt.setp(bp['boxes'], color='black')
    plt.setp(bp['whiskers'], color='black')
    plt.setp(bp['fliers'], color='red', marker='+')
    ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
                   alpha=0.5)
    ax1.set_axisbelow(True)
    ax1.set_title('Prediction Time per Instance - %s, %d feats.' % (
        pred_type.capitalize(),
        configuration['n_features']))
    ax1.set_ylabel('Prediction Time (us)')
    plt.show()
def benchmark(configuration):
    """Run the whole benchmark.

    Fits each configured estimator, measures atomic and bulk prediction
    latency, and shows one boxplot figure per mode.
    """
    X_train, y_train, X_test, y_test = generate_dataset(
        configuration['n_train'], configuration['n_test'],
        configuration['n_features'])
    stats = {}
    for estimator_conf in configuration['estimators']:
        print("Benchmarking", estimator_conf['instance'])
        estimator_conf['instance'].fit(X_train, y_train)
        # Reduce GC interference with the timing loops that follow.
        gc.collect()
        a, b = benchmark_estimator(estimator_conf['instance'], X_test)
        stats[estimator_conf['name']] = {'atomic': a, 'bulk': b}
    cls_names = [estimator_conf['name'] for estimator_conf in configuration[
        'estimators']]
    # Convert seconds -> microseconds for plotting.
    runtimes = [1e6 * stats[clf_name]['atomic'] for clf_name in cls_names]
    boxplot_runtimes(runtimes, 'atomic', configuration)
    runtimes = [1e6 * stats[clf_name]['bulk'] for clf_name in cls_names]
    boxplot_runtimes(runtimes, 'bulk (%d)' % configuration['n_test'],
                     configuration)
def n_feature_influence(estimators, n_train, n_test, n_features, percentile):
    """
    Estimate influence of the number of features on prediction time.

    Parameters
    ----------
    estimators : dict of (name (str), estimator) to benchmark
    n_train : nber of training instances (int)
    n_test : nber of testing instances (int)
    n_features : list of feature-space dimensionality to test (int)
    percentile : percentile at which to measure the speed (int [0-100])

    Returns:
    --------
    percentiles : dict(estimator_name,
                       dict(n_features, percentile_perf_in_us))
    """
    percentiles = defaultdict(defaultdict)
    for n in n_features:
        print("benchmarking with %d features" % n)
        X_train, y_train, X_test, y_test = generate_dataset(n_train, n_test, n)
        for cls_name, estimator in estimators.items():
            estimator.fit(X_train, y_train)
            gc.collect()
            # 30 bulk repeats; record the requested percentile in microseconds.
            runtimes = bulk_benchmark_estimator(estimator, X_test, 30, False)
            percentiles[cls_name][n] = 1e6 * scoreatpercentile(runtimes,
                                                               percentile)
    return percentiles
def plot_n_features_influence(percentiles, percentile):
    """Plot latency percentile as a function of feature-space size.

    `percentiles` is the mapping produced by `n_feature_influence`.
    NOTE: only 3 colors are defined, so at most 3 estimators can be drawn.
    """
    fig, ax1 = plt.subplots(figsize=(10, 6))
    colors = ['r', 'g', 'b']
    for i, cls_name in enumerate(percentiles.keys()):
        # Sort by feature count so the line is drawn left-to-right.
        x = np.array(sorted([n for n in percentiles[cls_name].keys()]))
        y = np.array([percentiles[cls_name][n] for n in x])
        plt.plot(x, y, color=colors[i], )
    ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
                   alpha=0.5)
    ax1.set_axisbelow(True)
    ax1.set_title('Evolution of Prediction Time with #Features')
    ax1.set_xlabel('#Features')
    ax1.set_ylabel('Prediction Time at %d%%-ile (us)' % percentile)
    plt.show()
def benchmark_throughputs(configuration, duration_secs=0.1):
    """benchmark throughput for different estimators.

    Returns dict(estimator name -> predictions per second), counting how
    many single-instance predictions complete within `duration_secs`.
    """
    X_train, y_train, X_test, y_test = generate_dataset(
        configuration['n_train'], configuration['n_test'],
        configuration['n_features'])
    throughputs = dict()
    for estimator_config in configuration['estimators']:
        estimator_config['instance'].fit(X_train, y_train)
        start_time = time.time()
        n_predictions = 0
        # NOTE(review): X_test[0] is a 1-D row; newer scikit-learn requires
        # 2-D input (e.g. X_test[[0]]) -- fine for the sklearn version this
        # example was written against.
        while (time.time() - start_time) < duration_secs:
            estimator_config['instance'].predict(X_test[0])
            n_predictions += 1
        throughputs[estimator_config['name']] = n_predictions / duration_secs
    return throughputs
def plot_benchmark_throughput(throughputs, configuration):
    """Bar-plot the predictions/sec measured by `benchmark_throughputs`.

    NOTE: only 3 bar colors are defined, matching the 3 configured estimators.
    """
    fig, ax = plt.subplots(figsize=(10, 6))
    colors = ['r', 'g', 'b']
    # "Name\n(complexity)" label per estimator, same scheme as the boxplots.
    cls_infos = ['%s\n(%d %s)' % (estimator_conf['name'],
                                  estimator_conf['complexity_computer'](
                                      estimator_conf['instance']),
                                  estimator_conf['complexity_label']) for
                 estimator_conf in configuration['estimators']]
    cls_values = [throughputs[estimator_conf['name']] for estimator_conf in
                  configuration['estimators']]
    plt.bar(range(len(throughputs)), cls_values, width=0.5, color=colors)
    ax.set_xticks(np.linspace(0.25, len(throughputs) - 0.75, len(throughputs)))
    ax.set_xticklabels(cls_infos, fontsize=10)
    # Leave 20% headroom above the tallest bar.
    ymax = max(cls_values) * 1.2
    ax.set_ylim((0, ymax))
    ax.set_ylabel('Throughput (predictions/sec)')
    ax.set_title('Prediction Throughput for different estimators (%d '
                 'features)' % configuration['n_features'])
    plt.show()
###############################################################################
# main code

# Wall-clock timer for the whole example run.
start_time = time.time()

# benchmark bulk/atomic prediction speed for various regressors
configuration = {
    'n_train': int(1e3),
    'n_test': int(1e2),
    'n_features': int(1e2),
    # Each entry: display name, estimator instance, and how to compute the
    # model-complexity number shown in the plot labels.
    'estimators': [
        {'name': 'Linear Model',
         'instance': SGDRegressor(penalty='elasticnet', alpha=0.01,
                                  l1_ratio=0.25, fit_intercept=True),
         'complexity_label': 'non-zero coefficients',
         'complexity_computer': lambda clf: np.count_nonzero(clf.coef_)},
        {'name': 'RandomForest',
         'instance': RandomForestRegressor(),
         'complexity_label': 'estimators',
         'complexity_computer': lambda clf: clf.n_estimators},
        {'name': 'SVR',
         'instance': SVR(kernel='rbf'),
         'complexity_label': 'support vectors',
         'complexity_computer': lambda clf: len(clf.support_vectors_)},
    ]
}
benchmark(configuration)

# benchmark n_features influence on prediction speed
percentile = 90
percentiles = n_feature_influence({'ridge': Ridge()},
                                  configuration['n_train'],
                                  configuration['n_test'],
                                  [100, 250, 500], percentile)
plot_n_features_influence(percentiles, percentile)

# benchmark throughput
throughputs = benchmark_throughputs(configuration)
plot_benchmark_throughput(throughputs, configuration)

stop_time = time.time()
print("example run in %.2fs" % (stop_time - start_time))
| bsd-3-clause |
elkingtonmcb/scikit-learn | sklearn/linear_model/tests/test_ransac.py | 216 | 13290 | import numpy as np
from numpy.testing import assert_equal, assert_raises
from numpy.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises_regexp
from scipy import sparse
from sklearn.utils.testing import assert_less
from sklearn.linear_model import LinearRegression, RANSACRegressor
from sklearn.linear_model.ransac import _dynamic_max_trials
# Generate coordinates of line
# Module-level fixture shared by all tests below: 400 points on the line
# y = 0.2 * x + 20, with three rows overwritten to act as gross outliers.
X = np.arange(-200, 200)
y = 0.2 * X + 20
data = np.column_stack([X, y])

# Add some faulty data
outliers = np.array((10, 30, 200))
data[outliers[0], :] = (1000, 1000)
data[outliers[1], :] = (-1000, -1000)
data[outliers[2], :] = (-100, -50)

# Column vector of inputs and 1-D target vector, as the estimators expect.
X = data[:, 0][:, np.newaxis]
y = data[:, 1]
def test_ransac_inliers_outliers():
    # RANSAC must flag exactly the three injected outliers as non-inliers.
    base_estimator = LinearRegression()
    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       residual_threshold=5, random_state=0)
    # Estimate parameters of corrupted data
    ransac_estimator.fit(X, y)
    # Ground truth / reference inlier mask
    ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
                                   ).astype(np.bool_)
    ref_inlier_mask[outliers] = False
    assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)


def test_ransac_is_data_valid():
    # A data validator that always rejects subsets must make fit() raise.
    def is_data_valid(X, y):
        # Each candidate subset has exactly min_samples rows.
        assert_equal(X.shape[0], 2)
        assert_equal(y.shape[0], 2)
        return False

    X = np.random.rand(10, 2)
    y = np.random.rand(10, 1)
    base_estimator = LinearRegression()
    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       residual_threshold=5,
                                       is_data_valid=is_data_valid,
                                       random_state=0)
    assert_raises(ValueError, ransac_estimator.fit, X, y)


def test_ransac_is_model_valid():
    # A model validator that always rejects must likewise make fit() raise.
    def is_model_valid(estimator, X, y):
        assert_equal(X.shape[0], 2)
        assert_equal(y.shape[0], 2)
        return False

    base_estimator = LinearRegression()
    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       residual_threshold=5,
                                       is_model_valid=is_model_valid,
                                       random_state=0)
    assert_raises(ValueError, ransac_estimator.fit, X, y)
def test_ransac_max_trials():
    # max_trials=0 is invalid; with max_trials=11 the search converges in 2.
    base_estimator = LinearRegression()
    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       residual_threshold=5, max_trials=0,
                                       random_state=0)
    assert_raises(ValueError, ransac_estimator.fit, X, y)

    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       residual_threshold=5, max_trials=11,
                                       random_state=0)
    # n_trials_ only exists after fitting.
    assert getattr(ransac_estimator, 'n_trials_', None) is None
    ransac_estimator.fit(X, y)
    assert_equal(ransac_estimator.n_trials_, 2)


def test_ransac_stop_n_inliers():
    # Stop after the first trial once the consensus set reaches 2 inliers.
    base_estimator = LinearRegression()
    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       residual_threshold=5, stop_n_inliers=2,
                                       random_state=0)
    ransac_estimator.fit(X, y)
    assert_equal(ransac_estimator.n_trials_, 1)


def test_ransac_stop_score():
    # Any score beats stop_score=0, so the search stops after one trial.
    base_estimator = LinearRegression()
    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       residual_threshold=5, stop_score=0,
                                       random_state=0)
    ransac_estimator.fit(X, y)
    assert_equal(ransac_estimator.n_trials_, 1)
def test_ransac_score():
    # y is all zeros except two outliers; the robust fit is the zero line,
    # so score is perfect off the outliers and degraded on them.
    X = np.arange(100)[:, None]
    y = np.zeros((100, ))
    y[0] = 1
    y[1] = 100
    base_estimator = LinearRegression()
    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       residual_threshold=0.5, random_state=0)
    ransac_estimator.fit(X, y)
    assert_equal(ransac_estimator.score(X[2:], y[2:]), 1)
    assert_less(ransac_estimator.score(X[:2], y[:2]), 1)


def test_ransac_predict():
    # Same setup: the robust model predicts the zero line everywhere.
    X = np.arange(100)[:, None]
    y = np.zeros((100, ))
    y[0] = 1
    y[1] = 100
    base_estimator = LinearRegression()
    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       residual_threshold=0.5, random_state=0)
    ransac_estimator.fit(X, y)
    assert_equal(ransac_estimator.predict(X), np.zeros(100))
def test_ransac_resid_thresh_no_inliers():
    # When residual_threshold=0.0 there are no inliers and a
    # ValueError with a message should be raised
    base_estimator = LinearRegression()
    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       residual_threshold=0.0, random_state=0)
    # Raw string: the pattern contains the regex escape `\.`, which is an
    # invalid escape in a plain string literal (DeprecationWarning since
    # Python 3.6, SyntaxWarning since 3.12).  The string value is unchanged.
    assert_raises_regexp(ValueError,
                         r"No inliers.*residual_threshold.*0\.0",
                         ransac_estimator.fit, X, y)
def test_ransac_sparse_coo():
    # Sparse COO input must produce the same inlier mask as dense input.
    X_sparse = sparse.coo_matrix(X)

    base_estimator = LinearRegression()
    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       residual_threshold=5, random_state=0)
    ransac_estimator.fit(X_sparse, y)

    ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
                                   ).astype(np.bool_)
    ref_inlier_mask[outliers] = False
    assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)


def test_ransac_sparse_csr():
    # Same check for CSR format.
    X_sparse = sparse.csr_matrix(X)

    base_estimator = LinearRegression()
    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       residual_threshold=5, random_state=0)
    ransac_estimator.fit(X_sparse, y)

    ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
                                   ).astype(np.bool_)
    ref_inlier_mask[outliers] = False
    assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)


def test_ransac_sparse_csc():
    # Same check for CSC format.
    X_sparse = sparse.csc_matrix(X)

    base_estimator = LinearRegression()
    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       residual_threshold=5, random_state=0)
    ransac_estimator.fit(X_sparse, y)

    ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
                                   ).astype(np.bool_)
    ref_inlier_mask[outliers] = False
    assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_none_estimator():
    # Passing None as the base estimator defaults to LinearRegression,
    # so both fits must predict identically.
    base_estimator = LinearRegression()

    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       residual_threshold=5, random_state=0)
    ransac_none_estimator = RANSACRegressor(None, 2, 5, random_state=0)

    ransac_estimator.fit(X, y)
    ransac_none_estimator.fit(X, y)

    assert_array_almost_equal(ransac_estimator.predict(X),
                              ransac_none_estimator.predict(X))


def test_ransac_min_n_samples():
    # Equivalent min_samples specs (absolute, fraction, default) must agree;
    # invalid specs (negative, non-integral float > 1, more than n_samples)
    # must raise.
    base_estimator = LinearRegression()
    ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2,
                                        residual_threshold=5, random_state=0)
    ransac_estimator2 = RANSACRegressor(base_estimator,
                                        min_samples=2. / X.shape[0],
                                        residual_threshold=5, random_state=0)
    ransac_estimator3 = RANSACRegressor(base_estimator, min_samples=-1,
                                        residual_threshold=5, random_state=0)
    ransac_estimator4 = RANSACRegressor(base_estimator, min_samples=5.2,
                                        residual_threshold=5, random_state=0)
    ransac_estimator5 = RANSACRegressor(base_estimator, min_samples=2.0,
                                        residual_threshold=5, random_state=0)
    ransac_estimator6 = RANSACRegressor(base_estimator,
                                        residual_threshold=5, random_state=0)
    ransac_estimator7 = RANSACRegressor(base_estimator,
                                        min_samples=X.shape[0] + 1,
                                        residual_threshold=5, random_state=0)

    ransac_estimator1.fit(X, y)
    ransac_estimator2.fit(X, y)
    ransac_estimator5.fit(X, y)
    ransac_estimator6.fit(X, y)

    assert_array_almost_equal(ransac_estimator1.predict(X),
                              ransac_estimator2.predict(X))
    assert_array_almost_equal(ransac_estimator1.predict(X),
                              ransac_estimator5.predict(X))
    assert_array_almost_equal(ransac_estimator1.predict(X),
                              ransac_estimator6.predict(X))
    assert_raises(ValueError, ransac_estimator3.fit, X, y)
    assert_raises(ValueError, ransac_estimator4.fit, X, y)
    assert_raises(ValueError, ransac_estimator7.fit, X, y)
def test_ransac_multi_dimensional_targets():
    # Stacking y three times must still recover the same outlier rows.
    base_estimator = LinearRegression()
    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       residual_threshold=5, random_state=0)

    # 3-D target values
    yyy = np.column_stack([y, y, y])

    # Estimate parameters of corrupted data
    ransac_estimator.fit(X, yyy)

    # Ground truth / reference inlier mask
    ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
                                   ).astype(np.bool_)
    ref_inlier_mask[outliers] = False
    assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)


def test_ransac_residual_metric():
    # Custom residual metrics (L1 and squared L2 across output dims) must
    # not change the recovered model on this data.
    residual_metric1 = lambda dy: np.sum(np.abs(dy), axis=1)
    residual_metric2 = lambda dy: np.sum(dy ** 2, axis=1)

    yyy = np.column_stack([y, y, y])

    base_estimator = LinearRegression()
    ransac_estimator0 = RANSACRegressor(base_estimator, min_samples=2,
                                        residual_threshold=5, random_state=0)
    ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2,
                                        residual_threshold=5, random_state=0,
                                        residual_metric=residual_metric1)
    ransac_estimator2 = RANSACRegressor(base_estimator, min_samples=2,
                                        residual_threshold=5, random_state=0,
                                        residual_metric=residual_metric2)

    # multi-dimensional
    ransac_estimator0.fit(X, yyy)
    ransac_estimator1.fit(X, yyy)
    ransac_estimator2.fit(X, yyy)
    assert_array_almost_equal(ransac_estimator0.predict(X),
                              ransac_estimator1.predict(X))
    assert_array_almost_equal(ransac_estimator0.predict(X),
                              ransac_estimator2.predict(X))

    # one-dimensional
    ransac_estimator0.fit(X, y)
    ransac_estimator2.fit(X, y)
    assert_array_almost_equal(ransac_estimator0.predict(X),
                              ransac_estimator2.predict(X))
def test_ransac_default_residual_threshold():
    # Omitting residual_threshold (default = MAD of y) must still flag
    # exactly the injected outliers.
    base_estimator = LinearRegression()
    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       random_state=0)

    # Estimate parameters of corrupted data
    ransac_estimator.fit(X, y)

    # Ground truth / reference inlier mask
    ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
                                   ).astype(np.bool_)
    ref_inlier_mask[outliers] = False
    assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)


def test_ransac_dynamic_max_trials():
    # Numbers hand-calculated and confirmed on page 119 (Table 4.3) in
    #   Hartley, R.~I. and Zisserman, A., 2004,
    #   Multiple View Geometry in Computer Vision, Second Edition,
    #   Cambridge University Press, ISBN: 0521540518

    # e = 0%, min_samples = X
    assert_equal(_dynamic_max_trials(100, 100, 2, 0.99), 1)

    # e = 5%, min_samples = 2
    assert_equal(_dynamic_max_trials(95, 100, 2, 0.99), 2)
    # e = 10%, min_samples = 2
    assert_equal(_dynamic_max_trials(90, 100, 2, 0.99), 3)
    # e = 30%, min_samples = 2
    assert_equal(_dynamic_max_trials(70, 100, 2, 0.99), 7)
    # e = 50%, min_samples = 2
    assert_equal(_dynamic_max_trials(50, 100, 2, 0.99), 17)

    # e = 5%, min_samples = 8
    assert_equal(_dynamic_max_trials(95, 100, 8, 0.99), 5)
    # e = 10%, min_samples = 8
    assert_equal(_dynamic_max_trials(90, 100, 8, 0.99), 9)
    # e = 30%, min_samples = 8
    assert_equal(_dynamic_max_trials(70, 100, 8, 0.99), 78)
    # e = 50%, min_samples = 8
    assert_equal(_dynamic_max_trials(50, 100, 8, 0.99), 1177)

    # e = 0%, min_samples = 10
    assert_equal(_dynamic_max_trials(1, 100, 10, 0), 0)
    assert_equal(_dynamic_max_trials(1, 100, 10, 1), float('inf'))

    # stop_probability outside [0, 1] must raise on fit.
    base_estimator = LinearRegression()
    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       stop_probability=-0.1)
    assert_raises(ValueError, ransac_estimator.fit, X, y)

    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       stop_probability=1.1)
    assert_raises(ValueError, ransac_estimator.fit, X, y)
| bsd-3-clause |
BrechtBa/plottools | doc/source/conf.py | 1 | 10429 | # -*- coding: utf-8 -*-
#
# Someproject documentation build configuration file, created by
# sphinx-quickstart on Mon Sep 05 08:56:23 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
import sys
import os
# Make the package root importable so autodoc can find `plottools`, and add
# the local `sphinxext` directory for bundled Sphinx extensions.
sys.path.insert(0, os.path.abspath('../..'))
sys.path.append(os.path.abspath('sphinxext'))
# import the version string
from plottools.__version__ import version as __version__
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.mathjax',
    'sphinx.ext.viewcode',
    'sphinx.ext.autosummary',
    'matplotlib.sphinxext.plot_directive',
    'numpydoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'plottools'
copyright = u'2016, Brecht Baeten'
author = u'Brecht Baeten'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# NOTE(review): ``unicode`` exists only on Python 2 -- this conf.py raises
# NameError under a Python 3 Sphinx; confirm the docs build still targets py2.
version = unicode(__version__,'utf-8')
# The full version, including alpha/beta/rc tags.
release = unicode(__version__,'utf-8')
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'Someproject v3.1.2'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html',
'searchbox.html',
'donate.html'
]
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Someprojectdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'plottools.tex', u'plottools Documentation',
u'Brecht Baeten', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# It false, will not define \strong, \code, itleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'plottools', u'plottools Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'plottools', u'plottools Documentation',
author, 'plottools', 'some descr',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
autosummary_generate = True
# -- Options for plot directive -------------------------------------------
plot_include_source = True | gpl-2.0 |
ssaeger/scikit-learn | sklearn/tree/tests/test_export.py | 31 | 9588 | """
Testing for export functions of decision trees (sklearn.tree.export).
"""
from re import finditer
from numpy.testing import assert_equal
from nose.tools import assert_raises
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.tree import export_graphviz
from sklearn.externals.six import StringIO
from sklearn.utils.testing import assert_in
# toy sample: X is linearly separable on the sign of the first feature;
# y2 supplies multi-output labels and w per-sample weights for the
# weighted/multi-output export tests below.
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
y2 = [[-1, 1], [-1, 1], [-1, 1], [1, 2], [1, 2], [1, 3]]
w = [1, 1, 1, .5, .5, .5]
def test_graphviz_toy():
    """Exact-string regression test for export_graphviz on the toy sample.

    Each ``contents2`` below is an exact DOT fixture: default export,
    feature_names, class_names, rendering options, max_depth truncation,
    weighted multi-output trees, and regression trees.  Any formatting
    change in export_graphviz must update these fixtures.
    """
    # Check correctness of export_graphviz
    clf = DecisionTreeClassifier(max_depth=3,
                                 min_samples_split=2,
                                 criterion="gini",
                                 random_state=2)
    clf.fit(X, y)
    # Test export code
    out = StringIO()
    export_graphviz(clf, out_file=out)
    contents1 = out.getvalue()
    contents2 = 'digraph Tree {\n' \
                'node [shape=box] ;\n' \
                '0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
                'value = [3, 3]"] ;\n' \
                '1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n' \
                '0 -> 1 [labeldistance=2.5, labelangle=45, ' \
                'headlabel="True"] ;\n' \
                '2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n' \
                '0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
                'headlabel="False"] ;\n' \
                '}'
    assert_equal(contents1, contents2)
    # Test with feature_names
    out = StringIO()
    export_graphviz(clf, out_file=out, feature_names=["feature0", "feature1"])
    contents1 = out.getvalue()
    contents2 = 'digraph Tree {\n' \
                'node [shape=box] ;\n' \
                '0 [label="feature0 <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
                'value = [3, 3]"] ;\n' \
                '1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n' \
                '0 -> 1 [labeldistance=2.5, labelangle=45, ' \
                'headlabel="True"] ;\n' \
                '2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n' \
                '0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
                'headlabel="False"] ;\n' \
                '}'
    assert_equal(contents1, contents2)
    # Test with class_names
    out = StringIO()
    export_graphviz(clf, out_file=out, class_names=["yes", "no"])
    contents1 = out.getvalue()
    contents2 = 'digraph Tree {\n' \
                'node [shape=box] ;\n' \
                '0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
                'value = [3, 3]\\nclass = yes"] ;\n' \
                '1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]\\n' \
                'class = yes"] ;\n' \
                '0 -> 1 [labeldistance=2.5, labelangle=45, ' \
                'headlabel="True"] ;\n' \
                '2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]\\n' \
                'class = no"] ;\n' \
                '0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
                'headlabel="False"] ;\n' \
                '}'
    assert_equal(contents1, contents2)
    # Test plot_options
    out = StringIO()
    export_graphviz(clf, out_file=out, filled=True, impurity=False,
                    proportion=True, special_characters=True, rounded=True)
    contents1 = out.getvalue()
    contents2 = 'digraph Tree {\n' \
                'node [shape=box, style="filled, rounded", color="black", ' \
                'fontname=helvetica] ;\n' \
                'edge [fontname=helvetica] ;\n' \
                '0 [label=<X<SUB>0</SUB> ≤ 0.0<br/>samples = 100.0%<br/>' \
                'value = [0.5, 0.5]>, fillcolor="#e5813900"] ;\n' \
                '1 [label=<samples = 50.0%<br/>value = [1.0, 0.0]>, ' \
                'fillcolor="#e58139ff"] ;\n' \
                '0 -> 1 [labeldistance=2.5, labelangle=45, ' \
                'headlabel="True"] ;\n' \
                '2 [label=<samples = 50.0%<br/>value = [0.0, 1.0]>, ' \
                'fillcolor="#399de5ff"] ;\n' \
                '0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
                'headlabel="False"] ;\n' \
                '}'
    assert_equal(contents1, contents2)
    # Test max_depth
    out = StringIO()
    export_graphviz(clf, out_file=out, max_depth=0, class_names=True)
    contents1 = out.getvalue()
    contents2 = 'digraph Tree {\n' \
                'node [shape=box] ;\n' \
                '0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
                'value = [3, 3]\\nclass = y[0]"] ;\n' \
                '1 [label="(...)"] ;\n' \
                '0 -> 1 ;\n' \
                '2 [label="(...)"] ;\n' \
                '0 -> 2 ;\n' \
                '}'
    assert_equal(contents1, contents2)
    # Test max_depth with plot_options
    out = StringIO()
    export_graphviz(clf, out_file=out, max_depth=0, filled=True,
                    node_ids=True)
    contents1 = out.getvalue()
    contents2 = 'digraph Tree {\n' \
                'node [shape=box, style="filled", color="black"] ;\n' \
                '0 [label="node #0\\nX[0] <= 0.0\\ngini = 0.5\\n' \
                'samples = 6\\nvalue = [3, 3]", fillcolor="#e5813900"] ;\n' \
                '1 [label="(...)", fillcolor="#C0C0C0"] ;\n' \
                '0 -> 1 ;\n' \
                '2 [label="(...)", fillcolor="#C0C0C0"] ;\n' \
                '0 -> 2 ;\n' \
                '}'
    assert_equal(contents1, contents2)
    # Test multi-output with weighted samples
    clf = DecisionTreeClassifier(max_depth=2,
                                 min_samples_split=2,
                                 criterion="gini",
                                 random_state=2)
    clf = clf.fit(X, y2, sample_weight=w)
    out = StringIO()
    export_graphviz(clf, out_file=out, filled=True, impurity=False)
    contents1 = out.getvalue()
    contents2 = 'digraph Tree {\n' \
                'node [shape=box, style="filled", color="black"] ;\n' \
                '0 [label="X[0] <= 0.0\\nsamples = 6\\n' \
                'value = [[3.0, 1.5, 0.0]\\n' \
                '[3.0, 1.0, 0.5]]", fillcolor="#e5813900"] ;\n' \
                '1 [label="samples = 3\\nvalue = [[3, 0, 0]\\n' \
                '[3, 0, 0]]", fillcolor="#e58139ff"] ;\n' \
                '0 -> 1 [labeldistance=2.5, labelangle=45, ' \
                'headlabel="True"] ;\n' \
                '2 [label="X[0] <= 1.5\\nsamples = 3\\n' \
                'value = [[0.0, 1.5, 0.0]\\n' \
                '[0.0, 1.0, 0.5]]", fillcolor="#e5813986"] ;\n' \
                '0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
                'headlabel="False"] ;\n' \
                '3 [label="samples = 2\\nvalue = [[0, 1, 0]\\n' \
                '[0, 1, 0]]", fillcolor="#e58139ff"] ;\n' \
                '2 -> 3 ;\n' \
                '4 [label="samples = 1\\nvalue = [[0.0, 0.5, 0.0]\\n' \
                '[0.0, 0.0, 0.5]]", fillcolor="#e58139ff"] ;\n' \
                '2 -> 4 ;\n' \
                '}'
    assert_equal(contents1, contents2)
    # Test regression output with plot_options
    clf = DecisionTreeRegressor(max_depth=3,
                                min_samples_split=2,
                                criterion="mse",
                                random_state=2)
    clf.fit(X, y)
    out = StringIO()
    export_graphviz(clf, out_file=out, filled=True, leaves_parallel=True,
                    rotate=True, rounded=True)
    contents1 = out.getvalue()
    contents2 = 'digraph Tree {\n' \
                'node [shape=box, style="filled, rounded", color="black", ' \
                'fontname=helvetica] ;\n' \
                'graph [ranksep=equally, splines=polyline] ;\n' \
                'edge [fontname=helvetica] ;\n' \
                'rankdir=LR ;\n' \
                '0 [label="X[0] <= 0.0\\nmse = 1.0\\nsamples = 6\\n' \
                'value = 0.0", fillcolor="#e5813980"] ;\n' \
                '1 [label="mse = 0.0\\nsamples = 3\\nvalue = -1.0", ' \
                'fillcolor="#e5813900"] ;\n' \
                '0 -> 1 [labeldistance=2.5, labelangle=-45, ' \
                'headlabel="True"] ;\n' \
                '2 [label="mse = 0.0\\nsamples = 3\\nvalue = 1.0", ' \
                'fillcolor="#e58139ff"] ;\n' \
                '0 -> 2 [labeldistance=2.5, labelangle=45, ' \
                'headlabel="False"] ;\n' \
                '{rank=same ; 0} ;\n' \
                '{rank=same ; 1; 2} ;\n' \
                '}'
    assert_equal(contents1, contents2)
def test_graphviz_errors():
    """export_graphviz must reject empty feature_names / class_names."""
    clf = DecisionTreeClassifier(max_depth=3, min_samples_split=2)
    clf.fit(X, y)
    # Both an empty feature_names list and an empty class_names list leave
    # the exporter without a label for node 0, so each must raise IndexError.
    for bad_kwargs in ({'feature_names': []}, {'class_names': []}):
        stream = StringIO()
        assert_raises(IndexError, export_graphviz, clf, stream, **bad_kwargs)
def test_friedman_mse_in_graphviz():
    """Exported DOT for friedman_mse trees must name the criterion.

    Fits a single regression tree and a small gradient-boosting ensemble
    (both use criterion='friedman_mse'), exports every tree into one
    buffer, and checks each node label mentions the criterion.
    """
    clf = DecisionTreeRegressor(criterion="friedman_mse", random_state=0)
    clf.fit(X, y)
    dot_data = StringIO()
    export_graphviz(clf, out_file=dot_data)
    clf = GradientBoostingClassifier(n_estimators=2, random_state=0)
    clf.fit(X, y)
    for estimator in clf.estimators_:
        export_graphviz(estimator[0], out_file=dot_data)
    # BUG FIX: the pattern must be a raw string -- "\[" in a plain literal
    # is an invalid escape sequence (DeprecationWarning on Python 3.6+,
    # SyntaxError in later versions).
    for finding in finditer(r"\[.*?samples.*?\]", dot_data.getvalue()):
        assert_in("friedman_mse", finding.group())
| bsd-3-clause |
xzh86/scikit-learn | examples/cluster/plot_kmeans_assumptions.py | 270 | 2040 | """
====================================
Demonstration of k-means assumptions
====================================
This example is meant to illustrate situations where k-means will produce
unintuitive and possibly unexpected clusters. In the first three plots, the
input data does not conform to some implicit assumption that k-means makes and
undesirable clusters are produced as a result. In the last plot, k-means
returns intuitive clusters despite unevenly sized blobs.
"""
print(__doc__)
# Author: Phil Roth <mr.phil.roth@gmail.com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
# One 2x2 figure; each panel demonstrates a k-means assumption being violated.
plt.figure(figsize=(12, 12))
n_samples = 1500
random_state = 170  # fixed seed keeps the figure reproducible
X, y = make_blobs(n_samples=n_samples, random_state=random_state)
# Incorrect number of clusters
y_pred = KMeans(n_clusters=2, random_state=random_state).fit_predict(X)
plt.subplot(221)
plt.scatter(X[:, 0], X[:, 1], c=y_pred)
plt.title("Incorrect Number of Blobs")
# Anisotropicly distributed data
transformation = [[ 0.60834549, -0.63667341], [-0.40887718, 0.85253229]]
# Linear map stretches/rotates the blobs so isotropic distance misleads.
X_aniso = np.dot(X, transformation)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_aniso)
plt.subplot(222)
plt.scatter(X_aniso[:, 0], X_aniso[:, 1], c=y_pred)
plt.title("Anisotropicly Distributed Blobs")
# Different variance
X_varied, y_varied = make_blobs(n_samples=n_samples,
                                cluster_std=[1.0, 2.5, 0.5],
                                random_state=random_state)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_varied)
plt.subplot(223)
plt.scatter(X_varied[:, 0], X_varied[:, 1], c=y_pred)
plt.title("Unequal Variance")
# Unevenly sized blobs: keep 500/100/10 points of the three original blobs.
X_filtered = np.vstack((X[y == 0][:500], X[y == 1][:100], X[y == 2][:10]))
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_filtered)
plt.subplot(224)
plt.scatter(X_filtered[:, 0], X_filtered[:, 1], c=y_pred)
plt.title("Unevenly Sized Blobs")
plt.show()
| bsd-3-clause |
MatthewThe/kaggle | titanic/bin/random_forest.py | 1 | 2193 | #!/usr/bin/python
import csv
import numpy as np
from sklearn import svm
from sklearn.ensemble import RandomForestClassifier
import matplotlib.pyplot as plt
# 0: PassengerId, 1: Survived, 2: Pclass, 3: Name, 4: Sex (male,female), 5: Age, 6: SibSp, 7: ParCh, 8: Ticket, 9: Fare, 10: Cabin, 11: Embarked (S,C,Q)
# 1,0,3,"Braund, Mr. Owen Harris",male,22,1,0,A/5 21171,7.25,,S
def loadData(inputFile, means=None, stds=None):
    """Load Titanic passengers from a CSV file into a standardized matrix.

    Expected column layout (Kaggle Titanic): PassengerId, Survived, Pclass,
    Name, Sex, Age, SibSp, ParCh, Ticket, Fare, Cabin, Embarked.

    Parameters
    ----------
    inputFile : str
        Path to the CSV file (first row is a header and is skipped).
    means, stds : list of float, optional
        Per-column normalization statistics.  When omitted they are computed
        from this file and returned, so a test set can reuse the
        training-set normalization.

    Returns
    -------
    (X, y, means, stds) when the statistics were computed here,
    otherwise (X, y).
    """
    # BUG FIX: the defaults used to be mutable (means=[], stds=[]); they
    # persisted across calls, so a second loadData(trainFile) call reused
    # stale statistics and even changed the return arity.  Use None
    # sentinels instead.
    if means is None:
        means = []
    if stds is None:
        stds = []
    X = []  # feature rows
    y = []  # labels: Survived (0/1), column 1
    # BUG FIX: the file was previously opened in 'rb' and never closed;
    # text mode + a context manager works on Python 2 and 3.
    with open(inputFile, 'r') as handle:
        reader = csv.reader(handle)
        next(reader)  # skip the header row (py2/py3 compatible)
        for row in reader:
            y.append(int(row[1]))
            pclass = int(row[2])
            sex = 1 if row[4] == "male" else 0
            # Crude imputation: missing ages become 25.
            age = float(row[5]) if row[5] != "" else 25
            sibsp = int(row[6])
            parch = int(row[7])
            fare = float(row[9])
            # One-hot encoding of the embarkation port.
            embarkeds = 1 if row[11] == "S" else 0
            embarkedc = 1 if row[11] == "C" else 0
            embarkedq = 1 if row[11] == "Q" else 0
            X.append([pclass, sex, age, sibsp, parch, fare,
                      embarkeds, embarkedc, embarkedq])
    # Scale each feature column to standard normal.
    X = np.array(X)
    calcNormalization = len(means) == 0
    for col in range(X.shape[1]):
        a = X[:, col]
        if calcNormalization:
            means.append(np.mean(a))
            stds.append(np.std(a))
        # NOTE(review): a constant column gives std == 0 and divides by
        # zero here (inf/nan), same as the original code -- worth a guard.
        X[:, col] = (a - means[col]) / stds[col]
    if calcNormalization:
        return X, y, means, stds
    return X, y
# Train on the training split; reuse its normalization statistics for the
# validation split so both are scaled identically.  (Python 2 script.)
trainFile = '../data/train_validation.csv'
X, y, means, stds = loadData(trainFile)
print "#Passengers", len(y)
print "Survived", sum(y)
print "Deceased", len(y) - sum(y)
testFile = '../data/test_validation.csv'
XTest, yTest = loadData(testFile, means, stds)
rf_clf = RandomForestClassifier(n_estimators=100)
rf_clf.fit(X, y)
yTestPredicted = rf_clf.predict(XTest)
yTrainPredicted = rf_clf.predict(X)
print ""
# NOTE(review): the label says "(10)" but n_estimators is 100 -- update one.
print "Random forest (10)"
print "Correctly predicted Train:", sum(y == yTrainPredicted)
print "Incorrectly predicted Train:", sum(y != yTrainPredicted)
print "Correctly predicted Test:", sum(yTest == yTestPredicted)
print "Incorrectly predicted Test:", sum(yTest != yTestPredicted)
# Nothing is plotted above, so this only opens an empty figure window if any.
plt.show()
| apache-2.0 |
nickdex/cosmos | code/artificial_intelligence/src/particle_swarm_optimization/gbestPSO/Gbest3D.py | 3 | 2194 | import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def f(args):
    """Sphere objective: sum of squared coordinates (minimum 0 at origin)."""
    coords = np.asarray(args)
    return np.sum(coords ** 2)
# Gbest PSO on the 2-D sphere function, animated on a 3-D surface plot.
dimensions = 2
boundary = (-10, 10)   # search-space bounds per dimension
particles = 20         # swarm size
positions = []
velocities = []
w_min = 0.01           # final inertia weight
w_max = 0.1            # initial inertia weight
c1 = 0                 # cognitive coefficient (recomputed every iteration)
c2 = 0                 # social coefficient (recomputed every iteration)
num_iters = 20
# Random initial positions inside the boundary, small random velocities.
for i in range(particles):
    positions.append(np.random.uniform(boundary[0], boundary[1], dimensions))
positions = np.array(positions)
for i in range(particles):
    velocities.append(np.random.uniform(-1, 1, dimensions))
velocities = np.array(velocities)
# BUG FIX: personal/global bests must be *copies*.  Previously
# ``best_locals = positions`` aliased the live position array, so the
# recorded "bests" silently tracked the current positions and the PSO
# memory terms (pbest/gbest attraction) were effectively no-ops.
best_locals = positions.copy()
gbest = positions[0].copy()
for p in positions:
    if f(p) < f(gbest):
        gbest = p.copy()
fig = plt.figure()
ax = fig.add_subplot(111, projection="3d")
fig.show()
# Sample the objective on a grid once; the surface is redrawn each frame.
x = y = np.arange(boundary[0], boundary[1], 0.1)
surface_x, surface_y = np.meshgrid(x, y)
surface_z = np.array(
    [f([x, y]) for x, y in zip(np.ravel(surface_x), np.ravel(surface_y))]
).reshape(surface_x.shape)
positions_x = [p[0] for p in positions]
positions_y = [p[1] for p in positions]
positions_z = [f([x, y]) for x, y in zip(positions_x, positions_y)]
for k in range(num_iters):
    for p in range(particles):
        for d in range(dimensions):
            # Time-varying coefficients: exploration (high c1) shifts to
            # exploitation (high c2); inertia decays linearly.
            c1 = 2.5 - 2 * (k / num_iters)
            c2 = 0.5 + 2 * (k / num_iters)
            w = w_max - ((w_max - w_min) * k) / num_iters
            r1, r2 = np.random.rand(), np.random.rand()
            velocities[p][d] = (
                w * velocities[p][d]
                + c1 * r1 * (best_locals[p][d] - positions[p][d])
                + c2 * r2 * (gbest[d] - positions[p][d])
            )
            positions[p][d] += velocities[p][d]
        if f(positions[p]) < f(best_locals[p]):
            best_locals[p] = positions[p].copy()  # BUG FIX: copy, not alias
        if f(best_locals[p]) < f(gbest):
            gbest = best_locals[p].copy()  # BUG FIX: copy, not alias
    # Redraw the surface and the swarm for this iteration.
    ax.clear()
    ax.plot_surface(surface_x, surface_y, surface_z, alpha=0.3)
    positions_x = [p[0] for p in positions]
    positions_y = [p[1] for p in positions]
    positions_z = [f([x, y]) for x, y in zip(positions_x, positions_y)]
    ax.scatter(positions_x, positions_y, positions_z, c="#FF0000")
    fig.canvas.draw()
    print("Iter: {}".format(k))
| gpl-3.0 |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/skimage/transform/tests/test_radon_transform.py | 2 | 15936 | from __future__ import print_function, division
import os
import itertools
import numpy as np
from skimage import data_dir
from skimage.io import imread
from skimage.transform import radon, iradon, iradon_sart, rescale
from skimage._shared import testing
from skimage._shared.testing import test_parallel
from skimage._shared._warnings import expected_warnings
PHANTOM = imread(os.path.join(data_dir, "phantom.png"),
as_gray=True)[::2, ::2]
PHANTOM = rescale(PHANTOM, 0.5, order=1,
mode='constant', anti_aliasing=False, multichannel=False)
def _debug_plot(original, result, sinogram=None):
    """Show original/result (and optionally the sinogram) side by side.

    Interactive debugging helper; only invoked when a local ``debug`` flag
    in the calling check is flipped on.
    """
    # Imported lazily so running the test module does not require matplotlib.
    from matplotlib import pyplot as plt
    imkwargs = dict(cmap='gray', interpolation='nearest')
    if sinogram is None:
        # 1x3 grid: original, result, difference.
        plt.figure(figsize=(15, 6))
        sp = 130
    else:
        # 2x2 grid with the sinogram in the first panel.
        plt.figure(figsize=(11, 11))
        sp = 221
        plt.subplot(sp + 0)
        plt.imshow(sinogram, aspect='auto', **imkwargs)
    plt.subplot(sp + 1)
    plt.imshow(original, **imkwargs)
    plt.subplot(sp + 2)
    # Clamp the result to the original's value range for a fair comparison.
    plt.imshow(result, vmin=original.min(), vmax=original.max(), **imkwargs)
    plt.subplot(sp + 3)
    plt.imshow(result - original, **imkwargs)
    plt.colorbar()
    plt.show()
def _rescale_intensity(x):
x = x.astype(float)
x -= x.min()
x /= x.max()
return x
def check_radon_center(shape, circle):
    """Radon transform of a single centre pixel must be a horizontal line.

    A point source at the rotation centre projects onto the same detector
    position at every angle, so the per-angle argmax of the sinogram is
    expected to be (numerically) constant.
    """
    # Create a test image with only a single non-zero pixel at the origin
    image = np.zeros(shape, dtype=np.float)
    image[(shape[0] // 2, shape[1] // 2)] = 1.
    # Calculate the sinogram
    theta = np.linspace(0., 180., max(shape), endpoint=False)
    sinogram = radon(image, theta=theta, circle=circle)
    # The sinogram should be a straight, horizontal line
    sinogram_max = np.argmax(sinogram, axis=0)
    print(sinogram_max)
    assert np.std(sinogram_max) < 1e-6
# Parameter grids: even/odd square shapes crossed with the circle flag.
shapes_for_test_radon_center = [(16, 16), (17, 17)]
circles_for_test_radon_center = [False, True]
@testing.parametrize("shape, circle",
                     itertools.product(shapes_for_test_radon_center,
                                       circles_for_test_radon_center))
def test_radon_center(shape, circle):
    check_radon_center(shape, circle)
# Rectangular images are only exercised without the inscribed-circle mode.
rectangular_shapes = [(32, 16), (33, 17)]
@testing.parametrize("shape", rectangular_shapes)
def test_radon_center_rectangular(shape):
    check_radon_center(shape, False)
def check_iradon_center(size, theta, circle):
    """Reconstructions from theta and theta + 180 degrees must coincide.

    A single-projection sinogram with one non-zero pixel at the rotation
    centre is back-projected for an angle and its opposite; a correctly
    centred iradon yields identical results.
    """
    debug = False
    # Create a test sinogram corresponding to a single projection
    # with a single non-zero pixel at the rotation center
    if circle:
        sinogram = np.zeros((size, 1), dtype=np.float)
        sinogram[size // 2, 0] = 1.
    else:
        # Without the circle assumption the detector spans the image diagonal.
        diagonal = int(np.ceil(np.sqrt(2) * size))
        sinogram = np.zeros((diagonal, 1), dtype=np.float)
        sinogram[sinogram.shape[0] // 2, 0] = 1.
    maxpoint = np.unravel_index(np.argmax(sinogram), sinogram.shape)
    print('shape of generated sinogram', sinogram.shape)
    print('maximum in generated sinogram', maxpoint)
    # Compare reconstructions for theta=angle and theta=angle + 180;
    # these should be exactly equal
    reconstruction = iradon(sinogram, theta=[theta], circle=circle)
    reconstruction_opposite = iradon(sinogram, theta=[theta + 180],
                                     circle=circle)
    print('rms deviance:',
          np.sqrt(np.mean((reconstruction_opposite - reconstruction)**2)))
    if debug:
        import matplotlib.pyplot as plt
        imkwargs = dict(cmap='gray', interpolation='nearest')
        plt.figure()
        plt.subplot(221)
        plt.imshow(sinogram, **imkwargs)
        plt.subplot(222)
        plt.imshow(reconstruction_opposite - reconstruction, **imkwargs)
        plt.subplot(223)
        plt.imshow(reconstruction, **imkwargs)
        plt.subplot(224)
        plt.imshow(reconstruction_opposite, **imkwargs)
        plt.show()
    assert np.allclose(reconstruction, reconstruction_opposite)
# Parameter grid for test_iradon_center: even/odd sizes x two angles x
# both circle modes.
sizes_for_test_iradon_center = [16, 17]
thetas_for_test_iradon_center = [0, 90]
circles_for_test_iradon_center = [False, True]
@testing.parametrize("size, theta, circle",
                     itertools.product(sizes_for_test_iradon_center,
                                       thetas_for_test_iradon_center,
                                       circles_for_test_iradon_center))
def test_iradon_center(size, theta, circle):
    # BUG FIX: the grid previously referenced circles_for_test_radon_center,
    # leaving circles_for_test_iradon_center defined but unused (it only
    # worked because both lists happen to hold the same values).
    check_iradon_center(size, theta, circle)
def check_radon_iradon(interpolation_type, filter_type):
    """Round-trip the phantom and require a small mean reconstruction error.

    The tolerance depends on the filter/interpolation pair: ramp and
    shepp-logan reconstruct most faithfully and get tighter bounds.
    """
    debug = False
    image = PHANTOM
    reconstructed = iradon(radon(image, circle=False), filter=filter_type,
                           interpolation=interpolation_type, circle=False)
    delta = np.mean(np.abs(image - reconstructed))
    print('\n\tmean error:', delta)
    if debug:
        _debug_plot(image, reconstructed)
    if filter_type in ('ramp', 'shepp-logan'):
        if interpolation_type == 'nearest':
            allowed_delta = 0.03
        else:
            allowed_delta = 0.025
    else:
        allowed_delta = 0.05
    assert delta < allowed_delta
# All filter x interpolation combinations; cubic is expensive, so it is
# paired with a single filter below instead of the full product.
filter_types = ["ramp", "shepp-logan", "cosine", "hamming", "hann"]
interpolation_types = ['linear', 'nearest']
radon_iradon_inputs = list(itertools.product(interpolation_types,
                                             filter_types))
# cubic interpolation is slow; only run one test for it
radon_iradon_inputs.append(('cubic', 'shepp-logan'))
@testing.parametrize("interpolation_type, filter_type",
                     radon_iradon_inputs)
def test_radon_iradon(interpolation_type, filter_type):
    check_radon_iradon(interpolation_type, filter_type)
def test_iradon_angles():
    """Reconstruction quality must degrade with fewer projection angles."""
    size = 100
    # Synthetic data
    image = np.tri(size) + np.tri(size)[::-1]
    # Large number of projections: a good quality is expected
    nb_angles = 200
    theta = np.linspace(0, 180, nb_angles, endpoint=False)
    radon_image_200 = radon(image, theta=theta, circle=False)
    reconstructed = iradon(radon_image_200, circle=False)
    delta_200 = np.mean(abs(_rescale_intensity(image) -
                            _rescale_intensity(reconstructed)))
    assert delta_200 < 0.03
    # Lower number of projections
    nb_angles = 80
    # BUG FIX: theta must be rebuilt for the reduced angle count.  The
    # 200-angle theta was previously reused, so radon_image_80 was
    # identical to radon_image_200 and nb_angles = 80 had no effect.
    theta = np.linspace(0, 180, nb_angles, endpoint=False)
    radon_image_80 = radon(image, theta=theta, circle=False)
    # Test whether the sum of all projections is approximately the same
    s = radon_image_80.sum(axis=0)
    assert np.allclose(s, s[0], rtol=0.01)
    reconstructed = iradon(radon_image_80, circle=False)
    delta_80 = np.mean(abs(image / np.max(image) -
                           reconstructed / np.max(reconstructed)))
    # Loss of quality when the number of projections is reduced
    assert delta_80 > delta_200
def check_radon_iradon_minimal(shape, slices):
    """Round-trip a tiny single-pixel image through radon/iradon.

    When exactly one pixel is set, the reconstruction's brightest pixel
    must coincide with the source pixel.
    """
    debug = False
    theta = np.arange(180)
    image = np.zeros(shape, dtype=np.float)
    image[slices] = 1.
    sinogram = radon(image, theta, circle=False)
    reconstructed = iradon(sinogram, theta, circle=False)
    print('\n\tMaximum deviation:', np.max(np.abs(image - reconstructed)))
    if debug:
        _debug_plot(image, reconstructed, sinogram)
    if image.sum() == 1:
        # Peak of the reconstruction must land on the source pixel.
        assert (np.unravel_index(np.argmax(reconstructed), image.shape)
                == np.unravel_index(np.argmax(image), image.shape))
# Small square shapes whose centre neighbourhoods are exercised below.
shapes = [(3, 3), (4, 4), (5, 5)]
def generate_test_data_for_radon_iradon_minimal(shapes):
    """Yield (shape, coordinate) pairs covering the 3x3 neighbourhood of
    each shape's centre pixel."""
    def _centre_neighbourhood(shape):
        row_c, col_c = shape[0] // 2, shape[1] // 2
        return itertools.product((row_c - 1, row_c, row_c + 1),
                                 (col_c - 1, col_c, col_c + 1))
    return ((shape, coordinate)
            for shape in shapes
            for coordinate in _centre_neighbourhood(shape))
# Each shape is paired with every pixel in its centre's 3x3 neighbourhood.
@testing.parametrize("shape, coordinate",
                     generate_test_data_for_radon_iradon_minimal(shapes))
def test_radon_iradon_minimal(shape, coordinate):
    check_radon_iradon_minimal(shape, coordinate)
def test_reconstruct_with_wrong_angles():
    """iradon must reject a theta whose length differs from the sinogram's."""
    image = np.zeros((3, 3))
    angles = [0, 1, 2]
    sinogram = radon(image, theta=angles, circle=False)
    # A matching angle count reconstructs without error...
    iradon(sinogram, theta=angles, circle=False)
    # ...but one extra angle must raise.
    with testing.raises(ValueError):
        iradon(sinogram, theta=[0, 1, 2, 3])
def _random_circle(shape):
# Synthetic random data, zero outside reconstruction circle
np.random.seed(98312871)
image = np.random.rand(*shape)
c0, c1 = np.ogrid[0:shape[0], 0:shape[1]]
r = np.sqrt((c0 - shape[0] // 2)**2 + (c1 - shape[1] // 2)**2)
radius = min(shape) // 2
image[r > radius] = 0.
return image
def test_radon_circle():
    """Behaviour of radon with circle=True.

    Checks: (1) a warning when the image is non-zero outside the inscribed
    circle, (2) angle-independent projections for a circularly symmetric
    image, and (3) approximate mass conservation across angles for random
    data inside the circle.
    """
    a = np.ones((10, 10))
    with expected_warnings(['reconstruction circle']):
        radon(a, circle=True)
    # Synthetic data, circular symmetry
    shape = (61, 79)
    c0, c1 = np.ogrid[0:shape[0], 0:shape[1]]
    r = np.sqrt((c0 - shape[0] // 2)**2 + (c1 - shape[1] // 2)**2)
    radius = min(shape) // 2
    image = np.clip(radius - r, 0, np.inf)  # cone peaking at the centre
    image = _rescale_intensity(image)
    angles = np.linspace(0, 180, min(shape), endpoint=False)
    sinogram = radon(image, theta=angles, circle=True)
    assert np.all(sinogram.std(axis=1) < 1e-2)
    # Synthetic data, random
    image = _random_circle(shape)
    sinogram = radon(image, theta=angles, circle=True)
    mass = sinogram.sum(axis=0)
    average_mass = mass.mean()
    relative_error = np.abs(mass - average_mass) / average_mass
    print(relative_error.max(), relative_error.mean())
    assert np.all(relative_error < 3.2e-3)
def check_sinogram_circle_to_square(size):
    """Padding a circle=True sinogram to square size must keep its peak."""
    from skimage.transform.radon_transform import _sinogram_circle_to_square
    image = _random_circle((size, size))
    theta = np.linspace(0., 180., size, False)
    sinogram_circle = radon(image, theta, circle=True)
    def argmax_shape(a):
        # Flat argmax converted back to 2-D coordinates.
        return np.unravel_index(np.argmax(a), a.shape)
    print('\n\targmax of circle:', argmax_shape(sinogram_circle))
    sinogram_square = radon(image, theta, circle=False)
    print('\targmax of square:', argmax_shape(sinogram_square))
    sinogram_circle_to_square = _sinogram_circle_to_square(sinogram_circle)
    print('\targmax of circle to square:',
          argmax_shape(sinogram_circle_to_square))
    error = abs(sinogram_square - sinogram_circle_to_square)
    print(np.mean(error), np.max(error))
    assert (argmax_shape(sinogram_square) ==
            argmax_shape(sinogram_circle_to_square))
# Even and odd sinogram widths exercise both padding branches.
@testing.parametrize("size", (50, 51))
def test_sinogram_circle_to_square(size):
    check_sinogram_circle_to_square(size)
def check_radon_iradon_circle(interpolation, shape, output_size):
    """Reconstructions with circle=False (then cropped and masked) and with
    circle=True should agree on the same synthetic image.

    Parameters
    ----------
    interpolation : str
        Interpolation scheme forwarded to `iradon` ('nearest' or 'linear').
    shape : tuple of int
        Shape of the synthetic random test image.
    output_size : int or None
        `output_size` forwarded to `iradon`.
    """
    # Forward and inverse radon on synthetic data
    image = _random_circle(shape)
    radius = min(shape) // 2
    sinogram_rectangle = radon(image, circle=False)
    reconstruction_rectangle = iradon(sinogram_rectangle,
                                      output_size=output_size,
                                      interpolation=interpolation,
                                      circle=False)
    sinogram_circle = radon(image, circle=True)
    reconstruction_circle = iradon(sinogram_circle,
                                   output_size=output_size,
                                   interpolation=interpolation,
                                   circle=True)
    # Crop rectangular reconstruction to match circle=True reconstruction
    width = reconstruction_circle.shape[0]
    excess = int(np.ceil((reconstruction_rectangle.shape[0] - width) / 2))
    s = np.s_[excess:width + excess, excess:width + excess]
    reconstruction_rectangle = reconstruction_rectangle[s]
    # Find the reconstruction circle, set reconstruction to zero outside
    c0, c1 = np.ogrid[0:width, 0:width]
    r = np.sqrt((c0 - width // 2)**2 + (c1 - width // 2)**2)
    reconstruction_rectangle[r > radius] = 0.
    print(reconstruction_circle.shape)
    print(reconstruction_rectangle.shape)
    # BUG FIX: the boolean returned by np.allclose was previously discarded,
    # so this comparison never failed the test.  Assert on it so the check is
    # meaningful.  If this now fails in CI, the two reconstructions genuinely
    # differ and an explicit atol/rtol should be chosen deliberately.
    assert np.allclose(reconstruction_rectangle, reconstruction_circle)
# if adding more shapes to test data, you might want to look at commit d0f2bac3f
# parameter grid for the circle-vs-rectangle reconstruction comparison
shapes_radon_iradon_circle = ((61, 79), )
interpolations = ('nearest', 'linear')
# output sizes: default, the smaller dim, the larger dim, and an unrelated size
output_sizes = (None,
                min(shapes_radon_iradon_circle[0]),
                max(shapes_radon_iradon_circle[0]),
                97)
@testing.parametrize("shape, interpolation, output_size",
                     itertools.product(shapes_radon_iradon_circle,
                                       interpolations, output_sizes))
def test_radon_iradon_circle(shape, interpolation, output_size):
    """Run the circle/rectangle consistency check over the full grid."""
    check_radon_iradon_circle(interpolation, shape, output_size)
def test_order_angles_golden_ratio():
    """Golden-ratio ordering must visit every projection angle exactly once,
    for both evenly-spaced and random angle sets of several lengths."""
    from skimage.transform.radon_transform import order_angles_golden_ratio
    np.random.seed(1231)
    lengths = [1, 4, 10, 180]
    for l in lengths:
        theta_ordered = np.linspace(0, 180, l, endpoint=False)
        theta_random = np.random.uniform(0, 180, l)
        for theta in (theta_random, theta_ordered):
            # materialize the generator with list() instead of a pass-through
            # comprehension (flake8-comprehensions C400)
            indices = list(order_angles_golden_ratio(theta))
            # no duplicate indices allowed
            assert len(indices) == len(set(indices))
@test_parallel()
def test_iradon_sart():
    """SART reconstruction accuracy: one vs. two iterations, clipping, and
    shifted projections, on full-range and missing-wedge angle sets."""
    debug = False
    image = rescale(PHANTOM, 0.8, mode='reflect',
                    multichannel=False, anti_aliasing=False)
    theta_ordered = np.linspace(0., 180., image.shape[0], endpoint=False)
    # missing-wedge acquisitions (0-150 degrees) are harder, so their error
    # budget below is doubled via error_factor
    theta_missing_wedge = np.linspace(0., 150., image.shape[0], endpoint=True)
    for theta, error_factor in ((theta_ordered, 1.),
                                (theta_missing_wedge, 2.)):
        sinogram = radon(image, theta, circle=True)
        reconstructed = iradon_sart(sinogram, theta)
        if debug:
            from matplotlib import pyplot as plt
            plt.figure()
            plt.subplot(221)
            plt.imshow(image, interpolation='nearest')
            plt.subplot(222)
            plt.imshow(sinogram, interpolation='nearest')
            plt.subplot(223)
            plt.imshow(reconstructed, interpolation='nearest')
            plt.subplot(224)
            plt.imshow(reconstructed - image, interpolation='nearest')
            plt.show()
        # mean absolute reconstruction error after one iteration
        delta = np.mean(np.abs(reconstructed - image))
        print('delta (1 iteration) =', delta)
        assert delta < 0.02 * error_factor
        # a second iteration (passing the previous result as the estimate)
        # must reduce the error further
        reconstructed = iradon_sart(sinogram, theta, reconstructed)
        delta = np.mean(np.abs(reconstructed - image))
        print('delta (2 iterations) =', delta)
        assert delta < 0.014 * error_factor
        # clipping to the phantom's value range also helps a single iteration
        reconstructed = iradon_sart(sinogram, theta, clip=(0, 1))
        delta = np.mean(np.abs(reconstructed - image))
        print('delta (1 iteration, clip) =', delta)
        assert delta < 0.018 * error_factor
        # simulate per-projection detector misalignment, then correct it via
        # projection_shifts; accuracy should be close to the aligned case
        np.random.seed(1239867)
        shifts = np.random.uniform(-3, 3, sinogram.shape[1])
        x = np.arange(sinogram.shape[0])
        sinogram_shifted = np.vstack(np.interp(x + shifts[i], x,
                                               sinogram[:, i])
                                     for i in range(sinogram.shape[1])).T
        reconstructed = iradon_sart(sinogram_shifted, theta,
                                    projection_shifts=shifts)
        if debug:
            from matplotlib import pyplot as plt
            plt.figure()
            plt.subplot(221)
            plt.imshow(image, interpolation='nearest')
            plt.subplot(222)
            plt.imshow(sinogram_shifted, interpolation='nearest')
            plt.subplot(223)
            plt.imshow(reconstructed, interpolation='nearest')
            plt.subplot(224)
            plt.imshow(reconstructed - image, interpolation='nearest')
            plt.show()
        delta = np.mean(np.abs(reconstructed - image))
        print('delta (1 iteration, shifted sinogram) =', delta)
        assert delta < 0.022 * error_factor
| gpl-3.0 |
sunzhxjs/JobGIS | lib/python2.7/site-packages/pandas/core/dtypes.py | 9 | 5492 | """ define extension dtypes """
import re
import numpy as np
from pandas import compat
class ExtensionDtype(object):
    """
    A np.dtype duck-typed class, suitable for holding a custom dtype.

    THIS IS NOT A REAL NUMPY DTYPE: it mimics the np.dtype attribute surface
    so pandas internals can treat extension dtypes and numpy dtypes alike.
    Subclasses must provide `name`, `type`, and (usually) a
    `construct_from_string` classmethod, plus `__hash__`/`__eq__`.
    """
    # np.dtype-compatible attribute surface; subclasses override as needed
    name = None
    names = None
    type = None
    subdtype = None
    kind = None
    str = None
    num = 100
    shape = tuple()
    itemsize = 8
    base = None
    isbuiltin = 0
    isnative = 0
    # attribute names that participate in equality/hash for subclasses
    _metadata = []

    def __unicode__(self):
        return self.name

    def __str__(self):
        """
        Return a string representation for a particular Object

        Invoked by str(df) in both py2/py3.
        Yields Bytestring in Py2, Unicode String in py3.
        """
        if compat.PY3:
            return self.__unicode__()
        return self.__bytes__()

    def __bytes__(self):
        """
        Return a string representation for a particular object.

        Invoked by bytes(obj) in py3 only.
        Yields a bytestring in both py2/py3.
        """
        from pandas.core.config import get_option
        encoding = get_option("display.encoding")
        return self.__unicode__().encode(encoding, 'replace')

    def __repr__(self):
        """
        Return a string representation for a particular object.

        Yields Bytestring in Py2, Unicode String in py3.
        """
        return str(self)

    def __hash__(self):
        raise NotImplementedError("sub-classes should implement an __hash__ "
                                  "method")

    def __eq__(self, other):
        raise NotImplementedError("sub-classes should implement an __eq__ "
                                  "method")

    def __ne__(self, other):
        # BUG FIX: under Python 2, defining __eq__ without __ne__ makes `!=`
        # fall back to identity comparison; delegate to __eq__ explicitly.
        return not self.__eq__(other)

    @classmethod
    def is_dtype(cls, dtype):
        """ Return a boolean if we if the passed type is an actual dtype that
        we can match (via string or type) """
        # unwrap array/Series-like objects carrying a .dtype attribute
        if hasattr(dtype, 'dtype'):
            dtype = dtype.dtype
        if isinstance(dtype, cls):
            return True
        elif isinstance(dtype, np.dtype):
            return False
        try:
            return cls.construct_from_string(dtype) is not None
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit; any ordinary failure simply means
            # "not our dtype"
            return False
class CategoricalDtypeType(type):
    """
    the type of CategoricalDtype, this metaclass determines subclass ability
    """
    pass


class CategoricalDtype(ExtensionDtype):
    """
    A np.dtype duck-typed class, suitable for holding a custom categorical
    dtype.

    THIS IS NOT A REAL NUMPY DTYPE, but essentially a sub-class of np.object
    """
    name = 'category'
    type = CategoricalDtypeType
    kind = 'O'
    str = '|O08'
    base = np.dtype('O')

    def __hash__(self):
        # make myself hashable; hashes like the plain string 'category' so the
        # dtype and its string alias collide in dicts/sets
        return hash(str(self))

    def __eq__(self, other):
        # equal to the string alias 'category' and to any CategoricalDtype
        if isinstance(other, compat.string_types):
            return other == self.name
        return isinstance(other, CategoricalDtype)

    @classmethod
    def construct_from_string(cls, string):
        """ attempt to construct this type from a string, raise a TypeError
        if its not possible """
        try:
            if string == 'category':
                return cls()
        except Exception:
            # BUG FIX: was a bare `except:`; comparison against exotic objects
            # may raise, but KeyboardInterrupt/SystemExit must not be swallowed
            pass
        raise TypeError("cannot construct a CategoricalDtype")
class DatetimeTZDtypeType(type):
    """
    the type of DatetimeTZDtype, this metaclass determines subclass ability
    """
    pass


class DatetimeTZDtype(ExtensionDtype):
    """
    A np.dtype duck-typed class, suitable for holding a custom datetime with
    tz dtype.

    THIS IS NOT A REAL NUMPY DTYPE, but essentially a sub-class of
    np.datetime64[ns]
    """
    type = DatetimeTZDtypeType
    kind = 'M'
    str = '|M8[ns]'
    num = 101
    base = np.dtype('M8[ns]')
    # attributes participating in equality/identity
    _metadata = ['unit', 'tz']
    # parses full dtype strings such as "datetime64[ns, US/Eastern]" or
    # "M8[ns, UTC]" (raw string: FIX for invalid '\[' escape warning, W605)
    _match = re.compile(r"(datetime64|M8)\[(?P<unit>.+), (?P<tz>.+)\]")

    def __init__(self, unit, tz=None):
        """
        Parameters
        ----------
        unit : string unit that this represents, currently must be 'ns';
            alternatively an existing DatetimeTZDtype (copy construction), or
            a full dtype string such as 'datetime64[ns, UTC]' (with tz=None)
        tz : string tz that this represents
        """
        if isinstance(unit, DatetimeTZDtype):
            # copy constructor
            self.unit, self.tz = unit.unit, unit.tz
            return

        if tz is None:
            # we were passed a string that we can construct
            try:
                m = self._match.search(unit)
                if m is not None:
                    self.unit = m.groupdict()['unit']
                    self.tz = m.groupdict()['tz']
                    return
            except Exception:
                # BUG FIX: was a bare `except:` (also caught SystemExit etc.);
                # e.g. a non-string `unit` raises TypeError inside search()
                raise ValueError("could not construct DatetimeTZDtype")

            # string did not match the expected "datetime64[unit, tz]" form
            raise ValueError("DatetimeTZDtype constructor must have a tz "
                             "supplied")

        if unit != 'ns':
            raise ValueError("DatetimeTZDtype only supports ns units")
        self.unit = unit
        self.tz = tz

    @classmethod
    def construct_from_string(cls, string):
        """ attempt to construct this type from a string, raise a TypeError
        if its not possible """
        try:
            return cls(unit=string)
        except ValueError:
            raise TypeError("could not construct DatetimeTZDtype")

    def __unicode__(self):
        # format the tz
        return "datetime64[{unit}, {tz}]".format(unit=self.unit, tz=self.tz)

    @property
    def name(self):
        return str(self)

    def __hash__(self):
        # make myself hashable; hashes like the full dtype string
        return hash(str(self))

    def __eq__(self, other):
        if isinstance(other, compat.string_types):
            return other == self.name
        return (isinstance(other, DatetimeTZDtype) and
                self.unit == other.unit and
                self.tz == other.tz)
| mit |
freeman-lab/dask | dask/dataframe/tests/test_multi.py | 5 | 5300 | import dask.dataframe as dd
import pandas as pd
from dask.dataframe.multi import (align_partitions, join_indexed_dataframes,
hash_join, concat_indexed_dataframes)
import pandas.util.testing as tm
from dask.async import get_sync
def test_align_partitions():
    """align_partitions repartitions both frames onto the union of their
    index divisions and reports which input blocks feed each new partition."""
    A = pd.DataFrame({'x': [1, 2, 3, 4, 5, 6], 'y': list('abdabd')},
                     index=[10, 20, 30, 40, 50, 60])
    a = dd.repartition(A, [10, 40, 60])
    B = pd.DataFrame({'x': [1, 2, 3, 4], 'y': list('abda')},
                     index=[30, 70, 80, 100])
    b = dd.repartition(B, [30, 80, 100])
    (aa, bb), divisions, L = align_partitions(a, b)
    assert isinstance(a, dd.DataFrame)
    assert isinstance(b, dd.DataFrame)
    # merged division boundaries from both inputs
    assert divisions == (10, 30, 40, 60, 80, 100)
    assert isinstance(L, list)
    assert len(divisions) == 1 + len(L)
    # one [a-key, b-key] pair per output partition; None means that side
    # contributes no data to the partition
    assert L == [[(aa._name, 0), None],
                 [(aa._name, 1), (bb._name, 0)],
                 [(aa._name, 2), (bb._name, 1)],
                 [None, (bb._name, 2)],
                 [None, (bb._name, 3)]]
def test_join_indexed_dataframe_to_indexed_dataframe():
    """Index-joins of two dask frames match pandas for all four join types,
    with the expected output divisions for each."""
    A = pd.DataFrame({'x': [1, 2, 3, 4, 5, 6]},
                     index=[1, 2, 3, 4, 6, 7])
    a = dd.repartition(A, [1, 4, 7])
    B = pd.DataFrame({'y': list('abcdef')},
                     index=[1, 2, 4, 5, 6, 8])
    b = dd.repartition(B, [1, 2, 5, 8])
    # left join: result keeps the left frame's index range
    c = join_indexed_dataframes(a, b, how='left')
    assert c.divisions[0] == a.divisions[0]
    assert c.divisions[-1] == a.divisions[-1]
    tm.assert_frame_equal(c.compute(), A.join(B))
    # right join: result keeps the right frame's index range
    c = join_indexed_dataframes(a, b, how='right')
    assert c.divisions[0] == b.divisions[0]
    assert c.divisions[-1] == b.divisions[-1]
    tm.assert_frame_equal(c.compute(), A.join(B, how='right'))
    # inner join: intersection of index ranges
    c = join_indexed_dataframes(a, b, how='inner')
    assert c.divisions[0] == 1
    assert c.divisions[-1] == 7
    tm.assert_frame_equal(c.compute(), A.join(B, how='inner'))
    # outer join: union of index ranges
    c = join_indexed_dataframes(a, b, how='outer')
    assert c.divisions[0] == 1
    assert c.divisions[-1] == 8
    tm.assert_frame_equal(c.compute(), A.join(B, how='outer'))
def list_eq(a, b):
    """Assert two dataframes (dask or pandas) have identical columns and the
    same multiset of rows, ignoring row order.

    NaNs are replaced with the sentinel value 100 so that rows containing
    missing data still compare equal.
    """
    if isinstance(a, dd.DataFrame):
        a = a.compute(get=get_sync)
    if isinstance(b, dd.DataFrame):
        b = b.compute(get=get_sync)
    assert list(a.columns) == list(b.columns)
    left_rows = sorted(a.fillna(100).values.tolist())
    right_rows = sorted(b.fillna(100).values.tolist())
    assert left_rows == right_rows
def test_hash_join():
    """Hash (shuffle) joins match pandas.merge for every join type, and
    respect an explicit npartitions with differing join columns."""
    A = pd.DataFrame({'x': [1, 2, 3, 4, 5, 6], 'y': [1, 1, 2, 2, 3, 4]})
    a = dd.repartition(A, [0, 4, 5])
    B = pd.DataFrame({'y': [1, 3, 4, 4, 5, 6], 'z': [6, 5, 4, 3, 2, 1]})
    b = dd.repartition(B, [0, 2, 5])
    for how in ['inner', 'left', 'right', 'outer']:
        c = hash_join(a, 'y', b, 'y', how)
        result = c.compute()
        expected = pd.merge(A, B, how, 'y')
        # row order is not deterministic after a shuffle, so compare sorted
        assert list(result.columns) == list(expected.columns)
        assert sorted(result.fillna(100).values.tolist()) == \
            sorted(expected.fillna(100).values.tolist())
    # Different columns and npartitions
    c = hash_join(a, 'x', b, 'z', 'outer', npartitions=3)
    assert c.npartitions == 3
    result = c.compute()
    expected = pd.merge(A, B, 'outer', None, 'x', 'z')
    assert list(result.columns) == list(expected.columns)
    assert sorted(result.fillna(100).values.tolist()) == \
        sorted(expected.fillna(100).values.tolist())
def test_indexed_concat():
    """Index-aligned concatenation matches pandas.concat for inner and outer
    joins (rows compared together with their index labels)."""
    A = pd.DataFrame({'x': [1, 2, 3, 4, 6, 7], 'y': list('abcdef')},
                     index=[1, 2, 3, 4, 6, 7])
    a = dd.repartition(A, [1, 4, 7])
    B = pd.DataFrame({'x': [10, 20, 40, 50, 60, 80]},
                     index=[1, 2, 4, 5, 6, 8])
    b = dd.repartition(B, [1, 2, 5, 8])
    for how in ['inner', 'outer']:
        c = concat_indexed_dataframes([a, b], join=how)
        result = c.compute()
        expected = pd.concat([A, B], 0, how)
        assert list(result.columns) == list(expected.columns)
        # compare (values, index) pairs sorted, since partition order may
        # differ from pandas' row order
        assert sorted(zip(result.values.tolist(), result.index.values.tolist())) == \
            sorted(zip(expected.values.tolist(), expected.index.values.tolist()))
def test_merge():
    """dd.merge matches pd.merge across column/index join specifications and
    across dask/pandas operand combinations."""
    A = pd.DataFrame({'x': [1, 2, 3, 4, 5, 6], 'y': [1, 1, 2, 2, 3, 4]})
    a = dd.repartition(A, [0, 4, 5])
    B = pd.DataFrame({'y': [1, 3, 4, 4, 5, 6], 'z': [6, 5, 4, 3, 2, 1]})
    b = dd.repartition(B, [0, 2, 5])
    # join on the index
    list_eq(dd.merge(a, b, left_index=True, right_index=True),
            pd.merge(A, B, left_index=True, right_index=True))
    # join on a shared column
    list_eq(dd.merge(a, b, on='y'),
            pd.merge(A, B, on='y'))
    # join on differently-named columns
    list_eq(dd.merge(a, b, left_on='x', right_on='z'),
            pd.merge(A, B, left_on='x', right_on='z'))
    # default join; also mix dask and plain pandas operands
    list_eq(dd.merge(a, b),
            pd.merge(A, B))
    list_eq(dd.merge(a, B),
            pd.merge(A, B))
    list_eq(dd.merge(A, b),
            pd.merge(A, B))
    list_eq(dd.merge(A, B),
            pd.merge(A, B))
    list_eq(dd.merge(a, b, left_index=True, right_index=True),
            pd.merge(A, B, left_index=True, right_index=True))
    # NOTE(review): the mixed column/index cases below are disabled —
    # presumably unsupported when this was written; confirm before enabling.
    # list_eq(dd.merge(a, b, left_on='x', right_index=True),
    #         pd.merge(A, B, left_on='x', right_index=True))
    # list_eq(dd.merge(a, B, left_index=True, right_on='y'),
    #         pd.merge(A, B, left_index=True, right_on='y'))
| bsd-3-clause |
habi/GlobalDiagnostiX | AngularOpening.py | 1 | 6875 | # -*- coding: utf-8 -*-
"""
Calculate the angular opening of the lens, including the shades that we have
to build in between.
"""
# NOTE(review): Python 2 script (print statements); the geometry is plotted
# with matplotlib and optionally saved to a PNG.
from __future__ import division
import optparse
import sys
import os
import numpy
import matplotlib.pyplot as plt
from matplotlib.patches import Wedge
from matplotlib.patches import Rectangle
os.system('clear')
# Use Pythons Optionparser to define and read the options, and also
# give some help to the user
parser = optparse.OptionParser()
usage = "usage: %prog [options] arg"
parser.add_option('-d', dest='Distance', type='float', default=134,
                  metavar='123', help='Scintillator-CMOS distance [mm]. '
                  'Default = %default mm')
parser.add_option('-f', dest='FOV', type='float', default=450 / 3,
                  metavar='430', help='Desired field of view [mm]. Default = '
                  '%default mm')
parser.add_option('-o', dest='Overlap', type='float', default=2,
                  metavar='16', help='Overlap between the images [%]. Default '
                  '= %default %')
parser.add_option('-p', dest='ParaventLength', type='float', default=100,
                  metavar='123', help='Length of the paravents. Default = '
                  '%default mm')
parser.add_option('-l', dest='LensLength', type='float', default=16.8,
                  metavar='11.3', help='Length of the lens. Default = '
                  '%default mm')
parser.add_option('-b', dest='BackFocalLength', type='float', default=6.5,
                  metavar='9.0', help='Back focal length of the lens. Default '
                  '= %default mm')
parser.add_option('-s', dest='SaveImage', default=True, action='store_true',
                  help='Write output, (Default: %default)')
(options, args) = parser.parse_args()
# TBL 6 C 3MP specifications, as from TIS and copied here: http://cl.ly/YQ4Z
# FOV = 145 mm without overlap
# LensLengtht = 10 mm
# BackFocalLength = 6.5 mm
# Measured FOV at a distance of 13 cm is 135 x 105 mm
# show the help if the needed parameters (distance and FOV) are not given
# NOTE(review): both options carry defaults, so this branch can never fire;
# the example below also mentions a '-a' option that does not exist.
if options.Distance is None or options.FOV is None:
    parser.print_help()
    print ''
    print 'Example:'
    print 'The command below shows the configuration of a detector with '
    print 'an optics with an opening angle of 78° used to get a field'
    print 'of view of 50 cm:'
    print ''
    print sys.argv[0], '-a 78 -f 50'
    print ''
    sys.exit(1)
print ''
# tan(\alpha/2) = (FOV/2) / Distance
# Distance = (FOV/2)/tan(\alpha/2)
print 'We calculate with a CMOS-Scintillator distance of', options.Distance, \
    'mm.'
print 'With a back focal length of', options.BackFocalLength, \
    'mm and a lens length of', options.LensLength, 'mm we have a distance of',\
    options.Distance - options.BackFocalLength - options.LensLength, \
    'mm from the front of the lens to the scintillator.'
print 'The FOV is corrected with an overlap of', options.Overlap, '% from', \
    options.FOV, 'mm to',
# enlarge the FOV by the requested overlap percentage
options.FOV = options.FOV * (1 + (options.Overlap / 100))
print options.FOV, 'mm.'
print 'For a visible FOV of', options.FOV, 'mm at a distance of', \
    options.Distance, 'mm we get a calculated opening angle of the lens of',
# full opening angle from tan(alpha/2) = (FOV/2) / Distance
OpeningAngle = numpy.rad2deg(numpy.arctan((options.FOV / 2) /
                                          options.Distance)) * 2
print round(OpeningAngle, 1), 'degrees'
plt.figure(figsize=(5, 15))
# draw three detector modules: one centered, one above and one below,
# displaced by the overlap-free FOV
for Displacement in (0, - options.FOV / (1 + options.Overlap / 100),
                     options.FOV / (1 + options.Overlap / 100)):
    # Central axis
    plt.axhline(Displacement, color='k', linestyle='--')
    # CMOS
    cmoscolor = 'b'
    plt.plot((0, 0), (Displacement + 3, Displacement - 3), linewidth='5',
             color=cmoscolor)
    # Lens
    rect = Rectangle((options.BackFocalLength, Displacement - 14 / 2),
                     options.LensLength, 14, facecolor="#aaaaaa")
    plt.gca().add_patch(rect)
    # Opening angle, based on CMOS
    wedge = Wedge((0, Displacement), options.Distance * 0.309,
                  -OpeningAngle / 2, OpeningAngle / 2, fill=True, color='r',
                  alpha=0.125)
    plt.gca().add_patch(wedge)
    plt.plot((0, options.Distance),
             (Displacement, Displacement + options.FOV / 2), color='k',
             linestyle='--', alpha=0.25)
    plt.plot((0, options.Distance),
             (Displacement, Displacement - options.FOV / 2), color='k',
             linestyle='--', alpha=0.25)
    # Scintillator FOV
    screencolor = 'k'
    plt.plot([options.Distance, options.Distance],
             [Displacement + (options.FOV / 2),
              Displacement - (options.FOV / 2)], linewidth='6',
             color=screencolor)
    screencolor = 'g'
    plt.plot([options.Distance, options.Distance],
             [Displacement + (options.FOV / 2),
              Displacement - (options.FOV / 2)], linewidth='4',
             color=screencolor)
    # FOV drawn from center of lens
    beamcolor = 'r'
    plt.plot([options.BackFocalLength + options.LensLength,
              options.Distance], [Displacement, Displacement + options.FOV /
                                  2], beamcolor)
    plt.plot([options.BackFocalLength + options.LensLength,
              options.Distance], [Displacement, Displacement - options.FOV /
                                  2], beamcolor)
    # Paravents. Position calculated back from overlap
    paraventcolor = 'k'
    plt.plot([0, options.ParaventLength],
             [Displacement - (options.FOV / (1 + options.Overlap / 100) / 2),
              Displacement - (options.FOV / (1 + options.Overlap / 100) / 2)],
             linewidth='5', color=paraventcolor)
    # Paravent blocking,
    beamcolor = 'g'
    plt.plot([options.BackFocalLength + options.LensLength, options.Distance],
             [Displacement, Displacement + options.FOV / 2], beamcolor)
    plt.plot([options.BackFocalLength + options.LensLength, options.Distance],
             [Displacement, Displacement - options.FOV / 2], beamcolor)
# Nice plotting
plt.title('Angular opening: ' + str(round(OpeningAngle, 2)) + '\nFOV size: ' +
          str(options.FOV) + ' mm (including overlap of ' +
          str(options.Overlap) + ' %)\nWorking Distance: ' +
          str('%.2f' % options.Distance) + ' mm\nParavent length: ' +
          str('%.2f' % options.ParaventLength) + ' mm')
plt.xlabel('Distance [mm]')
plt.axis('equal')
if options.SaveImage:
    SaveName = 'Paravents_' + str(str('%.2f' % OpeningAngle)) + '_wd_' + \
        str('%.2f' % options.Distance) + 'mm_FOV_' + \
        str('%.2f' % options.FOV) + 'mm'
    FigureName = ''.join([SaveName, '.png'])
    plt.savefig(FigureName)
    print 'Figure saved to ' + FigureName
plt.show()
rafaelvalle/MDI | nnet_lasagne.py | 1 | 10609 | # code adapted from lasagne tutorial
# http://lasagne.readthedocs.org/en/latest/user/tutorial.html
import time
import os
from itertools import product
import numpy as np
from sklearn.cross_validation import KFold
import theano
from theano import tensor as T
import lasagne
from params import nnet_params_dict, feats_train_folder
def set_trace():
    """Drop into an IPython debugger at the caller's frame (debug helper)."""
    from IPython.core.debugger import Pdb
    import sys
    Pdb(color_scheme='Linux').set_trace(sys._getframe().f_back)
def build_network(input_var, input_shape, nonlins, depth=2,
                  widths=(1000, 1000, 10), drops=(0.2, 0.5)):
    """
    Build a fully-connected lasagne network: an input layer followed by
    ``depth - 1`` dense layers, each optionally followed by dropout.

    Parameters
    ----------
    input_var : Theano symbolic variable or None (default: None)
        Variable representing a network input.
    input_shape : tuple of int or None (batchsize, rows, cols)
        input_shape of the input. Any element can be set to None to indicate
        that dimension is not fixed at compile time
    nonlins : sequence of callables
        Nonlinearity per layer; index 0 is unused (input layer).
    depth : int
        Total number of layers, including the input layer.
    widths : sequence of int
        Units per layer; index 0 is unused (input layer).
    drops : sequence of float or None
        Dropout probability after each layer; None disables dropout there.

    Returns
    -------
    The topmost lasagne layer of the constructed network.
    """
    # GlorotUniform is the default mechanism for initializing weights
    for i in range(depth):
        if i == 0:
            network = lasagne.layers.InputLayer(shape=input_shape,
                                                input_var=input_var)
        else:
            network = lasagne.layers.DenseLayer(network,
                                                widths[i],
                                                nonlinearity=nonlins[i])
        # FIX: compare to None with identity (`is not None`), not equality
        # (`!= None`), per PEP 8; equality may invoke arbitrary __ne__.
        if drops[i] is not None:
            network = lasagne.layers.DropoutLayer(network, p=drops[i])
    return network
def floatX(X):
    # cast to theano's configured float dtype (e.g. float32 when on GPU)
    return np.asarray(X, dtype=theano.config.floatX)
def zerosX(X):
    # zeros array in theano's configured float dtype
    return np.zeros(X, dtype=theano.config.floatX)
def init_weights(shape):
    # small-variance gaussian init, wrapped as a theano shared variable
    return theano.shared(floatX(np.random.randn(*shape) * 0.01))
def sgd(cost, params, gamma):
    # plain stochastic gradient descent: p <- p - gamma * d(cost)/dp
    grads = T.grad(cost=cost, wrt=params)
    updates = []
    for p, g in zip(params, grads):
        updates.append([p, p - g * gamma])
    return updates
def model(X, w_h, w_o):
    # one hidden sigmoid layer followed by a softmax output layer
    h = T.nnet.sigmoid(T.dot(X, w_h))
    pyx = T.nnet.softmax(T.dot(h, w_o))
    return pyx
def iterate_minibatches(inputs, targets, batchsize, shuffle=False):
    """Yield (inputs, targets) minibatches of exactly `batchsize` rows.

    Trailing rows that do not fill a whole batch are dropped.  With
    `shuffle=True` the row order is randomized (numpy global RNG) before
    batching.
    """
    assert len(inputs) == len(targets)
    order = None
    if shuffle:
        order = np.arange(len(inputs))
        np.random.shuffle(order)
    last_full_start = len(inputs) - batchsize
    for begin in range(0, last_full_start + 1, batchsize):
        if order is not None:
            batch = order[begin:begin + batchsize]
        else:
            batch = slice(begin, begin + batchsize)
        yield inputs[batch], targets[batch]
def batch_ids(batch_size, x_train, train_idx):
    """Return (start, end) slice bounds covering full `batch_size` batches of
    the selected training rows; a trailing partial batch is dropped."""
    # change to iterator
    n_rows = len(x_train[train_idx])
    starts = range(0, n_rows, batch_size)
    ends = range(batch_size, n_rows, batch_size)
    return zip(starts, ends)
verbose = True
# train on every perturbed dataset
# include_data.csv rows: (include flag, train filename, test filename)
filepaths = np.loadtxt("include_data.csv", dtype=object, delimiter=",")
for (include, train_filename, test_filename) in filepaths:
    if include == '1':
        print '\nExecuting {}'.format(train_filename)
        # Load training and test sets
        x_train = np.load(os.path.join(feats_train_folder,
                                       train_filename)).astype(np.float32)
        # last column holds the integer class label
        y_train = x_train[:, -1].astype(int)
        # y_train = (np.eye(2, dtype=np.float32)[x_train[:,-1].astype(int)])
        # remove label column from x_train
        x_train = x_train[:, :-1]
        # Network topology
        n_obs = x_train.shape[0]
        n_inputs = x_train.shape[1]
        n_outputs = len(np.unique(y_train))
        # Cross-validation and Neural Net parameters
        n_folds = nnet_params_dict['n_folds']
        alphas = nnet_params_dict['alphas']
        gammas = nnet_params_dict['gammas']
        decay_rate = nnet_params_dict['decay_rate']
        batch_sizes = nnet_params_dict['batch_sizes']
        max_epoch = nnet_params_dict['max_epoch']
        depth = nnet_params_dict['depth']
        widths = nnet_params_dict['widths']
        nonlins = nnet_params_dict['nonlins']
        drops = nnet_params_dict['drops']
        # Dictionary to store results
        results_dict = {}
        # grid search over (alpha, gamma, batch_size); three extra columns
        # are appended per row for error rate, val loss and running time
        params_mat = [x for x in product(alphas, gammas, batch_sizes)]
        params_mat = np.array(params_mat, dtype=theano.config.floatX)
        params_mat = np.column_stack((params_mat,
                                      zerosX(params_mat.shape[0]),
                                      zerosX(params_mat.shape[0]),
                                      zerosX(params_mat.shape[0])))
        for param_idx in xrange(params_mat.shape[0]):
            # load parameters for neural network model
            alpha = params_mat[param_idx, 0]
            gamma = params_mat[param_idx, 1]
            batch_size = int(params_mat[param_idx, 2])
            shape = (batch_size, x_train.shape[1])
            # choose n_hidden nodes according to ...
            n_hidden = int((n_obs / depth) / (alpha*(n_inputs+n_outputs)))
            for i in range(1, depth-1):
                widths[i] = n_hidden
            model_str = ('\nalpha {} gamma {} batch size {} '
                         'n_hidden {} depth {}'
                         '\nnonlins {}'
                         '\ndrops {}'.format(alpha, gamma, batch_size,
                                             n_hidden, depth, nonlins,
                                             drops))
            print model_str
            # specify input and target theano data types
            input_var = T.fmatrix('input')
            target_var = T.ivector('target')
            # build neural network model
            network = build_network(input_var, shape, nonlins, depth, widths,
                                    drops)
            # create loss expression for training
            """
            py_x = model(input_var, w_h, w_o)
            y_x = T.argmax(py_x, axis=1)
            cost = T.mean(T.nnet.categorical_crossentropy(py_x, target_var),
                          dtype=theano.config.floatX)
            """
            prediction = lasagne.layers.get_output(network)
            loss = lasagne.objectives.categorical_crossentropy(prediction,
                                                               target_var)
            loss = loss.mean()
            # create paraneter update expressions for training
            """
            params = [w_h, w_o]
            updates = sgd(cost, params, gamma=gamma)
            """
            params = lasagne.layers.get_all_params(network, trainable=True)
            updates = lasagne.updates.adadelta(loss, params,
                                               learning_rate=gamma,
                                               rho=decay_rate)
            # create loss expression for validation and classification accuracy
            # Deterministic forward pass to disable droupout layers
            test_prediction = lasagne.layers.get_output(network,
                                                        deterministic=True)
            test_loss = lasagne.objectives.categorical_crossentropy(
                test_prediction,
                target_var)
            test_loss = test_loss.mean()
            test_acc = T.mean(T.eq(T.argmax(test_prediction, axis=1),
                                   target_var), dtype=theano.config.floatX)
            # compile functions for performing training step and returning
            # corresponding training loss
            train_fn = theano.function(inputs=[input_var, target_var],
                                       outputs=loss,
                                       updates=updates,
                                       allow_input_downcast=True)
            # compile a function to compute the validation loss and accuracy
            val_fn = theano.function(inputs=[input_var, target_var],
                                     outputs=[test_loss, test_acc],
                                     allow_input_downcast=True)
            # create kfold iterator
            kf = KFold(x_train.shape[0], n_folds=n_folds)
            error_rates = []
            val_losses = []
            running_time = []
            fold = 1
            for train_idx, val_idx in kf:
                start_time = time.time()
                for i in range(max_epoch):
                    train_err = 0
                    train_batches = 0
                    for start, end in batch_ids(batch_size, x_train,
                                                train_idx):
                        train_err += train_fn(x_train[train_idx][start:end],
                                              y_train[train_idx][start:end])
                        train_batches += 1
                    # NOTE(review): this loop evaluates the FULL validation
                    # set once per training batch id (the start/end bounds are
                    # unused) — presumably unintended duplication; confirm.
                    val_err = 0
                    val_acc = 0
                    val_batches = 0
                    for start, end in batch_ids(batch_size, x_train,
                                                train_idx):
                        err, acc = val_fn(x_train[val_idx], y_train[val_idx])
                        val_err += err
                        val_acc += acc
                        val_batches += 1
                # metrics from the last epoch of this fold
                error_rate = (1 - (val_acc / val_batches)) * 100
                val_loss = val_err / val_batches
                print("Final results:")
                print("  val loss:\t\t\t{:.6f}".format(val_loss))
                print("  val error rate:\t\t{:.2f} %".format(error_rate))
                error_rates.append(error_rate)
                val_losses.append(val_loss)
                running_time.append(np.around((time.time() -
                                               start_time) / 60., 1))
                fold += 1
            # store fold-averaged metrics next to this parameter combination
            params_mat[param_idx, 3] = np.mean(error_rates)
            params_mat[param_idx, 4] = np.mean(val_losses)
            params_mat[param_idx, 5] = np.mean(running_time)
            print('alpha {} gamma {} batchsize {} error rate {} '
                  'validation cost {} '
                  'running time {}'.format(params_mat[param_idx, 0],
                                           params_mat[param_idx, 1],
                                           params_mat[param_idx, 2],
                                           params_mat[param_idx, 3],
                                           params_mat[param_idx, 4],
                                           params_mat[param_idx, 5]))
        # Save params matrix to disk
        params_mat.dump(('results/train/{}'
                        '_results.np').format(train_filename[:-3]))
| mit |
andresmeh/pylibnidaqmx | nidaqmx/wxagg_plot.py | 16 | 4515 |
import os
import sys
import time
import traceback
import matplotlib
matplotlib.use('WXAgg')
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg
from matplotlib.backends.backend_wx import NavigationToolbar2Wx
import wx
from matplotlib.figure import Figure
class PlotFigure(wx.Frame):
    """wx frame embedding a matplotlib canvas that re-plots the output of a
    user-supplied callback on a timer; press 'q' to close."""
    def OnKeyPressed (self, event):
        # keyboard shortcut: 'q' closes the window
        key = event.key
        if key=='q':
            self.OnClose(event)
    def __init__(self, func, timer_period):
        """`func` returns (xdata, ydata_list, legend); it is polled every
        `timer_period` milliseconds."""
        wx.Frame.__init__(self, None, -1, "Plot Figure")
        self.fig = Figure((12,9), 75)
        self.canvas = FigureCanvasWxAgg(self, -1, self.fig)
        self.canvas.mpl_connect('key_press_event', self.OnKeyPressed)
        self.toolbar = NavigationToolbar2Wx(self.canvas)
        self.toolbar.Realize()
        self.func = func
        self.plot = None
        self.timer_period = timer_period
        self.timer = wx.Timer(self)
        self.is_stopped = False
        if os.name=='nt':
            # On Windows, default frame size behaviour is incorrect
            # you don't need this under Linux
            tw, th = self.toolbar.GetSizeTuple()
            fw, fh = self.canvas.GetSizeTuple()
            self.toolbar.SetSize(Size(fw, th))
        # Create a figure manager to manage things
        # Now put all into a sizer
        sizer = wx.BoxSizer(wx.VERTICAL)
        # This way of adding to sizer allows resizing
        sizer.Add(self.canvas, 1, wx.LEFT|wx.TOP|wx.GROW)
        # Best to allow the toolbar to resize!
        sizer.Add(self.toolbar, 0, wx.GROW)
        self.SetSizer(sizer)
        self.Fit()
        self.Bind(wx.EVT_TIMER, self.OnTimerWrap, self.timer)
        self.Bind(wx.EVT_CLOSE, self.OnClose)
        self.timer.Start(timer_period)
    def GetToolBar(self):
        # You will need to override GetToolBar if you are using an
        # unmanaged toolbar in your frame
        return self.toolbar
    def OnClose(self, event):
        # flag first so an in-flight timer callback becomes a no-op
        self.is_stopped = True
        print 'Closing PlotFigure, please wait.'
        self.timer.Stop()
        self.Destroy()
    def OnTimerWrap (self, evt):
        """Timer callback: run OnTimer and, if it took longer than the timer
        period, grow the period (x1.2) so callbacks cannot pile up."""
        if self.is_stopped:
            print 'Ignoring timer callback'
            return
        t = time.time()
        try:
            self.OnTimer (evt)
        except KeyboardInterrupt:
            self.OnClose(evt)
        duration = 1000*(time.time () - t)
        if duration > self.timer_period:
            print 'Changing timer_period from %s to %s msec' % (self.timer_period, 1.2*duration)
            self.timer_period = 1.2*duration
            self.timer.Stop()
            self.timer.Start (self.timer_period)
    def OnTimer(self, evt):
        """Poll self.func and either create the plot (first call) or update
        the existing lines in place; closes the window on RuntimeError."""
        try:
            xdata, ydata_list, legend = self.func()
        except RuntimeError:
            traceback.print_exc(file=sys.stderr)
            self.OnClose(evt)
            return
        # normalize a 1-D series to a single-row 2-D array
        if len (ydata_list.shape)==1:
            ydata_list = ydata_list.reshape((1, ydata_list.size))
        if self.plot is None:
            # first call: create axes, lines, labels and legend
            self.axes = self.fig.add_axes([0.1,0.1,0.8,0.8])
            l = []
            for ydata in ydata_list:
                l.extend ([xdata, ydata])
            self.plot = self.axes.plot(*l)
            self.axes.set_xlabel('Seconds')
            self.axes.set_ylabel('Volts')
            self.axes.set_title('nof samples=%s' % (len(xdata)))
            self.axes.legend (legend)
        else:
            # subsequent calls: update line data and rescale the view
            self.axes.set_xlim(xmin = xdata[0], xmax=xdata[-1])
            ymin, ymax = 1e9,-1e9
            for line, data in zip (self.plot, ydata_list):
                line.set_xdata(xdata)
                line.set_ydata(data)
                ymin, ymax = min (data.min (), ymin), max (data.max (), ymax)
            # 5% headroom above and below the data range
            dy = (ymax-ymin)/20
            self.axes.set_ylim(ymin=ymin-dy, ymax=ymax+dy)
        self.canvas.draw()
    def onEraseBackground(self, evt):
        # this is supposed to prevent redraw flicker on some X servers...
        pass
def animated_plot(func, timer_period):
    """Open a PlotFigure window polling `func` every `timer_period` ms and
    run the wx main loop until the window is closed (blocking)."""
    app = wx.PySimpleApp(clearSigInt=False)
    frame = PlotFigure(func, timer_period)
    frame.Show()
    app.MainLoop()
if __name__ == '__main__':
    # demo: animate a sine wave whose phase advances with wall-clock time
    from numpy import *
    import time
    start_time = time.time ()
    def func():
        # returns (x, y, legend) as expected by PlotFigure
        x = arange (100, dtype=float)/100*pi
        d = sin (x+(time.time ()-start_time))
        return x, d, ['sin (x+time)']
    try:
        animated_plot (func, 1)
    except Exception, msg:
        print 'Got exception: %s' % ( msg)
    else:
        print 'Exited normally'
| bsd-3-clause |
alephu5/Soundbyte | environment/lib/python3.3/site-packages/matplotlib/gridspec.py | 1 | 14943 | """
:mod:`~matplotlib.gridspec` is a module which specifies the location
of the subplot in the figure.
``GridSpec``
specifies the geometry of the grid that a subplot will be
placed. The number of rows and number of columns of the grid
need to be set. Optionally, the subplot layout parameters
(e.g., left, right, etc.) can be tuned.
``SubplotSpec``
specifies the location of the subplot in the given *GridSpec*.
"""
import matplotlib
rcParams = matplotlib.rcParams
import matplotlib.transforms as mtransforms
import numpy as np
import warnings
class GridSpecBase(object):
"""
A base class of GridSpec that specifies the geometry of the grid
that a subplot will be placed.
"""
    def __init__(self, nrows, ncols,
                 height_ratios=None, width_ratios=None):
        """
        The number of rows and number of columns of the grid need to
        be set. Optionally, the ratio of heights and widths of rows and
        columns can be specified.
        """
        #self.figure = figure
        self._nrows , self._ncols = nrows, ncols
        self.set_height_ratios(height_ratios)
        self.set_width_ratios(width_ratios)
    def get_geometry(self):
        'get the geometry of the grid, eg 2,3'
        return self._nrows, self._ncols
    def get_subplot_params(self, fig=None):
        # abstract: subclasses return a SubplotParams-like object with
        # left/right/bottom/top/wspace/hspace attributes
        pass
    def new_subplotspec(self, loc, rowspan=1, colspan=1):
        """
        create and return a SuplotSpec instance.
        """
        # loc is the (row, col) of the upper-left cell of the span
        loc1, loc2 = loc
        subplotspec = self[loc1:loc1+rowspan, loc2:loc2+colspan]
        return subplotspec
    def set_width_ratios(self, width_ratios):
        # relative widths of the columns (None means equal widths)
        self._col_width_ratios = width_ratios
    def get_width_ratios(self):
        return self._col_width_ratios
    def set_height_ratios(self, height_ratios):
        # relative heights of the rows (None means equal heights)
        self._row_height_ratios = height_ratios
    def get_height_ratios(self):
        return self._row_height_ratios
def get_grid_positions(self, fig):
"""
return lists of bottom and top position of rows, left and
right positions of columns.
"""
nrows, ncols = self.get_geometry()
subplot_params = self.get_subplot_params(fig)
left = subplot_params.left
right = subplot_params.right
bottom = subplot_params.bottom
top = subplot_params.top
wspace = subplot_params.wspace
hspace = subplot_params.hspace
totWidth = right-left
totHeight = top-bottom
# calculate accumulated heights of columns
cellH = totHeight/(nrows + hspace*(nrows-1))
sepH = hspace*cellH
if self._row_height_ratios is not None:
netHeight = cellH * nrows
tr = float(sum(self._row_height_ratios))
cellHeights = [netHeight*r/tr for r in self._row_height_ratios]
else:
cellHeights = [cellH] * nrows
sepHeights = [0] + ([sepH] * (nrows-1))
cellHs = np.add.accumulate(np.ravel(list(zip(sepHeights, cellHeights))))
# calculate accumulated widths of rows
cellW = totWidth/(ncols + wspace*(ncols-1))
sepW = wspace*cellW
if self._col_width_ratios is not None:
netWidth = cellW * ncols
tr = float(sum(self._col_width_ratios))
cellWidths = [netWidth*r/tr for r in self._col_width_ratios]
else:
cellWidths = [cellW] * ncols
sepWidths = [0] + ([sepW] * (ncols-1))
cellWs = np.add.accumulate(np.ravel(list(zip(sepWidths, cellWidths))))
figTops = [top - cellHs[2*rowNum] for rowNum in range(nrows)]
figBottoms = [top - cellHs[2*rowNum+1] for rowNum in range(nrows)]
figLefts = [left + cellWs[2*colNum] for colNum in range(ncols)]
figRights = [left + cellWs[2*colNum+1] for colNum in range(ncols)]
return figBottoms, figTops, figLefts, figRights
def __getitem__(self, key):
"""
create and return a SuplotSpec instance.
"""
nrows, ncols = self.get_geometry()
total = nrows*ncols
if isinstance(key, tuple):
try:
k1, k2 = key
except ValueError:
raise ValueError("unrecognized subplot spec")
if isinstance(k1, slice):
row1, row2, _ = k1.indices(nrows)
else:
if k1 < 0:
k1 += nrows
if k1 >= nrows or k1 < 0 :
raise IndexError("index out of range")
row1, row2 = k1, k1+1
if isinstance(k2, slice):
col1, col2, _ = k2.indices(ncols)
else:
if k2 < 0:
k2 += ncols
if k2 >= ncols or k2 < 0 :
raise IndexError("index out of range")
col1, col2 = k2, k2+1
num1 = row1*ncols + col1
num2 = (row2-1)*ncols + (col2-1)
# single key
else:
if isinstance(key, slice):
num1, num2, _ = key.indices(total)
num2 -= 1
else:
if key < 0:
key += total
if key >= total or key < 0 :
raise IndexError("index out of range")
num1, num2 = key, None
return SubplotSpec(self, num1, num2)
class GridSpec(GridSpecBase):
    """
    A class that specifies the geometry of the grid that a subplot
    will be placed. The location of grid is determined by similar way
    as the SubplotParams.
    """
    def __init__(self, nrows, ncols,
                 left=None, bottom=None, right=None, top=None,
                 wspace=None, hspace=None,
                 width_ratios=None, height_ratios=None):
        """
        The number of rows and number of columns of the
        grid need to be set. Optionally, the subplot layout parameters
        (e.g., left, right, etc.) can be tuned.

        Any of left/bottom/right/top/wspace/hspace left as ``None``
        falls back to the figure's SubplotParams (or rcParams) when
        :meth:`get_subplot_params` computes the layout.
        """
        #self.figure = figure
        self.left=left
        self.bottom=bottom
        self.right=right
        self.top=top
        self.wspace=wspace
        self.hspace=hspace
        GridSpecBase.__init__(self, nrows, ncols,
                              width_ratios=width_ratios,
                              height_ratios=height_ratios)
        #self.set_width_ratios(width_ratios)
        #self.set_height_ratios(height_ratios)
    # layout attributes that update()/get_subplot_params() may touch
    _AllowedKeys = ["left", "bottom", "right", "top", "wspace", "hspace"]
    def update(self, **kwargs):
        """
        Update the current values. If any kwarg is None, default to
        the current value, if set, otherwise to rc.

        Also repositions every axes (in all open figures) whose
        topmost SubplotSpec belongs to this GridSpec, so the change
        takes effect immediately.
        """
        for k, v in kwargs.items():
            if k in self._AllowedKeys:
                setattr(self, k, v)
            else:
                raise AttributeError("%s is unknown keyword" % (k,))
        from matplotlib import _pylab_helpers
        from matplotlib.axes import SubplotBase
        for figmanager in _pylab_helpers.Gcf.figs.values():
            for ax in figmanager.canvas.figure.axes:
                # copied from Figure.subplots_adjust
                if not isinstance(ax, SubplotBase):
                    # Check if sharing a subplots axis
                    if ax._sharex is not None and isinstance(ax._sharex, SubplotBase):
                        if ax._sharex.get_subplotspec().get_gridspec() == self:
                            ax._sharex.update_params()
                            ax.set_position(ax._sharex.figbox)
                    elif ax._sharey is not None and isinstance(ax._sharey,SubplotBase):
                        if ax._sharey.get_subplotspec().get_gridspec() == self:
                            ax._sharey.update_params()
                            ax.set_position(ax._sharey.figbox)
                else:
                    ss = ax.get_subplotspec().get_topmost_subplotspec()
                    if ss.get_gridspec() == self:
                        ax.update_params()
                        ax.set_position(ax.figbox)
    def get_subplot_params(self, fig=None):
        """
        return a dictionary of subplot layout parameters. The default
        parameters are from rcParams unless a figure attribute is set.

        Values explicitly stored on this GridSpec override the
        figure/rc defaults.
        """
        from matplotlib.figure import SubplotParams
        import copy
        if fig is None:
            kw = dict([(k, rcParams["figure.subplot."+k]) \
                       for k in self._AllowedKeys])
            subplotpars = SubplotParams(**kw)
        else:
            # copy so the figure's own SubplotParams is not mutated
            subplotpars = copy.copy(fig.subplotpars)
        update_kw = dict([(k, getattr(self, k)) for k in self._AllowedKeys])
        subplotpars.update(**update_kw)
        return subplotpars
    def locally_modified_subplot_params(self):
        # names of the layout parameters explicitly set on this GridSpec
        return [k for k in self._AllowedKeys if getattr(self, k)]
    def tight_layout(self, fig, renderer=None, pad=1.08, h_pad=None, w_pad=None, rect=None):
        """
        Adjust subplot parameters to give specified padding.

        Parameters:

        pad : float
            padding between the figure edge and the edges of subplots, as a fraction of the font-size.
        h_pad, w_pad : float
            padding (height/width) between edges of adjacent subplots.
            Defaults to `pad_inches`.
        rect : if rect is given, it is interpreted as a rectangle
            (left, bottom, right, top) in the normalized figure
            coordinate that the whole subplots area (including
            labels) will fit into. Default is (0, 0, 1, 1).
        """
        from .tight_layout import (get_subplotspec_list,
                                   get_tight_layout_figure,
                                   get_renderer)
        subplotspec_list = get_subplotspec_list(fig.axes, grid_spec=self)
        if None in subplotspec_list:
            warnings.warn("This figure includes Axes that are not "
                          "compatible with tight_layout, so its "
                          "results might be incorrect.")
        if renderer is None:
            renderer = get_renderer(fig)
        kwargs = get_tight_layout_figure(fig, fig.axes, subplotspec_list,
                                         renderer,
                                         pad=pad, h_pad=h_pad, w_pad=w_pad,
                                         rect=rect,
                                         )
        # apply the computed layout parameters (repositions axes too)
        self.update(**kwargs)
class GridSpecFromSubplotSpec(GridSpecBase):
    """
    GridSpec whose subplot layout parameters are inherited from the
    location specified by a given SubplotSpec.

    This lets a grid be nested inside the cell(s) of a parent grid.
    """
    def __init__(self, nrows, ncols,
                 subplot_spec,
                 wspace=None, hspace=None,
                 height_ratios=None, width_ratios=None):
        """
        Create an (nrows x ncols) grid occupying the area of
        *subplot_spec*.  *wspace*/*hspace* override the figure (or rc)
        spacing defaults when given; row/column ratios behave as in
        GridSpecBase.
        """
        self._wspace = wspace
        self._hspace = hspace
        self._subplot_spec = subplot_spec
        GridSpecBase.__init__(self, nrows, ncols,
                              width_ratios=width_ratios,
                              height_ratios=height_ratios)
    def get_subplot_params(self, fig=None):
        """
        return a dictionary of subplot layout parameters.
        """
        from matplotlib.figure import SubplotParams
        # Spacing defaults come from the figure (or rc when no figure
        # is given), but explicit values from the constructor win.
        if fig is None:
            default_hspace = rcParams["figure.subplot.hspace"]
            default_wspace = rcParams["figure.subplot.wspace"]
        else:
            default_hspace = fig.subplotpars.hspace
            default_wspace = fig.subplotpars.wspace
        hspace = default_hspace if self._hspace is None else self._hspace
        wspace = default_wspace if self._wspace is None else self._wspace
        # The outer bounds of this nested grid are exactly the parent
        # subplot location's bounding box.
        bbox = self._subplot_spec.get_position(fig, return_all=False)
        left, bottom, right, top = bbox.extents
        return SubplotParams(left=left,
                             right=right,
                             bottom=bottom,
                             top=top,
                             wspace=wspace,
                             hspace=hspace)
    def get_topmost_subplotspec(self):
        'get the topmost SubplotSpec instance associated with the subplot'
        return self._subplot_spec.get_topmost_subplotspec()
class SubplotSpec(object):
    """
    specifies the location of the subplot in the given *GridSpec*.
    """
    def __init__(self, gridspec, num1, num2=None):
        """
        The subplot occupies cell *num1* of *gridspec* (row-major,
        0-based).  When *num2* is given, the subplot spans the
        rectangle whose opposite corners are cells num1 and num2.
        """
        # Query the geometry up front so an invalid gridspec fails fast.
        gridspec.get_geometry()
        self._gridspec = gridspec
        self.num1 = num1
        self.num2 = num2
    def get_gridspec(self):
        return self._gridspec
    def get_geometry(self):
        """
        get the subplot geometry, eg 2,2,3. Unlike SubplotParams,
        index is 0-based
        """
        nrows, ncols = self.get_gridspec().get_geometry()
        return nrows, ncols, self.num1, self.num2
    def get_position(self, fig, return_all=False):
        """
        update the subplot position from fig.subplotpars
        """
        gridspec = self.get_gridspec()
        nrows, ncols = gridspec.get_geometry()
        bottoms, tops, lefts, rights = gridspec.get_grid_positions(fig)
        rowNum, colNum = divmod(self.num1, ncols)
        bottom = bottoms[rowNum]
        top = tops[rowNum]
        left = lefts[colNum]
        right = rights[colNum]
        if self.num2 is not None:
            # spanning spec: bounding box is the union of both corners
            rowNum2, colNum2 = divmod(self.num2, ncols)
            bottom = min(bottom, bottoms[rowNum2])
            left = min(left, lefts[colNum2])
            top = max(top, tops[rowNum2])
            right = max(right, rights[colNum2])
        figbox = mtransforms.Bbox.from_extents(left, bottom,
                                               right, top)
        if return_all:
            return figbox, rowNum, colNum, nrows, ncols
        return figbox
    def get_topmost_subplotspec(self):
        'get the topmost SubplotSpec instance associated with the subplot'
        gridspec = self.get_gridspec()
        # Nested gridspecs forward to their parent; a plain GridSpec
        # has no such method, so this spec is already the topmost one.
        forward = getattr(gridspec, "get_topmost_subplotspec", None)
        return forward() if forward is not None else self
| gpl-3.0 |
louisLouL/pair_trading | capstone_env/lib/python3.6/site-packages/pandas/core/reshape/merge.py | 3 | 53914 | """
SQL-style merge routines
"""
import copy
import warnings
import string
import numpy as np
from pandas.compat import range, lzip, zip, map, filter
import pandas.compat as compat
from pandas import (Categorical, Series, DataFrame,
Index, MultiIndex, Timedelta)
from pandas.core.frame import _merge_doc
from pandas.core.dtypes.common import (
is_datetime64tz_dtype,
is_datetime64_dtype,
needs_i8_conversion,
is_int64_dtype,
is_categorical_dtype,
is_integer_dtype,
is_float_dtype,
is_numeric_dtype,
is_integer,
is_int_or_datetime_dtype,
is_dtype_equal,
is_bool,
is_list_like,
_ensure_int64,
_ensure_float64,
_ensure_object,
_get_dtype)
from pandas.core.dtypes.missing import na_value_for_dtype
from pandas.core.internals import (items_overlap_with_suffix,
concatenate_block_managers)
from pandas.util._decorators import Appender, Substitution
from pandas.core.sorting import is_int64_overflow_possible
import pandas.core.algorithms as algos
import pandas.core.common as com
from pandas._libs import hashtable as libhashtable, join as libjoin, lib
@Substitution('\nleft : DataFrame')
@Appender(_merge_doc, indents=0)
def merge(left, right, how='inner', on=None, left_on=None, right_on=None,
          left_index=False, right_index=False, sort=False,
          suffixes=('_x', '_y'), copy=True, indicator=False):
    # Thin public wrapper: all of the work happens in _MergeOperation.
    operation = _MergeOperation(left, right, how=how, on=on,
                                left_on=left_on, right_on=right_on,
                                left_index=left_index,
                                right_index=right_index,
                                sort=sort, suffixes=suffixes,
                                copy=copy, indicator=indicator)
    return operation.get_result()
# Fill in merge's shared docstring template; skipped under ``python -O``
# (where __debug__ is False and docstrings may be stripped anyway).
if __debug__:
    merge.__doc__ = _merge_doc % '\nleft : DataFrame'
class MergeError(ValueError):
    """Error raised when a merge cannot be performed (bad keys/options)."""
    pass
def _groupby_and_merge(by, on, left, right, _merge_pieces,
check_duplicates=True):
"""
groupby & merge; we are always performing a left-by type operation
Parameters
----------
by: field to group
on: duplicates field
left: left frame
right: right frame
_merge_pieces: function for merging
check_duplicates: boolean, default True
should we check & clean duplicates
"""
pieces = []
if not isinstance(by, (list, tuple)):
by = [by]
lby = left.groupby(by, sort=False)
# if we can groupby the rhs
# then we can get vastly better perf
try:
# we will check & remove duplicates if indicated
if check_duplicates:
if on is None:
on = []
elif not isinstance(on, (list, tuple)):
on = [on]
if right.duplicated(by + on).any():
right = right.drop_duplicates(by + on, keep='last')
rby = right.groupby(by, sort=False)
except KeyError:
rby = None
for key, lhs in lby:
if rby is None:
rhs = right
else:
try:
rhs = right.take(rby.indices[key])
except KeyError:
# key doesn't exist in left
lcols = lhs.columns.tolist()
cols = lcols + [r for r in right.columns
if r not in set(lcols)]
merged = lhs.reindex(columns=cols)
merged.index = range(len(merged))
pieces.append(merged)
continue
merged = _merge_pieces(lhs, rhs)
# make sure join keys are in the merged
# TODO, should _merge_pieces do this?
for k in by:
try:
if k in merged:
merged[k] = key
except:
pass
pieces.append(merged)
# preserve the original order
# if we have a missing piece this can be reset
from pandas.core.reshape.concat import concat
result = concat(pieces, ignore_index=True)
result = result.reindex(columns=pieces[0].columns, copy=False)
return result, lby
def ordered_merge(left, right, on=None,
                  left_on=None, right_on=None,
                  left_by=None, right_by=None,
                  fill_method=None, suffixes=('_x', '_y')):
    """Deprecated: use :func:`merge_ordered` instead."""
    warnings.warn("ordered_merge is deprecated and replaced by merge_ordered",
                  FutureWarning, stacklevel=2)
    # forward everything unchanged to the replacement API
    kwargs = dict(on=on, left_on=left_on, right_on=right_on,
                  left_by=left_by, right_by=right_by,
                  fill_method=fill_method, suffixes=suffixes)
    return merge_ordered(left, right, **kwargs)
def merge_ordered(left, right, on=None,
                  left_on=None, right_on=None,
                  left_by=None, right_by=None,
                  fill_method=None, suffixes=('_x', '_y'),
                  how='outer'):
    """Perform a merge designed for ordered data such as time series.

    In addition to an ordinary join, missing values can be filled
    (``fill_method='ffill'``) and the merge can be applied group-wise
    through ``left_by``/``right_by``.

    Parameters
    ----------
    left, right : DataFrame
    on : label or list
        Field names to join on. Must be found in both DataFrames.
    left_on, right_on : label or list, or array-like
        Field names to join on in the left/right DataFrame; may also be
        vectors of frame length used directly as join keys.
    left_by, right_by : column name or list of column names
        Group the corresponding DataFrame by these columns and merge
        piece by piece with the other frame.  At most one of the two
        may be given.
    fill_method : {'ffill', None}, default None
        Interpolation method for data
    suffixes : 2-length sequence (tuple, list, ...)
        Suffix to apply to overlapping column names in the left and
        right side, respectively
    how : {'left', 'right', 'outer', 'inner'}, default 'outer'
        SQL-style join type, as in :func:`merge`.

        .. versionadded:: 0.19.0

    Returns
    -------
    merged : DataFrame
        The output type will the be same as 'left', if it is a subclass
        of DataFrame.

    See also
    --------
    merge
    merge_asof
    """
    # grouping on both sides at once is not supported
    if left_by is not None and right_by is not None:
        raise ValueError('Can only group either left or right frames')

    def _merger(x, y):
        # the actual ordered merge of two (pieces of) frames
        op = _OrderedMerge(x, y, on=on, left_on=left_on, right_on=right_on,
                           suffixes=suffixes, fill_method=fill_method,
                           how=how)
        return op.get_result()

    if left_by is not None:
        result, _ = _groupby_and_merge(left_by, on, left, right,
                                       lambda x, y: _merger(x, y),
                                       check_duplicates=False)
    elif right_by is not None:
        # note the swapped argument order: pieces of *right* are merged
        # against *left*, so _merger is called with (left, right_piece)
        result, _ = _groupby_and_merge(right_by, on, right, left,
                                       lambda x, y: _merger(y, x),
                                       check_duplicates=False)
    else:
        result = _merger(left, right)
    return result
ordered_merge.__doc__ = merge_ordered.__doc__
def merge_asof(left, right, on=None,
               left_on=None, right_on=None,
               left_index=False, right_index=False,
               by=None, left_by=None, right_by=None,
               suffixes=('_x', '_y'),
               tolerance=None,
               allow_exact_matches=True,
               direction='backward'):
    """Perform an asof merge. This is similar to a left-join except that we
    match on nearest key rather than equal keys.

    Both DataFrames must be sorted by the key.

    For each row in the left DataFrame:

      - A "backward" search selects the last row in the right DataFrame whose
        'on' key is less than or equal to the left's key.

      - A "forward" search selects the first row in the right DataFrame whose
        'on' key is greater than or equal to the left's key.

      - A "nearest" search selects the row in the right DataFrame whose 'on'
        key is closest in absolute distance to the left's key.

    The default is "backward" and is compatible in versions below 0.20.0.
    The direction parameter was added in version 0.20.0 and introduces
    "forward" and "nearest".

    Optionally match on equivalent keys with 'by' before searching with 'on'.

    .. versionadded:: 0.19.0

    Parameters
    ----------
    left : DataFrame
    right : DataFrame
    on : label
        Field name to join on. Must be found in both DataFrames.
        The data MUST be ordered. Furthermore this must be a numeric column,
        such as datetimelike, integer, or float. On or left_on/right_on
        must be given.
    left_on : label
        Field name to join on in left DataFrame.
    right_on : label
        Field name to join on in right DataFrame.
    left_index : boolean
        Use the index of the left DataFrame as the join key.

        .. versionadded:: 0.19.2

    right_index : boolean
        Use the index of the right DataFrame as the join key.

        .. versionadded:: 0.19.2

    by : column name or list of column names
        Match on these columns before performing merge operation.
    left_by : column name
        Field names to match on in the left DataFrame.

        .. versionadded:: 0.19.2

    right_by : column name
        Field names to match on in the right DataFrame.

        .. versionadded:: 0.19.2

    suffixes : 2-length sequence (tuple, list, ...)
        Suffix to apply to overlapping column names in the left and right
        side, respectively.
    tolerance : integer or Timedelta, optional, default None
        Select asof tolerance within this range; must be compatible
        with the merge index.
    allow_exact_matches : boolean, default True

        - If True, allow matching with the same 'on' value
          (i.e. less-than-or-equal-to / greater-than-or-equal-to)
        - If False, don't match the same 'on' value
          (i.e., strictly less-than / strictly greater-than)

    direction : 'backward' (default), 'forward', or 'nearest'
        Whether to search for prior, subsequent, or closest matches.

        .. versionadded:: 0.20.0

    Returns
    -------
    merged : DataFrame

    Examples
    --------
    >>> left
        a left_val
    0   1        a
    1   5        b
    2  10        c

    >>> right
       a  right_val
    0  1          1
    1  2          2
    2  3          3
    3  6          6
    4  7          7

    >>> pd.merge_asof(left, right, on='a')
        a left_val  right_val
    0   1        a          1
    1   5        b          3
    2  10        c          7

    >>> pd.merge_asof(left, right, on='a', allow_exact_matches=False)
        a left_val  right_val
    0   1        a        NaN
    1   5        b        3.0
    2  10        c        7.0

    >>> pd.merge_asof(left, right, on='a', direction='forward')
        a left_val  right_val
    0   1        a        1.0
    1   5        b        6.0
    2  10        c        NaN

    >>> pd.merge_asof(left, right, on='a', direction='nearest')
        a left_val  right_val
    0   1        a          1
    1   5        b          6
    2  10        c          7

    We can use indexed DataFrames as well.

    >>> left
       left_val
    1         a
    5         b
    10        c

    >>> right
       right_val
    1          1
    2          2
    3          3
    6          6
    7          7

    >>> pd.merge_asof(left, right, left_index=True, right_index=True)
       left_val  right_val
    1         a          1
    5         b          3
    10        c          7

    Here is a real-world times-series example

    >>> quotes
                         time ticker     bid     ask
    0 2016-05-25 13:30:00.023   GOOG  720.50  720.93
    1 2016-05-25 13:30:00.023   MSFT   51.95   51.96
    2 2016-05-25 13:30:00.030   MSFT   51.97   51.98
    3 2016-05-25 13:30:00.041   MSFT   51.99   52.00
    4 2016-05-25 13:30:00.048   GOOG  720.50  720.93
    5 2016-05-25 13:30:00.049   AAPL   97.99   98.01
    6 2016-05-25 13:30:00.072   GOOG  720.50  720.88
    7 2016-05-25 13:30:00.075   MSFT   52.01   52.03

    >>> trades
                         time ticker   price  quantity
    0 2016-05-25 13:30:00.023   MSFT   51.95        75
    1 2016-05-25 13:30:00.038   MSFT   51.95       155
    2 2016-05-25 13:30:00.048   GOOG  720.77       100
    3 2016-05-25 13:30:00.048   GOOG  720.92       100
    4 2016-05-25 13:30:00.048   AAPL   98.00       100

    By default we are taking the asof of the quotes

    >>> pd.merge_asof(trades, quotes,
    ...                       on='time',
    ...                       by='ticker')
                         time ticker   price  quantity     bid     ask
    0 2016-05-25 13:30:00.023   MSFT   51.95        75   51.95   51.96
    1 2016-05-25 13:30:00.038   MSFT   51.95       155   51.97   51.98
    2 2016-05-25 13:30:00.048   GOOG  720.77       100  720.50  720.93
    3 2016-05-25 13:30:00.048   GOOG  720.92       100  720.50  720.93
    4 2016-05-25 13:30:00.048   AAPL   98.00       100     NaN     NaN

    We only asof within 2ms between the quote time and the trade time

    >>> pd.merge_asof(trades, quotes,
    ...                       on='time',
    ...                       by='ticker',
    ...                       tolerance=pd.Timedelta('2ms'))
                         time ticker   price  quantity     bid     ask
    0 2016-05-25 13:30:00.023   MSFT   51.95        75   51.95   51.96
    1 2016-05-25 13:30:00.038   MSFT   51.95       155     NaN     NaN
    2 2016-05-25 13:30:00.048   GOOG  720.77       100  720.50  720.93
    3 2016-05-25 13:30:00.048   GOOG  720.92       100  720.50  720.93
    4 2016-05-25 13:30:00.048   AAPL   98.00       100     NaN     NaN

    We only asof within 10ms between the quote time and the trade time
    and we exclude exact matches on time. However *prior* data will
    propagate forward

    >>> pd.merge_asof(trades, quotes,
    ...                       on='time',
    ...                       by='ticker',
    ...                       tolerance=pd.Timedelta('10ms'),
    ...                       allow_exact_matches=False)
                         time ticker   price  quantity     bid     ask
    0 2016-05-25 13:30:00.023   MSFT   51.95        75     NaN     NaN
    1 2016-05-25 13:30:00.038   MSFT   51.95       155   51.97   51.98
    2 2016-05-25 13:30:00.048   GOOG  720.77       100  720.50  720.93
    3 2016-05-25 13:30:00.048   GOOG  720.92       100  720.50  720.93
    4 2016-05-25 13:30:00.048   AAPL   98.00       100     NaN     NaN

    See also
    --------
    merge
    merge_ordered
    """
    # all the heavy lifting happens in _AsOfMerge (how='asof')
    op = _AsOfMerge(left, right,
                    on=on, left_on=left_on, right_on=right_on,
                    left_index=left_index, right_index=right_index,
                    by=by, left_by=left_by, right_by=right_by,
                    suffixes=suffixes,
                    how='asof', tolerance=tolerance,
                    allow_exact_matches=allow_exact_matches,
                    direction=direction)
    return op.get_result()
# TODO: transformations??
# TODO: only copy DataFrames when modification necessary
class _MergeOperation(object):
"""
Perform a database (SQL) merge operation between two DataFrame objects
using either columns as keys or their row indexes
"""
_merge_type = 'merge'
def __init__(self, left, right, how='inner', on=None,
left_on=None, right_on=None, axis=1,
left_index=False, right_index=False, sort=True,
suffixes=('_x', '_y'), copy=True, indicator=False):
self.left = self.orig_left = left
self.right = self.orig_right = right
self.how = how
self.axis = axis
self.on = com._maybe_make_list(on)
self.left_on = com._maybe_make_list(left_on)
self.right_on = com._maybe_make_list(right_on)
self.copy = copy
self.suffixes = suffixes
self.sort = sort
self.left_index = left_index
self.right_index = right_index
self.indicator = indicator
if isinstance(self.indicator, compat.string_types):
self.indicator_name = self.indicator
elif isinstance(self.indicator, bool):
self.indicator_name = '_merge' if self.indicator else None
else:
raise ValueError(
'indicator option can only accept boolean or string arguments')
if not isinstance(left, DataFrame):
raise ValueError(
'can not merge DataFrame with instance of '
'type {0}'.format(type(left)))
if not isinstance(right, DataFrame):
raise ValueError(
'can not merge DataFrame with instance of '
'type {0}'.format(type(right)))
if not is_bool(left_index):
raise ValueError(
'left_index parameter must be of type bool, not '
'{0}'.format(type(left_index)))
if not is_bool(right_index):
raise ValueError(
'right_index parameter must be of type bool, not '
'{0}'.format(type(right_index)))
# warn user when merging between different levels
if left.columns.nlevels != right.columns.nlevels:
msg = ('merging between different levels can give an unintended '
'result ({0} levels on the left, {1} on the right)')
msg = msg.format(left.columns.nlevels, right.columns.nlevels)
warnings.warn(msg, UserWarning)
self._validate_specification()
# note this function has side effects
(self.left_join_keys,
self.right_join_keys,
self.join_names) = self._get_merge_keys()
# validate the merge keys dtypes. We may need to coerce
# to avoid incompat dtypes
self._maybe_coerce_merge_keys()
def get_result(self):
if self.indicator:
self.left, self.right = self._indicator_pre_merge(
self.left, self.right)
join_index, left_indexer, right_indexer = self._get_join_info()
ldata, rdata = self.left._data, self.right._data
lsuf, rsuf = self.suffixes
llabels, rlabels = items_overlap_with_suffix(ldata.items, lsuf,
rdata.items, rsuf)
lindexers = {1: left_indexer} if left_indexer is not None else {}
rindexers = {1: right_indexer} if right_indexer is not None else {}
result_data = concatenate_block_managers(
[(ldata, lindexers), (rdata, rindexers)],
axes=[llabels.append(rlabels), join_index],
concat_axis=0, copy=self.copy)
typ = self.left._constructor
result = typ(result_data).__finalize__(self, method=self._merge_type)
if self.indicator:
result = self._indicator_post_merge(result)
self._maybe_add_join_keys(result, left_indexer, right_indexer)
return result
def _indicator_pre_merge(self, left, right):
columns = left.columns.union(right.columns)
for i in ['_left_indicator', '_right_indicator']:
if i in columns:
raise ValueError("Cannot use `indicator=True` option when "
"data contains a column named {}".format(i))
if self.indicator_name in columns:
raise ValueError(
"Cannot use name of an existing column for indicator column")
left = left.copy()
right = right.copy()
left['_left_indicator'] = 1
left['_left_indicator'] = left['_left_indicator'].astype('int8')
right['_right_indicator'] = 2
right['_right_indicator'] = right['_right_indicator'].astype('int8')
return left, right
def _indicator_post_merge(self, result):
result['_left_indicator'] = result['_left_indicator'].fillna(0)
result['_right_indicator'] = result['_right_indicator'].fillna(0)
result[self.indicator_name] = Categorical((result['_left_indicator'] +
result['_right_indicator']),
categories=[1, 2, 3])
result[self.indicator_name] = (
result[self.indicator_name]
.cat.rename_categories(['left_only', 'right_only', 'both']))
result = result.drop(labels=['_left_indicator', '_right_indicator'],
axis=1)
return result
def _maybe_add_join_keys(self, result, left_indexer, right_indexer):
left_has_missing = None
right_has_missing = None
keys = zip(self.join_names, self.left_on, self.right_on)
for i, (name, lname, rname) in enumerate(keys):
if not _should_fill(lname, rname):
continue
take_left, take_right = None, None
if name in result:
if left_indexer is not None and right_indexer is not None:
if name in self.left:
if left_has_missing is None:
left_has_missing = (left_indexer == -1).any()
if left_has_missing:
take_right = self.right_join_keys[i]
if not is_dtype_equal(result[name].dtype,
self.left[name].dtype):
take_left = self.left[name]._values
elif name in self.right:
if right_has_missing is None:
right_has_missing = (right_indexer == -1).any()
if right_has_missing:
take_left = self.left_join_keys[i]
if not is_dtype_equal(result[name].dtype,
self.right[name].dtype):
take_right = self.right[name]._values
elif left_indexer is not None \
and isinstance(self.left_join_keys[i], np.ndarray):
take_left = self.left_join_keys[i]
take_right = self.right_join_keys[i]
if take_left is not None or take_right is not None:
if take_left is None:
lvals = result[name]._values
else:
lfill = na_value_for_dtype(take_left.dtype)
lvals = algos.take_1d(take_left, left_indexer,
fill_value=lfill)
if take_right is None:
rvals = result[name]._values
else:
rfill = na_value_for_dtype(take_right.dtype)
rvals = algos.take_1d(take_right, right_indexer,
fill_value=rfill)
# if we have an all missing left_indexer
# make sure to just use the right values
mask = left_indexer == -1
if mask.all():
key_col = rvals
else:
key_col = Index(lvals).where(~mask, rvals)
if name in result:
result[name] = key_col
else:
result.insert(i, name or 'key_%d' % i, key_col)
def _get_join_indexers(self):
""" return the join indexers """
return _get_join_indexers(self.left_join_keys,
self.right_join_keys,
sort=self.sort,
how=self.how)
def _get_join_info(self):
left_ax = self.left._data.axes[self.axis]
right_ax = self.right._data.axes[self.axis]
if self.left_index and self.right_index and self.how != 'asof':
join_index, left_indexer, right_indexer = \
left_ax.join(right_ax, how=self.how, return_indexers=True,
sort=self.sort)
elif self.right_index and self.how == 'left':
join_index, left_indexer, right_indexer = \
_left_join_on_index(left_ax, right_ax, self.left_join_keys,
sort=self.sort)
elif self.left_index and self.how == 'right':
join_index, right_indexer, left_indexer = \
_left_join_on_index(right_ax, left_ax, self.right_join_keys,
sort=self.sort)
else:
(left_indexer,
right_indexer) = self._get_join_indexers()
if self.right_index:
if len(self.left) > 0:
join_index = self.left.index.take(left_indexer)
else:
join_index = self.right.index.take(right_indexer)
left_indexer = np.array([-1] * len(join_index))
elif self.left_index:
if len(self.right) > 0:
join_index = self.right.index.take(right_indexer)
else:
join_index = self.left.index.take(left_indexer)
right_indexer = np.array([-1] * len(join_index))
else:
join_index = Index(np.arange(len(left_indexer)))
if len(join_index) == 0:
join_index = join_index.astype(object)
return join_index, left_indexer, right_indexer
def _get_merge_keys(self):
"""
Note: has side effects (copy/delete key columns)
Parameters
----------
left
right
on
Returns
-------
left_keys, right_keys
"""
left_keys = []
right_keys = []
join_names = []
right_drop = []
left_drop = []
left, right = self.left, self.right
is_lkey = lambda x: isinstance(
x, (np.ndarray, Series)) and len(x) == len(left)
is_rkey = lambda x: isinstance(
x, (np.ndarray, Series)) and len(x) == len(right)
# Note that pd.merge_asof() has separate 'on' and 'by' parameters. A
# user could, for example, request 'left_index' and 'left_by'. In a
# regular pd.merge(), users cannot specify both 'left_index' and
# 'left_on'. (Instead, users have a MultiIndex). That means the
# self.left_on in this function is always empty in a pd.merge(), but
# a pd.merge_asof(left_index=True, left_by=...) will result in a
# self.left_on array with a None in the middle of it. This requires
# a work-around as designated in the code below.
# See _validate_specification() for where this happens.
# ugh, spaghetti re #733
if _any(self.left_on) and _any(self.right_on):
for lk, rk in zip(self.left_on, self.right_on):
if is_lkey(lk):
left_keys.append(lk)
if is_rkey(rk):
right_keys.append(rk)
join_names.append(None) # what to do?
else:
if rk is not None:
right_keys.append(right[rk]._values)
join_names.append(rk)
else:
# work-around for merge_asof(right_index=True)
right_keys.append(right.index)
join_names.append(right.index.name)
else:
if not is_rkey(rk):
if rk is not None:
right_keys.append(right[rk]._values)
else:
# work-around for merge_asof(right_index=True)
right_keys.append(right.index)
if lk is not None and lk == rk:
# avoid key upcast in corner case (length-0)
if len(left) > 0:
right_drop.append(rk)
else:
left_drop.append(lk)
else:
right_keys.append(rk)
if lk is not None:
left_keys.append(left[lk]._values)
join_names.append(lk)
else:
# work-around for merge_asof(left_index=True)
left_keys.append(left.index)
join_names.append(left.index.name)
elif _any(self.left_on):
for k in self.left_on:
if is_lkey(k):
left_keys.append(k)
join_names.append(None)
else:
left_keys.append(left[k]._values)
join_names.append(k)
if isinstance(self.right.index, MultiIndex):
right_keys = [lev._values.take(lab)
for lev, lab in zip(self.right.index.levels,
self.right.index.labels)]
else:
right_keys = [self.right.index.values]
elif _any(self.right_on):
for k in self.right_on:
if is_rkey(k):
right_keys.append(k)
join_names.append(None)
else:
right_keys.append(right[k]._values)
join_names.append(k)
if isinstance(self.left.index, MultiIndex):
left_keys = [lev._values.take(lab)
for lev, lab in zip(self.left.index.levels,
self.left.index.labels)]
else:
left_keys = [self.left.index.values]
if left_drop:
self.left = self.left.drop(left_drop, axis=1)
if right_drop:
self.right = self.right.drop(right_drop, axis=1)
return left_keys, right_keys, join_names
    def _maybe_coerce_merge_keys(self):
        """Coerce incompatible left/right merge-key dtypes to object.

        The key arrays produced by ``_get_merge_keys`` are valid
        individually, but a left/right pair may still have dtypes that
        cannot be compared directly (e.g. object vs. integer, or two
        unequal categoricals).  When no safe common representation can be
        inferred, the offending key column is cast to ``object`` on both
        frames so factorization compares values rather than raw dtypes.

        Side effects: may rebind ``self.left`` / ``self.right`` with the
        coerced column(s).  Returns None.
        """
        # we have valid mergee's but we may have to further
        # coerce these if they are originally incompatible types
        #
        # for example if these are categorical, but are not dtype_equal
        # or if we have object and integer dtypes
        for lk, rk, name in zip(self.left_join_keys,
                                self.right_join_keys,
                                self.join_names):
            # nothing to reconcile when exactly one side is empty
            if (len(lk) and not len(rk)) or (not len(lk) and len(rk)):
                continue
            # if either left or right is a categorical
            # then they must match exactly in categories & ordered
            if is_categorical_dtype(lk) and is_categorical_dtype(rk):
                if lk.is_dtype_equal(rk):
                    continue
            elif is_categorical_dtype(lk) or is_categorical_dtype(rk):
                # one-sided categorical: fall through to object coercion
                pass
            elif is_dtype_equal(lk.dtype, rk.dtype):
                continue
            # if we are numeric, then allow differing
            # kinds to proceed, eg. int64 and int8
            # further if we are object, but we infer to
            # the same, then proceed
            if (is_numeric_dtype(lk) and is_numeric_dtype(rk)):
                if lk.dtype.kind == rk.dtype.kind:
                    continue
                # let's infer and see if we are ok
                if lib.infer_dtype(lk) == lib.infer_dtype(rk):
                    continue
            # Houston, we have a problem!
            # let's coerce to object on whichever frame(s) carry the key
            # as a real column (index-based keys have no column to cast)
            if name in self.left.columns:
                self.left = self.left.assign(
                    **{name: self.left[name].astype(object)})
            if name in self.right.columns:
                self.right = self.right.assign(
                    **{name: self.right[name].astype(object)})
    def _validate_specification(self):
        """Normalize and validate the on/left_on/right_on/index arguments.

        Exactly one way of specifying the join keys is accepted; this
        method fills in ``self.left_on`` / ``self.right_on`` so that
        downstream code can treat them uniformly (``None`` placeholders
        stand in for an index-based side).

        Raises:
            MergeError: conflicting or missing key specifications.
            ValueError: left/right key lists of unequal length, or a key
                list whose length does not match the other side's index
                levels.
        """
        # Hm, any way to make this logic less complicated??
        if self.on is None and self.left_on is None and self.right_on is None:
            if self.left_index and self.right_index:
                # pure index-on-index join: no column keys at all
                self.left_on, self.right_on = (), ()
            elif self.left_index:
                if self.right_on is None:
                    raise MergeError('Must pass right_on or right_index=True')
            elif self.right_index:
                if self.left_on is None:
                    raise MergeError('Must pass left_on or left_index=True')
            else:
                # use the common columns
                common_cols = self.left.columns.intersection(
                    self.right.columns)
                if len(common_cols) == 0:
                    raise MergeError('No common columns to perform merge on')
                if not common_cols.is_unique:
                    raise MergeError("Data columns not unique: %s"
                                     % repr(common_cols))
                self.left_on = self.right_on = common_cols
        elif self.on is not None:
            if self.left_on is not None or self.right_on is not None:
                raise MergeError('Can only pass argument "on" OR "left_on" '
                                 'and "right_on", not a combination of both.')
            self.left_on = self.right_on = self.on
        elif self.left_on is not None:
            n = len(self.left_on)
            if self.right_index:
                if len(self.left_on) != self.right.index.nlevels:
                    raise ValueError('len(left_on) must equal the number '
                                     'of levels in the index of "right"')
                # None placeholders mean "use the right index" per level
                self.right_on = [None] * n
        elif self.right_on is not None:
            n = len(self.right_on)
            if self.left_index:
                if len(self.right_on) != self.left.index.nlevels:
                    raise ValueError('len(right_on) must equal the number '
                                     'of levels in the index of "left"')
                # None placeholders mean "use the left index" per level
                self.left_on = [None] * n
        if len(self.right_on) != len(self.left_on):
            raise ValueError("len(right_on) must equal len(left_on)")
def _get_join_indexers(left_keys, right_keys, sort=False, how='inner',
                       **kwargs):
    """Compute row indexers aligning ``left_keys`` with ``right_keys``.

    Parameters
    ----------
    left_keys: ndarray, Index, Series
    right_keys: ndarray, Index, Series
    sort: boolean, default False
    how: string {'inner', 'outer', 'left', 'right'}, default 'inner'

    Returns
    -------
    tuple of (left_indexer, right_indexer)
        indexers into the left_keys, right_keys

    Notes
    -----
    Keys are first factorized level-by-level, flattened to a single
    int64 key per row, then re-factorized to a dense space before being
    handed to the cython join routine selected by ``how``.
    """
    from functools import partial
    assert len(left_keys) == len(right_keys), \
        'left_key and right_keys must be the same length'
    # bind `sort` arg. of _factorize_keys
    fkeys = partial(_factorize_keys, sort=sort)
    # get left & right join labels and num. of levels at each location
    llab, rlab, shape = map(list, zip(* map(fkeys, left_keys, right_keys)))
    # get flat i8 keys from label lists
    lkey, rkey = _get_join_keys(llab, rlab, shape, sort)
    # factorize keys to a dense i8 space
    # `count` is the num. of unique keys
    # set(lkey) | set(rkey) == range(count)
    lkey, rkey, count = fkeys(lkey, rkey)
    # preserve left frame order if how == 'left' and sort == False
    kwargs = copy.copy(kwargs)
    if how == 'left':
        kwargs['sort'] = sort
    join_func = _join_functions[how]
    return join_func(lkey, rkey, count, **kwargs)
class _OrderedMerge(_MergeOperation):
    """Merge operation for ordered data, with optional forward-filling
    of unmatched rows (``fill_method='ffill'``).  Always factorizes with
    ``sort=True`` so the join keys come out ordered.
    """
    _merge_type = 'ordered_merge'
    def __init__(self, left, right, on=None, left_on=None, right_on=None,
                 left_index=False, right_index=False, axis=1,
                 suffixes=('_x', '_y'), copy=True,
                 fill_method=None, how='outer'):
        # fill_method: None or 'ffill' (interpolation of unmatched rows)
        self.fill_method = fill_method
        _MergeOperation.__init__(self, left, right, on=on, left_on=left_on,
                                 left_index=left_index,
                                 right_index=right_index,
                                 right_on=right_on, axis=axis,
                                 how=how, suffixes=suffixes,
                                 sort=True  # factorize sorts
                                 )
    def get_result(self):
        """Assemble and return the merged frame.

        Concatenates the reindexed left/right block managers along the
        column axis, suffixing overlapping column names, and appends any
        join keys that are not already present in the output.
        """
        join_index, left_indexer, right_indexer = self._get_join_info()
        # this is a bit kludgy: reach into the internal BlockManagers
        ldata, rdata = self.left._data, self.right._data
        lsuf, rsuf = self.suffixes
        llabels, rlabels = items_overlap_with_suffix(ldata.items, lsuf,
                                                     rdata.items, rsuf)
        if self.fill_method == 'ffill':
            # propagate last matched position forward over gaps
            left_join_indexer = libjoin.ffill_indexer(left_indexer)
            right_join_indexer = libjoin.ffill_indexer(right_indexer)
        else:
            left_join_indexer = left_indexer
            right_join_indexer = right_indexer
        # axis 1 (rows of the block manager) is the only one reindexed
        lindexers = {
            1: left_join_indexer} if left_join_indexer is not None else {}
        rindexers = {
            1: right_join_indexer} if right_join_indexer is not None else {}
        result_data = concatenate_block_managers(
            [(ldata, lindexers), (rdata, rindexers)],
            axes=[llabels.append(rlabels), join_index],
            concat_axis=0, copy=self.copy)
        typ = self.left._constructor
        result = typ(result_data).__finalize__(self, method=self._merge_type)
        self._maybe_add_join_keys(result, left_indexer, right_indexer)
        return result
def _asof_function(direction, on_type):
    """Look up the cython asof-join routine for *direction* and *on_type*.

    Returns None when no specialization exists for the combination.
    """
    func_name = 'asof_join_%s_%s' % (direction, on_type)
    return getattr(libjoin, func_name, None)
def _asof_by_function(direction, on_type, by_type):
    """Look up the cython grouped ('by') asof-join routine.

    Returns None when no specialization exists for the combination of
    *direction*, *on_type* and *by_type*.
    """
    func_name = 'asof_join_%s_%s_by_%s' % (direction, on_type, by_type)
    return getattr(libjoin, func_name, None)
# Maps a cython C type name to the coercion helper that converts an
# array to that representation (used to upcast 'by' keys in asof joins).
_type_casters = {
    'int64_t': _ensure_int64,
    'double': _ensure_float64,
    'object': _ensure_object,
}
# Maps a numpy dtype name to the cython type name used to select a
# specialized join routine.  float16 has no cython counterpart and is
# marked with the 'error' sentinel (rejected in _get_cython_type).
_cython_types = {
    'uint8': 'uint8_t',
    'uint32': 'uint32_t',
    'uint16': 'uint16_t',
    'uint64': 'uint64_t',
    'int8': 'int8_t',
    'int32': 'int32_t',
    'int16': 'int16_t',
    'int64': 'int64_t',
    'float16': 'error',
    'float32': 'float',
    'float64': 'double',
}
def _get_cython_type(dtype):
    """Given a dtype, return a C name like 'int64_t' or 'double'.

    Unknown dtypes fall back to 'object'; dtypes mapped to the 'error'
    sentinel (currently only float16) raise MergeError.
    """
    name = _get_dtype(dtype).name
    mapped = _cython_types.get(name, 'object')
    if mapped != 'error':
        return mapped
    raise MergeError('unsupported type: ' + name)
def _get_cython_type_upcast(dtype):
    """Upcast a dtype to 'int64_t', 'double', or 'object'.

    Used for 'by' keys, since the hash tables backing the grouped asof
    joins only support these three representations.
    """
    if is_integer_dtype(dtype):
        return 'int64_t'
    if is_float_dtype(dtype):
        return 'double'
    return 'object'
class _AsOfMerge(_OrderedMerge):
    """Merge operation backing ``merge_asof``: for each left row, match
    the nearest right key in the requested ``direction`` ('backward',
    'forward' or 'nearest'), optionally within ``tolerance`` and within
    equal 'by' groups.  Both sides' 'on' keys must be sorted.
    """
    _merge_type = 'asof_merge'
    def __init__(self, left, right, on=None, left_on=None, right_on=None,
                 left_index=False, right_index=False,
                 by=None, left_by=None, right_by=None,
                 axis=1, suffixes=('_x', '_y'), copy=True,
                 fill_method=None,
                 how='asof', tolerance=None,
                 allow_exact_matches=True,
                 direction='backward'):
        self.by = by
        self.left_by = left_by
        self.right_by = right_by
        # tolerance: max distance between matched keys (Timedelta for
        # datetime-like keys, int for integer keys); validated later.
        self.tolerance = tolerance
        self.allow_exact_matches = allow_exact_matches
        self.direction = direction
        _OrderedMerge.__init__(self, left, right, on=on, left_on=left_on,
                               right_on=right_on, left_index=left_index,
                               right_index=right_index, axis=axis,
                               how=how, suffixes=suffixes,
                               fill_method=fill_method)
    def _validate_specification(self):
        """Validate asof-specific constraints on top of the base checks
        and fold the 'by' columns into left_on/right_on (side effect)."""
        super(_AsOfMerge, self)._validate_specification()
        # we only allow on to be a single item for on
        if len(self.left_on) != 1 and not self.left_index:
            raise MergeError("can only asof on a key for left")
        if len(self.right_on) != 1 and not self.right_index:
            raise MergeError("can only asof on a key for right")
        if self.left_index and isinstance(self.left.index, MultiIndex):
            raise MergeError("left can only have one index")
        if self.right_index and isinstance(self.right.index, MultiIndex):
            raise MergeError("right can only have one index")
        # set 'by' columns
        if self.by is not None:
            if self.left_by is not None or self.right_by is not None:
                raise MergeError('Can only pass by OR left_by '
                                 'and right_by')
            self.left_by = self.right_by = self.by
        if self.left_by is None and self.right_by is not None:
            raise MergeError('missing left_by')
        if self.left_by is not None and self.right_by is None:
            raise MergeError('missing right_by')
        # add 'by' to our key-list so we can have it in the
        # output as a key
        if self.left_by is not None:
            if not is_list_like(self.left_by):
                self.left_by = [self.left_by]
            if not is_list_like(self.right_by):
                self.right_by = [self.right_by]
            if len(self.left_by) != len(self.right_by):
                raise MergeError('left_by and right_by must be same length')
            # prepend so the asof key stays at position -1 (_asof_key)
            self.left_on = self.left_by + list(self.left_on)
            self.right_on = self.right_by + list(self.right_on)
        # check 'direction' is valid
        if self.direction not in ['backward', 'forward', 'nearest']:
            raise MergeError('direction invalid: ' + self.direction)
    @property
    def _asof_key(self):
        """ This is our asof key, the 'on' """
        return self.left_on[-1]
    def _get_merge_keys(self):
        """Return (left_join_keys, right_join_keys, join_names), also
        validating key dtype compatibility, ``tolerance`` and
        ``allow_exact_matches``."""
        # note this function has side effects
        (left_join_keys,
         right_join_keys,
         join_names) = super(_AsOfMerge, self)._get_merge_keys()
        # validate index types are the same
        for lk, rk in zip(left_join_keys, right_join_keys):
            if not is_dtype_equal(lk.dtype, rk.dtype):
                raise MergeError("incompatible merge keys, "
                                 "must be the same type")
        # validate tolerance; must be a Timedelta if we have a DTI
        if self.tolerance is not None:
            if self.left_index:
                lt = self.left.index
            else:
                lt = left_join_keys[-1]
            msg = "incompatible tolerance, must be compat " \
                  "with type {0}".format(type(lt))
            if is_datetime64_dtype(lt) or is_datetime64tz_dtype(lt):
                if not isinstance(self.tolerance, Timedelta):
                    raise MergeError(msg)
                if self.tolerance < Timedelta(0):
                    raise MergeError("tolerance must be positive")
            elif is_int64_dtype(lt):
                if not is_integer(self.tolerance):
                    raise MergeError(msg)
                if self.tolerance < 0:
                    raise MergeError("tolerance must be positive")
            else:
                raise MergeError("key must be integer or timestamp")
        # validate allow_exact_matches
        if not is_bool(self.allow_exact_matches):
            raise MergeError("allow_exact_matches must be boolean, "
                             "passed {0}".format(self.allow_exact_matches))
        return left_join_keys, right_join_keys, join_names
    def _get_join_indexers(self):
        """ return the join indexers """
        def flip(xs):
            """ unlike np.transpose, this returns an array of tuples """
            labels = list(string.ascii_lowercase[:len(xs)])
            dtypes = [x.dtype for x in xs]
            labeled_dtypes = list(zip(labels, dtypes))
            return np.array(lzip(*xs), labeled_dtypes)
        # values to compare
        left_values = (self.left.index.values if self.left_index else
                       self.left_join_keys[-1])
        right_values = (self.right.index.values if self.right_index else
                        self.right_join_keys[-1])
        tolerance = self.tolerance
        # we required sortedness in the join keys
        msg = " keys must be sorted"
        if not Index(left_values).is_monotonic:
            raise ValueError('left' + msg)
        if not Index(right_values).is_monotonic:
            raise ValueError('right' + msg)
        # initial type conversion as needed: datetime-likes are compared
        # as their int64 representation, and a Timedelta tolerance is
        # converted to its integer nanosecond value to match
        if needs_i8_conversion(left_values):
            left_values = left_values.view('i8')
            right_values = right_values.view('i8')
            if tolerance is not None:
                tolerance = tolerance.value
        # a "by" parameter requires special handling
        if self.left_by is not None:
            # remove 'on' parameter from values if one existed
            if self.left_index and self.right_index:
                left_by_values = self.left_join_keys
                right_by_values = self.right_join_keys
            else:
                left_by_values = self.left_join_keys[0:-1]
                right_by_values = self.right_join_keys[0:-1]
            # get tuple representation of values if more than one
            if len(left_by_values) == 1:
                left_by_values = left_by_values[0]
                right_by_values = right_by_values[0]
            else:
                left_by_values = flip(left_by_values)
                right_by_values = flip(right_by_values)
            # upcast 'by' parameter because HashTable is limited
            by_type = _get_cython_type_upcast(left_by_values.dtype)
            by_type_caster = _type_casters[by_type]
            left_by_values = by_type_caster(left_by_values)
            right_by_values = by_type_caster(right_by_values)
            # choose appropriate function by type
            on_type = _get_cython_type(left_values.dtype)
            func = _asof_by_function(self.direction, on_type, by_type)
            return func(left_values,
                        right_values,
                        left_by_values,
                        right_by_values,
                        self.allow_exact_matches,
                        tolerance)
        else:
            # choose appropriate function by type
            on_type = _get_cython_type(left_values.dtype)
            func = _asof_function(self.direction, on_type)
            return func(left_values,
                        right_values,
                        self.allow_exact_matches,
                        tolerance)
def _get_multiindex_indexer(join_keys, index, sort):
    """Left-outer-join ``join_keys`` (one array per level) against a
    MultiIndex.

    Returns the (left_indexer, right_indexer) pair produced by
    ``libjoin.left_outer_join`` on the flattened int64 keys.
    """
    from functools import partial
    # bind `sort` argument
    fkeys = partial(_factorize_keys, sort=sort)
    # left & right join labels and num. of levels at each location
    rlab, llab, shape = map(list, zip(* map(fkeys, index.levels, join_keys)))
    if sort:
        # re-express the index's own labels in the factorized space
        rlab = list(map(np.take, rlab, index.labels))
    else:
        i8copy = lambda a: a.astype('i8', subok=False, copy=True)
        rlab = list(map(i8copy, index.labels))
    # fix right labels if there were any nulls
    for i in range(len(join_keys)):
        mask = index.labels[i] == -1
        if mask.any():
            # check if there already was any nulls at this location
            # if there was, it is factorized to `shape[i] - 1`
            a = join_keys[i][llab[i] == shape[i] - 1]
            # `not a[0] != a[0]` is the NaN self-inequality test
            if a.size == 0 or not a[0] != a[0]:
                shape[i] += 1
            rlab[i][mask] = shape[i] - 1
    # get flat i8 join keys
    lkey, rkey = _get_join_keys(llab, rlab, shape, sort)
    # factorize keys to a dense i8 space
    lkey, rkey, count = fkeys(lkey, rkey)
    return libjoin.left_outer_join(lkey, rkey, count, sort=sort)
def _get_single_indexer(join_key, index, sort=False):
    """Left-outer-join a single join key array against *index*.

    Returns the (left_indexer, right_indexer) pair from the cython
    left-outer-join on the factorized int64 labels.
    """
    lkey, rkey, count = _factorize_keys(join_key, index, sort=sort)
    return libjoin.left_outer_join(_ensure_int64(lkey),
                                   _ensure_int64(rkey),
                                   count, sort=sort)
def _left_join_on_index(left_ax, right_ax, join_keys, sort=False):
    """Left-join ``join_keys`` against ``right_ax`` and derive the
    resulting join index from ``left_ax``.

    Returns (join_index, left_indexer, right_indexer); left_indexer is
    None when the left axis is preserved verbatim (no sort requested and
    no 1-to-many expansion occurred).
    """
    if len(join_keys) > 1:
        if not ((isinstance(right_ax, MultiIndex) and
                 len(join_keys) == right_ax.nlevels)):
            raise AssertionError("If more than one join key is given then "
                                 "'right_ax' must be a MultiIndex and the "
                                 "number of join keys must be the number of "
                                 "levels in right_ax")
        left_indexer, right_indexer = \
            _get_multiindex_indexer(join_keys, right_ax, sort=sort)
    else:
        jkey = join_keys[0]
        left_indexer, right_indexer = \
            _get_single_indexer(jkey, right_ax, sort=sort)
    if sort or len(left_ax) != len(left_indexer):
        # if asked to sort or there are 1-to-many matches
        join_index = left_ax.take(left_indexer)
        return join_index, left_indexer, right_indexer
    # left frame preserves order & length of its index
    return left_ax, None, right_indexer
def _right_outer_join(x, y, max_groups):
    """Right outer join, implemented by swapping the operands of the
    cython left outer join and flipping the returned indexers back."""
    # left_outer_join(y, x) yields (y_indexer, x_indexer)
    indexers = libjoin.left_outer_join(y, x, max_groups)
    return indexers[1], indexers[0]
# Dispatch table mapping a merge's `how` argument to the cython join
# implementation.  'right' reuses the left join with swapped operands
# (see _right_outer_join above).
_join_functions = {
    'inner': libjoin.inner_join,
    'left': libjoin.left_outer_join,
    'right': _right_outer_join,
    'outer': libjoin.full_outer_join,
}
def _factorize_keys(lk, rk, sort=True):
    """Factorize a left/right key pair into a shared integer label space.

    Returns (llab, rlab, count) where ``llab``/``rlab`` are int64 label
    arrays and ``count`` is the number of distinct groups (missing
    values, if any, are assigned their own trailing group).
    """
    if is_datetime64tz_dtype(lk) and is_datetime64tz_dtype(rk):
        # compare tz-aware datetimes by their underlying values
        lk = lk.values
        rk = rk.values
    # if we exactly match in categories, allow us to factorize on codes
    if (is_categorical_dtype(lk) and
            is_categorical_dtype(rk) and
            lk.is_dtype_equal(rk)):
        klass = libhashtable.Int64Factorizer
        lk = _ensure_int64(lk.codes)
        rk = _ensure_int64(rk.codes)
    elif is_int_or_datetime_dtype(lk) and is_int_or_datetime_dtype(rk):
        klass = libhashtable.Int64Factorizer
        lk = _ensure_int64(com._values_from_object(lk))
        rk = _ensure_int64(com._values_from_object(rk))
    else:
        # fall back to object factorization for everything else
        klass = libhashtable.Factorizer
        lk = _ensure_object(lk)
        rk = _ensure_object(rk)
    # one shared factorizer so both sides map into the same label space
    rizer = klass(max(len(lk), len(rk)))
    llab = rizer.factorize(lk)
    rlab = rizer.factorize(rk)
    count = rizer.get_count()
    if sort:
        uniques = rizer.uniques.to_array()
        llab, rlab = _sort_labels(uniques, llab, rlab)
    # NA group: relabel -1 sentinels to a dedicated trailing group
    lmask = llab == -1
    lany = lmask.any()
    rmask = rlab == -1
    rany = rmask.any()
    if lany or rany:
        if lany:
            np.putmask(llab, lmask, count)
        if rany:
            np.putmask(rlab, rmask, count)
        count += 1
    return llab, rlab, count
def _sort_labels(uniques, left, right):
    """Remap *left*/*right* label arrays so they index sorted *uniques*.

    Missing values keep the -1 sentinel.  Returns the two remapped
    int64 label arrays in the order they were passed.
    """
    if not isinstance(uniques, np.ndarray):
        # go through Index for a tuple-safe conversion to ndarray
        uniques = Index(uniques).values
    n_left = len(left)
    combined = np.concatenate([left, right])
    _, relabeled = algos.safe_sort(uniques, combined, na_sentinel=-1)
    relabeled = _ensure_int64(relabeled)
    return relabeled[:n_left], relabeled[n_left:]
def _get_join_keys(llab, rlab, shape, sort):
    """Flatten multi-level label arrays into single int64 key arrays.

    Combines as many levels as fit in int64 without overflow (mixed-radix
    encoding), then densifies via factorization and recurses on the
    remaining levels.  Returns (lkey, rkey).
    """
    # how many levels can be done without overflow
    pred = lambda i: not is_int64_overflow_possible(shape[:i])
    nlev = next(filter(pred, range(len(shape), 0, -1)))
    # get keys for the first `nlev` levels
    stride = np.prod(shape[1:nlev], dtype='i8')
    lkey = stride * llab[0].astype('i8', subok=False, copy=False)
    rkey = stride * rlab[0].astype('i8', subok=False, copy=False)
    for i in range(1, nlev):
        stride //= shape[i]
        lkey += llab[i] * stride
        rkey += rlab[i] * stride
    if nlev == len(shape):  # all done!
        return lkey, rkey
    # densify current keys to avoid overflow
    lkey, rkey, count = _factorize_keys(lkey, rkey, sort=sort)
    # recurse with the densified key standing in as the first "level"
    llab = [lkey] + llab[nlev:]
    rlab = [rkey] + rlab[nlev:]
    shape = [count] + shape[nlev:]
    return _get_join_keys(llab, rlab, shape, sort)
def _should_fill(lname, rname):
    """Whether an overlapping column pair should be suffix-filled.

    Only skipped when both names are the same string key.
    """
    both_strings = (isinstance(lname, compat.string_types) and
                    isinstance(rname, compat.string_types))
    if not both_strings:
        return True
    return lname == rname
def _any(x):
return x is not None and len(x) > 0 and any([y is not None for y in x])
| mit |
dhwang99/statistics_introduction | probility/dirichlet.py | 1 | 2446 | #encoding: utf8
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as stats
from mpl_toolkits.mplot3d import Axes3D
from gamma_dist import gamma_values
import pdb
'''
Dir(X;alpha_vector) = (Gamma(sum(alpha_vector))/Multi(Gamma(alpha_i)) * Multi(xi^alpha_i)
'''
def dir_pdf(alpha_vector, X_vector):
    """Dirichlet density at the point X_vector with concentration
    parameters alpha_vector.

    Args:
        alpha_vector: 1-d array of positive concentration parameters.
        X_vector: 1-d array of positive entries summing to 1 (a point
            on the simplex), same length as alpha_vector.

    Returns:
        float. The pdf value, delegated to scipy.

    NOTE(review): the original body kept a hand-rolled computation after
    the early ``return`` (using an unimported ``reduce`` and a
    ``gamma_values`` lookup table); it was unreachable dead code and has
    been removed.  Behavior is unchanged.
    """
    return stats.dirichlet.pdf(X_vector, alpha_vector)
def dir_pdfs(alpha_vector, X, Y):
    """Evaluate the Dirichlet pdf over a meshgrid of the first two
    simplex coordinates.

    The third coordinate is implied (1 - x - y); grid points that fall
    outside the open simplex keep a density of 0.
    """
    rows, cols = X.shape
    pdfs = np.zeros((rows, cols))
    for i in xrange(rows):
        for j in xrange(cols):
            third = 1 - X[i, j] - Y[i, j]
            if third > 0:
                point = np.array([X[i, j], Y[i, j], third])
                pdfs[i, j] = dir_pdf(alpha_vector, point)
    return pdfs
def get_3d_points():
    """Return meshgrid coordinate arrays covering [0.01, 0.99] in 100
    steps along each of the first two simplex axes."""
    axis = np.linspace(0.01, 0.99, 100)
    return np.meshgrid(axis, axis)
'''
only for 3-d
'''
def draw_dir_dist(alpha_vector, fname):
    """Render the 3-component Dirichlet density as a 3D surface and save
    it to *fname* as PNG.

    Args:
        alpha_vector: length-3 array of concentration parameters.
        fname: output image path.
    """
    # NOTE(review): colors, ls and ab_lables are computed but never used
    # (leftovers from an earlier 2D beta-plot version kept below in the
    # commented-out string blocks).
    colors = ['r', 'b', 'k', 'g', 'm', 'c']
    ls = []
    ab_lables = '%s:%s:%s' % (alpha_vector[0], alpha_vector[1], alpha_vector[2])
    a_v = alpha_vector
    X,Y = get_3d_points() # sum(xi)=1
    '''
    pdfs = map(lambda x:beta_pdf(x, a,b), points)
    l, = plt.plot(points, pdfs, color=colors[i%len(colors)])
    '''
    pdfs = dir_pdfs(a_v, X, Y)
    '''
    R = np.sqrt(X**2 + Y**2)
    pdfs = np.sin(R)
    '''
    #pdb.set_trace()
    fig = plt.figure()
    ax = Axes3D(fig)
    # draw the 3D surface plot
    #ax.plot_surface(X, Y, pdfs, rstride=1, cstride=1, cmap='rainbow')
    surf = ax.plot_surface(X, Y, pdfs, rstride=1, cstride=1, cmap='jet', linewidth=0, antialiased=False)
    plt.savefig(fname, format='png')
if __name__ == "__main__":
    # (concentration vector, output file) pairs, rendered in order.
    # The repeated assign-then-draw blocks were collapsed into a single
    # data-driven loop; the alpha values, filenames and rendering order
    # are unchanged.
    cases = [
        (np.array([0.1, 0.1, 0.1]), "images/dir0.png"),
        (np.array([0.5, 0.5, 0.5]), "images/dir1.png"),
        (np.array([1, 1, 1]), "images/dir2.png"),
        (np.array([5, 5, 10]), "images/dir4.png"),
        (np.array([10, 10, 10]), "images/dir3.png"),
    ]
    for a_v, fname in cases:
        draw_dir_dist(a_v, fname)
| gpl-3.0 |
prasanna08/oppia-ml | core/classifiers/TextClassifier/TextClassifier.py | 1 | 7667 | # coding: utf-8
#
# Copyright 2017 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classifier for free-form text answers."""
import json
import logging
import time
from sklearn import model_selection
from sklearn import svm
from sklearn.feature_extraction.text import CountVectorizer
from core.classifiers import base
from core.classifiers import classifier_utils
class TextClassifier(base.BaseClassifier):
    """A classifier that uses supervised learning to match free-form text
    answers to answer groups. The classifier trains on answers that exploration
    editors have assigned to an answer group. This classifier uses scikit's
    Support Vector Classifier (SVC) to obtain the best model using the linear
    kernel.
    """

    def __init__(self):
        super(TextClassifier, self).__init__()
        # sklearn.svm.SVC classifier object (best estimator found by the
        # grid search); set by train().
        self.best_clf = None
        # sklearn.feature_extraction.text.CountVectorizer object. It fits
        # text into a feature vector made up of word counts.
        self.count_vector = None
        # A dict representing the best parameters for the
        # sklearn.svm.SVC classifier.
        self.best_params = None
        # The f1 score of the best classifier found with GridSearch.
        self.best_score = None
        # Time taken (seconds) to train the classifier.
        self.exec_time = None

    @property
    def name_in_job_result_proto(self):
        # Field name under which this classifier's data is stored in the
        # job result proto.
        return 'text_classifier'

    @property
    def type_in_job_result_proto(self):
        # Proto message type name, i.e. 'TextClassifierFrozenModel'.
        return '%sFrozenModel' % (self.__class__.__name__)

    def train(self, training_data):
        """Trains classifier using given training_data.

        Args:
            training_data: list(dict). The training data that is used for
                training the classifier. The list contains dicts where each
                dict represents a single training data group, for example:
                training_data = [
                    {
                        'answer_group_index': 1,
                        'answers': ['a1', 'a2']
                    },
                    {
                        'answer_group_index': 2,
                        'answers': ['a2', 'a3']
                    }
                ]

        Side effects: populates best_clf, best_params, best_score,
        count_vector and exec_time on this instance.
        """
        # Flatten the grouped answers into parallel sample/label lists.
        x = []
        y = []
        start = time.time()
        for answer_group in training_data:
            for answer in answer_group['answers']:
                x.append(answer)
                y.append(answer_group['answer_group_index'])
        count_vector = CountVectorizer()
        # Learn a vocabulary dictionary of all tokens in the raw documents.
        count_vector.fit(x)
        # Transform documents to a document-term (word count) matrix.
        transformed_vector = count_vector.transform(x)
        # Set the range of parameters for the exhaustive grid search.
        param_grid = [{
            u'C': [0.5, 1, 10, 50, 100],
            u'kernel': [u'linear']
        }]
        # probability=True is needed so the frozen model can emit class
        # probabilities at prediction time (probA/probB in to_dict()).
        clf = model_selection.GridSearchCV(
            svm.SVC(probability=True), param_grid, scoring='f1_weighted',
            n_jobs=-1)
        clf.fit(transformed_vector, y)
        end = time.time()
        logging.info(
            'The best score for GridSearch=%s', clf.best_score_)
        logging.info(
            'train() spent %f seconds for %d instances', end-start, len(x))
        self.best_params = clf.best_params_
        self.best_clf = clf.best_estimator_
        self.best_score = clf.best_score_
        self.count_vector = count_vector
        self.exec_time = end-start

    def to_dict(self):
        """Returns a dict representing this classifier.

        Returns:
            dict. A dictionary representation of classifier referred to
            as 'classifier_data'. This data is used for prediction.
        """
        classifier_data = {
            u'SVM': classifier_utils.extract_svm_parameters(self.best_clf),
            u'cv_vocabulary': self.count_vector.__dict__['vocabulary_'],
            u'best_params': self.best_params,
            u'best_score': self.best_score
        }
        return classifier_data

    # pylint: disable=too-many-branches
    def validate(self, classifier_data):
        """Validates classifier data.

        Args:
            classifier_data: dict of the classifier attributes specific to
                the classifier algorithm used.

        Raises:
            Exception: a required key is missing or a value has an
                unexpected type, or the data is not JSON-serializable.
        """
        allowed_top_level_keys = [u'SVM', u'cv_vocabulary', u'best_params',
                                  u'best_score']
        allowed_best_params_keys = [u'kernel', u'C']
        allowed_svm_kernel_params_keys = [u'kernel', u'gamma', u'coef0',
                                          u'degree']
        allowed_svm_keys = [u'n_support', u'dual_coef', u'support_vectors',
                            u'intercept', u'classes', u'kernel_params',
                            u'probA', u'probB']
        for key in allowed_top_level_keys:
            if key not in classifier_data:
                raise Exception(
                    '\'%s\' key not found in classifier_data.' % key)
            # best_score is the only scalar top-level value.
            if key != u'best_score':
                if not isinstance(classifier_data[key], dict):
                    raise Exception(
                        'Expected \'%s\' to be dict but found \'%s\'.'
                        % (key, type(classifier_data[key])))
            else:
                if not isinstance(classifier_data[key], float):
                    raise Exception(
                        'Expected \'%s\' to be float but found \'%s\'.'
                        % (key, type(classifier_data[key])))
        for key in allowed_best_params_keys:
            if key not in classifier_data[u'best_params']:
                raise Exception(
                    '\'%s\' key not found in \'best_params\''
                    ' in classifier_data.' % key)
        for key in allowed_svm_keys:
            if key not in classifier_data[u'SVM']:
                raise Exception(
                    '\'%s\' key not found in \'SVM\''
                    ' in classifier_data.' % key)
        for key in allowed_svm_kernel_params_keys:
            if key not in classifier_data[u'SVM'][u'kernel_params']:
                raise Exception(
                    '\'%s\' key not found in \'kernel_params\''
                    ' in classifier_data.' % key)
        # NOTE(review): the grid in train() includes integer C values
        # (1, 10, 50, 100); if one of those is selected, best_params['C']
        # would be an int and this float check would raise — confirm
        # whether an upstream step coerces C to float.
        if not isinstance(classifier_data[u'best_params'][u'C'], float):
            raise Exception(
                'Expected \'C\' to be a float but found \'%s\'' %
                type(classifier_data[u'best_params'][u'C']))
        if not isinstance(classifier_data[u'best_params'][u'kernel'],
                          basestring):
            raise Exception(
                'Expected \'kernel\' to be a string but found \'%s\'' %
                type(classifier_data[u'best_params'][u'kernel']))
        # Validate that all the strings in classifier data are of unicode type.
        classifier_utils.unicode_validator_for_classifier_data(classifier_data)
        # Validate that entire classifier data is json serializable and
        # does not raise any exception.
        json.dumps(classifier_data)
| apache-2.0 |
yunfeilu/scikit-learn | sklearn/utils/estimator_checks.py | 21 | 51976 | from __future__ import print_function
import types
import warnings
import sys
import traceback
import inspect
import pickle
from copy import deepcopy
import numpy as np
from scipy import sparse
import struct
from sklearn.externals.six.moves import zip
from sklearn.externals.joblib import hash, Memory
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import META_ESTIMATORS
from sklearn.utils.testing import set_random_state
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns
from sklearn.base import (clone, ClassifierMixin, RegressorMixin,
TransformerMixin, ClusterMixin, BaseEstimator)
from sklearn.metrics import accuracy_score, adjusted_rand_score, f1_score
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.random_projection import BaseRandomProjection
from sklearn.feature_selection import SelectKBest
from sklearn.svm.base import BaseLibSVM
from sklearn.pipeline import make_pipeline
from sklearn.utils.validation import DataConversionWarning
from sklearn.utils import ConvergenceWarning
from sklearn.cross_validation import train_test_split
from sklearn.utils import shuffle
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris, load_boston, make_blobs
# Cached (X, y) subset of the Boston dataset, populated lazily by
# _boston_subset().
BOSTON = None
# Estimators whose transform() returns both X and Y; several checks
# special-case them by name.
CROSS_DECOMPOSITION = ['PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']
# Regressors that support multi-output y.
MULTI_OUTPUT = ['CCA', 'DecisionTreeRegressor', 'ElasticNet',
                'ExtraTreeRegressor', 'ExtraTreesRegressor', 'GaussianProcess',
                'KNeighborsRegressor', 'KernelRidge', 'Lars', 'Lasso',
                'LassoLars', 'LinearRegression', 'MultiTaskElasticNet',
                'MultiTaskElasticNetCV', 'MultiTaskLasso', 'MultiTaskLassoCV',
                'OrthogonalMatchingPursuit', 'PLSCanonical', 'PLSRegression',
                'RANSACRegressor', 'RadiusNeighborsRegressor',
                'RandomForestRegressor', 'Ridge', 'RidgeCV']
def _yield_non_meta_checks(name, Estimator):
    """Yield the checks applicable to every non-meta estimator.

    Some checks are skipped for estimators listed by *name* that are
    known not to satisfy them (see inline comments).
    """
    yield check_estimators_dtypes
    yield check_fit_score_takes_y
    yield check_dtype_object
    yield check_estimators_fit_returns_self
    # Check that all estimator yield informative messages when
    # trained on empty datasets
    yield check_estimators_empty_data_messages
    if name not in CROSS_DECOMPOSITION + ['SpectralEmbedding']:
        # SpectralEmbedding is non-deterministic,
        # see issue #4236
        # cross-decomposition's "transform" returns X and Y
        yield check_pipeline_consistency
    if name not in ['Imputer']:
        # Test that all estimators check their input for NaN's and infs
        yield check_estimators_nan_inf
    if name not in ['GaussianProcess']:
        # FIXME!
        # in particular GaussianProcess!
        yield check_estimators_overwrite_params
    if hasattr(Estimator, 'sparsify'):
        yield check_sparsify_coefficients
    yield check_estimator_sparse_data
    # Test that estimators can be pickled, and once pickled
    # give the same answer as before.
    yield check_estimators_pickle
def _yield_classifier_checks(name, Classifier):
    """Yield the checks applicable to classifiers."""
    # test classifiers can handle non-array data
    yield check_classifier_data_not_an_array
    # test classifiers trained on a single label always return this label
    yield check_classifiers_one_label
    yield check_classifiers_classes
    yield check_estimators_partial_fit_n_features
    # basic consistency testing
    yield check_classifiers_train
    if (name not in ["MultinomialNB", "LabelPropagation", "LabelSpreading"]
        # TODO some complication with -1 label
            and name not in ["DecisionTreeClassifier",
                             "ExtraTreeClassifier"]):
        # We don't raise a warning in these classifiers, as
        # the column y interface is used by the forests.
        yield check_supervised_y_2d
    # test if NotFittedError is raised
    yield check_estimators_unfitted
    if 'class_weight' in Classifier().get_params().keys():
        yield check_class_weight_classifiers
def _yield_regressor_checks(name, Regressor):
    """Yield the checks applicable to regressors."""
    # TODO: test with intercept
    # TODO: test with multiple responses
    # basic testing
    yield check_regressors_train
    yield check_regressor_data_not_an_array
    yield check_estimators_partial_fit_n_features
    yield check_regressors_no_decision_function
    yield check_supervised_y_2d
    if name != 'CCA':
        # check that the regressor handles int input
        yield check_regressors_int
    # Test if NotFittedError is raised
    yield check_estimators_unfitted
def _yield_transformer_checks(name, Transformer):
    """Yield the checks applicable to transformers."""
    # All transformers should either deal with sparse data or raise an
    # exception with type TypeError and an intelligible error message
    if name not in ['AdditiveChi2Sampler', 'Binarizer', 'Normalizer',
                    'PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']:
        yield check_transformer_data_not_an_array
    # these don't actually fit the data, so don't raise errors
    if name not in ['AdditiveChi2Sampler', 'Binarizer',
                    'FunctionTransformer', 'Normalizer']:
        # basic tests
        yield check_transformer_general
        yield check_transformers_unfitted
def _yield_clustering_checks(name, Clusterer):
    """Yield the checks applicable to clustering estimators."""
    yield check_clusterer_compute_labels_predict
    if name not in ('WardAgglomeration', "FeatureAgglomeration"):
        # this is clustering on the features
        # let's not test that here.
        yield check_clustering
        yield check_estimators_partial_fit_n_features
def _yield_all_checks(name, Estimator):
    """Yield every check applicable to *Estimator*, dispatching on the
    sklearn mixins it inherits from, in a fixed order."""
    for check in _yield_non_meta_checks(name, Estimator):
        yield check
    # Dispatch table keeps the original check ordering: classifier,
    # regressor, transformer, clustering.
    mixin_generators = [(ClassifierMixin, _yield_classifier_checks),
                        (RegressorMixin, _yield_regressor_checks),
                        (TransformerMixin, _yield_transformer_checks),
                        (ClusterMixin, _yield_clustering_checks)]
    for mixin, yield_checks in mixin_generators:
        if issubclass(Estimator, mixin):
            for check in yield_checks(name, Estimator):
                yield check
    yield check_fit2d_predict1d
    yield check_fit2d_1sample
    yield check_fit2d_1feature
    yield check_fit1d_1feature
    yield check_fit1d_1sample
def check_estimator(Estimator):
    """Check if estimator adheres to sklearn conventions.

    This estimator will run an extensive test-suite for input validation,
    shapes, etc.
    Additional tests for classifiers, regressors, clustering or transformers
    will be run if the Estimator class inherits from the corresponding mixin
    from sklearn.base.

    Parameters
    ----------
    Estimator : class
        Class to check.
    """
    # BUG FIX: ``Estimator`` is a class object, so the previous
    # ``Estimator.__class__.__name__`` resolved to the metaclass name
    # ('type' or 'ABCMeta'), never the estimator's name — silently
    # defeating every name-based skip inside the individual checks.
    # Use the class's own __name__ (as upstream scikit-learn does).
    name = Estimator.__name__
    check_parameters_default_constructible(name, Estimator)
    for check in _yield_all_checks(name, Estimator):
        check(name, Estimator)
def _boston_subset(n_samples=200):
    """Return a cached, shuffled, standardized subset of the Boston dataset.

    The result is memoized in the module-level ``BOSTON`` global, so the
    ``n_samples`` argument only takes effect on the first call.
    """
    global BOSTON
    if BOSTON is None:
        boston = load_boston()
        X, y = boston.data, boston.target
        X, y = shuffle(X, y, random_state=0)
        X, y = X[:n_samples], y[:n_samples]
        X = StandardScaler().fit_transform(X)
        BOSTON = X, y
    return BOSTON
def set_fast_parameters(estimator):
    """Mutate ``estimator``'s hyper-parameters in place to speed up tests.

    Reduces iteration counts, ensemble sizes, resampling counts etc. so
    that the common checks run quickly.  Also has the side effect of
    installing a warnings filter ignoring ConvergenceWarning for
    estimators with a ``max_iter`` parameter.
    """
    # speed up some estimators
    params = estimator.get_params()
    # TSNE's n_iter has a hard lower bound, so it is excluded here
    if ("n_iter" in params
            and estimator.__class__.__name__ != "TSNE"):
        estimator.set_params(n_iter=5)
    if "max_iter" in params:
        warnings.simplefilter("ignore", ConvergenceWarning)
        if estimator.max_iter is not None:
            estimator.set_params(max_iter=min(5, estimator.max_iter))
        # LinearSVR
        if estimator.__class__.__name__ == 'LinearSVR':
            estimator.set_params(max_iter=20)
    if "n_resampling" in params:
        # randomized lasso
        estimator.set_params(n_resampling=5)
    if "n_estimators" in params:
        # especially gradient boosting with default 100
        estimator.set_params(n_estimators=min(5, estimator.n_estimators))
    if "max_trials" in params:
        # RANSAC
        estimator.set_params(max_trials=10)
    if "n_init" in params:
        # K-Means
        estimator.set_params(n_init=2)
    if estimator.__class__.__name__ == "SelectFdr":
        # be tolerant of noisy datasets (not actually speed)
        estimator.set_params(alpha=.5)
    if estimator.__class__.__name__ == "TheilSenRegressor":
        estimator.max_subpopulation = 100
    if isinstance(estimator, BaseRandomProjection):
        # Due to the jl lemma and often very few samples, the number
        # of components of the random matrix projection will be probably
        # greater than the number of features.
        # So we impose a smaller number (avoid "auto" mode)
        estimator.set_params(n_components=1)
    if isinstance(estimator, SelectKBest):
        # SelectKBest has a default of k=10
        # which is more feature than we have in most case.
        estimator.set_params(k=1)
class NotAnArray(object):
    """Array-like wrapper exposing only the ``__array__`` protocol.

    Used by the checks to verify that estimators accept objects that
    are convertible to an array without being actual numpy arrays.
    """

    def __init__(self, data):
        self.data = data

    def __array__(self, dtype=None):
        return self.data
def _is_32bit():
"""Detect if process is 32bit Python."""
return struct.calcsize('P') * 8 == 32
def check_estimator_sparse_data(name, Estimator):
    """Check that the estimator either supports sparse input in every
    scipy.sparse format, or fails gracefully with an informative TypeError
    mentioning 'sparse'.
    """
    rng = np.random.RandomState(0)
    X = rng.rand(40, 10)
    X[X < .8] = 0  # ~80% sparsity
    X_csr = sparse.csr_matrix(X)
    y = (4 * rng.rand(40)).astype(np.int)
    for sparse_format in ['csr', 'csc', 'dok', 'lil', 'coo', 'dia', 'bsr']:
        X = X_csr.asformat(sparse_format)
        # catch deprecation warnings
        with warnings.catch_warnings():
            if name in ['Scaler', 'StandardScaler']:
                # centering is undefined on sparse data
                estimator = Estimator(with_mean=False)
            else:
                estimator = Estimator()
            set_fast_parameters(estimator)
        # fit and predict
        try:
            estimator.fit(X, y)
            if hasattr(estimator, "predict"):
                pred = estimator.predict(X)
                assert_equal(pred.shape, (X.shape[0],))
            if hasattr(estimator, 'predict_proba'):
                probs = estimator.predict_proba(X)
                assert_equal(probs.shape, (X.shape[0], 4))
        except TypeError as e:
            # a TypeError is acceptable only if it tells the user that
            # sparse input is unsupported
            if 'sparse' not in repr(e):
                print("Estimator %s doesn't seem to fail gracefully on "
                      "sparse data: error message state explicitly that "
                      "sparse input is not supported if this is not the case."
                      % name)
                raise
        except Exception:
            print("Estimator %s doesn't seem to fail gracefully on "
                  "sparse data: it should raise a TypeError if sparse input "
                  "is explicitly not supported." % name)
            raise
def check_dtype_object(name, Estimator):
    """Check that estimators treat dtype=object arrays as numeric when the
    contents are numeric, and raise an informative error on truly
    non-numeric entries.
    """
    # check that estimators treat dtype object as numeric if possible
    rng = np.random.RandomState(0)
    X = rng.rand(40, 10).astype(object)
    y = (X[:, 0] * 4).astype(np.int)
    y = multioutput_estimator_convert_y_2d(name, y)
    with warnings.catch_warnings():
        estimator = Estimator()
        set_fast_parameters(estimator)

        estimator.fit(X, y)
        if hasattr(estimator, "predict"):
            estimator.predict(X)

        if hasattr(estimator, "transform"):
            estimator.transform(X)

        try:
            # object-dtype labels are fine as long as they are recognised
            estimator.fit(X, y.astype(object))
        except Exception as e:
            if "Unknown label type" not in str(e):
                raise

        # a genuinely non-numeric cell must produce a clear TypeError
        X[0, 0] = {'foo': 'bar'}
        msg = "argument must be a string or a number"
        assert_raises_regex(TypeError, msg, estimator.fit, X, y)
@ignore_warnings
def check_fit2d_predict1d(name, Estimator):
    """Check that after fitting on 2d data, predicting on a 1d sample
    (``X[0]``) raises the 1d-X DeprecationWarning; a ValueError is also
    tolerated.
    """
    # check by fitting a 2d array and predicting with a 1d array
    rnd = np.random.RandomState(0)
    X = 3 * rnd.uniform(size=(20, 3))
    y = X[:, 0].astype(np.int)
    y = multioutput_estimator_convert_y_2d(name, y)
    estimator = Estimator()
    set_fast_parameters(estimator)
    if hasattr(estimator, "n_components"):
        estimator.n_components = 1
    if hasattr(estimator, "n_clusters"):
        estimator.n_clusters = 1
    set_random_state(estimator, 1)
    estimator.fit(X, y)
    for method in ["predict", "transform", "decision_function",
                   "predict_proba"]:
        if hasattr(estimator, method):
            try:
                assert_warns(DeprecationWarning,
                             getattr(estimator, method), X[0])
            except ValueError:
                pass
@ignore_warnings
def check_fit2d_1sample(name, Estimator):
    """Check that fitting a 2d array with a single sample either succeeds
    or raises ValueError (no other failure mode).
    """
    # check fitting a 2d array with only one sample
    rnd = np.random.RandomState(0)
    X = 3 * rnd.uniform(size=(1, 10))
    y = X[:, 0].astype(np.int)
    y = multioutput_estimator_convert_y_2d(name, y)
    estimator = Estimator()
    set_fast_parameters(estimator)
    if hasattr(estimator, "n_components"):
        estimator.n_components = 1
    if hasattr(estimator, "n_clusters"):
        estimator.n_clusters = 1
    set_random_state(estimator, 1)
    try:
        estimator.fit(X, y)
    except ValueError:
        pass
@ignore_warnings
def check_fit2d_1feature(name, Estimator):
    """Check that fitting a 2d array with a single feature either succeeds
    or raises ValueError (no other failure mode).
    """
    # check fitting a 2d array with only one feature
    rnd = np.random.RandomState(0)
    X = 3 * rnd.uniform(size=(10, 1))
    y = X[:, 0].astype(np.int)
    y = multioutput_estimator_convert_y_2d(name, y)
    estimator = Estimator()
    set_fast_parameters(estimator)
    if hasattr(estimator, "n_components"):
        estimator.n_components = 1
    if hasattr(estimator, "n_clusters"):
        estimator.n_clusters = 1
    set_random_state(estimator, 1)
    try:
        estimator.fit(X, y)
    except ValueError:
        pass
@ignore_warnings
def check_fit1d_1feature(name, Estimator):
    """Check that fitting a 1d X (20 scalar samples) either succeeds or
    raises ValueError (no other failure mode).
    """
    # check fitting 1d array with 1 feature
    rnd = np.random.RandomState(0)
    X = 3 * rnd.uniform(size=(20))
    y = X.astype(np.int)
    y = multioutput_estimator_convert_y_2d(name, y)
    estimator = Estimator()
    set_fast_parameters(estimator)
    if hasattr(estimator, "n_components"):
        estimator.n_components = 1
    if hasattr(estimator, "n_clusters"):
        estimator.n_clusters = 1
    set_random_state(estimator, 1)
    try:
        estimator.fit(X, y)
    except ValueError:
        pass
@ignore_warnings
def check_fit1d_1sample(name, Estimator):
    """Fit a 1d X against a length-1 y; success or ValueError are both
    acceptable outcomes, anything else propagates as a failure.
    """
    rng = np.random.RandomState(0)
    data = 3 * rng.uniform(size=(20))
    target = multioutput_estimator_convert_y_2d(name, np.array([1]))
    est = Estimator()
    set_fast_parameters(est)
    # force the cheapest possible model configuration
    for attr in ("n_components", "n_clusters"):
        if hasattr(est, attr):
            setattr(est, attr, 1)
    set_random_state(est, 1)
    try:
        est.fit(data, target)
    except ValueError:
        pass
def check_transformer_general(name, Transformer):
    """Run the generic transformer consistency checks on small blobs data,
    both as numpy arrays and as plain Python lists.
    """
    X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
                      random_state=0, n_features=2, cluster_std=0.1)
    X = StandardScaler().fit_transform(X)
    # shift to non-negative values for transformers like NMF
    X -= X.min()
    _check_transformer(name, Transformer, X, y)
    _check_transformer(name, Transformer, X.tolist(), y.tolist())
def check_transformer_data_not_an_array(name, Transformer):
    """Run the generic transformer checks with inputs wrapped in
    ``NotAnArray`` (array-likes that only implement ``__array__``).
    """
    X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
                      random_state=0, n_features=2, cluster_std=0.1)
    X = StandardScaler().fit_transform(X)
    # We need to make sure that we have non negative data, for things
    # like NMF
    X -= X.min() - .1
    this_X = NotAnArray(X)
    this_y = NotAnArray(np.asarray(y))
    _check_transformer(name, Transformer, this_X, this_y)
def check_transformers_unfitted(name, Transformer):
    """Check that calling ``transform`` before ``fit`` raises
    AttributeError or ValueError (NotFittedError inherits from both).
    """
    X, y = _boston_subset()
    with warnings.catch_warnings(record=True):
        transformer = Transformer()
    assert_raises((AttributeError, ValueError), transformer.transform, X)
def _check_transformer(name, Transformer, X, y):
    """Core transformer check: fit/fit_transform/transform consistency.

    Verifies that ``fit_transform`` and ``fit`` + ``transform`` agree,
    that consecutive ``fit_transform`` calls agree, that the number of
    output samples matches the input, and that transforming transposed
    (malformed) input raises ValueError.
    """
    if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
        # Those transformers yield non-deterministic output when executed on
        # a 32bit Python. The same transformers are stable on 64bit Python.
        # FIXME: try to isolate a minimalistic reproduction case only depending
        # on numpy & scipy and/or maybe generate a test dataset that does not
        # cause such unstable behaviors.
        msg = name + ' is non deterministic on 32bit Python'
        raise SkipTest(msg)
    n_samples, n_features = np.asarray(X).shape
    # catch deprecation warnings
    with warnings.catch_warnings(record=True):
        transformer = Transformer()
    set_random_state(transformer)
    set_fast_parameters(transformer)
    # fit
    if name in CROSS_DECOMPOSITION:
        # cross-decomposition transformers need a 2d multi-target y
        y_ = np.c_[y, y]
        y_[::2, 1] *= 2
    else:
        y_ = y
    transformer.fit(X, y_)
    X_pred = transformer.fit_transform(X, y=y_)
    # cross-decomposition returns a tuple of (x_scores, y_scores)
    if isinstance(X_pred, tuple):
        for x_pred in X_pred:
            assert_equal(x_pred.shape[0], n_samples)
    else:
        # check for consistent n_samples
        assert_equal(X_pred.shape[0], n_samples)
    if hasattr(transformer, 'transform'):
        if name in CROSS_DECOMPOSITION:
            X_pred2 = transformer.transform(X, y_)
            X_pred3 = transformer.fit_transform(X, y=y_)
        else:
            X_pred2 = transformer.transform(X)
            X_pred3 = transformer.fit_transform(X, y=y_)
        if isinstance(X_pred, tuple) and isinstance(X_pred2, tuple):
            for x_pred, x_pred2, x_pred3 in zip(X_pred, X_pred2, X_pred3):
                assert_array_almost_equal(
                    x_pred, x_pred2, 2,
                    "fit_transform and transform outcomes not consistent in %s"
                    % Transformer)
                assert_array_almost_equal(
                    x_pred, x_pred3, 2,
                    "consecutive fit_transform outcomes not consistent in %s"
                    % Transformer)
        else:
            assert_array_almost_equal(
                X_pred, X_pred2, 2,
                "fit_transform and transform outcomes not consistent in %s"
                % Transformer)
            assert_array_almost_equal(
                X_pred, X_pred3, 2,
                "consecutive fit_transform outcomes not consistent in %s"
                % Transformer)
        assert_equal(len(X_pred2), n_samples)
        assert_equal(len(X_pred3), n_samples)
        # raises error on malformed input for transform
        if hasattr(X, 'T'):
            # If it's not an array, it does not have a 'T' property
            assert_raises(ValueError, transformer.transform, X.T)
@ignore_warnings
def check_pipeline_consistency(name, Estimator):
    """Check that wrapping the estimator in ``make_pipeline`` gives the
    same ``score`` and ``fit_transform`` results as the bare estimator.
    """
    if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
        # Those transformers yield non-deterministic output when executed on
        # a 32bit Python. The same transformers are stable on 64bit Python.
        # FIXME: try to isolate a minimalistic reproduction case only depending
        # scipy and/or maybe generate a test dataset that does not
        # cause such unstable behaviors.
        msg = name + ' is non deterministic on 32bit Python'
        raise SkipTest(msg)
    # check that make_pipeline(est) gives same score as est
    X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
                      random_state=0, n_features=2, cluster_std=0.1)
    X -= X.min()
    y = multioutput_estimator_convert_y_2d(name, y)
    estimator = Estimator()
    set_fast_parameters(estimator)
    set_random_state(estimator)
    pipeline = make_pipeline(estimator)
    estimator.fit(X, y)
    pipeline.fit(X, y)
    funcs = ["score", "fit_transform"]
    for func_name in funcs:
        func = getattr(estimator, func_name, None)
        if func is not None:
            func_pipeline = getattr(pipeline, func_name)
            result = func(X, y)
            result_pipe = func_pipeline(X, y)
            assert_array_almost_equal(result, result_pipe)
@ignore_warnings
def check_fit_score_takes_y(name, Estimator):
    """Check that fit/score/partial_fit/fit_predict/fit_transform all
    accept ``y`` as their second positional argument so the estimator
    is usable inside pipelines.
    """
    # check that all estimators accept an optional y
    # in fit and score so they can be used in pipelines
    rnd = np.random.RandomState(0)
    X = rnd.uniform(size=(10, 3))
    y = np.arange(10) % 3
    y = multioutput_estimator_convert_y_2d(name, y)
    estimator = Estimator()
    set_fast_parameters(estimator)
    set_random_state(estimator)
    funcs = ["fit", "score", "partial_fit", "fit_predict", "fit_transform"]
    for func_name in funcs:
        func = getattr(estimator, func_name, None)
        if func is not None:
            func(X, y)
            # NOTE(review): inspect.getargspec is deprecated since
            # Python 3.0 (use getfullargspec on py3-only code)
            args = inspect.getargspec(func).args
            # args[0] is self, args[1] is X, args[2] must be y
            assert_true(args[2] in ["y", "Y"])
@ignore_warnings
def check_estimators_dtypes(name, Estimator):
    """Check that the estimator can fit and predict on float32, float64,
    int32 and int64 training data.
    """
    rnd = np.random.RandomState(0)
    X_train_32 = 3 * rnd.uniform(size=(20, 5)).astype(np.float32)
    X_train_64 = X_train_32.astype(np.float64)
    X_train_int_64 = X_train_32.astype(np.int64)
    X_train_int_32 = X_train_32.astype(np.int32)
    y = X_train_int_64[:, 0]
    y = multioutput_estimator_convert_y_2d(name, y)
    for X_train in [X_train_32, X_train_64, X_train_int_64, X_train_int_32]:
        with warnings.catch_warnings(record=True):
            estimator = Estimator()
        set_fast_parameters(estimator)
        set_random_state(estimator, 1)
        estimator.fit(X_train, y)

        for method in ["predict", "transform", "decision_function",
                       "predict_proba"]:
            if hasattr(estimator, method):
                getattr(estimator, method)(X_train)
def check_estimators_empty_data_messages(name, Estimator):
    """Check that fitting on empty data raises an informative ValueError.

    Zero samples must raise ValueError; zero features must raise
    ValueError with a message stating the required minimum number of
    features.
    """
    e = Estimator()
    set_fast_parameters(e)
    set_random_state(e, 1)

    X_zero_samples = np.empty(0).reshape(0, 3)
    # The precise message can change depending on whether X or y is
    # validated first. Let us test the type of exception only:
    assert_raises(ValueError, e.fit, X_zero_samples, [])

    X_zero_features = np.empty(0).reshape(3, 0)
    # the following y should be accepted by both classifiers and regressors
    # and ignored by unsupervised models
    y = multioutput_estimator_convert_y_2d(name, np.array([1, 0, 1]))
    # FIX: use a raw string -- ``\(`` and ``\d`` are regex escapes, not
    # valid string escapes, and trigger DeprecationWarning otherwise
    msg = (r"0 feature\(s\) \(shape=\(3, 0\)\) while a minimum of \d* "
           r"is required.")
    assert_raises_regex(ValueError, msg, e.fit, X_zero_features, y)
def check_estimators_nan_inf(name, Estimator):
    """Check that fit, predict and transform reject NaN/inf inputs.

    For each contaminated training matrix the estimator must raise a
    ValueError whose message mentions 'inf' or 'NaN'.  Any other
    exception type, or silently succeeding, fails the check.
    """
    rnd = np.random.RandomState(0)
    X_train_finite = rnd.uniform(size=(10, 3))
    X_train_nan = rnd.uniform(size=(10, 3))
    X_train_nan[0, 0] = np.nan
    X_train_inf = rnd.uniform(size=(10, 3))
    X_train_inf[0, 0] = np.inf
    y = np.ones(10)
    y[:5] = 0
    y = multioutput_estimator_convert_y_2d(name, y)
    error_string_fit = "Estimator doesn't check for NaN and inf in fit."
    error_string_predict = ("Estimator doesn't check for NaN and inf in"
                            " predict.")
    error_string_transform = ("Estimator doesn't check for NaN and inf in"
                              " transform.")
    for X_train in [X_train_nan, X_train_inf]:
        # catch deprecation warnings
        with warnings.catch_warnings(record=True):
            estimator = Estimator()
            set_fast_parameters(estimator)
            set_random_state(estimator, 1)
            # try to fit
            try:
                estimator.fit(X_train, y)
            except ValueError as e:
                if 'inf' not in repr(e) and 'NaN' not in repr(e):
                    print(error_string_fit, Estimator, e)
                    traceback.print_exc(file=sys.stdout)
                    raise e
            except Exception as exc:
                print(error_string_fit, Estimator, exc)
                traceback.print_exc(file=sys.stdout)
                raise exc
            else:
                raise AssertionError(error_string_fit, Estimator)
            # actually fit
            estimator.fit(X_train_finite, y)

            # predict
            if hasattr(estimator, "predict"):
                try:
                    estimator.predict(X_train)
                except ValueError as e:
                    if 'inf' not in repr(e) and 'NaN' not in repr(e):
                        print(error_string_predict, Estimator, e)
                        traceback.print_exc(file=sys.stdout)
                        raise e
                except Exception as exc:
                    print(error_string_predict, Estimator, exc)
                    traceback.print_exc(file=sys.stdout)
                    # FIX: re-raise so an unexpected exception type fails
                    # the check; previously it was printed but swallowed,
                    # unlike the equivalent branch for fit above.
                    raise exc
                else:
                    raise AssertionError(error_string_predict, Estimator)

            # transform
            if hasattr(estimator, "transform"):
                try:
                    estimator.transform(X_train)
                except ValueError as e:
                    if 'inf' not in repr(e) and 'NaN' not in repr(e):
                        print(error_string_transform, Estimator, e)
                        traceback.print_exc(file=sys.stdout)
                        raise e
                except Exception as exc:
                    print(error_string_transform, Estimator, exc)
                    traceback.print_exc(file=sys.stdout)
                    # FIX: re-raise for the same reason as in predict
                    raise exc
                else:
                    raise AssertionError(error_string_transform, Estimator)
def check_estimators_pickle(name, Estimator):
    """Test that we can pickle all estimators"""
    check_methods = ["predict", "transform", "decision_function",
                     "predict_proba"]
    X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
                      random_state=0, n_features=2, cluster_std=0.1)
    # some estimators can't do features less than 0
    X -= X.min()
    # some estimators only take multioutputs
    y = multioutput_estimator_convert_y_2d(name, y)
    # catch deprecation warnings
    with warnings.catch_warnings(record=True):
        estimator = Estimator()
    set_random_state(estimator)
    set_fast_parameters(estimator)
    estimator.fit(X, y)
    # record the output of every prediction-like method before pickling
    result = dict()
    for method in check_methods:
        if hasattr(estimator, method):
            result[method] = getattr(estimator, method)(X)

    # pickle and unpickle!
    pickled_estimator = pickle.dumps(estimator)
    unpickled_estimator = pickle.loads(pickled_estimator)
    # the round-tripped estimator must reproduce the recorded outputs
    for method in result:
        unpickled_result = getattr(unpickled_estimator, method)(X)
        assert_array_almost_equal(result[method], unpickled_result)
def check_estimators_partial_fit_n_features(name, Alg):
    """Check that ``partial_fit`` raises ValueError if the number of
    features changes between calls.
    """
    # check if number of features changes between calls to partial_fit.
    if not hasattr(Alg, 'partial_fit'):
        return
    X, y = make_blobs(n_samples=50, random_state=1)
    X -= X.min()
    with warnings.catch_warnings(record=True):
        alg = Alg()
    set_fast_parameters(alg)
    if isinstance(alg, ClassifierMixin):
        # classifiers need the full class list up front
        classes = np.unique(y)
        alg.partial_fit(X, y, classes=classes)
    else:
        alg.partial_fit(X, y)

    # a second call with one feature fewer must fail
    assert_raises(ValueError, alg.partial_fit, X[:, :-1], y)
def check_clustering(name, Alg):
    """Fit a clusterer on blobs and check ``labels_`` and ``fit_predict``.

    Verifies the labels shape, agreement with the ground truth
    (adjusted Rand index > 0.4), and that ``fit_predict`` reproduces the
    labels from ``fit`` (skipped for the non-deterministic
    SpectralClustering).
    """
    X, y = make_blobs(n_samples=50, random_state=1)
    X, y = shuffle(X, y, random_state=7)
    X = StandardScaler().fit_transform(X)
    n_samples, n_features = X.shape
    # catch deprecation and neighbors warnings
    with warnings.catch_warnings(record=True):
        alg = Alg()
    set_fast_parameters(alg)
    if hasattr(alg, "n_clusters"):
        alg.set_params(n_clusters=3)
    set_random_state(alg)
    if name == 'AffinityPropagation':
        alg.set_params(preference=-100)
        alg.set_params(max_iter=100)

    # fit
    alg.fit(X)
    # with lists
    alg.fit(X.tolist())

    assert_equal(alg.labels_.shape, (n_samples,))
    pred = alg.labels_
    assert_greater(adjusted_rand_score(pred, y), 0.4)
    # fit another time with ``fit_predict`` and compare results
    # FIX: compare strings with ==, not `is` -- identity of equal string
    # literals is a CPython interning artifact, not guaranteed behavior
    if name == 'SpectralClustering':
        # there is no way to make Spectral clustering deterministic :(
        return
    set_random_state(alg)
    with warnings.catch_warnings(record=True):
        pred2 = alg.fit_predict(X)
    assert_array_equal(pred, pred2)
def check_clusterer_compute_labels_predict(name, Clusterer):
    """Check that predict is invariant of compute_labels"""
    X, y = make_blobs(n_samples=20, random_state=0)
    clusterer = Clusterer()
    if hasattr(clusterer, "compute_labels"):
        # MiniBatchKMeans
        if hasattr(clusterer, "random_state"):
            clusterer.set_params(random_state=0)

        # predictions must match with and without label computation at fit
        X_pred1 = clusterer.fit(X).predict(X)
        clusterer.set_params(compute_labels=False)
        X_pred2 = clusterer.fit(X).predict(X)
        assert_array_equal(X_pred1, X_pred2)
def check_classifiers_one_label(name, Classifier):
    """Check the classifier's behavior when the training set contains a
    single class: it must either raise a ValueError mentioning 'class',
    or fit successfully and predict that one class everywhere.
    """
    error_string_fit = "Classifier can't train when only one class is present."
    error_string_predict = ("Classifier can't predict when only one class is "
                            "present.")
    rnd = np.random.RandomState(0)
    X_train = rnd.uniform(size=(10, 3))
    X_test = rnd.uniform(size=(10, 3))
    y = np.ones(10)
    # catch deprecation warnings
    with warnings.catch_warnings(record=True):
        classifier = Classifier()
        set_fast_parameters(classifier)
        # try to fit
        try:
            classifier.fit(X_train, y)
        except ValueError as e:
            # a ValueError about the single class is an accepted outcome
            if 'class' not in repr(e):
                print(error_string_fit, Classifier, e)
                traceback.print_exc(file=sys.stdout)
                raise e
            else:
                return
        except Exception as exc:
            print(error_string_fit, Classifier, exc)
            traceback.print_exc(file=sys.stdout)
            raise exc
        # predict
        try:
            assert_array_equal(classifier.predict(X_test), y)
        except Exception as exc:
            print(error_string_predict, Classifier, exc)
            raise exc
def check_classifiers_train(name, Classifier):
    """Fit a classifier on multiclass and binary blobs and check its API.

    Verifies training-set accuracy, the output shapes of ``predict``,
    ``decision_function`` and ``predict_proba``, agreement between those
    methods, and that malformed (transposed / truncated) input raises
    ValueError.
    """
    X_m, y_m = make_blobs(n_samples=300, random_state=0)
    X_m, y_m = shuffle(X_m, y_m, random_state=7)
    X_m = StandardScaler().fit_transform(X_m)
    # generate binary problem from multi-class one
    y_b = y_m[y_m != 2]
    X_b = X_m[y_m != 2]
    for (X, y) in [(X_m, y_m), (X_b, y_b)]:
        # catch deprecation warnings
        classes = np.unique(y)
        n_classes = len(classes)
        n_samples, n_features = X.shape
        with warnings.catch_warnings(record=True):
            classifier = Classifier()
        if name in ['BernoulliNB', 'MultinomialNB']:
            # these require non-negative input
            X -= X.min()
        set_fast_parameters(classifier)
        set_random_state(classifier)
        # raises error on malformed input for fit
        assert_raises(ValueError, classifier.fit, X, y[:-1])

        # fit
        classifier.fit(X, y)
        # with lists
        classifier.fit(X.tolist(), y.tolist())
        assert_true(hasattr(classifier, "classes_"))
        y_pred = classifier.predict(X)
        assert_equal(y_pred.shape, (n_samples,))
        # training set performance
        if name not in ['BernoulliNB', 'MultinomialNB']:
            assert_greater(accuracy_score(y, y_pred), 0.83)

        # raises error on malformed input for predict
        assert_raises(ValueError, classifier.predict, X.T)
        if hasattr(classifier, "decision_function"):
            try:
                # decision_function agrees with predict
                decision = classifier.decision_function(X)
                # FIX: compare ints with ==, not `is` -- identity of small
                # ints is a CPython implementation detail
                if n_classes == 2:
                    assert_equal(decision.shape, (n_samples,))
                    dec_pred = (decision.ravel() > 0).astype(np.int)
                    assert_array_equal(dec_pred, y_pred)
                if (n_classes == 3
                        and not isinstance(classifier, BaseLibSVM)):
                    # 1on1 of LibSVM works differently
                    assert_equal(decision.shape, (n_samples, n_classes))
                    assert_array_equal(np.argmax(decision, axis=1), y_pred)

                # raises error on malformed input for decision_function
                # (previously asserted twice verbatim; once suffices)
                assert_raises(ValueError,
                              classifier.decision_function, X.T)
            except NotImplementedError:
                pass
        if hasattr(classifier, "predict_proba"):
            # predict_proba agrees with predict
            y_prob = classifier.predict_proba(X)
            assert_equal(y_prob.shape, (n_samples, n_classes))
            assert_array_equal(np.argmax(y_prob, axis=1), y_pred)
            # check that probas for all classes sum to one
            assert_array_almost_equal(np.sum(y_prob, axis=1),
                                      np.ones(n_samples))
            # raises error on malformed input for predict_proba
            # (previously asserted twice verbatim; once suffices)
            assert_raises(ValueError, classifier.predict_proba, X.T)
def check_estimators_fit_returns_self(name, Estimator):
    """Check if self is returned when calling fit"""
    X, y = make_blobs(random_state=0, n_samples=9, n_features=4)
    y = multioutput_estimator_convert_y_2d(name, y)
    # some want non-negative input
    X -= X.min()
    estimator = Estimator()
    set_fast_parameters(estimator)
    set_random_state(estimator)
    # fit must return the estimator instance itself (the sklearn contract)
    assert_true(estimator.fit(X, y) is estimator)
@ignore_warnings
def check_estimators_unfitted(name, Estimator):
    """Check that predict raises an exception in an unfitted estimator.
    Unfitted estimators should raise either AttributeError or ValueError.
    The specific exception type NotFittedError inherits from both and can
    therefore be adequately raised for that purpose.
    """
    # Common test for Regressors as well as Classifiers
    X, y = _boston_subset()
    with warnings.catch_warnings(record=True):
        est = Estimator()
    msg = "fit"
    # every prediction-like method must refuse to run before fit
    for method_name in ('predict', 'decision_function',
                        'predict_proba', 'predict_log_proba'):
        method = getattr(est, method_name, None)
        if method is not None:
            assert_raise_message((AttributeError, ValueError), msg,
                                 method, X)
def check_supervised_y_2d(name, Estimator):
    """Check that fitting with a column-vector y raises a
    DataConversionWarning (unless the estimator supports multi-output)
    and yields the same predictions as a 1d y.
    """
    if "MultiTask" in name:
        # These only work on 2d, so this test makes no sense
        return
    rnd = np.random.RandomState(0)
    X = rnd.uniform(size=(10, 3))
    y = np.arange(10) % 3
    # catch deprecation warnings
    with warnings.catch_warnings(record=True):
        estimator = Estimator()
    set_fast_parameters(estimator)
    set_random_state(estimator)
    # fit
    estimator.fit(X, y)
    y_pred = estimator.predict(X)

    set_random_state(estimator)
    # Check that when a 2D y is given, a DataConversionWarning is
    # raised
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always", DataConversionWarning)
        warnings.simplefilter("ignore", RuntimeWarning)
        estimator.fit(X, y[:, np.newaxis])
    y_pred_2d = estimator.predict(X)
    msg = "expected 1 DataConversionWarning, got: %s" % (
        ", ".join([str(w_x) for w_x in w]))
    if name not in MULTI_OUTPUT:
        # check that we warned if we don't support multi-output
        assert_greater(len(w), 0, msg)
        assert_true("DataConversionWarning('A column-vector y"
                    " was passed when a 1d array was expected" in msg)
        assert_array_almost_equal(y_pred.ravel(), y_pred_2d.ravel())
def check_classifiers_classes(name, Classifier):
    """Check that classifiers handle string class labels (both native str
    and object dtype) and expose the expected ``classes_`` attribute.
    """
    X, y = make_blobs(n_samples=30, random_state=0, cluster_std=0.1)
    X, y = shuffle(X, y, random_state=7)
    X = StandardScaler().fit_transform(X)
    # We need to make sure that we have non negative data, for things
    # like NMF
    X -= X.min() - .1
    y_names = np.array(["one", "two", "three"])[y]
    for y_names in [y_names, y_names.astype('O')]:
        if name in ["LabelPropagation", "LabelSpreading"]:
            # TODO some complication with -1 label
            y_ = y
        else:
            y_ = y_names
        classes = np.unique(y_)
        # catch deprecation warnings
        with warnings.catch_warnings(record=True):
            classifier = Classifier()
        if name == 'BernoulliNB':
            classifier.set_params(binarize=X.mean())
        set_fast_parameters(classifier)
        set_random_state(classifier)
        # fit
        classifier.fit(X, y_)

        y_pred = classifier.predict(X)
        # training set performance
        assert_array_equal(np.unique(y_), np.unique(y_pred))
        # classes_ mismatch is only reported, not failed, here
        if np.any(classifier.classes_ != classes):
            print("Unexpected classes_ attribute for %r: "
                  "expected %s, got %s" %
                  (classifier, classes, classifier.classes_))
def check_regressors_int(name, Regressor):
    """Check that fitting on integer targets gives the same predictions as
    fitting on the equivalent float targets.
    """
    X, _ = _boston_subset()
    X = X[:50]
    rnd = np.random.RandomState(0)
    y = rnd.randint(3, size=X.shape[0])
    y = multioutput_estimator_convert_y_2d(name, y)
    rnd = np.random.RandomState(0)
    # catch deprecation warnings
    with warnings.catch_warnings(record=True):
        # separate estimators to control random seeds
        regressor_1 = Regressor()
        regressor_2 = Regressor()
    set_fast_parameters(regressor_1)
    set_fast_parameters(regressor_2)
    set_random_state(regressor_1)
    set_random_state(regressor_2)

    if name in CROSS_DECOMPOSITION:
        # cross-decomposition needs a 2d multi-target y
        y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
        y_ = y_.T
    else:
        y_ = y

    # fit
    regressor_1.fit(X, y_)
    pred1 = regressor_1.predict(X)
    regressor_2.fit(X, y_.astype(np.float))
    pred2 = regressor_2.predict(X)
    assert_array_almost_equal(pred1, pred2, 2, name)
def check_regressors_train(name, Regressor):
    """Fit a regressor on the Boston subset and check its basic API:
    ValueError on malformed y, predict shape, list input, and a minimum
    training-set R^2 score of 0.5 (some estimators exempted).
    """
    X, y = _boston_subset()
    y = StandardScaler().fit_transform(y.reshape(-1, 1))  # X is already scaled
    y = y.ravel()
    y = multioutput_estimator_convert_y_2d(name, y)
    rnd = np.random.RandomState(0)
    # catch deprecation warnings
    with warnings.catch_warnings(record=True):
        regressor = Regressor()
    set_fast_parameters(regressor)
    if not hasattr(regressor, 'alphas') and hasattr(regressor, 'alpha'):
        # linear regressors need to set alpha, but not generalized CV ones
        regressor.alpha = 0.01
    if name == 'PassiveAggressiveRegressor':
        regressor.C = 0.01

    # raises error on malformed input for fit
    assert_raises(ValueError, regressor.fit, X, y[:-1])
    # fit
    if name in CROSS_DECOMPOSITION:
        y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
        y_ = y_.T
    else:
        y_ = y
    set_random_state(regressor)
    regressor.fit(X, y_)
    regressor.fit(X.tolist(), y_.tolist())
    y_pred = regressor.predict(X)
    assert_equal(y_pred.shape, y_.shape)

    # TODO: find out why PLS and CCA fail. RANSAC is random
    # and furthermore assumes the presence of outliers, hence
    # skipped
    if name not in ('PLSCanonical', 'CCA', 'RANSACRegressor'):
        # printed to ease debugging when the score assertion fails
        print(regressor)
        assert_greater(regressor.score(X, y_), 0.5)
@ignore_warnings
def check_regressors_no_decision_function(name, Regressor):
    """Check that if a regressor exposes decision_function /
    predict_proba / predict_log_proba at all, calling them emits a
    DeprecationWarning (regressors should not have these methods).
    """
    # checks whether regressors have decision_function or predict_proba
    rng = np.random.RandomState(0)
    X = rng.normal(size=(10, 4))
    y = multioutput_estimator_convert_y_2d(name, X[:, 0])
    regressor = Regressor()
    set_fast_parameters(regressor)
    if hasattr(regressor, "n_components"):
        # FIXME CCA, PLS is not robust to rank 1 effects
        regressor.n_components = 1

    regressor.fit(X, y)
    funcs = ["decision_function", "predict_proba", "predict_log_proba"]
    for func_name in funcs:
        func = getattr(regressor, func_name, None)
        if func is None:
            # doesn't have function
            continue
        # has function. Should raise deprecation warning
        msg = func_name
        assert_warns_message(DeprecationWarning, msg, func, X)
def check_class_weight_classifiers(name, Classifier):
    """Check that a heavily skewed ``class_weight`` dict dominates the
    predictions: on very noisy blobs, weighting class 0 by 1000 should
    make the classifier predict class 0 almost everywhere.
    """
    if name == "NuSVC":
        # the sparse version has a parameter that doesn't do anything
        raise SkipTest
    if name.endswith("NB"):
        # NaiveBayes classifiers have a somewhat different interface.
        # FIXME SOON!
        raise SkipTest
    for n_centers in [2, 3]:
        # create a very noisy dataset
        X, y = make_blobs(centers=n_centers, random_state=0, cluster_std=20)
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
                                                            random_state=0)
        n_centers = len(np.unique(y_train))

        if n_centers == 2:
            class_weight = {0: 1000, 1: 0.0001}
        else:
            class_weight = {0: 1000, 1: 0.0001, 2: 0.0001}

        with warnings.catch_warnings(record=True):
            classifier = Classifier(class_weight=class_weight)
        if hasattr(classifier, "n_iter"):
            classifier.set_params(n_iter=100)
        if hasattr(classifier, "min_weight_fraction_leaf"):
            classifier.set_params(min_weight_fraction_leaf=0.01)

        set_random_state(classifier)
        classifier.fit(X_train, y_train)
        y_pred = classifier.predict(X_test)
        assert_greater(np.mean(y_pred == 0), 0.89)
def check_class_weight_balanced_classifiers(name, Classifier, X_train, y_train,
                                            X_test, y_test, weights):
    """Check that ``class_weight='balanced'`` improves the weighted f1
    score over unweighted fitting on an (assumed imbalanced) split.
    """
    with warnings.catch_warnings(record=True):
        classifier = Classifier()
    if hasattr(classifier, "n_iter"):
        classifier.set_params(n_iter=100)

    set_random_state(classifier)
    classifier.fit(X_train, y_train)
    y_pred = classifier.predict(X_test)

    classifier.set_params(class_weight='balanced')
    classifier.fit(X_train, y_train)
    y_pred_balanced = classifier.predict(X_test)
    assert_greater(f1_score(y_test, y_pred_balanced, average='weighted'),
                   f1_score(y_test, y_pred, average='weighted'))
def check_class_weight_balanced_linear_classifier(name, Classifier):
    """Test class weights with non-contiguous class labels.

    The coefficients obtained with ``class_weight='balanced'`` must match
    those obtained with the equivalent manually-computed weight dict.
    """
    X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
                  [1.0, 1.0], [1.0, 0.0]])
    y = np.array([1, 1, 1, -1, -1])

    with warnings.catch_warnings(record=True):
        classifier = Classifier()
    if hasattr(classifier, "n_iter"):
        # This is a very small dataset, default n_iter are likely to prevent
        # convergence
        classifier.set_params(n_iter=1000)
    set_random_state(classifier)

    # Let the model compute the class frequencies
    classifier.set_params(class_weight='balanced')
    coef_balanced = classifier.fit(X, y).coef_.copy()

    # Count each label occurrence to reweight manually
    n_samples = len(y)
    n_classes = float(len(np.unique(y)))

    class_weight = {1: n_samples / (np.sum(y == 1) * n_classes),
                    -1: n_samples / (np.sum(y == -1) * n_classes)}
    classifier.set_params(class_weight=class_weight)
    coef_manual = classifier.fit(X, y).coef_.copy()

    assert_array_almost_equal(coef_balanced, coef_manual)
def check_estimators_overwrite_params(name, Estimator):
    """Check that ``fit`` does not change or mutate any constructor
    parameter: ``get_params()`` must be identical before and after fit.
    """
    X, y = make_blobs(random_state=0, n_samples=9)
    y = multioutput_estimator_convert_y_2d(name, y)
    # some want non-negative input
    X -= X.min()
    with warnings.catch_warnings(record=True):
        # catch deprecation warnings
        estimator = Estimator()

    set_fast_parameters(estimator)
    set_random_state(estimator)

    # Make a physical copy of the orginal estimator parameters before fitting.
    params = estimator.get_params()
    original_params = deepcopy(params)

    # Fit the model
    estimator.fit(X, y)

    # Compare the state of the model parameters with the original parameters
    new_params = estimator.get_params()
    for param_name, original_value in original_params.items():
        new_value = new_params[param_name]

        # We should never change or mutate the internal state of input
        # parameters by default. To check this we use the joblib.hash function
        # that introspects recursively any subobjects to compute a checksum.
        # The only exception to this rule of immutable constructor parameters
        # is possible RandomState instance but in this check we explicitly
        # fixed the random_state params recursively to be integer seeds.
        # NOTE(review): `hash` here is presumably joblib's hash imported at
        # module top (the builtin would fail on ndarray params) -- confirm
        # against the file's import block.
        assert_equal(hash(new_value), hash(original_value),
                     "Estimator %s should not change or mutate "
                     " the parameter %s from %s to %s during fit."
                     % (name, param_name, original_value, new_value))
def check_sparsify_coefficients(name, Estimator):
    """Check that ``sparsify()`` converts ``coef_`` to a sparse matrix
    without changing predictions, and that the sparsified estimator
    survives a pickle round-trip.
    """
    X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1],
                  [-1, -2], [2, 2], [-2, -2]])
    y = [1, 1, 1, 2, 2, 2, 3, 3, 3]
    est = Estimator()

    est.fit(X, y)
    pred_orig = est.predict(X)

    # test sparsify with dense inputs
    est.sparsify()
    assert_true(sparse.issparse(est.coef_))
    pred = est.predict(X)
    assert_array_equal(pred, pred_orig)

    # pickle and unpickle with sparse coef_
    est = pickle.loads(pickle.dumps(est))
    assert_true(sparse.issparse(est.coef_))
    pred = est.predict(X)
    assert_array_equal(pred, pred_orig)
def check_classifier_data_not_an_array(name, Estimator):
    """Check that classifiers accept list-like containers that are not ndarrays."""
    X = np.array([[3, 0], [0, 1], [0, 2], [1, 1], [1, 2], [2, 1]])
    y = multioutput_estimator_convert_y_2d(name, [1, 1, 1, 2, 2, 2])
    check_estimators_data_not_an_array(name, Estimator, X, y)
def check_regressor_data_not_an_array(name, Estimator):
    """Check that regressors accept list-like containers that are not ndarrays."""
    X, y = _boston_subset(n_samples=50)
    check_estimators_data_not_an_array(
        name, Estimator, X, multioutput_estimator_convert_y_2d(name, y))
def check_estimators_data_not_an_array(name, Estimator, X, y):
    """Fit on array-like wrappers and on plain arrays; predictions must agree."""
    if name in CROSS_DECOMPOSITION:
        raise SkipTest

    with warnings.catch_warnings(record=True):
        # silence deprecation warnings; two separate instances so their
        # random seeds can be fixed independently
        est_wrapped = Estimator()
        est_plain = Estimator()
    set_fast_parameters(est_wrapped)
    set_fast_parameters(est_plain)
    set_random_state(est_wrapped)
    set_random_state(est_plain)

    # NotAnArray exposes __array__ but is not an ndarray itself
    X_wrapped = NotAnArray(np.asarray(X))
    y_wrapped = NotAnArray(np.asarray(y))

    est_wrapped.fit(X_wrapped, y_wrapped)
    pred_wrapped = est_wrapped.predict(X_wrapped)

    est_plain.fit(X, y)
    pred_plain = est_plain.predict(X)

    assert_array_almost_equal(pred_wrapped, pred_plain, 2, name)
def check_parameters_default_constructible(name, Estimator):
    """Check that an estimator is default-constructible, cloneable, has a
    working ``__repr__``, and that ``__init__`` does nothing but store its
    parameters unmodified (required for grid_search etc.)."""
    classifier = LinearDiscriminantAnalysis()
    # get rid of deprecation warnings
    with warnings.catch_warnings(record=True):
        if name in META_ESTIMATORS:
            # meta-estimators need a base estimator as a non-default argument
            estimator = Estimator(classifier)
        else:
            estimator = Estimator()
        # test cloning
        clone(estimator)
        # test __repr__
        repr(estimator)
        # test that set_params returns self
        assert_true(estimator.set_params() is estimator)

        # Compare the defaults declared in __init__ against the values
        # actually stored on the instance (via get_params).
        # getattr gets rid of a possible deprecation decorator.
        init = getattr(estimator.__init__, 'deprecated_original',
                       estimator.__init__)
        try:
            # BUG FIX: inspect.getargspec was deprecated in Python 3.0 and
            # removed in 3.11. Use getfullargspec when available and keep
            # only the four leading fields this check needs.
            spec_fn = getattr(inspect, 'getfullargspec', inspect.getargspec)
            args, varargs, kws, defaults = tuple(spec_fn(init))[:4]
        except TypeError:
            # init is not a python function.
            # true for mixins
            return
        params = estimator.get_params()
        if name in META_ESTIMATORS:
            # skip self and the required base-estimator argument
            args = args[2:]
        else:
            args = args[1:]
        if args:
            # non-empty list: every remaining argument must have a default
            assert_equal(len(args), len(defaults))
        else:
            return
        for arg, default in zip(args, defaults):
            # defaults must be simple immutable-ish values
            assert_in(type(default), [str, int, float, bool, tuple, type(None),
                                      np.float64, types.FunctionType, Memory])
            if arg not in params.keys():
                # deprecated parameter, not in get_params
                assert_true(default is None)
                continue
            if isinstance(params[arg], np.ndarray):
                assert_array_equal(params[arg], default)
            else:
                assert_equal(params[arg], default)
def multioutput_estimator_convert_y_2d(name, y):
    """Return *y* reshaped to 2-D for estimators requiring multioutput targets.

    Estimators in the MultiTask* family raise ValueError on 1-D y, so a
    trailing axis is added for them; all other estimators get y unchanged.
    """
    multitask_names = ('MultiTaskElasticNetCV', 'MultiTaskLassoCV',
                       'MultiTaskLasso', 'MultiTaskElasticNet')
    if name in multitask_names:
        return y[:, np.newaxis]
    return y
def check_non_transformer_estimators_n_iter(name, estimator,
                                            multi_output=False):
    """Check that iterative solvers report ``n_iter_ > 0`` after fitting."""
    data = load_iris()
    X = data.data
    y = data.target[:, np.newaxis] if multi_output else data.target

    set_random_state(estimator, 0)
    # AffinityPropagation is unsupervised and takes no target vector
    if name == 'AffinityPropagation':
        estimator.fit(X)
    else:
        estimator.fit(X, y)
    assert_greater(estimator.n_iter_, 0)
def check_transformer_n_iter(name, estimator):
    """Check that iterative transformers run for more than one iteration."""
    if name in CROSS_DECOMPOSITION:
        # fixed default data for cross-decomposition models
        X = [[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]]
        y_ = [[0.1, -0.2], [0.9, 1.1], [0.1, -0.5], [0.3, -0.2]]
    else:
        X, y_ = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
                           random_state=0, n_features=2, cluster_std=0.1)
        X -= X.min() - 0.1

    set_random_state(estimator, 0)
    estimator.fit(X, y_)

    if name in CROSS_DECOMPOSITION:
        # cross-decomposition models report one n_iter_ per component
        for n in estimator.n_iter_:
            assert_greater(n, 1)
    else:
        assert_greater(estimator.n_iter_, 1)
def check_get_params_invariance(name, estimator):
    """Check that get_params(deep=False) is a subset of get_params(deep=True)."""

    class T(BaseEstimator):
        """Mock classifier used as a leaf step for composite estimators."""

        def __init__(self):
            pass

        def fit(self, X, y):
            return self

    if name in ('FeatureUnion', 'Pipeline'):
        e = estimator([('clf', T())])
    # BUG FIX: the original tuple was missing a comma --
    # ('GridSearchCV' 'RandomizedSearchCV') concatenates into one string,
    # turning the membership test into a fragile substring check.
    elif name in ('GridSearchCV', 'RandomizedSearchCV'):
        return
    else:
        e = estimator()

    shallow_params = e.get_params(deep=False)
    deep_params = e.get_params(deep=True)

    assert_true(all(item in deep_params.items() for item in
                    shallow_params.items()))
| bsd-3-clause |
ScopeFoundry/FoundryDataBrowser | viewers/hyperspec_h5.py | 1 | 9105 | #from ScopeFoundry.data_browser import HyperSpectralBaseView
from FoundryDataBrowser.viewers.hyperspec_base_view import HyperSpectralBaseView
from FoundryDataBrowser.viewers.plot_n_fit import PeakUtilsFitter
import numpy as np
import h5py
import pyqtgraph as pg
class HyperSpecH5View(HyperSpectralBaseView):
    """Data-browser view for hyperspectral scans stored in ScopeFoundry H5 files."""

    name = 'hyperspec_h5'

    # measurement group names this viewer knows how to display
    supported_measurements = ['m4_hyperspectral_2d_scan',
                              'andor_hyperspec_scan',
                              'hyperspectral_2d_scan',
                              'fiber_winspec_scan',
                              'hyperspec_picam_mcl',
                              'asi_hyperspec_scan',
                              'asi_OO_hyperspec_scan',
                              'oo_asi_hyperspec_scan',
                              'andor_asi_hyperspec_scan', ]

    def scan_specific_setup(self):
        pass

    def setup(self):
        self.settings.New('sample', dtype=str, initial='')
        HyperSpectralBaseView.setup(self)
        self.plot_n_fit.add_fitter(PeakUtilsFitter())

    def is_file_supported(self, fname):
        # supported if any known measurement name appears in the filename
        return np.any([(meas_name in fname)
                       for meas_name in self.supported_measurements])

    def reset(self):
        HyperSpectralBaseView.reset(self)
        # close the previously opened H5 file, if any, to release the handle
        if hasattr(self, 'dat'):
            self.dat.close()
            del self.dat

    def load_data(self, fname):
        print(self.name, 'loading', fname)
        # BUG FIX: open read-only explicitly. h5py's historical default mode
        # could open the file writable (and its default changed between h5py
        # versions); a viewer must never modify the data file.
        self.dat = h5py.File(fname, 'r')
        for meas_name in self.supported_measurements:
            if meas_name in self.dat['measurement']:
                self.M = self.dat['measurement'][meas_name]

        self.spec_map = None
        for map_name in ['hyperspectral_map', 'spec_map']:
            if map_name in self.M:
                self.spec_map = np.array(self.M[map_name])
                if 'h_span' in self.M['settings'].attrs:
                    h_span = float(self.M['settings'].attrs['h_span'])
                    units = self.M['settings/units'].attrs['h0']
                    self.set_scalebar_params(h_span, units)
                # 4-D maps carry a leading frame axis; show the first frame
                if len(self.spec_map.shape) == 4:
                    self.spec_map = self.spec_map[0, :, :, :]
                # drop dark-reference spectral channels when present
                if 'dark_indices' in list(self.M.keys()):
                    self.spec_map = np.delete(self.spec_map,
                                              self.M['dark_indices'],
                                              -1)

        if self.spec_map is None:
            # leave a valid placeholder so the viewer stays usable
            self.spec_map = np.zeros((10, 10, 10))
            raise ValueError("Specmap not found")

        self.hyperspec_data = self.spec_map
        self.display_image = self.hyperspec_data.sum(axis=-1)
        self.spec_x_array = np.arange(self.hyperspec_data.shape[-1])

        # replace the channel-index x axis by a calibrated one when available
        for x_axis_name in ['wavelength', 'wls', 'wave_numbers',
                            'raman_shifts']:
            if x_axis_name in self.M:
                x_array = np.array(self.M[x_axis_name])
                if 'dark_indices' in list(self.M.keys()):
                    x_array = np.delete(x_array,
                                        np.array(self.M['dark_indices']),
                                        0)
                self.add_spec_x_array(x_axis_name, x_array)
                self.x_axis.update_value(x_axis_name)

        sample = self.dat['app/settings'].attrs['sample']
        self.settings.sample.update_value(sample)
def matplotlib_colormap_to_pg_colormap(colormap_name, n_ticks=16):
    '''Convert a named matplotlib colormap into a pyqtgraph ColorMap.

    ============= =========================================================
    **Arguments**
    colormap_name (string) name of a matplotlib colormap i.e. 'viridis'
    n_ticks       (int) Number of ticks to create when dict of functions
                  is used. Otherwise unused.
    ============= =========================================================

    returns: (pgColormap) pyqtgraph colormap
    primary Usage: <pg.ImageView>.setColorMap(pgColormap)

    requires: cmapToColormap by Sebastian Hoefer
        https://github.com/pyqtgraph/pyqtgraph/issues/561
    '''
    from matplotlib import cm
    # BUG FIX: n_ticks was previously passed as an extra positional argument
    # to zip() (zip(*pairs, n_ticks)), which raises TypeError because an int
    # is not iterable. It belongs to cmapToColormap instead.
    pos, rgba_colors = zip(*cmapToColormap(getattr(cm, colormap_name), n_ticks))
    pgColormap = pg.ColorMap(pos, rgba_colors)
    return pgColormap
def cmapToColormap(cmap, nTicks=16):
    """
    Converts a Matplotlib cmap to pyqtgraphs colormaps. No dependency on
    matplotlib.

    Parameters:
    *cmap*: Cmap object. Imported from matplotlib.cm.*
    *nTicks*: Number of ticks to create when dict of functions is used.
    Otherwise unused.

    Returns a list of (position, (r, g, b, 255)) tuples with integer colors.

    author: Sebastian Hoefer
    """
    # BUG FIX: the abstract base classes live in collections.abc; the
    # collections.* aliases were removed in Python 3.10.
    import collections.abc

    # Case #1: a dictionary with 'red'/'green'/'blue' values as list of
    # ranges (e.g. 'jet'); cmap is a LinearSegmentedColormap instance
    if hasattr(cmap, '_segmentdata'):
        colordata = getattr(cmap, '_segmentdata')
        if ('red' in colordata) and isinstance(colordata['red'],
                                               collections.abc.Sequence):
            # collect the color ranges from all channels into one dict to
            # get unique indices
            posDict = {}
            for idx, channel in enumerate(('red', 'green', 'blue')):
                for colorRange in colordata[channel]:
                    posDict.setdefault(colorRange[0], [-1, -1, -1])[idx] = colorRange[2]

            indexList = list(posDict.keys())
            indexList.sort()
            # interpolate missing values (== -1)
            for channel in range(3):  # R,G,B
                startIdx = indexList[0]
                emptyIdx = []
                for curIdx in indexList:
                    if posDict[curIdx][channel] == -1:
                        emptyIdx.append(curIdx)
                    elif curIdx != indexList[0]:
                        for eIdx in emptyIdx:
                            rPos = (eIdx - startIdx) / (curIdx - startIdx)
                            vStart = posDict[startIdx][channel]
                            vRange = (posDict[curIdx][channel] - posDict[startIdx][channel])
                            posDict[eIdx][channel] = rPos * vRange + vStart
                        startIdx = curIdx
                        del emptyIdx[:]
            # scale to 0..255
            for channel in range(3):  # R,G,B
                for curIdx in indexList:
                    posDict[curIdx][channel] *= 255

            rgb_list = [[i, posDict[i]] for i in indexList]

        # Case #2: a dictionary with 'red'/'green'/'blue' values as
        # functions (e.g. 'gnuplot')
        elif ('red' in colordata) and isinstance(colordata['red'],
                                                 collections.abc.Callable):
            indices = np.linspace(0., 1., nTicks)
            # BUG FIX: np.float was removed in numpy 1.24; use the builtin.
            luts = [np.clip(np.array(colordata[rgb](indices), dtype=float), 0, 1) * 255
                    for rgb in ('red', 'green', 'blue')]
            rgb_list = zip(indices, list(zip(*luts)))

    # cmap is a ListedColormap instance, with attributes 'colors' and 'N'
    elif hasattr(cmap, 'colors') and hasattr(cmap, 'N'):
        colordata = getattr(cmap, 'colors')
        # Case #3: a list with RGB values (e.g. 'seismic')
        if len(colordata[0]) == 3:
            indices = np.linspace(0., 1., len(colordata))
            scaledRgbTuples = [(rgbTuple[0] * 255, rgbTuple[1] * 255, rgbTuple[2] * 255)
                               for rgbTuple in colordata]
            rgb_list = zip(indices, scaledRgbTuples)

        # Case #4: a list of tuples with positions and RGB-values
        # (e.g. 'terrain') -> this section is probably not needed anymore!?
        elif len(colordata[0]) == 2:
            rgb_list = [(idx, (vals[0] * 255, vals[1] * 255, vals[2] * 255))
                        for idx, vals in colordata]

    # Case #X: unknown format or datatype was the wrong object type
    else:
        raise ValueError("[cmapToColormap] Unknown cmap format or not a cmap!")

    # Convert the RGB float values to RGBA integer values
    return list([(pos, (int(r), int(g), int(b), 255)) for pos, (r, g, b) in rgb_list])
#
# class HyperSpecSpecMedianH5View(HyperSpectralBaseView):
#
# name = 'hyperspec_spec_median_npz'
#
# def is_file_supported(self, fname):
# return "_spec_scan.npz" in fname
#
#
# def load_data(self, fname):
# self.dat = np.load(fname)
#
# self.spec_map = self.dat['spec_map']
# self.wls = self.dat['wls']
# self.integrated_count_map = self.dat['integrated_count_map']
# self.spec_median_map = np.apply_along_axis(spectral_median, 2,
# self.spec_map[:,:,:],
# self.wls, 0)
# self.hyperspec_data = self.spec_map
# self.display_image = self.spec_median_map
# self.spec_x_array = self.wls
#
# def scan_specific_setup(self):
# self.spec_plot.setLabel('left', 'Intensity', units='counts')
# self.spec_plot.setLabel('bottom', 'Wavelength', units='nm')
#
# if __name__ == '__main__':
# import sys
#
# app = DataBrowser(sys.argv)
# app.load_view(HyperSpecH5View(app))
#
# sys.exit(app.exec_())
| bsd-3-clause |
bejar/kemlglearn | kemlglearn/cluster/consensus/SimpleConsensusClustering.py | 1 | 4669 | """
.. module:: SimpleConsensusClustering
SimpleConsensusClustering
*************
:Description: SimpleConsensusClustering
:Authors: bejar
:Version:
:Created on: 22/01/2015 10:46
"""
__author__ = 'bejar'
import numpy as np
from sklearn.base import BaseEstimator, ClusterMixin, TransformerMixin
from sklearn.cluster import KMeans, SpectralClustering
from numpy.random import randint
from itertools import product
from joblib import Parallel, delayed
from scipy.sparse import csc_matrix
class SimpleConsensusClustering(BaseEstimator, ClusterMixin, TransformerMixin):
    """Simple Consensus Clustering Algorithm

    Parameters:

    n_clusters: int
        Number of clusters of the base clusterers and the consensus cluster
    n_clusters_base: int, default None
        Number of clusters for the base clusterers (defaults to n_clusters);
        when ncb_rand is False each component draws its cluster count
        uniformly from [2, n_clusters_base]
    ncb_rand: bool, default False
        If True every component uses exactly n_clusters_base clusters
    base: string
        base clusterer ['kmeans' | 'spectral']
    n_components: int
        number of components (base clusterings) of the consensus
    consensus: string
        consensus method ['coincidence']
    consensus2: string
        clusterer applied to the coincidence matrix ['kmeans' | 'spectral']
    """

    def __init__(self, n_clusters, n_clusters_base=None, ncb_rand=False, base='kmeans', n_components=10,
                 consensus='coincidence', consensus2='kmeans'):
        self.n_clusters = n_clusters
        if n_clusters_base is None:
            self.n_clusters_base = n_clusters
        else:
            self.n_clusters_base = n_clusters_base
        self.ncb_rand = ncb_rand
        self.cluster_centers_ = None
        self.labels_ = None
        self.cluster_sizes_ = None
        self.base = base
        self.n_components = n_components
        self.consensus = consensus
        self.consensus2 = consensus2

    def fit(self, X):
        """
        Clusters the examples

        :param X: data matrix (n_samples, n_features)
        :return: self
        """
        if self.consensus == 'coincidence':
            self.cluster_centers_, self.labels_ = self._fit_process_coincidence(X)
        # BUG FIX: fit() previously returned None; the scikit-learn estimator
        # contract requires fit to return the fitted estimator (self), which
        # also makes ClusterMixin/Pipeline chaining work.
        return self

    def _process_components(self, ncl, base, X):
        """Run one base clusterer with ncl clusters and return its labels.

        :param ncl: number of clusters for this component
        :param base: 'kmeans' or 'spectral'
        :param X: data matrix
        """
        if base == 'kmeans':
            km = KMeans(n_clusters=ncl, n_init=1, init='random')
        elif base == 'spectral':
            km = SpectralClustering(n_clusters=ncl, assign_labels='discretize',
                                    affinity='nearest_neighbors', n_neighbors=30)
        km.fit(X)
        return km.labels_

    def _fit_process_coincidence(self, X):
        """
        Obtains n_components base clusterings, computes the coincidence
        (co-association) matrix and clusters that matrix.

        :param X: data matrix
        :return: (cluster_centers, labels); centers are None for 'spectral'
        """
        clabels = Parallel(n_jobs=-1)(delayed(self._process_components)(
            self.n_clusters_base if self.ncb_rand else randint(2, self.n_clusters_base + 1),
            self.base,
            X) for i in range(self.n_components))

        # coin_matrix[i, j] = fraction of components placing i and j together
        coin_matrix = np.zeros((X.shape[0], X.shape[0]))
        for l in clabels:
            coin_matrix += (l[None, :] == l[:, None])
        coin_matrix /= self.n_components

        if self.consensus2 == 'kmeans':
            kmc = KMeans(n_clusters=self.n_clusters)
            kmc.fit(coin_matrix)
            return kmc.cluster_centers_, kmc.labels_
        elif self.consensus2 == 'spectral':
            kmc = SpectralClustering(n_clusters=self.n_clusters, assign_labels='discretize',
                                     affinity='nearest_neighbors', n_neighbors=40)
            kmc.fit(coin_matrix)
            return None, kmc.labels_
if __name__ == '__main__':
    from sklearn import datasets
    from sklearn.metrics import adjusted_mutual_info_score
    from kemlglearn.datasets import make_blobs
    import matplotlib.pyplot as plt
    import time

    # Demo on two concentric circles: plain k-means cannot separate them,
    # while a consensus over many over-segmented k-means runs can.
    data, labels = datasets.make_circles(n_samples=400, noise=0.1,
                                         random_state=4, factor=0.3)

    km = KMeans(n_clusters=2)
    cons = SimpleConsensusClustering(n_clusters=2, n_clusters_base=20,
                                     n_components=1000, ncb_rand=False)

    labels_km = km.fit_predict(data)

    start = time.time()
    cons.fit(data)
    print('T=', time.time() - start)
    labels_cons = cons.labels_

    # agreement of each clustering with the ground truth
    print(adjusted_mutual_info_score(labels_km, labels))
    print(adjusted_mutual_info_score(labels_cons, labels))

    # ground truth | k-means | consensus, side by side
    fig = plt.figure()
    fig.add_subplot(131)
    plt.scatter(data[:, 0], data[:, 1], c=labels)
    fig.add_subplot(132)
    plt.scatter(data[:, 0], data[:, 1], c=labels_km)
    fig.add_subplot(133)
    plt.scatter(data[:, 0], data[:, 1], c=labels_cons)
    plt.show()
| mit |
femtotrader/arctic-updater | samples/unique.py | 1 | 2387 | import time
import logging
from collections import OrderedDict
import pandas as pd
pd.set_option('max_rows', 10)
pd.set_option('expand_frame_repr', False)
pd.set_option('max_columns', 6)

from arctic_updater.updaters.truefx import TrueFXUpdater

# log in UTC with timestamps
logging.Formatter.converter = time.gmtime
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.DEBUG)
logger = logging.getLogger(__name__)

my_updater = TrueFXUpdater()
symbols = my_updater.symbols
logger.info("processing %s" % symbols)

d_df = OrderedDict()
year, month = 2015, 11

for i, symbol in enumerate(symbols):
    logger.info("%d / %d : %s" % (i + 1, len(symbols), symbol))
    df = my_updater._read_one_month(symbol, year, month)

    # De-duplicate timestamps: ticks sharing the same 'Date' get a
    # strictly increasing nanosecond offset so the index becomes unique.
    logger.info("build unique index")
    df = df.reset_index()
    df = df.sort_values('Date')
    df.loc[df['Date'] == df['Date'].shift(), 'Date'] = df['Date'] + ((df['Date'] == df['Date'].shift()).cumsum()).astype('timedelta64[ns]')

    # drop helper columns and re-index on the now-unique 'Date'
    df = df.drop(['us', 'Symbol'], axis=1)
    df = df.set_index('Date', verify_integrity=True)

    # save the per-symbol month to HDF5
    filename = my_updater._filename(symbol, year, month, '.h5')
    logger.info("save to %s" % filename)
    df.to_hdf(filename, "data", mode='w', complevel=5, complib='zlib')
    d_df[symbol] = df

logger.info("concatenate")
df_all = pd.concat(d_df, axis=1)
logger.info(df_all)
df_all = df_all.swaplevel(0, 1, axis=1)

# one panel with Bid/Ask items across all symbols
d = {}
filename = "all-panel-%s-%04d-%2d.h5" % ('ask', year, month)
for col in ['Bid', 'Ask']:
    d[col] = df_all[col]
panel = pd.Panel.from_dict(d)
panel.to_hdf(filename, "data", mode='w', complevel=5, complib='zlib')

logger.info("DONE")
adammenges/statsmodels | statsmodels/sandbox/examples/try_multiols.py | 33 | 1243 | # -*- coding: utf-8 -*-
"""
Created on Sun May 26 13:23:40 2013
Author: Josef Perktold, based on Enrico Giampieri's multiOLS
"""
#import numpy as np
import pandas as pd
import statsmodels.api as sm
from statsmodels.sandbox.multilinear import multiOLS, multigroup
data = sm.datasets.longley.load_pandas()
df = data.exog
df['TOTEMP'] = data.endog

# Perform the specified linear model on all the other columns of the frame
res0 = multiOLS('GNP + 1', df)
# Restrict to a subset of the columns
res = multiOLS('GNP + 0', df, ['GNPDEFL', 'TOTEMP', 'POP'])
print(res.to_string())

url = ("http://vincentarelbundock.github.com/"
       "Rdatasets/csv/HistData/Guerry.csv")
df = pd.read_csv(url, index_col=1)  # index on 'dept'

# relationship between the various parameters and Wealth
pvals = multiOLS('Wealth', df)['adj_pvals', '_f_test']

# define the variable groups
groups = {}
groups['crime'] = ['Crime_prop', 'Infanticide',
                   'Crime_parents', 'Desertion', 'Crime_pers']
groups['religion'] = ['Donation_clergy', 'Clergy', 'Donations']
groups['wealth'] = ['Commerce', 'Lottery', 'Instruction', 'Literacy']

# per-group significance analysis
res3 = multigroup(pvals < 0.05, groups)
print(res3)
| bsd-3-clause |
dsc381/yahoo_cqa | fex.py | 2 | 5492 | import os
import json
import numpy
import codecs
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn import svm
from sklearn.metrics import accuracy_score, precision_score
def extract(f):
    """Parse the UIUC question file and vectorize it with the Yahoo corpus.

    Returns (X, miniX, Y): the full count matrix, the DESC-balanced count
    matrix, and six binary label lists (HUM, ENTY, NUM, LOC, ABB, DESC).
    """
    def sample(f):
        # Balanced resample: each DESC line is followed by two non-DESC lines.
        hits = []
        misses = []
        for l in f:
            if "DESC" in l:
                hits.append(l)
            else:
                misses.append(l)
        fi = []
        i = 0
        for ex in hits:
            fi.append(ex)
            fi.append(misses[i])
            i += 1
            fi.append(misses[i])
            i += 1
        return fi

    def get_labels(f):
        '''assumes each line is "LABEL: text"; returns text corpus plus one
        binary label list per category (HUM, ENTY, NUM, LOC, ABB)'''
        labels = [[] for i in xrange(5)]
        category = []
        corpus = []
        for l in f:
            temp = l.split(' ', 1)
            corpus.append(temp[1:][0])
            category.append(temp[0])
        for line in category:
            labels[0].append(1 if "HUM" in line else 0)
            labels[1].append(1 if "ENTY:" in line else 0)
            labels[2].append(1 if "NUM:" in line else 0)
            labels[3].append(1 if "LOC" in line else 0)
            labels[4].append(1 if "ABB" in line else 0)
        return corpus, labels

    def desc_lables(fi):
        '''As DESC is so under-represented, a balanced resample is labeled
        separately for the DESC SVM'''
        corpus = []
        category = []
        labels = []
        for l in fi:
            temp = l.split(' ', 1)
            corpus.append(temp[1:][0])
            category.append(temp[0])
        for line in category:
            labels.append(1 if "DESC" in line else 0)
        return corpus, labels

    corpus, Y = get_labels(f)
    f.seek(0)
    fi = sample(f)
    d_corpus, d_labels = desc_lables(fi)
    Y.append(d_labels)

    # append the Yahoo questions to both corpora before vectorizing
    corp = []
    q = open("q-a_pair.json", "r")
    json_q = json.load(q)
    for l in json_q:
        corp.append(l["question"])
    q.close()

    vectorizer = CountVectorizer(min_df=1, stop_words=None)
    X = vectorizer.fit_transform(corpus + corp)
    miniX = vectorizer.fit_transform(d_corpus + corp)
    return X, miniX, Y
if __name__ == '__main__':
stop_w = ['a',
'about','above','after','again','against',
'all','am','an','and','any','are','aren\'t',
'as','at','be','because','been','before','being',
'below','between','both','but','by','can\'t','cannot',
'could','couldn\'t','did','didn\'t','do','does','doesn\'t',
'doing','don\'t','down','during','each','few','for',
'from','further','had','hadn\'t','has','hasn\'t','have',
'haven\'t','having','he','he\'d','he\'ll','he\'s','her',
'here','here\'s','hers','herself','him','himself','his',
'i','i\'d','i\'ll','i\'m','i\'ve','if','in',
'into','is','isn\'t','it','it\'s','its','itself',
'let\'s','me','more','most','mustn\'t','my','myself',
'no','nor','not','of','off','on','once',
'only','or','other','ought','our','ours','ourselves',
'out','over','own','same','shan\'t','she','she\'d',
'she\'ll','she\'s','should','shouldn\'t','so','some','such',
'than','that','that\'s','the','their','theirs','them',
'themselves','then','there','there\'s','these','they','they\'d',
'they\'ll','they\'re','they\'ve','this','those','through','to',
'too','under','until','up','very','was','wasn\'t',
'we','we\'d','we\'ll','we\'re','we\'ve','were','weren\'t',
'while','with','won\'t','would','wouldn\'t','you','you\'d',
'you\'ll','you\'re','you\'ve','your','yours','yourself','yourselves']
f = codecs.open(os.path.expanduser("~/Data/cqa/uiuc/train_5500.utf8.txt"),encoding='utf-8',errors='ignore')
X,miniX,Y = extract(f)
f.close()
train_set,d_train_set = X[:len(Y[0])], miniX[:len(Y[5])]
test_set,d_test_set = X[len(Y[0]):], miniX[len(Y[5]):]
svms = []
for i in xrange(6):
svms.append(svm.LinearSVC())
print "training"
for sv,i in zip(svms,xrange(6)):
print str(i)+" /5"
if i == 5:
sv.fit(d_train_set,Y[i])
else:
sv.fit(train_set,Y[i])
# sv.fit(d_train_set,Y[i])
# else:
# sv.fit(train_set,Y[i])
results = []
print "evaulating"
for sv,i in zip(svms,xrange(6)):
print str(i)+" /5"
if i ==5:
results.append(sv.predict(d_test_set))
print d_test_set.shape[0]
print results[5].shape
else:
results.append(sv.predict(test_set))
current = numpy.zeros(len(results[1]))
numpy.savetxt("desc_d",results[5].T, fmt= "%d")
for res in zip(results[:-1]):
current = numpy.logical_or(res,current)
current = numpy.logical_or(results[5].resize(len(current)),numpy.invert(current))
# desc_q = []
# i = 0
# for i in results:
# if i == 1:
# desc_q.append(i)
# i += 1
numpy.savetxt("desc_uid",current.T, fmt= "%d")
# print list(results).count(1)
# clz = svm.SVC(C=1)
# results = clz.predict(X)
# print precision_score(results,Y)
| gpl-2.0 |
qingshuimonk/STA663 | Vanilla_GAN.py | 1 | 3445 | import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
import matplotlib
# matplotlib.use('PS')
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import os
from time import gmtime, strftime
def xavier_init(size):
    """Xavier/Glorot init: normal noise with stddev 1/sqrt(fan_in / 2)."""
    fan_in = size[0]
    stddev = 1. / tf.sqrt(fan_in / 2.)
    return tf.random_normal(shape=size, stddev=stddev)
# Discriminator input: flattened 28x28 MNIST images.
X = tf.placeholder(tf.float32, shape=[None, 784])

# Discriminator weights/biases: 784 -> 128 hidden -> 1 logit.
D_W1 = tf.Variable(xavier_init([784, 128]))
D_b1 = tf.Variable(tf.zeros(shape=[128]))
D_W2 = tf.Variable(xavier_init([128, 1]))
D_b2 = tf.Variable(tf.zeros(shape=[1]))
# Variable list passed to the discriminator's optimizer.
theta_D = [D_W1, D_W2, D_b1, D_b2]

# Generator input: 100-dimensional latent noise vectors.
Z = tf.placeholder(tf.float32, shape=[None, 100])

# Generator weights/biases: 100 -> 128 hidden -> 784 pixels.
G_W1 = tf.Variable(xavier_init([100, 128]))
G_b1 = tf.Variable(tf.zeros(shape=[128]))
G_W2 = tf.Variable(xavier_init([128, 784]))
G_b2 = tf.Variable(tf.zeros(shape=[784]))
# Variable list passed to the generator's optimizer.
theta_G = [G_W1, G_W2, G_b1, G_b2]
def sample_Z(m, n):
    """Draw an (m, n) matrix of latent noise, uniform on [-1, 1)."""
    return np.random.uniform(-1., 1., size=[m, n])
def generator(z):
    """Map latent noise z to a batch of 784 sigmoid-activated pixel values."""
    hidden = tf.nn.relu(tf.matmul(z, G_W1) + G_b1)
    logits = tf.matmul(hidden, G_W2) + G_b2
    return tf.nn.sigmoid(logits)
def discriminator(x):
    """Return (probability-real, raw logit) for a batch of images x."""
    hidden = tf.nn.relu(tf.matmul(x, D_W1) + D_b1)
    logit = tf.matmul(hidden, D_W2) + D_b2
    return tf.nn.sigmoid(logit), logit
def plot(samples):
    """Lay out generated 28x28 samples on a tight 2x8 grayscale grid."""
    fig = plt.figure(figsize=(8, 2))
    grid = gridspec.GridSpec(2, 8)
    grid.update(wspace=0.05, hspace=0.05)

    for idx, sample in enumerate(samples):
        ax = plt.subplot(grid[idx])
        plt.axis('off')
        ax.set_xticklabels([])
        ax.set_yticklabels([])
        ax.set_aspect('equal')
        plt.imshow(sample.reshape(28, 28), cmap='Greys_r')

    return fig
# Build the adversarial graph: D sees real images and G's samples.
G_sample = generator(Z)
D_real, D_logit_real = discriminator(X)
D_fake, D_logit_fake = discriminator(G_sample)

# Non-saturating GAN losses via sigmoid cross-entropy:
# D is pushed toward 1 on real data and 0 on fakes; G toward fooling D.
D_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_logit_real, labels=tf.ones_like(D_logit_real)))
D_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_logit_fake, labels=tf.zeros_like(D_logit_fake)))
D_loss = D_loss_real + D_loss_fake
G_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_logit_fake, labels=tf.ones_like(D_logit_fake)))

# Each optimizer only updates its own network's variables.
D_solver = tf.train.AdamOptimizer().minimize(D_loss, var_list=theta_D)
G_solver = tf.train.AdamOptimizer().minimize(G_loss, var_list=theta_G)

mb_size = 128
Z_dim = 100

mnist = input_data.read_data_sets('MNIST_data', one_hot=True)

sess = tf.Session()
sess.run(tf.global_variables_initializer())

for it in range(100000):
    if it == 99999:
        # display a grid of generated digits on the final iteration
        samples = sess.run(G_sample, feed_dict={Z: sample_Z(16, Z_dim)})
        fig = plot(samples)
        plt.show(fig)

    X_mb, _ = mnist.train.next_batch(mb_size)

    # alternate one discriminator step and one generator step
    _, D_loss_curr = sess.run([D_solver, D_loss], feed_dict={X: X_mb, Z: sample_Z(mb_size, Z_dim)})
    _, G_loss_curr = sess.run([G_solver, G_loss], feed_dict={Z: sample_Z(mb_size, Z_dim)})

    if it % 1000 == 0:
        print('Iter: {}'.format(it))
        print('D loss: {:.4}'.format(D_loss_curr))
        print('G_loss: {:.4}'.format(G_loss_curr))
        print()
Vvucinic/Wander | venv_2_7/lib/python2.7/site-packages/pandas/core/algorithms.py | 9 | 18486 | """
Generic data algorithms. This module is experimental at the moment and not
intended for public consumption
"""
from __future__ import division
from warnings import warn
import numpy as np
from pandas import compat, lib, _np_version_under1p8
import pandas.core.common as com
import pandas.algos as algos
import pandas.hashtable as htable
from pandas.compat import string_types
def match(to_match, values, na_sentinel=-1):
    """
    Compute locations of to_match into values

    Parameters
    ----------
    to_match : array-like
        values to find positions of
    values : array-like
        Unique set of values
    na_sentinel : int, default -1
        Value to mark "not found"

    Returns
    -------
    match : ndarray of integers
    """
    values = com._asarray_tuplesafe(values)
    # strings go through the object-dtype hashtable
    if issubclass(values.dtype.type, string_types):
        values = np.array(values, dtype='O')

    def lookup(htype, caster):
        return _match_generic(to_match, values, htype, caster)

    result = _hashtable_algo(lookup, values.dtype, np.int64)

    if na_sentinel != -1:
        # replace -1 with the sentinel, but return a numpy array of the
        # original shape; use a Series because it handles dtype
        # conversions properly
        from pandas.core.series import Series
        shape = result.shape
        result = Series(result.ravel()).replace(-1, na_sentinel).values.reshape(shape)

    return result
def unique(values):
    """
    Compute unique values (not necessarily sorted) efficiently from input
    array of values

    Parameters
    ----------
    values : array-like

    Returns
    -------
    uniques
    """
    values = com._asarray_tuplesafe(values)

    def uniquify(htype, caster):
        return _unique_generic(values, htype, caster)

    return _hashtable_algo(uniquify, values.dtype)
def isin(comps, values):
    """
    Compute the isin boolean array

    Parameters
    ----------
    comps: array-like
    values: array-like

    Returns
    -------
    boolean array same length as comps
    """
    if not com.is_list_like(comps):
        raise TypeError("only list-like objects are allowed to be passed"
                        " to isin(), you passed a "
                        "[{0}]".format(type(comps).__name__))
    comps = np.asarray(comps)
    if not com.is_list_like(values):
        raise TypeError("only list-like objects are allowed to be passed"
                        " to isin(), you passed a "
                        "[{0}]".format(type(values).__name__))

    # GH11232: np.in1d works around comparison issues on numpy < 1.8
    # under Python 3, and is faster for large inputs
    if (_np_version_under1p8 and compat.PY3) or len(comps) > 1000000:
        checker = lambda x, y: np.in1d(x, np.asarray(list(y)))
    else:
        checker = lambda x, y: lib.ismember_int64(x, set(y))

    # datetimelike values need an i8 view for proper membership testing
    if com.is_datetime64_dtype(comps):
        from pandas.tseries.tools import to_datetime
        values = to_datetime(values)._values.view('i8')
        comps = comps.view('i8')
    elif com.is_timedelta64_dtype(comps):
        from pandas.tseries.timedeltas import to_timedelta
        values = to_timedelta(values)._values.view('i8')
        comps = comps.view('i8')
    elif com.is_int64_dtype(comps):
        pass
    else:
        # generic object path
        checker = lambda x, y: lib.ismember(x, set(values))

    return checker(comps, values)
def _hashtable_algo(f, dtype, return_dtype=None):
    """
    Dispatch ``f(HashTable, type_caster)`` on the input dtype.
    """
    if com.is_float_dtype(dtype):
        return f(htable.Float64HashTable, com._ensure_float64)
    if com.is_integer_dtype(dtype):
        return f(htable.Int64HashTable, com._ensure_int64)
    if com.is_datetime64_dtype(dtype):
        # datetimes are hashed as i8 and viewed back as datetime64[ns]
        return f(htable.Int64HashTable,
                 com._ensure_int64).view(return_dtype or 'M8[ns]')
    if com.is_timedelta64_dtype(dtype):
        # timedeltas are hashed as i8 and viewed back as timedelta64[ns]
        return f(htable.Int64HashTable,
                 com._ensure_int64).view(return_dtype or 'm8[ns]')
    # fallback: object hashtable
    return f(htable.PyObjectHashTable, com._ensure_object)
def _match_generic(values, index, table_type, type_caster):
    """Locate each element of *values* within *index* via a typed hashtable."""
    values = type_caster(values)
    index = type_caster(index)
    # cap the pre-allocation size hint for very large indexes
    ht = table_type(min(len(index), 1000000))
    ht.map_locations(index)
    return ht.lookup(values)
def _unique_generic(values, table_type, type_caster):
    """Return the distinct elements of *values* via a typed hashtable."""
    values = type_caster(values)
    # cap the pre-allocation size hint for very large inputs
    ht = table_type(min(len(values), 1000000))
    return type_caster(ht.unique(values))
def factorize(values, sort=False, order=None, na_sentinel=-1, size_hint=None):
    """
    Encode input values as an enumerated type or categorical variable

    Parameters
    ----------
    values : ndarray (1-d)
        Sequence
    sort : boolean, default False
        Sort by values
    order : deprecated
    na_sentinel : int, default -1
        Value to mark "not found"
    size_hint : hint to the hashtable sizer

    Returns
    -------
    labels : the indexer to the original array
    uniques : ndarray (1-d) or Index
        the unique values. Index is returned when passed values is Index or
        Series

    note: an array of Periods will ignore sort as it returns an always
    sorted PeriodIndex
    """
    if order is not None:
        msg = "order is deprecated. See https://github.com/pydata/pandas/issues/6926"
        warn(msg, FutureWarning, stacklevel=2)

    from pandas.core.index import Index
    from pandas.core.series import Series
    vals = np.asarray(values)

    # remember datetimelike inputs: they are hashed as i8 and must be
    # viewed back to their original dtype at the end
    is_datetime = com.is_datetime64_dtype(vals)
    is_timedelta = com.is_timedelta64_dtype(vals)
    (hash_klass, vec_klass), vals = _get_data_algo(vals, _hashtables)

    table = hash_klass(size_hint or len(vals))
    uniques = vec_klass()
    labels = table.get_labels(vals, uniques, 0, na_sentinel)

    labels = com._ensure_platform_int(labels)

    uniques = uniques.to_array()

    if sort and len(uniques) > 0:
        try:
            sorter = uniques.argsort()
        # BUG FIX: this was a bare "except:", which also swallows
        # KeyboardInterrupt/SystemExit; only the TypeError raised by
        # unorderable mixed str/int data on Python 3 is expected here.
        except TypeError:
            # fall back to hashtable ordering: ints before strings
            t = hash_klass(len(uniques))
            t.map_locations(com._ensure_object(uniques))

            ordered = np.concatenate([
                np.sort(np.array([e for i, e in enumerate(uniques) if f(e)],
                                 dtype=object))
                for f in [lambda x: not isinstance(x, string_types),
                          lambda x: isinstance(x, string_types)]
            ])
            sorter = com._ensure_platform_int(
                t.lookup(com._ensure_object(ordered)))

        # remap labels so they point into the sorted uniques, keeping the
        # na_sentinel positions untouched
        reverse_indexer = np.empty(len(sorter), dtype=np.int_)
        reverse_indexer.put(sorter, np.arange(len(sorter)))

        mask = labels < 0
        labels = reverse_indexer.take(labels)
        np.putmask(labels, mask, -1)

        uniques = uniques.take(sorter)

    if is_datetime:
        uniques = uniques.astype('M8[ns]')
    elif is_timedelta:
        uniques = uniques.astype('m8[ns]')

    # preserve Index/Series inputs by returning an Index of uniques
    if isinstance(values, Index):
        uniques = values._shallow_copy(uniques, name=None)
    elif isinstance(values, Series):
        uniques = Index(uniques)
    return labels, uniques
def value_counts(values, sort=True, ascending=False, normalize=False,
                 bins=None, dropna=True):
    """
    Compute a histogram of the counts of non-null values.
    Parameters
    ----------
    values : ndarray (1-d)
    sort : boolean, default True
        Sort by values
    ascending : boolean, default False
        Sort in ascending order
    normalize: boolean, default False
        If True then compute a relative histogram
    bins : integer, optional
        Rather than count values, group them into half-open bins,
        convenience for pd.cut, only works with numeric data
    dropna : boolean, default True
        Don't include counts of NaN
    Returns
    -------
    value_counts : Series
    """
    from pandas.core.series import Series
    from pandas.tools.tile import cut
    from pandas import Index, PeriodIndex, DatetimeIndex
    name = getattr(values, 'name', None)
    values = Series(values).values
    if bins is not None:
        try:
            # bin first; counting then happens on the integer bin codes
            cat, bins = cut(values, bins, retbins=True)
        except TypeError:
            raise TypeError("bins argument only works with numeric data.")
        values = cat.codes
    if com.is_categorical_dtype(values.dtype):
        # Categorical has its own optimized value_counts
        result = values.value_counts(dropna)
    else:
        dtype = values.dtype
        is_period = com.is_period_arraylike(values)
        is_datetimetz = com.is_datetimetz(values)
        if com.is_datetime_or_timedelta_dtype(dtype) or is_period or is_datetimetz:
            if is_period:
                values = PeriodIndex(values)
            elif is_datetimetz:
                # strip the tz for counting; keys are re-localized below
                tz = getattr(values, 'tz', None)
                values = DatetimeIndex(values).tz_localize(None)
            # count on the int64 representation (NaT hashes as iNaT)
            values = values.view(np.int64)
            keys, counts = htable.value_count_scalar64(values, dropna)
            if dropna:
                from pandas.tslib import iNaT
                msk = keys != iNaT
                keys, counts = keys[msk], counts[msk]
            # localize to the original tz if necessary
            if is_datetimetz:
                keys = DatetimeIndex(keys).tz_localize(tz)
            # convert the keys back to the dtype we came in
            else:
                keys = keys.astype(dtype)
        elif com.is_integer_dtype(dtype):
            values = com._ensure_int64(values)
            keys, counts = htable.value_count_scalar64(values, dropna)
        elif com.is_float_dtype(dtype):
            values = com._ensure_float64(values)
            keys, counts = htable.value_count_scalar64(values, dropna)
        else:
            values = com._ensure_object(values)
            mask = com.isnull(values)
            keys, counts = htable.value_count_object(values, mask)
            if not dropna and mask.any():
                # prepend the NaN bucket when nulls are being kept
                keys = np.insert(keys, 0, np.NaN)
                counts = np.insert(counts, 0, mask.sum())
        if not isinstance(keys, Index):
            keys = Index(keys)
        result = Series(counts, index=keys, name=name)
        if bins is not None:
            # TODO: This next line should be more efficient
            result = result.reindex(np.arange(len(cat.categories)), fill_value=0)
            result.index = bins[:-1]
    if sort:
        result = result.sort_values(ascending=ascending)
    if normalize:
        result = result / float(values.size)
    return result
def mode(values):
    """Returns the mode or mode(s) of the passed Series or ndarray (sorted)"""
    # must sort because hash order isn't necessarily defined.
    from pandas.core.series import Series
    if isinstance(values, Series):
        # preserve subclass when rebuilding the result
        constructor = values._constructor
        values = values.values
    else:
        values = np.asanyarray(values)
        constructor = Series
    dtype = values.dtype
    if com.is_integer_dtype(values):
        values = com._ensure_int64(values)
        result = constructor(sorted(htable.mode_int64(values)), dtype=dtype)
    elif issubclass(values.dtype.type, (np.datetime64, np.timedelta64)):
        # datetimes/timedeltas are hashed through their int64 view, then the
        # original dtype is restored via the constructor
        dtype = values.dtype
        values = values.view(np.int64)
        result = constructor(sorted(htable.mode_int64(values)), dtype=dtype)
    elif com.is_categorical_dtype(values):
        result = constructor(values.mode())
    else:
        mask = com.isnull(values)
        values = com._ensure_object(values)
        res = htable.mode_object(values, mask)
        try:
            res = sorted(res)
        except TypeError as e:
            # mixed unorderable types (py3): fall back to hash order
            warn("Unable to sort modes: %s" % e)
        result = constructor(res, dtype=dtype)
    return result
def rank(values, axis=0, method='average', na_option='keep',
         ascending=True, pct=False):
    """
    Rank the values along the given axis.

    Parameters
    ----------
    values : ndarray (1-d or 2-d)
        Data to rank.
    axis : int, default 0
        Axis along which to rank (2-d input only).
    method : {'average', 'min', 'max', 'first', 'dense'}, default 'average'
        How ties are assigned ranks (passed through as ``ties_method``).
    na_option : {'keep', 'top', 'bottom'}, default 'keep'
        How missing values are handled.
    ascending : boolean, default True
        Rank in ascending order of value.
    pct : boolean, default False
        If True, ranks are returned as percentages.

    Returns
    -------
    ranks : ndarray with the same shape as ``values``

    Raises
    ------
    TypeError
        If ``values`` has more than 2 dimensions.
    """
    if values.ndim == 1:
        f, values = _get_data_algo(values, _rank1d_functions)
        ranks = f(values, ties_method=method, ascending=ascending,
                  na_option=na_option, pct=pct)
    elif values.ndim == 2:
        f, values = _get_data_algo(values, _rank2d_functions)
        ranks = f(values, axis=axis, ties_method=method,
                  ascending=ascending, na_option=na_option, pct=pct)
    else:
        # Previously fell through to an UnboundLocalError on `ranks`;
        # fail with a clear message instead.
        raise TypeError("Array with ndim > 2 are not supported")
    return ranks
def quantile(x, q, interpolation_method='fraction'):
    """
    Compute sample quantile or quantiles of the input array. For example, q=0.5
    computes the median.
    The `interpolation_method` parameter supports three values, namely
    `fraction` (default), `lower` and `higher`. Interpolation is done only,
    if the desired quantile lies between two data points `i` and `j`. For
    `fraction`, the result is an interpolated value between `i` and `j`;
    for `lower`, the result is `i`, for `higher` the result is `j`.
    Parameters
    ----------
    x : ndarray
        Values from which to extract score.
    q : scalar or array
        Percentile at which to extract score.
    interpolation_method : {'fraction', 'lower', 'higher'}, optional
        This optional parameter specifies the interpolation method to use,
        when the desired quantile lies between two data points `i` and `j`:
        - fraction: `i + (j - i)*fraction`, where `fraction` is the
          fractional part of the index surrounded by `i` and `j`.
        - lower: `i`.
        - higher: `j`.
    Returns
    -------
    score : float
        Score at percentile.
    Examples
    --------
    >>> from scipy import stats
    >>> a = np.arange(100)
    >>> stats.scoreatpercentile(a, 50)
    49.5
    """
    x = np.asarray(x)
    # drop missing values; quantiles are computed on the sorted remainder
    mask = com.isnull(x)
    x = x[~mask]
    values = np.sort(x)
    def _get_score(at):
        # score for a single quantile `at` in [0, 1]
        if len(values) == 0:
            return np.nan
        idx = at * (len(values) - 1)
        if idx % 1 == 0:
            score = values[int(idx)]
        else:
            if interpolation_method == 'fraction':
                score = _interpolate(values[int(idx)], values[int(idx) + 1],
                                     idx % 1)
            elif interpolation_method == 'lower':
                # BUG FIX: np.floor/np.ceil return floats, which are not
                # valid ndarray indices -- cast to int before indexing.
                score = values[int(np.floor(idx))]
            elif interpolation_method == 'higher':
                score = values[int(np.ceil(idx))]
            else:
                raise ValueError("interpolation_method can only be 'fraction' "
                                 ", 'lower' or 'higher'")
        return score
    if np.isscalar(q):
        return _get_score(q)
    else:
        q = np.asarray(q, np.float64)
        return algos.arrmap_float64(q, _get_score)
def _interpolate(a, b, fraction):
"""Returns the point at the given fraction between a and b, where
'fraction' must be between 0 and 1.
"""
return a + (b - a) * fraction
def _get_data_algo(values, func_map):
    """Pick the dtype-specialized function from *func_map* and coerce *values*.

    Returns a ``(func, coerced_values)`` pair. Datetime-like data goes
    through its int64 view unless it contains NaT, in which case we fall
    back to the generic (object) path with NaN in the missing slots.
    """
    if com.is_float_dtype(values):
        return func_map['float64'], com._ensure_float64(values)
    if com.needs_i8_conversion(values):
        # if we have NaT, punt to object dtype
        mask = com.isnull(values)
        if not mask.ravel().any():
            return func_map['int64'], values.view('i8')
        coerced = com._ensure_object(values)
        coerced[mask] = np.nan
        return func_map['generic'], coerced
    if com.is_integer_dtype(values):
        return func_map['int64'], com._ensure_int64(values)
    return func_map['generic'], com._ensure_object(values)
def group_position(*args):
    """
    For each row of ``zip(*args)``, return how many identical rows preceded
    it -- i.e. the occurrence index of the row within its duplicate group.
    """
    counts = {}
    positions = []
    for key in zip(*args):
        seen = counts.get(key, 0)
        positions.append(seen)
        counts[key] = seen + 1
    return positions
# View datetime64/timedelta64 data as int64 so the selection algorithms can
# operate on the raw i8 representation.
_dtype_map = {'datetime64[ns]': 'int64', 'timedelta64[ns]': 'int64'}
def _finalize_nsmallest(arr, kth_val, n, keep, narr):
ns, = np.nonzero(arr <= kth_val)
inds = ns[arr[ns].argsort(kind='mergesort')][:n]
if keep == 'last':
# reverse indices
return narr - 1 - inds
else:
return inds
def nsmallest(arr, n, keep='first'):
    '''
    Find the indices of the n smallest values of a numpy array.
    Note: Fails silently with NaN.
    '''
    # For keep='last' the array is reversed so that, among equal values,
    # later positions win; _finalize_nsmallest maps indices back afterwards.
    if keep == 'last':
        arr = arr[::-1]
    narr = len(arr)
    n = min(n, narr)
    sdtype = str(arr.dtype)
    # datetime64/timedelta64 are compared through their int64 view
    arr = arr.view(_dtype_map.get(sdtype, sdtype))
    # copy: kth_smallest partially sorts its input in place
    kth_val = algos.kth_smallest(arr.copy(), n - 1)
    return _finalize_nsmallest(arr, kth_val, n, keep, narr)
def nlargest(arr, n, keep='first'):
    """
    Find the indices of the n largest values of a numpy array by negating
    the data (after any datetime-like -> int64 view) and delegating to
    ``nsmallest``.
    Note: Fails silently with NaN.
    """
    sdtype = str(arr.dtype)
    viewed = arr.view(_dtype_map.get(sdtype, sdtype))
    return nsmallest(-viewed, n, keep=keep)
def select_n_slow(dropped, n, keep, method):
    """Fallback n-largest/smallest: sort the whole series, take the head.

    The series is reversed first when ties should favour last occurrences
    (``keep == 'last'``) or when selecting the largest values, mirroring the
    tie-breaking of the fast path.
    """
    if keep == 'last' or method == 'nlargest':
        ordered = dropped[::-1]
    else:
        ordered = dropped[:]
    ascending = (method == 'nsmallest')
    return ordered.sort_values(ascending=ascending).head(n)
# Dispatch table: method name -> fast selection implementation (see select_n).
_select_methods = {'nsmallest': nsmallest, 'nlargest': nlargest}
def select_n(series, n, keep, method):
    """Implement n largest/smallest.
    Parameters
    ----------
    n : int
    keep : {'first', 'last'}, default 'first'
    method : str, {'nlargest', 'nsmallest'}
    Returns
    -------
    nordered : Series
    """
    dtype = series.dtype
    if not issubclass(dtype.type, (np.integer, np.floating, np.datetime64,
                                   np.timedelta64)):
        raise TypeError("Cannot use method %r with dtype %s" % (method, dtype))
    if keep not in ('first', 'last'):
        raise ValueError('keep must be either "first", "last"')
    if n <= 0:
        # empty selection, preserving the series type
        return series[[]]
    dropped = series.dropna()
    if n >= len(series):
        # asking for (at least) everything: a full sort is just as fast
        return select_n_slow(dropped, n, keep, method)
    inds = _select_methods[method](dropped.values, n, keep)
    return dropped.iloc[inds]
# dtype key -> cython 1-d ranking implementation (see rank()).
_rank1d_functions = {
    'float64': algos.rank_1d_float64,
    'int64': algos.rank_1d_int64,
    'generic': algos.rank_1d_generic
}
# dtype key -> cython 2-d ranking implementation (see rank()).
_rank2d_functions = {
    'float64': algos.rank_2d_float64,
    'int64': algos.rank_2d_int64,
    'generic': algos.rank_2d_generic
}
# dtype key -> (hash table class, growable vector class) used by factorize().
_hashtables = {
    'float64': (htable.Float64HashTable, htable.Float64Vector),
    'int64': (htable.Int64HashTable, htable.Int64Vector),
    'generic': (htable.PyObjectHashTable, htable.ObjectVector)
}
| artistic-2.0 |
arunchaganty/presidential-debates | python/doc2vec.py | 2 | 5109 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Label documents with doc2vec.
"""
import numpy as np
import os
from collections import Counter
from pprint import pprint
import csv
import sys
import gensim
from gensim.corpora import Dictionary, HashDictionary, MmCorpus
from gensim.models.doc2vec import TaggedDocument, Doc2Vec
from sklearn.neighbors import NearestNeighbors
from happyfuntokenizing import Tokenizer
import ipdb
# Shorthand for the Euclidean norm used throughout this module.
norm = np.linalg.norm
# English stopwords plus common punctuation tokens, available for optional
# token filtering.
STOPWORDS = ['i', 'me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves', 'you', 'your', 'yours',
             'yourself', 'yourselves', 'he', 'him', 'his', 'himself', 'she', 'her', 'hers',
             'herself', 'it', 'its', 'itself', 'they', 'them', 'their', 'theirs', 'themselves',
             'what', 'which', 'who', 'whom', 'this', 'that', 'these', 'those', 'am', 'is', 'are',
             'was', 'were', 'be', 'been', 'being', 'have', 'has', 'had', 'having', 'do', 'does',
             'did', 'doing', 'a', 'an', 'the', 'and', 'but', 'if', 'or', 'because', 'as', 'until',
             'while', 'of', 'at', 'by', 'for', 'with', 'about', 'against', 'between', 'into',
             'through', 'during', 'before', 'after', 'above', 'below', 'to', 'from', 'up', 'down',
             'in', 'out', 'on', 'off', 'over', 'under', 'again', 'further', 'then', 'once', 'here',
             'there', 'when', 'where', 'why', 'how', 'all', 'any', 'both', 'each', 'few', 'more',
             'most', 'other', 'some', 'such', 'no', 'nor', 'not', 'only', 'own', 'same', 'so',
             'than', 'too', 'very', 's', 't', 'can', 'will', 'just', 'don', 'should', 'now',
             '.', ',', '@', '!', '#', '$', '%', '^', '&', '*', ':', ";", '"', "'", "?",
             ]
# Shared tokenizer instance (happyfuntokenizing) used by tokenize().
TOKENIZER = Tokenizer()
def tokenize(text):
    """Tokenize *text* with the shared happy-fun tokenizer.

    Stopwords are NOT filtered; a STOPWORDS-filtering variant existed but was
    disabled (see the STOPWORDS list above if it needs reviving).
    @returns: list[str]
    """
    # The original had a second, unreachable `return` after this one (dead
    # code left from a disabled stopword filter); it has been removed. The
    # docstring also wrongly claimed bi-grams were produced.
    return list(TOKENIZER.tokenize(text))
def load_data(stream):
    """
    Parse a tab-separated stream whose header row is ``id<TAB>text`` and
    return an iterator over the remaining ``[id, text]`` rows.
    """
    reader = csv.reader(stream, delimiter='\t')
    assert next(reader) == ["id", "text"]
    return reader
def embed_documents(documents, size=100):
    """Use Doc2Vec to embed documents in a d-dimensional space.
    @documents: iterable of (id, tokens) pairs, tokens being list[str]
    @size: dimensionality of the embedding
    @returns: trained gensim Doc2Vec model; per-document vectors are
              accessible via ``model.docvecs``
    """
    # Each document is tagged with its id so model.docvecs can be indexed
    # by tag later.
    documents = (TaggedDocument(words=words, tags=[id]) for (id,words) in documents)
    model = Doc2Vec(documents, size=size, window=8, min_count=5, workers=4)
    return model
#def find_nearest_neighbors(vecs):
# nbrs = NearestNeighbors(n_neighbors=10, algorithm='ball_tree').fit(vecs)
# distances, indices = nbrs.kneighbors(vecs)
# return [(i, j, distance) for i in range(len(vecs)) for j, distance in nbrs.kneighbors(vecs[i])]
#def find_nearest_neighbors(vecs):
# for i, v in enumerate(vecs):
# distances = norm(vecs - v, axis = 1)
# neighbors = distances.argsort()[1:11]
# for j in neighbors:
# yield (i, j, np.exp(-distances[j]))
def find_nearest_neighbors(X):
    """Yield ``(i, j, similarity)`` for the 10 nearest neighbors of each row.

    Rows are L2-normalized first, so the inner product is cosine similarity
    and "nearest" means HIGHEST similarity. The row itself (similarity 1,
    the maximum) is skipped.
    """
    # Normalize the vectors so dot products are cosine similarities.
    X = (X.T / np.linalg.norm(X, axis=1)).T
    for i, x in enumerate(X):
        # compute inner product with every row.
        similarities = X.dot(x)
        # BUG FIX: sort by DESCENDING similarity. The previous ascending
        # argsort()[1:11] returned the 10 *farthest* rows (and the dropped
        # index 0 was the least similar row, not the point itself).
        neighbors = similarities.argsort()[::-1][1:11]
        for j in neighbors:
            yield (i, j, similarities[j])
def do_command(args):
    """Embed the input documents (training a new Doc2Vec model or loading a
    cached one from args.modelfile) and write each document's 10 nearest
    neighbors as TSV rows (id1, id2, score) to args.output."""
    # Load data
    data = load_data(args.input)
    #ids, documents = zip(*data)
    data = [(id, tokenize(doc)) for id, doc in data]
    ids = [id for id, _ in data]
    # train only when no cached model exists on disk
    if not os.path.exists(args.modelfile):
        model = embed_documents(data)
        # Save model
        model.save(args.modelfile)
    else:
        model = Doc2Vec.load(args.modelfile)
        #map(model.infer_tokens, tokenized)
    print("Loaded model.")
    # Do k-nearest neighbors search.
    writer = csv.writer(args.output, delimiter='\t')
    writer.writerow(["id1", "id2", "score"])
    # args.count <= 0 means "use every document vector"
    count = int(args.count) if args.count > 0 else len(model.docvecs)
    vectors = np.array([model.docvecs[i] for i in range(count)])
    del model # clear up memory
    for i, j, score in find_nearest_neighbors(vectors):
        id1, id2 = ids[i], ids[j]
        writer.writerow([id1, id2, score])
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser( description='' )
    parser.add_argument('--modelfile', type=str, default='doc2vec.model', help="Model file")
    parser.add_argument('--nneighbors', type=str, default='doc2vec.neighbors', help="Neighbors")
    parser.add_argument('--input', type=argparse.FileType('r'), default=sys.stdin, help="Input file")
    parser.add_argument('--output', type=argparse.FileType('w'), default=sys.stdout, help="Output vectors.")
    # --count is a float so scientific notation like 1e5 parses; do_command
    # casts it to int before use.
    parser.add_argument('--count', type=float, default=1e5, help="number of vectors to use.")
    parser.set_defaults(func=do_command)
    #subparsers = parser.add_subparsers()
    #command_parser = subparsers.add_parser('command', help='' )
    #command_parser.set_defaults(func=do_command)
    ARGS = parser.parse_args()
    ARGS.func(ARGS)
| mit |
anirudhjayaraman/scikit-learn | sklearn/datasets/samples_generator.py | 103 | 56423 | """
Generate samples of synthetic data sets.
"""
# Authors: B. Thirion, G. Varoquaux, A. Gramfort, V. Michel, O. Grisel,
# G. Louppe, J. Nothman
# License: BSD 3 clause
import numbers
import array
import numpy as np
from scipy import linalg
import scipy.sparse as sp
from ..preprocessing import MultiLabelBinarizer
from ..utils import check_array, check_random_state
from ..utils import shuffle as util_shuffle
from ..utils.fixes import astype
from ..utils.random import sample_without_replacement
from ..externals import six
map = six.moves.map
zip = six.moves.zip
def _generate_hypercube(samples, dimensions, rng):
    """Returns distinct binary samples of length dimensions
    """
    # For very high dimensions, 2 ** dimensions overflows the integer
    # machinery of sample_without_replacement, so split the request into
    # chunks of at most 30 bits and stack the pieces.
    if dimensions > 30:
        return np.hstack([_generate_hypercube(samples, dimensions - 30, rng),
                          _generate_hypercube(samples, 30, rng)])
    out = astype(sample_without_replacement(2 ** dimensions, samples,
                                            random_state=rng),
                 dtype='>u4', copy=False)
    # unpack each big-endian uint32 into bits, keeping the low `dimensions`
    out = np.unpackbits(out.view('>u1')).reshape((-1, 32))[:, -dimensions:]
    return out
def make_classification(n_samples=100, n_features=20, n_informative=2,
                        n_redundant=2, n_repeated=0, n_classes=2,
                        n_clusters_per_class=2, weights=None, flip_y=0.01,
                        class_sep=1.0, hypercube=True, shift=0.0, scale=1.0,
                        shuffle=True, random_state=None):
    """Generate a random n-class classification problem.
    This initially creates clusters of points normally distributed (std=1)
    about vertices of a `2 * class_sep`-sided hypercube, and assigns an equal
    number of clusters to each class. It introduces interdependence between
    these features and adds various types of further noise to the data.
    Prior to shuffling, `X` stacks a number of these primary "informative"
    features, "redundant" linear combinations of these, "repeated" duplicates
    of sampled features, and arbitrary noise for and remaining features.
    Read more in the :ref:`User Guide <sample_generators>`.
    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.
    n_features : int, optional (default=20)
        The total number of features. These comprise `n_informative`
        informative features, `n_redundant` redundant features, `n_repeated`
        duplicated features and `n_features-n_informative-n_redundant-
        n_repeated` useless features drawn at random.
    n_informative : int, optional (default=2)
        The number of informative features. Each class is composed of a number
        of gaussian clusters each located around the vertices of a hypercube
        in a subspace of dimension `n_informative`. For each cluster,
        informative features are drawn independently from N(0, 1) and then
        randomly linearly combined within each cluster in order to add
        covariance. The clusters are then placed on the vertices of the
        hypercube.
    n_redundant : int, optional (default=2)
        The number of redundant features. These features are generated as
        random linear combinations of the informative features.
    n_repeated : int, optional (default=0)
        The number of duplicated features, drawn randomly from the informative
        and the redundant features.
    n_classes : int, optional (default=2)
        The number of classes (or labels) of the classification problem.
    n_clusters_per_class : int, optional (default=2)
        The number of clusters per class.
    weights : list of floats or None (default=None)
        The proportions of samples assigned to each class. If None, then
        classes are balanced. Note that if `len(weights) == n_classes - 1`,
        then the last class weight is automatically inferred.
        More than `n_samples` samples may be returned if the sum of `weights`
        exceeds 1.
    flip_y : float, optional (default=0.01)
        The fraction of samples whose class are randomly exchanged.
    class_sep : float, optional (default=1.0)
        The factor multiplying the hypercube dimension.
    hypercube : boolean, optional (default=True)
        If True, the clusters are put on the vertices of a hypercube. If
        False, the clusters are put on the vertices of a random polytope.
    shift : float, array of shape [n_features] or None, optional (default=0.0)
        Shift features by the specified value. If None, then features
        are shifted by a random value drawn in [-class_sep, class_sep].
    scale : float, array of shape [n_features] or None, optional (default=1.0)
        Multiply features by the specified value. If None, then features
        are scaled by a random value drawn in [1, 100]. Note that scaling
        happens after shifting.
    shuffle : boolean, optional (default=True)
        Shuffle the samples and the features.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    Returns
    -------
    X : array of shape [n_samples, n_features]
        The generated samples.
    y : array of shape [n_samples]
        The integer labels for class membership of each sample.
    Notes
    -----
    The algorithm is adapted from Guyon [1] and was designed to generate
    the "Madelon" dataset.
    References
    ----------
    .. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable
           selection benchmark", 2003.
    See also
    --------
    make_blobs: simplified variant
    make_multilabel_classification: unrelated generator for multilabel tasks
    """
    generator = check_random_state(random_state)
    # Count features, clusters and samples
    if n_informative + n_redundant + n_repeated > n_features:
        raise ValueError("Number of informative, redundant and repeated "
                         "features must sum to less than the number of total"
                         " features")
    if 2 ** n_informative < n_classes * n_clusters_per_class:
        raise ValueError("n_classes * n_clusters_per_class must"
                         " be smaller or equal 2 ** n_informative")
    if weights and len(weights) not in [n_classes, n_classes - 1]:
        raise ValueError("Weights specified but incompatible with number "
                         "of classes.")
    n_useless = n_features - n_informative - n_redundant - n_repeated
    n_clusters = n_classes * n_clusters_per_class
    # infer the omitted last weight, or build balanced defaults
    if weights and len(weights) == (n_classes - 1):
        weights.append(1.0 - sum(weights))
    if weights is None:
        weights = [1.0 / n_classes] * n_classes
        weights[-1] = 1.0 - sum(weights[:-1])
    # Distribute samples among clusters by weight
    n_samples_per_cluster = []
    for k in range(n_clusters):
        n_samples_per_cluster.append(int(n_samples * weights[k % n_classes]
                                     / n_clusters_per_class))
    # hand out the rounding remainder one sample at a time
    for i in range(n_samples - sum(n_samples_per_cluster)):
        n_samples_per_cluster[i % n_clusters] += 1
    # Intialize X and y
    X = np.zeros((n_samples, n_features))
    y = np.zeros(n_samples, dtype=np.int)
    # Build the polytope whose vertices become cluster centroids
    centroids = _generate_hypercube(n_clusters, n_informative,
                                    generator).astype(float)
    # rescale vertices from {0, 1} to {-class_sep, +class_sep}
    centroids *= 2 * class_sep
    centroids -= class_sep
    if not hypercube:
        centroids *= generator.rand(n_clusters, 1)
        centroids *= generator.rand(1, n_informative)
    # Initially draw informative features from the standard normal
    X[:, :n_informative] = generator.randn(n_samples, n_informative)
    # Create each cluster; a variant of make_blobs
    stop = 0
    for k, centroid in enumerate(centroids):
        start, stop = stop, stop + n_samples_per_cluster[k]
        y[start:stop] = k % n_classes  # assign labels
        X_k = X[start:stop, :n_informative]  # slice a view of the cluster
        A = 2 * generator.rand(n_informative, n_informative) - 1
        X_k[...] = np.dot(X_k, A)  # introduce random covariance
        X_k += centroid  # shift the cluster to a vertex
    # Create redundant features
    if n_redundant > 0:
        B = 2 * generator.rand(n_informative, n_redundant) - 1
        X[:, n_informative:n_informative + n_redundant] = \
            np.dot(X[:, :n_informative], B)
    # Repeat some features
    if n_repeated > 0:
        n = n_informative + n_redundant
        indices = ((n - 1) * generator.rand(n_repeated) + 0.5).astype(np.intp)
        X[:, n:n + n_repeated] = X[:, indices]
    # Fill useless features
    if n_useless > 0:
        X[:, -n_useless:] = generator.randn(n_samples, n_useless)
    # Randomly replace labels (note: RNG draws are consumed even when
    # flip_y == 0, since the mask is then all-False)
    if flip_y >= 0.0:
        flip_mask = generator.rand(n_samples) < flip_y
        y[flip_mask] = generator.randint(n_classes, size=flip_mask.sum())
    # Randomly shift and scale
    if shift is None:
        shift = (2 * generator.rand(n_features) - 1) * class_sep
    X += shift
    if scale is None:
        scale = 1 + 100 * generator.rand(n_features)
    X *= scale
    if shuffle:
        # Randomly permute samples
        X, y = util_shuffle(X, y, random_state=generator)
        # Randomly permute features
        indices = np.arange(n_features)
        generator.shuffle(indices)
        X[:, :] = X[:, indices]
    return X, y
def make_multilabel_classification(n_samples=100, n_features=20, n_classes=5,
                                   n_labels=2, length=50, allow_unlabeled=True,
                                   sparse=False, return_indicator='dense',
                                   return_distributions=False,
                                   random_state=None):
    """Generate a random multilabel classification problem.
    For each sample, the generative process is:
        - pick the number of labels: n ~ Poisson(n_labels)
        - n times, choose a class c: c ~ Multinomial(theta)
        - pick the document length: k ~ Poisson(length)
        - k times, choose a word: w ~ Multinomial(theta_c)
    In the above process, rejection sampling is used to make sure that
    n is never zero or more than `n_classes`, and that the document length
    is never zero. Likewise, we reject classes which have already been chosen.
    Read more in the :ref:`User Guide <sample_generators>`.
    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.
    n_features : int, optional (default=20)
        The total number of features.
    n_classes : int, optional (default=5)
        The number of classes of the classification problem.
    n_labels : int, optional (default=2)
        The average number of labels per instance. More precisely, the number
        of labels per sample is drawn from a Poisson distribution with
        ``n_labels`` as its expected value, but samples are bounded (using
        rejection sampling) by ``n_classes``, and must be nonzero if
        ``allow_unlabeled`` is False.
    length : int, optional (default=50)
        The sum of the features (number of words if documents) is drawn from
        a Poisson distribution with this expected value.
    allow_unlabeled : bool, optional (default=True)
        If ``True``, some instances might not belong to any class.
    sparse : bool, optional (default=False)
        If ``True``, return a sparse feature matrix
    return_indicator : 'dense' (default) | 'sparse' | False
        If ``dense`` return ``Y`` in the dense binary indicator format. If
        ``'sparse'`` return ``Y`` in the sparse binary indicator format.
        ``False`` returns a list of lists of labels.
    return_distributions : bool, optional (default=False)
        If ``True``, return the prior class probability and conditional
        probabilities of features given classes, from which the data was
        drawn.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    Returns
    -------
    X : array of shape [n_samples, n_features]
        The generated samples.
    Y : array or sparse CSR matrix of shape [n_samples, n_classes]
        The label sets.
    p_c : array, shape [n_classes]
        The probability of each class being drawn. Only returned if
        ``return_distributions=True``.
    p_w_c : array, shape [n_features, n_classes]
        The probability of each feature being drawn given each class.
        Only returned if ``return_distributions=True``.
    """
    generator = check_random_state(random_state)
    # random class prior p(c) and word-given-class distributions p(w|c)
    p_c = generator.rand(n_classes)
    p_c /= p_c.sum()
    cumulative_p_c = np.cumsum(p_c)
    p_w_c = generator.rand(n_features, n_classes)
    p_w_c /= np.sum(p_w_c, axis=0)
    def sample_example():
        """Draw one (word_indices, label_list) sample."""
        _, n_classes = p_w_c.shape
        # pick a nonzero number of labels per document by rejection sampling
        y_size = n_classes + 1
        while (not allow_unlabeled and y_size == 0) or y_size > n_classes:
            y_size = generator.poisson(n_labels)
        # pick n classes
        y = set()
        while len(y) != y_size:
            # pick a class with probability P(c)
            c = np.searchsorted(cumulative_p_c,
                                generator.rand(y_size - len(y)))
            y.update(c)
        y = list(y)
        # pick a non-zero document length by rejection sampling
        n_words = 0
        while n_words == 0:
            n_words = generator.poisson(length)
        # generate a document of length n_words
        if len(y) == 0:
            # if sample does not belong to any class, generate noise word
            words = generator.randint(n_features, size=n_words)
            return words, y
        # sample words with replacement from selected classes
        cumulative_p_w_sample = p_w_c.take(y, axis=1).sum(axis=1).cumsum()
        cumulative_p_w_sample /= cumulative_p_w_sample[-1]
        words = np.searchsorted(cumulative_p_w_sample, generator.rand(n_words))
        return words, y
    # build X incrementally in CSR form; duplicate word draws are summed
    # into counts by sum_duplicates() below
    X_indices = array.array('i')
    X_indptr = array.array('i', [0])
    Y = []
    for i in range(n_samples):
        words, y = sample_example()
        X_indices.extend(words)
        X_indptr.append(len(X_indices))
        Y.append(y)
    X_data = np.ones(len(X_indices), dtype=np.float64)
    X = sp.csr_matrix((X_data, X_indices, X_indptr),
                      shape=(n_samples, n_features))
    X.sum_duplicates()
    if not sparse:
        X = X.toarray()
    # return_indicator can be True due to backward compatibility
    if return_indicator in (True, 'sparse', 'dense'):
        lb = MultiLabelBinarizer(sparse_output=(return_indicator == 'sparse'))
        Y = lb.fit([range(n_classes)]).transform(Y)
    elif return_indicator is not False:
        raise ValueError("return_indicator must be either 'sparse', 'dense' "
                         'or False.')
    if return_distributions:
        return X, Y, p_c, p_w_c
    return X, Y
def make_hastie_10_2(n_samples=12000, random_state=None):
    """Generate the binary classification data of Hastie et al. 2009,
    Example 10.2.

    Ten i.i.d. standard-normal features are drawn; the target is +1 when the
    squared norm of the feature vector exceeds 9.34 and -1 otherwise.

    Parameters
    ----------
    n_samples : int, optional (default=12000)
        The number of samples.
    random_state : int, RandomState instance or None, optional (default=None)
        Seed, generator, or None for the global ``np.random`` state.

    Returns
    -------
    X : array of shape [n_samples, 10]
        The input samples.
    y : array of shape [n_samples]
        The output values (+1.0 or -1.0).

    References
    ----------
    .. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
           Learning Ed. 2", Springer, 2009.

    See also
    --------
    make_gaussian_quantiles: a generalization of this dataset approach
    """
    rng = check_random_state(random_state)
    X = rng.normal(size=(n_samples, 10))
    # +1 above the threshold, -1 at or below it
    y = np.where((X ** 2.0).sum(axis=1) > 9.34, 1.0, -1.0)
    return X, y
def make_regression(n_samples=100, n_features=100, n_informative=10,
                    n_targets=1, bias=0.0, effective_rank=None,
                    tail_strength=0.5, noise=0.0, shuffle=True, coef=False,
                    random_state=None):
    """Generate a random regression problem.
    The input set can either be well conditioned (by default) or have a low
    rank-fat tail singular profile. See :func:`make_low_rank_matrix` for
    more details.
    The output is generated by applying a (potentially biased) random linear
    regression model with `n_informative` nonzero regressors to the previously
    generated input and some gaussian centered noise with some adjustable
    scale.
    Read more in the :ref:`User Guide <sample_generators>`.
    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.
    n_features : int, optional (default=100)
        The number of features.
    n_informative : int, optional (default=10)
        The number of informative features, i.e., the number of features used
        to build the linear model used to generate the output.
    n_targets : int, optional (default=1)
        The number of regression targets, i.e., the dimension of the y output
        vector associated with a sample. By default, the output is a scalar.
    bias : float, optional (default=0.0)
        The bias term in the underlying linear model.
    effective_rank : int or None, optional (default=None)
        if not None:
            The approximate number of singular vectors required to explain most
            of the input data by linear combinations. Using this kind of
            singular spectrum in the input allows the generator to reproduce
            the correlations often observed in practice.
        if None:
            The input set is well conditioned, centered and gaussian with
            unit variance.
    tail_strength : float between 0.0 and 1.0, optional (default=0.5)
        The relative importance of the fat noisy tail of the singular values
        profile if `effective_rank` is not None.
    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise applied to the output.
    shuffle : boolean, optional (default=True)
        Shuffle the samples and the features.
    coef : boolean, optional (default=False)
        If True, the coefficients of the underlying linear model are returned.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    Returns
    -------
    X : array of shape [n_samples, n_features]
        The input samples.
    y : array of shape [n_samples] or [n_samples, n_targets]
        The output values.
    coef : array of shape [n_features] or [n_features, n_targets], optional
        The coefficient of the underlying linear model. It is returned only if
        coef is True.
    """
    n_informative = min(n_features, n_informative)
    generator = check_random_state(random_state)
    if effective_rank is None:
        # Randomly generate a well conditioned input set
        X = generator.randn(n_samples, n_features)
    else:
        # Randomly generate a low rank, fat tail input set
        X = make_low_rank_matrix(n_samples=n_samples,
                                 n_features=n_features,
                                 effective_rank=effective_rank,
                                 tail_strength=tail_strength,
                                 random_state=generator)
    # Generate a ground truth model with only n_informative features being non
    # zeros (the other features are not correlated to y and should be ignored
    # by a sparsifying regularizers such as L1 or elastic net)
    ground_truth = np.zeros((n_features, n_targets))
    ground_truth[:n_informative, :] = 100 * generator.rand(n_informative,
                                                           n_targets)
    y = np.dot(X, ground_truth) + bias
    # Add noise
    if noise > 0.0:
        y += generator.normal(scale=noise, size=y.shape)
    # Randomly permute samples and features (the coefficient rows are
    # permuted identically so X @ ground_truth stays consistent)
    if shuffle:
        X, y = util_shuffle(X, y, random_state=generator)
        indices = np.arange(n_features)
        generator.shuffle(indices)
        X[:, :] = X[:, indices]
        ground_truth = ground_truth[indices]
    # collapse the trailing targets axis when n_targets == 1
    y = np.squeeze(y)
    if coef:
        return X, y, np.squeeze(ground_truth)
    else:
        return X, y
def make_circles(n_samples=100, shuffle=True, noise=None, random_state=None,
                 factor=.8):
    """Make a large circle containing a smaller circle in 2d.

    A simple toy dataset to visualize clustering and classification
    algorithms.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The total number of points generated.
    shuffle : bool, optional (default=True)
        Whether to shuffle the samples.
    noise : double or None (default=None)
        Standard deviation of Gaussian noise added to the data.
    factor : double < 1 (default=.8)
        Scale factor between inner and outer circle.

    Returns
    -------
    X : array of shape [n_samples, 2]
        The generated samples.
    y : array of shape [n_samples]
        The integer labels (0 or 1) for class membership of each sample.
    """
    if factor < 0 or factor > 1:
        raise ValueError("'factor' has to be between 0 and 1.")

    generator = check_random_state(random_state)

    # Sample one extra angle and drop it so that the first and last points of
    # each circle do not coincide.
    n_out = n_samples // 2
    angles = np.linspace(0, 2 * np.pi, n_out + 1)[:-1]

    outer = np.column_stack((np.cos(angles), np.sin(angles)))
    inner = outer * factor

    X = np.vstack((outer, inner))
    y = np.hstack((np.zeros(n_out, dtype=np.intp),
                   np.ones(n_out, dtype=np.intp)))

    if shuffle:
        X, y = util_shuffle(X, y, random_state=generator)
    if noise is not None:
        X += generator.normal(scale=noise, size=X.shape)

    return X, y
def make_moons(n_samples=100, shuffle=True, noise=None, random_state=None):
    """Make two interleaving half circles.

    A simple toy dataset to visualize clustering and classification
    algorithms.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The total number of points generated.
    shuffle : bool, optional (default=True)
        Whether to shuffle the samples.
    noise : double or None (default=None)
        Standard deviation of Gaussian noise added to the data.
    random_state : int, RandomState instance or None, optional (default=None)
        Determines random number generation for dataset creation and noise.

    Returns
    -------
    X : array of shape [n_samples, 2]
        The generated samples.
    y : array of shape [n_samples]
        The integer labels (0 or 1) for class membership of each sample.
    """
    n_samples_out = n_samples // 2
    n_samples_in = n_samples - n_samples_out

    generator = check_random_state(random_state)

    # Outer moon: upper half circle; inner moon: lower half circle shifted
    # right by 1 and down by 0.5 so the two moons interleave.
    outer_circ_x = np.cos(np.linspace(0, np.pi, n_samples_out))
    outer_circ_y = np.sin(np.linspace(0, np.pi, n_samples_out))
    inner_circ_x = 1 - np.cos(np.linspace(0, np.pi, n_samples_in))
    inner_circ_y = 1 - np.sin(np.linspace(0, np.pi, n_samples_in)) - .5

    X = np.vstack((np.append(outer_circ_x, inner_circ_x),
                   np.append(outer_circ_y, inner_circ_y))).T
    # BUG FIX: the label vector must follow the stacking order of X, which
    # places the n_samples_out outer points first and the n_samples_in inner
    # points second.  The two sizes were previously swapped, mislabelling one
    # sample whenever n_samples is odd.
    y = np.hstack([np.zeros(n_samples_out, dtype=np.intp),
                   np.ones(n_samples_in, dtype=np.intp)])

    if shuffle:
        X, y = util_shuffle(X, y, random_state=generator)

    if noise is not None:
        X += generator.normal(scale=noise, size=X.shape)

    return X, y
def make_blobs(n_samples=100, n_features=2, centers=3, cluster_std=1.0,
               center_box=(-10.0, 10.0), shuffle=True, random_state=None):
    """Generate isotropic Gaussian blobs for clustering.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The total number of points equally divided among clusters.
    n_features : int, optional (default=2)
        The number of features for each sample.
    centers : int or array of shape [n_centers, n_features], optional
        (default=3)
        The number of centers to generate, or the fixed center locations.
    cluster_std : float or sequence of floats, optional (default=1.0)
        The standard deviation of the clusters.
    center_box : pair of floats (min, max), optional (default=(-10.0, 10.0))
        The bounding box for each cluster center when centers are
        generated at random.
    shuffle : boolean, optional (default=True)
        Shuffle the samples.
    random_state : int, RandomState instance or None, optional (default=None)
        Determines random number generation for dataset creation.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The generated samples.
    y : array of shape [n_samples]
        The integer labels for cluster membership of each sample.

    See also
    --------
    make_classification: a more intricate variant
    """
    generator = check_random_state(random_state)

    if isinstance(centers, numbers.Integral):
        centers = generator.uniform(center_box[0], center_box[1],
                                    size=(centers, n_features))
    else:
        centers = check_array(centers)
        n_features = centers.shape[1]

    # Broadcast a scalar standard deviation to one value per center.
    if isinstance(cluster_std, numbers.Real):
        cluster_std = np.ones(len(centers)) * cluster_std

    n_centers = centers.shape[0]

    # Distribute samples as evenly as possible among the centers; the first
    # (n_samples % n_centers) centers get one extra point.
    counts = [int(n_samples // n_centers)] * n_centers
    for extra in range(n_samples % n_centers):
        counts[extra] += 1

    blocks = []
    labels = []
    for idx, (count, std) in enumerate(zip(counts, cluster_std)):
        blocks.append(centers[idx] + generator.normal(scale=std,
                                                      size=(count, n_features)))
        labels += [idx] * count

    X = np.concatenate(blocks)
    y = np.array(labels)

    if shuffle:
        order = np.arange(n_samples)
        generator.shuffle(order)
        X = X[order]
        y = y[order]

    return X, y
def make_friedman1(n_samples=100, n_features=10, noise=0.0, random_state=None):
    """Generate the "Friedman #1" regression problem.

    Inputs `X` are independent features uniformly distributed on the interval
    [0, 1]. The output `y` is created according to the formula::

        y(X) = 10 * sin(pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2
               + 10 * X[:, 3] + 5 * X[:, 4] + noise * N(0, 1).

    Out of the `n_features` features, only 5 are actually used to compute
    `y`; the remaining features are independent of `y`.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.
    n_features : int, optional (default=10)
        The number of features. Should be at least 5.
    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise applied to the output.
    random_state : int, RandomState instance or None, optional (default=None)
        Determines random number generation for dataset creation and noise.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The input samples.
    y : array of shape [n_samples]
        The output values.

    References
    ----------
    .. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
           of Statistics 19 (1), pages 1-67, 1991.
    .. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
           pages 123-140, 1996.
    """
    if n_features < 5:
        raise ValueError("n_features must be at least five.")

    generator = check_random_state(random_state)
    X = generator.rand(n_samples, n_features)

    y = (10 * np.sin(np.pi * X[:, 0] * X[:, 1])
         + 20 * (X[:, 2] - 0.5) ** 2
         + 10 * X[:, 3]
         + 5 * X[:, 4]
         + noise * generator.randn(n_samples))
    return X, y
def make_friedman2(n_samples=100, noise=0.0, random_state=None):
    """Generate the "Friedman #2" regression problem.

    Inputs `X` are 4 independent features uniformly distributed on the
    intervals::

        0 <= X[:, 0] <= 100,
        40 * pi <= X[:, 1] <= 560 * pi,
        0 <= X[:, 2] <= 1,
        1 <= X[:, 3] <= 11.

    The output `y` is created according to the formula::

        y(X) = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2]
                - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 + noise * N(0, 1).

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.
    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise applied to the output.
    random_state : int, RandomState instance or None, optional (default=None)
        Determines random number generation for dataset creation and noise.

    Returns
    -------
    X : array of shape [n_samples, 4]
        The input samples.
    y : array of shape [n_samples]
        The output values.

    References
    ----------
    .. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
           of Statistics 19 (1), pages 1-67, 1991.
    .. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
           pages 123-140, 1996.
    """
    generator = check_random_state(random_state)

    # Draw uniformly in [0, 1] and rescale each column onto its interval.
    X = generator.rand(n_samples, 4)
    X[:, 0] = X[:, 0] * 100
    X[:, 1] = X[:, 1] * (520 * np.pi) + 40 * np.pi
    X[:, 3] = X[:, 3] * 10 + 1

    cross_term = X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])
    y = (X[:, 0] ** 2 + cross_term ** 2) ** 0.5 \
        + noise * generator.randn(n_samples)
    return X, y
def make_friedman3(n_samples=100, noise=0.0, random_state=None):
    """Generate the "Friedman #3" regression problem.

    Inputs `X` are 4 independent features uniformly distributed on the
    intervals::

        0 <= X[:, 0] <= 100,
        40 * pi <= X[:, 1] <= 560 * pi,
        0 <= X[:, 2] <= 1,
        1 <= X[:, 3] <= 11.

    The output `y` is created according to the formula::

        y(X) = arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3]))
               / X[:, 0]) + noise * N(0, 1).

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.
    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise applied to the output.
    random_state : int, RandomState instance or None, optional (default=None)
        Determines random number generation for dataset creation and noise.

    Returns
    -------
    X : array of shape [n_samples, 4]
        The input samples.
    y : array of shape [n_samples]
        The output values.

    References
    ----------
    .. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
           of Statistics 19 (1), pages 1-67, 1991.
    .. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
           pages 123-140, 1996.
    """
    generator = check_random_state(random_state)

    # Draw uniformly in [0, 1] and rescale each column onto its interval.
    X = generator.rand(n_samples, 4)
    X[:, 0] = X[:, 0] * 100
    X[:, 1] = X[:, 1] * (520 * np.pi) + 40 * np.pi
    X[:, 3] = X[:, 3] * 10 + 1

    cross_term = X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])
    y = np.arctan(cross_term / X[:, 0]) + noise * generator.randn(n_samples)
    return X, y
def make_low_rank_matrix(n_samples=100, n_features=100, effective_rank=10,
                         tail_strength=0.5, random_state=None):
    """Generate a mostly low rank matrix with bell-shaped singular values.

    Most of the variance can be explained by a bell-shaped curve of width
    effective_rank: the low rank part of the singular values profile is::

        (1 - tail_strength) * exp(-1.0 * (i / effective_rank) ** 2)

    The remaining singular values' tail is fat, decreasing as::

        tail_strength * exp(-0.1 * i / effective_rank).

    The low rank part of the profile can be considered the structured signal
    part of the data while the tail can be considered the noisy part that
    cannot be summarized by a low number of linear components (singular
    vectors).  Such singular profiles are often seen in practice, for
    instance in gray level pictures of faces or TF-IDF vectors of text
    documents crawled from the web.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.
    n_features : int, optional (default=100)
        The number of features.
    effective_rank : int, optional (default=10)
        The approximate number of singular vectors required to explain most of
        the data by linear combinations.
    tail_strength : float between 0.0 and 1.0, optional (default=0.5)
        The relative importance of the fat noisy tail of the singular values
        profile.
    random_state : int, RandomState instance or None, optional (default=None)
        Determines random number generation for dataset creation.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The matrix.
    """
    generator = check_random_state(random_state)
    n = min(n_samples, n_features)

    # Random orthonormal bases for the left and right singular vectors.
    u, _ = linalg.qr(generator.randn(n_samples, n), mode='economic')
    v, _ = linalg.qr(generator.randn(n_features, n), mode='economic')

    # Assemble the singular-value profile from a bell-shaped signal component
    # and a slowly decaying noise tail.
    index = np.arange(n, dtype=np.float64)
    signal = ((1 - tail_strength) *
              np.exp(-1.0 * (index / effective_rank) ** 2))
    tail = tail_strength * np.exp(-0.1 * index / effective_rank)
    s = np.diag(signal + tail)

    return np.dot(np.dot(u, s), v.T)
def make_sparse_coded_signal(n_samples, n_components, n_features,
                             n_nonzero_coefs, random_state=None):
    """Generate a signal as a sparse combination of dictionary elements.

    Returns a matrix Y = DX, such as D is (n_features, n_components),
    X is (n_components, n_samples) and each column of X has exactly
    n_nonzero_coefs non-zero elements.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int
        number of samples to generate
    n_components : int
        number of components in the dictionary
    n_features : int
        number of features of the dataset to generate
    n_nonzero_coefs : int
        number of active (non-zero) coefficients in each sample
    random_state : int or RandomState instance, optional (default=None)
        seed used by the pseudo random number generator

    Returns
    -------
    data : array of shape [n_features, n_samples]
        The encoded signal (Y).
    dictionary : array of shape [n_features, n_components]
        The dictionary with normalized components (D).
    code : array of shape [n_components, n_samples]
        The sparse code such that each column of this matrix has exactly
        n_nonzero_coefs non-zero items (X).
    """
    generator = check_random_state(random_state)

    # generate dictionary with unit-norm columns
    D = generator.randn(n_features, n_components)
    D /= np.sqrt(np.sum((D ** 2), axis=0))

    # generate code: each sample activates a random subset of
    # n_nonzero_coefs atoms
    X = np.zeros((n_components, n_samples))
    for i in range(n_samples):
        idx = np.arange(n_components)
        generator.shuffle(idx)
        idx = idx[:n_nonzero_coefs]
        X[idx, i] = generator.randn(n_nonzero_coefs)

    # encode signal
    Y = np.dot(D, X)

    # BUG FIX: on Python 3 ``map`` returns a one-shot iterator, so callers
    # could not index the result or iterate it twice.  Return a concrete
    # tuple instead; tuple unpacking by existing callers is unaffected.
    return tuple(np.squeeze(arr) for arr in (Y, D, X))
def make_sparse_uncorrelated(n_samples=100, n_features=10, random_state=None):
    """Generate a random regression problem with sparse uncorrelated design.

    This dataset is described in Celeux et al [1]. as::

        X ~ N(0, 1)
        y(X) = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]

    Only the first 4 features are informative; the remaining features are
    useless.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.
    n_features : int, optional (default=10)
        The number of features.
    random_state : int, RandomState instance or None, optional (default=None)
        Determines random number generation for dataset creation.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The input samples.
    y : array of shape [n_samples]
        The output values.

    References
    ----------
    .. [1] G. Celeux, M. El Anbari, J.-M. Marin, C. P. Robert,
           "Regularization in regression: comparing Bayesian and frequentist
           methods in a poorly informative situation", 2009.
    """
    generator = check_random_state(random_state)

    X = generator.normal(loc=0, scale=1, size=(n_samples, n_features))
    # Targets are normal around a linear combination of the first 4 features.
    target_mean = (X[:, 0] +
                   2 * X[:, 1] -
                   2 * X[:, 2] -
                   1.5 * X[:, 3])
    y = generator.normal(loc=target_mean, scale=np.ones(n_samples))
    return X, y
def make_spd_matrix(n_dim, random_state=None):
    """Generate a random symmetric, positive-definite matrix.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_dim : int
        The matrix dimension.
    random_state : int, RandomState instance or None, optional (default=None)
        Determines random number generation for dataset creation.

    Returns
    -------
    X : array of shape [n_dim, n_dim]
        The random symmetric, positive-definite matrix.

    See also
    --------
    make_sparse_spd_matrix
    """
    rng = check_random_state(random_state)

    seed_matrix = rng.rand(n_dim, n_dim)
    U, _, Vt = linalg.svd(np.dot(seed_matrix.T, seed_matrix))
    # Re-assemble with singular values shifted into [1, 2] to guarantee
    # positive definiteness.
    spd = np.dot(np.dot(U, 1.0 + np.diag(rng.rand(n_dim))), Vt)
    return spd
def make_sparse_spd_matrix(dim=1, alpha=0.95, norm_diag=False,
                           smallest_coef=.1, largest_coef=.9,
                           random_state=None):
    """Generate a sparse symmetric definite positive matrix.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    dim : integer, optional (default=1)
        The size of the random matrix to generate.
    alpha : float between 0 and 1, optional (default=0.95)
        The probability that a coefficient is non zero (see notes).
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    largest_coef : float between 0 and 1, optional (default=0.9)
        The value of the largest coefficient.
    smallest_coef : float between 0 and 1, optional (default=0.1)
        The value of the smallest coefficient.
    norm_diag : boolean, optional (default=False)
        Whether to normalize the output matrix to make the leading diagonal
        elements all 1.

    Returns
    -------
    prec : sparse matrix of shape (dim, dim)
        The generated matrix.

    Notes
    -----
    The sparsity is actually imposed on the cholesky factor of the matrix.
    Thus alpha does not translate directly into the filling fraction of
    the matrix itself.

    See also
    --------
    make_spd_matrix
    """
    random_state = check_random_state(random_state)

    # Build a sparse lower-triangular Cholesky factor.  Starting from -I
    # gives the factor a full (non-zero) diagonal, so chol.T * chol is
    # guaranteed to be positive definite.
    chol = -np.eye(dim)
    aux = random_state.rand(dim, dim)
    # Zero out entries below the alpha threshold; rescale the survivors
    # uniformly into [smallest_coef, largest_coef].
    aux[aux < alpha] = 0
    aux[aux > alpha] = (smallest_coef
                        + (largest_coef - smallest_coef)
                        * random_state.rand(np.sum(aux > alpha)))
    # Keep only the strictly lower triangle so chol remains triangular.
    aux = np.tril(aux, k=-1)

    # Permute the lines: we don't want to have asymmetries in the final
    # SPD matrix
    permutation = random_state.permutation(dim)
    aux = aux[permutation].T[permutation]
    chol += aux
    # prec = chol^T chol is symmetric positive definite by construction.
    prec = np.dot(chol.T, chol)

    if norm_diag:
        # Form the diagonal vector into a row matrix
        d = np.diag(prec).reshape(1, prec.shape[0])
        d = 1. / np.sqrt(d)

        # Two rank-one scalings rescale prec to D^{-1/2} prec D^{-1/2},
        # making every diagonal element exactly 1.
        prec *= d
        prec *= d.T

    return prec
def make_swiss_roll(n_samples=100, noise=0.0, random_state=None):
    """Generate a swiss roll dataset.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of sample points on the S curve.
    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise.
    random_state : int, RandomState instance or None, optional (default=None)
        Determines random number generation for dataset creation and noise.

    Returns
    -------
    X : array of shape [n_samples, 3]
        The points.
    t : array of shape [n_samples]
        The univariate position of the sample according to the main dimension
        of the points in the manifold.

    Notes
    -----
    The algorithm is from Marsland [1].

    References
    ----------
    .. [1] S. Marsland, "Machine Learning: An Algorithmic Perspective",
           Chapter 10, 2009.
           http://www-ist.massey.ac.nz/smarsland/Code/10/lle.py
    """
    generator = check_random_state(random_state)

    # Roll parameter: uniform on [1.5*pi, 4.5*pi].
    t = 1.5 * np.pi * (1 + 2 * generator.rand(1, n_samples))

    x = t * np.cos(t)
    y = 21 * generator.rand(1, n_samples)   # depth along the roll axis
    z = t * np.sin(t)

    X = np.vstack((x, y, z))
    X += noise * generator.randn(3, n_samples)

    return X.T, np.squeeze(t)
def make_s_curve(n_samples=100, noise=0.0, random_state=None):
    """Generate an S curve dataset.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of sample points on the S curve.
    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise.
    random_state : int, RandomState instance or None, optional (default=None)
        Determines random number generation for dataset creation and noise.

    Returns
    -------
    X : array of shape [n_samples, 3]
        The points.
    t : array of shape [n_samples]
        The univariate position of the sample according to the main dimension
        of the points in the manifold.
    """
    generator = check_random_state(random_state)

    # Curve parameter: uniform on [-1.5*pi, 1.5*pi].
    t = 3 * np.pi * (generator.rand(1, n_samples) - 0.5)

    x = np.sin(t)
    y = 2.0 * generator.rand(1, n_samples)   # extent of the flat dimension
    z = np.sign(t) * (np.cos(t) - 1)

    X = np.vstack((x, y, z))
    X += noise * generator.randn(3, n_samples)

    return X.T, np.squeeze(t)
def make_gaussian_quantiles(mean=None, cov=1., n_samples=100,
                            n_features=2, n_classes=3,
                            shuffle=True, random_state=None):
    """Generate isotropic Gaussian and label samples by quantile.

    This classification dataset is constructed by taking a multi-dimensional
    standard normal distribution and defining classes separated by nested
    concentric multi-dimensional spheres such that roughly equal numbers of
    samples are in each class (quantiles of the :math:`\chi^2` distribution).

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    mean : array of shape [n_features], optional (default=None)
        The mean of the multi-dimensional normal distribution.
        If None then use the origin (0, 0, ...).
    cov : float, optional (default=1.)
        The covariance matrix will be this value times the unit matrix. This
        dataset only produces symmetric normal distributions.
    n_samples : int, optional (default=100)
        The total number of points equally divided among classes.
    n_features : int, optional (default=2)
        The number of features for each sample.
    n_classes : int, optional (default=3)
        The number of classes.
    shuffle : boolean, optional (default=True)
        Shuffle the samples.
    random_state : int, RandomState instance or None, optional (default=None)
        Determines random number generation for dataset creation.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The generated samples.
    y : array of shape [n_samples]
        The integer labels for quantile membership of each sample.

    Notes
    -----
    The dataset is from Zhu et al [1].

    References
    ----------
    .. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
    """
    if n_samples < n_classes:
        raise ValueError("n_samples must be at least n_classes")

    generator = check_random_state(random_state)

    mean = np.zeros(n_features) if mean is None else np.array(mean)

    # Draw from an isotropic multivariate normal centred at ``mean``.
    X = generator.multivariate_normal(mean, cov * np.identity(n_features),
                                      (n_samples,))

    # Order samples by squared distance to the mean so that class labels can
    # be assigned by quantile.
    order = np.argsort(np.sum((X - mean[np.newaxis, :]) ** 2, axis=1))
    X = X[order, :]

    # Each class gets ``step`` consecutive samples; any remainder is folded
    # into the outermost class.
    step = n_samples // n_classes
    y = np.hstack([np.repeat(np.arange(n_classes), step),
                   np.repeat(n_classes - 1, n_samples - step * n_classes)])

    if shuffle:
        X, y = util_shuffle(X, y, random_state=generator)

    return X, y
def _shuffle(data, random_state=None):
    # Shuffle the rows and the columns of ``data`` independently, returning
    # the shuffled array together with the row and column permutations used.
    generator = check_random_state(random_state)
    n_rows, n_cols = data.shape
    row_idx = generator.permutation(n_rows)
    col_idx = generator.permutation(n_cols)
    return data[row_idx][:, col_idx], row_idx, col_idx
def make_biclusters(shape, n_clusters, noise=0.0, minval=10,
                    maxval=100, shuffle=True, random_state=None):
    """Generate an array with constant block diagonal structure for
    biclustering.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    shape : iterable (n_rows, n_cols)
        The shape of the result.
    n_clusters : integer
        The number of biclusters.
    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise.
    minval : int, optional (default=10)
        Minimum value of a bicluster.
    maxval : int, optional (default=100)
        Maximum value of a bicluster.
    shuffle : boolean, optional (default=True)
        Shuffle the samples.
    random_state : int, RandomState instance or None, optional (default=None)
        Determines random number generation for dataset creation.

    Returns
    -------
    X : array of shape `shape`
        The generated array.
    rows : array of shape (n_clusters, X.shape[0],)
        The indicators for cluster membership of each row.
    cols : array of shape (n_clusters, X.shape[1],)
        The indicators for cluster membership of each column.

    References
    ----------
    .. [1] Dhillon, I. S. (2001, August). Co-clustering documents and
        words using bipartite spectral graph partitioning. In Proceedings
        of the seventh ACM SIGKDD international conference on Knowledge
        discovery and data mining (pp. 269-274). ACM.

    See also
    --------
    make_checkerboard
    """
    generator = check_random_state(random_state)
    n_rows, n_cols = shape
    consts = generator.uniform(minval, maxval, n_clusters)

    # row and column clusters of approximately equal sizes
    row_sizes = generator.multinomial(n_rows,
                                      np.repeat(1.0 / n_clusters,
                                                n_clusters))
    col_sizes = generator.multinomial(n_cols,
                                      np.repeat(1.0 / n_clusters,
                                                n_clusters))

    row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
                                zip(range(n_clusters), row_sizes)))
    col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
                                zip(range(n_clusters), col_sizes)))

    # Each bicluster is the outer product of its row and column indicators.
    result = np.zeros(shape, dtype=np.float64)
    for i in range(n_clusters):
        selector = np.outer(row_labels == i, col_labels == i)
        result[selector] += consts[i]

    if noise > 0:
        result += generator.normal(scale=noise, size=result.shape)

    if shuffle:
        result, row_idx, col_idx = _shuffle(result, random_state)
        row_labels = row_labels[row_idx]
        col_labels = col_labels[col_idx]

    # BUG FIX: np.vstack requires a sequence of arrays; passing a bare
    # generator expression is deprecated since NumPy 1.16 and raises an
    # error on recent NumPy releases.
    rows = np.vstack([row_labels == c for c in range(n_clusters)])
    cols = np.vstack([col_labels == c for c in range(n_clusters)])

    return result, rows, cols
def make_checkerboard(shape, n_clusters, noise=0.0, minval=10,
                      maxval=100, shuffle=True, random_state=None):
    """Generate an array with block checkerboard structure for
    biclustering.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    shape : iterable (n_rows, n_cols)
        The shape of the result.
    n_clusters : integer or iterable (n_row_clusters, n_column_clusters)
        The number of row and column clusters.
    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise.
    minval : int, optional (default=10)
        Minimum value of a bicluster.
    maxval : int, optional (default=100)
        Maximum value of a bicluster.
    shuffle : boolean, optional (default=True)
        Shuffle the samples.
    random_state : int, RandomState instance or None, optional (default=None)
        Determines random number generation for dataset creation.

    Returns
    -------
    X : array of shape `shape`
        The generated array.
    rows : array of shape (n_clusters, X.shape[0],)
        The indicators for cluster membership of each row.
    cols : array of shape (n_clusters, X.shape[1],)
        The indicators for cluster membership of each column.

    References
    ----------
    .. [1] Kluger, Y., Basri, R., Chang, J. T., & Gerstein, M. (2003).
        Spectral biclustering of microarray data: coclustering genes
        and conditions. Genome research, 13(4), 703-716.

    See also
    --------
    make_biclusters
    """
    generator = check_random_state(random_state)

    if hasattr(n_clusters, "__len__"):
        n_row_clusters, n_col_clusters = n_clusters
    else:
        n_row_clusters = n_col_clusters = n_clusters

    # row and column clusters of approximately equal sizes
    n_rows, n_cols = shape
    row_sizes = generator.multinomial(n_rows,
                                      np.repeat(1.0 / n_row_clusters,
                                                n_row_clusters))
    col_sizes = generator.multinomial(n_cols,
                                      np.repeat(1.0 / n_col_clusters,
                                                n_col_clusters))

    row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
                                zip(range(n_row_clusters), row_sizes)))
    col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
                                zip(range(n_col_clusters), col_sizes)))

    # Fill each (row cluster, column cluster) block with its own constant.
    result = np.zeros(shape, dtype=np.float64)
    for i in range(n_row_clusters):
        for j in range(n_col_clusters):
            selector = np.outer(row_labels == i, col_labels == j)
            result[selector] += generator.uniform(minval, maxval)

    if noise > 0:
        result += generator.normal(scale=noise, size=result.shape)

    if shuffle:
        result, row_idx, col_idx = _shuffle(result, random_state)
        row_labels = row_labels[row_idx]
        col_labels = col_labels[col_idx]

    # BUG FIX: np.vstack requires a sequence of arrays; passing a bare
    # generator expression is deprecated since NumPy 1.16 and raises an
    # error on recent NumPy releases.
    rows = np.vstack([row_labels == label
                      for label in range(n_row_clusters)
                      for _ in range(n_col_clusters)])
    cols = np.vstack([col_labels == label
                      for _ in range(n_row_clusters)
                      for label in range(n_col_clusters)])

    return result, rows, cols
| bsd-3-clause |
John-Jumper/Upside-MD | py/sim_timeseries.py | 1 | 5583 | #!/usr/bin/env python
from multiprocessing import Pool
import numpy as np
import tables as tb
import collections
from glob import glob
import re
import sys,os
import cPickle as cp
from glob import glob
import pandas as pd
from gzip import GzipFile
import time
# Make the local Upside source tree importable without a package install.
upside_dir = os.path.expanduser('~/upside/')
if upside_dir + 'src' not in sys.path: sys.path = [upside_dir+'src'] + sys.path
import run_upside as ru

# Degrees-to-radians conversion factor.
deg = np.pi/180.
def process_file(a):
    """Summarize one Upside trajectory .h5 file as a pandas DataFrame.

    Parameters (packed into the single tuple ``a`` for multiprocessing.Pool):
        x              -- path to the trajectory .h5 file
        skip           -- analyze every ``skip``-th frame
        equil_fraction -- fraction of frames labelled 'equilibration'
        do_traj        -- if True, also store CA positions in a 'pos' column

    Returns a DataFrame with one row per analyzed frame, or None when the
    file has no output groups or cannot be read after three attempts.
    """
    x,skip,equil_fraction,do_traj = a
    # Protein name is taken from the filename prefix (before the first '_').
    protein = os.path.basename(x).split('_')[0]
    # Retry loop: the file may be written concurrently by a running Upside
    # job, so a transient read failure is expected.
    for n_try in range(3):
        try:
            with tb.open_file(x) as t:
                # print t.root._v_children.keys(), 'output' in t.root, 'output_previous_0' in t.root
                # Collect output groups in chronological order: restarted runs
                # store earlier segments as output_previous_0, _1, ... followed
                # by the current 'output' group.
                output_names = []
                i = 0
                while 'output_previous_%i'%i in t.root:
                    output_names.append('output_previous_%i'%i)
                    i += 1
                if 'output' in t.root:
                    output_names.append('output')
                if not output_names:
                    return None

                # Concatenate segments, offsetting each segment's time axis so
                # simulation time is continuous across restarts.
                last_time = 0.
                df_list = []
                for onm in output_names:
                    sl = slice(skip,None,skip)
                    n = t.get_node('/'+onm)
                    sim_time = n.time[sl] + last_time
                    last_time = sim_time[-1]
                    pos=n.pos[sl,0]
                    pot=n.potential[sl,0]
                    T=n.temperature[0,0]
                    df = pd.DataFrame(dict(
                        time=sim_time,
                        energy=pot,
                        N_res = pos.shape[1]//3,   # 3 backbone atoms per residue
                        protein=protein,
                        initial="init_"+str(t.root.input.args._v_attrs.initial_structures),
                        T=T+np.zeros_like(pot),
                        Temp=np.array(['T=%.3f'%T]*len(sim_time)),
                        HBond=0.5*(n.hbond[sl]>0.05).sum(axis=1),  # 0.5 takes care of double counting
                        Rg = np.sqrt(np.var(pos,axis=1).sum(axis=-1)),
                        ))
                    # RMSD to the target structure, excluding 3 residues
                    # (9 atoms) at each terminus.
                    df['RMSD'] = ru.traj_rmsd(pos[:,9:-9],t.root.target.pos[9:-9])
                    if do_traj:
                        # copy in the position with the object dtype
                        df['pos'] = pd.Series(list(pos[:,1::3].astype('f4').copy()), dtype=np.object)
                    # Replica-exchange runs carry a replica_index array;
                    # otherwise treat the run as constant temperature.
                    if 'replica_index' in n:
                        df['replica'] = n.replica_index[sl,0]
                        df['method'] = 'replex'
                    else:
                        df['replica'] = 0
                        df['method'] = 'constantT'
                    print x, onm
                    df_list.append(df)
                df = pd.concat(df_list)
                df['filename'] = x
                df['frame'] = np.arange(len(df['time']))
                # Split frames into equilibration vs production phases.
                df['phase'] = np.where(((df['frame']<df['frame'].max()*equil_fraction).as_matrix()),
                                       'equilibration','production')
                return df
        except Exception as e:
            print e
            # There is a decent chance that the exception is due to Upside writing to the .h5 concurrently
            # We will try again after waiting to allow the .h5 to get a consistent state
            time.sleep(2)
            continue
    print x, 'FAILURE'
    return None
def main():
    """Command-line driver: analyze Upside trajectories in parallel and write
    a compressed CSV summary (and optionally an .h5 of CA trajectories)."""
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('-j', default=1, type=int, help = 'number of processes to use')
    parser.add_argument('--output-csv-gz', required=True, help='Path to output compressed CSV output')
    parser.add_argument('--output-traj-h5', default='', help='Path to output trajectories in .h5 format')
    parser.add_argument('--skip', default=32, type=int, help='Analyze every n-th frame (default 32)')
    parser.add_argument('--equil-fraction', default=1./3., type=float,
                        help='Fraction of simulation to call equilibration (default 0.333)')
    parser.add_argument('--exclude-pattern', default='',
                        help='regular expression pattern to exclude configs from analysis')
    parser.add_argument('configs', nargs='+', help='Upside trajectories to analyze')
    args = parser.parse_args()
    print args.configs
    # Trajectory extraction is only requested when an output path is given.
    do_traj = bool(args.output_traj_h5)

    # Optionally filter out configs matching the exclusion regex.
    if args.exclude_pattern:
        configs = [x for x in args.configs if not re.search(args.exclude_pattern, x)]
    else:
        configs = list(args.configs)

    # Fan out one process_file call per trajectory across the worker pool.
    pool = Pool(processes=args.j)
    all_output = list(pool.map(process_file, [(c,args.skip,args.equil_fraction,do_traj) for c in configs]))
    # Drop trajectories that failed to read (process_file returned None).
    df = pd.concat([x for x in all_output if x is not None], ignore_index=True)
    print 'number of read failures', len([x for x in all_output if x is None])
    print df.index

    if do_traj:
        import tables as tb
        # One HDF5 group per protein, each holding the CA trajectory array and
        # the matching DataFrame row indices.
        with tb.open_file(args.output_traj_h5,'w') as t:
            filt = tb.Filters(complib='zlib', complevel=5, fletcher32=True)
            for protein, df_protein in df.groupby('protein'):
                print 'traj', protein
                g = t.create_group(t.root, protein)
                t.create_earray(g, 'traj',  obj=np.array(list(df_protein.pos), dtype='f4'), filters=filt)
                t.create_earray(g, 'index', obj=np.array(df_protein.index.values, dtype='i4'), filters=filt)
        del df['pos']  # do not put position in CSV file

    print 'CSV output'
    with GzipFile(args.output_csv_gz,'wt') as f:
        df.to_csv(f)

if __name__ == '__main__':
    main()
| gpl-2.0 |
jmuhlich/pysb | pysb/tests/test_simulator_scipy.py | 5 | 19613 | from pysb.testing import *
import sys
import copy
import numpy as np
from pysb import Monomer, Parameter, Initial, Observable, Rule, Expression
from pysb.simulator import ScipyOdeSimulator, InconsistentParameterError
from pysb.simulator.scipyode import CythonRhsBuilder
from pysb.examples import robertson, earm_1_0, tyson_oscillator, localfunc
import unittest
import pandas as pd
class TestScipySimulatorBase(object):
    """Shared fixture for ScipyOdeSimulator tests.

    Builds a 3-species synthesis/binding model (A, B, A:B complex) and a
    'vode' simulator over a short timespan.

    NOTE(review): the @with_model decorator appears to inject `model` into
    the method namespace (used on the `self.model = model` line) — confirm
    against pysb.testing.
    """
    @with_model
    def setUp(self):
        Monomer('A', ['a'])
        Monomer('B', ['b'])
        Parameter('ksynthA', 100)
        Parameter('ksynthB', 100)
        Parameter('kbindAB', 100)
        Parameter('A_init', 0)
        Parameter('B_init', 0)
        Initial(A(a=None), A_init)
        Initial(B(b=None), B_init)
        Observable("A_free", A(a=None))
        Observable("B_free", B(b=None))
        Observable("AB_complex", A(a=1) % B(b=1))
        Rule('A_synth', None >> A(a=None), ksynthA)
        Rule('B_synth', None >> B(b=None), ksynthB)
        Rule('AB_bind', A(a=None) + B(b=None) >> A(a=1) % B(b=1), kbindAB)
        self.model = model

        # Convenience shortcut for accessing model monomer objects
        self.mon = lambda m: self.model.monomers[m]

        # This timespan is chosen to be enough to trigger a Jacobian evaluation
        # on the various solvers.
        self.time = np.linspace(0, 1)
        self.sim = ScipyOdeSimulator(self.model, tspan=self.time,
                                     integrator='vode')

    def tearDown(self):
        # Drop fixture references so each test starts fresh.
        self.model = None
        self.time = None
        self.sim = None
class TestScipySimulatorSingle(TestScipySimulatorBase):
    """Single-simulation behavior: integrator choice, and the accepted
    formats for `initials` and `param_values` (list, ndarray, dict,
    DataFrame), including the expected failure modes."""

    def test_vode_solver_run(self):
        """Test vode."""
        simres = self.sim.run()
        assert simres._nsims == 1

    @raises(ValueError)
    def test_invalid_init_kwarg(self):
        # Unknown constructor keyword arguments must be rejected.
        ScipyOdeSimulator(self.model, tspan=self.time, spam='eggs')

    def test_lsoda_solver_run(self):
        """Test lsoda."""
        solver_lsoda = ScipyOdeSimulator(self.model, tspan=self.time,
                                         integrator='lsoda')
        solver_lsoda.run()

    def test_lsoda_jac_solver_run(self):
        """Test lsoda and analytic jacobian."""
        solver_lsoda_jac = ScipyOdeSimulator(self.model, tspan=self.time,
                                             integrator='lsoda',
                                             use_analytic_jacobian=True)
        solver_lsoda_jac.run()

    def test_y0_as_list(self):
        """Test y0 with list of initial conditions"""
        # Test the initials getter method before anything is changed
        assert np.allclose(
            self.sim.initials[0][0:2],
            [ic.value.value for ic in self.model.initials]
        )

        initials = [10, 20, 0]
        simres = self.sim.run(initials=initials)
        assert np.allclose(simres.initials[0], initials)
        assert np.allclose(simres.observables['A_free'][0], 10)

    def test_y0_as_ndarray(self):
        """Test y0 with numpy ndarray of initial conditions"""
        simres = self.sim.run(initials=np.asarray([10, 20, 0]))
        assert np.allclose(simres.observables['A_free'][0], 10)

    def test_y0_as_dictionary_monomer_species(self):
        """Test y0 with model-defined species."""
        self.sim.initials = {self.mon('A')(a=None): 17}
        base_initials = self.sim.initials
        assert base_initials[0][0] == 17

        simres = self.sim.run(initials={self.mon('A')(a=None): 10,
                                        self.mon('B')(b=1) % self.mon('A')(a=1): 0,
                                        self.mon('B')(b=None): 0})
        assert np.allclose(simres.initials, [10, 0, 0])
        assert np.allclose(simres.observables['A_free'][0], 10)

        # Initials should reset to base values
        assert np.allclose(self.sim.initials, base_initials)

    def test_y0_as_dictionary_with_bound_species(self):
        """Test y0 with dynamically generated species."""
        simres = self.sim.run(initials={self.mon('A')(a=None): 0,
                                        self.mon('B')(b=1) % self.mon('A')(a=1): 100,
                                        self.mon('B')(b=None): 0})
        assert np.allclose(simres.observables['AB_complex'][0], 100)

    def test_y0_as_dataframe(self):
        # One column per species pattern, one row per simulation.
        initials_dict = {self.mon('A')(a=None): [0],
                         self.mon('B')(b=1) % self.mon('A')(a=1): [100],
                         self.mon('B')(b=None): [0]}
        initials_df = pd.DataFrame(initials_dict)
        simres = self.sim.run(initials=initials_df)
        assert np.allclose(simres.observables['AB_complex'][0], 100)

    @raises(ValueError)
    def test_y0_as_pandas_series(self):
        # A Series (unlike a DataFrame) is not an accepted initials format.
        self.sim.run(initials=pd.Series())

    @raises(TypeError)
    def test_y0_non_numeric_value(self):
        """Test y0 with non-numeric value."""
        self.sim.run(initials={self.mon('A')(a=None): 'eggs'})

    def test_param_values_as_dictionary(self):
        """Test param_values as a dictionary."""
        simres = self.sim.run(param_values={'kbindAB': 0})
        # kbindAB=0 should ensure no AB_complex is produced.
        assert np.allclose(simres.observables["AB_complex"], 0)

    def test_param_values_as_dataframe(self):
        simres = self.sim.run(param_values=pd.DataFrame({'kbindAB': [0]}))
        assert np.allclose(simres.observables['AB_complex'], 0)

    @raises(ValueError)
    def test_param_values_as_pandas_series(self):
        self.sim.run(param_values=pd.Series())

    def test_param_values_as_list_ndarray(self):
        """Test param_values as a list and ndarray."""
        orig_param_values = self.sim.param_values
        param_values = [50, 60, 70, 0, 0]
        self.sim.param_values = param_values
        simres = self.sim.run()
        assert np.allclose(self.sim.param_values, param_values)
        assert np.allclose(simres.param_values, param_values)

        # Reset to original param values
        self.sim.param_values = orig_param_values

        # Same thing, but with a numpy array, applied as a run argument
        param_values = np.asarray([55, 65, 75, 0, 0])
        simres = self.sim.run(param_values=param_values)
        assert np.allclose(simres.param_values, param_values)

        # param_values should reset to originals after the run
        assert np.allclose(self.sim.param_values, orig_param_values)

    @raises(IndexError)
    def test_param_values_invalid_dictionary_key(self):
        """Test param_values with invalid parameter name."""
        self.sim.run(param_values={'spam': 150})

    @raises(ValueError, TypeError)
    def test_param_values_non_numeric_value(self):
        """Test param_values with non-numeric value."""
        self.sim.run(param_values={'ksynthA': 'eggs'})

    def test_result_dataframe(self):
        # Smoke test: the dataframe property must be constructible.
        df = self.sim.run().dataframe
class TestScipyOdeCompilerTests(TestScipySimulatorBase):
    """Test vode and analytic jacobian with different compiler backends"""
    def setUp(self):
        super(TestScipyOdeCompilerTests, self).setUp()
        # Common constructor arguments shared by all backends under test.
        self.args = {'model': self.model,
                     'tspan': self.time,
                     'integrator': 'vode',
                     'use_analytic_jacobian': True}

        # The pure-python backend serves as the reference result.
        self.python_sim = ScipyOdeSimulator(compiler='python', **self.args)
        self.python_res = self.python_sim.run()

    def test_cython(self):
        # The cython backend must reproduce the python backend's results.
        sim = ScipyOdeSimulator(compiler='cython', **self.args)
        simres = sim.run()
        assert simres.species.shape[0] == self.args['tspan'].shape[0]
        assert np.allclose(self.python_res.dataframe, simres.dataframe)
class TestScipySimulatorSequential(TestScipySimulatorBase):
    """Sequential runs on one simulator instance: per-run overrides of
    initials, param_values and tspan must apply to that run only and be
    reverted on the simulator afterwards."""

    def test_sequential_initials(self):
        simres = self.sim.run()
        orig_initials = self.sim.initials

        new_initials = [10, 20, 30]
        simres = self.sim.run(initials=new_initials)

        # Check that single-run initials applied properly to the result
        assert np.allclose(simres.species[0], new_initials)
        assert np.allclose(simres.initials, new_initials)
        # Check that the single-run initials were removed after the run
        assert np.allclose(self.sim.initials, orig_initials)

    def test_sequential_initials_dict_then_list(self):
        A, B = self.model.monomers

        base_sim = ScipyOdeSimulator(
            self.model,
            initials={A(a=None): 10, B(b=None): 20})

        assert np.allclose(base_sim.initials, [10, 20, 0])
        assert len(base_sim.initials_dict) == 2

        # Now set initials using a list, which should overwrite the dict
        base_sim.initials = [30, 40, 50]

        assert np.allclose(base_sim.initials, [30, 40, 50])
        assert np.allclose(
            sorted([x[0] for x in base_sim.initials_dict.values()]),
            base_sim.initials)

    def test_sequential_param_values(self):
        orig_param_values = self.sim.param_values
        new_param_values = {'kbindAB': 0}
        new_initials = [15, 25, 35]
        simres = self.sim.run(param_values=new_param_values,
                              initials=new_initials)

        # No new AB_complex should be formed
        assert np.allclose(simres.observables['AB_complex'], new_initials[2])
        assert simres.nsims == 1

        # Original param_values should be restored after run
        assert np.allclose(self.sim.param_values, orig_param_values)

        # Check that per-run param override works when a base param
        # dictionary is also specified
        self.sim.param_values = new_param_values
        base_param_values = new_param_values
        new_param_values = {'ksynthB': 50}
        simres = self.sim.run(param_values=new_param_values)
        # Check that new param value override applied
        assert np.allclose(simres.param_values[0][1],
                           new_param_values['ksynthB'])
        # Check that simulator reverts to base param values
        assert np.allclose(self.sim.param_values[0][2],
                           base_param_values['kbindAB'])

        # Reset to original param values
        self.sim.param_values = orig_param_values

    def test_sequential_tspan(self):
        tspan = np.linspace(0, 10, 11)
        orig_tspan = self.sim.tspan
        simres = self.sim.run(tspan=tspan)

        # Check that new tspan applied properly
        assert np.allclose(simres.tout, tspan)

        # Check that simulator instance reset to original tspan
        assert np.allclose(self.sim.tspan, orig_tspan)
class TestScipySimulatorMultiple(TestScipySimulatorBase):
    """Multi-simulation runs: lists/dicts of per-simulation initials and
    parameter values, length-consistency validation, and parallel
    execution."""

    def test_initials_and_param_values_two_lists(self):
        # Two simulations, each with its own initials and parameter set.
        initials = [[10, 20, 30], [50, 60, 70]]
        param_values = [[55, 65, 75, 0, 0],
                        [90, 100, 110, 5, 6]]
        import pysb.bng
        pysb.bng.generate_equations(self.sim.model)
        simres = self.sim.run(initials=initials, param_values=param_values)
        assert np.allclose(simres.species[0][0], initials[0])
        assert np.allclose(simres.species[1][0], initials[1])
        assert np.allclose(simres.param_values[0], param_values[0])
        assert np.allclose(simres.param_values[1], param_values[1])
        assert simres.nsims == 2

        # Check the methods underlying these properties work
        df = simres.dataframe
        all = simres.all

        # Try overriding above lists of initials/params with dicts
        self.sim.initials = initials
        self.sim.param_values = param_values
        simres = self.sim.run(
            initials={self.mon('A')(a=None): [103, 104]},
            param_values={'ksynthA': [101, 102]})

        # Simulator initials and params should not persist run() overrides
        assert np.allclose(self.sim.initials, initials)
        assert np.allclose(self.sim.param_values, param_values)

        # Create the expected initials/params arrays and compare to result
        initials = np.array(initials)
        initials[:, 0] = [103, 104]
        param_values = np.array(param_values)
        param_values[:, 0] = [101, 102]
        assert np.allclose(simres.initials, initials)
        assert np.allclose(simres.param_values, param_values)

    @raises(ValueError)
    def test_run_initials_different_length_to_base(self):
        # run() initials must match the length set on the simulator.
        initials = [[10, 20, 30, 40], [50, 60, 70, 80]]
        self.sim.initials = initials
        self.sim.run(initials=initials[0])

    @raises(ValueError)
    def test_run_params_different_length_to_base(self):
        param_values = [[55, 65, 75, 0, 0, 1],
                        [90, 100, 110, 5, 6, 7]]
        self.sim.param_values = param_values
        self.sim.run(param_values=param_values[0])

    @raises(InconsistentParameterError)
    def test_run_params_inconsistent_parameter_list(self):
        # Negative parameter values are rejected as inconsistent.
        param_values = [55, 65, 75, 0, -3]
        self.sim.param_values = param_values
        self.sim.run(param_values=param_values[0])

    @raises(InconsistentParameterError)
    def test_run_params_inconsistent_parameter_dict(self):
        param_values = {'A_init': [0, -4]}
        self.sim.param_values = param_values
        self.sim.run(param_values=param_values[0])

    def test_param_values_dict(self):
        param_values = {'A_init': [0, 100]}
        initials = {self.model.monomers['B'](b=None): [250, 350]}

        simres = self.sim.run(param_values=param_values)
        assert np.allclose(simres.dataframe.loc[(slice(None), 0.0), 'A_free'],
                           [0, 100])

        simres = self.sim.run(param_values={'B_init': [200, 300]})
        assert np.allclose(simres.dataframe.loc[(slice(None), 0.0), 'A_free'],
                           [0, 0])
        assert np.allclose(simres.dataframe.loc[(slice(None), 0.0), 'B_free'],
                           [200, 300])

        simres = self.sim.run(initials=initials, param_values=param_values)
        assert np.allclose(simres.dataframe.loc[(slice(None), 0.0), 'A_free'],
                           [0, 100])
        assert np.allclose(simres.dataframe.loc[(slice(None), 0.0), 'B_free'],
                           [250, 350])

    @raises(ValueError)
    def test_initials_and_param_values_differing_lengths(self):
        # 2 sets of initials vs 3 parameter sets must be rejected.
        initials = [[10, 20, 30, 40], [50, 60, 70, 80]]
        param_values = [[55, 65, 75, 0, 0],
                        [90, 100, 110, 5, 6],
                        [90, 100, 110, 5, 6]]
        self.sim.run(initials=initials, param_values=param_values)

    @unittest.skipIf(sys.version_info.major < 3,
                     'Parallel execution requires Python >= 3.3')
    def test_parallel(self):
        # Nose-style generator test: yields one case per combination.
        for integrator in ('vode', 'lsoda'):
            for use_analytic_jacobian in (True, False):
                yield self._check_parallel, integrator, use_analytic_jacobian

    def _check_parallel(self, integrator, use_analytic_jacobian):
        # Parallel runs must reproduce the serial results exactly.
        initials = [[10, 20, 30], [50, 60, 70]]
        sim = ScipyOdeSimulator(
            self.model, self.sim.tspan,
            initials=initials,
            integrator=integrator,
            use_analytic_jacobian=use_analytic_jacobian
        )
        base_res = sim.run(initials=initials)

        res = sim.run(initials=initials, num_processors=2)
        assert np.allclose(res.species, base_res.species)
@with_model
def test_integrate_with_expression():
    """Ensure a model with Expressions simulates."""
    Monomer('s1')
    Monomer('s9')
    Monomer('s16')
    Monomer('s20')

    # Parameters should be able to contain s(\d+) without error
    Parameter('ks0',2e-5)
    Parameter('ka20', 1e5)

    Initial(s9(), Parameter('s9_0', 10000))
    Observable('s1_obs', s1())
    Observable('s9_obs', s9())
    Observable('s16_obs', s16())
    Observable('s20_obs', s20())

    # Expression depends on the s9_obs observable, so it must be
    # re-evaluated at every time point during integration.
    Expression('keff', (ks0*ka20)/(ka20+s9_obs))

    Rule('R1', None >> s16(), ks0)
    Rule('R2', None >> s20(), ks0)
    Rule('R3', s16() + s20() >> s16() + s1(), keff)

    time = np.linspace(0, 40)
    sim = ScipyOdeSimulator(model, tspan=time)
    simres = sim.run()
    keff_vals = simres.expressions['keff']
    assert len(keff_vals) == len(time)
    # s9 has no reactions, so s9_obs (and hence keff) stays constant:
    # keff = ks0*ka20/(ka20+10000) = 2e-5*1e5/1.1e5.
    assert np.allclose(keff_vals, 1.8181818181818182e-05)
def test_set_initial_to_zero():
    """Zeroing monomer A's initial amount keeps A_total at zero."""
    model = robertson.model
    tspan = np.linspace(0, 100)
    result = ScipyOdeSimulator(model, tspan=tspan).run(
        initials={model.monomers['A'](): 0})
    assert np.allclose(result.observables['A_total'], 0)
def test_robertson_integration():
    """The Robertson model integrates under every available compiler."""
    tspan = np.linspace(0, 100)
    backends = ['python']
    # The cython backend is optional; exercise it only when usable.
    if CythonRhsBuilder.check_safe():
        backends.append('cython')
    for backend in backends:
        result = ScipyOdeSimulator(
            robertson.model, tspan=tspan, compiler=backend).run()
        assert result.species.shape[0] == tspan.shape[0]
def test_earm_integration():
    """The EARM 1.0 model integrates with each available compiler."""
    tspan = np.linspace(0, 1e3)
    ScipyOdeSimulator(earm_1_0.model, tspan=tspan, compiler="python").run()
    # Cython backend is optional; exercise it only when usable.
    if CythonRhsBuilder.check_safe():
        ScipyOdeSimulator(earm_1_0.model, tspan=tspan, compiler="cython").run()
@raises(ValueError)
def test_simulation_no_tspan():
    """Running without a timespan anywhere must raise ValueError."""
    simulator = ScipyOdeSimulator(robertson.model)
    simulator.run()
@raises(UserWarning)
def test_nonexistent_integrator():
    """An unknown integrator name must trigger a UserWarning."""
    tspan = np.linspace(0, 1, 2)
    ScipyOdeSimulator(robertson.model, tspan=tspan,
                      integrator='does_not_exist')
def test_unicode_obsname_ascii():
    """ASCII-convertible unicode observable names are accepted."""
    tspan = np.linspace(0, 100)
    model_copy = copy.deepcopy(robertson.model)
    model_copy.observables[0].name = u'A_total'
    result = ScipyOdeSimulator(model_copy).run(tspan=tspan)
    # Accessing these properties exercises observable-name handling.
    result.all
    result.dataframe
# Non-ascii identifiers are only an error on Python 2, so this test is
# defined conditionally.
if sys.version_info[0] < 3:
    @raises(ValueError)
    def test_unicode_obsname_nonascii():
        """Ensure non-ascii unicode observable names error in python 2."""
        t = np.linspace(0, 100)
        rob_copy = copy.deepcopy(robertson.model)
        rob_copy.observables[0].name = u'A_total\u1234'
        sim = ScipyOdeSimulator(rob_copy)
        simres = sim.run(tspan=t)
def test_unicode_exprname_ascii():
    """ASCII-convertible unicode expression names are accepted."""
    tspan = np.linspace(0, 100)
    model_copy = copy.deepcopy(robertson.model)
    total = (model_copy.observables['A_total'] +
             model_copy.observables['B_total'])
    model_copy.add_component(Expression(u'A_plus_B', total, _export=False))
    result = ScipyOdeSimulator(model_copy).run(tspan=tspan)
    # Accessing these properties exercises expression-name handling.
    result.all
    result.dataframe
# As above: non-ascii identifiers only error on Python 2.
if sys.version_info[0] < 3:
    @raises(ValueError)
    def test_unicode_exprname_nonascii():
        """Ensure non-ascii unicode expression names error in python 2."""
        t = np.linspace(0, 100)
        rob_copy = copy.deepcopy(robertson.model)
        ab = rob_copy.observables['A_total'] + rob_copy.observables['B_total']
        expr = Expression(u'A_plus_B\u1234', ab, _export=False)
        rob_copy.add_component(expr)
        sim = ScipyOdeSimulator(rob_copy)
        simres = sim.run(tspan=t)
def test_multiprocessing_lambdify():
    """The lambdified (python) backend runs across multiple processes."""
    model = tyson_oscillator.model
    pars = [p.value for p in model.parameters]
    tspan = np.linspace(0, 100, 100)
    simulator = ScipyOdeSimulator(model, tspan=tspan, compiler='python',
                                  use_analytic_jacobian=True)
    simulator.run(param_values=[pars, pars], num_processors=2)
def test_lambdify_localfunc():
    """A model using local functions simulates under the python backend."""
    simulator = ScipyOdeSimulator(localfunc.model, tspan=range(100),
                                  compiler='python')
    simulator.run()
| bsd-2-clause |
ningchi/scikit-learn | examples/neural_networks/plot_rbm_logistic_classification.py | 258 | 4609 | """
==============================================================
Restricted Boltzmann Machine features for digit classification
==============================================================
For greyscale image data where pixel values can be interpreted as degrees of
blackness on a white background, like handwritten digit recognition, the
Bernoulli Restricted Boltzmann machine model (:class:`BernoulliRBM
<sklearn.neural_network.BernoulliRBM>`) can perform effective non-linear
feature extraction.
In order to learn good latent representations from a small dataset, we
artificially generate more labeled data by perturbing the training data with
linear shifts of 1 pixel in each direction.
This example shows how to build a classification pipeline with a BernoulliRBM
feature extractor and a :class:`LogisticRegression
<sklearn.linear_model.LogisticRegression>` classifier. The hyperparameters
of the entire model (learning rate, hidden layer size, regularization)
were optimized by grid search, but the search is not reproduced here because
of runtime constraints.
Logistic regression on raw pixel values is presented for comparison. The
example shows that the features extracted by the BernoulliRBM help improve the
classification accuracy.
"""
from __future__ import print_function
print(__doc__)
# Authors: Yann N. Dauphin, Vlad Niculae, Gabriel Synnaeve
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import convolve
from sklearn import linear_model, datasets, metrics
from sklearn.cross_validation import train_test_split
from sklearn.neural_network import BernoulliRBM
from sklearn.pipeline import Pipeline
###############################################################################
# Setting up
def nudge_dataset(X, Y):
    """Expand the dataset 5x by shifting each 8x8 image by 1 pixel.

    This produces a dataset 5 times bigger than the original one, by
    moving the 8x8 images in X around by 1px along each of the four
    cardinal directions.

    Parameters
    ----------
    X : ndarray of shape (n_samples, 64)
        Flattened 8x8 greyscale images.
    Y : ndarray of shape (n_samples,)
        Labels for each image.

    Returns
    -------
    X, Y : ndarrays with 5 * n_samples rows; the first n_samples rows
        of X are the originals, followed by the four shifted copies,
        and Y is repeated accordingly.
    """
    # One-hot 3x3 convolution kernels, each translating the image by one
    # pixel in a different direction.
    direction_vectors = [
        [[0, 1, 0],
         [0, 0, 0],
         [0, 0, 0]],

        [[0, 0, 0],
         [1, 0, 0],
         [0, 0, 0]],

        [[0, 0, 0],
         [0, 0, 1],
         [0, 0, 0]],

        [[0, 0, 0],
         [0, 0, 0],
         [0, 1, 0]]]

    # A named function instead of a lambda assignment (PEP 8 E731).
    def shift(x, w):
        # Convolving with a one-hot kernel translates the image; pixels
        # shifted off the edge are dropped (mode='constant' pads with 0).
        return convolve(x.reshape((8, 8)), mode='constant',
                        weights=w).ravel()

    X = np.concatenate([X] +
                       [np.apply_along_axis(shift, 1, X, vector)
                        for vector in direction_vectors])
    Y = np.concatenate([Y for _ in range(5)], axis=0)
    return X, Y
# Load Data
digits = datasets.load_digits()
X = np.asarray(digits.data, 'float32')
# Expand 5x with 1px-shifted copies, then scale features to [0, 1]
# (BernoulliRBM expects values in that range).
X, Y = nudge_dataset(X, digits.target)
X = (X - np.min(X, 0)) / (np.max(X, 0) + 0.0001)  # 0-1 scaling

X_train, X_test, Y_train, Y_test = train_test_split(X, Y,
                                                    test_size=0.2,
                                                    random_state=0)

# Models we will use
logistic = linear_model.LogisticRegression()
rbm = BernoulliRBM(random_state=0, verbose=True)

# RBM extracts features; logistic regression classifies on top of them.
classifier = Pipeline(steps=[('rbm', rbm), ('logistic', logistic)])

###############################################################################
# Training

# Hyper-parameters. These were set by cross-validation,
# using a GridSearchCV. Here we are not performing cross-validation to
# save time.
rbm.learning_rate = 0.06
rbm.n_iter = 20
# More components tend to give better prediction performance, but larger
# fitting time
rbm.n_components = 100
logistic.C = 6000.0

# Training RBM-Logistic Pipeline
classifier.fit(X_train, Y_train)

# Training Logistic regression
logistic_classifier = linear_model.LogisticRegression(C=100.0)
logistic_classifier.fit(X_train, Y_train)

###############################################################################
# Evaluation

print()
print("Logistic regression using RBM features:\n%s\n" % (
    metrics.classification_report(
        Y_test,
        classifier.predict(X_test))))

print("Logistic regression using raw pixel features:\n%s\n" % (
    metrics.classification_report(
        Y_test,
        logistic_classifier.predict(X_test))))

###############################################################################
# Plotting

# Show each of the 100 learned components as an 8x8 image on a 10x10 grid.
plt.figure(figsize=(4.2, 4))
for i, comp in enumerate(rbm.components_):
    plt.subplot(10, 10, i + 1)
    plt.imshow(comp.reshape((8, 8)), cmap=plt.cm.gray_r,
               interpolation='nearest')
    plt.xticks(())
    plt.yticks(())
plt.suptitle('100 components extracted by RBM', fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)

plt.show()
| bsd-3-clause |
PatrickChrist/scikit-learn | benchmarks/bench_isotonic.py | 268 | 3046 | """
Benchmarks of isotonic regression performance.
We generate a synthetic dataset of size 10^n, for n in [min, max], and
examine the time taken to run isotonic regression over the dataset.
The timings are then output to stdout, or visualized on a log-log scale
with matplotlib.
This alows the scaling of the algorithm with the problem size to be
visualized and understood.
"""
from __future__ import print_function
import numpy as np
import gc
from datetime import datetime
from sklearn.isotonic import isotonic_regression
from sklearn.utils.bench import total_seconds
import matplotlib.pyplot as plt
import argparse
def generate_perturbed_logarithm_dataset(size):
    """Return a noisy, roughly increasing sequence of length ``size``.

    The data is a logarithmic trend plus uniform integer noise in
    [-50, 50), a natural workload for isotonic regression.
    """
    # Bug fix: the original body referenced the global ``n`` from the
    # benchmark loop instead of the ``size`` parameter, so the function
    # raised NameError when called standalone.
    return np.random.randint(-50, 50, size=size) \
        + 50. * np.log(1 + np.arange(size))
def generate_logistic_dataset(size):
    """Draw ``size`` boolean samples from a logistic model over sorted
    Gaussian inputs."""
    X = np.sort(np.random.normal(size=size))
    # Logistic (sigmoid) success probability for each input.
    probability = 1.0 / (1.0 + np.exp(-X))
    return np.random.random(size=size) < probability
# Maps the --dataset CLI choices to their generator functions.
DATASET_GENERATORS = {
    'perturbed_logarithm': generate_perturbed_logarithm_dataset,
    'logistic': generate_logistic_dataset
}
def bench_isotonic_regression(Y):
    """
    Runs a single iteration of isotonic regression on the input data,
    and reports the total time taken (in seconds).
    """
    # Collect garbage up front so allocator work does not pollute the timing.
    gc.collect()
    start = datetime.now()
    isotonic_regression(Y)
    return total_seconds(datetime.now() - start)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description="Isotonic Regression benchmark tool")
    parser.add_argument('--iterations', type=int, required=True,
                        help="Number of iterations to average timings over "
                        "for each problem size")
    parser.add_argument('--log_min_problem_size', type=int, required=True,
                        help="Base 10 logarithm of the minimum problem size")
    parser.add_argument('--log_max_problem_size', type=int, required=True,
                        help="Base 10 logarithm of the maximum problem size")
    parser.add_argument('--show_plot', action='store_true',
                        help="Plot timing output with matplotlib")
    parser.add_argument('--dataset', choices=DATASET_GENERATORS.keys(),
                        required=True)

    args = parser.parse_args()

    timings = []
    # Sweep problem sizes 10^min .. 10^(max-1) (the upper bound is exclusive).
    for exponent in range(args.log_min_problem_size,
                          args.log_max_problem_size):
        n = 10 ** exponent
        Y = DATASET_GENERATORS[args.dataset](n)
        # Average over several runs to smooth timer noise.
        time_per_iteration = \
            [bench_isotonic_regression(Y) for i in range(args.iterations)]
        timing = (n, np.mean(time_per_iteration))
        timings.append(timing)

        # If we're not plotting, dump the timing to stdout
        if not args.show_plot:
            print(n, np.mean(time_per_iteration))

    if args.show_plot:
        # Log-log plot makes the algorithm's scaling exponent visible.
        plt.plot(*zip(*timings))
        plt.title("Average time taken running isotonic regression")
        plt.xlabel('Number of observations')
        plt.ylabel('Time (s)')
        plt.axis('tight')
        plt.loglog()
        plt.show()
| bsd-3-clause |
with-git/tensorflow | tensorflow/contrib/labeled_tensor/python/ops/ops.py | 77 | 46403 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Non-core ops for LabeledTensor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import types
import numpy as np
from six import string_types
from tensorflow.contrib.labeled_tensor.python.ops import _typecheck as tc
from tensorflow.contrib.labeled_tensor.python.ops import core
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import numerics
from tensorflow.python.ops import random_ops
from tensorflow.python.training import input # pylint: disable=redefined-builtin
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensor, ops.Tensor, core.Axis,
            tc.Optional(string_types))
def _gather_1d_on_axis(labeled_tensor, indexer, axis, name=None):
  """Gather entries along one named axis, preserving the axis order.

  Transposes the named axis to the front (tf.gather indexes dimension 0),
  gathers with `indexer`, then transposes back to the original layout.
  """
  with ops.name_scope(name, 'lt_take', [labeled_tensor]) as scope:
    # New axis order: the gathered axis first, remaining axes unchanged.
    temp_axes = core.Axes([axis] + list(
        labeled_tensor.axes.remove(axis.name).values()))
    transposed = core.transpose(labeled_tensor, temp_axes.keys())
    indexed = core.LabeledTensor(
        array_ops.gather(transposed.tensor, indexer), temp_axes)
    # Restore the caller's original axis order.
    return core.transpose(indexed, labeled_tensor.axes.keys(), name=scope)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
            tc.Mapping(string_types,
                       tc.Union(slice, collections.Hashable, list)),
            tc.Optional(string_types))
def select(labeled_tensor, selection, name=None):
  """Slice out a subset of the tensor.

  Args:
    labeled_tensor: The input tensor.
    selection: A dictionary mapping an axis name to a scalar, slice or list of
      values to select. Currently supports two types of selections:
        (a) Any number of scalar and/or slice selections.
        (b) Exactly one list selection, without any scalars or slices.
    name: Optional op name.

  Returns:
    The selection as a `LabeledTensor`.

  Raises:
    ValueError: If the tensor doesn't have an axis in the selection or if
      that axis lacks labels.
    KeyError: If any labels in a selection are not found in the original axis.
    NotImplementedError: If you attempt to combine a list selection with
      scalar selection or another list selection.
  """
  with ops.name_scope(name, 'lt_select', [labeled_tensor]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)

    # Translate each label-based selection into positional indices.
    slices = {}
    indexers = {}
    for axis_name, value in selection.items():
      if axis_name not in labeled_tensor.axes:
        raise ValueError(
            'The tensor does not have an axis named %s. Its axes are: %r' %
            (axis_name, labeled_tensor.axes.keys()))
      axis = labeled_tensor.axes[axis_name]
      if axis.labels is None:
        raise ValueError(
            'The axis named %s does not have labels. The axis is: %r' %
            (axis_name, axis))

      if isinstance(value, slice):
        # TODO(shoyer): consider deprecating using slices in favor of lists
        if value.start is None:
          start = None
        else:
          start = axis.index(value.start)

        if value.stop is None:
          stop = None
        else:
          # For now, follow the pandas convention of making labeled slices
          # inclusive of both bounds.
          stop = axis.index(value.stop) + 1

        if value.step is not None:
          raise NotImplementedError('slicing with a step is not yet supported')

        slices[axis_name] = slice(start, stop)

      # Needs to be after checking for slices, since slice objects claim to be
      # instances of collections.Hashable but hash() on them fails.
      elif isinstance(value, collections.Hashable):
        slices[axis_name] = axis.index(value)

      elif isinstance(value, list):
        if indexers:
          raise NotImplementedError(
              'select does not yet support more than one list selection at '
              'the same time')
        indexer = [axis.index(v) for v in value]
        indexers[axis_name] = ops.convert_to_tensor(indexer, dtype=dtypes.int64)

      else:
        # If type checking is working properly, this shouldn't be possible.
        raise TypeError('cannot handle arbitrary types')

    if indexers and slices:
      raise NotImplementedError(
          'select does not yet support combined scalar and list selection')

    # For now, handle array selection separately, because tf.gather_nd does
    # not support gradients yet. Later, using gather_nd will let us combine
    # these paths.
    if indexers:
      (axis_name, indexer), = indexers.items()
      axis = core.Axis(axis_name, selection[axis_name])
      return _gather_1d_on_axis(labeled_tensor, indexer, axis, name=scope)
    else:
      return core.slice_function(labeled_tensor, slices, name=scope)
@tc.returns(core.LabeledTensor)
@tc.accepts(
    tc.Collection(core.LabeledTensorLike), string_types,
    tc.Optional(string_types))
def concat(labeled_tensors, axis_name, name=None):
  """Concatenate tensors along a dimension.

  See tf.concat.

  Args:
    labeled_tensors: A list of input LabeledTensors.
    axis_name: The name of the axis along which to concatenate.
    name: Optional op name.

  Returns:
    The concatenated tensor.
    The coordinate labels for the concatenation dimension are also concatenated,
    if they are available for every tensor.

  Raises:
    ValueError: If fewer than one tensor inputs is provided, if the tensors
      have incompatible axes, or if `axis_name` isn't the name of an axis.
  """
  with ops.name_scope(name, 'lt_concat', labeled_tensors) as scope:
    labeled_tensors = [
        core.convert_to_labeled_tensor(lt) for lt in labeled_tensors
    ]

    if len(labeled_tensors) < 1:
      raise ValueError('concat expects at least 1 tensor, but received %s' %
                       labeled_tensors)

    # All tensors must have these axes.
    axes_0 = labeled_tensors[0].axes
    axis_names = list(axes_0.keys())

    if axis_name not in axis_names:
      raise ValueError('%s not in %s' % (axis_name, axis_names))

    # Every input must agree with the first tensor on all non-concat axes.
    shared_axes = axes_0.remove(axis_name)

    tensors = [labeled_tensors[0].tensor]
    concat_axis_list = [axes_0[axis_name]]
    for labeled_tensor in labeled_tensors[1:]:
      current_shared_axes = labeled_tensor.axes.remove(axis_name)
      if current_shared_axes != shared_axes:
        # TODO(shoyer): add more specific checks about what went wrong,
        # including raising AxisOrderError when appropriate
        raise ValueError('Mismatched shared axes: the first tensor '
                         'had axes %r but this tensor has axes %r.' %
                         (shared_axes, current_shared_axes))

      # Accumulate the axis labels, if they're available.
      concat_axis_list.append(labeled_tensor.axes[axis_name])
      tensors.append(labeled_tensor.tensor)

    concat_axis = core.concat_axes(concat_axis_list)
    concat_dimension = axis_names.index(axis_name)
    concat_tensor = array_ops.concat(tensors, concat_dimension, name=scope)
    # Rebuild the axes, replacing the concat axis with the merged one.
    values = list(axes_0.values())
    concat_axes = (values[:concat_dimension] + [concat_axis] +
                   values[concat_dimension + 1:])

    return core.LabeledTensor(concat_tensor, concat_axes)
# TODO(shoyer): rename pack/unpack to stack/unstack


@tc.returns(core.LabeledTensor)
@tc.accepts(
    tc.Collection(core.LabeledTensorLike),
    tc.Union(string_types, core.AxisLike), int, tc.Optional(string_types))
def pack(labeled_tensors, new_axis, axis_position=0, name=None):
  """Pack tensors along a new axis.

  See tf.pack.

  Args:
    labeled_tensors: The input tensors, which must have identical axes.
    new_axis: The name of the new axis, or a tuple containing the name
      and coordinate labels.
    axis_position: Optional integer position at which to insert the new axis.
    name: Optional op name.

  Returns:
    The packed tensors as a single LabeledTensor, with `new_axis` in the given
    `axis_position`.

  Raises:
    ValueError: If fewer than one input tensors is provided, or if the tensors
      don't have identical axes.
  """
  with ops.name_scope(name, 'lt_pack', labeled_tensors) as scope:
    labeled_tensors = [
        core.convert_to_labeled_tensor(lt) for lt in labeled_tensors
    ]

    if len(labeled_tensors) < 1:
      raise ValueError('pack expects at least 1 tensors, but received %s' %
                       labeled_tensors)

    # Unlike concat, pack requires ALL axes (including labels) to match.
    axes_0 = labeled_tensors[0].axes
    for t in labeled_tensors:
      if t.axes != axes_0:
        raise ValueError('Non-identical axes. Expected %s but got %s' %
                         (axes_0, t.axes))

    pack_op = array_ops.stack(
        [t.tensor for t in labeled_tensors], axis=axis_position, name=scope)
    # Insert the new axis at the requested position in the axis list.
    axes = list(axes_0.values())
    axes.insert(axis_position, new_axis)
    return core.LabeledTensor(pack_op, axes)
@tc.returns(tc.List(core.LabeledTensor))
@tc.accepts(core.LabeledTensorLike,
            tc.Optional(string_types), tc.Optional(string_types))
def unpack(labeled_tensor, axis_name=None, name=None):
  """Unpack the tensor along one axis.
  See tf.unpack.
  Args:
    labeled_tensor: The input tensor.
    axis_name: Optional name of the axis to unpack along. Defaults to the
      first axis.
    name: Optional op name.
  Returns:
    The list of unpacked LabeledTensors, one per entry along the chosen axis.
  Raises:
    ValueError: If `axis_name` is not an axis on the input.
  """
  with ops.name_scope(name, 'lt_unpack', [labeled_tensor]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
    valid_names = list(labeled_tensor.axes.keys())
    # Default to the leading axis when none was requested.
    chosen_name = valid_names[0] if axis_name is None else axis_name
    if chosen_name not in valid_names:
      raise ValueError('%s not in %s' % (chosen_name, valid_names))
    dim = valid_names.index(chosen_name)
    pieces = array_ops.unstack(labeled_tensor.tensor, axis=dim, name=scope)
    # Each output keeps every axis except the one that was unpacked.
    remaining_axes = [
        ax for pos, ax in enumerate(labeled_tensor.axes.values()) if pos != dim
    ]
    return [core.LabeledTensor(piece, remaining_axes) for piece in pieces]
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
            tc.Collection(string_types),
            tc.Collection(tc.Union(string_types, core.AxisLike)),
            tc.Optional(string_types))
def reshape(labeled_tensor, existing_axes, new_axes, name=None):
  """Reshape specific axes of a LabeledTensor.
  Non-indicated axes remain in their original locations.
  Args:
    labeled_tensor: The input tensor.
    existing_axes: List of axis names found on the input tensor. These must
      appear sequentially in the list of axis names on the input. In other
      words, they must be a valid slice of `list(labeled_tensor.axes.keys())`.
    new_axes: List of strings, tuples of (axis_name, axis_value) or Axis objects
      providing new axes with which to replace `existing_axes` in the reshaped
      result. At most one element of `new_axes` may be a string, indicating an
      axis with unknown size.
    name: Optional op name.
  Returns:
    The reshaped LabeledTensor.
  Raises:
    ValueError: If `existing_axes` are not all axes on the input, or if more
      than one of `new_axes` has unknown size.
    AxisOrderError: If `existing_axes` are not a slice of axis names on the
      input.
  """
  with ops.name_scope(name, 'lt_reshape', [labeled_tensor]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
    original_axis_names = list(labeled_tensor.axes.keys())
    existing_axes = list(existing_axes)
    if not set(existing_axes) <= set(original_axis_names):
      raise ValueError('existing_axes %r are not contained in the set of axis '
                       'names %r on the input labeled tensor' %
                       (existing_axes, original_axis_names))
    # existing_axes must form a contiguous run [start, stop) of the input's
    # axis order; otherwise a transpose would be needed (rejected below).
    start = original_axis_names.index(existing_axes[0])
    stop = original_axis_names.index(existing_axes[-1]) + 1
    if existing_axes != original_axis_names[start:stop]:
      # We could support existing_axes that aren't a slice by using transpose,
      # but that could lead to unpredictable performance consequences because
      # transposes are not free in TensorFlow. If we did transpose
      # automatically, the user might never realize that their data is being
      # produced with the wrong order. (The later will occur with some frequency
      # because of how broadcasting automatically choose axis order.)
      # So for now we've taken the strict approach.
      raise core.AxisOrderError(
          'existing_axes %r are not a slice of axis names %r on the input '
          'labeled tensor. Use `transpose` or `impose_axis_order` to reorder '
          'axes on the input explicitly.' %
          (existing_axes, original_axis_names))
    if sum(isinstance(axis, string_types) for axis in new_axes) > 1:
      raise ValueError(
          'at most one axis in new_axes can have unknown size. All other '
          'axes must have an indicated integer size or labels: %r' % new_axes)
    original_values = list(labeled_tensor.axes.values())
    # Map an unknown axis size (None) to -1, which tf.reshape infers.
    axis_size = lambda axis: -1 if axis.size is None else axis.size
    shape = [axis_size(axis) for axis in original_values[:start]]
    for axis_ref in new_axes:
      if isinstance(axis_ref, string_types):
        # A bare string denotes one axis of unknown size.
        shape.append(-1)
      else:
        axis = core.as_axis(axis_ref)
        shape.append(axis_size(axis))
    shape.extend(axis_size(axis) for axis in original_values[stop:])
    reshaped_tensor = array_ops.reshape(
        labeled_tensor.tensor, shape, name=scope)
    # Splice the replacement axes into the untouched prefix and suffix.
    axes = original_values[:start] + list(new_axes) + original_values[stop:]
    return core.LabeledTensor(reshaped_tensor, axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, string_types, string_types,
            tc.Optional(string_types))
def rename_axis(labeled_tensor, existing_name, new_name, name=None):
  """Give one axis of a LabeledTensor a new name.
  Args:
    labeled_tensor: The input tensor.
    existing_name: Name for an existing axis on the input.
    new_name: Desired replacement name.
    name: Optional op name.
  Returns:
    LabeledTensor with the axis renamed; values are unchanged.
  Raises:
    ValueError: If `existing_name` is not an axis on the input.
  """
  with ops.name_scope(name, 'lt_rename_axis', [labeled_tensor]) as scope:
    if existing_name not in labeled_tensor.axes:
      raise ValueError('existing_name %r are not contained in the set of axis '
                       'names %r on the input labeled tensor' %
                       (existing_name, labeled_tensor.axes.keys()))
    # A rename is just a reshape that swaps in an identically-valued axis
    # carrying the new name.
    replacement = core.Axis(new_name, labeled_tensor.axes[existing_name].value)
    return reshape(labeled_tensor, [existing_name], [replacement], name=scope)
@tc.returns(tc.List(core.LabeledTensor))
@tc.accepts(string_types, collections.Callable, int, bool,
            tc.Collection(core.LabeledTensorLike), bool,
            tc.Optional(string_types))
def _batch_helper(default_name,
                  batch_fn,
                  batch_size,
                  enqueue_many,
                  labeled_tensors,
                  allow_smaller_final_batch,
                  name=None):
  """Shared implementation for `batch` and `shuffle_batch`.
  Runs `batch_fn` on the unwrapped tensors, then re-attaches axis metadata,
  replacing (or prepending) a 'batch' axis of size `batch_size`.
  Args:
    default_name: Name scope to use when `name` is not given.
    batch_fn: Callable of (list of tf.Tensors, scope name) that performs the
      actual batching, e.g. a wrapper around tf.train.batch.
    batch_size: The output batch size.
    enqueue_many: If True, inputs already have a leading 'batch' axis that
      is replaced; if False, a new 'batch' axis is prepended.
    labeled_tensors: The input tensors.
    allow_smaller_final_batch: If True, the output batch size is unknown
      (the final batch may be smaller).
    name: Optional op name.
  Returns:
    The batched tensors as LabeledTensors.
  Raises:
    ValueError: If enqueue_many is True and an input's first axis is not
      named 'batch'.
  """
  with ops.name_scope(name, default_name, labeled_tensors) as scope:
    labeled_tensors = [
        core.convert_to_labeled_tensor(lt) for lt in labeled_tensors
    ]
    batch_ops = batch_fn([t.tensor for t in labeled_tensors], scope)
    # TODO(shoyer): Remove this when they sanitize the TF API.
    if not isinstance(batch_ops, list):
      assert isinstance(batch_ops, ops.Tensor)
      batch_ops = [batch_ops]
    if allow_smaller_final_batch:
      # The final batch may be short, so the batch axis size is unknown.
      batch_size = None
    @tc.returns(core.Axes)
    @tc.accepts(core.Axes)
    def output_axes(axes):
      # Compute the output axes for one input: swap out (enqueue_many) or
      # prepend (otherwise) the 'batch' axis.
      if enqueue_many:
        if 'batch' not in axes or list(axes.keys()).index('batch') != 0:
          raise ValueError(
              'When enqueue_many is True, input tensors must have an axis '
              'called "batch" as their first dimension, '
              'but axes were %s' % axes)
        culled_axes = axes.remove('batch')
        return core.Axes([('batch', batch_size)] + list(culled_axes.values()))
      else:
        return core.Axes([('batch', batch_size)] + list(axes.values()))
    output_labeled_tensors = []
    for i, tensor in enumerate(batch_ops):
      axes = output_axes(labeled_tensors[i].axes)
      output_labeled_tensors.append(core.LabeledTensor(tensor, axes))
    return output_labeled_tensors
@tc.returns(tc.List(core.LabeledTensor))
@tc.accepts(
    tc.Collection(core.LabeledTensorLike), int, int, int, bool, bool,
    tc.Optional(string_types))
def batch(labeled_tensors,
          batch_size,
          num_threads=1,
          capacity=32,
          enqueue_many=False,
          allow_smaller_final_batch=False,
          name=None):
  """Rebatch a tensor.
  See tf.train.batch.
  Args:
    labeled_tensors: The input tensors.
    batch_size: The output batch size.
    num_threads: See tf.train.batch.
    capacity: See tf.train.batch.
    enqueue_many: If true, the input tensors must contain a 'batch' axis as
      their first axis.
      If false, the input tensors must not contain a 'batch' axis.
      See tf.train.batch.
    allow_smaller_final_batch: See tf.train.batch.
    name: Optional op name.
  Returns:
    The rebatched tensors.
    If enqueue_many is false, the output tensors will have a new 'batch' axis
    as their first axis.
  Raises:
    ValueError: If enqueue_many is True and the first axis of the tensors
      isn't "batch".
  """
  # Closure over the batching parameters; _batch_helper handles the axis
  # bookkeeping around the raw tf.train.batch call.
  def fn(tensors, scope):
    return input.batch(
        tensors,
        batch_size=batch_size,
        num_threads=num_threads,
        capacity=capacity,
        enqueue_many=enqueue_many,
        allow_smaller_final_batch=allow_smaller_final_batch,
        name=scope)
  return _batch_helper('lt_batch', fn, batch_size, enqueue_many,
                       labeled_tensors, allow_smaller_final_batch, name)
@tc.returns(tc.List(core.LabeledTensor))
@tc.accepts(
    tc.Collection(core.LabeledTensorLike), int, int, int, bool, int,
    tc.Optional(int), bool, tc.Optional(string_types))
def shuffle_batch(labeled_tensors,
                  batch_size,
                  num_threads=1,
                  capacity=32,
                  enqueue_many=False,
                  min_after_dequeue=0,
                  seed=None,
                  allow_smaller_final_batch=False,
                  name=None):
  """Rebatch a tensor, with shuffling.
  See tf.train.shuffle_batch.
  Args:
    labeled_tensors: The input tensors.
    batch_size: The output batch size.
    num_threads: See tf.train.shuffle_batch.
    capacity: See tf.train.shuffle_batch.
    enqueue_many: If true, the input tensors must contain a 'batch' axis as
      their first axis.
      If false, the input tensors must not contain a 'batch' axis.
      See tf.train.shuffle_batch.
    min_after_dequeue: Minimum number of elements in the queue after a dequeue,
      used to ensure mixing.
    seed: Optional random seed.
    allow_smaller_final_batch: See tf.train.shuffle_batch.
    name: Optional op name.
  Returns:
    The rebatched tensors.
    If enqueue_many is false, the output tensors will have a new 'batch' axis
    as their first axis.
  Raises:
    ValueError: If enqueue_many is True and the first axis of the tensors
      isn't "batch".
  """
  # Closure over the shuffle-batch parameters; _batch_helper handles the
  # axis bookkeeping around the raw tf.train.shuffle_batch call.
  def fn(tensors, scope):
    return input.shuffle_batch(
        tensors,
        batch_size=batch_size,
        num_threads=num_threads,
        capacity=capacity,
        enqueue_many=enqueue_many,
        min_after_dequeue=min_after_dequeue,
        seed=seed,
        allow_smaller_final_batch=allow_smaller_final_batch,
        name=scope)
  return _batch_helper('lt_shuffle_batch', fn, batch_size, enqueue_many,
                       labeled_tensors, allow_smaller_final_batch, name)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
            tc.Mapping(string_types, int),
            tc.Optional(int), tc.Optional(string_types))
def random_crop(labeled_tensor, shape_map, seed=None, name=None):
  """Randomly crops a tensor to a given size.
  See tf.random_crop.
  Args:
    labeled_tensor: The input tensor.
    shape_map: A dictionary mapping axis names to the size of the random crop
      for that dimension.
    seed: An optional random seed.
    name: An optional op name.
  Returns:
    A tensor of the same rank as `labeled_tensor`, cropped randomly in the
    selected dimensions.
  Raises:
    ValueError: If the shape map contains an axis name not in the input tensor.
  """
  with ops.name_scope(name, 'lt_random_crop', [labeled_tensor]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
    for axis_name in shape_map:
      if axis_name not in labeled_tensor.axes:
        raise ValueError('Selection axis %s not in axes %s' %
                         (axis_name, labeled_tensor.axes))
    # Build the full crop shape: cropped axes take the requested size,
    # all other axes keep their current length.
    shape = []
    axes = []
    for axis in labeled_tensor.axes.values():
      if axis.name in shape_map:
        size = shape_map[axis.name]
        shape.append(size)
        # We lose labels for the axes we crop, leaving just the size.
        axes.append((axis.name, size))
      else:
        shape.append(len(axis))
        axes.append(axis)
    crop_op = random_ops.random_crop(
        labeled_tensor.tensor, shape, seed=seed, name=scope)
    return core.LabeledTensor(crop_op, axes)
# TODO(shoyer): Allow the user to select the axis over which to map.
@tc.returns(core.LabeledTensor)
@tc.accepts(collections.Callable, core.LabeledTensorLike,
            tc.Optional(string_types))
def map_fn(fn, labeled_tensor, name=None):
  """Map on the list of tensors unpacked from labeled_tensor.
  Maps over the first axis of `labeled_tensor`.
  See tf.map_fn.
  Args:
    fn: The function to apply to each unpacked LabeledTensor.
      It should have type LabeledTensor -> LabeledTensor.
    labeled_tensor: The input tensor.
    name: Optional op name.
  Returns:
    A tensor that packs the results of applying fn to the list of tensors
    unpacked from labeled_tensor.
  """
  with ops.name_scope(name, 'lt_map_fn', [labeled_tensor]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
    unpack_lts = unpack(labeled_tensor)
    # TODO(ericmc): Fix this upstream.
    if labeled_tensor.dtype == dtypes.string:
      # We must construct the full graph here, because functional_ops.map_fn
      # doesn't work for string-valued tensors.
      # Constructing the full graph may be slow.
      map_lts = [fn(t) for t in unpack_lts]
      return pack(map_lts, list(labeled_tensor.axes.values())[0], name=scope)
    else:
      # Figure out what the axis labels should be, but use tf.map_fn to
      # construct the graph because it's efficient.
      # It may be slow to construct the full graph, so we infer the labels from
      # the first element.
      # TODO(ericmc): This builds a subgraph which then gets thrown away.
      # Find a more elegant solution.
      first_map_lt = fn(unpack_lts[0])
      # Output axes: the mapped-over axis, followed by whatever axes `fn`
      # produced on the first element.
      final_axes = list(labeled_tensor.axes.values())[:1] + list(
          first_map_lt.axes.values())
      @tc.returns(ops.Tensor)
      @tc.accepts(ops.Tensor)
      def tf_fn(tensor):
        # Wrap the raw slice back into a LabeledTensor so `fn` sees the
        # expected type, then unwrap its result for tf.map_fn.
        original_axes = list(labeled_tensor.axes.values())[1:]
        tensor_lt = core.LabeledTensor(tensor, original_axes)
        return fn(tensor_lt).tensor
      map_op = functional_ops.map_fn(tf_fn, labeled_tensor.tensor)
      map_lt = core.LabeledTensor(map_op, final_axes)
      return core.identity(map_lt, name=scope)
@tc.returns(core.LabeledTensor)
@tc.accepts(collections.Callable, core.LabeledTensorLike,
            core.LabeledTensorLike, tc.Optional(string_types))
def foldl(fn, labeled_tensor, initial_value, name=None):
  """Left fold on the list of tensors unpacked from labeled_tensor.
  Folds over the first axis of `labeled_tensor`.
  See tf.foldl.
  Args:
    fn: The function to apply to each unpacked LabeledTensor.
      It should have type (LabeledTensor, LabeledTensor) -> LabeledTensor.
      Its arguments are (accumulated_value, next_value).
    labeled_tensor: The input tensor.
    initial_value: The initial value of the accumulator.
    name: Optional op name.
  Returns:
    The accumulated value, with the same axes as `initial_value`.
  """
  with ops.name_scope(name, 'lt_foldl',
                      [labeled_tensor, initial_value]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
    initial_value = core.convert_to_labeled_tensor(initial_value)
    @tc.returns(ops.Tensor)
    @tc.accepts(ops.Tensor, ops.Tensor)
    def tf_fn(accumulator, next_element):
      # Re-wrap the raw tensors so `fn` receives LabeledTensors: the
      # accumulator carries initial_value's axes, the element carries the
      # input's axes minus the folded (first) one.
      accumulator_lt = core.LabeledTensor(accumulator, initial_value.axes)
      next_element_lt = core.LabeledTensor(
          next_element, list(labeled_tensor.axes.values())[1:])
      return fn(accumulator_lt, next_element_lt).tensor
    foldl_op = functional_ops.foldl(
        tf_fn, labeled_tensor.tensor, initializer=initial_value.tensor)
    foldl_lt = core.LabeledTensor(foldl_op, initial_value.axes)
    return core.identity(foldl_lt, name=scope)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
            tc.Optional(tc.Collection(string_types)), tc.Optional(string_types))
def squeeze(labeled_tensor, axis_names=None, name=None):
  """Remove size-1 dimensions.
  See tf.squeeze.
  Args:
    labeled_tensor: The input tensor.
    axis_names: The names of the dimensions to remove, or None to remove
      all size-1 dimensions.
    name: Optional op name.
  Returns:
    A tensor with the specified dimensions removed.
  Raises:
    ValueError: If the named axes are not in the tensor, or if they are
      not size-1.
  """
  with ops.name_scope(name, 'lt_squeeze', [labeled_tensor]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
    if axis_names is None:
      # Default: squeeze every axis of length 1.
      axis_names = [a.name for a in labeled_tensor.axes.values() if len(a) == 1]
    for axis_name in axis_names:
      if axis_name not in labeled_tensor.axes:
        raise ValueError('axis %s is not in tensor axes %s' %
                         (axis_name, labeled_tensor.axes))
      elif len(labeled_tensor.axes[axis_name]) != 1:
        raise ValueError(
            'cannot squeeze axis with size greater than 1: (%s, %s)' %
            (axis_name, labeled_tensor.axes[axis_name]))
    squeeze_dimensions = []
    axes = []
    for i, axis in enumerate(labeled_tensor.axes.values()):
      if axis.name in axis_names:
        squeeze_dimensions.append(i)
      else:
        axes.append(axis)
    if squeeze_dimensions:
      squeeze_op = array_ops.squeeze(
          labeled_tensor.tensor, squeeze_dimensions, name=scope)
    else:
      # Nothing to squeeze; still emit an op so the name scope is honored.
      squeeze_op = array_ops.identity(labeled_tensor.tensor, name=scope)
    return core.LabeledTensor(squeeze_op, axes)
# pylint: disable=invalid-name
# Type-check aliases for reduction axis arguments: a bare axis name (the
# axis is squeezed out) or a (name, label) pair (the axis is kept).
ReduceAxis = tc.Union(string_types,
                      tc.Tuple(string_types, collections.Hashable))
ReduceAxes = tc.Optional(tc.Union(ReduceAxis, tc.Collection(ReduceAxis)))
# pylint: enable=invalid-name
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, core.LabeledTensorLike,
            tc.Optional(string_types))
def matmul(a, b, name=None):
  """Matrix multiply two tensors with rank 1 or 2.
  If both tensors have rank 2, a matrix-matrix product is performed.
  If one tensor has rank 1 and the other has rank 2, then a matrix-vector
  product is performed.
  If both tensors have rank 1, then a vector dot-product is performed.
  (This behavior matches that of `numpy.dot`.)
  Both tensors must share exactly one dimension in common, which is the
  dimension the operation is summed along. The inputs will be automatically
  transposed if necessary as part of the matmul op.
  We intend to eventually support `matmul` on higher rank input, and also
  eventually support summing over any number shared dimensions (via an `axis`
  argument), but neither of these features has been implemented yet.
  Args:
    a: First LabeledTensor.
    b: Second LabeledTensor.
    name: Optional op name.
  Returns:
    LabeledTensor with the result of matrix multiplication. Axes are ordered by
    the current axis_order_scope, if set, or in or order of appearance on the
    inputs.
  Raises:
    NotImplementedError: If inputs have rank >2 or share multiple axes.
    ValueError: If the inputs have rank 0 or do not share any axes.
  """
  with ops.name_scope(name, 'lt_matmul', [a, b]) as scope:
    a = core.convert_to_labeled_tensor(a)
    b = core.convert_to_labeled_tensor(b)
    if len(a.axes) > 2 or len(b.axes) > 2:
      # We could pass batched inputs to tf.matmul to make this work, but we
      # would also need to use tf.tile and/or tf.transpose. These are more
      # expensive than doing reshapes, so it's not clear if it's a good idea to
      # do this automatically.
      raise NotImplementedError(
          'matmul currently requires inputs with rank 2 or less, but '
          'inputs have ranks %r and %r' % (len(a.axes), len(b.axes)))
    if not a.axes or not b.axes:
      raise ValueError(
          'matmul currently requires inputs with at least rank 1, but '
          'inputs have ranks %r and %r' % (len(a.axes), len(b.axes)))
    # The single shared axis is the one summed over.
    shared_axes = set(a.axes) & set(b.axes)
    if len(shared_axes) > 1:
      raise NotImplementedError(
          'matmul does not yet support summing over multiple shared axes: %r. '
          'Use transpose and reshape to create a single shared axis to sum '
          'over.' % shared_axes)
    if not shared_axes:
      raise ValueError('there must have exactly one axis in common between '
                       'input to matmul: %r, %r' %
                       (a.axes.keys(), b.axes.keys()))
    shared_axis, = shared_axes
    if a.axes[shared_axis] != b.axes[shared_axis]:
      raise ValueError('axis %r does not match on input arguments: %r vs %r' %
                       (shared_axis, a.axes[shared_axis].value,
                        b.axes[shared_axis].value))
    # Result axes are all non-shared axes, in order of appearance (a then b).
    result_axes = []
    for axes in [a.axes, b.axes]:
      for axis in axes.values():
        if axis.name != shared_axis:
          result_axes.append(axis)
    axis_scope_order = core.get_axis_order()
    if axis_scope_order is not None:
      # If an axis_order_scope is active, honor its ordering by swapping the
      # operands (which reverses the result axes) when needed.
      result_axis_names = [axis.name for axis in result_axes]
      new_axis_names = [
          name for name in axis_scope_order if name in result_axis_names
      ]
      if new_axis_names != result_axis_names:
        # switch a and b
        b, a = a, b
        # result_axes is a list of length 1 or 2
        result_axes = result_axes[::-1]
    # Promote rank-1 operands to rank 2 so tf.matmul applies; the extra
    # dimension is squeezed back out of the result afterwards.
    squeeze_dims = []
    if len(a.axes) == 1:
      a_tensor = array_ops.reshape(a.tensor, (1, -1))
      squeeze_dims.append(0)
      transpose_a = False
    else:
      a_tensor = a.tensor
      # Transpose when the shared (summed) axis is not where matmul expects it.
      transpose_a = list(a.axes.keys()).index(shared_axis) == 0
    if len(b.axes) == 1:
      b_tensor = array_ops.reshape(b.tensor, (-1, 1))
      squeeze_dims.append(1)
      transpose_b = False
    else:
      b_tensor = b.tensor
      transpose_b = list(b.axes.keys()).index(shared_axis) == 1
    result_op = math_ops.matmul(
        a_tensor, b_tensor, transpose_a=transpose_a, transpose_b=transpose_b)
    if squeeze_dims:
      result_op = array_ops.squeeze(result_op, squeeze_dims)
    result_op = array_ops.identity(result_op, name=scope)
    return core.LabeledTensor(result_op, result_axes)
@tc.returns(types.FunctionType)
@tc.accepts(string_types, collections.Callable)
def define_reduce_op(op_name, reduce_fn):
  """Define a reduction op for labeled tensors.
  Factory used below to generate reduce_sum, reduce_mean, etc.
  Args:
    op_name: string name of the TensorFlow op.
    reduce_fn: function to call to evaluate the op on a tf.Tensor.
  Returns:
    Function defining the given reduction op that acts on a LabeledTensor.
  """
  default_name = 'lt_%s' % op_name
  @tc.returns(core.LabeledTensor)
  @tc.accepts(core.LabeledTensorLike, ReduceAxes, tc.Optional(string_types))
  def op(labeled_tensor, axes=None, name=None):
    """Computes the given reduction across the given axes of a LabeledTensor.
    See `tf.{op_name}` for full details.
    Args:
      labeled_tensor: The input tensor.
      axes: A set of axes or None.
        If None, all axes will be reduced.
        Axes must all be strings, in which case those dimensions will be
        removed, or pairs of (name, None) or (name, label), in which case those
        dimensions will be kept.
      name: Optional op name.
    Returns:
      The reduced LabeledTensor.
    Raises:
      ValueError: if any of the axes to reduce over are not found on
        `labeled_tensor`.
    """
    with ops.name_scope(name, default_name, [labeled_tensor]) as scope:
      labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
      if axes is None:
        axes = labeled_tensor.axes.keys()
      if isinstance(axes, (string_types, tuple)):
        # Normalize a single axis spec into a list of specs.
        axes = [axes]
      # Map axis name -> output axis spec; bare names are squeezed out,
      # (name, label) pairs are kept as size-1 axes.
      reduction_axes = {}
      axes_to_squeeze = []
      for a in axes:
        if isinstance(a, string_types):
          # We squeeze out this axis.
          reduction_axes[a] = a
          axes_to_squeeze.append(a)
        else:
          # We keep this axis, with the user-provided labels.
          (axis_name, label) = a
          if label is not None:
            # The input was a single label, so make it a list so it can be
            # turned into an Axis.
            label = [label]
          reduction_axes[axis_name] = (axis_name, label)
      for axis_name in reduction_axes:
        if axis_name not in labeled_tensor.axes:
          raise ValueError('Axis %s not in axes %s' %
                           (axis_name, labeled_tensor.axes))
      intermediate_axes = []
      reduction_dimensions = []
      for i, axis in enumerate(labeled_tensor.axes.values()):
        if axis.name in reduction_axes:
          intermediate_axes.append(reduction_axes[axis.name])
          reduction_dimensions.append(i)
        else:
          intermediate_axes.append(axis)
      # Reduce with keep_dims=True, then squeeze the axes that should be
      # dropped; kept axes retain their size-1 dimension.
      reduce_op = reduce_fn(
          labeled_tensor.tensor, reduction_dimensions, keep_dims=True)
      reduce_lt = core.LabeledTensor(reduce_op, intermediate_axes)
      return squeeze(reduce_lt, axes_to_squeeze, name=scope)
  op.__doc__ = op.__doc__.format(op_name=op_name)
  op.__name__ = op_name
  return op
# Labeled-tensor reductions, each generated from the corresponding
# tf.reduce_* function by the factory above.
reduce_all = define_reduce_op('reduce_all', math_ops.reduce_all)
reduce_any = define_reduce_op('reduce_any', math_ops.reduce_any)
reduce_logsumexp = define_reduce_op('reduce_logsumexp',
                                    math_ops.reduce_logsumexp)
reduce_max = define_reduce_op('reduce_max', math_ops.reduce_max)
reduce_mean = define_reduce_op('reduce_mean', math_ops.reduce_mean)
reduce_min = define_reduce_op('reduce_min', math_ops.reduce_min)
reduce_prod = define_reduce_op('reduce_prod', math_ops.reduce_prod)
reduce_sum = define_reduce_op('reduce_sum', math_ops.reduce_sum)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
            tc.Mapping(str, tc.Union(int, ops.Tensor)),
            tc.Optional(string_types))
def tile(labeled_tensor, multiples, name=None):
  """Constructs a tensor by tiling a given tensor.
  Only axes without tick-labels can be tiled. (Otherwise, axis labels on tiled
  tensors would no longer be unique.)
  See tf.tile.
  Args:
    labeled_tensor: The input tensor.
    multiples: A mapping where the keys are axis names and the values are the
      integer number of times to tile along that axis. Only axes with a multiple
      different than 1 need be included.
    name: Optional op name.
  Returns:
    A tensor with the indicated axes tiled.
  Raises:
    ValueError: If the tiled axes are not axes in the input tensor, or if any
      axes in multiples have tick labels.
  """
  with ops.name_scope(name, 'lt_tile', [labeled_tensor]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
    if not set(multiples.keys()) <= set(labeled_tensor.axes.keys()):
      raise ValueError('tile axes %r are not contained in the set of axis '
                       'names %r on the input labeled tensor' %
                       (multiples.keys(), labeled_tensor.axes))
    # Tiling a labeled axis would duplicate its tick labels, so reject it.
    labeled_axes = [
        name for name in multiples
        if labeled_tensor.axes[name].labels is not None
    ]
    if labeled_axes:
      raise ValueError('cannot tile axes with tick labels: %r' % labeled_axes)
    # Axes not mentioned in `multiples` default to a multiple of 1.
    multiples_list = [multiples.get(name, 1) for name in labeled_tensor.axes]
    tile_op = array_ops.tile(labeled_tensor.tensor, multiples_list, name=scope)
    # Tiled axes keep only their name (size becomes unknown/derived).
    new_axes = [
        axis.name if axis.labels is None else axis
        for axis in labeled_tensor.axes.values()
    ]
    return core.LabeledTensor(tile_op, new_axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
            tc.Mapping(str, tc.Tuple(core.AxisValue, core.AxisValue)),
            string_types, tc.Optional(string_types))
def pad(labeled_tensor, paddings, mode='CONSTANT', name=None):
  """Pads a tensor.
  See tf.pad.
  Args:
    labeled_tensor: The input tensor.
    paddings: A mapping where the keys are axis names and the values are
      tuples where the first element is the padding to insert at the beginning
      of the axis and the second is the padding to insert at the end of the
      axis.
    mode: One of "CONSTANT", "REFLECT", or "SYMMETRIC".
    name: Optional op name.
  Returns:
    A tensor with the indicated axes padded, optionally with those axes extended
    with the provided labels.
  Raises:
    ValueError: If the padded axes are not axes in the input tensor.
  """
  with ops.name_scope(name, 'lt_pad', [labeled_tensor]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
    if not set(paddings.keys()) <= set(labeled_tensor.axes.keys()):
      raise ValueError('pad axes %r are not contained in the set of axis '
                       'names %r on the input labeled tensor' %
                       (paddings.keys(), labeled_tensor.axes))
    new_axes = []
    padding_pairs = []
    for name, axis in labeled_tensor.axes.items():
      if name in paddings:
        # Build the padded axis by concatenating before-padding, the original
        # axis, and after-padding; the pair of lengths feeds tf.pad.
        padding_before, padding_after = paddings[name]
        axis_before = core.Axis(name, padding_before)
        axis_after = core.Axis(name, padding_after)
        new_axes.append(core.concat_axes([axis_before, axis, axis_after]))
        padding_pairs.append((len(axis_before), len(axis_after)))
      else:
        new_axes.append(axis)
        padding_pairs.append((0, 0))
    pad_op = array_ops.pad(labeled_tensor.tensor,
                           padding_pairs,
                           mode,
                           name=scope)
    return core.LabeledTensor(pad_op, new_axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(
    tc.Union(np.ndarray, list, tuple, core.Scalar),
    tc.Optional(dtypes.DType),
    tc.Optional(
        tc.Union(core.Axes, tc.Collection(
            tc.Union(string_types, core.AxisLike)))), tc.Optional(string_types))
def constant(value, dtype=None, axes=None, name=None):
  """Creates a constant tensor.
  If `axes` includes any strings, shape is inferred from `value`. Otherwise,
  the sizes of the given `axes` are used to set `shape` for `tf.constant`.
  See tf.constant for more details.
  Args:
    value: The input tensor.
    dtype: The type of the returned tensor.
    axes: Optional Axes, list of strings or list of objects coercible to Axis
      objects. By default, axes are assumed to be an empty list (i.e., `value`
      is treated as a scalar).
    name: Optional op name.
  Returns:
    The constant LabeledTensor.
  """
  with ops.name_scope(name, 'lt_constant', [value]) as scope:
    if axes is None:
      axes = []
    if isinstance(axes, core.Axes):
      axes = axes.values()
    if any(isinstance(ax, string_types) for ax in axes):
      # need to infer shape
      shape = None
    else:
      # axes already indicate shape
      axes = [core.as_axis(a) for a in axes]
      shape = [a.size for a in axes]
    op = array_ops.constant(value, dtype=dtype, shape=shape, name=scope)
    return core.LabeledTensor(op, axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
            tc.Optional(dtypes.DType), tc.Optional(string_types))
def zeros_like(labeled_tensor, dtype=None, name=None):
  """Creates a tensor of zeros with the same shape and axes as the input.
  Args:
    labeled_tensor: The input tensor.
    dtype: The type of the returned tensor.
    name: Optional op name.
  Returns:
    An all-zero LabeledTensor with the input's axes.
  """
  with ops.name_scope(name, 'lt_zeros_like', [labeled_tensor]) as scope:
    source = core.convert_to_labeled_tensor(labeled_tensor)
    zeros = array_ops.zeros_like(source.tensor, dtype=dtype, name=scope)
    return core.LabeledTensor(zeros, source.axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
            tc.Optional(dtypes.DType), tc.Optional(string_types))
def ones_like(labeled_tensor, dtype=None, name=None):
  """Creates a tensor of ones with the same shape and axes as the input.
  Args:
    labeled_tensor: The input tensor.
    dtype: The type of the returned tensor.
    name: Optional op name.
  Returns:
    An all-ones LabeledTensor with the input's axes.
  """
  with ops.name_scope(name, 'lt_ones_like', [labeled_tensor]) as scope:
    source = core.convert_to_labeled_tensor(labeled_tensor)
    ones = array_ops.ones_like(source.tensor, dtype=dtype, name=scope)
    return core.LabeledTensor(ones, source.axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
            tc.Optional(dtypes.DType), tc.Optional(string_types))
def cast(labeled_tensor, dtype=None, name=None):
  """Casts a labeled tensor to a new type, preserving its axes.
  Args:
    labeled_tensor: The input tensor.
    dtype: The type of the returned tensor.
    name: Optional op name.
  Returns:
    A LabeledTensor with the new dtype and the input's axes.
  """
  with ops.name_scope(name, 'lt_cast', [labeled_tensor]) as scope:
    source = core.convert_to_labeled_tensor(labeled_tensor)
    converted = math_ops.cast(source.tensor, dtype=dtype, name=scope)
    return core.LabeledTensor(converted, source.axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, string_types, tc.Optional(string_types))
def verify_tensor_all_finite(labeled_tensor, message, name=None):
  """Asserts that a labeled tensor contains no NaNs or Infs.
  See tf.verify_tensor_all_finite.
  Args:
    labeled_tensor: The input tensor.
    message: Message to log on failure.
    name: Optional op name.
  Returns:
    The input tensor, wrapped in the finiteness check.
  """
  with ops.name_scope(name, 'lt_verify_tensor_all_finite',
                      [labeled_tensor]) as scope:
    source = core.convert_to_labeled_tensor(labeled_tensor)
    checked = numerics.verify_tensor_all_finite(
        source.tensor, msg=message, name=scope)
    return core.LabeledTensor(checked, source.axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, core.LabeledTensorLike,
            tc.Optional(string_types))
def boolean_mask(labeled_tensor, mask, name=None):
  """Apply a boolean mask to a labeled tensor.
  Unlike `tf.boolean_mask`, this currently only works on 1-dimensional masks.
  The mask is applied to the first axis of `labeled_tensor`. Labels on the first
  axis are removed, because True indices in `mask` may not be known dynamically.
  Args:
    labeled_tensor: The input tensor.
    mask: A rank-1 boolean LabeledTensor whose single axis matches the first
      axis of `labeled_tensor`.
    name: Optional op name.
  Returns:
    The masked labeled tensor.
  Raises:
    NotImplementedError: if the mask has more than one dimension.
    ValueError: if the first axis of the input does not equal the axis of the
      mask.
  """
  with ops.name_scope(name, 'lt_boolean_mask', [labeled_tensor, mask]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
    mask = core.convert_to_labeled_tensor(mask)
    if len(mask.axes) > 1:
      raise NotImplementedError(
          "LabeledTensor's boolean_mask currently only supports 1D masks")
    mask_axis = list(mask.axes.values())[0]
    lt_axis = list(labeled_tensor.axes.values())[0]
    if mask_axis != lt_axis:
      raise ValueError('the first axis of the labeled tensor and the mask '
                       'are not equal:\n%r\n%r' % (lt_axis, mask_axis))
    op = array_ops.boolean_mask(labeled_tensor.tensor, mask.tensor, name=scope)
    # TODO(shoyer): attempt to infer labels for the masked values, by calling
    # tf.contrib.util.constant_value on the mask?
    # The masked axis keeps only its name: its size and labels are unknown
    # until the mask values are known at runtime.
    axes = [lt_axis.name] + list(labeled_tensor.axes.values())[1:]
    return core.LabeledTensor(op, axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, core.LabeledTensorLike,
            core.LabeledTensorLike, tc.Optional(string_types))
def where(condition, x, y, name=None):
  """Return elements from x or y depending on condition.

  See `tf.where` for more details. This function currently only implements the
  three argument version of where.

  Args:
    condition: LabeledTensor of type `bool`.
    x: LabeledTensor for values where condition is true.
    y: LabeledTensor for values where condition is false.
    name: Optional op name.

  Returns:
    The labeled tensor with values according to condition.

  Raises:
    ValueError: if `condition`, `x` and `y` do not all share the same axes.
  """
  with ops.name_scope(name, 'lt_where', [condition, x, y]) as scope:
    cond_lt = core.convert_to_labeled_tensor(condition)
    x_lt = core.convert_to_labeled_tensor(x)
    y_lt = core.convert_to_labeled_tensor(y)

    if not cond_lt.axes == x_lt.axes == y_lt.axes:
      raise ValueError('all inputs to `where` must have equal axes')

    selected = array_ops.where(cond_lt.tensor, x_lt.tensor, y_lt.tensor,
                               name=scope)
    return core.LabeledTensor(selected, x_lt.axes)
| apache-2.0 |
vdrhtc/Measurement-automation | lib2/correlatorMeasurement.py | 1 | 19512 | from copy import deepcopy
import numpy as np
import tqdm
from lib2.MeasurementResult import MeasurementResult
from lib2.stimulatedEmission import StimulatedEmission
from datetime import datetime as dt
import matplotlib.pyplot as plt
from matplotlib import colorbar
from scipy import signal
import numba
@numba.njit(parallel=True)
def apply_along_axis(data, trace_len):
    # Sum over all rows i of the outer product conj(data[i]) x data[i],
    # i.e. the unnormalized two-time correlation matrix of the traces.
    # `acc +=` inside prange is a numba-supported array reduction.
    acc = np.zeros((trace_len, trace_len),
                   dtype=np.complex128)
    for row_idx in numba.prange(data.shape[0]):
        row = data[row_idx]
        acc += np.kron(np.conj(row), row) \
            .reshape(trace_len, trace_len)
    return acc
class CorrelatorMeasurement(StimulatedEmission):
    """
    Measurement that accumulates two-time correlators of a digitized
    heterodyne signal: the running sum of outer products <E+(t1) E(t2)>
    and the product of running averages <E+(t1)><E(t2)>.

    Traces are acquired in pairs: one with the AWG pulse sequence playing
    and one with the AWG outputting zero ("background"); the background
    correlator is subtracted from the signal correlator.
    """

    def __init__(self, name, sample_name, comment,
                 q_lo=None, q_iqawg=None, dig=None):
        # q_lo/q_iqawg/dig are forwarded to StimulatedEmission; based on
        # their usage below they are lists of device drivers (LO source,
        # IQ AWG, digitizer) -- TODO confirm against StimulatedEmission.
        super().__init__(name, sample_name, comment, q_lo=q_lo,
                         q_iqawg=q_iqawg, dig=dig)
        self._measurement_result = CorrelatorResult(self._name,
                                                    self._sample_name)
        # Accumulators filled lazily in `_recording_iteration`:
        self.avg = None        # running sum of traces (unnormalized <E(t)>)
        self.corr_avg = None   # <E+(t1)><E(t2)> rebuilt from self.avg
        self.avg_corr = None   # running sum of outer products <E+(t1) E(t2)>
        self._iterations_number = 0
        self._segments_number = 1
        self._freq_lims = None  # for digital filtering
        self._conv = None  # cached down-conversion phasor exp(-2j*pi*f*t)
        self._b = None  # cached FIR filter taps (scipy.signal.firwin)
        self._temp = None
        self._pause_in_samples_before_next_trigger = 0  # > 80 samples

    def set_fixed_parameters(self, pulse_sequence_parameters,
                             freq_limits=(-90e6, 90e6), delay_correction=0,
                             down_conversion_calibration=None,
                             subtract_pi=False, q_lo_params=None,
                             q_iqawg_params=None, dig_params=None,
                             apply_filter=True, iterations_number=100,
                             do_convert=True):
        """
        Parameters
        ----------
        pulse_sequence_parameters: dict
            single pulse parameters
        freq_limits: tuple of 2 values
            frequency band (Hz) kept by the digital FIR filter
        delay_correction: int
            A correction of a digitizer delay given in samples
            For flexibility, it is applied after the measurement. For example,
            when you look at the trace and decide, that the digitizer delay
            should have been different
        down_conversion_calibration: IQDownconversionCalibrationResult
        subtract_pi: bool
            True if you want to make the Furier spectrum clearer by
            subtracting the same trace with pulses shifted by phase pi
            NOTE(review): this flag is not referenced anywhere in this
            class body -- confirm whether it is consumed by the parent.
        q_lo_params
        q_iqawg_params
        dig_params
        apply_filter: bool
            whether to FIR-filter each trace before accumulating
        iterations_number: int
            number of (signal, background) acquisition pairs
        do_convert: bool
            whether to digitally down-convert traces to DC

        Returns
        -------
        Nothing
        """
        # LO power is taken from the IQ-AWG calibration so that both agree.
        q_lo_params[0]["power"] = q_iqawg_params[0]["calibration"] \
            .get_radiation_parameters()["lo_power"]
        # a snapshot of initial seq pars structure passed into measurement
        self._pulse_sequence_parameters_init = deepcopy(
            pulse_sequence_parameters
        )
        super().set_fixed_parameters(
            pulse_sequence_parameters,
            freq_limits=freq_limits,
            down_conversion_calibration=down_conversion_calibration,
            q_lo_params=q_lo_params,
            q_iqawg_params=q_iqawg_params,
            dig_params=dig_params
        )
        self._delay_correction = delay_correction
        self._freq_lims = freq_limits
        self.apply_filter = apply_filter
        self._do_convert = do_convert

        # longest repetition period is initially set with data from
        # 'pulse_sequence_paramaters'
        self.max_segment_duration = \
            pulse_sequence_parameters["repetition_period"] * \
            pulse_sequence_parameters["periods_per_segment"]

        self._iterations_number = iterations_number
        self._segments_number = dig_params[0]["n_seg"]
        dig = self._dig[0]

        """ Supplying additional arrays to 'self._measurement_result' class """
        meas_data = self._measurement_result.get_data()
        # if_freq is already set in call of 'super()' class method
        # time in nanoseconds
        meas_data["sample_rate"] = dig.get_sample_rate()
        self._measurement_result.sample_rate = dig.get_sample_rate()
        self._measurement_result.set_data(meas_data)

    def dont_sweep(self):
        # Register a single dummy "swept" parameter so that the generic
        # sweep machinery performs exactly one pass.
        super().set_swept_parameters(
            **{
                "number": (
                    self._output_pulse_sequence, [False]
                )
            }
        )

    def _output_pulse_sequence(self, zero=False):
        """
        Configure the digitizer acquisition window and start AWG output.

        Parameters
        ----------
        zero : bool
            If True, the AWG outputs zeros (background/noise acquisition)
            instead of the pulse sequence.
        """
        dig = self._dig[0]
        timedelay = self._pulse_sequence_parameters["start_delay"] + \
                    self._pulse_sequence_parameters["digitizer_delay"]
        dig.calc_and_set_trigger_delay(timedelay, include_pretrigger=True)
        self._n_samples_to_drop_by_delay = \
            dig.get_how_many_samples_to_drop_in_front()
        """
        Because readout duration coincides with trigger
        period the very next trigger is missed by digitizer.
        Decreasing segment size by fixed amount > 80
        (e.g. 128) gives enough time to digitizer to catch the very next
        trigger event.
        Rearm before trigger is quantity you wish to take into account
        see more on rearm timings in digitizer manual
        """
        # readout_start + readout_duration < repetition period - 100 ns
        dig.calc_segment_size(decrease_segment_size_by=self._pause_in_samples_before_next_trigger)
        # not working somehow, but rearming time
        # equals'80 + pretrigger' samples
        # maybe due to the fact that some extra values are sampled at the end
        # of the trace in order to make 'segment_size' in samples to be
        # dividable by 32 as required by digitizer
        self._n_samples_to_drop_in_end = \
            dig.get_how_many_samples_to_drop_in_end()

        dig.setup_current_mode()

        q_pbs = [q_iqawg.get_pulse_builder() for q_iqawg in self._q_iqawg]

        # TODO: 'and (self._q_z_awg[0] is not None)' hotfix by Shamil (below)
        # I intend to declare all possible device attributes of the measurement class in it's child class definitions.
        # So hasattr(self, "_q_z_awg") is always True
        # due to the fact that I had declared this parameter and initialized it with "[None]" in RabiFromFrequencyTEST.py
        if hasattr(self, '_q_z_awg') and (self._q_z_awg[0] is not None):
            q_z_pbs = [q_z_awg.get_pulse_builder() for q_z_awg in
                       self._q_z_awg]
        else:
            q_z_pbs = [None]

        pbs = {'q_pbs': q_pbs,
               'q_z_pbs': q_z_pbs}

        if not zero:
            seqs = self._sequence_generator(self._pulse_sequence_parameters,
                                            **pbs)
            self.seqs = seqs
        else:
            # Background acquisition: output zeros but keep the trigger
            # train so that the digitizer timing is unchanged.
            self._q_iqawg[0].output_zero(
                trigger_sync_every=self._pulse_sequence_parameters["repetition_period"]
            )
            return

        for (seq, q_iqawg) in zip(seqs['q_seqs'], self._q_iqawg):
            # check if output trace length is dividable by awg's output
            # trigger clock period
            # TODO: The following lines are moved to the KeysightM3202A
            # driver. Should be deleted later from here
            # if seq.get_duration() % \
            #         q_iqawg._channels[0].host_awg.trigger_clock_period != 0:
            #     raise ValueError(
            #         "AWG output duration has to be multiple of the AWG's "
            #         "trigger clock period\n"
            #         f"requested waveform duration: {seq.get_duration()} ns\n"
            #         f"trigger clock period: {q_iqawg._channels[0].host_awg.trigger_clock_period}"
            #     )
            q_iqawg.output_pulse_sequence(seq)
        if 'q_z_seqs' in seqs.keys():
            for (seq, dev) in zip(seqs['q_z_seqs'], self._q_z_awg):
                dev.output_pulse_sequence(seq, asynchronous=False)

    def _record_data(self):
        """
        Run the single-pass acquisition (`_recording_iteration`), store the
        result arrays and timing information into the measurement result.
        """
        par_names = self._swept_pars_names

        start_time = dt.now()
        self._measurement_result.set_start_datetime(start_time)

        parameters_values = [self._swept_pars[parameter_name][1]
                             for parameter_name in par_names]

        # This should be implemented in child classes:
        self._raw_data = self._recording_iteration()

        # This may need to be extended in child classes:
        measurement_data = self._prepare_measurement_result_data(par_names,
                                                                 parameters_values)
        self._measurement_result.set_data(measurement_data)
        self._measurement_result._iter_idx_ready = [len(parameters_values[0])]

        time_elapsed = dt.now() - start_time
        self._measurement_result.set_recording_time(time_elapsed)
        print(f"\nElapsed time: "
              f"{self._format_time_delta(time_elapsed.total_seconds())}")
        self._finalize()

    def _measure_one_trace(self):
        """
        Function starts digitizer measurement.
        Digitizer assumed already configured and waiting for start trace.

        Returns
        -------
        tuple(np.ndarray, np.ndarray)
            returns pair (time, data) np arrays
            time - real-valued 1D array. If down-conversion calibration is
            applied this array will differ from np.linspace
        """
        dig = self._dig[0]
        dig_data = dig.measure()  # data in mV
        # construct complex valued scalar trace
        # (interleaved I/Q samples: even indices -> I, odd -> Q)
        data = dig_data[0::2] + 1j * dig_data[1::2]
        '''
        In order to allow digitizer to don't miss very next 
        trigger while the acquisition window is almost equal to
        the trigger period, acquisition window is shrank 
        by 'self.__pause_in_samples_before_trigger' samples.
        In order to obtain data of the desired length the code below adds
        'self.__pause_in_samples_before_trigger' trailing zeros to the end
        of each segment.
        Finally result is flattened in order to perform DFT.
        '''
        dig = self._dig[0]
        # 2D array that will be set to the trace avg value
        # and appended to the end of each segment of the trace
        # scalar average is multiplied by 'np.ones()' of the appropriate 2D
        # shape
        '''commented since self._pause_in_samples_before_next_trigger is 
        zero'''
        # avgs_to_concat = np.full((dig.n_seg,
        #                           self._pause_in_samples_before_next_trigger),
        #                          np.mean(data))
        data = data.reshape(dig.n_seg, -1)

        # 'slice_stop' variable allows to avoid production of an empty list
        # by slicing operation. Empty slice is obtained if
        # 'slice_stop = -self._n_samples_to_drop_in_end' and equals zero.
        slice_stop = data.shape[-1] - self._n_samples_to_drop_in_end
        # dropping samples from the end to get needed duration
        data = data[:, self._n_samples_to_drop_by_delay:slice_stop]
        # append average to the end of trace to complete overall duration
        # to 'repetition_period' or whatever
        '''commented since self._pause_in_samples_before_next_trigger is 
        zero '''
        # data = np.hstack((data, avgs_to_concat))

        # for debug purposes, saving raw data
        if self._save_traces:
            self.dataIQ.append(data)

        # Applying mixer down-conversion calibration to data
        # time axis in nanoseconds, one point per retained sample
        time = np.linspace(
            0,
            data.shape[-1] / dig.get_sample_rate() * 1e9,
            data.shape[-1],
            endpoint=False
        )
        if self._down_conversion_calibration is not None:
            data = self._down_conversion_calibration.apply(data)
            if "time_cal" not in self._measurement_result.get_data():
                tmp = self._measurement_result.get_data()
                shift = self._delay_correction
                # NOTE(review): `self._nfft` is not defined in this class;
                # presumably set by the parent class -- confirm.
                tmp["time_cal"] = time[shift:self._nfft + shift]
                self._measurement_result.set_data(tmp)
        return time, data

    def _recording_iteration(self):
        """
        Acquire `self._iterations_number` pairs of (signal, background)
        traces, accumulate the correlators and return the pair
        (corr_avg, avg_corr) normalized by the number of traces.
        """
        for i in tqdm.tqdm_notebook(range(self._iterations_number)):
            # measuring trace
            self._output_pulse_sequence()
            time, data = self._measure_one_trace()
            # measuring only noise
            self._output_pulse_sequence(zero=True)
            time_bg, data_bg = self._measure_one_trace()

            trace_len = data.shape[-1]

            # down-converting to DC
            if self._do_convert:
                if_freq = self._q_iqawg[0].get_calibration().get_if_frequency()
                self._conv = np.exp(-2j * np.pi * if_freq * time / 1e9)
                self._conv = np.resize(self._conv, data.shape)
                data = data * self._conv
                data_bg = data_bg * self._conv

            # filtering excessive frequencies
            if self.apply_filter:
                # FIR taps are computed once and cached in self._b
                if self._b is None:
                    if self._do_convert:
                        self._b = signal.firwin(trace_len, self._freq_lims[1],
                                                fs=self._dig[0].get_sample_rate())
                    else:
                        self._b = signal.firwin(len(data), self._freq_lims,
                                                fs=self._dig[0].get_sample_rate(),
                                                pass_zero=(self._freq_lims[0] < 0 <
                                                           self._freq_lims[1]))
                    self._b = np.resize(self._b, data.shape)
                data = signal.fftconvolve(self._b, data, mode="same", axes=-1)
                data_bg = signal.fftconvolve(self._b, data_bg, mode="same",
                                             axes=-1)

            # initializing arrays for correlators storage
            if self.avg is None:
                self.avg = np.zeros(trace_len, dtype=np.clongdouble)
                self.corr_avg = np.zeros((trace_len, trace_len),
                                         dtype=np.clongdouble)
                self.avg_corr = np.zeros((trace_len, trace_len),
                                         dtype=np.clongdouble)

            # processing data with trace applied
            avg_corrs = apply_along_axis(data, trace_len)
            # avg_corrs = np.apply_along_axis(
            #     lambda x: np.reshape(
            #         np.kron(np.conj(x), x),
            #         (trace_len, trace_len)
            #     ),
            #     1,  # axis that function is applied along (along traces)
            #     data
            # ).sum(axis=0)

            # processing background data
            avg_bg_corrs = apply_along_axis(data_bg, trace_len)
            # avg_bg_corrs = np.apply_along_axis(
            #     lambda x: np.reshape(
            #         np.kron(np.conj(x), x),
            #         (trace_len, trace_len)
            #     ),
            #     1,  # axis along which function is applied (along traces)
            #     data_bg
            # ).sum(axis=0)

            # <E(t)>
            self.avg += data.sum(axis=0)
            # <E+(t1)><E(t2)> -- rebuilt each iteration from the running sum
            self.corr_avg = np.reshape(
                np.kron(np.conj(self.avg), self.avg),
                (trace_len, trace_len)
            )
            # <E+(t1) E(t2)> -- background-subtracted running sum
            self.avg_corr += avg_corrs - avg_bg_corrs

            # Saving preliminary data in the Measurement Result
            # K = total number of traces accumulated so far
            K = (i + 1) * self._segments_number
            self._measurement_result.corr_avg = self.corr_avg.copy() / K**2
            self._measurement_result.avg_corr = self.avg_corr.copy() / K

        # returning the final result
        K = self._iterations_number * self._segments_number
        return self.corr_avg / K**2, self.avg_corr / K
class CorrelatorResult(MeasurementResult):
    """
    Result container and live plotter for `CorrelatorMeasurement`.

    Draws two 2D colormaps over (t1, t2): the correlator
    <a+(t1) a(t2)> and its version with <a+(t1)><a(t2)> subtracted.
    """

    def __init__(self, name, sample_name):
        super().__init__(name, sample_name)
        self._XX = None  # t1 meshgrid, filled in _prepare_data_for_plot
        self._YY = None  # t2 meshgrid
        self.corr_avg = None  # <a+(t1)><a(t2)>, updated by the measurement
        self.avg_corr = None  # <a+(t1)a(t2)> (background subtracted)
        self.sample_rate = 1.25e9  # default; overwritten from the digitizer

    def set_parameter_name(self, parameter_name):
        # Name of the swept parameter (kept for the generic plotting API).
        self._parameter_name = parameter_name

    def _prepare_figure(self):
        """Create the two-map figure; returns (fig, axes, colorbar axes)."""
        fig, (ax_map_re, ax_map_im) = plt.subplots(nrows=1, ncols=2,
                                                   constrained_layout=True, figsize=(17, 8), sharex=True, sharey=True)

        labelx = "$t_1$, ns"
        labely = "$t_2$, ns"

        ax_map_re.ticklabel_format(axis='x', style='sci', scilimits=(-2, 2))
        ax_map_re.set_ylabel(labely)
        ax_map_re.set_xlabel(labelx)
        ax_map_re.autoscale_view(True, True, True)
        ax_map_im.ticklabel_format(axis='x', style='sci', scilimits=(-2, 2))
        ax_map_im.set_xlabel(labelx)
        ax_map_im.autoscale_view(True, True, True)
        # plt.tight_layout(pad=1, h_pad=2, w_pad=-7)
        cax_re, kw = colorbar.make_axes(ax_map_re, aspect=40)
        cax_im, kw = colorbar.make_axes(ax_map_im, aspect=40)
        ax_map_re.set_title(r"$\langle a^\dagger(t_1)a(t_2) \rangle$")
        # ax_map_im.set_title(r"$\langle a^\dagger(t_1)\rangle \langle a(t_2) "
        #                     r"\rangle$")
        ax_map_im.set_title(r"$\langle a^\dagger(t_1)a(t_2) \rangle$ - "
                            r"$\langle a^\dagger(t_1)\rangle \langle a(t_2) "
                            r"\rangle$")
        ax_map_re.grid(False)
        ax_map_im.grid(False)
        fig.canvas.set_window_title(self._name)
        return fig, (ax_map_re, ax_map_im), (cax_re, cax_im)

    def _plot(self, data):
        """Redraw both maps from the current correlator arrays."""
        # if (self.corr_avg is None) and (self.avg_corr is None):
        if (self.corr_avg is None) or (self.avg_corr is None):
            # nothing accumulated yet
            return
        ax_corr, ax_diff = self._axes
        cax_corr = self._caxes[0]
        cax_diff = self._caxes[1]
        # if self._XX is None:
        #     return
        XX, YY, corr, diff = self._prepare_data_for_plot()
        corr_max = np.max(corr)
        corr_min = np.min(corr)
        diff_max = np.max(diff)
        diff_min = np.min(diff)
        # NOTE(review): with np.meshgrid(time, time), XX[1, 0] equals
        # XX[0, 0], so this `step` is always 0; the time step along the
        # x-axis would be XX[0, 1] - XX[0, 0] -- confirm intent.
        step = XX[1, 0] - XX[0, 0]
        self.extent = (XX[0, 0] - 0.5 * step, XX[0, -1] + 0.5 * step,
                       YY[0, 0] - 0.5 * step, YY[-1, 0] + 0.5 * step)
        if self.extent[2] == self.extent[3]:
            # degenerate extent; skip drawing
            return
        corr_map = ax_corr.imshow(corr, origin='lower', cmap="RdBu_r",
                                  aspect='auto', vmax=corr_max,
                                  vmin=corr_min, extent=self.extent)
        cax_corr.cla()
        plt.colorbar(corr_map, cax=cax_corr)
        cax_corr.tick_params(axis='y', right=False, left=True,
                             labelleft=True, labelright=False, labelsize='10')

        diff_map = ax_diff.imshow(diff, origin='lower', cmap="RdBu_r",
                                  aspect='auto', vmax=diff_max,
                                  vmin=diff_min, extent=self.extent)
        cax_diff.cla()
        plt.colorbar(diff_map, cax=cax_diff)
        cax_diff.tick_params(axis='y', right=False, left=True,
                             labelleft=True, labelright=False,
                             labelsize='10')

    def _prepare_data_for_plot(self):
        """Build (XX, YY, corr, diff) arrays for imshow; time in ns."""
        time = np.linspace(0,
                           self.avg_corr.shape[0] / self.sample_rate * 1e9,
                           self.avg_corr.shape[0])
        self._XX, self._YY = np.meshgrid(time, time)
        return self._XX, self._YY, np.real(self.avg_corr),\
               np.real(self.avg_corr - self.corr_avg)
# np.real(self.corr_avg) | gpl-3.0 |
sirex/jsontableschema-pandas-py | tableschema_pandas/storage.py | 1 | 4592 | # -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import six
import collections
import tableschema
import pandas as pd
from .mapper import Mapper
# Module API
class Storage(tableschema.Storage):
    """Pandas storage

    Implements the Tabular Storage interface
    (https://github.com/frictionlessdata/tableschema-py#storage) on top of
    in-memory ``pandas.DataFrame`` objects, one dataframe per bucket.

    > Only additional API is documented

    # Arguments
        dataframes (object[]): list of storage dataframes

    """

    # Public

    def __init__(self, dataframes=None):
        # Set attributes
        self.__dataframes = dataframes or collections.OrderedDict()
        self.__descriptors = {}

        # Create mapper
        self.__mapper = Mapper()

    def __repr__(self):
        return 'Storage'

    def __getitem__(self, key):
        """Returns the Pandas dataframe stored under the given bucket name.

        # Arguments
            key (str): bucket name

        """
        return self.__dataframes[key]

    @property
    def buckets(self):
        # Sorted list of all existing bucket names.
        return list(sorted(self.__dataframes.keys()))

    def create(self, bucket, descriptor, force=False):
        """Create one or several buckets with the given descriptors.

        # Arguments
            bucket (str/str[]): bucket name(s)
            descriptor (dict/dict[]): Table Schema descriptor(s), parallel
                to ``bucket``
            force (bool): delete and re-create buckets that already exist

        # Raises
            tableschema.exceptions.StorageError: if a bucket already exists
                and ``force`` is not set
        """
        # Make lists
        buckets = bucket
        if isinstance(bucket, six.string_types):
            buckets = [bucket]
        descriptors = descriptor
        if isinstance(descriptor, dict):
            descriptors = [descriptor]

        # Check buckets for existence
        for bucket in buckets:
            if bucket in self.buckets:
                if not force:
                    message = 'Bucket "%s" already exists' % bucket
                    raise tableschema.exceptions.StorageError(message)
                self.delete(bucket)

        # Define dataframes (each bucket starts out empty)
        for bucket, descriptor in zip(buckets, descriptors):
            tableschema.validate(descriptor)
            self.__descriptors[bucket] = descriptor
            self.__dataframes[bucket] = pd.DataFrame()

    def delete(self, bucket=None, ignore=False):
        """Delete one, several, or (with ``bucket=None``) all buckets.

        # Arguments
            bucket (str/str[]/None): bucket name(s); None deletes everything
            ignore (bool): silently skip buckets that don't exist

        # Raises
            tableschema.exceptions.StorageError: if a bucket doesn't exist
                and ``ignore`` is not set
        """
        # Make lists
        buckets = bucket
        if isinstance(bucket, six.string_types):
            buckets = [bucket]
        elif bucket is None:
            buckets = reversed(self.buckets)

        # Iterate over buckets
        for bucket in buckets:

            # Non existent bucket
            if bucket not in self.buckets:
                if not ignore:
                    message = 'Bucket "%s" doesn\'t exist' % bucket
                    raise tableschema.exceptions.StorageError(message)
                # BUG FIX: this used to be `return`, which aborted deletion
                # of all remaining buckets as soon as one missing bucket was
                # encountered with ignore=True; skip just this bucket.
                continue

            # Remove from descriptors
            if bucket in self.__descriptors:
                del self.__descriptors[bucket]

            # Remove from dataframes
            if bucket in self.__dataframes:
                del self.__dataframes[bucket]

    def describe(self, bucket, descriptor=None):
        """Get or set a bucket's Table Schema descriptor.

        With ``descriptor`` given, it is stored for the bucket; otherwise
        the stored descriptor is returned, falling back to a descriptor
        inferred from the dataframe via the mapper.
        """
        # Set descriptor
        if descriptor is not None:
            self.__descriptors[bucket] = descriptor

        # Get descriptor
        else:
            descriptor = self.__descriptors.get(bucket)
            if descriptor is None:
                dataframe = self.__dataframes[bucket]
                descriptor = self.__mapper.restore_descriptor(dataframe)

        return descriptor

    def iter(self, bucket):
        """Yield the bucket's rows one by one, restored via the mapper
        (the dataframe index is passed along as primary key).

        # Raises
            tableschema.exceptions.StorageError: if the bucket doesn't exist
        """
        # Check existense
        if bucket not in self.buckets:
            message = 'Bucket "%s" doesn\'t exist.' % bucket
            raise tableschema.exceptions.StorageError(message)

        # Prepare
        descriptor = self.describe(bucket)
        schema = tableschema.Schema(descriptor)

        # Yield rows
        for pk, row in self.__dataframes[bucket].iterrows():
            row = self.__mapper.restore_row(row, schema=schema, pk=pk)
            yield row

    def read(self, bucket):
        """Read all of the bucket's rows into a list (see ``iter``)."""
        rows = list(self.iter(bucket))
        return rows

    def write(self, bucket, rows):
        """Append ``rows`` (conforming to the bucket's schema) to the
        bucket's dataframe.
        """
        # Prepare
        descriptor = self.describe(bucket)
        new_data_frame = self.__mapper.convert_descriptor_and_rows(descriptor, rows)

        # Just set new DataFrame if current is empty
        if self.__dataframes[bucket].size == 0:
            self.__dataframes[bucket] = new_data_frame

        # Append new data frame to the old one setting new data frame
        # containing data from both old and new data frames
        else:
            self.__dataframes[bucket] = pd.concat([
                self.__dataframes[bucket],
                new_data_frame,
            ])
| lgpl-3.0 |
js7558/pyBinance | tests/test-createOrder.py | 1 | 4354 | #!/usr/bin/python
import pandas as pd
import sys
sys.path.append('../')
from Binance import Binance
import logging.config
import logging.handlers
import logging
import os
# Python 2 manual test script: exercises Binance.createOrder input
# validation with a series of positive and negative parameter sets.
# NOTE(review): every case performs a live call through the Binance
# wrapper; results are printed/logged rather than asserted.

# this logging configuration is sketchy
binance = logging.getLogger(__name__)
logging.config.fileConfig('logging.ini')

# create Binance object
bn = Binance()

# set keys
# NOTE(review): these look like the well-known placeholder keys from the
# Binance REST API documentation, not real credentials -- confirm.
bn.setSecretKey('NhqPtmdSJYdKjVHjA7PZj4Mge3R5YNiP1e3UZjInClVN65XAbvqqM6A7H5fATj0j')
bn.setAPIKey('vmPUZE6mv9SD5VNHk4HlWFsOr6aKE2zvsw0MuIgwCIPy6utIco14y7Ju91duEh8A')

# createOrder
print "---------------- createOrder --------------"

# Cases expected to pass validation.
print "################################# POSITIVE TESTS (returns 1 or r) ###################"
queryParams = {'symbol':'SALTBTC','side':'BUY','type':'LIMIT','timeInForce':'GTC','quantity':1.0,'price':2.0}
print "****test valid mandatory inputs"
test = bn.createOrder(queryParams)
print
queryParams = {'symbol':'SALTBTC','side':'BUY','type':'LIMIT','timeInForce':'GTC','quantity':1.0,'price':2.0,'newClientOrderId':'123456778'}
print "****test valid mandatory inputs plus some optional"
test = bn.createOrder(queryParams)
print
queryParams = {'symbol':'SALTBTC','side':'BUY','type':'LIMIT','timeInForce':'GTC','quantity':1.0,'price':2.0,'newClientOrderId':'223456778'}
print "****test valid mandatory inputs plus some optional"
test = bn.createOrder(queryParams)
print
queryParams = {'stopPrice':3.0,'symbol':'SALTBTC','side':'BUY','type':'LIMIT','timeInForce':'GTC','quantity':1.0,'price':2.0,'newClientOrderId':'223456778'}
print "****test valid mandatory inputs plus some optional"
test = bn.createOrder(queryParams)
print
queryParams = {'icebergQty':10.5,'stopPrice':3.0,'symbol':'SALTBTC','side':'BUY','type':'LIMIT','timeInForce':'GTC','quantity':1.0,'price':2.0,'newClientOrderId':'223456778'}
print "****test valid mandatory inputs plus some optional"
test = bn.createOrder(queryParams)

# Cases expected to be rejected by validation (missing/invalid fields).
print "################################# NEGATIVE TESTS (returns 0) ###################"
print
queryParams = {'symbol':'SALTBTC','side':'BUY','type':'LIMIT','timeInForce':'GTC','quantity':1.0}
print "****test valid mandatory inputs, valid parameter missing"
test = bn.createOrder(queryParams)
print
queryParams = {'symbol':'SALTBTC','side':'BUY','type':'LIMIT','timeInForce':'GTC','price':1.0}
print "****test valid mandatory inputs, valid parameter missing"
test = bn.createOrder(queryParams)
print
queryParams = {'symbol':'SALTBTC','side':5,'type':'LIMIT','timeInForce':'GTC','quantity':1.0,'price':2.0}
print "****test valid mandatory inputs present with invalid type"
test = bn.createOrder(queryParams)
print
queryParams = {'symbol':'SALTBTC','side':'BUY','type':'LIMIT','timeInForce':'GTC','quantity':12,'price':2.0}
print "****test valid mandatory inputs present with invalid type"
test = bn.createOrder(queryParams)
print
queryParams = {'symbol':'SALTBTC','side':'DUCK','type':'LIMIT','timeInForce':'GTC','quantity':13.0,'price':2.0}
print "****test valid mandatory inputs present with valid type but not in enum"
test = bn.createOrder(queryParams)
print
queryParams = {'symbol':'FAKEBTC','side':'BUY','type':'LIMIT','timeInForce':'GTC','quantity':13.0,'price':2.0}
print "****test valid mandatory inputs present with valid type but not in enum"
test = bn.createOrder(queryParams)
print
queryParams = {'symbol':'ETHBTC','side':'BUY','type':'BANANA','timeInForce':'GTC','quantity':13.0,'price':2.0}
print "****test valid mandatory inputs present with valid type but not in enum"
test = bn.createOrder(queryParams)
print
queryParams = {'symbol':'ETHBTC','side':'BUY','type':'MARKET','timeInForce':'DARTH VADER','quantity':13.0,'price':2.0}
print "****test valid mandatory inputs present with valid type but not in enum"
test = bn.createOrder(queryParams)
print
queryParams = {'symbol':'SALTBTC','side':'BUY','type':'LIMIT','timeInForce':'GTC','quantity':1.0,'price':2.0,'newClientOrderId':'123456778','timestamp':150774295}
print "****test valid mandatory inputs, invalid user proved timestamp, plus some optional"
test = bn.createOrder(queryParams)
print
queryParams = {'symbol':'SALTBTC','side':'BUY','type':'LIMIT','timeInForce':'GTC','quantity':1.0,'price':2.0,'newClientOrderId':'123456778','timestamp':'abcdefghijklm'}
print "****test valid mandatory inputs, invalid user proved timestamp type but length ok, plus some optional"
test = bn.createOrder(queryParams)
print
| mit |
iZehan/spatial-pbs | setup.py | 1 | 4732 | """
Created on 21 May 2014
@author: Zehan Wang
Copyright (C) Zehan Wang 2011-2014 and onwards
Library for patch-based segmentation with spatial context. (Spatially Aware Patch-based Segmentation)
"""
import sys
import os
import shutil
from distutils.command.clean import clean as Clean
# Package metadata consumed by setup_package() below.
DISTNAME = 'spatch'
DESCRIPTION = 'Library for patch-based segmentation with spatial context. (Spatially Aware Patch-based Segmentation)'
MAINTAINER = 'Zehan Wang'
MAINTAINER_EMAIL = 'zehan.wang06@imperial.ac.uk'
LICENSE = 'new BSD'

# We can actually import a restricted version of sklearn that
# does not need the compiled code
# NOTE(review): comment inherited from scikit-learn's setup.py; what is
# actually imported here is `spatch` (for its version string).
import spatch

VERSION = spatch.__version__

###############################################################################
# Optional setuptools features
# We need to import setuptools early, if we want setuptools features,
# as it monkey-patches the 'setup' function

# For some commands, use setuptools
if len(set(('develop', 'release', 'bdist_egg', 'bdist_rpm',
            'bdist_wininst', 'install_egg_info', 'build_sphinx',
            'egg_info', 'easy_install', 'upload', 'bdist_wheel',
            '--single-version-externally-managed')).intersection(sys.argv)) > 0:
    extra_setuptools_args = dict(
        zip_safe=False,  # the package can run out of an .egg file
        include_package_data=True)
else:
    extra_setuptools_args = dict()
###############################################################################
class CleanCommand(Clean):
    """Custom ``python setup.py clean``: on top of the stock behaviour,
    removes the ``build/`` directory and any compiled artifacts
    (extension modules, bytecode, ``__pycache__``) under ``spatch``.
    """

    description = "Remove build directories, and compiled file in the source tree"

    def run(self):
        Clean.run(self)
        if os.path.exists('build'):
            shutil.rmtree('build')
        compiled_suffixes = ('.so', '.pyd', '.dll', '.pyc')
        for dirpath, dirnames, filenames in os.walk('spatch'):
            for fname in filenames:
                if fname.endswith(compiled_suffixes):
                    os.unlink(os.path.join(dirpath, fname))
            for dname in dirnames:
                if dname == '__pycache__':
                    shutil.rmtree(os.path.join(dirpath, dname))
###############################################################################
def configuration(parent_package='', top_path=None):
    """numpy.distutils configuration hook.

    Registers the `spatch` subpackage so its own setup.py files are
    picked up during the build.
    """
    if os.path.exists('MANIFEST'):
        # stale MANIFEST files can shadow MANIFEST.in; remove before build
        os.remove('MANIFEST')

    from numpy.distutils.misc_util import Configuration
    config = Configuration(None, parent_package, top_path)

    # Avoid non-useful msg:
    # "Ignoring attempt to set 'name' (from ... "
    config.set_options(ignore_setup_xxx_py=True,
                       assume_default_configuration=True,
                       delegate_options_to_subpackages=True,
                       quiet=True)

    config.add_subpackage('spatch')

    return config
def setup_package():
    """Assemble the metadata kwargs and invoke setup().

    For informational commands (--help, --version, egg_info, clean) the
    plain setuptools/distutils setup() is used so that NumPy need not be
    installed; for real builds numpy.distutils.core.setup is used with the
    `configuration` hook.
    """
    # NOTE(review): `extra_setuptools_args` computed at module level is
    # never merged into this metadata dict -- confirm whether that is
    # intentional (scikit-learn's setup.py, which this mirrors, passes it).
    metadata = dict(name=DISTNAME,
                    maintainer=MAINTAINER,
                    maintainer_email=MAINTAINER_EMAIL,
                    description=DESCRIPTION,
                    license=LICENSE,
                    version=VERSION,
                    classifiers=['Intended Audience :: Science/Research',
                                 'Intended Audience :: Developers',
                                 'License :: OSI Approved',
                                 'Programming Language :: C',
                                 'Programming Language :: Python',
                                 'Topic :: Software Development',
                                 'Topic :: Scientific/Engineering',
                                 'Operating System :: Microsoft :: Windows',
                                 'Operating System :: POSIX',
                                 'Operating System :: Unix',
                                 'Operating System :: MacOS',
                                 'Programming Language :: Python :: 2',
                                 'Programming Language :: Python :: 2.7'],
                    cmdclass={'clean': CleanCommand})

    if (len(sys.argv) >= 2
            and ('--help' in sys.argv[1:] or sys.argv[1]
                 in ('--help-commands', 'egg_info', '--version', 'clean'))):
        # For these actions, NumPy is not required.
        #
        # They are required to succeed without Numpy for example when
        # pip is used to install Scikit when Numpy is not yet present in
        # the system.
        try:
            from setuptools import setup
        except ImportError:
            from distutils.core import setup

        metadata['version'] = VERSION
    else:
        from numpy.distutils.core import setup

        metadata['configuration'] = configuration

    setup(**metadata)


if __name__ == "__main__":
    setup_package()
| bsd-3-clause |
rexthompson/axwx | axwx/wu_metadata_scraping.py | 1 | 5613 | """
Weather Underground PWS Metadata Scraping Module
Code to scrape PWS network metadata
"""
import pandas as pd
import urllib3
from bs4 import BeautifulSoup as BS
import numpy as np
import requests
# import time
def scrape_station_info(state="WA"):
    """
    A script to scrape the station information published at the following URL:
    https://www.wunderground.com/weatherstation/ListStations.asp?
    selectedState=WA&selectedCountry=United+States&MR=1

    Writes the scraped table to ./data/station_data_from_FUN.csv.

    :param state: US State by which to subset WU Station table
    :return: None -- NOTE(review): despite the original docstring,
        DataFrame.to_csv called with a path returns None, so this function
        returns None; the data lands in the csv file.
    """
    url = "https://www.wunderground.com/" \
          "weatherstation/ListStations.asp?selectedState=" \
          + state + "&selectedCountry=United+States&MR=1"
    raw_site_content = requests.get(url).content
    soup = BS(raw_site_content, 'html.parser')
    # each <tr> is one station row in the listing table
    list_stations_info = soup.find_all("tr")
    # first row of the accumulator array holds the column names
    all_station_info = np.array(['id', 'neighborhood', 'city', 'type', 'lat',
                                 'lon', 'elevation'])
    for i in range(1, len(list_stations_info)):  # start at 1 to omit headers
        station_info = str(list_stations_info[i]).splitlines()
        # pull out station info (positional parsing of the raw HTML lines)
        station_id = station_info[1].split('ID=')[1].split('"')[0]
        station_neighborhood = station_info[2].split('<td>')[1]
        station_neighborhood = station_neighborhood.split('\xa0')[0]
        station_city = station_info[3].split('<td>')[1].split('\xa0')[0]
        station_type = station_info[4].split('station-type">')[1]
        station_type = station_type.split('\xa0')[0]
        station_id = station_id.strip()
        station_neighborhood = station_neighborhood.strip()
        station_city = station_city.strip()
        station_type = station_type.strip()
        # grab the latitude, longitude, and elevation metadata
        # (one extra HTTP request per station)
        lat, lon, elev = scrape_lat_lon_fly(station_id)
        # put all data into an array
        header = [station_id, station_neighborhood, station_city, station_type,
                  lat, lon, elev]
        head_len = len(header)  # NOTE(review): unused
        # NOTE(review): vstack inside the loop is O(n^2); consider
        # collecting rows in a list and stacking once.
        all_station_info = np.vstack([all_station_info, header])
    all_station_info = pd.DataFrame(all_station_info)
    # promote the first (header) row to column names
    all_station_info.columns = all_station_info.ix[0, :]
    # NOTE(review): DataFrame.ix was removed in pandas >= 1.0; this only
    # runs on old pandas -- confirm pinned version.
    # do some dataframe editing
    all_station_info = all_station_info.drop(all_station_info
                                             .index[0]).reset_index()
    all_station_info = all_station_info.drop(all_station_info.columns[0],
                                             axis=1)
    return(all_station_info.to_csv('./data/station_data_from_FUN.csv'))
def scrape_lat_lon_fly(stationID):
    """
    Fetch latitude, longitude and elevation metadata for one Weather
    Underground personal weather station.

    :param stationID: str
        a unique identifier for the weather underground personal
        weather station
    :return: (latitude, longitude, elevation) tuple of strings, or
        ('NA', 'NA', 'NA') if the lookup fails for any reason.
    """
    pool = urllib3.PoolManager(maxsize=10, block=True,
                               cert_reqs='CERT_REQUIRED')
    try:
        url = ('https://api.wunderground.com/weatherstation/'
               'WXDailyHistory.asp?ID={0}&format=XML'.format(stationID))
        response = pool.request('GET', url, preload_content=False)
        parsed = BS(response, 'xml')
        latitude = parsed.find_all('latitude')[0].get_text()
        longitude = parsed.find_all('longitude')[0].get_text()
        elevation = parsed.find_all('elevation')[0].get_text()
        return(latitude, longitude, elevation)
    except Exception:
        # best-effort lookup: any failure yields placeholder values
        return('NA', 'NA', 'NA')
def subset_stations_by_coords(station_data, lat_range, lon_range):
    """
    Subset station metadata by latitude and longitude.

    :param station_data: str or Pandas.DataFrame
        filename of csv with station metadata (from scrape_lat_lon)
        or
        Pandas.DataFrame with station metadata (from scrape_lat_lon)
    :param lat_range: 2-element list
        min and max latitude range, e.g. [47.4, 47.8] (order-insensitive;
        the input list is not modified)
    :param lon_range: 2-element list
        min and max longitude range, e.g. [-122.5, -122.2]
    :raises TypeError: if station_data is neither a filename (str) nor a
        DataFrame (previously this fell through and produced a confusing
        NameError -- resolves the in-code TODO)
    :return: pandas.DataFrame with station metadata subset by lat/lon bounds
    """
    # sort copies so the caller's lists are left untouched
    lat_lo, lat_hi = sorted(lat_range)
    lon_lo, lon_hi = sorted(lon_range)
    if isinstance(station_data, str):
        df = pd.read_csv(station_data, index_col=1)
        # rows without coordinates cannot be placed in the box
        df = df.dropna(subset=["Latitude", "Longitude"])
    elif isinstance(station_data, pd.DataFrame):
        df = station_data
    else:
        raise TypeError(
            "station_data must be a csv filename (str) or a "
            "pandas.DataFrame, got %r" % type(station_data).__name__)
    df = df[(df["Latitude"] >= lat_lo) &
            (df["Latitude"] <= lat_hi) &
            (df["Longitude"] >= lon_lo) &
            (df["Longitude"] <= lon_hi)]
    return df
def get_station_ids_by_coords(station_data_csv, lat_range, lon_range):
    """
    Thin wrapper around subset_stations_by_coords that returns only the
    IDs of the stations falling inside the bounding box.

    :param station_data_csv: str
        filename of csv with station metadata (from scrape_lat_lon)
    :param lat_range: 2-element list
        min and max latitude range, e.g. [47.4, 47.8]
    :param lon_range: 2-element list
        min and max longitude range, e.g. [-122.5, -122.2]
    :return: list of station IDs (strings)
    """
    subset = subset_stations_by_coords(station_data_csv, lat_range, lon_range)
    return subset.index.tolist()
# TESTING
# station_data_csv = "data/station_data.csv"
# lat_range = [47.4, 47.8]
# lon_range = [-122.5, -122.2]
# print(get_station_ids_by_coords(station_data_csv, lat_range, lon_range))
| mit |
deepfield/ibis | ibis/pandas/execution/tests/test_cast.py | 1 | 3838 | import pytest
import decimal
import pandas as pd
import pandas.util.testing as tm # noqa: E402
import ibis.expr.datatypes as dt # noqa: E402
import ibis
pytestmark = pytest.mark.pandas
@pytest.mark.parametrize('from_', ['plain_float64', 'plain_int64'])
@pytest.mark.parametrize(
    ('to', 'expected'),
    [
        ('double', 'float64'),
        ('float', 'float32'),
        ('int8', 'int8'),
        ('int16', 'int16'),
        ('int32', 'int32'),
        ('int64', 'int64'),
        ('string', 'object'),
    ],
)
def test_cast_numeric(t, df, from_, to, expected):
    # Executing a numeric cast must yield a pandas series whose dtype
    # matches the requested ibis type.
    result = t[from_].cast(to).execute()
    assert str(result.dtype) == expected
@pytest.mark.parametrize('from_', ['float64_as_strings', 'int64_as_strings'])
@pytest.mark.parametrize(
    ('to', 'expected'),
    [
        ('double', 'float64'),
        ('string', 'object'),
    ]
)
def test_cast_string(t, df, from_, to, expected):
    # String columns holding numbers should round-trip through casts to
    # numeric or back to string with the expected pandas dtype.
    result = t[from_].cast(to).execute()
    assert str(result.dtype) == expected
@pytest.mark.parametrize(
    ('to', 'expected'),
    [
        ('string', 'object'),
        ('int64', 'int64'),
        pytest.mark.xfail(('double', 'float64'), raises=TypeError),
        (
            dt.Timestamp('America/Los_Angeles'),
            'datetime64[ns, America/Los_Angeles]'
        ),
        (
            "timestamp('America/Los_Angeles')",
            'datetime64[ns, America/Los_Angeles]'
        )
    ]
)
@pytest.mark.parametrize(
    'column',
    [
        'plain_datetimes_naive',
        'plain_datetimes_ny',
        'plain_datetimes_utc',
    ]
)
def test_cast_timestamp_column(t, df, column, to, expected):
    # Casting timestamp columns (naive and tz-aware) must produce the
    # expected pandas dtype, including tz-aware datetime64 dtypes.
    result = t[column].cast(to).execute()
    assert str(result.dtype) == expected
@pytest.mark.parametrize(
    ('to', 'expected'),
    [
        ('string', str),
        ('int64', lambda x: x.value),
        pytest.mark.xfail(('double', float), raises=NotImplementedError),
        (
            dt.Timestamp('America/Los_Angeles'),
            lambda x: pd.Timestamp(x, tz='America/Los_Angeles')
        )
    ]
)
@pytest.mark.parametrize('tz', [None, 'UTC', 'America/New_York'])
def test_cast_timestamp_scalar(to, expected, tz):
    # Build the literal once so both executions observe the same instant.
    literal_expr = ibis.literal(pd.Timestamp('now', tz=tz))
    raw = ibis.pandas.execute(literal_expr)
    result = ibis.pandas.execute(literal_expr.cast(to))
    # The cast scalar must equal the reference transformation of the raw
    # uncast value.
    assert result == expected(raw)
def test_timestamp_with_timezone_is_inferred_correctly(t, df):
    # Naive datetimes infer as plain timestamp; tz-aware columns must
    # carry their zone through ibis type inference.
    expected_types = {
        'plain_datetimes_naive': dt.timestamp,
        'plain_datetimes_ny': dt.Timestamp('America/New_York'),
        'plain_datetimes_utc': dt.Timestamp('UTC'),
    }
    for column, expected in expected_types.items():
        assert t[column].type().equals(expected)
@pytest.mark.parametrize(
    'column',
    [
        'plain_datetimes_naive',
        'plain_datetimes_ny',
        'plain_datetimes_utc',
    ]
)
def test_cast_date(t, df, column):
    # Casting to date should behave like pandas' normalize(), which zeroes
    # out the time-of-day component while preserving the timezone.
    result = t[column].cast('date').execute()
    tm.assert_series_equal(result, df[column].dt.normalize())
@pytest.mark.parametrize('type', [dt.Decimal(9, 0), dt.Decimal(12, 3)])
def test_cast_to_decimal(t, df, type):
    result = t.float64_as_strings.cast(type).execute()
    context = decimal.Context(prec=type.precision)
    # Quantize against e.g. '000000000.000' so every expected value carries
    # exactly `scale` fractional digits within `precision` total digits.
    quantum = decimal.Decimal(
        '{}.{}'.format('0' * (type.precision - type.scale),
                       '0' * type.scale))
    expected = df.float64_as_strings.apply(
        lambda x: context.create_decimal(x).quantize(quantum))
    tm.assert_series_equal(result, expected)
    for element in result.values:
        parts = element.as_tuple()
        assert abs(parts.exponent) == type.scale
        assert 1 <= len(parts.digits) <= type.precision
| apache-2.0 |
amandalund/openmc | openmc/plotter.py | 6 | 38919 | from itertools import chain
from numbers import Integral, Real
import string
import numpy as np
import openmc.checkvalue as cv
import openmc.data
# Supported keywords for continuous-energy cross section plotting
PLOT_TYPES = ['total', 'scatter', 'elastic', 'inelastic', 'fission',
              'absorption', 'capture', 'nu-fission', 'nu-scatter', 'unity',
              'slowing-down power', 'damage']
# Supported keywords for multi-group cross section plotting.
# NOTE: 'delayed-nu-fission' fixes the previous 'deleyed-nu-fission' typo,
# which mapped (via _PLOT_MGXS_ATTR) to a nonexistent 'deleyed_nu_fission'
# XSdata attribute and therefore could never be plotted successfully.
PLOT_TYPES_MGXS = ['total', 'absorption', 'scatter', 'fission',
                   'kappa-fission', 'nu-fission', 'prompt-nu-fission',
                   'delayed-nu-fission', 'chi', 'chi-prompt', 'chi-delayed',
                   'inverse-velocity', 'beta', 'decay-rate', 'unity']
# Create a dictionary which can be used to convert PLOT_TYPES_MGXS to the
# openmc.XSdata attribute name needed to access the data
_PLOT_MGXS_ATTR = {line: line.replace(' ', '_').replace('-', '_')
                   for line in PLOT_TYPES_MGXS}
_PLOT_MGXS_ATTR['scatter'] = 'scatter_matrix'
# Special MT values used internally to flag non-reaction quantities
UNITY_MT = -1
XI_MT = -2
# MTs to combine to generate associated plot_types
_INELASTIC = [mt for mt in openmc.data.SUM_RULES[3] if mt != 27]
PLOT_TYPES_MT = {
    'total': openmc.data.SUM_RULES[1],
    'scatter': [2] + _INELASTIC,
    'elastic': [2],
    'inelastic': _INELASTIC,
    'fission': [18],
    'absorption': [27],
    'capture': [101],
    'nu-fission': [18],
    'nu-scatter': [2] + _INELASTIC,
    'unity': [UNITY_MT],
    'slowing-down power': [2] + [XI_MT],
    'damage': [444]
}
# Types of plots to plot linearly in y
PLOT_TYPES_LINEAR = {'nu-fission / fission', 'nu-scatter / scatter',
                     'nu-fission / absorption', 'fission / absorption'}
# Minimum and maximum energies for plotting (units of eV)
_MIN_E = 1.e-5
_MAX_E = 20.e6
def plot_xs(this, types, divisor_types=None, temperature=294., data_type=None,
            axis=None, sab_name=None, ce_cross_sections=None,
            mg_cross_sections=None, enrichment=None, plot_CE=True, orders=None,
            divisor_orders=None, **kwargs):
    """Creates a figure of continuous-energy cross sections for this item.

    Parameters
    ----------
    this : str or openmc.Material
        Object to source data from
    types : Iterable of values of PLOT_TYPES
        The type of cross sections to include in the plot.
    divisor_types : Iterable of values of PLOT_TYPES, optional
        Cross section types which will divide those produced by types
        before plotting. A type of 'unity' can be used to effectively not
        divide some types.
    temperature : float, optional
        Temperature in Kelvin to plot. If not specified, a default
        temperature of 294K will be plotted. Note that the nearest
        temperature in the library for each nuclide will be used as opposed
        to using any interpolation.
    data_type : {'nuclide', 'element', 'material', 'macroscopic'}, optional
        Type of object to plot. If not specified, a guess is made based on the
        `this` argument.
    axis : matplotlib.axes, optional
        A previously generated axis to use for plotting. If not specified,
        a new axis and figure will be generated.
    sab_name : str, optional
        Name of S(a,b) library to apply to MT=2 data when applicable; only used
        for items which are instances of openmc.Element or openmc.Nuclide
    ce_cross_sections : str, optional
        Location of cross_sections.xml file. Default is None.
    mg_cross_sections : str, optional
        Location of MGXS HDF5 Library file. Default is None.
    enrichment : float, optional
        Enrichment for U235 in weight percent. For example, input 4.95 for
        4.95 weight percent enriched U. Default is None. This is only used for
        items which are instances of openmc.Element
    plot_CE : bool, optional
        Denotes whether or not continuous-energy will be plotted. Defaults to
        plotting the continuous-energy data.
    orders : Iterable of Integral, optional
        The scattering order or delayed group index to use for the
        corresponding entry in types. Defaults to the 0th order for scattering
        and the total delayed neutron data. This only applies to plots of
        multi-group data.
    divisor_orders : Iterable of Integral, optional
        Same as orders, but for divisor_types
    **kwargs
        All keyword arguments are passed to
        :func:`matplotlib.pyplot.figure`.

    Returns
    -------
    fig : matplotlib.figure.Figure
        If axis is None, then a Matplotlib Figure of the generated
        cross section will be returned. Otherwise, a value of
        None will be returned as the figure and axes have already been
        generated.
    """
    import matplotlib.pyplot as plt

    cv.check_type("plot_CE", plot_CE, bool)

    if data_type is None:
        # Infer the data type from the object that was passed in
        if isinstance(this, openmc.Nuclide):
            data_type = 'nuclide'
        elif isinstance(this, openmc.Element):
            data_type = 'element'
        elif isinstance(this, openmc.Material):
            data_type = 'material'
        elif isinstance(this, openmc.Macroscopic):
            data_type = 'macroscopic'
        elif isinstance(this, str):
            # Strings ending in a digit look like nuclide names (e.g. U235)
            if this[-1] in string.digits:
                data_type = 'nuclide'
            else:
                data_type = 'element'
        else:
            raise TypeError("Invalid type for plotting")

    if plot_CE:
        # Calculate for the CE cross sections
        E, data = calculate_cexs(this, data_type, types, temperature, sab_name,
                                 ce_cross_sections, enrichment)
        if divisor_types:
            cv.check_length('divisor types', divisor_types, len(types))
            # BUG FIX: data_type must be forwarded here; it was previously
            # omitted so divisor_types was silently consumed as the
            # data_type argument and every later argument shifted by one.
            Ediv, data_div = calculate_cexs(this, data_type, divisor_types,
                                            temperature, sab_name,
                                            ce_cross_sections, enrichment)
            # Create a new union grid, interpolate data and data_div on to that
            # grid, and then do the actual division
            Enum = E[:]
            E = np.union1d(Enum, Ediv)
            data_new = np.zeros((len(types), len(E)))
            for line in range(len(types)):
                data_new[line, :] = \
                    np.divide(np.interp(E, Enum, data[line, :]),
                              np.interp(E, Ediv, data_div[line, :]))
                if divisor_types[line] != 'unity':
                    types[line] = types[line] + ' / ' + divisor_types[line]
            data = data_new
    else:
        # Calculate for MG cross sections
        E, data = calculate_mgxs(this, data_type, types, orders, temperature,
                                 mg_cross_sections, ce_cross_sections,
                                 enrichment)
        if divisor_types:
            cv.check_length('divisor types', divisor_types, len(types))
            Ediv, data_div = calculate_mgxs(this, data_type, divisor_types,
                                            divisor_orders, temperature,
                                            mg_cross_sections,
                                            ce_cross_sections, enrichment)
            # MG data shares one group structure, so divide point-wise
            for line in range(len(types)):
                data[line, :] /= data_div[line, :]
                if divisor_types[line] != 'unity':
                    types[line] += ' / ' + divisor_types[line]

    # Generate the plot
    if axis is None:
        fig, ax = plt.subplots()
    else:
        fig = None
        ax = axis
    # Set to loglog or semilogx depending on if we are plotting a data
    # type which we expect to vary linearly
    if set(types).issubset(PLOT_TYPES_LINEAR):
        plot_func = ax.semilogx
    else:
        plot_func = ax.loglog

    # Plot the data
    for i in range(len(data)):
        data[i, :] = np.nan_to_num(data[i, :])
        # Skip lines that are identically zero (e.g. fission on a
        # non-fissionable nuclide) so they do not pollute the legend
        if np.sum(data[i, :]) > 0.:
            plot_func(E, data[i, :], label=types[i])

    ax.set_xlabel('Energy [eV]')
    if plot_CE:
        ax.set_xlim(_MIN_E, _MAX_E)
    else:
        # MG energy grids are stored high-to-low
        ax.set_xlim(E[-1], E[0])
    if divisor_types:
        if data_type == 'nuclide':
            ylabel = 'Nuclidic Microscopic Data'
        elif data_type == 'element':
            ylabel = 'Elemental Microscopic Data'
        elif data_type == 'material' or data_type == 'macroscopic':
            ylabel = 'Macroscopic Data'
    else:
        if data_type == 'nuclide':
            ylabel = 'Microscopic Cross Section [b]'
        elif data_type == 'element':
            ylabel = 'Elemental Cross Section [b]'
        elif data_type == 'material' or data_type == 'macroscopic':
            ylabel = 'Macroscopic Cross Section [1/cm]'
    ax.set_ylabel(ylabel)
    ax.legend(loc='best')
    name = this.name if data_type == 'material' else this
    if len(types) > 1:
        ax.set_title('Cross Sections for ' + name)
    else:
        ax.set_title('Cross Section for ' + name)

    return fig
def calculate_cexs(this, data_type, types, temperature=294., sab_name=None,
                   cross_sections=None, enrichment=None):
    """Calculate continuous-energy cross sections of the requested types.

    Parameters
    ----------
    this : {str, openmc.Nuclide, openmc.Element, openmc.Material}
        Object to source data from
    data_type : {'nuclide', 'element', 'material'}
        Type of object to plot
    types : Iterable of values of PLOT_TYPES
        The type of cross sections to calculate
    temperature : float, optional
        Temperature in Kelvin; the nearest temperature in the library is
        used for each nuclide (no interpolation). Defaults to 294 K.
    sab_name : str, optional
        Name of S(a,b) library to apply to MT=2 data when applicable.
    cross_sections : str, optional
        Location of cross_sections.xml file. Default is None.
    enrichment : float, optional
        Enrichment for U235 in weight percent, e.g. 4.95. Default is None
        (natural composition).

    Returns
    -------
    energy_grid : numpy.ndarray
        Energies at which cross sections are calculated, in units of eV
    data : numpy.ndarray
        Cross sections calculated at the energy grid described by energy_grid
    """
    # Validate the arguments shared by every dispatch path
    cv.check_type('temperature', temperature, Real)
    if sab_name:
        cv.check_type('sab_name', sab_name, str)
    if enrichment:
        cv.check_type('enrichment', enrichment, Real)

    if data_type == 'nuclide':
        nuc = openmc.Nuclide(this) if isinstance(this, str) else this
        energy_grid, xs = _calculate_cexs_nuclide(nuc, types, temperature,
                                                  sab_name, cross_sections)
        # The nuclide helper returns callables; evaluate each one on the
        # common energy grid so the return format matches the element and
        # material paths.
        data = np.zeros((len(types), len(energy_grid)))
        for i, xs_func in enumerate(xs):
            data[i, :] = xs_func(energy_grid)
    elif data_type == 'element':
        elem = openmc.Element(this) if isinstance(this, str) else this
        energy_grid, data = _calculate_cexs_elem_mat(elem, types, temperature,
                                                     cross_sections, sab_name,
                                                     enrichment)
    elif data_type == 'material':
        cv.check_type('this', this, openmc.Material)
        energy_grid, data = _calculate_cexs_elem_mat(this, types, temperature,
                                                     cross_sections)
    else:
        raise TypeError("Invalid type")

    return energy_grid, data
def _calculate_cexs_nuclide(this, types, temperature=294., sab_name=None,
                            cross_sections=None):
    """Calculates continuous-energy cross sections of a requested type.
    Parameters
    ----------
    this : openmc.Nuclide
        Nuclide object to source data from
    types : Iterable of str or Integral
        The type of cross sections to calculate; values can either be those
        in openmc.PLOT_TYPES or keys from openmc.data.REACTION_MT which
        correspond to a reaction description e.g '(n,2n)' or integers which
        correspond to reaction channel (MT) numbers.
    temperature : float, optional
        Temperature in Kelvin to plot. If not specified, a default
        temperature of 294K will be plotted. Note that the nearest
        temperature in the library for each nuclide will be used as opposed
        to using any interpolation.
    sab_name : str, optional
        Name of S(a,b) library to apply to MT=2 data when applicable.
    cross_sections : str, optional
        Location of cross_sections.xml file. Default is None.
    Returns
    -------
    energy_grid : numpy.ndarray
        Energies at which cross sections are calculated, in units of eV
    data : Iterable of Callable
        Requested cross section functions
    """
    # Load the library
    library = openmc.data.DataLibrary.from_xml(cross_sections)
    # Convert temperature to format needed for access in the library
    strT = "{}K".format(int(round(temperature)))
    T = temperature
    # Now we can create the data sets to be plotted
    energy_grid = []
    xs = []
    # lib is None when the nuclide is absent from cross_sections.xml
    lib = library.get_by_material(this)
    if lib is not None:
        nuc = openmc.data.IncidentNeutron.from_hdf5(lib['path'])
        # Obtain the nearest temperature
        if strT in nuc.temperatures:
            nucT = strT
        else:
            delta_T = np.array(nuc.kTs) - T * openmc.data.K_BOLTZMANN
            closest_index = np.argmin(np.abs(delta_T))
            nucT = nuc.temperatures[closest_index]
        # Prep S(a,b) data if needed
        if sab_name:
            sab = openmc.data.ThermalScattering.from_hdf5(sab_name)
            # Obtain the nearest temperature
            if strT in sab.temperatures:
                sabT = strT
            else:
                delta_T = np.array(sab.kTs) - T * openmc.data.K_BOLTZMANN
                closest_index = np.argmin(np.abs(delta_T))
                sabT = sab.temperatures[closest_index]
            # Create an energy grid composed the S(a,b) and the nuclide's grid
            grid = nuc.energy[nucT]
            # sab_Emax tracks the upper energy limit of the thermal data so
            # that MT=2 can be split piece-wise at that boundary below
            sab_Emax = 0.
            sab_funcs = []
            if sab.elastic is not None:
                elastic = sab.elastic.xs[sabT]
                if isinstance(elastic, openmc.data.CoherentElastic):
                    grid = np.union1d(grid, elastic.bragg_edges)
                    if elastic.bragg_edges[-1] > sab_Emax:
                        sab_Emax = elastic.bragg_edges[-1]
                elif isinstance(elastic, openmc.data.Tabulated1D):
                    grid = np.union1d(grid, elastic.x)
                    if elastic.x[-1] > sab_Emax:
                        sab_Emax = elastic.x[-1]
                sab_funcs.append(elastic)
            if sab.inelastic is not None:
                inelastic = sab.inelastic.xs[sabT]
                grid = np.union1d(grid, inelastic.x)
                if inelastic.x[-1] > sab_Emax:
                    sab_Emax = inelastic.x[-1]
                sab_funcs.append(inelastic)
            energy_grid = grid
        else:
            energy_grid = nuc.energy[nucT]
        # Parse the types: for each requested type build the list of MT
        # numbers to combine (mts), the operators used to combine them
        # (ops), and whether the reaction is weighted by its neutron
        # yield (yields)
        mts = []
        ops = []
        yields = []
        for line in types:
            if line in PLOT_TYPES:
                tmp_mts = [mtj for mti in PLOT_TYPES_MT[line] for mtj in
                           nuc.get_reaction_components(mti)]
                mts.append(tmp_mts)
                if line.startswith('nu'):
                    yields.append(True)
                else:
                    yields.append(False)
                if XI_MT in tmp_mts:
                    # slowing-down power: sum the scatter components, then
                    # multiply by the average lethargy gain (xi)
                    ops.append((np.add,) * (len(tmp_mts) - 2) + (np.multiply,))
                else:
                    ops.append((np.add,) * (len(tmp_mts) - 1))
            elif line in openmc.data.REACTION_MT:
                # Reaction name like '(n,2n)'; resolve it to its MT number
                mt_number = openmc.data.REACTION_MT[line]
                cv.check_type('MT in types', mt_number, Integral)
                cv.check_greater_than('MT in types', mt_number, 0)
                tmp_mts = nuc.get_reaction_components(mt_number)
                mts.append(tmp_mts)
                ops.append((np.add,) * (len(tmp_mts) - 1))
                yields.append(False)
            elif isinstance(line, int):
                # Not a built-in type, we have to parse it ourselves
                cv.check_type('MT in types', line, Integral)
                cv.check_greater_than('MT in types', line, 0)
                tmp_mts = nuc.get_reaction_components(line)
                mts.append(tmp_mts)
                ops.append((np.add,) * (len(tmp_mts) - 1))
                yields.append(False)
            else:
                raise TypeError("Invalid type", line)
        # Build one callable per requested type by combining the per-MT
        # cross section functions with the operators chosen above
        for i, mt_set in enumerate(mts):
            # Get the reaction xs data from the nuclide
            funcs = []
            op = ops[i]
            for mt in mt_set:
                if mt == 2:
                    if sab_name:
                        # Then we need to do a piece-wise function of
                        # The S(a,b) and non-thermal data
                        sab_sum = openmc.data.Sum(sab_funcs)
                        pw_funcs = openmc.data.Regions1D(
                            [sab_sum, nuc[mt].xs[nucT]],
                            [sab_Emax])
                        funcs.append(pw_funcs)
                    else:
                        funcs.append(nuc[mt].xs[nucT])
                elif mt in nuc:
                    if yields[i]:
                        # Get the total yield first if available. This will be
                        # used primarily for fission.
                        for prod in chain(nuc[mt].products,
                                          nuc[mt].derived_products):
                            if prod.particle == 'neutron' and \
                                prod.emission_mode == 'total':
                                func = openmc.data.Combination(
                                    [nuc[mt].xs[nucT], prod.yield_],
                                    [np.multiply])
                                funcs.append(func)
                                break
                        else:
                            # Total doesn't exist so we have to create from
                            # prompt and delayed. This is used for scatter
                            # multiplication.
                            func = None
                            for prod in chain(nuc[mt].products,
                                              nuc[mt].derived_products):
                                if prod.particle == 'neutron' and \
                                    prod.emission_mode != 'total':
                                    if func:
                                        func = openmc.data.Combination(
                                            [prod.yield_, func], [np.add])
                                    else:
                                        func = prod.yield_
                            if func:
                                funcs.append(openmc.data.Combination(
                                    [func, nuc[mt].xs[nucT]], [np.multiply]))
                            else:
                                # If func is still None, then there were no
                                # products. In that case, assume the yield is
                                # one as its not provided for some summed
                                # reactions like MT=4
                                funcs.append(nuc[mt].xs[nucT])
                    else:
                        funcs.append(nuc[mt].xs[nucT])
                elif mt == UNITY_MT:
                    # Placeholder used by the 'unity' type (no-op divisor)
                    funcs.append(lambda x: 1.)
                elif mt == XI_MT:
                    # Average lethargy gain per elastic collision for a
                    # free nucleus with this atomic weight ratio
                    awr = nuc.atomic_weight_ratio
                    alpha = ((awr - 1.) / (awr + 1.))**2
                    xi = 1. + alpha * np.log(alpha) / (1. - alpha)
                    funcs.append(lambda x: xi)
                else:
                    # Reaction absent from this evaluation; contributes zero
                    funcs.append(lambda x: 0.)
            funcs = funcs if funcs else [lambda x: 0.]
            xs.append(openmc.data.Combination(funcs, op))
    else:
        raise ValueError(this + " not in library")
    return energy_grid, xs
def _calculate_cexs_elem_mat(this, types, temperature=294.,
                             cross_sections=None, sab_name=None,
                             enrichment=None):
    """Calculates continuous-energy cross sections of a requested type.
    Parameters
    ----------
    this : openmc.Material or openmc.Element
        Object to source data from
    types : Iterable of values of PLOT_TYPES
        The type of cross sections to calculate
    temperature : float, optional
        Temperature in Kelvin to plot. If not specified, a default
        temperature of 294K will be plotted. Note that the nearest
        temperature in the library for each nuclide will be used as opposed
        to using any interpolation.
    cross_sections : str, optional
        Location of cross_sections.xml file. Default is None.
    sab_name : str, optional
        Name of S(a,b) library to apply to MT=2 data when applicable.
    enrichment : float, optional
        Enrichment for U235 in weight percent. For example, input 4.95 for
        4.95 weight percent enriched U. Default is None
        (natural composition).
    Returns
    -------
    energy_grid : numpy.ndarray
        Energies at which cross sections are calculated, in units of eV
    data : numpy.ndarray
        Cross sections calculated at the energy grid described by energy_grid
    """
    if isinstance(this, openmc.Material):
        # A Material's own temperature takes precedence over the argument
        if this.temperature is not None:
            T = this.temperature
        else:
            T = temperature
    else:
        T = temperature
    # Load the library
    library = openmc.data.DataLibrary.from_xml(cross_sections)
    if isinstance(this, openmc.Material):
        # Expand elements in to nuclides with atomic densities
        nuclides = this.get_nuclide_atom_densities()
        # For ease of processing split out the nuclide and its fraction
        nuc_fractions = {nuclide[1][0]: nuclide[1][1]
                         for nuclide in nuclides.items()}
        # Create a dict of [nuclide name] = nuclide object to carry forward
        # with a common nuclides format between openmc.Material and
        # openmc.Element objects
        nuclides = {nuclide[1][0]: nuclide[1][0]
                    for nuclide in nuclides.items()}
    else:
        # Expand elements in to nuclides with atomic densities
        nuclides = this.expand(1., 'ao', enrichment=enrichment,
                               cross_sections=cross_sections)
        # For ease of processing split out the nuclide and its fraction
        nuc_fractions = {nuclide[0]: nuclide[1] for nuclide in nuclides}
        # Create a dict of [nuclide name] = nuclide object to carry forward
        # with a common nuclides format between openmc.Material and
        # openmc.Element objects
        nuclides = {nuclide[0]: nuclide[0] for nuclide in nuclides}
    # Identify the nuclides which have S(a,b) data
    sabs = {}
    for nuclide in nuclides.items():
        sabs[nuclide[0]] = None
    if isinstance(this, openmc.Material):
        for sab_name in this._sab:
            sab = openmc.data.ThermalScattering.from_hdf5(
                library.get_by_material(sab_name, data_type='thermal')['path'])
            for nuc in sab.nuclides:
                sabs[nuc] = library.get_by_material(sab_name,
                                                    data_type='thermal')['path']
    else:
        if sab_name:
            # NOTE(review): sab_name is used both as an HDF5 file path
            # (from_hdf5) and as a library material name (get_by_material);
            # confirm callers pass a value valid for both lookups.
            sab = openmc.data.ThermalScattering.from_hdf5(sab_name)
            for nuc in sab.nuclides:
                sabs[nuc] = library.get_by_material(sab_name,
                                                    data_type='thermal')['path']
    # Now we can create the data sets to be plotted
    xs = {}
    E = []
    for nuclide in nuclides.items():
        name = nuclide[0]
        nuc = nuclide[1]
        sab_tab = sabs[name]
        temp_E, temp_xs = calculate_cexs(nuc, 'nuclide', types, T, sab_tab,
                                         cross_sections)
        E.append(temp_E)
        # Since the energy grids are different, store the cross sections as
        # a tabulated function so they can be calculated on any grid needed.
        xs[name] = [openmc.data.Tabulated1D(temp_E, temp_xs[line])
                    for line in range(len(types))]
    # Condense the data for every nuclide
    # First create a union energy grid
    energy_grid = E[0]
    for grid in E[1:]:
        energy_grid = np.union1d(energy_grid, grid)
    # Now we can combine all the nuclidic data
    data = np.zeros((len(types), len(energy_grid)))
    for line in range(len(types)):
        if types[line] == 'unity':
            data[line, :] = 1.
        else:
            # Density-weighted sum of each nuclide's tabulated data
            for nuclide in nuclides.items():
                name = nuclide[0]
                data[line, :] += (nuc_fractions[name] *
                                  xs[name][line](energy_grid))
    return energy_grid, data
def calculate_mgxs(this, data_type, types, orders=None, temperature=294.,
                   cross_sections=None, ce_cross_sections=None,
                   enrichment=None):
    """Calculate multi-group cross sections of a requested type.

    If the data for the nuclide or macroscopic object in the library is
    represented as angle-dependent data then this method will return the
    geometric average cross section over all angles.

    Parameters
    ----------
    this : str or openmc.Material
        Object to source data from
    data_type : {'nuclide', 'element', 'material', 'macroscopic'}
        Type of object to plot
    types : Iterable of values of PLOT_TYPES_MGXS
        The type of cross sections to calculate
    orders : Iterable of Integral, optional
        The scattering order or delayed group index to use for the
        corresponding entry in types. Defaults to the 0th order for scattering
        and the total delayed neutron data.
    temperature : float, optional
        Temperature in Kelvin; the nearest library temperature is used
        (no interpolation). Defaults to 294 K.
    cross_sections : str, optional
        Location of MGXS HDF5 Library file. Default is None.
    ce_cross_sections : str, optional
        Location of continuous-energy cross_sections.xml file; only used to
        expand an openmc.Element passed as `this`. Default is None.
    enrichment : float, optional
        Enrichment for U235 in weight percent. Default is None
        (natural composition).

    Returns
    -------
    energy_grid : numpy.ndarray
        Energies at which cross sections are calculated, in units of eV
    data : numpy.ndarray
        Cross sections calculated at the energy grid described by energy_grid
    """
    # Validate user input before touching the library
    cv.check_type('temperature', temperature, Real)
    if enrichment:
        cv.check_type('enrichment', enrichment, Real)
    cv.check_iterable_type('types', types, str)
    cv.check_type("cross_sections", cross_sections, str)

    library = openmc.MGXSLibrary.from_hdf5(cross_sections)

    if data_type in ('nuclide', 'macroscopic'):
        mgxs = _calculate_mgxs_nuc_macro(this, types, library, orders,
                                         temperature)
    elif data_type in ('element', 'material'):
        mgxs = _calculate_mgxs_elem_mat(this, types, library, orders,
                                        temperature, ce_cross_sections,
                                        enrichment)
    else:
        raise TypeError("Invalid type")

    # Duplicate every interior group edge and every group value so the
    # step-wise MGXS data plots as flat segments between group boundaries.
    group_edges = library.energy_groups.group_edges
    energy_grid = np.repeat(group_edges, 2)[1:-1]
    # Ensure the energy will show on a log-axis by replacing 0s with a
    # sufficiently small number
    energy_grid[0] = max(energy_grid[0], _MIN_E)
    data = np.repeat(mgxs, 2, axis=1)

    # Energy grids are returned high-to-low for MG plotting
    return energy_grid[::-1], data
def _calculate_mgxs_nuc_macro(this, types, library, orders=None,
                              temperature=294.):
    """Determines the multi-group cross sections of a nuclide or macroscopic
    object.

    If the data for the nuclide or macroscopic object in the library is
    represented as angle-dependent data then this method will return the
    geometric average cross section over all angles.

    Parameters
    ----------
    this : openmc.Nuclide or openmc.Macroscopic
        Object to source data from
    types : Iterable of str
        The type of cross sections to calculate; values can either be those
        in openmc.PLOT_TYPES_MGXS
    library : openmc.MGXSLibrary
        MGXS Library containing the data of interest
    orders : Iterable of Integral, optional
        The scattering order or delayed group index to use for the
        corresponding entry in types. Defaults to the 0th order for scattering
        and the total delayed neutron data.
    temperature : float, optional
        Temperature in Kelvin to plot. If not specified, a default
        temperature of 294K will be plotted. Note that the nearest
        temperature in the library for each nuclide will be used as opposed
        to using any interpolation.

    Returns
    -------
    data : numpy.ndarray
        Cross sections calculated at the energy grid described by energy_grid
    """
    # Check the parameters and grab order/delayed groups
    if orders:
        cv.check_iterable_type('orders', orders, Integral,
                               min_depth=len(types), max_depth=len(types))
    else:
        orders = [None] * len(types)
    for i, line in enumerate(types):
        cv.check_type("line", line, str)
        cv.check_value("line", line, PLOT_TYPES_MGXS)
        # Explicit None check so the valid order/delayed-group index 0 is
        # validated (and, below, honored) rather than treated as "absent"
        if orders[i] is not None:
            cv.check_greater_than("order value", orders[i], 0, equality=True)
    xsdata = library.get_by_name(this)
    if xsdata is not None:
        # Obtain the nearest temperature
        t = np.abs(xsdata.temperatures - temperature).argmin()
        # Get the data
        data = np.zeros((len(types), library.energy_groups.num_groups))
        for i, line in enumerate(types):
            if 'fission' in line and not xsdata.fissionable:
                continue
            elif line == 'unity':
                data[i, :] = 1.
            else:
                # Now we have to get the cross section data and properly
                # treat it depending on the requested type.
                # First get the data in a generic fashion
                temp_data = getattr(xsdata, _PLOT_MGXS_ATTR[line])[t]
                shape = temp_data.shape[:]
                # If we have angular data, then want the geometric
                # average over all provided angles. Since the angles are
                # equi-distant, un-weighted averaging will suffice
                if xsdata.representation == 'angle':
                    temp_data = np.mean(temp_data, axis=(0, 1))
                # Now we can look at the shape of the data to identify how
                # it should be modified to produce an array of values
                # with groups.
                if shape in (xsdata.xs_shapes["[G']"],
                             xsdata.xs_shapes["[G]"]):
                    # Then the data is already an array vs groups so copy
                    # and move along
                    data[i, :] = temp_data
                elif shape == xsdata.xs_shapes["[G][G']"]:
                    # Sum the data over outgoing groups to create our array vs
                    # groups
                    data[i, :] = np.sum(temp_data, axis=1)
                elif shape == xsdata.xs_shapes["[DG]"]:
                    # Then we have a constant vs groups with a value for each
                    # delayed group. The user-provided value of orders tells us
                    # which delayed group we want. If none are provided, then
                    # we sum all the delayed groups together.
                    if orders[i] is not None:
                        # BUG FIX: shape[0] is the number of delayed groups
                        # (an int); the previous len(shape[0]) raised
                        # TypeError whenever an order was supplied.
                        if orders[i] < shape[0]:
                            data[i, :] = temp_data[orders[i]]
                    else:
                        data[i, :] = np.sum(temp_data[:])
                elif shape in (xsdata.xs_shapes["[DG][G']"],
                               xsdata.xs_shapes["[DG][G]"]):
                    # Then we have an array vs groups with values for each
                    # delayed group. The user-provided value of orders tells us
                    # which delayed group we want. If none are provided, then
                    # we sum all the delayed groups together.
                    if orders[i] is not None:
                        if orders[i] < shape[0]:
                            data[i, :] = temp_data[orders[i], :]
                    else:
                        data[i, :] = np.sum(temp_data[:, :], axis=0)
                elif shape == xsdata.xs_shapes["[DG][G][G']"]:
                    # Then we have a delayed group matrix. We will first
                    # remove the outgoing group dependency
                    temp_data = np.sum(temp_data, axis=-1)
                    # And then proceed in exactly the same manner as the
                    # "[DG][G']" or "[DG][G]" shapes in the previous block.
                    if orders[i] is not None:
                        if orders[i] < shape[0]:
                            data[i, :] = temp_data[orders[i], :]
                    else:
                        data[i, :] = np.sum(temp_data[:, :], axis=0)
                elif shape == xsdata.xs_shapes["[G][G'][Order]"]:
                    # This is a scattering matrix with angular data
                    # First remove the outgoing group dependence
                    temp_data = np.sum(temp_data, axis=1)
                    # The user either provided a specific order or we resort
                    # to the default 0th order
                    order = orders[i] if orders[i] is not None else 0
                    # If the order is available, store the data for that order
                    # if it is not available, then the expansion coefficient
                    # is zero and thus we already have the correct value.
                    if order < shape[1]:
                        data[i, :] = temp_data[:, order]
    else:
        raise ValueError("{} not present in provided MGXS "
                         "library".format(this))
    return data
def _calculate_mgxs_elem_mat(this, types, library, orders=None,
                             temperature=294., ce_cross_sections=None,
                             enrichment=None):
    """Determines the multi-group cross sections of an element or material
    object.

    If the data for the nuclide or macroscopic object in the library is
    represented as angle-dependent data then this method will return the
    geometric average cross section over all angles.

    Parameters
    ----------
    this : openmc.Element or openmc.Material
        Object to source data from
    types : Iterable of str
        The type of cross sections to calculate; values can either be those
        in openmc.PLOT_TYPES_MGXS
    library : openmc.MGXSLibrary
        MGXS Library containing the data of interest
    orders : Iterable of Integral, optional
        The scattering order or delayed group index to use for the
        corresponding entry in types. Defaults to the 0th order for scattering
        and the total delayed neutron data.
    temperature : float, optional
        Temperature in Kelvin to plot. If not specified, a default
        temperature of 294K will be plotted. Note that the nearest
        temperature in the library for each nuclide will be used as opposed
        to using any interpolation.
    ce_cross_sections : str, optional
        Location of continuous-energy cross_sections.xml file. Default is None.
        This is used only for expanding the elements
    enrichment : float, optional
        Enrichment for U235 in weight percent. For example, input 4.95 for
        4.95 weight percent enriched U. Default is None
        (natural composition).

    Returns
    -------
    data : numpy.ndarray
        Cross sections calculated at the energy grid described by energy_grid
    """
    if isinstance(this, openmc.Material):
        # A Material may carry its own temperature; fall back to the
        # caller-supplied value only when it is unset.
        if this.temperature is not None:
            T = this.temperature
        else:
            T = temperature
        # Check to see if we have nuclides/elements or a macrocopic object
        if this._macroscopic is not None:
            # We have macroscopics
            nuclides = {this._macroscopic: (this._macroscopic, this.density)}
        else:
            # Expand elements in to nuclides with atomic densities
            nuclides = this.get_nuclide_atom_densities()
        # For ease of processing split out nuc and nuc_density
        nuc_fraction = [nuclide[1][1] for nuclide in nuclides.items()]
    else:
        T = temperature
        # Expand elements in to nuclides with atomic densities
        nuclides = this.expand(100., 'ao', enrichment=enrichment,
                               cross_sections=ce_cross_sections)
        # For ease of processing split out nuc and nuc_fractions
        nuc_fraction = [nuclide[1] for nuclide in nuclides]
    # Per-nuclide multi-group data, in the same order as nuc_fraction.
    # NOTE(review): the Element branch above builds `nuclides` as the
    # return of this.expand(...), iterated without .items(), yet .items()
    # is called below -- confirm both branches yield a dict-like object.
    nuc_data = []
    for nuclide in nuclides.items():
        nuc_data.append(_calculate_mgxs_nuc_macro(nuclide[0], types, library,
                                                  orders, T))
    # Combine across the nuclides
    data = np.zeros((len(types), library.energy_groups.num_groups))
    for line in range(len(types)):
        if types[line] == 'unity':
            # 'unity' is a constant 1 across all groups.
            data[line, :] = 1.
        else:
            # Number-fraction-weighted sum of the per-nuclide data.
            for n in range(len(nuclides)):
                data[line, :] += nuc_fraction[n] * nuc_data[n][line, :]
    return data
| mit |
per-andersen/Deltamu | Visualisation.py | 1 | 30055 | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.patheffects as patheffects
from matplotlib.font_manager import FontProperties
from mpl_toolkits.mplot3d import axes3d
import pickle as pick
import Contour
import Deltamu
'''
Program to analyse and plot output from the Deltamu functions.
ToDo:
----Read in pickled data
----Determine how many unique sets of parameters are needed to get min/max delta over redshift space
----Plot the min/max deltamu as a function of redshift
'''
def read_pickled_deltamu(fname):
    """Load and return the pickled deltamu payload stored in *fname*.

    Returns whatever tuple the Deltamu machinery pickled (typically
    redshifts, deltamu_min, deltamu_max, parameters_min, parameters_max,
    deltamu, marg, m_bestfit_lcdm_marg).
    """
    # 'with' guarantees the handle is closed even if unpickling raises;
    # the previous open()/close() pair leaked the handle on error.
    with open(fname, 'rb') as pkl_data_file:
        return pick.load(pkl_data_file)
def unique_par_sets_1d(parameters, redshifts):
    """Return first-occurrence unique scalar parameters and the redshift
    at which each was first seen.

    Parameters
    ----------
    parameters, redshifts : parallel sequences of scalars.

    Returns
    -------
    (np.ndarray, np.ndarray) of the unique parameters (first-seen order)
    and their first-seen redshifts.
    """
    # Accumulate in Python lists: repeated np.append reallocates the
    # whole array on every call (quadratic in the number of uniques).
    unique_pars = []
    unique_zs = []
    for par, redshift in zip(parameters, redshifts):
        if par not in unique_pars:
            unique_pars.append(par)
            unique_zs.append(redshift)
    return np.array(unique_pars), np.array(unique_zs)
def unique_par_sets_3d(parameters, redshifts):
    """Return first-occurrence unique parameter vectors and the redshift
    at which each was first seen.

    Two vectors count as equal when the sum of the *absolute*
    component-wise differences is below 1e-4.  The previous version
    summed the signed differences, so components of opposite sign could
    cancel (e.g. [1,2,3] vs [2,1,3]) and distinct sets were dropped.

    Returns
    -------
    (list of vectors, np.ndarray of redshifts), matching the original
    return types.
    """
    unique_par_sets = []
    unique_par_redshifts = []
    for pars, redshift in zip(parameters, redshifts):
        pars_arr = np.asarray(pars)
        is_new = all(np.sum(np.abs(pars_arr - accepted)) >= 0.0001
                     for accepted in unique_par_sets)
        if is_new:
            unique_par_sets.append(pars)
            unique_par_redshifts.append(redshift)
    return unique_par_sets, np.array(unique_par_redshifts)
def get_unique_parameter_sets(fname):
    """Load a pickled deltamu file and reduce the extremal parameter
    lists to their unique members plus the redshift at which each first
    appears.  Dispatches on whether the stored parameters are scalars
    (1-D array) or parameter vectors (2-D array).
    """
    (redshifts, deltamu_min, deltamu_max, parameters_min, parameters_max,
     deltamu, marg, m_bestfit_lcdm_marg) = read_pickled_deltamu(fname)

    n_dims = len(np.shape(parameters_min))
    if n_dims == 1:
        reducer = unique_par_sets_1d
    elif n_dims == 2:
        reducer = unique_par_sets_3d
    sets_min, z_min = reducer(parameters_min, redshifts)
    sets_max, z_max = reducer(parameters_max, redshifts)
    return sets_min, sets_max, z_min, z_max
def get_file_name(chain_name, bins_tuple, contour_level=0.68, tolerance=0.005, do_marg=False,\
    redshifts_marg_method='jla', redshift_marg_min=0.1,redshift_marg_max=10., redshift_marg_n=10):
    """Build the deltamu pickle file name for a chain.

    'lcdm' uses a scalar bin count; every other chain uses a 3-tuple of
    bin counts concatenated digit-wise.  When do_marg is True the
    marginalisation suffix (method, redshift range, n) is appended.

    The previous version built the non-marg name and then rebuilt the
    whole string from scratch when do_marg was set; this constructs each
    piece exactly once.
    """
    if chain_name == 'lcdm':
        bins_str = str(bins_tuple)
    else:
        bins_str = str(bins_tuple[0]) + str(bins_tuple[1]) + str(bins_tuple[2])
    f_name = "deltamu_" + chain_name + "_c" + str(contour_level) +\
        "_t" + str(tolerance) + "_b" + bins_str
    if do_marg:
        f_name += "_marg_" + redshifts_marg_method + "_z" + str(redshift_marg_min) +\
            "-" + str(redshift_marg_max) + "_n" + str(redshift_marg_n)
    return f_name + ".dat"
def eos_from_chain_name(chain_name, w0, wa, redshifts):
    """Evaluate the dark-energy equation of state w(z) for the named
    parametrisation.

    chain_name : 'cpl'   -> w0 + wa*(1-a)
                 'jbp'   -> w0 + wa*(1-a)*a
                 'n3cpl' -> w0 + wa*(1-a)**3
                 'n7cpl' -> w0 + wa*(1-a)**7
    where a = 1/(1+z) is the scale factor.

    Raises
    ------
    ValueError for an unknown chain_name.  (The old code printed
    "STRANGER DANGER!" and called exit(), killing the interpreter.)
    """
    scale_factor = 1. / (1. + redshifts)
    if chain_name == 'cpl':
        return w0 + wa * (1. - scale_factor)
    elif chain_name == 'jbp':
        return w0 + wa * (1. - scale_factor) * scale_factor
    elif chain_name == 'n3cpl':
        return w0 + wa * (1. - scale_factor)**3
    elif chain_name == 'n7cpl':
        return w0 + wa * (1. - scale_factor)**7
    raise ValueError("unknown chain_name: %s" % chain_name)
def is_phantom(chain_name, w0, wa, redshifts):
    """True when the equation of state w(z) dips below -1 anywhere on
    *redshifts* (i.e. the model enters the phantom regime)."""
    eos = eos_from_chain_name(chain_name, w0, wa, redshifts)
    return bool(np.min(eos) < -1)
def get_color(shade):
    """Randomly pick one matplotlib colour name from the requested family.

    shade : 'green' or 'red'.  Any other value raises ValueError (the
    old code fell through to an UnboundLocalError, and also carried an
    unreachable bare `return` after the value was returned).
    """
    palettes = {
        'green': ['g', 'forestgreen', 'lime'],
        'red': ['r', 'sienna', 'tomato'],
    }
    if shade not in palettes:
        raise ValueError("unknown shade: %s" % shade)
    return np.random.choice(palettes[shade], size=1)[0]
def plot_minmax_deltamu(fname, title, legend_string=None):
    """Plot the minimum and maximum deltamu curves stored in pickle
    *fname* against redshift, then display the figure."""
    #redshifts, deltamu_min, deltamu_max, parameters_min, parameters_max = read_pickled_deltamu(fname)
    redshifts, deltamu_min, deltamu_max, parameters_min, parameters_max, deltamu, marg, m_bestfit_lcdm_marg = read_pickled_deltamu(fname)
    #plt.figure()
    # Only the minimum curve carries the legend label.
    plt.plot(redshifts, deltamu_min,label=legend_string)
    plt.plot(redshifts, deltamu_max)
    plt.title(title)
    plt.show()
def plot_min_deltamu(fname, title, legend_string=None):
    """Plot only the minimum deltamu curve from pickle *fname*.

    Does not call plt.show(), so the caller can layer several curves
    before displaying."""
    redshifts, deltamu_min, deltamu_max, parameters_min, parameters_max, deltamu, marg, m_bestfit_lcdm_marg = read_pickled_deltamu(fname)
    #plt.figure()
    plt.plot(redshifts, deltamu_min,label=legend_string)
    plt.title(title)
def plot_max_deltamu(fname, title, legend_string=None):
    """Plot only the maximum deltamu curve from pickle *fname*.

    Does not call plt.show(), so the caller can layer several curves
    before displaying."""
    redshifts, deltamu_min, deltamu_max, parameters_min, parameters_max, deltamu, marg, m_bestfit_lcdm_marg = read_pickled_deltamu(fname)
    #plt.figure()
    plt.plot(redshifts, deltamu_max,label=legend_string)
    plt.title(title)
def plot_deltamu(fname, title, legend_string=None):
    """Plot every individual deltamu curve (one per parameter set) stored
    in pickle *fname*, then display the figure."""
    redshifts, deltamu_min, deltamu_max, parameters_min, parameters_max, deltamu, marg, m_bestfit_lcdm_marg = read_pickled_deltamu(fname)
    #plt.figure()
    # deltamu is (n_redshifts, n_parameter_sets); plot each column.
    for ii in np.arange(np.shape(deltamu)[1]):
        plt.plot(redshifts, deltamu[:,ii],label=legend_string)
    plt.title(title)
    plt.show()
def oplot_deltamus(chain_name, bins, smoothings, tolerance=0.005, label='CPL', thinning=10, ignore_k=False):
    """Overplot individual deltamu curves for every (bins, smoothing)
    combination of *chain_name*, colouring phantom parameter sets green
    and non-phantom ones red.

    Only every *thinning*-th curve is drawn.  With ignore_k=True the
    marginalisation offset (marg - m_bestfit_lcdm_marg) is omitted from
    the curves and the dashed min/max envelopes are not drawn.
    """
    if individual_plots:
        plt.figure()
        plt.xlabel('Redshift',size='x-large')
        plt.ylabel(r'$\Delta \mu$',size='x-large')
        plt.ylim((-0.1,0.1))
    ii_counter = 0
    # Running envelopes over all curves (1000 redshift samples assumed).
    deltamu_max_global = np.zeros(1000)
    deltamu_min_global = np.zeros(1000)
    # Rows of non-phantom curves; the initial zero row remains in the
    # min/max reduction below.
    deltamu_nonphantom = np.zeros((1,1000))
    for ii in np.arange(len(bins)):
        for jj in np.arange(len(smoothings)):
            deltamus = Deltamu.Deltamu(chain_name,'',do_marg=True,bins_tuple=bins[ii],smoothing=smoothings[jj],tolerance=tolerance)
            fname = root_dir + deltamus.get_marg_file_name()
            redshifts, deltamu_min, deltamu_max, parameters_min, parameters_max, deltamu, marg, m_bestfit_lcdm_marg = read_pickled_deltamu(fname)
            parameters = deltamus.get_parameters(verbose=False)
            # Widen the global envelopes with this run.
            for kk in np.arange(len(deltamu_min_global)):
                if deltamu_max[kk] > deltamu_max_global[kk]:
                    deltamu_max_global[kk] = deltamu_max[kk]
                if deltamu_min[kk] < deltamu_min_global[kk]:
                    deltamu_min_global[kk] = deltamu_min[kk]
            # One deltamu column per parameter set.
            for kk in np.arange(np.shape(deltamu)[1]):
                ii_counter += 1
                w0 = parameters[1][kk]
                wa = parameters[2][kk]
                if is_phantom(chain_name, w0, wa, redshifts) == False:
                    if ignore_k:
                        deltamu_nonphantom = np.concatenate((deltamu_nonphantom,np.array([deltamu[:,kk]])))
                    else:
                        deltamu_nonphantom = np.concatenate((deltamu_nonphantom,np.array([deltamu[:,kk] + marg[kk] - m_bestfit_lcdm_marg])))
                # Draw only every thinning-th curve.
                if ii_counter == thinning:
                    if ignore_k:
                        if is_phantom(chain_name, w0, wa, redshifts):
                            plt.plot(redshifts, deltamu[:,kk], c=get_color(shade='green'))
                            #pass
                        else:
                            plt.plot(redshifts, deltamu[:,kk], c=get_color(shade='red'))
                            #pass
                    else:
                        if is_phantom(chain_name, w0, wa, redshifts):
                            plt.plot(redshifts, deltamu[:,kk] + marg[kk] - m_bestfit_lcdm_marg,c=get_color(shade='green'))
                            #pass
                        else:
                            plt.plot(redshifts, deltamu[:,kk] + marg[kk] - m_bestfit_lcdm_marg,c=get_color(shade='red'))
                            #pass
                    ii_counter = 0
    # Envelope of the non-phantom subset only.
    deltamu_nonphantom_max = np.zeros(1000)
    deltamu_nonphantom_min = np.zeros(1000)
    for ii in np.arange(len(deltamu_nonphantom_max)):
        deltamu_nonphantom_max[ii] = np.max(deltamu_nonphantom[:,ii])
        deltamu_nonphantom_min[ii] = np.min(deltamu_nonphantom[:,ii])
    if ignore_k==False:
        # Black long-dashed lines: global envelope; red short-dashed
        # lines: non-phantom envelope.
        dashes = [20,10]
        lmax,=plt.plot(redshifts, deltamu_max_global,'k',ls='--',lw=3)
        lmin,=plt.plot(redshifts, deltamu_min_global,'k',ls='--',lw=3)
        lmax.set_dashes(dashes)
        lmin.set_dashes(dashes)
        dashes_nonphantom = [5,5]
        llmax,=plt.plot(redshifts, deltamu_nonphantom_max,'r',ls='--',lw=3)
        llmin,=plt.plot(redshifts, deltamu_nonphantom_min,'r',ls='--',lw=3)
        llmax.set_dashes(dashes_nonphantom)
        llmin.set_dashes(dashes_nonphantom)
    plt.text(7.5, 0.08, label,size='x-large')
def oplot_deltamu_test(chain_name,bins, smoothings, tolerance=0.005, label='CPL'):
    """Plot the deltamu min/max envelope of *chain_name* as a filled grey
    band, together with the envelope of the corresponding testcase run
    (labelled w0=-1, wa=0) as a hatched band."""
    if individual_plots:
        fig = plt.figure()
        plt.xlabel('Redshift',size='x-large')
        plt.ylabel(r'$\Delta \mu$',size='x-large')
        plt.ylim((-0.1,0.1))
    # Envelopes for the regular chain and its testcase counterpart.
    deltamu_max_global = np.zeros(1000)
    deltamu_min_global = np.zeros(1000)
    deltamu_max_global_test = np.zeros(1000)
    deltamu_min_global_test = np.zeros(1000)
    for ii in np.arange(len(bins)):
        for jj in np.arange(len(smoothings)):
            deltamus = Deltamu.Deltamu(chain_name,'',do_marg=True,bins_tuple=bins[ii],smoothing=smoothings[jj],tolerance=tolerance)
            deltamus_test = Deltamu.Deltamu(chain_name,'',do_marg=True,bins_tuple=bins[ii],smoothing=smoothings[jj],tolerance=tolerance,testcase=True)
            fname = root_dir + deltamus.get_marg_file_name()
            fname_test = root_dir + deltamus_test.get_marg_file_name()
            redshifts, deltamu_min, deltamu_max, parameters_min, parameters_max, deltamu, marg, m_bestfit_lcdm_marg = read_pickled_deltamu(fname)
            redshifts_test, deltamu_min_test, deltamu_max_test, parameters_min_test, parameters_max_test, deltamu_test, marg_test, m_bestfit_lcdm_marg_test = read_pickled_deltamu(fname_test)
            # Widen both envelopes with this (bins, smoothing) run.
            for kk in np.arange(len(deltamu_min_global)):
                if deltamu_max[kk] > deltamu_max_global[kk]:
                    deltamu_max_global[kk] = deltamu_max[kk]
                if deltamu_min[kk] < deltamu_min_global[kk]:
                    deltamu_min_global[kk] = deltamu_min[kk]
                if deltamu_max_test[kk] > deltamu_max_global_test[kk]:
                    deltamu_max_global_test[kk] = deltamu_max_test[kk]
                if deltamu_min_test[kk] < deltamu_min_global_test[kk]:
                    deltamu_min_global_test[kk] = deltamu_min_test[kk]
    plt.fill_between(redshifts, deltamu_max_global,deltamu_min_global,color='darkgrey',label=label)
    plt.fill_between(redshifts_test, deltamu_max_global_test,deltamu_min_global_test,color='lightgrey',hatch='X',label=label + r", $w_0=-1, w_a=0$",edgecolor='darkgrey')
    plt.legend(frameon=False)
    if individual_plots:
        plt.savefig('Figures/test.pdf',format='pdf',dpi=fig.dpi)
    #plt.plot(redshifts, deltamu_max_global,c='b')
    #plt.plot(redshifts, deltamu_min_global,c='b')
    #plt.plot(redshifts_test, deltamu_max_global_test,c='g')
    #plt.plot(redshifts_test, deltamu_min_global_test,c='g')
def plot_3d_contours(chain_name, bins, smoothing, tolerance=0.005,labels = ['x','y','z']):
    """3-D scatter of the likelihood contour of *chain_name* (small blue
    dots) overlaid with the unique extremal parameter sets: circles for
    minima, triangles for maxima, colour-graded by position in the
    first-seen-redshift ordering.  Only sets first seen at redshift
    > 0.4 are shown.

    NOTE(review): the mutable default labels=['x','y','z'] is shared
    across calls; harmless here since it is never mutated.
    """
    redshift_cut = 0.4
    fig_scatter = plt.figure()
    ax_scatter = fig_scatter.add_subplot(111, projection='3d')
    ax_scatter.set_xlabel(labels[0])
    ax_scatter.set_ylabel(labels[1])
    ax_scatter.set_zlabel(labels[2])
    # Contour cloud is drawn for the first binning only.
    contour = Contour.Contour(chain_name=chain_name,directory='/Users/perandersen/Data/HzSC/',bins_tuple=bins[0],tolerance=tolerance,smoothing=smoothing)
    # Build the pickled contour on demand.
    # NOTE(review): the bare except also hides unrelated failures.
    try:
        x_contour, y_contour, z_contour = contour.read_pickled_contour()
    except:
        contour.pickle_contour()
        x_contour, y_contour, z_contour = contour.read_pickled_contour()
    ax_scatter.scatter(x_contour, y_contour, z_contour,color='b',s=1.,depthshade=True)
    for ii in np.arange(len(bins)):
        deltamus = Deltamu.Deltamu(chain_name,'',do_marg=True,bins_tuple=bins[ii],smoothing=smoothing,tolerance=tolerance)
        fname = root_dir + deltamus.get_marg_file_name()
        unique_par_sets_min, unique_par_sets_max, unique_par_redshifts_min, unique_par_redshifts_max = get_unique_parameter_sets(fname)
        unique_par_sets_min = np.array(unique_par_sets_min)
        unique_par_sets_max = np.array(unique_par_sets_max)
        # Keep only sets first seen beyond redshift_cut.
        unique_par_sets_min = unique_par_sets_min[unique_par_redshifts_min > redshift_cut]
        unique_par_sets_max = unique_par_sets_max[unique_par_redshifts_max > redshift_cut]
        unique_par_redshifts_min = unique_par_redshifts_min[unique_par_redshifts_min > redshift_cut]
        unique_par_redshifts_max = unique_par_redshifts_max[unique_par_redshifts_max > redshift_cut]
        print unique_par_sets_min
        print unique_par_redshifts_min
        # Columns are (om, w0, wa).
        om_min = unique_par_sets_min[:,0]
        w0_min = unique_par_sets_min[:,1]
        wa_min = unique_par_sets_min[:,2]
        om_max = unique_par_sets_max[:,0]
        w0_max = unique_par_sets_max[:,1]
        wa_max = unique_par_sets_max[:,2]
        # Red-to-white RGB gradients encoding position in the list,
        # reversed so the first entry is brightest.
        color_min = np.zeros((len(unique_par_redshifts_min),3))
        for jj in np.arange(len(color_min)):
            color_min[jj,0] = 1.
            color_min[jj,1] = float(jj) / len(color_min)
            color_min[jj,2] = float(jj) / len(color_min)
        color_min = color_min[::-1]
        color_max = np.zeros((len(unique_par_redshifts_max),3))
        for jj in np.arange(len(color_max)):
            color_max[jj,0] = 1.
            color_max[jj,1] = float(jj) / len(color_max)
            color_max[jj,2] = float(jj) / len(color_max)
            #print jj, color_max[jj,0], color_max[jj,1], color_max[jj,2]
        color_max = color_max[::-1]
        ax_scatter.scatter(om_min[:], w0_min[:], wa_min[:], color=color_min,s=100.,depthshade=False, edgecolor='k')
        ax_scatter.scatter(om_max[:], w0_max[:], wa_max[:], color=color_max,s=100.,depthshade=False, marker='^', edgecolor='k')
def oplot_3d_contours():
    '''
    This function tests if the contours produced with different binning
    and smoothing settings agree visually. It is really messy, and could
    use some cleaning up, if it is to be used more than just the one time.
    '''
    # Hard-coded comparison: cpl at (30,30,30) vs n3cpl at (60,60,60)
    # and (50,50,50); all at tolerance 0.001, smoothing 0.6.
    CPL_Contour = Contour.Contour(chain_name='cpl', directory='/Users/perandersen/Data/HzSC/',bins_tuple=(30,30,30),tolerance = 0.001, smoothing=0.6)
    #CPL_Contour_2 = Contour.Contour(chain_name='cpl', directory='/Users/perandersen/Data/HzSC/',bins_tuple=(40,40,40),tolerance = 0.001, smoothing=0.6)
    CPL_Contour_2 = Contour.Contour(chain_name='n3cpl', directory='/Users/perandersen/Data/HzSC/',bins_tuple=(60,60,60),tolerance = 0.001, smoothing=0.6)
    CPL_Contour_3 = Contour.Contour(chain_name='n3cpl', directory='/Users/perandersen/Data/HzSC/',bins_tuple=(50,50,50),tolerance = 0.001, smoothing=0.6)
    x_contour, y_contour, z_contour = CPL_Contour.read_pickled_contour()
    x_contour_2, y_contour_2, z_contour_2 = CPL_Contour_2.read_pickled_contour()
    x_contour_3, y_contour_3, z_contour_3 = CPL_Contour_3.read_pickled_contour()
    # Point counts per contour, for a quick density comparison.
    print len(x_contour)
    print len(x_contour_2)
    print len(x_contour_3)
    fig_scatter = plt.figure()
    ax_scatter = fig_scatter.add_subplot(111, projection='3d')
    #ax_scatter.scatter(x_contour, y_contour, z_contour, color='g')
    ax_scatter.scatter(x_contour_2, y_contour_2, z_contour_2)
    ax_scatter.scatter(x_contour_3, y_contour_3, z_contour_3, color='r')
'''
def plot_equation_of_state(wa_1,wa_2):
redshifts = np.linspace(0.0001,10.,1000)
scale_factor = 1. / (1. + redshifts)
linestyles = ['-','--','-.',':']
fig = plt.figure()
plt.xlabel('Redshift',size='x-large')
plt.ylabel(r'$w(z)$',size='xx-large')
for ii in np.arange(len(wa_1)):
eos_cpl = wa_1[ii][0] + wa_1[ii][1]*(1.-scale_factor)
plt.plot(redshifts, eos_cpl,c='b',ls=linestyles[ii],lw=3, label=r'$w_0$ : ' + str(wa_1[ii][0]) + r', $w_a$ : ' + str(wa_1[ii][1]))
for ii in np.arange(len(wa_2)):
eos_cpl = wa_2[ii][0] + wa_2[ii][1]*((1.-scale_factor)**7)
plt.plot(redshifts, eos_cpl,c='g',ls=linestyles[ii],lw=3, label=r'$w_0$ : ' + str(wa_2[ii][0]) + r', $w_a$ : ' + str(wa_2[ii][1]))
plt.ylim((-1,-0.5))
plt.legend(frameon=False, loc=2, fontsize=17)
plt.xticks(size='x-large')
plt.yticks(size='x-large')
#plt.text(7.5,-0.8,'Thawing\n CPL',size='xx-large',color='b')
#plt.text(6,-0.6,'Freezing\n n7CPL',size='xx-large',color='g')
plt.text(7.5,-0.8,'Thawing',size='xx-large',color='b')
plt.text(6,-0.6,'Freezing',size='xx-large',color='g')
fig.set_tight_layout('True')
plt.savefig('Figures/equationofstate.pdf',format='pdf')
'''
def plot_equation_of_state(wa_1,wa_2):
    """Plot w(z) for two families of (w0, wa) pairs: *wa_1* under the CPL
    form w0 + wa*(1-a) in blue and *wa_2* under the n7CPL form
    w0 + wa*(1-a)**7 in green, with the phantom regime (w < -1) shaded.
    Saves the figure to Figures/equationofstate.pdf.
    """
    redshifts = np.linspace(0.0001,10.,1000)
    scale_factor = 1. / (1. + redshifts)
    linestyles = ['-','--','-.',':']
    fig = plt.figure()
    ax = fig.add_subplot(111)
    plt.xlabel('Redshift',size='x-large')
    plt.ylabel(r'$w(z)$',size='xx-large')
    for ii in np.arange(len(wa_1)):
        eos_cpl = wa_1[ii][0] + wa_1[ii][1]*(1.-scale_factor)
        plt.plot(redshifts, eos_cpl,c='b',ls=linestyles[ii],lw=3, label=r'$w_0$ : ' + str(wa_1[ii][0]) + r', $w_a$ : ' + str(wa_1[ii][1]))
    for ii in np.arange(len(wa_2)):
        eos_cpl = wa_2[ii][0] + wa_2[ii][1]*((1.-scale_factor)**7)
        plt.plot(redshifts, eos_cpl,c='g',ls=linestyles[ii],lw=3, label=r'$w_0$ : ' + str(wa_2[ii][0]) + r', $w_a$ : ' + str(wa_2[ii][1]))
    plt.ylim((-1.5,-0.5))
    plt.xlim((0,4))
    plt.legend(frameon=False, loc=9, fontsize=17,handlelength=2.3)
    plt.xticks(size='x-large')
    plt.yticks(size='x-large')
    plt.text(.5,-0.82,'Convex',size='x-large',color='b',rotation=-14)
    plt.text(.5,-1.18,'Concave',size='x-large',color='b',rotation=14)
    plt.text(1.6,-0.95,'Convex',size='x-large',color='g',rotation=10)
    plt.text(1.6,-1.06,'Concave',size='x-large',color='g',rotation=-8)
    # Grey band marking the phantom regime (w < -1).
    ax.add_patch(patches.Rectangle((0,-1.5),4.,0.5,color='grey',alpha=0.2))
    txt = plt.text(1.2, -1.3,'Phantom regime',size='xx-large',color='darkgrey')
    txt.set_path_effects([patheffects.withStroke(linewidth=0.5,foreground='k')])
    fig.set_tight_layout('True')
    plt.savefig('Figures/equationofstate.pdf',format='pdf')
def combined_plot():
    """Build the 3x2 figure of deltamu panels (a)-(f) for the cpl, jbp,
    n3cpl and n7cpl chains (with/without the marginalisation constant)
    and save it to Figures/combinedplot.pdf."""
    f, (ax1, ax2, ax3) = plt.subplots(3,2,figsize=(8,10))
    log_x_axis = False
    # Panel (a): CPL, marginalisation constant ignored.
    plt.sca(ax1[0])
    oplot_deltamus('cpl', [(30,30,30),(40,40,40),(50,50,50)],[0.6],label='CPL',ignore_k=True,thinning=80)
    ax1[0].set_ylabel(r'$\mathbf{\Delta \mu}$',size='x-large')
    ax1[0].set_yticks([-0.08, -0.04, 0., 0.04, 0.08])
    ax1[0].set_xticks([0.])
    ax1[0].set_xticklabels([''])
    ax1[0].text(0.3,0.08,'(a)',size='x-large')
    if log_x_axis:
        ax1[0].set_xscale("log", nonposx='clip')
    # Panel (b): CPL envelope vs its w0=-1, wa=0 testcase.
    plt.sca(ax1[1])
    oplot_deltamu_test('cpl', [(30,30,30),(40,40,40),(50,50,50)],[0.6],label='CPL')
    ax1[1].set_yticks([0.])
    ax1[1].set_yticklabels([''])
    ax1[1].set_xticks([0.])
    ax1[1].set_xticklabels([''])
    ax1[1].text(0.3,0.08,'(b)',size='x-large')
    if log_x_axis:
        ax1[1].set_xscale("log", nonposx='clip')
    # Panel (c): CPL with the marginalisation constant included.
    plt.sca(ax2[0])
    oplot_deltamus('cpl', [(30,30,30),(40,40,40),(50,50,50)],[0.6],label='CPL',ignore_k=False,thinning=100)
    ax2[0].set_ylabel(r'$\mathbf{\Delta \mu}$',size='x-large')
    ax2[0].set_yticks([-0.08, -0.04, 0., 0.04, 0.08])
    ax2[0].set_xticks([0.])
    ax2[0].set_xticklabels([''])
    ax2[0].text(0.3,0.08,'(c)',size='x-large')
    if log_x_axis:
        ax2[0].set_xscale("log", nonposx='clip')
    # Panel (d): JBP.
    plt.sca(ax2[1])
    oplot_deltamus('jbp', [(30,30,30),(40,40,40),(50,50,50)],[0.6],label='JBP',ignore_k=False,thinning=100)
    ax2[1].set_yticks([0.])
    ax2[1].set_yticklabels([''])
    ax2[1].set_xticks([0.])
    ax2[1].set_xticklabels([''])
    ax2[1].text(0.3,0.08,'(d)',size='x-large')
    if log_x_axis:
        ax2[1].set_xscale("log", nonposx='clip')
    # Panel (e): n3CPL, with an inline colour legend (green=phantom,
    # red=non-phantom).
    plt.sca(ax3[0])
    oplot_deltamus('n3cpl', [(30,30,30),(40,40,40),(50,50,50)],[0.6],label='n3CPL',ignore_k=False,thinning=100)
    ax3[0].set_ylabel(r'$\mathbf{\Delta \mu}$',size='x-large')
    ax3[0].set_yticks([-0.08, -0.04, 0., 0.04, 0.08])
    ax3[0].set_xlabel('Redshift',size='x-large')
    ax3[0].text(0.3,0.08,'(e)',size='x-large')
    ax3[0].add_patch(patches.Rectangle((1.,-0.068),0.5,0.015,color='g'))
    ax3[0].text(1.6,-0.066,'Phantom',size='large')
    ax3[0].add_patch(patches.Rectangle((1.,-0.093),0.5,0.015,color='r'))
    ax3[0].text(1.6,-0.091,'Non-phantom',size='large')
    if log_x_axis:
        ax3[0].set_xscale("log", nonposx='clip')
    # Panel (f): n7CPL.
    plt.sca(ax3[1])
    oplot_deltamus('n7cpl', [(30,30,30),(40,40,40)],[0.4],label='n7CPL',ignore_k=False,thinning=90)
    ax3[1].set_xlabel('Redshift',size='x-large')
    ax3[1].set_yticks([0.])
    ax3[1].set_yticklabels([''])
    ax3[1].set_xticks([2,4,6,8,10])
    ax3[1].text(0.3,0.08,'(f)',size='x-large')
    if log_x_axis:
        ax3[1].set_xscale("log", nonposx='clip')
    plt.subplots_adjust(left=0.11,bottom=0.05,right=0.98,top=0.98,wspace=0., hspace=0.)
    plt.savefig('Figures/combinedplot.pdf',format='pdf')
def additional_plots():
    """Companion 3x2 figure to combined_plot: testcase envelopes (left
    column) next to marginalisation-free curves (right column) for the
    jbp, n3cpl and n7cpl chains; saved to Figures/additionalplots.pdf."""
    f, (ax1, ax2, ax3) = plt.subplots(3,2,figsize=(8,10))
    # Row 1: JBP.
    plt.sca(ax1[0])
    oplot_deltamu_test('jbp', [(30,30,30),(40,40,40),(50,50,50)],[0.6],label='JBP')
    ax1[0].set_ylabel(r'$\mathbf{\Delta \mu}$',size='x-large')
    ax1[0].set_yticks([-0.08, -0.04, 0., 0.04, 0.08])
    ax1[0].set_xticks([0.])
    ax1[0].set_xticklabels([''])
    ax1[0].text(0.3,0.08,'(a)',size='x-large')
    plt.sca(ax1[1])
    oplot_deltamus('jbp', [(30,30,30),(40,40,40),(50,50,50)],[0.6],label='JBP',ignore_k=True,thinning=80)
    ax1[1].set_yticks([0.])
    ax1[1].set_yticklabels([''])
    ax1[1].set_xticks([0.])
    ax1[1].set_xticklabels([''])
    ax1[1].text(0.3,0.08,'(b)',size='x-large')
    # Row 2: n3CPL.
    plt.sca(ax2[0])
    oplot_deltamu_test('n3cpl', [(30,30,30),(40,40,40),(50,50,50)],[0.6],label='n3CPL')
    ax2[0].set_yticks([0.])
    ax2[0].set_yticks([-0.08, -0.04, 0., 0.04, 0.08])
    ax2[0].set_xticklabels([''])
    ax2[0].text(0.3,0.08,'(c)',size='x-large')
    plt.sca(ax2[1])
    oplot_deltamus('n3cpl', [(30,30,30),(40,40,40),(50,50,50)],[0.6],label='n3CPL',ignore_k=True,thinning=80)
    # NOTE(review): ylabel/xlabel on this right-hand panel look like a
    # copy/paste slip (cf. combined_plot, which labels the left column
    # and the bottom row only) -- confirm against the published figure.
    ax2[1].set_ylabel(r'$\mathbf{\Delta \mu}$',size='x-large')
    ax2[1].set_yticklabels([''])
    ax2[1].set_xticks([0.])
    ax2[1].set_xlabel('Redshift',size='x-large')
    ax2[1].text(0.3,0.08,'(d)',size='x-large')
    # Row 3: n7CPL.
    plt.sca(ax3[0])
    oplot_deltamu_test('n7cpl', [(30,30,30),(40,40,40)],[0.4],label='n7CPL')
    ax3[0].set_ylabel(r'$\mathbf{\Delta \mu}$',size='x-large')
    ax3[0].set_yticks([-0.08, -0.04, 0., 0.04, 0.08])
    ax3[0].set_xticks([0.])
    ax3[0].set_xticklabels([''])
    ax3[0].text(0.3,0.08,'(e)',size='x-large')
    plt.sca(ax3[1])
    oplot_deltamus('n7cpl', [(30,30,30),(40,40,40)],[0.4],label='n7CPL',ignore_k=True,thinning=80)
    ax3[1].set_xlabel('Redshift',size='x-large')
    ax3[1].set_yticks([0.])
    ax3[1].set_yticklabels([''])
    ax3[1].set_xticks([2,4,6,8,10])
    ax3[1].text(0.3,0.08,'(f)',size='x-large')
    plt.subplots_adjust(left=0.11,bottom=0.05,right=0.98,top=0.98,wspace=0., hspace=0.)
    plt.savefig('Figures/additionalplots.pdf',format='pdf')
def oplot_deltamu_extrema(chain_names, bins_list, smoothings_list, labels, tolerance=0.005):
    """Plot, for each chain in *chain_names*, the global min/max deltamu
    envelope over all its (bins, smoothing) combinations, plus a
    non-phantom-only envelope drawn for the 'cpl' and 'n7cpl' chains.
    The legend entries are reordered by hand into two columns.
    Saved to Figures/deltamus_extrema.pdf when individual_plots is set.
    """
    if individual_plots:
        fig = plt.figure()
    ax = plt.subplot(1,1,1)
    plt.xlabel('Redshift',size='x-large')
    plt.ylabel(r'$\Delta \mu$',size='x-large')
    # Per-chain envelopes; non-phantom rows are pooled across all chains
    # (the initial zero row stays in the min/max reduction).
    deltamu_max_global = np.zeros((len(chain_names),1000))
    deltamu_min_global = np.zeros((len(chain_names),1000))
    deltamu_nonphantom = np.zeros((1,1000))
    colors = ['g', 'lawngreen', 'limegreen','b']
    colors_nonphantom = ['orange','orange','orange','r']
    linestyles = ['-','--',':','-']
    for ll in np.arange(len(chain_names)):
        chain_name = chain_names[ll]
        bins = bins_list[ll]
        smoothings = smoothings_list[ll]
        label = labels[ll]
        color = colors[ll]
        linestyle = linestyles[ll]
        for ii in np.arange(len(bins)):
            for jj in np.arange(len(smoothings)):
                deltamus = Deltamu.Deltamu(chain_name,'',do_marg=True,bins_tuple=bins[ii],smoothing=smoothings[jj],tolerance=tolerance)
                fname = root_dir + deltamus.get_marg_file_name()
                redshifts, deltamu_min, deltamu_max, parameters_min, parameters_max, deltamu, marg, m_bestfit_lcdm_marg = read_pickled_deltamu(fname)
                parameters = deltamus.get_parameters(verbose=False)
                for kk in np.arange(len(deltamu_min_global[ll])):
                    w0 = parameters[1][kk]
                    wa = parameters[2][kk]
                    if is_phantom(chain_name, w0, wa, redshifts) == False:
                        deltamu_nonphantom = np.concatenate((deltamu_nonphantom,np.array([deltamu[:,kk] + marg[kk] - m_bestfit_lcdm_marg])))
                    if deltamu_max[kk] > deltamu_max_global[ll][kk]:
                        deltamu_max_global[ll][kk] = deltamu_max[kk]
                    if deltamu_min[kk] < deltamu_min_global[ll][kk]:
                        deltamu_min_global[ll][kk] = deltamu_min[kk]
        # Recompute the pooled non-phantom envelope after each chain.
        deltamu_max_nonphantom = np.zeros(1000)
        deltamu_min_nonphantom = np.zeros(1000)
        for ii in np.arange(len(deltamu_max_nonphantom)):
            deltamu_max_nonphantom[ii] = np.max(deltamu_nonphantom[:,ii])
            deltamu_min_nonphantom[ii] = np.min(deltamu_nonphantom[:,ii])
        plt.plot(redshifts, deltamu_max_global[ll],lw=3,color=color,ls=linestyle,label=label)
        plt.plot(redshifts, deltamu_min_global[ll],lw=3,color=color,ls=linestyle)
        if chain_name == 'n7cpl' or chain_name == 'cpl':
            plt.plot(redshifts, deltamu_max_nonphantom,lw=3,color=colors_nonphantom[ll],ls=linestyle,label=label)
            plt.plot(redshifts, deltamu_min_nonphantom,lw=3,color=colors_nonphantom[ll],ls=linestyle)
    # Hand-shuffle the legend: insert a blank spacer entry at index 6
    # and reorder so the phantom/non-phantom columns line up.
    handles, label_names = ax.get_legend_handles_labels()
    handles.insert(6,plt.Line2D(redshifts,deltamu_min_nonphantom,linestyle='none',marker='None'))
    label_names.insert(6,'')
    handles_plot, labels_plot = ['','','','','','',''], ['']*7
    handles_plot[0] = handles[0]
    handles_plot[1] = handles[2]
    handles_plot[2] = handles[3]
    handles_plot[3] = handles[4]
    handles_plot[4] = handles[1]
    handles_plot[5] = handles[5]
    handles_plot[6] = handles[6]
    labels_plot[0] = label_names[0]
    labels_plot[1] = label_names[2]
    labels_plot[2] = label_names[3]
    labels_plot[3] = label_names[4]
    labels_plot[4] = label_names[1]
    labels_plot[5] = label_names[5]
    labels_plot[6] = label_names[6]
    font0 = FontProperties()
    font0.set_size('xx-large')
    font0.set_weight('bold')
    plt.text(2.3,0.007,'Phantom',fontproperties=font0)
    plt.text(6.3,0.007,'Non-phantom',fontproperties=font0)
    ax.legend(handles_plot,labels_plot,frameon=False, fontsize=20, ncol=2, bbox_to_anchor=(0.95,0.55), columnspacing=4.)
    #ax.legend(handles,label_names,frameon=False, loc=5, fontsize=20, ncol=2)
    plt.ylim((-0.06,0.06))
    plt.yticks([-0.06, -0.03, 0, 0.03, 0.06],size='x-large')
    plt.xticks(size='x-large')
    if individual_plots:
        fig.set_tight_layout('True')
        plt.savefig('Figures/deltamus_extrema.pdf',format='pdf')
# ---------------------------------------------------------------------------
# Script entry point: configuration followed by the plot(s) to produce.
# ---------------------------------------------------------------------------
# Directory holding the pickled Deltamu results read by the helpers above.
root_dir = '/Users/perandersen/Data/HzSC/Deltamu/'
# When False, the plotting helpers draw into axes managed by the caller
# (e.g. combined_plot) instead of opening and saving their own figures.
individual_plots = False

# Uncomment exactly the plot(s) wanted for a given run:
#combined_plot()
additional_plots()
#oplot_deltamu_extrema(['cpl', 'jbp', 'n3cpl','n7cpl'],\
#[[(30,30,30),(40,40,40),(50,50,50)], [(30,30,30),(40,40,40),(50,50,50)],[(30,30,30),(40,40,40),(50,50,50)],[(30,30,30),(40,40,40)]],\
#[[0.6],[0.6],[0.6],[0.4]], ['CPL','JBP','n3CPL','n7CPL'])
#plot_3d_contours('n7cpl', [(40,40,40)], 0.4)
#oplot_deltamus('n7cpl', [(30,30,30),(40,40,40)],[0.4],label='n7CPL',ignore_k=True,thinning=10)
#oplot_deltamus('n3cpl', [(30,30,30),(40,40,40),(50,50,50)],[0.6],label='n3CPL')
#oplot_deltamus('jbp', [(30,30,30),(40,40,40),(50,50,50)],[0.6],label='JBP')
#oplot_deltamus('cpl', [(30,30,30),(40,40,40),(50,50,50)],[0.6],label='CPL')
#oplot_deltamus('lcdm', [70,80,90,100],[0.6],tolerance=0.01)
#plt.sca(ax1[1])
#oplot_deltamu_test('n7cpl', [(30,30,30),(40,40,40)],[0.4],label='n7CPL')
#oplot_deltamu_test('n3cpl', [(30,30,30),(40,40,40),(50,50,50)],[0.3],label='n3CPL')
#oplot_deltamu_test('jbp', [(30,30,30),(40,40,40),(50,50,50)],[0.6],label='JBP')
#oplot_deltamu_test('cpl', [(30,30,30),(40,40,40),(50,50,50)],[0.6],label='CPL')
#plot_equation_of_state([(-1.,0.1), (-1.,0.2), (-1.,0.3)],[(-1.,0.7), (-1.,0.8), (-1.,.9)])
#plot_equation_of_state([(-0.6,-0.4), (-1.4,0.4)],[(-1.,0.4), (-1.,-0.4)])
#plt.show()
| gpl-3.0 |
askielboe/JAVELIN | examples/demo.py | 1 | 13965 | #Last-modified: 08 Dec 2013 15:54:47
import numpy as np
import matplotlib.pyplot as plt
from javelin.predict import PredictSignal, PredictRmap, generateLine, generateError, PredictSpear
from javelin.lcio import *
from javelin.zylc import LightCurve, get_data
from javelin.lcmodel import Cont_Model, Rmap_Model, Pmap_Model
""" Tests from scratch.
"""
#************** PLEASE DO NOT EDIT THIS PART*****
# show figures interactively
figext = None
# names of the true light curves
names = ["Continuum", "Yelm", "Zing", "YelmBand"]
# dense sampling of the underlying signal
jdense = np.linspace(0.0, 2000.0, 2000)
# DRW parameters
sigma, tau = (3.00, 400.0)
# line parameters
lagy, widy, scaley = (100.0, 2.0, 0.5)
lagz, widz, scalez = (250.0, 4.0, 0.25)
lags = [0.0, lagy, lagz]
wids = [0.0, widy, widz]
scales = [1.0, scaley, scalez]
llags = [ lagy, lagz]
lwids = [ widy, widz]
lscales = [ scaley, scalez]
lcmeans= [10.0, 5.0, 2.5]
#************************************************
def file_exists(fname) :
    """Return True when *fname* can be opened for reading, else False.

    Probes by actually opening the file, so unreadable files (not just
    missing ones) also report False.
    """
    try :
        # 'with' closes the handle deterministically, even on an error
        # raised after the open succeeds.
        with open(fname, "r") :
            return(True)
    except IOError:
        return(False)
def getTrue(trufile, set_plot=False, mode="test"):
    """ Generating dense, error-free light curves as the input signal.

    mode : "test" does nothing and returns None; "show" reads the signal
    back from *trufile*; "run" generates it and saves to *trufile*.myrun.

    NOTE(review): any other mode falls through and raises NameError at
    the final return(zydata) -- confirm the intended mode set with
    callers.  Also, the "save ..." message prints the original trufile
    while the data is actually saved under the ".myrun"-suffixed name.
    """
    if mode == "test" :
        return(None)
    elif mode == "show" :
        print("read true light curve signal from %s"%trufile)
        zydata = get_data(trufile, names=names)
    elif mode == "run" :
        print("generate true light curve signal")
        # zydata = generateTrueLC(covfunc="drw")
        # this is the fast way
        zydata = generateTrueLC2(covfunc="drw")
        print("save true light curve signal to %s"%trufile)
        # suffix ".myrun" so the generated file gets a distinct name
        trufile = ".".join([trufile, "myrun"])
        zydata.save(trufile)
    if set_plot :
        print("plot true light curve signal")
        zydata.plot(marker="None", ms=1.0, ls="-", lw=2, figout="signal", figext=figext)
    return(zydata)
def generateTrueLC(covfunc="drw"):
"""generateTrueLC
covfunc : str, optional
Name of the covariance funtion (default: drw).
"""
# create a `truth' mode light curve set with one continuum and two lines
# object name: loopdeloop
# line1 : yelm
# line2 : zing
jmin = np.max(lags)
jmax = np.max(jdense)-1.0
zylist = []
# this is for handling the prediction for Continuum.
if covfunc == "drw" :
PS = PredictSignal(lcmean=0.0, covfunc=covfunc, sigma=sigma, tau=tau)
elif covfunc == "kepler2_exp" :
PS = PredictSignal(lcmean=0.0, covfunc=covfunc, sigma=sigma, tau=tau,
nu=tau_cut, rank="NearlyFull")
else :
raise RuntimeError("current no such covfunc implemented %s"%covfunc)
# generate signal with no error
edense = np.zeros_like(jdense)
sdense = PS.generate(jdense, ewant=edense)
imin = np.searchsorted(jdense, jmin)
imax = np.searchsorted(jdense, jmax)
zylist.append([jdense[imin: imax], sdense[imin: imax], edense[imin: imax]])
# this is for handling the prediction for Yelm, and Zing.
for i in xrange(1, 3) :
lag = lags[i]
wid = wids[i]
scale= scales[i]
jl, sl = generateLine(jdense, sdense, lag, wid, scale, mc_mean=0.0, ml_mean=0.0)
imin = np.searchsorted(jl, jmin)
imax = np.searchsorted(jl, jmax)
zylist.append([jl[imin: imax], sl[imin: imax], edense[imin: imax]])
# this is for handling the prediction for YelmBand.
# TODO
zydata = LightCurve(zylist, names=names)
return(zydata)
def generateTrueLC2(covfunc="drw"):
    """ Generate RMap light curves first, with the sampling designed to allow
    a post-processing into the line band light curve. The simlest solution is
    to build light curves on dense regular time axis. The only downside here
    is that, only 'drw' covariance is allowed.

    covfunc : str, optional
        Name of the covariance funtion (default: drw).
    """
    if covfunc != "drw" :
        raise RuntimeError("current no such covfunc implemented for generateTrueLC2 %s"%covfunc)
    # spear-based generator producing continuum + two lagged/smoothed lines
    ps = PredictSpear(sigma, tau, llags, lwids, lscales, spearmode="Rmap")
    mdense = np.zeros_like(jdense)
    edense = np.zeros_like(jdense)
    # seed curves at their desired mean levels on the shared dense time axis
    zylistold = [[jdense, mdense+lcmeans[0], edense], [jdense, mdense+lcmeans[1], edense], [jdense, mdense+lcmeans[2], edense],]
    # this is for handling the prediction for Continuum, Yelm, and Zing.
    zylistnew = ps.generate(zylistold)
    # this is for handling the prediction for YelmBand:
    # the band light curve is the sum of continuum and yelm fluxes
    phlc = [jdense, mdense, edense]
    phlc[1] = zylistnew[0][1] + zylistnew[1][1]
    # combine into a single LightCurve
    zylistnew.append(phlc)
    zydata = LightCurve(zylistnew, names=names)
    return(zydata)
def True2Mock(zydata, sparse=[2, 4, 4], errfac=[0.01, 0.01, 0.01], hasgap=[True, True, True], errcov=0.0):
    """ Postprocess true light curves into realistic mock observations:
    downsample, carve seasonal gaps, and add fractional measurement errors.

    Parameters
    ----------
    zydata: LightCurve
        Input true LightCurve.
    sparse: list of int, optional
        Keep every `sparse[i]'-th epoch of light curve i.
    errfac: list of float, optional
        Fractional photometric error for light curve i.
    hasgap: list of bool, optional
        Whether to carve 180-day seasonal gaps out of light curve i.
    errcov: float, optional
        Covariance between measurement errors (0 means independent).

    Returns
    -------
    zymock: LightCurve
        Downsampled, gapped, noisy light curves.
    """
    # equivalent to the original elementwise comparison against nlc:
    # raise whenever any of the three option lists has the wrong length
    if not (len(sparse) == len(errfac) == len(hasgap) == zydata.nlc):
        raise RuntimeError("input dimensions do not match")
    names = zydata.names
    zylclist_new = []
    if any(hasgap) :
        # some may have gaps, to make sure the sun is synchronized in all
        # light curves, anchor the gap phase globally at the first epoch.
        # (removed unused locals rj/j1/ng from the original)
        j0 = zydata.jarr[0]
    for i in xrange(zydata.nlc):
        # downsample: keep every sparse[i]-th epoch
        ispa = np.arange(0, zydata.nptlist[i], sparse[i])
        j = zydata.jlist[i][ispa]
        # strip off gaps: keep only epochs falling in even 180-day windows
        if hasgap[i] :
            dj = np.floor((j - j0)/180.0)
            igap = np.where(np.mod(dj, 2) == 0)
            indx = ispa[igap]
        else :
            indx = ispa
        j = zydata.jlist[i][indx]
        m = zydata.mlist[i][indx] + zydata.blist[i]
        e = zydata.elist[i][indx]
        # adding errors: fractional error bars, plus a (possibly correlated)
        # noise realization added to the fluxes
        e = e*0.0+m*errfac[i]
        et = generateError(e, errcov=errcov)
        m = m + et
        zylclist_new.append([j, m, e])
    zymock = LightCurve(zylclist_new, names=names)
    return(zymock)
def getMock(zydata, confile, topfile, doufile, phofile, set_plot=False, mode="test") :
    """ Downsample the truth to get more realistic light curves and write
    the continuum / two-line / top-hat / photometric mock data sets.
    """
    if mode == "test" :
        return(None)
    else :
        # split the truth into continuum, yelm, zing, and yelm-band curves
        _c, _y, _z, _yb = zydata.split()
        # continuum + yelm + zing -> spectroscopic-like, gapped data set
        _zydata = _c + _y + _z
        zydata_dou = True2Mock(_zydata, sparse=[8, 8, 8], errfac=[0.01, 0.01, 0.01], hasgap=[True, True, True], errcov=0.0)
        # first two curves (continuum + yelm) form the `top-hat' data set
        zylclist_top = zydata_dou.zylclist[:2]
        zydata_top = LightCurve(zylclist_top, names=names[0:2])
        # continuum + yelm band -> photometric data set
        _zydata = _c + _yb
        zydata_pho = True2Mock(_zydata, sparse=[8, 8], errfac=[0.01, 0.01], hasgap=[True, True], errcov=0.0)
        if mode == "run" :
            # append ".myrun" so freshly generated files do not clobber
            # the packaged example data
            confile = ".".join([confile, "myrun"])
            doufile = ".".join([doufile, "myrun"])
            topfile = ".".join([topfile, "myrun"])
            phofile = ".".join([phofile, "myrun"])
            zydata_dou.save_continuum(confile)
            zydata_dou.save(doufile)
            zydata_top.save(topfile)
            zydata_pho.save(phofile)
        if set_plot :
            print("plot mock light curves for continuum, yelm, zing, and yelm band lines")
            _c, _yb = zydata_pho.split()
            zymock = zydata_dou + _yb
            zymock.plot(figout="mocklc", figext=figext)
def fitCon(confile, confchain, names=None, threads=1, set_plot=False, nwalkers=100, nburn=50, nchain=50, mode="test") :
    """ Fit the DRW continuum model.

    Parameters
    ----------
    confile: str
        Continuum light curve data file.
    confchain: str
        File for the MCMC chain.
    threads: int, optional
        Number of parallel MCMC threads (default: 1).
    mode: str, optional
        "test" (single likelihood call), "show" (load chain), or
        "run" (run the MCMC).

    Returns
    -------
    hpd: highest posterior density interval of the continuum parameters
        (None in "test" mode).
    """
    if mode == "run" :
        confile = ".".join([confile, "myrun"])
    zydata = get_data(confile, names=names)
    cont = Cont_Model(zydata, "drw")
    if mode == "test" :
        # single likelihood evaluation to verify the installation
        print(cont([np.log(2.), np.log(100)], set_retq=True))
        return(None)
    elif mode == "show" :
        cont.load_chain(confchain)
    elif mode == "run" :
        confchain = ".".join([confchain, "myrun"])
        # bug fix: honor the `threads' argument -- the original hard-coded
        # threads=1, silently ignoring the caller's setting
        cont.do_mcmc(nwalkers=nwalkers, nburn=nburn, nchain=nchain, fburn=None,
                     fchain=confchain, threads=threads)
    if set_plot :
        cont.show_hist(bins=100, figout="mcmc0", figext=figext)
    return(cont.hpd)
def fitLag(linfile, linfchain, conthpd, names=None, lagrange=[50, 300], lagbinsize=1, threads=1, set_plot=False, nwalkers=100, nburn=50, nchain=50, mode="test") :
    """ Fit the Rmap (spectroscopic reverberation mapping) model.

    conthpd is the continuum HPD from fitCon, used as a prior on the
    DRW parameters; lagrange/lagbinsize only shape the histogram plots.
    Returns the HPD of the Rmap parameters (None in "test" mode).
    """
    if mode == "run" :
        linfile = ".".join([linfile, "myrun"])
    zydata = get_data(linfile, names=names)
    rmap = Rmap_Model(zydata)
    if mode == "test" :
        # single likelihood evaluation at the fiducial truth parameters
        # (lagy/widy/scaley and lagz/widz/scalez are module-level globals)
        if zydata.nlc == 2 :
            print(rmap([np.log(2.), np.log(100), lagy, widy, scaley ]))
        elif zydata.nlc == 3 :
            print(rmap([np.log(2.), np.log(100), lagy, widy, scaley, lagz, widz, scalez]))
        return(None)
    elif mode == "show" :
        rmap.load_chain(linfchain, set_verbose=False)
    elif mode == "run" :
        # one [min, max] lag window per line light curve
        laglimit = [[0.0, 400.0],]*(zydata.nlc-1)
        print(laglimit)
        # laglimit = "baseline"
        linfchain = ".".join([linfchain, "myrun"])
        rmap.do_mcmc(conthpd=conthpd, lagtobaseline=0.5, laglimit=laglimit,
                     nwalkers=nwalkers, nburn=nburn, nchain=nchain,
                     fburn=None, fchain=linfchain, threads=threads)
    if set_plot :
        # restrict the chain to the plotting lag range before histogramming
        rmap.break_chain([lagrange,]*(zydata.nlc-1))
        rmap.get_hpd()
        if zydata.nlc == 2 :
            figout = "mcmc1"
        else :
            figout = "mcmc2"
        rmap.show_hist(bins=100, lagbinsize=lagbinsize, figout=figout, figext=figext)
    return(rmap.hpd)
def fitPmap(phofile, phofchain, conthpd, names=None, lagrange=[50, 300], lagbinsize=1, threads=1, set_plot=False, nwalkers=100, nburn=50, nchain=50,mode="test") :
    """ Fit the Pmap (photometric reverberation mapping) model.

    Same call pattern as fitLag, but for the continuum-band + line-band
    data set; returns the HPD of the Pmap parameters (None in "test" mode).
    """
    if mode == "run" :
        phofile = ".".join([phofile, "myrun"])
    zydata = get_data(phofile, names=names)
    pmap = Pmap_Model(zydata)
    if mode == "test" :
        # single likelihood evaluation at fiducial parameters
        print(pmap([np.log(2.), np.log(100), lagy, widy, scaley, 1.0]))
        return(None)
    elif mode == "show" :
        pmap.load_chain(phofchain, set_verbose=False)
    elif mode == "run" :
        laglimit = [[50.0, 130.0]] # XXX here we want to avoid 180 day limit.
        phofchain = ".".join([phofchain, "myrun"])
        pmap.do_mcmc(conthpd=conthpd, lagtobaseline=0.5, laglimit=laglimit,
                     nwalkers=nwalkers, nburn=nburn, nchain=nchain,
                     fburn=None, fchain=phofchain, threads=threads)
    if set_plot :
        pmap.break_chain([lagrange,])
        pmap.get_hpd()
        pmap.show_hist(bins=100, lagbinsize=lagbinsize, figout="mcmc3", figext=figext)
    return(pmap.hpd)
def showfit(linhpd, linfile, names=None, set_plot=False, mode="test") :
    """ Plot the Rmap model prediction at the posterior median parameters
    over the observed light curves.

    linhpd is the HPD array from fitLag (row 1 holds the medians).
    """
    if mode == "run" :
        linfile = ".".join([linfile, "myrun"])
    # consistency fix: use the print() call form like the rest of this
    # module (works identically on Python 2), instead of the Python-2-only
    # `print linfile` statement
    print(linfile)
    zydata = get_data(linfile, names=names)
    rmap = Rmap_Model(zydata)
    if mode == "test" :
        return(None)
    else :
        # predict light curves at the posterior median parameter vector
        zypred = rmap.do_pred(linhpd[1,:])
        zypred.names = names
        if set_plot :
            zypred.plot(set_pred=True, obs=zydata, figout="prediction", figext=figext)
def demo(mode) :
    """ Demonstrate the main functionalities of JAVELIN.

    Parameters
    ----------
    mode: string
        "test" : just go through some likelihood calculation to make sure JAVELIN is correctly installed.
        "show" : load example light curves and chains and show plots.
        "run" : regenerate all the light curves and chains.
    """
    from sys import platform as _platform
    if True :
        # plotting is pointless during the installation self-test
        if mode == "test" :
            set_plot = False
        elif mode == "show" :
            set_plot = True
        elif mode == "run" :
            set_plot = True
    try :
        import multiprocessing
        if _platform == "darwin" :
            # for some reason, Mac cannot handle the pools in emcee.
            threads = 1
        else :
            threads = multiprocessing.cpu_count()
    except (ImportError,NotImplementedError) :
        threads = 1
    if threads > 1 :
        print("use multiprocessing on %d cpus"%threads)
    else :
        print("use single cpu")
    # source variability
    trufile = "dat/trulc.dat"
    # observed continuum light curve w/ seasonal gap
    confile = "dat/loopdeloop_con.dat"
    # observed continuum+y light curve w/ seasonal gap
    topfile = "dat/loopdeloop_con_y.dat"
    # observed continuum+y+z light curve w/ seasonal gap
    doufile = "dat/loopdeloop_con_y_z.dat"
    # observed continuum band+y band light curve w/out seasonal gap
    phofile = "dat/loopdeloop_con_yb.dat"
    # file for storing MCMC chains
    confchain = "dat/chain0.dat"
    topfchain = "dat/chain1.dat"
    doufchain = "dat/chain2.dat"
    phofchain = "dat/chain3.dat"
    # generate truth drw signal
    zydata = getTrue(trufile, set_plot=set_plot, mode=mode)
    # generate mock light curves
    getMock(zydata, confile, topfile, doufile, phofile, set_plot=set_plot, mode=mode)
    # fit continuum
    conthpd = fitCon(confile, confchain, names=names[0:1], threads=threads, set_plot=set_plot, mode=mode)
    # fit tophat
    tophpd = fitLag(topfile, topfchain, conthpd, names=names[0:2], threads=threads, set_plot=set_plot, mode=mode)
    # fit douhat
    douhpd = fitLag(doufile, doufchain, conthpd, names=names[0:3], threads=threads, nwalkers=100, nburn=100, nchain=100,set_plot=set_plot, mode=mode)
    # show fit
    showfit(douhpd, doufile, names=names[0:3], set_plot=set_plot, mode=mode)
    # fit pmap
    phohpd = fitPmap(phofile, phofchain, conthpd, names=[names[0], names[3]], lagrange=[0, 150], lagbinsize=0.2, threads=threads, nwalkers=100, nburn=100, nchain=100,set_plot=set_plot, mode=mode)
if __name__ == "__main__":
    # dispatch on the mode given on the command line: test / show / run
    import sys
    demo(sys.argv[1])
| gpl-2.0 |
bzero/statsmodels | statsmodels/tsa/tests/test_stattools.py | 26 | 12110 | from statsmodels.compat.python import lrange
from statsmodels.tsa.stattools import (adfuller, acf, pacf_ols, pacf_yw,
pacf, grangercausalitytests,
coint, acovf,
arma_order_select_ic)
from statsmodels.tsa.base.datetools import dates_from_range
import numpy as np
from numpy.testing import (assert_almost_equal, assert_equal, assert_raises,
dec, assert_)
from numpy import genfromtxt#, concatenate
from statsmodels.datasets import macrodata, sunspots
from pandas import Series, Index, DataFrame
import os
DECIMAL_8 = 8
DECIMAL_6 = 6
DECIMAL_5 = 5
DECIMAL_4 = 4
DECIMAL_3 = 3
DECIMAL_2 = 2
DECIMAL_1 = 1
class CheckADF(object):
    """
    Test Augmented Dickey-Fuller

    Test values taken from Stata.
    """
    # labels of the critical values reported by adfuller
    levels = ['1%', '5%', '10%']
    data = macrodata.load()
    # realgdp behaves as a unit-root series, infl as a stationary one
    x = data.data['realgdp']
    y = data.data['infl']

    def test_teststat(self):
        # res1[0] is the ADF test statistic
        assert_almost_equal(self.res1[0], self.teststat, DECIMAL_5)

    def test_pvalue(self):
        # res1[1] is the MacKinnon approximate p-value
        assert_almost_equal(self.res1[1], self.pvalue, DECIMAL_5)

    def test_critvalues(self):
        # res1[4] is a dict of critical values keyed by significance level
        critvalues = [self.res1[4][lev] for lev in self.levels]
        assert_almost_equal(critvalues, self.critvalues, DECIMAL_2)
class TestADFConstant(CheckADF):
    """
    Dickey-Fuller test for unit root, constant-only regression ("c").
    """
    def __init__(self):
        self.res1 = adfuller(self.x, regression="c", autolag=None,
                             maxlag=4)
        # reference values from Stata
        self.teststat = .97505319
        self.pvalue = .99399563
        self.critvalues = [-3.476, -2.883, -2.573]
class TestADFConstantTrend(CheckADF):
    """
    ADF with constant and linear trend ("ct") on the unit-root series.
    """
    def __init__(self):
        self.res1 = adfuller(self.x, regression="ct", autolag=None,
                             maxlag=4)
        # reference values from Stata
        self.teststat = -1.8566374
        self.pvalue = .67682968
        self.critvalues = [-4.007, -3.437, -3.137]
#class TestADFConstantTrendSquared(CheckADF):
# """
# """
# pass
#TODO: get test values from R?
class TestADFNoConstant(CheckADF):
    """
    ADF with no constant ("nc") on the unit-root series.
    """
    def __init__(self):
        self.res1 = adfuller(self.x, regression="nc", autolag=None,
                             maxlag=4)
        self.teststat = 3.5227498
        self.pvalue = .99999 # Stata does not return a p-value for noconstant.
                             # Tau^max in MacKinnon (1994) is missing, so it is
                             # assumed that its right-tail is well-behaved
        self.critvalues = [-2.587, -1.950, -1.617]
# No Unit Root
class TestADFConstant2(CheckADF):
    """ADF ("c") on the stationary inflation series (no unit root)."""
    def __init__(self):
        self.res1 = adfuller(self.y, regression="c", autolag=None,
                             maxlag=1)
        self.teststat = -4.3346988
        self.pvalue = .00038661
        self.critvalues = [-3.476, -2.883, -2.573]
class TestADFConstantTrend2(CheckADF):
    """ADF ("ct") on the stationary inflation series."""
    def __init__(self):
        self.res1 = adfuller(self.y, regression="ct", autolag=None,
                             maxlag=1)
        self.teststat = -4.425093
        self.pvalue = .00199633
        self.critvalues = [-4.006, -3.437, -3.137]
class TestADFNoConstant2(CheckADF):
    """ADF ("nc") on the stationary inflation series."""
    def __init__(self):
        self.res1 = adfuller(self.y, regression="nc", autolag=None,
                             maxlag=1)
        self.teststat = -2.4511596
        self.pvalue = 0.013747 # Stata does not return a p-value for noconstant
                               # this value is just taken from our results
        self.critvalues = [-2.587,-1.950,-1.617]
class CheckCorrGram(object):
    """
    Set up for ACF, PACF tests.
    """
    data = macrodata.load()
    x = data.data['realgdp']
    # Stata reference results shipped alongside the test suite
    filename = os.path.dirname(os.path.abspath(__file__))+\
            "/results/results_corrgram.csv"
    results = genfromtxt(open(filename, "rb"), delimiter=",", names=True,dtype=float)

    #not needed: add 1. for lag zero
    #self.results['acvar'] = np.concatenate(([1.], self.results['acvar']))
class TestACF(CheckCorrGram):
    """
    Test Autocorrelation Function
    """
    def __init__(self):
        self.acf = self.results['acvar']
        #self.acf = np.concatenate(([1.], self.acf))
        self.qstat = self.results['Q1']
        self.res1 = acf(self.x, nlags=40, qstat=True, alpha=.05)
        self.confint_res = self.results[['acvar_lb','acvar_ub']].view((float,
                                                                      2))

    def test_acf(self):
        # drop lag 0 (always 1) when comparing against the Stata values
        assert_almost_equal(self.res1[0][1:41], self.acf, DECIMAL_8)

    def test_confint(self):
        # center the confidence band on zero before comparing
        centered = self.res1[1] - self.res1[1].mean(1)[:,None]
        assert_almost_equal(centered[1:41], self.confint_res, DECIMAL_8)

    def test_qstat(self):
        assert_almost_equal(self.res1[2][:40], self.qstat, DECIMAL_3)
        # 3 decimal places because of stata rounding

#    def pvalue(self):
#        pass
#NOTE: shouldn't need testing if Q stat is correct
class TestACF_FFT(CheckCorrGram):
    """
    Test Autocorrelation Function using FFT
    """
    def __init__(self):
        self.acf = self.results['acvarfft']
        self.qstat = self.results['Q1']
        self.res1 = acf(self.x, nlags=40, qstat=True, fft=True)

    def test_acf(self):
        # lag 0 is dropped before comparing with the reference values
        assert_almost_equal(self.res1[0][1:], self.acf, DECIMAL_8)

    def test_qstat(self):
        #todo why is res1/qstat 1 short
        assert_almost_equal(self.res1[1], self.qstat, DECIMAL_3)
class TestPACF(CheckCorrGram):
    """
    Partial ACF: OLS, Yule-Walker, and Levinson-Durbin variants.
    """
    def __init__(self):
        self.pacfols = self.results['PACOLS']
        self.pacfyw = self.results['PACYW']

    def test_ols(self):
        pacfols, confint = pacf(self.x, nlags=40, alpha=.05, method="ols")
        assert_almost_equal(pacfols[1:], self.pacfols, DECIMAL_6)
        centered = confint - confint.mean(1)[:,None]
        # from edited Stata ado file
        res = [[-.1375625, .1375625]] * 40
        assert_almost_equal(centered[1:41], res, DECIMAL_6)
        # check lag 0
        assert_equal(centered[0], [0., 0.])
        assert_equal(confint[0], [1, 1])
        assert_equal(pacfols[0], 1)

    def test_yw(self):
        pacfyw = pacf_yw(self.x, nlags=40, method="mle")
        assert_almost_equal(pacfyw[1:], self.pacfyw, DECIMAL_8)

    def test_ld(self):
        # Levinson-Durbin (biased/unbiased) should agree with Yule-Walker
        pacfyw = pacf_yw(self.x, nlags=40, method="mle")
        pacfld = pacf(self.x, nlags=40, method="ldb")
        assert_almost_equal(pacfyw, pacfld, DECIMAL_8)

        pacfyw = pacf(self.x, nlags=40, method="yw")
        pacfld = pacf(self.x, nlags=40, method="ldu")
        assert_almost_equal(pacfyw, pacfld, DECIMAL_8)
class CheckCoint(object):
    """
    Test Cointegration Test Results for 2-variable system

    Test values taken from Stata.
    """
    levels = ['1%', '5%', '10%']
    data = macrodata.load()
    y1 = data.data['realcons']
    y2 = data.data['realgdp']

    def test_tstat(self):
        assert_almost_equal(self.coint_t,self.teststat, DECIMAL_4)
class TestCoint_t(CheckCoint):
    """
    Engle-Granger cointegration t-statistic (AR(1) parameter on residuals).
    """
    def __init__(self):
        self.coint_t = coint(self.y1, self.y2, regression ="c")[0]
        # reference value from Stata
        self.teststat = -1.8208817
class TestGrangerCausality(object):

    def test_grangercausality(self):
        # some example data
        mdata = macrodata.load().data
        mdata = mdata[['realgdp', 'realcons']]
        data = mdata.view((float, 2))
        # log-differences approximate growth rates
        data = np.diff(np.log(data), axis=0)

        #R: lmtest:grangertest
        r_result = [0.243097, 0.7844328, 195, 2]  # f_test
        # columns are reversed via [:, 1::-1] before testing
        gr = grangercausalitytests(data[:, 1::-1], 2, verbose=False)
        assert_almost_equal(r_result, gr[2][0]['ssr_ftest'], decimal=7)
        assert_almost_equal(gr[2][0]['params_ftest'], gr[2][0]['ssr_ftest'], decimal=7)

    def test_granger_fails_on_nobs_check(self):
        # Test that if maxlag is too large, Granger Test raises a clear error.
        X = np.random.rand(10, 2)
        grangercausalitytests(X, 2, verbose=False)  # This should pass.
        assert_raises(ValueError, grangercausalitytests, X, 3, verbose=False)
def test_pandasacovf():
    # acovf on a pandas Series must agree with acovf on the raw ndarray
    ser = Series(lrange(1, 11))
    assert_almost_equal(acovf(ser), acovf(ser.values))
def test_acovf2d():
    # acovf accepts a single-column DataFrame but rejects 2-d arrays
    dta = sunspots.load_pandas().data
    dta.index = Index(dates_from_range('1700', '2008'))
    del dta["YEAR"]
    res = acovf(dta)
    assert_equal(res, acovf(dta.values))
    X = np.random.random((10,2))
    assert_raises(ValueError, acovf, X)
def test_acovf_fft_vs_convolution():
    # FFT-based and direct-convolution acovf must agree for every
    # combination of the demean/unbiased flags
    np.random.seed(1)
    series = np.random.normal(size=100)
    flags = [True, False]
    for demean in flags:
        for unbiased in flags:
            via_fft = acovf(series, demean=demean, unbiased=unbiased, fft=True)
            direct = acovf(series, demean=demean, unbiased=unbiased, fft=False)
            assert_almost_equal(via_fft, direct, decimal=7)
@dec.slow
def test_arma_order_select_ic():
    # smoke test, assumes info-criteria are right
    from statsmodels.tsa.arima_process import arma_generate_sample
    import statsmodels.api as sm

    arparams = np.array([.75, -.25])
    maparams = np.array([.65, .35])
    arparams = np.r_[1, -arparams]
    # NOTE(review): `maparam` (no trailing s) looks like a typo and is never
    # used, so the sample below is generated with the raw `maparams`; the
    # hard-coded aic_x/bic_x regression values were computed with exactly
    # this behavior, so do NOT "fix" it without regenerating them.
    maparam = np.r_[1, maparams]
    nobs = 250
    np.random.seed(2014)
    y = arma_generate_sample(arparams, maparams, nobs)
    res = arma_order_select_ic(y, ic=['aic', 'bic'], trend='nc')
    # regression tests in case we change algorithm to minic in sas
    aic_x = np.array([[ np.nan, 552.7342255 , 484.29687843],
                      [ 562.10924262, 485.5197969 , 480.32858497],
                      [ 507.04581344, 482.91065829, 481.91926034],
                      [ 484.03995962, 482.14868032, 483.86378955],
                      [ 481.8849479 , 483.8377379 , 485.83756612]])
    bic_x = np.array([[ np.nan, 559.77714733, 494.86126118],
                      [ 569.15216446, 496.08417966, 494.41442864],
                      [ 517.61019619, 496.99650196, 499.52656493],
                      [ 498.12580329, 499.75598491, 504.99255506],
                      [ 499.49225249, 504.96650341, 510.48779255]])
    aic = DataFrame(aic_x , index=lrange(5), columns=lrange(3))
    bic = DataFrame(bic_x , index=lrange(5), columns=lrange(3))
    assert_almost_equal(res.aic.values, aic.values, 5)
    assert_almost_equal(res.bic.values, bic.values, 5)
    assert_equal(res.aic_min_order, (1, 2))
    assert_equal(res.bic_min_order, (1, 2))
    assert_(res.aic.index.equals(aic.index))
    assert_(res.aic.columns.equals(aic.columns))
    assert_(res.bic.index.equals(bic.index))
    assert_(res.bic.columns.equals(bic.columns))

    # single-criterion call should reproduce the AIC table
    res = arma_order_select_ic(y, ic='aic', trend='nc')
    assert_almost_equal(res.aic.values, aic.values, 5)
    assert_(res.aic.index.equals(aic.index))
    assert_(res.aic.columns.equals(aic.columns))
    assert_equal(res.aic_min_order, (1, 2))
def test_arma_order_select_ic_failure():
    # this should trigger an SVD convergence failure, smoke test that it
    # returns, likely platform dependent failure...
    # looks like AR roots may be cancelling out for 4, 1?
    y = np.array([ 0.86074377817203640006, 0.85316549067906921611,
                   0.87104653774363305363, 0.60692382068987393851,
                   0.69225941967301307667, 0.73336177248909339976,
                   0.03661329261479619179, 0.15693067239962379955,
                   0.12777403512447857437, -0.27531446294481976 ,
                   -0.24198139631653581283, -0.23903317951236391359,
                   -0.26000241325906497947, -0.21282920015519238288,
                   -0.15943768324388354896, 0.25169301564268781179,
                   0.1762305709151877342 , 0.12678133368791388857,
                   0.89755829086753169399, 0.82667068795350151511])
    import warnings
    with warnings.catch_warnings():
        # catch a hessian inversion and convergence failure warning
        warnings.simplefilter("ignore")
        res = arma_order_select_ic(y)
def test_acf_fft_dataframe():
    # regression test #322: acf(fft=True) on a one-column DataFrame
    # should return a 1-d array, not 2-d
    result = acf(sunspots.load_pandas().data[['SUNACTIVITY']], fft=True)
    assert_equal(result.ndim, 1)
if __name__=="__main__":
    import nose
    # nose.runmodule(argv=[__file__, '-vvs','-x','-pdb'], exit=False)
    import numpy as np
    # run every test in this module under numpy's nose-based runner
    np.testing.run_module_suite()
| bsd-3-clause |
Huyuwei/tvm | nnvm/tutorials/from_mxnet.py | 2 | 5160 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
.. _tutorial-from-mxnet:
Compile MXNet Models
====================
**Author**: `Joshua Z. Zhang <https://zhreshold.github.io/>`_
This article is an introductory tutorial to deploy mxnet models with NNVM.
For us to begin with, mxnet module is required to be installed.
A quick solution is
.. code-block:: bash
pip install mxnet --user
or please refer to offical installation guide.
https://mxnet.incubator.apache.org/versions/master/install/index.html
"""
# some standard imports
import mxnet as mx
import numpy as np
import nnvm
import tvm
from tvm.contrib.download import download_testdata
######################################################################
# Download Resnet18 model from Gluon Model Zoo
# ---------------------------------------------
# In this section, we download a pretrained imagenet model and classify an image.
from mxnet.gluon.model_zoo.vision import get_model
from PIL import Image
from matplotlib import pyplot as plt
# fetch a pretrained ResNet18 from the Gluon model zoo
block = get_model('resnet18_v1', pretrained=True)
img_url = 'https://github.com/dmlc/mxnet.js/blob/master/data/cat.png?raw=true'
img_name = 'cat.png'
synset_url = ''.join(['https://gist.githubusercontent.com/zhreshold/',
                      '4d0b62f3d01426887599d4f7ede23ee5/raw/',
                      '596b27d23537e5a1b5751d2b0481ef172f58b539/',
                      'imagenet1000_clsid_to_human.txt'])
synset_name = 'imagenet1000_clsid_to_human.txt'
img_path = download_testdata(img_url, img_name, module='data')
synset_path = download_testdata(synset_url, synset_name, module='data')
# NOTE(review): eval() on a downloaded file executes arbitrary code if the
# gist content ever changes; ast.literal_eval would be safer -- TODO confirm
with open(synset_path) as f:
    synset = eval(f.read())
image = Image.open(img_path).resize((224, 224))

plt.imshow(image)
plt.show()
def transform_image(image):
    """Convert an HWC RGB image into a normalized NCHW float batch.

    Subtracts the per-channel means (123, 117, 104), divides by the
    per-channel std-devs (58.395, 57.12, 57.375), moves channels first,
    and prepends a batch axis, returning shape (1, 3, H, W).
    """
    arr = np.asarray(image, dtype=np.float64) - np.array([123., 117., 104.])
    arr = arr / np.array([58.395, 57.12, 57.375])
    chw = arr.transpose((2, 0, 1))
    return chw[np.newaxis, :]
# preprocess the test image into a (1, 3, 224, 224) float batch
x = transform_image(image)
print('x', x.shape)

######################################################################
# Compile the Graph
# -----------------
# Now we would like to port the Gluon model to a portable computational graph.
# It's as easy as several lines.

# We support MXNet static graph(symbol) and HybridBlock in mxnet.gluon
sym, params = nnvm.frontend.from_mxnet(block)
# we want a probability so add a softmax operator
sym = nnvm.sym.softmax(sym)

######################################################################
# now compile the graph
import nnvm.compiler
# target 'cuda' requires an NVIDIA GPU at runtime
target = 'cuda'
shape_dict = {'data': x.shape}
with nnvm.compiler.build_config(opt_level=3):
    graph, lib, params = nnvm.compiler.build(sym, target, shape_dict, params=params)
######################################################################
# Execute the portable graph on TVM
# ---------------------------------
# Now, we would like to reproduce the same forward computation using TVM.
from tvm.contrib import graph_runtime
ctx = tvm.gpu(0)
dtype = 'float32'
m = graph_runtime.create(graph, lib, ctx)
# set inputs
m.set_input('data', tvm.nd.array(x.astype(dtype)))
m.set_input(**params)
# execute
m.run()
# get outputs
tvm_output = m.get_output(0)
# argmax over the softmax output gives the predicted ImageNet class id
top1 = np.argmax(tvm_output.asnumpy()[0])
print('TVM prediction top-1:', top1, synset[top1])
######################################################################
# Use MXNet symbol with pretrained weights
# ----------------------------------------
# MXNet often use `arg_params` and `aux_params` to store network parameters
# separately, here we show how to use these weights with existing API
def block2symbol(block):
    """Convert a Gluon block into (symbol, arg_params, aux_params).

    NOTE(review): `auxs` is returned empty here -- only `args` is filled
    from collect_params(); models with auxiliary state may need auxs
    populated as well -- TODO confirm against mx.model checkpoint semantics.
    """
    data = mx.sym.Variable('data')
    sym = block(data)
    args = {}
    auxs = {}
    for k, v in block.collect_params().items():
        args[k] = mx.nd.array(v.data().asnumpy())
    return sym, args, auxs
mx_sym, args, auxs = block2symbol(block)
# usually we would save/load it as checkpoint
mx.model.save_checkpoint('resnet18_v1', 0, mx_sym, args, auxs)
# there are 'resnet18_v1-0000.params' and 'resnet18_v1-symbol.json' on disk

######################################################################
# for a normal mxnet model, we start from here
mx_sym, args, auxs = mx.model.load_checkpoint('resnet18_v1', 0)
# now we use the same API to get NNVM compatible symbol
nnvm_sym, nnvm_params = nnvm.frontend.from_mxnet(mx_sym, args, auxs)
# repeat the same steps to run this model using TVM
| apache-2.0 |
DiamondLightSource/auto_tomo_calibration-experimental | old_code_scripts/measure_resolution/lmfit-py/doc/sphinx/numpydoc/docscrape_sphinx.py | 154 | 7759 | import re, inspect, textwrap, pydoc
import sphinx
from docscrape import NumpyDocString, FunctionDoc, ClassDoc
class SphinxDocString(NumpyDocString):
    """NumpyDocString subclass that renders each docstring section as
    Sphinx-flavored reST (rubrics, field lists, autosummary tables).

    NOTE(review): indentation inside several string literals below may have
    been lost in transport -- compare against upstream numpydoc if the reST
    output looks misaligned.
    """
    def __init__(self, docstring, config={}):
        # 'use_plots' turns matplotlib Examples into plot:: directives
        self.use_plots = config.get('use_plots', False)
        NumpyDocString.__init__(self, docstring, config=config)

    # string conversion routines
    def _str_header(self, name, symbol='`'):
        return ['.. rubric:: ' + name, '']

    def _str_field_list(self, name):
        return [':' + name + ':']

    def _str_indent(self, doc, indent=4):
        # indent every line of `doc` by `indent` spaces
        out = []
        for line in doc:
            out += [' '*indent + line]
        return out

    def _str_signature(self):
        # NOTE(review): the immediate return makes the code below
        # unreachable, i.e. signatures are suppressed in Sphinx output --
        # presumably deliberate; confirm before "fixing"
        return ['']
        if self['Signature']:
            return ['``%s``' % self['Signature']] + ['']
        else:
            return ['']

    def _str_summary(self):
        return self['Summary'] + ['']

    def _str_extended_summary(self):
        return self['Extended Summary'] + ['']

    def _str_param_list(self, name):
        # render Parameters/Returns/... as a reST field list with the
        # description indented below each "**name** : type" line
        out = []
        if self[name]:
            out += self._str_field_list(name)
            out += ['']
            for param,param_type,desc in self[name]:
                out += self._str_indent(['**%s** : %s' % (param.strip(),
                                                          param_type)])
                out += ['']
                out += self._str_indent(desc,8)
                out += ['']
        return out

    @property
    def _obj(self):
        # the documented object: class if attached, else function, else None
        if hasattr(self, '_cls'):
            return self._cls
        elif hasattr(self, '_f'):
            return self._f
        return None

    def _str_member_list(self, name):
        """
        Generate a member listing, autosummary:: table where possible,
        and a table where not.
        """
        out = []
        if self[name]:
            out += ['.. rubric:: %s' % name, '']
            prefix = getattr(self, '_name', '')

            if prefix:
                prefix = '~%s.' % prefix

            autosum = []
            others = []
            for param, param_type, desc in self[name]:
                param = param.strip()
                # members that actually exist on the object go into the
                # autosummary table; unknown names get a hand-built table
                if not self._obj or hasattr(self._obj, param):
                    autosum += [" %s%s" % (prefix, param)]
                else:
                    others.append((param, param_type, desc))

            if autosum:
                out += ['.. autosummary::', ' :toctree:', '']
                out += autosum

            if others:
                # fixed-width reST table sized to the longest name/type
                maxlen_0 = max([len(x[0]) for x in others])
                maxlen_1 = max([len(x[1]) for x in others])
                hdr = "="*maxlen_0 + " " + "="*maxlen_1 + " " + "="*10
                fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1)
                n_indent = maxlen_0 + maxlen_1 + 4
                out += [hdr]
                for param, param_type, desc in others:
                    out += [fmt % (param.strip(), param_type)]
                    out += self._str_indent(desc, n_indent)
                out += [hdr]
            out += ['']
        return out

    def _str_section(self, name):
        out = []
        if self[name]:
            out += self._str_header(name)
            out += ['']
            # dedent so the section body aligns under the rubric
            content = textwrap.dedent("\n".join(self[name])).split("\n")
            out += content
            out += ['']
        return out

    def _str_see_also(self, func_role):
        out = []
        if self['See Also']:
            see_also = super(SphinxDocString, self)._str_see_also(func_role)
            out = ['.. seealso::', '']
            # drop the plain-text header emitted by the base class
            out += self._str_indent(see_also[2:])
        return out

    def _str_warnings(self):
        out = []
        if self['Warnings']:
            out = ['.. warning::', '']
            out += self._str_indent(self['Warnings'])
        return out

    def _str_index(self):
        idx = self['index']
        out = []
        if len(idx) == 0:
            return out

        out += ['.. index:: %s' % idx.get('default','')]
        # NOTE(review): dict.iteritems is Python-2-only
        for section, references in idx.iteritems():
            if section == 'default':
                continue
            elif section == 'refguide':
                out += [' single: %s' % (', '.join(references))]
            else:
                out += [' %s: %s' % (section, ','.join(references))]
        return out

    def _str_references(self):
        out = []
        if self['References']:
            out += self._str_header('References')
            if isinstance(self['References'], str):
                self['References'] = [self['References']]
            out.extend(self['References'])
            out += ['']
            # Latex collects all references to a separate bibliography,
            # so we need to insert links to it
            # NOTE(review): lexicographic version compare is fragile
            # (e.g. "0.10" < "0.6") -- confirm minimum supported sphinx
            if sphinx.__version__ >= "0.6":
                out += ['.. only:: latex','']
            else:
                out += ['.. latexonly::','']
            items = []
            for line in self['References']:
                m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
                if m:
                    items.append(m.group(1))
            out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
        return out

    def _str_examples(self):
        examples_str = "\n".join(self['Examples'])

        # promote matplotlib-using Examples to a plot:: directive when the
        # user opted in and the docstring did not already add one
        if (self.use_plots and 'import matplotlib' in examples_str
                and 'plot::' not in examples_str):
            out = []
            out += self._str_header('Examples')
            out += ['.. plot::', '']
            out += self._str_indent(self['Examples'])
            out += ['']
            return out
        else:
            return self._str_section('Examples')

    def __str__(self, indent=0, func_role="obj"):
        # assemble all sections in canonical order, then indent the result
        out = []
        out += self._str_signature()
        out += self._str_index() + ['']
        out += self._str_summary()
        out += self._str_extended_summary()
        for param_list in ('Parameters', 'Returns', 'Other Parameters',
                           'Raises', 'Warns'):
            out += self._str_param_list(param_list)
        out += self._str_warnings()
        out += self._str_see_also(func_role)
        out += self._str_section('Notes')
        out += self._str_references()
        out += self._str_examples()
        for param_list in ('Attributes', 'Methods'):
            out += self._str_member_list(param_list)
        out = self._str_indent(out,indent)
        return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
    """Sphinx-rendering docstring wrapper for functions and methods."""
    def __init__(self, obj, doc=None, config={}):
        self.use_plots = config.get('use_plots', False)
        FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
    """Sphinx-rendering docstring wrapper for classes."""
    def __init__(self, obj, doc=None, func_doc=None, config={}):
        self.use_plots = config.get('use_plots', False)
        # NOTE(review): the `func_doc` argument is accepted but not
        # forwarded (ClassDoc gets func_doc=None) -- confirm intended
        ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
    """Sphinx-rendering wrapper for an arbitrary object with a given docstring."""
    def __init__(self, obj, doc=None, config={}):
        self._f = obj
        SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
    """Return the appropriate Sphinx*Doc wrapper for `obj`.

    `what` may force the kind ('class', 'module', 'function', 'method',
    'object'); when None it is inferred from the object itself.
    """
    if what is None:
        what = ('class' if inspect.isclass(obj)
                else 'module' if inspect.ismodule(obj)
                else 'function' if callable(obj)
                else 'object')
    if what == 'class':
        return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
                              config=config)
    if what in ('function', 'method'):
        return SphinxFunctionDoc(obj, doc=doc, config=config)
    # fall back to a plain object wrapper, fetching the docstring if needed
    if doc is None:
        doc = pydoc.getdoc(obj)
    return SphinxObjDoc(obj, doc, config=config)
| apache-2.0 |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/pandas/io/tests/parser/python_parser_only.py | 7 | 7755 | # -*- coding: utf-8 -*-
"""
Tests that apply specifically to the Python parser. Unless specifically
stated as a Python-specific issue, the goal is to eventually move as many of
these tests out of this module as soon as the C parser can accept further
arguments when parsing.
"""
import csv
import sys
import nose
import pandas.util.testing as tm
from pandas import DataFrame, Index
from pandas import compat
from pandas.compat import StringIO, BytesIO, u
class PythonParserTests(object):
    def test_negative_skipfooter_raises(self):
        # a negative skipfooter must raise a clear ValueError
        text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""

        with tm.assertRaisesRegexp(
                ValueError, 'skip footer cannot be negative'):
            self.read_csv(StringIO(text), skipfooter=-1)
    def test_sniff_delimiter(self):
        # sep=None triggers csv.Sniffer-style delimiter detection
        text = """index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
"""
        data = self.read_csv(StringIO(text), index_col=0, sep=None)
        self.assert_index_equal(data.index,
                                Index(['foo', 'bar', 'baz'], name='index'))

        # sniffed result must match an explicitly given delimiter
        data2 = self.read_csv(StringIO(text), index_col=0, delimiter='|')
        tm.assert_frame_equal(data, data2)

        text = """ignore this
ignore this too
index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
"""
        # sniffing still works when skiprows drops leading junk lines
        data3 = self.read_csv(StringIO(text), index_col=0,
                              sep=None, skiprows=2)
        tm.assert_frame_equal(data, data3)

        text = u("""ignore this
ignore this too
index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
""").encode('utf-8')

        s = BytesIO(text)
        if compat.PY3:
            # somewhat False since the code never sees bytes
            from io import TextIOWrapper
            s = TextIOWrapper(s, encoding='utf-8')

        # sniffing also works on a (wrapped) byte stream
        data4 = self.read_csv(s, index_col=0, sep=None, skiprows=2,
                              encoding='utf-8')
        tm.assert_frame_equal(data, data4)
    def test_BytesIO_input(self):
        if not compat.PY3:
            raise nose.SkipTest(
                "Bytes-related test - only needs to work on Python 3")
        # non-ASCII (cp1255) bytes with a multi-character '::' separator
        data = BytesIO("שלום::1234\n562::123".encode('cp1255'))
        result = self.read_table(data, sep="::", encoding='cp1255')
        expected = DataFrame([[562, 123]], columns=["שלום", "1234"])
        tm.assert_frame_equal(result, expected)
def test_single_line(self):
# see gh-6607: sniff separator
buf = StringIO()
sys.stdout = buf
try:
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
def test_skipfooter(self):
# see gh-6607
data = """A,B,C
1,2,3
4,5,6
7,8,9
want to skip this
also also skip this
"""
result = self.read_csv(StringIO(data), skipfooter=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = self.read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), nrows=3)
tm.assert_frame_equal(result, expected)
# skipfooter alias
result = self.read_csv(StringIO(data), skipfooter=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = self.read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
def test_decompression_regex_sep(self):
# see gh-6607
try:
import gzip
import bz2
except ImportError:
raise nose.SkipTest('need gzip and bz2 to run')
with open(self.csv1, 'rb') as f:
data = f.read()
data = data.replace(b',', b'::')
expected = self.read_csv(self.csv1)
with tm.ensure_clean() as path:
tmp = gzip.GzipFile(path, mode='wb')
tmp.write(data)
tmp.close()
result = self.read_csv(path, sep='::', compression='gzip')
tm.assert_frame_equal(result, expected)
with tm.ensure_clean() as path:
tmp = bz2.BZ2File(path, mode='wb')
tmp.write(data)
tmp.close()
result = self.read_csv(path, sep='::', compression='bz2')
tm.assert_frame_equal(result, expected)
self.assertRaises(ValueError, self.read_csv,
path, compression='bz3')
def test_read_table_buglet_4x_multiindex(self):
# see gh-6607
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
df = self.read_table(StringIO(text), sep=r'\s+')
self.assertEqual(df.index.names, ('one', 'two', 'three', 'four'))
# see gh-6893
data = ' A B C\na b c\n1 3 7 0 3 6\n3 1 4 1 5 9'
expected = DataFrame.from_records(
[(1, 3, 7, 0, 3, 6), (3, 1, 4, 1, 5, 9)],
columns=list('abcABC'), index=list('abc'))
actual = self.read_table(StringIO(data), sep=r'\s+')
tm.assert_frame_equal(actual, expected)
def test_skipfooter_with_decimal(self):
# see gh-6971
data = '1#2\n3#4'
expected = DataFrame({'a': [1.2, 3.4]})
result = self.read_csv(StringIO(data), names=['a'],
decimal='#')
tm.assert_frame_equal(result, expected)
# the stray footer line should not mess with the
# casting of the first t wo lines if we skip it
data = data + '\nFooter'
result = self.read_csv(StringIO(data), names=['a'],
decimal='#', skipfooter=1)
tm.assert_frame_equal(result, expected)
def test_encoding_non_utf8_multichar_sep(self):
# see gh-3404
expected = DataFrame({'a': [1], 'b': [2]})
for sep in ['::', '#####', '!!!', '123', '#1!c5',
'%!c!d', '@@#4:2', '_!pd#_']:
data = '1' + sep + '2'
for encoding in ['utf-16', 'utf-16-be', 'utf-16-le',
'utf-32', 'cp037']:
encoded_data = data.encode(encoding)
result = self.read_csv(BytesIO(encoded_data),
sep=sep, names=['a', 'b'],
encoding=encoding)
tm.assert_frame_equal(result, expected)
def test_multi_char_sep_quotes(self):
# see gh-13374
data = 'a,,b\n1,,a\n2,,"2,,b"'
msg = 'ignored when a multi-char delimiter is used'
with tm.assertRaisesRegexp(ValueError, msg):
self.read_csv(StringIO(data), sep=',,')
# We expect no match, so there should be an assertion
# error out of the inner context manager.
with tm.assertRaises(AssertionError):
with tm.assertRaisesRegexp(ValueError, msg):
self.read_csv(StringIO(data), sep=',,',
quoting=csv.QUOTE_NONE)
def test_skipfooter_bad_row(self):
# see gh-13879
data = 'a,b,c\ncat,foo,bar\ndog,foo,"baz'
msg = 'parsing errors in the skipped footer rows'
with tm.assertRaisesRegexp(csv.Error, msg):
self.read_csv(StringIO(data), skipfooter=1)
# We expect no match, so there should be an assertion
# error out of the inner context manager.
with tm.assertRaises(AssertionError):
with tm.assertRaisesRegexp(csv.Error, msg):
self.read_csv(StringIO(data))
| gpl-3.0 |
michaelaye/scikit-image | doc/examples/plot_local_binary_pattern.py | 17 | 6774 | """
===============================================
Local Binary Pattern for texture classification
===============================================
In this example, we will see how to classify textures based on LBP (Local
Binary Pattern). LBP looks at points surrounding a central point and tests
whether the surrounding points are greater than or less than the central point
(i.e. gives a binary result).
Before trying out LBP on an image, it helps to look at a schematic of LBPs.
The below code is just used to plot the schematic.
"""
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
METHOD = 'uniform'
plt.rcParams['font.size'] = 9
def plot_circle(ax, center, radius, color):
    """Draw one filled circle with a gray outline onto *ax*."""
    patch = plt.Circle(center, radius, facecolor=color, edgecolor='0.5')
    ax.add_patch(patch)
def plot_lbp_model(ax, binary_values):
    """Draw the schematic for a local binary pattern.

    ``binary_values`` holds one grayscale value per neighbour (0 = black,
    1 = white), in counter-clockwise order starting at the right.
    """
    # Geometry spec
    theta = np.deg2rad(45)  # angular spacing between the 8 neighbours
    R = 1                   # radius of the ring of neighbour pixels
    r = 0.15                # radius of each plotted pixel dot
    w = 1.5                 # half-width of the 3x3 background grid
    gray = '0.5'
    # Draw the central pixel.
    plot_circle(ax, (0, 0), radius=r, color=gray)
    # Draw the surrounding pixels.
    for i, facecolor in enumerate(binary_values):
        x = R * np.cos(i * theta)
        y = R * np.sin(i * theta)
        plot_circle(ax, (x, y), radius=r, color=str(facecolor))
    # Draw the pixel grid.
    for x in np.linspace(-w, w, 4):
        ax.axvline(x, color=gray)
        ax.axhline(x, color=gray)
    # Tweak the layout.
    ax.axis('image')
    ax.axis('off')
    size = w + 0.2  # small margin so the grid lines are not clipped
    ax.set_xlim(-size, size)
    ax.set_ylim(-size, size)
fig, axes = plt.subplots(ncols=5, figsize=(7, 2))
titles = ['flat', 'flat', 'edge', 'corner', 'non-uniform']
binary_patterns = [np.zeros(8),
np.ones(8),
np.hstack([np.ones(4), np.zeros(4)]),
np.hstack([np.zeros(3), np.ones(5)]),
[1, 0, 0, 1, 1, 1, 0, 0]]
for ax, values, name in zip(axes, binary_patterns, titles):
plot_lbp_model(ax, values)
ax.set_title(name)
"""
.. image:: PLOT2RST.current_figure
The figure above shows example results with black (or white) representing
pixels that are less (or more) intense than the central pixel. When surrounding
pixels are all black or all white, then that image region is flat (i.e.
featureless). Groups of continuous black or white pixels are considered
"uniform" patterns that can be interpreted as corners or edges. If pixels
switch back-and-forth between black and white pixels, the pattern is considered
"non-uniform".
When using LBP to detect texture, you measure a collection of LBPs over an
image patch and look at the distribution of these LBPs. Lets apply LBP to
a brick texture.
"""
from skimage.transform import rotate
from skimage.feature import local_binary_pattern
from skimage import data
from skimage.color import label2rgb
# settings for LBP
radius = 3
n_points = 8 * radius
def overlay_labels(image, lbp, labels):
    """Return *image* with the pixels whose LBP code is in *labels* highlighted."""
    per_label_masks = [lbp == code for code in labels]
    combined = np.logical_or.reduce(per_label_masks)
    return label2rgb(combined, image=image, bg_label=0, alpha=0.5)
def highlight_bars(bars, indexes):
    """Color the histogram bars at the given *indexes* red."""
    for idx in indexes:
        bars[idx].set_facecolor('r')
image = data.load('brick.png')
lbp = local_binary_pattern(image, n_points, radius, METHOD)
def hist(ax, lbp):
    """Plot the normalized histogram of the LBP codes in *lbp* on *ax*."""
    n_bins = lbp.max() + 1  # one bin per distinct LBP code (0..max)
    # NOTE(review): `normed` was removed in matplotlib >= 3.1; newer
    # matplotlib versions require `density=True` instead — confirm the
    # pinned matplotlib version before upgrading.
    return ax.hist(lbp.ravel(), normed=True, bins=n_bins, range=(0, n_bins),
                   facecolor='0.5')
# plot histograms of LBP of textures
fig, (ax_img, ax_hist) = plt.subplots(nrows=2, ncols=3, figsize=(9, 6))
plt.gray()
titles = ('edge', 'flat', 'corner')
w = width = radius - 1
edge_labels = range(n_points // 2 - w, n_points // 2 + w + 1)
flat_labels = list(range(0, w + 1)) + list(range(n_points - w, n_points + 2))
i_14 = n_points // 4 # 1/4th of the histogram
i_34 = 3 * (n_points // 4) # 3/4th of the histogram
corner_labels = (list(range(i_14 - w, i_14 + w + 1)) +
list(range(i_34 - w, i_34 + w + 1)))
label_sets = (edge_labels, flat_labels, corner_labels)
for ax, labels in zip(ax_img, label_sets):
ax.imshow(overlay_labels(image, lbp, labels))
for ax, labels, name in zip(ax_hist, label_sets, titles):
counts, _, bars = hist(ax, lbp)
highlight_bars(bars, labels)
ax.set_ylim(ymax=np.max(counts[:-1]))
ax.set_xlim(xmax=n_points + 2)
ax.set_title(name)
ax_hist[0].set_ylabel('Percentage')
for ax in ax_img:
ax.axis('off')
"""
.. image:: PLOT2RST.current_figure
The above plot highlights flat, edge-like, and corner-like regions of the
image.
The histogram of the LBP result is a good measure to classify textures. Here,
we test the histogram distributions against each other using the
Kullback-Leibler-Divergence.
"""
# settings for LBP
radius = 2
n_points = 8 * radius
def kullback_leibler_divergence(p, q):
    """Kullback-Leibler divergence D(p || q) in bits.

    Bins where either distribution is zero are skipped, so the result is
    always finite.
    """
    p = np.asarray(p)
    q = np.asarray(q)
    both_nonzero = (p != 0) & (q != 0)
    p_nz = p[both_nonzero]
    q_nz = q[both_nonzero]
    return np.sum(p_nz * np.log2(p_nz / q_nz))
def match(refs, img):
    """Classify *img* by comparing its LBP histogram against references.

    Parameters
    ----------
    refs : dict
        Maps texture name -> LBP image of a reference texture.
    img : ndarray
        Grayscale image to classify.

    Returns
    -------
    str or None
        Name of the reference with the lowest KL divergence, or ``None``
        if every reference scores >= the initial threshold.
    """
    # NOTE(review): 10 acts as an implicit rejection threshold — if no
    # reference has divergence below 10, None is returned.  Presumably
    # intentional; use float('inf') if a match should always be returned.
    best_score = 10
    best_name = None
    lbp = local_binary_pattern(img, n_points, radius, METHOD)
    n_bins = lbp.max() + 1
    # local `hist` shadows the module-level hist() plotting helper
    hist, _ = np.histogram(lbp, normed=True, bins=n_bins, range=(0, n_bins))
    for name, ref in refs.items():
        ref_hist, _ = np.histogram(ref, normed=True, bins=n_bins,
                                   range=(0, n_bins))
        score = kullback_leibler_divergence(hist, ref_hist)
        if score < best_score:
            best_score = score
            best_name = name
    return best_name
brick = data.load('brick.png')
grass = data.load('grass.png')
wall = data.load('rough-wall.png')
refs = {
'brick': local_binary_pattern(brick, n_points, radius, METHOD),
'grass': local_binary_pattern(grass, n_points, radius, METHOD),
'wall': local_binary_pattern(wall, n_points, radius, METHOD)
}
# classify rotated textures
print('Rotated images matched against references using LBP:')
print('original: brick, rotated: 30deg, match result: ',
match(refs, rotate(brick, angle=30, resize=False)))
print('original: brick, rotated: 70deg, match result: ',
match(refs, rotate(brick, angle=70, resize=False)))
print('original: grass, rotated: 145deg, match result: ',
match(refs, rotate(grass, angle=145, resize=False)))
# plot histograms of LBP of textures
fig, ((ax1, ax2, ax3), (ax4, ax5, ax6)) = plt.subplots(nrows=2, ncols=3,
figsize=(9, 6))
plt.gray()
ax1.imshow(brick)
ax1.axis('off')
hist(ax4, refs['brick'])
ax4.set_ylabel('Percentage')
ax2.imshow(grass)
ax2.axis('off')
hist(ax5, refs['grass'])
ax5.set_xlabel('Uniform LBP values')
ax3.imshow(wall)
ax3.axis('off')
hist(ax6, refs['wall'])
"""
.. image:: PLOT2RST.current_figure
"""
plt.show()
| bsd-3-clause |
themrmax/scikit-learn | sklearn/feature_selection/tests/test_feature_select.py | 43 | 26651 | """
Todo: cross-check the F-value with stats model
"""
from __future__ import division
import itertools
import warnings
import numpy as np
from scipy import stats, sparse
from numpy.testing import run_module_suite
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_not_in
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils import safe_mask
from sklearn.datasets.samples_generator import (make_classification,
make_regression)
from sklearn.feature_selection import (
chi2, f_classif, f_oneway, f_regression, mutual_info_classif,
mutual_info_regression, SelectPercentile, SelectKBest, SelectFpr,
SelectFdr, SelectFwe, GenericUnivariateSelect)
##############################################################################
# Test the score functions
def test_f_oneway_vs_scipy_stats():
# Test that our f_oneway gives the same result as scipy.stats
rng = np.random.RandomState(0)
X1 = rng.randn(10, 3)
X2 = 1 + rng.randn(10, 3)
f, pv = stats.f_oneway(X1, X2)
f2, pv2 = f_oneway(X1, X2)
assert_true(np.allclose(f, f2))
assert_true(np.allclose(pv, pv2))
def test_f_oneway_ints():
    # Smoke test f_oneway on integers: check that it does not raise
    # casting errors with recent numpys
    rng = np.random.RandomState(0)
    X = rng.randint(10, size=(10, 10))
    y = np.arange(10)
    fint, pint = f_oneway(X, y)
    # test that it gives the same result as with float
    f, p = f_oneway(X.astype(np.float), y)
    assert_array_almost_equal(f, fint, decimal=4)
    assert_array_almost_equal(p, pint, decimal=4)
def test_f_classif():
# Test whether the F test yields meaningful results
# on a simple simulated classification problem
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
F, pv = f_classif(X, y)
F_sparse, pv_sparse = f_classif(sparse.csr_matrix(X), y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
def test_f_regression():
# Test whether the F test yields meaningful results
# on a simple simulated regression problem
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0)
F, pv = f_regression(X, y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
# with centering, compare with sparse
F, pv = f_regression(X, y, center=True)
F_sparse, pv_sparse = f_regression(sparse.csr_matrix(X), y, center=True)
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
# again without centering, compare with sparse
F, pv = f_regression(X, y, center=False)
F_sparse, pv_sparse = f_regression(sparse.csr_matrix(X), y, center=False)
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
def test_f_regression_input_dtype():
# Test whether f_regression returns the same value
# for any numeric data_type
rng = np.random.RandomState(0)
X = rng.rand(10, 20)
y = np.arange(10).astype(np.int)
F1, pv1 = f_regression(X, y)
F2, pv2 = f_regression(X, y.astype(np.float))
assert_array_almost_equal(F1, F2, 5)
assert_array_almost_equal(pv1, pv2, 5)
def test_f_regression_center():
# Test whether f_regression preserves dof according to 'center' argument
# We use two centered variates so we have a simple relationship between
# F-score with variates centering and F-score without variates centering.
# Create toy example
X = np.arange(-5, 6).reshape(-1, 1) # X has zero mean
n_samples = X.size
Y = np.ones(n_samples)
Y[::2] *= -1.
Y[0] = 0. # have Y mean being null
F1, _ = f_regression(X, Y, center=True)
F2, _ = f_regression(X, Y, center=False)
assert_array_almost_equal(F1 * (n_samples - 1.) / (n_samples - 2.), F2)
assert_almost_equal(F2[0], 0.232558139) # value from statsmodels OLS
def test_f_classif_multi_class():
# Test whether the F test yields meaningful results
# on a simple simulated classification problem
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
F, pv = f_classif(X, y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
def test_select_percentile_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the percentile heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_classif, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
param=25).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_percentile_classif_sparse():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the percentile heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
X = sparse.csr_matrix(X)
univariate_filter = SelectPercentile(f_classif, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
param=25).fit(X, y).transform(X)
assert_array_equal(X_r.toarray(), X_r2.toarray())
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
X_r2inv = univariate_filter.inverse_transform(X_r2)
assert_true(sparse.issparse(X_r2inv))
support_mask = safe_mask(X_r2inv, support)
assert_equal(X_r2inv.shape, X.shape)
assert_array_equal(X_r2inv[:, support_mask].toarray(), X_r.toarray())
# Check other columns are empty
assert_equal(X_r2inv.getnnz(), X_r.getnnz())
##############################################################################
# Test univariate selection in classification settings
def test_select_kbest_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the k best heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k=5)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_classif, mode='k_best', param=5).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_kbest_all():
# Test whether k="all" correctly returns all features.
X, y = make_classification(n_samples=20, n_features=10,
shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k='all')
X_r = univariate_filter.fit(X, y).transform(X)
assert_array_equal(X, X_r)
def test_select_kbest_zero():
# Test whether k=0 correctly returns no features.
X, y = make_classification(n_samples=20, n_features=10,
shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k=0)
univariate_filter.fit(X, y)
support = univariate_filter.get_support()
gtruth = np.zeros(10, dtype=bool)
assert_array_equal(support, gtruth)
X_selected = assert_warns_message(UserWarning, 'No features were selected',
univariate_filter.transform, X)
assert_equal(X_selected.shape, (20, 0))
def test_select_heuristics_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the fdr, fwe and fpr heuristics
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectFwe(f_classif, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
gtruth = np.zeros(20)
gtruth[:5] = 1
for mode in ['fdr', 'fpr', 'fwe']:
X_r2 = GenericUnivariateSelect(
f_classif, mode=mode, param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
assert_array_almost_equal(support, gtruth)
##############################################################################
# Test univariate selection in regression settings
def assert_best_scores_kept(score_filter):
    # Check that the selected features are exactly the top-scoring ones:
    # the sorted scores at supported positions must equal the
    # `support.sum()` largest scores overall.
    # NOTE(review): if support.sum() == 0, the slice `[-0:]` yields the
    # whole array and the comparison fails; callers always select >= 1.
    scores = score_filter.scores_
    support = score_filter.get_support()
    assert_array_equal(np.sort(scores[support]),
                       np.sort(scores)[-support.sum():])
def test_select_percentile_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the percentile heuristic
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_regression, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='percentile', param=25).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
X_2 = X.copy()
X_2[:, np.logical_not(support)] = 0
assert_array_equal(X_2, univariate_filter.inverse_transform(X_r))
# Check inverse_transform respects dtype
assert_array_equal(X_2.astype(bool),
univariate_filter.inverse_transform(X_r.astype(bool)))
def test_select_percentile_regression_full():
# Test whether the relative univariate feature selection
# selects all features when '100%' is asked.
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_regression, percentile=100)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='percentile', param=100).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.ones(20)
assert_array_equal(support, gtruth)
def test_invalid_percentile():
X, y = make_regression(n_samples=10, n_features=20,
n_informative=2, shuffle=False, random_state=0)
assert_raises(ValueError, SelectPercentile(percentile=-1).fit, X, y)
assert_raises(ValueError, SelectPercentile(percentile=101).fit, X, y)
assert_raises(ValueError, GenericUnivariateSelect(mode='percentile',
param=-1).fit, X, y)
assert_raises(ValueError, GenericUnivariateSelect(mode='percentile',
param=101).fit, X, y)
def test_select_kbest_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the k best heuristic
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0, noise=10)
univariate_filter = SelectKBest(f_regression, k=5)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='k_best', param=5).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_heuristics_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the fpr, fdr or fwe heuristics
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0, noise=10)
univariate_filter = SelectFpr(f_regression, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
gtruth = np.zeros(20)
gtruth[:5] = 1
for mode in ['fdr', 'fpr', 'fwe']:
X_r2 = GenericUnivariateSelect(
f_regression, mode=mode, param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool))
assert_less(np.sum(support[5:] == 1), 3)
def test_boundary_case_ch2():
# Test boundary case, and always aim to select 1 feature.
X = np.array([[10, 20], [20, 20], [20, 30]])
y = np.array([[1], [0], [0]])
scores, pvalues = chi2(X, y)
assert_array_almost_equal(scores, np.array([4., 0.71428571]))
assert_array_almost_equal(pvalues, np.array([0.04550026, 0.39802472]))
filter_fdr = SelectFdr(chi2, alpha=0.1)
filter_fdr.fit(X, y)
support_fdr = filter_fdr.get_support()
assert_array_equal(support_fdr, np.array([True, False]))
filter_kbest = SelectKBest(chi2, k=1)
filter_kbest.fit(X, y)
support_kbest = filter_kbest.get_support()
assert_array_equal(support_kbest, np.array([True, False]))
filter_percentile = SelectPercentile(chi2, percentile=50)
filter_percentile.fit(X, y)
support_percentile = filter_percentile.get_support()
assert_array_equal(support_percentile, np.array([True, False]))
filter_fpr = SelectFpr(chi2, alpha=0.1)
filter_fpr.fit(X, y)
support_fpr = filter_fpr.get_support()
assert_array_equal(support_fpr, np.array([True, False]))
filter_fwe = SelectFwe(chi2, alpha=0.1)
filter_fwe.fit(X, y)
support_fwe = filter_fwe.get_support()
assert_array_equal(support_fwe, np.array([True, False]))
def test_select_fdr_regression():
# Test that fdr heuristic actually has low FDR.
def single_fdr(alpha, n_informative, random_state):
X, y = make_regression(n_samples=150, n_features=20,
n_informative=n_informative, shuffle=False,
random_state=random_state, noise=10)
with warnings.catch_warnings(record=True):
# Warnings can be raised when no features are selected
# (low alpha or very noisy data)
univariate_filter = SelectFdr(f_regression, alpha=alpha)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_regression, mode='fdr', param=alpha).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
num_false_positives = np.sum(support[n_informative:] == 1)
num_true_positives = np.sum(support[:n_informative] == 1)
if num_false_positives == 0:
return 0.
false_discovery_rate = (num_false_positives /
(num_true_positives + num_false_positives))
return false_discovery_rate
for alpha in [0.001, 0.01, 0.1]:
for n_informative in [1, 5, 10]:
# As per Benjamini-Hochberg, the expected false discovery rate
# should be lower than alpha:
# FDR = E(FP / (TP + FP)) <= alpha
false_discovery_rate = np.mean([single_fdr(alpha, n_informative,
random_state) for
random_state in range(100)])
assert_greater_equal(alpha, false_discovery_rate)
# Make sure that the empirical false discovery rate increases
# with alpha:
if false_discovery_rate != 0:
assert_greater(false_discovery_rate, alpha / 10)
def test_select_fwe_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the fwe heuristic
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectFwe(f_regression, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_regression, mode='fwe', param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool))
assert_less(np.sum(support[5:] == 1), 2)
def test_selectkbest_tiebreaking():
    """SelectKBest must select exactly k features even when scores tie.

    Prior to 0.11, SelectKBest would return more features than requested.
    """
    Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
    y = [1]

    def dummy_score(X, y):
        # Use the single sample's feature values as both scores and
        # p-values, so duplicated feature values produce tied scores.
        return X[0], X[0]

    for X in Xs:
        sel = SelectKBest(dummy_score, k=1)
        X1 = ignore_warnings(sel.fit_transform)([X], y)
        assert_equal(X1.shape[1], 1)
        assert_best_scores_kept(sel)
        sel = SelectKBest(dummy_score, k=2)
        X2 = ignore_warnings(sel.fit_transform)([X], y)
        assert_equal(X2.shape[1], 2)
        assert_best_scores_kept(sel)
def test_selectpercentile_tiebreaking():
    """SelectPercentile must select the right n_features in case of ties."""
    Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
    y = [1]

    def dummy_score(X, y):
        # Use the single sample's feature values as both scores and
        # p-values, so duplicated feature values produce tied scores.
        return X[0], X[0]

    for X in Xs:
        sel = SelectPercentile(dummy_score, percentile=34)
        X1 = ignore_warnings(sel.fit_transform)([X], y)
        assert_equal(X1.shape[1], 1)
        assert_best_scores_kept(sel)
        sel = SelectPercentile(dummy_score, percentile=67)
        X2 = ignore_warnings(sel.fit_transform)([X], y)
        assert_equal(X2.shape[1], 2)
        assert_best_scores_kept(sel)
def test_tied_pvalues():
# Test whether k-best and percentiles work with tied pvalues from chi2.
# chi2 will return the same p-values for the following features, but it
# will return different scores.
X0 = np.array([[10000, 9999, 9998], [1, 1, 1]])
y = [0, 1]
for perm in itertools.permutations((0, 1, 2)):
X = X0[:, perm]
Xt = SelectKBest(chi2, k=2).fit_transform(X, y)
assert_equal(Xt.shape, (2, 2))
assert_not_in(9998, Xt)
Xt = SelectPercentile(chi2, percentile=67).fit_transform(X, y)
assert_equal(Xt.shape, (2, 2))
assert_not_in(9998, Xt)
def test_scorefunc_multilabel():
# Test whether k-best and percentiles works with multilabels with chi2.
X = np.array([[10000, 9999, 0], [100, 9999, 0], [1000, 99, 0]])
y = [[1, 1], [0, 1], [1, 0]]
Xt = SelectKBest(chi2, k=2).fit_transform(X, y)
assert_equal(Xt.shape, (3, 2))
assert_not_in(0, Xt)
Xt = SelectPercentile(chi2, percentile=67).fit_transform(X, y)
assert_equal(Xt.shape, (3, 2))
assert_not_in(0, Xt)
def test_tied_scores():
# Test for stable sorting in k-best with tied scores.
X_train = np.array([[0, 0, 0], [1, 1, 1]])
y_train = [0, 1]
for n_features in [1, 2, 3]:
sel = SelectKBest(chi2, k=n_features).fit(X_train, y_train)
X_test = sel.transform([[0, 1, 2]])
assert_array_equal(X_test[0], np.arange(3)[-n_features:])
def test_nans():
# Assert that SelectKBest and SelectPercentile can handle NaNs.
# First feature has zero variance to confuse f_classif (ANOVA) and
# make it return a NaN.
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
for select in (SelectKBest(f_classif, 2),
SelectPercentile(f_classif, percentile=67)):
ignore_warnings(select.fit)(X, y)
assert_array_equal(select.get_support(indices=True), np.array([1, 2]))
def test_score_func_error():
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
for SelectFeatures in [SelectKBest, SelectPercentile, SelectFwe,
SelectFdr, SelectFpr, GenericUnivariateSelect]:
assert_raises(TypeError, SelectFeatures(score_func=10).fit, X, y)
def test_invalid_k():
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
assert_raises(ValueError, SelectKBest(k=-1).fit, X, y)
assert_raises(ValueError, SelectKBest(k=4).fit, X, y)
assert_raises(ValueError,
GenericUnivariateSelect(mode='k_best', param=-1).fit, X, y)
assert_raises(ValueError,
GenericUnivariateSelect(mode='k_best', param=4).fit, X, y)
def test_f_classif_constant_feature():
# Test that f_classif warns if a feature is constant throughout.
X, y = make_classification(n_samples=10, n_features=5)
X[:, 0] = 2.0
assert_warns(UserWarning, f_classif, X, y)
def test_no_feature_selected():
rng = np.random.RandomState(0)
# Generate random uncorrelated data: a strict univariate test should
# rejects all the features
X = rng.rand(40, 10)
y = rng.randint(0, 4, size=40)
strict_selectors = [
SelectFwe(alpha=0.01).fit(X, y),
SelectFdr(alpha=0.01).fit(X, y),
SelectFpr(alpha=0.01).fit(X, y),
SelectPercentile(percentile=0).fit(X, y),
SelectKBest(k=0).fit(X, y),
]
for selector in strict_selectors:
assert_array_equal(selector.get_support(), np.zeros(10))
X_selected = assert_warns_message(
UserWarning, 'No features were selected', selector.transform, X)
assert_equal(X_selected.shape, (40, 0))
def test_mutual_info_classif():
X, y = make_classification(n_samples=100, n_features=5,
n_informative=1, n_redundant=1,
n_repeated=0, n_classes=2,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
# Test in KBest mode.
univariate_filter = SelectKBest(mutual_info_classif, k=2)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
mutual_info_classif, mode='k_best', param=2).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(5)
gtruth[:2] = 1
assert_array_equal(support, gtruth)
# Test in Percentile mode.
univariate_filter = SelectPercentile(mutual_info_classif, percentile=40)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
mutual_info_classif, mode='percentile', param=40).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(5)
gtruth[:2] = 1
assert_array_equal(support, gtruth)
def test_mutual_info_regression():
    """SelectKBest/SelectPercentile with mutual_info_regression must agree
    with the equivalent GenericUnivariateSelect setup and keep exactly the
    two informative features."""
    X, y = make_regression(n_samples=100, n_features=10, n_informative=2,
                           shuffle=False, random_state=0, noise=10)
    # The two informative columns come first because shuffle=False.
    expected_support = np.zeros(10)
    expected_support[:2] = 1

    # KBest mode.
    univariate_filter = SelectKBest(mutual_info_regression, k=2)
    X_r = univariate_filter.fit(X, y).transform(X)
    assert_best_scores_kept(univariate_filter)
    X_r2 = GenericUnivariateSelect(
        mutual_info_regression, mode='k_best', param=2).fit(X, y).transform(X)
    assert_array_equal(X_r, X_r2)
    assert_array_equal(univariate_filter.get_support(), expected_support)

    # Percentile mode.
    univariate_filter = SelectPercentile(mutual_info_regression, percentile=20)
    X_r = univariate_filter.fit(X, y).transform(X)
    X_r2 = GenericUnivariateSelect(mutual_info_regression, mode='percentile',
                                   param=20).fit(X, y).transform(X)
    assert_array_equal(X_r, X_r2)
    assert_array_equal(univariate_filter.get_support(), expected_support)
# Allow running this test module directly with ``python <file>``.
if __name__ == '__main__':
    run_module_suite()
| bsd-3-clause |
MartinThoma/algorithms | ML/movielens-20m/ml-20m/movies_analysis.py | 1 | 2395 | from collections import Counter
from itertools import combinations
import clana.io
import clana.visualize_cm
import networkx as nx
import numpy as np
import pandas as pd
import progressbar
# Load the data
# Expects a MovieLens-style ``movies.csv`` with a pipe-separated genre column.
df = pd.read_csv("movies.csv")
# Turn "Action|Comedy" into ["Action", "Comedy"] for per-genre analysis.
df["genres"] = df["genres"].str.split("|")
# Analyze the data
# Flatten the per-movie genre lists into one list of genre occurrences.
list_values = [value for valueset in df["genres"].tolist() for value in valueset]
value_count = Counter(list_values)
print("* Movies: {}".format(len(df)))
print("* Unique genres: {}".format(len(value_count)))
print("* Most common:")
# Sort (genre, count) pairs by descending count and show the top ten.
most_common = sorted(value_count.items(), key=lambda n: n[1], reverse=True)
for name, count in most_common[:10]:
    print(f"    {count:>4}x {name}")
unique_genres = sorted(list(value_count.keys()))
def get_biggest_clusters(edges, n=10):
    """Return the ``n`` largest connected components of a co-occurrence graph.

    Parameters
    ----------
    edges : pd.Series
        Series whose elements are lists of labels (e.g. genres); labels that
        appear together in one list become connected in the graph.
    n : int, optional
        How many components to return, largest first.

    Returns
    -------
    list of set
        The ``n`` biggest connected components, sorted by size (descending).
    """
    G = nx.Graph()
    label_sets = edges.tolist()
    for label_set in label_sets:
        for label in label_set:
            G.add_node(label)
    # Bug fix: iterate the ``edges`` argument instead of the module-level
    # ``df["genres"]`` -- the original read the global here and silently
    # ignored whatever was passed in.  The 10_000 cap keeps the quadratic
    # pair expansion cheap on large inputs.
    for label_set in progressbar.progressbar(label_sets[:10_000]):
        for label_a, label_b in combinations(label_set, 2):
            G.add_edge(label_a, label_b)
    print("Edges were added")
    components = sorted(nx.connected_components(G), key=len, reverse=True)
    return components[:n]
def create_matrix(nodes, edges):
    """Build a symmetric co-occurrence count matrix for ``nodes``.

    Parameters
    ----------
    nodes : iterable
        Labels to count; they index the matrix in sorted order.
    edges : iterable of lists
        Each list contributes one count for every pair of its members that
        are both in ``nodes``.

    Returns
    -------
    (np.ndarray, list)
        The int32 count matrix and the sorted node labels.
    """
    ordered = sorted(nodes)
    index_of = {node: i for i, node in enumerate(ordered)}
    size = len(nodes)
    mat = np.zeros((size, size), dtype=np.int32)
    for edge in edges:
        for a, b in combinations(edge, 2):
            # Ignore pair members that are not tracked nodes.
            if a in index_of and b in index_of:
                i, j = index_of[a], index_of[b]
                mat[i][j] += 1
                if i != j:
                    mat[j][i] += 1
    return mat, ordered
# Build the genre co-occurrence graph and keep the largest connected
# components.
components = get_biggest_clusters(df["genres"])
print("* Biggest clusters: {}".format([len(el) for el in components]))
# Pair each member of the biggest component with its occurrence count.
component_w_publications = [(author, value_count[author]) for author in components[0]]
component_w_publications = sorted(
    component_w_publications, key=lambda n: n[1], reverse=True
)
# Keep the 100 most frequent labels for the co-occurrence matrix.
authors = [author for author, count in component_w_publications[:1_00]]
mat, labels = create_matrix(authors, df["genres"].tolist())
# Persist the matrix and labels, then render the visualization with clana.
clana.io.write_cm("genre-combinations.json", mat)
clana.io.write_labels("labels.json", labels)
clana.visualize_cm.main(
    "genre-combinations.json",
    perm_file="",
    steps=1_000_000,
    labels_file="labels.json",
    zero_diagonal=False,
    output="cm-genre-combinations.pdf",
)
| mit |
shenzebang/scikit-learn | sklearn/utils/tests/test_sparsefuncs.py | 157 | 13799 | import numpy as np
import scipy.sparse as sp
from scipy import linalg
from numpy.testing import assert_array_almost_equal, assert_array_equal
from sklearn.datasets import make_classification
from sklearn.utils.sparsefuncs import (mean_variance_axis,
inplace_column_scale,
inplace_row_scale,
inplace_swap_row, inplace_swap_column,
min_max_axis,
count_nonzero, csc_median_axis_0)
from sklearn.utils.sparsefuncs_fast import assign_rows_csr
from sklearn.utils.testing import assert_raises
def test_mean_variance_axis0():
    """mean_variance_axis(axis=0) must agree with np.mean/np.var for CSR and
    CSC inputs, in float64 and float32, and raise TypeError on LIL input."""
    X, _ = make_classification(5, 4, random_state=0)
    # Sparsify the array a little bit
    X[0, 0] = 0
    X[2, 1] = 0
    X[4, 3] = 0
    X_lil = sp.lil_matrix(X)
    X_lil[1, 0] = 0
    X[1, 0] = 0
    X_csr = sp.csr_matrix(X_lil)
    X_means, X_vars = mean_variance_axis(X_csr, axis=0)
    assert_array_almost_equal(X_means, np.mean(X, axis=0))
    assert_array_almost_equal(X_vars, np.var(X, axis=0))
    X_csc = sp.csc_matrix(X_lil)
    X_means, X_vars = mean_variance_axis(X_csc, axis=0)
    assert_array_almost_equal(X_means, np.mean(X, axis=0))
    assert_array_almost_equal(X_vars, np.var(X, axis=0))
    assert_raises(TypeError, mean_variance_axis, X_lil, axis=0)
    # Repeat with float32 data.
    X = X.astype(np.float32)
    X_csr = X_csr.astype(np.float32)
    # Bug fix: cast the CSC matrix itself.  The original wrote
    # ``X_csc = X_csr.astype(np.float32)``, which produced a CSR matrix and
    # left the float32 CSC code path untested.
    X_csc = X_csc.astype(np.float32)
    X_means, X_vars = mean_variance_axis(X_csr, axis=0)
    assert_array_almost_equal(X_means, np.mean(X, axis=0))
    assert_array_almost_equal(X_vars, np.var(X, axis=0))
    X_means, X_vars = mean_variance_axis(X_csc, axis=0)
    assert_array_almost_equal(X_means, np.mean(X, axis=0))
    assert_array_almost_equal(X_vars, np.var(X, axis=0))
    assert_raises(TypeError, mean_variance_axis, X_lil, axis=0)
def test_mean_variance_illegal_axis():
    """mean_variance_axis must raise ValueError for unsupported axis values."""
    X, _ = make_classification(5, 4, random_state=0)
    # Sparsify the array a little bit
    X[0, 0] = 0
    X[2, 1] = 0
    X[4, 3] = 0
    X_csr = sp.csr_matrix(X)
    # Only axis 0 and 1 are meaningful for a 2D sparse matrix.
    for bad_axis in (-3, 2, -1):
        assert_raises(ValueError, mean_variance_axis, X_csr, axis=bad_axis)
def test_mean_variance_axis1():
    """mean_variance_axis(axis=1) must agree with np.mean/np.var for CSR and
    CSC inputs, in float64 and float32, and raise TypeError on LIL input."""
    X, _ = make_classification(5, 4, random_state=0)
    # Sparsify the array a little bit
    X[0, 0] = 0
    X[2, 1] = 0
    X[4, 3] = 0
    X_lil = sp.lil_matrix(X)
    X_lil[1, 0] = 0
    X[1, 0] = 0
    X_csr = sp.csr_matrix(X_lil)
    X_means, X_vars = mean_variance_axis(X_csr, axis=1)
    assert_array_almost_equal(X_means, np.mean(X, axis=1))
    assert_array_almost_equal(X_vars, np.var(X, axis=1))
    X_csc = sp.csc_matrix(X_lil)
    X_means, X_vars = mean_variance_axis(X_csc, axis=1)
    assert_array_almost_equal(X_means, np.mean(X, axis=1))
    assert_array_almost_equal(X_vars, np.var(X, axis=1))
    assert_raises(TypeError, mean_variance_axis, X_lil, axis=1)
    # Repeat with float32 data.
    X = X.astype(np.float32)
    X_csr = X_csr.astype(np.float32)
    # Bug fix: cast the CSC matrix itself.  The original wrote
    # ``X_csc = X_csr.astype(np.float32)``, which produced a CSR matrix and
    # left the float32 CSC code path untested.
    X_csc = X_csc.astype(np.float32)
    X_means, X_vars = mean_variance_axis(X_csr, axis=1)
    assert_array_almost_equal(X_means, np.mean(X, axis=1))
    assert_array_almost_equal(X_vars, np.var(X, axis=1))
    X_means, X_vars = mean_variance_axis(X_csc, axis=1)
    assert_array_almost_equal(X_means, np.mean(X, axis=1))
    assert_array_almost_equal(X_vars, np.var(X, axis=1))
    assert_raises(TypeError, mean_variance_axis, X_lil, axis=1)
def test_densify_rows():
    """assign_rows_csr must copy the selected CSR rows into the selected rows
    of a preallocated dense array, leaving every other row untouched."""
    sparse_input = sp.csr_matrix([[0, 3, 0],
                                  [2, 4, 0],
                                  [0, 0, 0],
                                  [9, 8, 7],
                                  [4, 0, 5]], dtype=np.float64)
    source_rows = np.array([0, 2, 3], dtype=np.intp)
    target_rows = np.array([1, 3, 4], dtype=np.intp)
    dense_out = np.ones((6, sparse_input.shape[1]), dtype=np.float64)
    # Reference: densify the chosen rows by hand.
    expected = np.ones_like(dense_out)
    expected[target_rows] = sparse_input[source_rows, :].toarray()
    assign_rows_csr(sparse_input, source_rows, target_rows, dense_out)
    assert_array_equal(dense_out, expected)
def test_inplace_column_scale():
    """inplace_column_scale must match dense column scaling on CSR and CSC
    matrices, in float64 and float32, and raise TypeError on LIL input."""
    rng = np.random.RandomState(0)
    X = sp.rand(100, 200, 0.05)
    scale = rng.rand(200)
    for dtype in (np.float64, np.float32):
        X_typed = X.astype(dtype)
        scale_typed = scale.astype(dtype)
        Xr = X_typed.tocsr()
        Xc = X_typed.tocsc()
        XA = X_typed.toarray()
        # Dense reference: broadcast multiplies each column by its factor.
        XA *= scale_typed
        inplace_column_scale(Xc, scale_typed)
        inplace_column_scale(Xr, scale_typed)
        assert_array_almost_equal(Xr.toarray(), Xc.toarray())
        assert_array_almost_equal(XA, Xc.toarray())
        assert_array_almost_equal(XA, Xr.toarray())
        assert_raises(TypeError, inplace_column_scale, X_typed.tolil(),
                      scale_typed)
def test_inplace_row_scale():
    """inplace_row_scale must match dense row scaling on CSR and CSC
    matrices, in float64 and float32, and raise TypeError on LIL input."""
    rng = np.random.RandomState(0)
    X = sp.rand(100, 200, 0.05)
    Xr = X.tocsr()
    Xc = X.tocsc()
    XA = X.toarray()
    scale = rng.rand(100)
    # Dense reference: multiply each row by its factor.
    XA *= scale.reshape(-1, 1)
    inplace_row_scale(Xc, scale)
    inplace_row_scale(Xr, scale)
    assert_array_almost_equal(Xr.toarray(), Xc.toarray())
    assert_array_almost_equal(XA, Xc.toarray())
    assert_array_almost_equal(XA, Xr.toarray())
    # Bug fix: the original asserted on ``inplace_column_scale`` here (a
    # copy-paste slip), so inplace_row_scale's own LIL rejection was never
    # exercised.
    assert_raises(TypeError, inplace_row_scale, X.tolil(), scale)
    # Repeat with float32 data.
    X = X.astype(np.float32)
    scale = scale.astype(np.float32)
    Xr = X.tocsr()
    Xc = X.tocsc()
    XA = X.toarray()
    XA *= scale.reshape(-1, 1)
    inplace_row_scale(Xc, scale)
    inplace_row_scale(Xr, scale)
    assert_array_almost_equal(Xr.toarray(), Xc.toarray())
    assert_array_almost_equal(XA, Xc.toarray())
    assert_array_almost_equal(XA, Xr.toarray())
    assert_raises(TypeError, inplace_row_scale, X.tolil(), scale)
def test_inplace_swap_row():
    """inplace_swap_row on CSR/CSC must mirror swapping rows of the dense
    array, in float64 and float32, and raise TypeError on LIL input."""
    for dtype in (np.float64, np.float32):
        X = np.array([[0, 3, 0],
                      [2, 4, 0],
                      [0, 0, 0],
                      [9, 8, 7],
                      [4, 0, 5]], dtype=dtype)
        X_csr = sp.csr_matrix(X)
        X_csc = sp.csc_matrix(X)
        # BLAS swap keeps the dense reference in sync with the sparse ops.
        swap = linalg.get_blas_funcs(('swap',), (X,))[0]
        # Swap the first and last rows (negative index form).
        X[0], X[-1] = swap(X[0], X[-1])
        inplace_swap_row(X_csr, 0, -1)
        inplace_swap_row(X_csc, 0, -1)
        assert_array_equal(X_csr.toarray(), X_csc.toarray())
        assert_array_equal(X, X_csc.toarray())
        assert_array_equal(X, X_csr.toarray())
        # Swap two interior rows.
        X[2], X[3] = swap(X[2], X[3])
        inplace_swap_row(X_csr, 2, 3)
        inplace_swap_row(X_csc, 2, 3)
        assert_array_equal(X_csr.toarray(), X_csc.toarray())
        assert_array_equal(X, X_csc.toarray())
        assert_array_equal(X, X_csr.toarray())
        assert_raises(TypeError, inplace_swap_row, X_csr.tolil())
def test_inplace_swap_column():
    """inplace_swap_column on CSR/CSC must mirror swapping columns of the
    dense array, in float64 and float32, and raise TypeError on LIL input."""
    for dtype in (np.float64, np.float32):
        X = np.array([[0, 3, 0],
                      [2, 4, 0],
                      [0, 0, 0],
                      [9, 8, 7],
                      [4, 0, 5]], dtype=dtype)
        X_csr = sp.csr_matrix(X)
        X_csc = sp.csc_matrix(X)
        # BLAS swap keeps the dense reference in sync with the sparse ops.
        swap = linalg.get_blas_funcs(('swap',), (X,))[0]
        # Swap the first and last columns (negative index form).
        X[:, 0], X[:, -1] = swap(X[:, 0], X[:, -1])
        inplace_swap_column(X_csr, 0, -1)
        inplace_swap_column(X_csc, 0, -1)
        assert_array_equal(X_csr.toarray(), X_csc.toarray())
        assert_array_equal(X, X_csc.toarray())
        assert_array_equal(X, X_csr.toarray())
        # Swap two adjacent columns.
        X[:, 0], X[:, 1] = swap(X[:, 0], X[:, 1])
        inplace_swap_column(X_csr, 0, 1)
        inplace_swap_column(X_csc, 0, 1)
        assert_array_equal(X_csr.toarray(), X_csc.toarray())
        assert_array_equal(X, X_csc.toarray())
        assert_array_equal(X, X_csr.toarray())
        assert_raises(TypeError, inplace_swap_column, X_csr.tolil())
def test_min_max_axis0():
    """min_max_axis(axis=0) must agree with dense per-column min/max for CSR
    and CSC inputs, in float64 and float32."""
    dense = np.array([[0, 3, 0],
                      [2, -1, 0],
                      [0, 0, 0],
                      [9, 8, 7],
                      [4, 0, 5]], dtype=np.float64)
    for X in (dense, dense.astype(np.float32)):
        for sparse_format in (sp.csr_matrix, sp.csc_matrix):
            mins, maxs = min_max_axis(sparse_format(X), axis=0)
            assert_array_equal(mins, X.min(axis=0))
            assert_array_equal(maxs, X.max(axis=0))
def test_min_max_axis1():
    """min_max_axis(axis=1) must agree with dense per-row min/max for CSR
    and CSC inputs, in float64 and float32."""
    dense = np.array([[0, 3, 0],
                      [2, -1, 0],
                      [0, 0, 0],
                      [9, 8, 7],
                      [4, 0, 5]], dtype=np.float64)
    for X in (dense, dense.astype(np.float32)):
        for sparse_format in (sp.csr_matrix, sp.csc_matrix):
            mins, maxs = min_max_axis(sparse_format(X), axis=1)
            assert_array_equal(mins, X.min(axis=1))
            assert_array_equal(maxs, X.max(axis=1))
def test_min_max_axis_errors():
    """min_max_axis must raise TypeError on LIL input and ValueError for
    out-of-range axis values."""
    X = np.array([[0, 3, 0],
                  [2, -1, 0],
                  [0, 0, 0],
                  [9, 8, 7],
                  [4, 0, 5]], dtype=np.float64)
    X_csr = sp.csr_matrix(X)
    X_csc = sp.csc_matrix(X)
    # Unsupported sparse format.
    assert_raises(TypeError, min_max_axis, X_csr.tolil(), axis=0)
    # Axis outside the valid range for a 2D matrix.
    assert_raises(ValueError, min_max_axis, X_csr, axis=2)
    assert_raises(ValueError, min_max_axis, X_csc, axis=-3)
def test_count_nonzero():
    """count_nonzero must match dense ``(X != 0)`` sums (optionally weighted)
    for all supported axes, accept only CSR input, and reject bad axes."""
    X = np.array([[0, 3, 0],
                  [2, -1, 0],
                  [0, 0, 0],
                  [9, 8, 7],
                  [4, 0, 5]], dtype=np.float64)
    X_csr = sp.csr_matrix(X)
    X_csc = sp.csc_matrix(X)
    X_nonzero = X != 0
    sample_weight = [.5, .2, .3, .1, .1]
    # Weighted reference: each nonzero contributes its row's weight.
    X_nonzero_weighted = X_nonzero * np.array(sample_weight)[:, None]
    for axis in [0, 1, -1, -2, None]:
        assert_array_almost_equal(count_nonzero(X_csr, axis=axis),
                                  X_nonzero.sum(axis=axis))
        assert_array_almost_equal(count_nonzero(X_csr, axis=axis,
                                                sample_weight=sample_weight),
                                  X_nonzero_weighted.sum(axis=axis))
    # Only the CSR format is supported; other formats must be rejected.
    assert_raises(TypeError, count_nonzero, X_csc)
    assert_raises(ValueError, count_nonzero, X_csr, axis=2)
def test_csc_row_median():
    """csc_median_axis_0 must compute per-column medians that match
    ``np.median(..., axis=0)`` on dense, sparse, and toy inputs, and reject
    non-CSC matrices."""
    # Test csc_row_median actually calculates the median.
    # Test that it gives the same output when X is dense.
    rng = np.random.RandomState(0)
    X = rng.rand(100, 50)
    dense_median = np.median(X, axis=0)
    csc = sp.csc_matrix(X)
    sparse_median = csc_median_axis_0(csc)
    assert_array_equal(sparse_median, dense_median)
    # Test that it gives the same output when X is sparse
    X = rng.rand(51, 100)
    X[X < 0.7] = 0.0
    # Flip the sign of some rows so both signs are covered.
    ind = rng.randint(0, 50, 10)
    X[ind] = -X[ind]
    csc = sp.csc_matrix(X)
    dense_median = np.median(X, axis=0)
    sparse_median = csc_median_axis_0(csc)
    assert_array_equal(sparse_median, dense_median)
    # Test for toy data.
    X = [[0, -2], [-1, -1], [1, 0], [2, 1]]
    csc = sp.csc_matrix(X)
    assert_array_equal(csc_median_axis_0(csc), np.array([0.5, -0.5]))
    X = [[0, -2], [-1, -5], [1, -3]]
    csc = sp.csc_matrix(X)
    assert_array_equal(csc_median_axis_0(csc), np.array([0., -3]))
    # Test that it raises an Error for non-csc matrices.
    assert_raises(TypeError, csc_median_axis_0, sp.csr_matrix(X))
| bsd-3-clause |
andrewnc/scikit-learn | sklearn/datasets/lfw.py | 141 | 19372 | """Loader for the Labeled Faces in the Wild (LFW) dataset
This dataset is a collection of JPEG pictures of famous people collected
over the internet, all details are available on the official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. The typical task is called
Face Verification: given a pair of two pictures, a binary classifier
must predict whether the two images are from the same person.
An alternative task, Face Recognition or Face Identification is:
given the picture of the face of an unknown person, identify the name
of the person by referring to a gallery of previously seen pictures of
identified persons.
Both Face Verification and Face Recognition are tasks that are typically
performed on the output of a model trained to perform Face Detection. The
most popular model for Face Detection is called Viola-Johns and is
implemented in the OpenCV library. The LFW faces were extracted by this face
detector from various online websites.
"""
# Copyright (c) 2011 Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from os import listdir, makedirs, remove
from os.path import join, exists, isdir
from sklearn.utils import deprecated
import logging
import numpy as np
try:
import urllib.request as urllib # for backwards compatibility
except ImportError:
import urllib
from .base import get_data_home, Bunch
from ..externals.joblib import Memory
from ..externals.six import b
logger = logging.getLogger(__name__)
BASE_URL = "http://vis-www.cs.umass.edu/lfw/"
ARCHIVE_NAME = "lfw.tgz"
FUNNELED_ARCHIVE_NAME = "lfw-funneled.tgz"
TARGET_FILENAMES = [
'pairsDevTrain.txt',
'pairsDevTest.txt',
'pairs.txt',
]
def scale_face(face):
    """Rescale ``face`` so its values span the [0, 1] range.

    Useful for plotting images whose values were shifted or normalized.
    The input array is not modified; a rescaled copy is returned.
    """
    rescaled = face - face.min()
    rescaled /= rescaled.max()
    return rescaled
#
# Common private utilities for data fetching from the original LFW website
# local disk caching, and image decoding.
#
def check_fetch_lfw(data_home=None, funneled=True, download_if_missing=True):
    """Helper function to download any missing LFW data.

    Ensures the metadata files and the (funneled or original) image archive
    are present under ``<data_home>/lfw_home``, downloading and extracting
    them when allowed.

    Parameters
    ----------
    data_home : str, optional
        Cache folder; defaults to the scikit-learn data home.
    funneled : bool
        Whether to use the funneled (aligned) variant of the images.
    download_if_missing : bool
        If False, raise IOError instead of downloading missing files.

    Returns
    -------
    lfw_home : str
        Root of the local LFW cache.
    data_folder_path : str
        Folder holding the extracted jpeg files.
    """
    data_home = get_data_home(data_home=data_home)
    lfw_home = join(data_home, "lfw_home")
    if funneled:
        archive_path = join(lfw_home, FUNNELED_ARCHIVE_NAME)
        data_folder_path = join(lfw_home, "lfw_funneled")
        archive_url = BASE_URL + FUNNELED_ARCHIVE_NAME
    else:
        archive_path = join(lfw_home, ARCHIVE_NAME)
        data_folder_path = join(lfw_home, "lfw")
        archive_url = BASE_URL + ARCHIVE_NAME
    if not exists(lfw_home):
        makedirs(lfw_home)
    for target_filename in TARGET_FILENAMES:
        target_filepath = join(lfw_home, target_filename)
        if not exists(target_filepath):
            if download_if_missing:
                url = BASE_URL + target_filename
                logger.warning("Downloading LFW metadata: %s", url)
                urllib.urlretrieve(url, target_filepath)
            else:
                raise IOError("%s is missing" % target_filepath)
    if not exists(data_folder_path):
        if not exists(archive_path):
            if download_if_missing:
                logger.warning("Downloading LFW data (~200MB): %s",
                               archive_url)
                urllib.urlretrieve(archive_url, archive_path)
            else:
                # Bug fix: report the missing archive, not the stale
                # ``target_filepath`` left over from the metadata loop above.
                raise IOError("%s is missing" % archive_path)
        import tarfile
        logger.info("Decompressing the data archive to %s", data_folder_path)
        tarfile.open(archive_path, "r:gz").extractall(path=lfw_home)
        remove(archive_path)
    return lfw_home, data_folder_path
def _load_imgs(file_paths, slice_, color, resize):
    """Internally used to load images.

    Decode the jpeg files in ``file_paths`` into one float32 array scaled to
    [0, 1], optionally cropping (``slice_``), rescaling (``resize`` ratio)
    and averaging the color channels to gray (when ``color`` is False).
    """
    # Try to import imread and imresize from PIL. We do this here to prevent
    # the whole sklearn.datasets module from depending on PIL.
    try:
        try:
            from scipy.misc import imread
        except ImportError:
            from scipy.misc.pilutil import imread
        from scipy.misc import imresize
    except ImportError:
        raise ImportError("The Python Imaging Library (PIL)"
                          " is required to load data from jpeg files")
    # compute the portion of the images to load to respect the slice_ parameter
    # given by the caller
    default_slice = (slice(0, 250), slice(0, 250))
    if slice_ is None:
        slice_ = default_slice
    else:
        # Fill in any missing (None) component with the default full slice.
        slice_ = tuple(s or ds for s, ds in zip(slice_, default_slice))
    h_slice, w_slice = slice_
    h = (h_slice.stop - h_slice.start) // (h_slice.step or 1)
    w = (w_slice.stop - w_slice.start) // (w_slice.step or 1)
    if resize is not None:
        resize = float(resize)
        h = int(resize * h)
        w = int(resize * w)
    # allocate some contiguous memory to host the decoded image slices
    n_faces = len(file_paths)
    if not color:
        faces = np.zeros((n_faces, h, w), dtype=np.float32)
    else:
        faces = np.zeros((n_faces, h, w, 3), dtype=np.float32)
    # iterate over the collected file path to load the jpeg files as numpy
    # arrays
    for i, file_path in enumerate(file_paths):
        if i % 1000 == 0:
            logger.info("Loading face #%05d / %05d", i + 1, n_faces)
        # Checks if jpeg reading worked. Refer to issue #3594 for more
        # details.
        img = imread(file_path)
        # Bug fix: compare with ``==`` -- ``is`` tests object identity and is
        # not a reliable way to compare integer values.
        if img.ndim == 0:
            raise RuntimeError("Failed to read the image file %s, "
                               "Please make sure that libjpeg is installed"
                               % file_path)
        face = np.asarray(img[slice_], dtype=np.float32)
        face /= 255.0  # scale uint8 coded colors to the [0.0, 1.0] floats
        if resize is not None:
            face = imresize(face, resize)
        if not color:
            # average the color channels to compute a gray levels
            # representation
            face = face.mean(axis=2)
        faces[i, ...] = face
    return faces
#
# Task #1: Face Identification on picture with names
#
def _fetch_lfw_people(data_folder_path, slice_=None, color=False, resize=None,
                      min_faces_per_person=0):
    """Perform the actual data loading for the lfw people dataset.

    This operation is meant to be cached by a joblib wrapper.

    Scans ``data_folder_path`` (one sub-folder of jpegs per person), keeps
    only people with at least ``min_faces_per_person`` pictures, decodes the
    images and returns ``(faces, target, target_names)`` where ``target``
    indexes into ``target_names``.
    """
    # scan the data folder content to retain people with more that
    # `min_faces_per_person` face pictures
    person_names, file_paths = [], []
    for person_name in sorted(listdir(data_folder_path)):
        folder_path = join(data_folder_path, person_name)
        if not isdir(folder_path):
            continue
        paths = [join(folder_path, f) for f in listdir(folder_path)]
        n_pictures = len(paths)
        if n_pictures >= min_faces_per_person:
            # Folder names use underscores ("George_W_Bush") -> "George W Bush".
            person_name = person_name.replace('_', ' ')
            person_names.extend([person_name] * n_pictures)
            file_paths.extend(paths)
    n_faces = len(file_paths)
    if n_faces == 0:
        raise ValueError("min_faces_per_person=%d is too restrictive" %
                         min_faces_per_person)
    # target_names is sorted by np.unique, so searchsorted yields the label
    # index of each picture's person.
    target_names = np.unique(person_names)
    target = np.searchsorted(target_names, person_names)
    faces = _load_imgs(file_paths, slice_, color, resize)
    # shuffle the faces with a deterministic RNG scheme to avoid having
    # all faces of the same person in a row, as it would break some
    # cross validation and learning algorithms such as SGD and online
    # k-means that make an IID assumption
    indices = np.arange(n_faces)
    np.random.RandomState(42).shuffle(indices)
    faces, target = faces[indices], target[indices]
    return faces, target, target_names
def fetch_lfw_people(data_home=None, funneled=True, resize=0.5,
                     min_faces_per_person=0, color=False,
                     slice_=(slice(70, 195), slice(78, 172)),
                     download_if_missing=True):
    """Loader for the Labeled Faces in the Wild (LFW) people dataset.

    This dataset is a collection of JPEG pictures of famous people
    collected on the internet, all details are available on the
    official website:

        http://vis-www.cs.umass.edu/lfw/

    Each picture is centered on a single face. Each pixel of each channel
    (color in RGB) is encoded by a float in range 0.0 - 1.0.

    The task is called Face Recognition (or Identification): given the
    picture of a face, find the name of the person given a training set
    (gallery).

    The original images are 250 x 250 pixels, but the default slice and
    resize arguments reduce them to 62 x 47.

    Parameters
    ----------
    data_home : optional, default: None
        Specify another download and cache folder for the datasets. By default
        all scikit learn data is stored in '~/scikit_learn_data' subfolders.

    funneled : boolean, optional, default: True
        Download and use the funneled variant of the dataset.

    resize : float, optional, default 0.5
        Ratio used to resize the each face picture.

    min_faces_per_person : int, optional, default None
        The extracted dataset will only retain pictures of people that have at
        least `min_faces_per_person` different pictures.

    color : boolean, optional, default False
        Keep the 3 RGB channels instead of averaging them to a single
        gray level channel. If color is True the shape of the data has
        one more dimension than the shape with color = False.

    slice_ : optional
        Provide a custom 2D slice (height, width) to extract the
        'interesting' part of the jpeg files and avoid use statistical
        correlation from the background.

    download_if_missing : optional, True by default
        If False, raise a IOError if the data is not locally available
        instead of trying to download the data from the source site.

    Returns
    -------
    dataset : dict-like object with the following attributes:

    dataset.data : numpy array of shape (13233, 2914)
        Each row corresponds to a ravelled face image of original size
        62 x 47 pixels. Changing the ``slice_`` or resize parameters will
        change the shape of the output.

    dataset.images : numpy array of shape (13233, 62, 47)
        Each row is a face image corresponding to one of the 5749 people in
        the dataset. Changing the ``slice_`` or resize parameters will change
        the shape of the output.

    dataset.target : numpy array of shape (13233,)
        Labels associated to each face image. Those labels range from 0-5748
        and correspond to the person IDs.

    dataset.DESCR : string
        Description of the Labeled Faces in the Wild (LFW) dataset.
    """
    # Ensure the data is on disk (may download/extract it).
    lfw_home, data_folder_path = check_fetch_lfw(
        data_home=data_home, funneled=funneled,
        download_if_missing=download_if_missing)
    logger.info('Loading LFW people faces from %s', lfw_home)
    # wrap the loader in a memoizing function that will return memmaped data
    # arrays for optimal memory usage
    m = Memory(cachedir=lfw_home, compress=6, verbose=0)
    load_func = m.cache(_fetch_lfw_people)
    # load and memoize the pairs as np arrays
    faces, target, target_names = load_func(
        data_folder_path, resize=resize,
        min_faces_per_person=min_faces_per_person, color=color, slice_=slice_)
    # pack the results as a Bunch instance
    return Bunch(data=faces.reshape(len(faces), -1), images=faces,
                 target=target, target_names=target_names,
                 DESCR="LFW faces dataset")
#
# Task #2: Face Verification on pairs of face pictures
#
def _fetch_lfw_pairs(index_file_path, data_folder_path, slice_=None,
                     color=False, resize=None):
    """Perform the actual data loading for the LFW pairs dataset.

    This operation is meant to be cached by a joblib wrapper.

    Parameters
    ----------
    index_file_path : str
        Path to the pairs metadata file (pairsDevTrain.txt, pairsDevTest.txt
        or pairs.txt).
    data_folder_path : str
        Folder containing one sub-folder of jpeg files per person.
    slice_ : tuple of slice, optional
        2D (height, width) crop applied to each 250x250 image.
    color : bool
        Keep the three RGB channels instead of a single gray level one.
    resize : float, optional
        Rescaling ratio applied after cropping.

    Returns
    -------
    pairs : ndarray of shape (n_pairs, 2, ...)
        The decoded image pairs.
    target : ndarray of shape (n_pairs,)
        1 for "same person", 0 for "different persons".
    target_names : ndarray of str
        Human readable names for the two target values.
    """
    # parse the index file to find the number of pairs to be able to allocate
    # the right amount of memory before starting to decode the jpeg files
    with open(index_file_path, 'rb') as index_file:
        split_lines = [ln.strip().split(b('\t')) for ln in index_file]
    pair_specs = [sl for sl in split_lines if len(sl) > 2]
    n_pairs = len(pair_specs)
    # iterating over the metadata lines for each pair to find the filename to
    # decode and load in memory
    # Use the builtin ``int`` dtype: the ``np.int`` alias was deprecated and
    # removed in NumPy 1.24, and has always been equivalent to the builtin.
    target = np.zeros(n_pairs, dtype=int)
    file_paths = list()
    for i, components in enumerate(pair_specs):
        if len(components) == 3:
            # "name idx1 idx2": two pictures of the same person.
            target[i] = 1
            pair = (
                (components[0], int(components[1]) - 1),
                (components[0], int(components[2]) - 1),
            )
        elif len(components) == 4:
            # "name1 idx1 name2 idx2": pictures of two different persons.
            target[i] = 0
            pair = (
                (components[0], int(components[1]) - 1),
                (components[2], int(components[3]) - 1),
            )
        else:
            raise ValueError("invalid line %d: %r" % (i + 1, components))
        for j, (name, idx) in enumerate(pair):
            try:
                person_folder = join(data_folder_path, name)
            except TypeError:
                # Python 3: ``name`` is bytes and must be decoded first.
                person_folder = join(data_folder_path, str(name, 'UTF-8'))
            filenames = list(sorted(listdir(person_folder)))
            file_path = join(person_folder, filenames[idx])
            file_paths.append(file_path)
    pairs = _load_imgs(file_paths, slice_, color, resize)
    # Regroup the flat image array into (n_pairs, 2, ...) in place.
    shape = list(pairs.shape)
    n_faces = shape.pop(0)
    shape.insert(0, 2)
    shape.insert(0, n_faces // 2)
    pairs.shape = shape
    return pairs, target, np.array(['Different persons', 'Same person'])
# Deprecated thin wrapper kept for backward compatibility.
@deprecated("Function 'load_lfw_people' has been deprecated in 0.17 and will be "
            "removed in 0.19."
            "Use fetch_lfw_people(download_if_missing=False) instead.")
def load_lfw_people(download_if_missing=False, **kwargs):
    """Alias for fetch_lfw_people(download_if_missing=False)

    Check fetch_lfw_people.__doc__ for the documentation and parameter list.
    """
    return fetch_lfw_people(download_if_missing=download_if_missing, **kwargs)
def fetch_lfw_pairs(subset='train', data_home=None, funneled=True, resize=0.5,
                    color=False, slice_=(slice(70, 195), slice(78, 172)),
                    download_if_missing=True):
    """Loader for the Labeled Faces in the Wild (LFW) pairs dataset.

    This dataset is a collection of JPEG pictures of famous people
    collected on the internet, all details are available on the
    official website:

        http://vis-www.cs.umass.edu/lfw/

    Each picture is centered on a single face. Each pixel of each channel
    (color in RGB) is encoded by a float in range 0.0 - 1.0.

    The task is called Face Verification: given a pair of two pictures,
    a binary classifier must predict whether the two images are from
    the same person.

    In the official `README.txt`_ this task is described as the
    "Restricted" task.  As I am not sure as to implement the
    "Unrestricted" variant correctly, I left it as unsupported for now.

    .. _`README.txt`: http://vis-www.cs.umass.edu/lfw/README.txt

    The original images are 250 x 250 pixels, but the default slice and
    resize arguments reduce them to 62 x 47.

    Read more in the :ref:`User Guide <labeled_faces_in_the_wild>`.

    Parameters
    ----------
    subset : optional, default: 'train'
        Select the dataset to load: 'train' for the development training
        set, 'test' for the development test set, and '10_folds' for the
        official evaluation set that is meant to be used with a 10-folds
        cross validation.

    data_home : optional, default: None
        Specify another download and cache folder for the datasets. By
        default all scikit learn data is stored in '~/scikit_learn_data'
        subfolders.

    funneled : boolean, optional, default: True
        Download and use the funneled variant of the dataset.

    resize : float, optional, default 0.5
        Ratio used to resize the each face picture.

    color : boolean, optional, default False
        Keep the 3 RGB channels instead of averaging them to a single
        gray level channel. If color is True the shape of the data has
        one more dimension than the shape with color = False.

    slice_ : optional
        Provide a custom 2D slice (height, width) to extract the
        'interesting' part of the jpeg files and avoid use statistical
        correlation from the background.

    download_if_missing : optional, True by default
        If False, raise a IOError if the data is not locally available
        instead of trying to download the data from the source site.

    Returns
    -------
    The data is returned as a Bunch object with the following attributes:

    data : numpy array of shape (2200, 5828)
        Each row corresponds to 2 ravel'd face images of original size
        62 x 47 pixels. Changing the ``slice_`` or resize parameters will
        change the shape of the output.

    pairs : numpy array of shape (2200, 2, 62, 47)
        Each row has 2 face images corresponding to same or different person
        from the dataset containing 5749 people. Changing the ``slice_`` or
        resize parameters will change the shape of the output.

    target : numpy array of shape (13233,)
        Labels associated to each pair of images. The two label values being
        different persons or the same person.

    DESCR : string
        Description of the Labeled Faces in the Wild (LFW) dataset.
    """
    # Ensure the data is on disk (may download/extract it).
    lfw_home, data_folder_path = check_fetch_lfw(
        data_home=data_home, funneled=funneled,
        download_if_missing=download_if_missing)
    logger.info('Loading %s LFW pairs from %s', subset, lfw_home)
    # wrap the loader in a memoizing function that will return memmaped data
    # arrays for optimal memory usage
    m = Memory(cachedir=lfw_home, compress=6, verbose=0)
    load_func = m.cache(_fetch_lfw_pairs)
    # select the right metadata file according to the requested subset
    label_filenames = {
        'train': 'pairsDevTrain.txt',
        'test': 'pairsDevTest.txt',
        '10_folds': 'pairs.txt',
    }
    if subset not in label_filenames:
        raise ValueError("subset='%s' is invalid: should be one of %r" % (
            subset, list(sorted(label_filenames.keys()))))
    index_file_path = join(lfw_home, label_filenames[subset])
    # load and memoize the pairs as np arrays
    pairs, target, target_names = load_func(
        index_file_path, data_folder_path, resize=resize, color=color,
        slice_=slice_)
    # pack the results as a Bunch instance
    return Bunch(data=pairs.reshape(len(pairs), -1), pairs=pairs,
                 target=target, target_names=target_names,
                 DESCR="'%s' segment of the LFW pairs dataset" % subset)
# Deprecated thin wrapper kept for backward compatibility.
@deprecated("Function 'load_lfw_pairs' has been deprecated in 0.17 and will be "
            "removed in 0.19."
            "Use fetch_lfw_pairs(download_if_missing=False) instead.")
def load_lfw_pairs(download_if_missing=False, **kwargs):
    """Alias for fetch_lfw_pairs(download_if_missing=False)

    Check fetch_lfw_pairs.__doc__ for the documentation and parameter list.
    """
    return fetch_lfw_pairs(download_if_missing=download_if_missing, **kwargs)
| bsd-3-clause |
MyRookie/SentimentAnalyse | venv/lib/python2.7/site-packages/nltk/probability.py | 3 | 89919 | # -*- coding: utf-8 -*-
# Natural Language Toolkit: Probability and Statistics
#
# Copyright (C) 2001-2015 NLTK Project
# Author: Edward Loper <edloper@gmail.com>
# Steven Bird <stevenbird1@gmail.com> (additions)
# Trevor Cohn <tacohn@cs.mu.oz.au> (additions)
# Peter Ljunglöf <peter.ljunglof@heatherleaf.se> (additions)
# Liang Dong <ldong@clemson.edu> (additions)
# Geoffrey Sampson <sampson@cantab.net> (additions)
# Ilia Kurenkov <ilia.kurenkov@gmail.com> (additions)
#
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
Classes for representing and processing probabilistic information.
The ``FreqDist`` class is used to encode "frequency distributions",
which count the number of times that each outcome of an experiment
occurs.
The ``ProbDistI`` class defines a standard interface for "probability
distributions", which encode the probability of each outcome for an
experiment. There are two types of probability distribution:
- "derived probability distributions" are created from frequency
distributions. They attempt to model the probability distribution
that generated the frequency distribution.
- "analytic probability distributions" are created directly from
parameters (such as variance).
The ``ConditionalFreqDist`` class and ``ConditionalProbDistI`` interface
are used to encode conditional distributions. Conditional probability
distributions can be derived or analytic; but currently the only
implementation of the ``ConditionalProbDistI`` interface is
``ConditionalProbDist``, a derived distribution.
"""
from __future__ import print_function, unicode_literals, division
import math
import random
import warnings
import array
from operator import itemgetter
from collections import defaultdict
from functools import reduce
from nltk import compat
from nltk.compat import Counter
from nltk.internals import raise_unorderable_types
# Sentinel standing in for "log of zero": an extremely large negative float
# (not actual -inf) returned by logprob() methods when a probability is 0.
_NINF = float('-1e300')
##//////////////////////////////////////////////////////
## Frequency Distributions
##//////////////////////////////////////////////////////
@compat.python_2_unicode_compatible
class FreqDist(Counter):
    """
    A frequency distribution for the outcomes of an experiment. A
    frequency distribution records the number of times each outcome of
    an experiment has occurred. For example, a frequency distribution
    could be used to record the frequency of each word type in a
    document. Formally, a frequency distribution can be defined as a
    function mapping from each sample to the number of times that
    sample occurred as an outcome.

    Frequency distributions are generally constructed by running a
    number of experiments, and incrementing the count for a sample
    every time it is an outcome of an experiment. For example, the
    following code will produce a frequency distribution that encodes
    how often each word occurs in a text:

    >>> from nltk.tokenize import word_tokenize
    >>> from nltk.probability import FreqDist
    >>> sent = 'This is an example sentence'
    >>> fdist = FreqDist()
    >>> for word in word_tokenize(sent):
    ...    fdist[word.lower()] += 1

    An equivalent way to do this is with the initializer:

    >>> fdist = FreqDist(word.lower() for word in word_tokenize(sent))
    """

    def __init__(self, samples=None):
        """
        Construct a new frequency distribution. If ``samples`` is
        given, then the frequency distribution will be initialized
        with the count of each object in ``samples``; otherwise, it
        will be initialized to be empty.

        In particular, ``FreqDist()`` returns an empty frequency
        distribution; and ``FreqDist(samples)`` first creates an empty
        frequency distribution, and then calls ``update`` with the
        list ``samples``.

        :param samples: The samples to initialize the frequency
            distribution with.
        :type samples: Sequence
        """
        Counter.__init__(self, samples)

    def N(self):
        """
        Return the total number of sample outcomes that have been
        recorded by this FreqDist. For the number of unique
        sample values (or bins) with counts greater than zero, use
        ``FreqDist.B()``.

        :rtype: int
        """
        return sum(self.values())

    def B(self):
        """
        Return the total number of sample values (or "bins") that
        have counts greater than zero. For the total
        number of sample outcomes recorded, use ``FreqDist.N()``.
        (FreqDist.B() is the same as len(FreqDist).)

        :rtype: int
        """
        return len(self)

    def hapaxes(self):
        """
        Return a list of all samples that occur once (hapax legomena)

        :rtype: list
        """
        return [item for item in self if self[item] == 1]

    def Nr(self, r, bins=None):
        """
        Return the number of samples with count r.

        :param r: A sample count.
        :type r: int
        :param bins: The number of possible sample outcomes. Used to
            calculate Nr(0); see ``r_Nr``.
        :type bins: int
        :rtype: int
        """
        return self.r_Nr(bins)[r]

    def r_Nr(self, bins=None):
        """
        Return the dictionary mapping r to Nr, the number of samples with frequency r, where Nr > 0.

        :type bins: int
        :param bins: The number of possible sample outcomes. ``bins``
            is used to calculate Nr(0). In particular, Nr(0) is
            ``bins-self.B()``. If ``bins`` is not specified, it
            defaults to ``self.B()`` (so Nr(0) will be 0).
        :rtype: dict(int, int)
        """
        _r_Nr = defaultdict(int)
        for count in self.values():
            _r_Nr[count] += 1
        # Special case for Nr[0]: only nonzero when the caller supplies a
        # total bin count larger than the number of observed bins.
        _r_Nr[0] = bins - self.B() if bins is not None else 0
        return _r_Nr

    def _cumulative_frequencies(self, samples):
        """
        Yield the running (cumulative) counts of the specified samples,
        in the order given.

        :param samples: the samples whose cumulative counts should be
            generated.
        :type samples: any
        :rtype: generator of float
        """
        cf = 0.0
        for sample in samples:
            cf += self[sample]
            yield cf

    # slightly odd nomenclature: freq() if FreqDist does counts and ProbDist does probs,
    # here, freq() does probs
    def freq(self, sample):
        """
        Return the frequency of a given sample. The frequency of a
        sample is defined as the count of that sample divided by the
        total number of sample outcomes that have been recorded by
        this FreqDist. The count of a sample is defined as the
        number of times that sample outcome was recorded by this
        FreqDist. Frequencies are always real numbers in the range
        [0, 1].

        :param sample: the sample whose frequency
            should be returned.
        :type sample: any
        :rtype: float
        """
        # Guard the empty distribution so we never divide by zero.
        if self.N() == 0:
            return 0
        return self[sample] / self.N()

    def max(self):
        """
        Return the sample with the greatest number of outcomes in this
        frequency distribution. If two or more samples have the same
        number of outcomes, return one of them; which sample is
        returned is undefined.

        :return: The sample with the maximum number of outcomes in this
            frequency distribution.
        :rtype: any
        :raise ValueError: If this FreqDist is empty.
        """
        if len(self) == 0:
            raise ValueError('A FreqDist must have at least one sample before max is defined.')
        return self.most_common(1)[0][0]

    def plot(self, *args, **kwargs):
        """
        Plot samples from the frequency distribution
        displaying the most frequent sample first. If an integer
        parameter is supplied, stop after this many samples have been
        plotted. For a cumulative plot, specify cumulative=True.
        (Requires Matplotlib to be installed.)

        :param title: The title for the graph
        :type title: str
        :param cumulative: A flag to specify whether the plot is cumulative (default = False)
        :type cumulative: bool
        """
        try:
            from matplotlib import pylab
        except ImportError:
            raise ValueError('The plot function requires matplotlib to be installed.'
                         'See http://matplotlib.org/')
        if len(args) == 0:
            args = [len(self)]
        samples = [item for item, _ in self.most_common(*args)]
        cumulative = _get_kwarg(kwargs, 'cumulative', False)
        if cumulative:
            freqs = list(self._cumulative_frequencies(samples))
            ylabel = "Cumulative Counts"
        else:
            freqs = [self[sample] for sample in samples]
            ylabel = "Counts"
        # percents = [f * 100 for f in freqs]  only in ProbDist?
        pylab.grid(True, color="silver")
        if "linewidth" not in kwargs:
            kwargs["linewidth"] = 2
        if "title" in kwargs:
            pylab.title(kwargs["title"])
            # 'title' is ours, not matplotlib's -- strip it before forwarding.
            del kwargs["title"]
        pylab.plot(freqs, **kwargs)
        pylab.xticks(range(len(samples)), [compat.text_type(s) for s in samples], rotation=90)
        pylab.xlabel("Samples")
        pylab.ylabel(ylabel)
        pylab.show()

    def tabulate(self, *args, **kwargs):
        """
        Tabulate the given samples from the frequency distribution (cumulative),
        displaying the most frequent sample first. If an integer
        parameter is supplied, stop after this many samples have been
        plotted.

        :param samples: The samples to plot (default is all samples)
        :type samples: list
        :param cumulative: A flag to specify whether the freqs are cumulative (default = False)
        :type cumulative: bool
        """
        if len(args) == 0:
            args = [len(self)]
        samples = [item for item, _ in self.most_common(*args)]
        cumulative = _get_kwarg(kwargs, 'cumulative', False)
        if cumulative:
            freqs = list(self._cumulative_frequencies(samples))
        else:
            freqs = [self[sample] for sample in samples]
        # percents = [f * 100 for f in freqs]  only in ProbDist?
        # One shared column width: wide enough for the longest sample name
        # and the widest count.
        width = max(len("%s" % s) for s in samples)
        width = max(width, max(len("%d" % f) for f in freqs))
        for i in range(len(samples)):
            print("%*s" % (width, samples[i]), end=' ')
        print()
        for i in range(len(samples)):
            print("%*d" % (width, freqs[i]), end=' ')
        print()

    def copy(self):
        """
        Create a copy of this frequency distribution.

        :rtype: FreqDist
        """
        return self.__class__(self)

    # Mathematical operators

    def __add__(self, other):
        """
        Add counts from two counters.

        >>> FreqDist('abbb') + FreqDist('bcc')
        FreqDist({'b': 4, 'c': 2, 'a': 1})

        """
        return self.__class__(super(FreqDist, self).__add__(other))

    def __sub__(self, other):
        """
        Subtract count, but keep only results with positive counts.

        >>> FreqDist('abbbc') - FreqDist('bccd')
        FreqDist({'b': 2, 'a': 1})

        """
        return self.__class__(super(FreqDist, self).__sub__(other))

    def __or__(self, other):
        """
        Union is the maximum of value in either of the input counters.

        >>> FreqDist('abbb') | FreqDist('bcc')
        FreqDist({'b': 3, 'c': 2, 'a': 1})

        """
        return self.__class__(super(FreqDist, self).__or__(other))

    def __and__(self, other):
        """
        Intersection is the minimum of corresponding counts.

        >>> FreqDist('abbb') & FreqDist('bcc')
        FreqDist({'b': 1})

        """
        return self.__class__(super(FreqDist, self).__and__(other))

    def __le__(self, other):
        """
        Return True if every sample of this distribution also occurs in
        ``other`` with at least the same count (multiset "subset or equal").
        """
        if not isinstance(other, FreqDist):
            raise_unorderable_types("<=", self, other)
        return set(self).issubset(other) and all(self[key] <= other[key] for key in self)

    def __ge__(self, other):
        """
        Return True if every sample of ``other`` also occurs in this
        distribution with at least the same count (multiset "superset or
        equal").
        """
        if not isinstance(other, FreqDist):
            raise_unorderable_types(">=", self, other)
        return set(self).issuperset(other) and all(self[key] >= other[key] for key in other)

    # @total_ordering doesn't work here, since the class inherits from a builtin class.
    # NOTE: the strict comparisons are defined via <= and >= conjunctions,
    # NOT by negation: the subset ordering is only partial, so
    # "not (a <= b)" does not imply "a > b" for incomparable distributions.
    __lt__ = lambda self, other: self <= other and not self == other
    __gt__ = lambda self, other: self >= other and not self == other

    def __repr__(self):
        """
        Return a string representation of this FreqDist.

        :rtype: string
        """
        return self.pformat()

    def pprint(self, maxlen=10, stream=None):
        """
        Print a string representation of this FreqDist to 'stream'

        :param maxlen: The maximum number of items to print
        :type maxlen: int
        :param stream: The stream to print to. stdout by default
        """
        print(self.pformat(maxlen=maxlen), file=stream)

    def pformat(self, maxlen=10):
        """
        Return a string representation of this FreqDist.

        :param maxlen: The maximum number of items to display
        :type maxlen: int
        :rtype: string
        """
        items = ['{0!r}: {1!r}'.format(*item) for item in self.most_common(maxlen)]
        if len(self) > maxlen:
            items.append('...')
        return 'FreqDist({{{0}}})'.format(', '.join(items))

    def __str__(self):
        """
        Return a string representation of this FreqDist.

        :rtype: string
        """
        return '<FreqDist with %d samples and %d outcomes>' % (len(self), self.N())
##//////////////////////////////////////////////////////
## Probability Distributions
##//////////////////////////////////////////////////////
class ProbDistI(object):
    """
    A probability distribution for the outcomes of an experiment. A
    probability distribution specifies how likely it is that an
    experiment will have any given outcome. For example, a
    probability distribution could be used to predict the probability
    that a token in a document will have a given type. Formally, a
    probability distribution can be defined as a function mapping from
    samples to nonnegative real numbers, such that the sum of every
    number in the function's range is 1.0. A ``ProbDist`` is often
    used to model the probability distribution of the experiment used
    to generate a frequency distribution.
    """
    SUM_TO_ONE = True
    """True if the probabilities of the samples in this probability
    distribution will always sum to one."""
    def __init__(self):
        # Guard against instantiating the abstract interface directly;
        # subclasses call through here unharmed.
        if self.__class__ == ProbDistI:
            raise NotImplementedError("Interfaces can't be instantiated")
    def prob(self, sample):
        """
        Return the probability for a given sample. Probabilities
        are always real numbers in the range [0, 1].
        :param sample: The sample whose probability
            should be returned.
        :type sample: any
        :rtype: float
        """
        raise NotImplementedError()
    def logprob(self, sample):
        """
        Return the base 2 logarithm of the probability for a given sample.
        :param sample: The sample whose probability
            should be returned.
        :type sample: any
        :rtype: float
        """
        # Default definition, in terms of prob()
        # _NINF stands in for log(0), which is undefined.
        p = self.prob(sample)
        return (math.log(p, 2) if p != 0 else _NINF)
    def max(self):
        """
        Return the sample with the greatest probability. If two or
        more samples have the same probability, return one of them;
        which sample is returned is undefined.
        :rtype: any
        """
        raise NotImplementedError()
    def samples(self):
        """
        Return a list of all samples that have nonzero probabilities.
        Use ``prob`` to find the probability of each sample.
        :rtype: list
        """
        raise NotImplementedError()
    # cf self.SUM_TO_ONE
    def discount(self):
        """
        Return the ratio by which counts are discounted on average: c*/c
        :rtype: float
        """
        return 0.0
    # Subclasses should define more efficient implementations of this,
    # where possible.
    def generate(self):
        """
        Return a randomly selected sample from this probability distribution.
        The probability of returning each sample ``samp`` is equal to
        ``self.prob(samp)``.
        """
        # Inverse-transform sampling: draw p uniform in [0, 1), then walk
        # the samples subtracting each probability until p crosses zero.
        p = random.random()
        p_init = p
        for sample in self.samples():
            p -= self.prob(sample)
            if p <= 0: return sample
        # allow for some rounding error:
        # NOTE: this deliberately reuses the loop variable, i.e. it
        # returns the *last* sample iterated above.
        if p < .0001:
            return sample
        # we *should* never get here
        if self.SUM_TO_ONE:
            warnings.warn("Probability distribution %r sums to %r; generate()"
                          " is returning an arbitrary sample." % (self, p_init-p))
        return random.choice(list(self.samples()))
@compat.python_2_unicode_compatible
class UniformProbDist(ProbDistI):
    """
    A probability distribution that spreads the probability mass evenly
    over a fixed set of samples; every sample outside that set has
    probability zero.
    """
    def __init__(self, samples):
        """
        Build a uniform distribution over the distinct values found in
        ``samples``.

        :param samples: The samples that should share the probability
            mass equally.
        :type samples: list
        :raise ValueError: If ``samples`` is empty.
        """
        if len(samples) == 0:
            raise ValueError('A Uniform probability distribution must '
                             + 'have at least one sample.')
        # Duplicates collapse: each *distinct* sample gets an equal share.
        self._sampleset = set(samples)
        self._prob = 1.0 / len(self._sampleset)
        self._samples = list(self._sampleset)
    def prob(self, sample):
        # Constant probability for members, zero for everything else.
        if sample in self._sampleset:
            return self._prob
        return 0
    def max(self):
        # Every sample is equally probable, so any member qualifies;
        # return the first one in our materialized list.
        return self._samples[0]
    def samples(self):
        return self._samples
    def __repr__(self):
        return '<UniformProbDist with %d samples>' % len(self._sampleset)
@compat.python_2_unicode_compatible
class RandomProbDist(ProbDistI):
    """
    A probability distribution whose mass is split among the samples at
    random: each sample receives an independently drawn uniform weight,
    and the weights are then rescaled so that they sum to one.
    """
    def __init__(self, samples):
        if len(samples) == 0:
            raise ValueError('A probability distribution must '
                             + 'have at least one sample.')
        self._probs = self.unirand(samples)
        self._samples = list(self._probs.keys())
    @classmethod
    def unirand(cls, samples):
        """
        Build the randomized initial distribution: one uniform draw per
        sample, normalized to sum to 1. Returned as a dict of sample ->
        probability so it can be passed to MutableProbDist and used with
        the same calling convention as UniformProbDist.
        """
        weights = [random.random() for _ in range(len(samples))]
        total = sum(weights)
        weights = [w / total for w in weights]
        # Floating-point rounding can leave the renormalized sum a hair
        # away from 1; the discrepancy (if any) is tiny, so it is safe to
        # absorb it into the last weight without pushing any probability
        # outside (0, 1).
        total = sum(weights)
        if total != 1:
            weights[-1] -= total - 1
        return dict(zip(samples, weights))
    def prob(self, sample):
        # Unknown samples have zero probability.
        return self._probs.get(sample, 0)
    def samples(self):
        return self._samples
    def __repr__(self):
        return '<RandomUniformProbDist with %d samples>' % len(self._probs)
@compat.python_2_unicode_compatible
class DictionaryProbDist(ProbDistI):
    """
    A probability distribution whose probabilities are directly
    specified by a given dictionary. The given dictionary maps
    samples to probabilities.
    """
    def __init__(self, prob_dict=None, log=False, normalize=False):
        """
        Construct a new probability distribution from the given
        dictionary, which maps values to probabilities (or to log
        probabilities, if ``log`` is true). If ``normalize`` is
        true, then the probability values are scaled by a constant
        factor such that they sum to 1.
        If called without arguments, the resulting probability
        distribution assigns zero probability to all values.

        :param prob_dict: mapping from samples to (log-)probabilities.
        :param log: if true, values in ``prob_dict`` are base-2 log
            probabilities rather than plain probabilities.
        :param normalize: if true, rescale the values to sum to 1.
        """
        # Defensive copy so later normalization doesn't mutate the caller's dict.
        self._prob_dict = (prob_dict.copy() if prob_dict is not None else {})
        self._log = log
        # Normalize the distribution, if requested.
        # NOTE(review): normalize=True with prob_dict=None raises TypeError
        # on len(None) below rather than the intended ValueError -- confirm
        # whether that combination is ever used.
        if normalize:
            if len(prob_dict) == 0:
                raise ValueError('A DictionaryProbDist must have at least one sample ' +
                                 'before it can be normalized.')
            if log:
                # Log-space normalization: subtract the log of the total mass.
                # sum_logs is a module-level helper defined elsewhere in this file.
                value_sum = sum_logs(list(self._prob_dict.values()))
                if value_sum <= _NINF:
                    # Degenerate case: total mass is (log-)zero; fall back to
                    # a uniform distribution over the given samples.
                    logp = math.log(1.0/len(prob_dict), 2)
                    for x in prob_dict:
                        self._prob_dict[x] = logp
                else:
                    for (x, p) in self._prob_dict.items():
                        self._prob_dict[x] -= value_sum
            else:
                value_sum = sum(self._prob_dict.values())
                if value_sum == 0:
                    # Degenerate case: all values are zero; fall back to uniform.
                    p = 1.0/len(prob_dict)
                    for x in prob_dict:
                        self._prob_dict[x] = p
                else:
                    norm_factor = 1.0/value_sum
                    for (x, p) in self._prob_dict.items():
                        self._prob_dict[x] *= norm_factor
    def prob(self, sample):
        """Return the probability of ``sample`` (0 for unknown samples)."""
        if self._log:
            # Stored values are base-2 logs; exponentiate back to a probability.
            return (2**(self._prob_dict[sample]) if sample in self._prob_dict else 0)
        else:
            return self._prob_dict.get(sample, 0)
    def logprob(self, sample):
        """Return the base-2 log probability of ``sample`` (_NINF for zero)."""
        if self._log:
            return self._prob_dict.get(sample, _NINF)
        else:
            if sample not in self._prob_dict: return _NINF
            elif self._prob_dict[sample] == 0: return _NINF
            else: return math.log(self._prob_dict[sample], 2)
    def max(self):
        """Return the sample with the highest probability (cached after first call)."""
        if not hasattr(self, '_max'):
            self._max = max((p,v) for (v,p) in self._prob_dict.items())[1]
        return self._max
    def samples(self):
        return self._prob_dict.keys()
    def __repr__(self):
        return '<ProbDist with %d samples>' % len(self._prob_dict)
@compat.python_2_unicode_compatible
class MLEProbDist(ProbDistI):
    """
    The maximum likelihood estimate for the probability distribution of
    the experiment underlying a frequency distribution: each sample's
    probability is simply its relative frequency in the observed data.
    """
    def __init__(self, freqdist, bins=None):
        """
        Create an MLE probability distribution from ``freqdist``.

        :type freqdist: FreqDist
        :param freqdist: The frequency distribution that the
            probability estimates should be based on.
        :param bins: Accepted for interface compatibility; unused by
            the MLE estimate.
        """
        self._freqdist = freqdist
    def freqdist(self):
        """
        Return the frequency distribution this estimate is based on.

        :rtype: FreqDist
        """
        return self._freqdist
    def prob(self, sample):
        # MLE probability == relative frequency of the sample.
        return self._freqdist.freq(sample)
    def max(self):
        # Probability is monotonic in frequency, so the most frequent
        # sample is also the most probable one.
        return self._freqdist.max()
    def samples(self):
        return self._freqdist.keys()
    def __repr__(self):
        """
        :rtype: str
        :return: A string representation of this ``ProbDist``.
        """
        return '<MLEProbDist based on %d samples>' % self._freqdist.N()
@compat.python_2_unicode_compatible
class LidstoneProbDist(ProbDistI):
    """
    The Lidstone estimate for the probability distribution of the
    experiment used to generate a frequency distribution. The
    "Lidstone estimate" is parameterized by a real number *gamma*,
    which typically ranges from 0 to 1. The Lidstone estimate
    approximates the probability of a sample with count *c* from an
    experiment with *N* outcomes and *B* bins as
    *(c+gamma)/(N+B*gamma)*. This is equivalent to adding
    *gamma* to the count for each bin, and taking the maximum
    likelihood estimate of the resulting frequency distribution.
    """
    # Smoothing shifts mass to unseen bins, so seen-sample probabilities
    # alone do not sum to one.
    SUM_TO_ONE = False
    def __init__(self, freqdist, gamma, bins=None):
        """
        Use the Lidstone estimate to create a probability distribution
        for the experiment used to generate ``freqdist``.
        :type freqdist: FreqDist
        :param freqdist: The frequency distribution that the
            probability estimates should be based on.
        :type gamma: float
        :param gamma: A real number used to parameterize the
            estimate. The Lidstone estimate is equivalent to adding
            *gamma* to the count for each bin, and taking the
            maximum likelihood estimate of the resulting frequency
            distribution.
        :type bins: int
        :param bins: The number of sample values that can be generated
            by the experiment that is described by the probability
            distribution. This value must be correctly set for the
            probabilities of the sample values to sum to one. If
            ``bins`` is not specified, it defaults to ``freqdist.B()``.
        :raise ValueError: If ``bins`` is zero (or unspecified with an
            empty freqdist), or smaller than ``freqdist.B()``.
        """
        if (bins == 0) or (bins is None and freqdist.N() == 0):
            # __name__[:-8] strips the trailing "ProbDist" for the message,
            # so subclasses report e.g. "Laplace" or "ELE".
            name = self.__class__.__name__[:-8]
            raise ValueError('A %s probability distribution ' % name +
                             'must have at least one bin.')
        if (bins is not None) and (bins < freqdist.B()):
            name = self.__class__.__name__[:-8]
            raise ValueError('\nThe number of bins in a %s distribution ' % name +
                             '(%d) must be greater than or equal to\n' % bins +
                             'the number of bins in the FreqDist used ' +
                             'to create it (%d).' % freqdist.B())
        self._freqdist = freqdist
        self._gamma = float(gamma)
        self._N = self._freqdist.N()
        if bins is None:
            bins = freqdist.B()
        self._bins = bins
        # Shared denominator of every probability estimate: N + B*gamma.
        self._divisor = self._N + bins * gamma
        if self._divisor == 0.0:
            # In extreme cases we force the probability to be 0,
            # which it will be, since the count will be 0:
            self._gamma = 0
            self._divisor = 1
    def freqdist(self):
        """
        Return the frequency distribution that this probability
        distribution is based on.
        :rtype: FreqDist
        """
        return self._freqdist
    def prob(self, sample):
        # (c + gamma) / (N + B*gamma)
        c = self._freqdist[sample]
        return (c + self._gamma) / self._divisor
    def max(self):
        # For Lidstone distributions, probability is monotonic with
        # frequency, so the most probable sample is the one that
        # occurs most frequently.
        return self._freqdist.max()
    def samples(self):
        return self._freqdist.keys()
    def discount(self):
        """
        Return the average discount applied to counts: B*gamma / (N + B*gamma).
        :rtype: float
        """
        gb = self._gamma * self._bins
        return gb / (self._N + gb)
    def __repr__(self):
        """
        Return a string representation of this ``ProbDist``.
        :rtype: str
        """
        return '<LidstoneProbDist based on %d samples>' % self._freqdist.N()
@compat.python_2_unicode_compatible
class LaplaceProbDist(LidstoneProbDist):
    """
    The Laplace ("add one") estimate for the probability distribution of
    the experiment underlying a frequency distribution. A sample seen
    *c* times out of *N* outcomes over *B* bins is assigned probability
    *(c+1)/(N+B)* -- i.e. one is added to every bin's count before
    taking the maximum likelihood estimate.
    """
    def __init__(self, freqdist, bins=None):
        """
        Create a Laplace-smoothed probability distribution from
        ``freqdist``.

        :type freqdist: FreqDist
        :param freqdist: The frequency distribution that the
            probability estimates should be based on.
        :type bins: int
        :param bins: The number of sample values that can be generated
            by the experiment that is described by the probability
            distribution. This value must be correctly set for the
            probabilities of the sample values to sum to one. If
            ``bins`` is not specified, it defaults to ``freqdist.B()``.
        """
        # Laplace smoothing is exactly Lidstone smoothing with gamma = 1.
        super(LaplaceProbDist, self).__init__(freqdist, 1, bins)
    def __repr__(self):
        """
        :rtype: str
        :return: A string representation of this ``ProbDist``.
        """
        return '<LaplaceProbDist based on %d samples>' % self._freqdist.N()
@compat.python_2_unicode_compatible
class ELEProbDist(LidstoneProbDist):
    """
    The expected likelihood estimate for the probability distribution of
    the experiment underlying a frequency distribution. A sample seen
    *c* times out of *N* outcomes over *B* bins is assigned probability
    *(c+0.5)/(N+B/2)* -- i.e. 0.5 is added to every bin's count before
    taking the maximum likelihood estimate.
    """
    def __init__(self, freqdist, bins=None):
        """
        Create an expected-likelihood probability distribution from
        ``freqdist``.

        :type freqdist: FreqDist
        :param freqdist: The frequency distribution that the
            probability estimates should be based on.
        :type bins: int
        :param bins: The number of sample values that can be generated
            by the experiment that is described by the probability
            distribution. This value must be correctly set for the
            probabilities of the sample values to sum to one. If
            ``bins`` is not specified, it defaults to ``freqdist.B()``.
        """
        # The ELE is exactly Lidstone smoothing with gamma = 0.5.
        super(ELEProbDist, self).__init__(freqdist, 0.5, bins)
    def __repr__(self):
        """
        Return a string representation of this ``ProbDist``.
        :rtype: str
        """
        return '<ELEProbDist based on %d samples>' % self._freqdist.N()
@compat.python_2_unicode_compatible
class HeldoutProbDist(ProbDistI):
    """
    The heldout estimate for the probability distribution of the
    experiment used to generate two frequency distributions. These
    two frequency distributions are called the "heldout frequency
    distribution" and the "base frequency distribution." The
    "heldout estimate" uses the "heldout frequency
    distribution" to predict the probability of each sample, given its
    frequency in the "base frequency distribution".
    In particular, the heldout estimate approximates the probability
    for a sample that occurs *r* times in the base distribution as
    the average frequency in the heldout distribution of all samples
    that occur *r* times in the base distribution.
    This average frequency is *Tr[r]/(Nr[r].N)*, where:
    - *Tr[r]* is the total count in the heldout distribution for
      all samples that occur *r* times in the base distribution.
    - *Nr[r]* is the number of samples that occur *r* times in
      the base distribution.
    - *N* is the number of outcomes recorded by the heldout
      frequency distribution.
    In order to increase the efficiency of the ``prob`` member
    function, *Tr[r]/(Nr[r].N)* is precomputed for each value of *r*
    when the ``HeldoutProbDist`` is created.
    :type _estimate: list(float)
    :ivar _estimate: A list mapping from *r*, the number of
        times that a sample occurs in the base distribution, to the
        probability estimate for that sample. ``_estimate[r]`` is
        calculated by finding the average frequency in the heldout
        distribution of all samples that occur *r* times in the base
        distribution. In particular, ``_estimate[r]`` =
        *Tr[r]/(Nr[r].N)*.
    :type _max_r: int
    :ivar _max_r: The maximum number of times that any sample occurs
        in the base distribution. ``_max_r`` is used to decide how
        large ``_estimate`` must be.
    """
    SUM_TO_ONE = False
    def __init__(self, base_fdist, heldout_fdist, bins=None):
        """
        Use the heldout estimate to create a probability distribution
        for the experiment used to generate ``base_fdist`` and
        ``heldout_fdist``.
        :type base_fdist: FreqDist
        :param base_fdist: The base frequency distribution.
        :type heldout_fdist: FreqDist
        :param heldout_fdist: The heldout frequency distribution.
        :type bins: int
        :param bins: The number of sample values that can be generated
            by the experiment that is described by the probability
            distribution. This value must be correctly set for the
            probabilities of the sample values to sum to one. If
            ``bins`` is not specified, it defaults to ``freqdist.B()``.
        """
        self._base_fdist = base_fdist
        self._heldout_fdist = heldout_fdist
        # The max number of times any sample occurs in base_fdist.
        self._max_r = base_fdist[base_fdist.max()]
        # Calculate Tr, Nr, and N.
        Tr = self._calculate_Tr()
        r_Nr = base_fdist.r_Nr(bins)
        Nr = [r_Nr[r] for r in range(self._max_r+1)]
        N = heldout_fdist.N()
        # Use Tr, Nr, and N to compute the probability estimate for
        # each value of r.
        self._estimate = self._calculate_estimate(Tr, Nr, N)
    def _calculate_Tr(self):
        """
        Return the list *Tr*, where *Tr[r]* is the total count in
        ``heldout_fdist`` for all samples that occur *r*
        times in ``base_fdist``.
        :rtype: list(float)
        """
        Tr = [0.0] * (self._max_r+1)
        # Samples present only in the heldout distribution have r == 0
        # in the base distribution and accumulate into Tr[0].
        for sample in self._heldout_fdist:
            r = self._base_fdist[sample]
            Tr[r] += self._heldout_fdist[sample]
        return Tr
    def _calculate_estimate(self, Tr, Nr, N):
        """
        Return the list *estimate*, where *estimate[r]* is the probability
        estimate for any sample that occurs *r* times in the base frequency
        distribution. In particular, *estimate[r]* is *Tr[r]/(N[r].N)*.
        In the special case that *N[r]=0*, *estimate[r]* will never be used;
        so we define *estimate[r]=None* for those cases.
        :rtype: list(float)
        :type Tr: list(float)
        :param Tr: the list *Tr*, where *Tr[r]* is the total count in
            the heldout distribution for all samples that occur *r*
            times in base distribution.
        :type Nr: list(float)
        :param Nr: The list *Nr*, where *Nr[r]* is the number of
            samples that occur *r* times in the base distribution.
        :type N: int
        :param N: The total number of outcomes recorded by the heldout
            frequency distribution.
        """
        estimate = []
        for r in range(self._max_r+1):
            if Nr[r] == 0: estimate.append(None)
            else: estimate.append(Tr[r]/(Nr[r]*N))
        return estimate
    def base_fdist(self):
        """
        Return the base frequency distribution that this probability
        distribution is based on.
        :rtype: FreqDist
        """
        return self._base_fdist
    def heldout_fdist(self):
        """
        Return the heldout frequency distribution that this
        probability distribution is based on.
        :rtype: FreqDist
        """
        return self._heldout_fdist
    def samples(self):
        return self._base_fdist.keys()
    def prob(self, sample):
        # Use our precomputed probability estimate.
        # NOTE(review): for a sample unseen in the base distribution, r is 0
        # and _estimate[0] may be None when ``bins`` was not supplied -- the
        # caller then receives None rather than a float; confirm intended.
        r = self._base_fdist[sample]
        return self._estimate[r]
    def max(self):
        # Note: the Heldout estimation is *not* necessarily monotonic;
        # so this implementation is currently broken. However, it
        # should give the right answer *most* of the time. :)
        return self._base_fdist.max()
    def discount(self):
        raise NotImplementedError()
    def __repr__(self):
        """
        :rtype: str
        :return: A string representation of this ``ProbDist``.
        """
        s = '<HeldoutProbDist: %d base samples; %d heldout samples>'
        return s % (self._base_fdist.N(), self._heldout_fdist.N())
@compat.python_2_unicode_compatible
class CrossValidationProbDist(ProbDistI):
    """
    The cross-validation estimate for the probability distribution of
    the experiment used to generate a set of frequency distributions.
    A sample's probability is the average of the heldout estimates for
    that sample over every ordered pair of distinct frequency
    distributions.
    """
    # Averaging heldout estimates does not guarantee a total mass of one.
    SUM_TO_ONE = False
    def __init__(self, freqdists, bins):
        """
        Create a cross-validation probability distribution from the
        frequency distributions produced by the experiment.

        :type freqdists: list(FreqDist)
        :param freqdists: A list of the frequency distributions
            generated by the experiment.
        :type bins: int
        :param bins: The number of sample values that can be generated
            by the experiment that is described by the probability
            distribution. This value must be correctly set for the
            probabilities of the sample values to sum to one. If
            ``bins`` is not specified, it defaults to ``freqdist.B()``.
        """
        self._freqdists = freqdists
        # One heldout distribution per ordered pair of distinct freqdists.
        self._heldout_probdists = [
            HeldoutProbDist(base, heldout, bins)
            for base in freqdists
            for heldout in freqdists
            if base is not heldout
        ]
    def freqdists(self):
        """
        Return the list of frequency distributions that this ``ProbDist`` is based on.

        :rtype: list(FreqDist)
        """
        return self._freqdists
    def samples(self):
        # Union of the samples of all underlying frequency distributions.
        # (Not particularly efficient, but rarely on a hot path.)
        return set(sample for fd in self._freqdists for sample in fd)
    def prob(self, sample):
        # Average the estimates produced by every heldout distribution.
        estimates = [hpd.prob(sample) for hpd in self._heldout_probdists]
        return sum(estimates) / len(self._heldout_probdists)
    def discount(self):
        raise NotImplementedError()
    def __repr__(self):
        """
        Return a string representation of this ``ProbDist``.

        :rtype: str
        """
        return '<CrossValidationProbDist: %d-way>' % len(self._freqdists)
@compat.python_2_unicode_compatible
class WittenBellProbDist(ProbDistI):
    """
    The Witten-Bell estimate of a probability distribution. This distribution
    allocates uniform probability mass to as yet unseen events by using the
    number of events that have only been seen once. The probability mass
    reserved for unseen events is equal to *T / (N + T)*
    where *T* is the number of observed event types and *N* is the total
    number of observed events. This equates to the maximum likelihood estimate
    of a new type event occurring. The remaining probability mass is discounted
    such that all probability estimates sum to one, yielding:

        - *p = T / Z (N + T)*, if count = 0
        - *p = c / (N + T)*, otherwise
    """
    def __init__(self, freqdist, bins=None):
        """
        Creates a distribution of Witten-Bell probability estimates.

        The parameters *T* and *N* are taken from the ``freqdist`` parameter
        (the ``B()`` and ``N()`` values). The normalizing factor *Z* is
        calculated using these values along with the ``bins`` parameter.

        :param freqdist: The frequency counts upon which to base the
            estimation.
        :type freqdist: FreqDist
        :param bins: The number of possible event types. This must be at least
            as large as the number of bins in the ``freqdist``. If None, then
            it's assumed to be equal to that of the ``freqdist``
        :type bins: int
        """
        assert bins is None or bins >= freqdist.B(),\
            'bins parameter must not be less than %d=freqdist.B()' % freqdist.B()
        if bins is None:
            bins = freqdist.B()
        self._freqdist = freqdist
        # T: number of observed event types; Z: number of unseen bins;
        # N: total number of observed events.
        self._T = self._freqdist.B()
        self._Z = bins - self._freqdist.B()
        self._N = self._freqdist.N()
        # self._P0 is P(0), precalculated for efficiency:
        # NOTE(review): when bins == freqdist.B() (so self._Z == 0), both
        # branches below divide by zero -- confirm callers pass bins > B().
        if self._N==0:
            # if freqdist is empty, we approximate P(0) by a UniformProbDist:
            self._P0 = 1.0 / self._Z
        else:
            self._P0 = self._T / (self._Z * (self._N + self._T))
    def prob(self, sample):
        # inherit docs from ProbDistI
        c = self._freqdist[sample]
        return (c / (self._N + self._T) if c != 0 else self._P0)
    def max(self):
        """Return the sample with the greatest observed count."""
        return self._freqdist.max()
    def samples(self):
        """Return the samples seen in the underlying frequency distribution."""
        return self._freqdist.keys()
    def freqdist(self):
        """Return the frequency distribution this estimate is based on."""
        return self._freqdist
    def discount(self):
        # Discounting is not exposed as a single scalar for Witten-Bell.
        raise NotImplementedError()
    def __repr__(self):
        """
        Return a string representation of this ``ProbDist``.

        :rtype: str
        """
        return '<WittenBellProbDist based on %d samples>' % self._freqdist.N()
##//////////////////////////////////////////////////////
## Good-Turing Probability Distributions
##//////////////////////////////////////////////////////
# Good-Turing frequency estimation was contributed by Alan Turing and
# his statistical assistant I.J. Good, during their collaboration in
# World War II. It is a statistical technique for predicting the
# probability of occurrence of objects belonging to an unknown number
# of species, given past observations of such objects and their
# species. (In drawing balls from an urn, the 'objects' would be balls
# and the 'species' would be the distinct colors of the balls, finite
# but unknown in number.)
#
# Good-Turing method calculates the probability mass to assign to
# events with zero or low counts based on the number of events with
# higher counts. It does so by using the adjusted count *c\**:
#
# - *c\* = (c + 1) N(c + 1) / N(c)* for c >= 1
# - *things with frequency zero in training* = N(1) for c == 0
#
# where *c* is the original count, *N(i)* is the number of event types
# observed with count *i*. We can think of the count of unseen events as
# the count of events with frequency one (see Jurafsky & Martin 2nd Edition, p101).
#
# This method is problematic because the situation ``N(c+1) == 0``
# is quite common in the original Good-Turing estimation; smoothing or
# interpolation of *N(i)* values is essential in practice.
#
# Bill Gale and Geoffrey Sampson present a simple and effective approach,
# Simple Good-Turing. As a smoothing curve they simply use a power curve:
#
# Nr = a*r^b (with b < -1 to give the appropriate hyperbolic
# relationship)
#
# They estimate a and b by simple linear regression technique on the
# logarithmic form of the equation:
#
# log Nr = a + b*log(r)
#
# However, they suggest that such a simple curve is probably only
# appropriate for high values of r. For low values of r, they use the
# measured Nr directly. (see M&S, p.213)
#
# Gale and Sampson propose to use r while the difference between r and
# r* is 1.96 greater than the standard deviation, and switch to r* if
# it is less or equal:
#
# |r - r*| > 1.96 * sqrt((r + 1)^2 (Nr+1 / Nr^2) (1 + Nr+1 / Nr))
#
# The 1.96 coefficient correspond to a 0.05 significance criterion,
# some implementations can use a coefficient of 1.65 for a 0.1
# significance criterion.
#
##//////////////////////////////////////////////////////
## Simple Good-Turing Probability Distributions
##//////////////////////////////////////////////////////
@compat.python_2_unicode_compatible
class SimpleGoodTuringProbDist(ProbDistI):
    """
    SimpleGoodTuring ProbDist approximates from frequency to frequency of
    frequency into a linear line under log space by linear regression.
    Details of Simple Good-Turing algorithm can be found in:

    - "Good-Turing smoothing without tears" (Gale & Sampson 1995),
      Journal of Quantitative Linguistics, vol. 2 pp. 217-237.
    - "Speech and Language Processing" (Jurafsky & Martin),
      2nd Edition, Chapter 4.5 p103 (log(Nc) = a + b*log(c))
    - http://www.grsampson.net/RGoodTur.html

    Given a set of pairs (xi, yi), where xi denotes the frequency and
    yi denotes the frequency of frequency, we want to minimize their
    square variation. E(x) and E(y) represent the mean of xi and yi.

    - slope: b = sigma ((xi-E(x)(yi-E(y))) / sigma ((xi-E(x))(xi-E(x)))
    - intercept: a = E(y) - b.E(x)
    """
    SUM_TO_ONE = False
    def __init__(self, freqdist, bins=None):
        """
        :param freqdist: The frequency counts upon which to base the
            estimation.
        :type freqdist: FreqDist
        :param bins: The number of possible event types. This must be
            larger than the number of bins in the ``freqdist``. If None,
            then it's assumed to be equal to ``freqdist``.B() + 1
        :type bins: int
        """
        assert bins is None or bins > freqdist.B(),\
            'bins parameter must not be less than %d=freqdist.B()+1' % (freqdist.B()+1)
        if bins is None:
            bins = freqdist.B() + 1
        self._freqdist = freqdist
        self._bins = bins
        # Fit the log-log regression line over (r, Nr), decide where to
        # switch from raw to smoothed counts, then renormalize.
        r, nr = self._r_Nr()
        self.find_best_fit(r, nr)
        self._switch(r, nr)
        self._renormalize(r, nr)
    def _r_Nr_non_zero(self):
        # Frequency-of-frequency table with the r == 0 entry removed.
        r_Nr = self._freqdist.r_Nr()
        del r_Nr[0]
        return r_Nr
    def _r_Nr(self):
        """
        Split the frequency distribution in two list (r, Nr), where Nr(r) > 0
        """
        nonzero = self._r_Nr_non_zero()
        if not nonzero:
            return [], []
        return zip(*sorted(nonzero.items()))
    def find_best_fit(self, r, nr):
        """
        Use simple linear regression to tune parameters self._slope and
        self._intercept in the log-log space based on count and Nr(count)
        (Work in log space to avoid floating point underflow.)
        """
        # For higher sample frequencies the data points becomes horizontal
        # along line Nr=1. To create a more evident linear model in log-log
        # space, we average positive Nr values with the surrounding zero
        # values. (Church and Gale, 1991)
        if not r or not nr:
            # Empty r or nr?
            return
        zr = []
        for j in range(len(r)):
            # i and k are the neighbouring r values (0 before the first,
            # extrapolated past the last); zr is the averaged Nr.
            i = (r[j-1] if j > 0 else 0)
            k = (2 * r[j] - i if j == len(r) - 1 else r[j+1])
            zr_ = 2.0 * nr[j] / (k - i)
            zr.append(zr_)
        log_r = [math.log(i) for i in r]
        log_zr = [math.log(i) for i in zr]
        xy_cov = x_var = 0.0
        x_mean = sum(log_r) / len(log_r)
        y_mean = sum(log_zr) / len(log_zr)
        for (x, y) in zip(log_r, log_zr):
            xy_cov += (x - x_mean) * (y - y_mean)
            x_var += (x - x_mean)**2
        self._slope = (xy_cov / x_var if x_var != 0 else 0.0)
        if self._slope >= -1:
            warnings.warn('SimpleGoodTuring did not find a proper best fit '
                          'line for smoothing probabilities of occurrences. '
                          'The probability estimates are likely to be '
                          'unreliable.')
        self._intercept = y_mean - self._slope * x_mean
    def _switch(self, r, nr):
        """
        Calculate the r frontier where we must switch from Nr to Sr
        when estimating E[Nr].
        """
        for i, r_ in enumerate(r):
            if len(r) == i + 1 or r[i+1] != r_ + 1:
                # We are at the end of r, or there is a gap in r
                self._switch_at = r_
                break
            Sr = self.smoothedNr
            smooth_r_star = (r_ + 1) * Sr(r_+1) / Sr(r_)
            unsmooth_r_star = (r_ + 1) * nr[i+1] / nr[i]
            # Gale & Sampson: keep the raw estimate while it differs from
            # the smoothed one by more than 1.96 standard deviations.
            std = math.sqrt(self._variance(r_, nr[i], nr[i+1]))
            if abs(unsmooth_r_star-smooth_r_star) <= 1.96 * std:
                self._switch_at = r_
                break
    def _variance(self, r, nr, nr_1):
        # Variance of the Turing estimate r*, used by _switch to decide
        # whether raw and smoothed estimates differ significantly.
        r = float(r)
        nr = float(nr)
        nr_1 = float(nr_1)
        return (r + 1.0)**2 * (nr_1 / nr**2) * (1.0 + nr_1 / nr)
    def _renormalize(self, r, nr):
        """
        It is necessary to renormalize all the probability estimates to
        ensure a proper probability distribution results. This can be done
        by keeping the estimate of the probability mass for unseen items as
        N(1)/N and renormalizing all the estimates for previously seen items
        (as Gale and Sampson (1995) propose). (See M&S P.213, 1999)
        """
        prob_cov = 0.0
        for r_, nr_ in zip(r, nr):
            prob_cov += nr_ * self._prob_measure(r_)
        # NOTE(review): if prob_cov is 0 (e.g. an empty freqdist),
        # self._renormal is never assigned and prob() on a seen sample
        # would raise AttributeError -- confirm callers avoid this case.
        if prob_cov:
            self._renormal = (1 - self._prob_measure(0)) / prob_cov
    def smoothedNr(self, r):
        """
        Return the number of samples with count r.

        :param r: The amount of frequency.
        :type r: int
        :rtype: float
        """
        # Nr = a*r^b (with b < -1 to give the appropriate hyperbolic
        # relationship)
        # Estimate a and b by simple linear regression technique on
        # the logarithmic form of the equation: log Nr = a + b*log(r)
        return math.exp(self._intercept + self._slope * math.log(r))
    def prob(self, sample):
        """
        Return the sample's probability.

        :param sample: sample of the event
        :type sample: str
        :rtype: float
        """
        count = self._freqdist[sample]
        p = self._prob_measure(count)
        if count == 0:
            # Unseen mass is shared uniformly over the unseen bins.
            if self._bins == self._freqdist.B():
                p = 0.0
            else:
                p = p / (self._bins - self._freqdist.B())
        else:
            p = p * self._renormal
        return p
    def _prob_measure(self, count):
        # Raw (pre-renormalization) Good-Turing probability for a count,
        # using measured Nr below the switch point and smoothed Nr above.
        if count == 0 and self._freqdist.N() == 0 :
            return 1.0
        elif count == 0 and self._freqdist.N() != 0:
            return self._freqdist.Nr(1) / self._freqdist.N()
        if self._switch_at > count:
            Er_1 = self._freqdist.Nr(count+1)
            Er = self._freqdist.Nr(count)
        else:
            Er_1 = self.smoothedNr(count+1)
            Er = self.smoothedNr(count)
        r_star = (count + 1) * Er_1 / Er
        return r_star / self._freqdist.N()
    def check(self):
        # NOTE(review): broken as written -- self._Nr is never assigned
        # anywhere in this class (counts live in self._freqdist), so this
        # debug helper raises AttributeError if called.  It also *divides*
        # by self._renormal where prob() multiplies -- verify before use.
        prob_sum = 0.0
        for i in  range(0, len(self._Nr)):
            prob_sum += self._Nr[i] * self._prob_measure(i) / self._renormal
        print("Probability Sum:", prob_sum)
        #assert prob_sum != 1.0, "probability sum should be one!"
    def discount(self):
        """
        This function returns the total mass of probability transfers from the
        seen samples to the unseen samples.
        """
        return self.smoothedNr(1) / self._freqdist.N()
    def max(self):
        """Return the sample with the greatest observed count."""
        return self._freqdist.max()
    def samples(self):
        """Return the samples seen in the underlying frequency distribution."""
        return self._freqdist.keys()
    def freqdist(self):
        """Return the frequency distribution this estimate is based on."""
        return self._freqdist
    def __repr__(self):
        """
        Return a string representation of this ``ProbDist``.

        :rtype: str
        """
        return '<SimpleGoodTuringProbDist based on %d samples>'\
            % self._freqdist.N()
class MutableProbDist(ProbDistI):
    """
    A mutable probability distribution: the probabilities of an existing
    ``ProbDist`` are copied, for a fixed list of samples, into a mutable
    array, and an ``update`` method is provided for changing them in place.
    """

    def __init__(self, prob_dist, samples, store_logs=True):
        """
        Create the mutable probdist from the given ``prob_dist``, restricted
        to the given list of samples.  The values are stored as base-2 log
        probabilities when ``store_logs`` is true, and as plain
        probabilities otherwise.

        :param prob_dist: the distribution from which to garner the
            probabilities
        :type prob_dist: ProbDist
        :param samples: the complete set of samples
        :type samples: sequence of any
        :param store_logs: whether to store the probabilities as logarithms
        :type store_logs: bool
        """
        self._samples = samples
        # Map each sample to its index in the backing array.
        self._sample_dict = dict((s, i) for (i, s) in enumerate(samples))
        # Compact C-double storage, one slot per sample.
        self._data = array.array(str("d"), [0.0]) * len(samples)
        extract = prob_dist.logprob if store_logs else prob_dist.prob
        for i, s in enumerate(samples):
            self._data[i] = extract(s)
        self._logs = store_logs

    def samples(self):
        # inherit documentation
        return self._samples

    def prob(self, sample):
        # inherit documentation
        idx = self._sample_dict.get(sample)
        if idx is None:
            return 0.0
        value = self._data[idx]
        return 2**(value) if self._logs else value

    def logprob(self, sample):
        # inherit documentation
        idx = self._sample_dict.get(sample)
        if idx is None:
            return float('-inf')
        value = self._data[idx]
        return value if self._logs else math.log(value, 2)

    def update(self, sample, prob, log=True):
        """
        Update the probability for the given sample. This may cause the object
        to stop being the valid probability distribution - the user must
        ensure that they update the sample probabilities such that all samples
        have probabilities between 0 and 1 and that all probabilities sum to
        one.

        :param sample: the sample for which to update the probability
        :type sample: any
        :param prob: the new probability
        :type prob: float
        :param log: is the probability already logged
        :type log: bool
        """
        idx = self._sample_dict.get(sample)
        assert idx is not None
        # Convert the incoming value to whichever representation we store.
        if self._logs:
            self._data[idx] = prob if log else math.log(prob, 2)
        else:
            self._data[idx] = 2**(prob) if log else prob
##/////////////////////////////////////////////////////
## Kneser-Ney Probability Distribution
##//////////////////////////////////////////////////////
# This method for calculating probabilities was introduced in 1995 by Reinhard
# Kneser and Hermann Ney. It was meant to improve the accuracy of language
# models that use backing-off to deal with sparse data. The authors propose two
# ways of doing so: a marginal distribution constraint on the back-off
# distribution and a leave-one-out distribution. For a start, the first one is
# implemented as a class below.
#
# The idea behind a back-off n-gram model is that we have a series of
# frequency distributions for our n-grams so that in case we have not seen a
# given n-gram during training (and as a result have a 0 probability for it) we
# can 'back off' (hence the name!) and try testing whether we've seen the
# n-1-gram part of the n-gram in training.
#
# The novelty of Kneser and Ney's approach was that they decided to fiddle
# around with the way this latter, backed off probability was being calculated
# whereas their peers seemed to focus on the primary probability.
#
# The implementation below uses one of the techniques described in their paper
# titled "Improved backing-off for n-gram language modeling." In the same paper
# another technique is introduced to attempt to smooth the back-off
# distribution as well as the primary one. There is also a much-cited
# modification of this method proposed by Chen and Goodman.
#
# In order for the implementation of Kneser-Ney to be more efficient, some
# changes have been made to the original algorithm. Namely, the calculation of
# the normalizing function gamma has been significantly simplified and
# combined slightly differently with beta. None of these changes affect the
# nature of the algorithm, but instead aim to cut out unnecessary calculations
# and take advantage of storing and retrieving information in dictionaries
# where possible.
@compat.python_2_unicode_compatible
class KneserNeyProbDist(ProbDistI):
    """
    Kneser-Ney estimate of a probability distribution. This is a version of
    back-off that counts how likely an n-gram is provided the n-1-gram had
    been seen in training. Extends the ProbDistI interface, requires a trigram
    FreqDist instance to train on. Optionally, a different from default discount
    value can be specified. The default discount is set to 0.75.
    """
    def __init__(self, freqdist, bins=None, discount=0.75):
        """
        :param freqdist: The trigram frequency distribution upon which to base
            the estimation
        :type freqdist: FreqDist
        :param bins: Included for compatibility with nltk.tag.hmm
        :type bins: int or float
        :param discount: The discount applied when retrieving counts of
            trigrams
        :type discount: float (preferred, but can be set to int)
        """
        if not bins:
            self._bins = freqdist.B()
        else:
            self._bins = bins
        self._D = discount
        # cache for probability calculation
        self._cache = {}
        # internal bigram and trigram frequency distributions
        self._bigrams = defaultdict(int)
        self._trigrams = freqdist
        # helper dictionaries used to calculate probabilities
        self._wordtypes_after = defaultdict(float)
        self._trigrams_contain = defaultdict(float)
        self._wordtypes_before = defaultdict(float)
        for w0, w1, w2 in freqdist:
            self._bigrams[(w0,w1)] += freqdist[(w0, w1, w2)]
            self._wordtypes_after[(w0,w1)] += 1
            self._trigrams_contain[w1] += 1
            self._wordtypes_before[(w1,w2)] += 1
    def prob(self, trigram):
        """
        Return the Kneser-Ney probability estimate for *trigram*.

        :param trigram: an iterable with exactly three members (w0, w1, w2)
        :rtype: float
        :raises ValueError: if *trigram* does not have exactly 3 members
        """
        # sample must be a triple
        if len(trigram) != 3:
            raise ValueError('Expected an iterable with 3 members.')
        trigram = tuple(trigram)
        w0, w1, w2 = trigram
        if trigram in self._cache:
            return self._cache[trigram]
        else:
            # if the sample trigram was seen during training
            if trigram in self._trigrams:
                prob = (self._trigrams[trigram]
                        - self.discount())/self._bigrams[(w0, w1)]
            # else if the 'rougher' environment was seen during training
            elif (w0,w1) in self._bigrams and (w1,w2) in self._wordtypes_before:
                aftr = self._wordtypes_after[(w0, w1)]
                bfr = self._wordtypes_before[(w1, w2)]
                # the probability left over from alphas
                leftover_prob = ((aftr * self.discount())
                                 / self._bigrams[(w0, w1)])
                # the beta (including normalization)
                beta = bfr /(self._trigrams_contain[w1] - aftr)
                prob = leftover_prob * beta
            # else the sample was completely unseen during training
            else:
                prob = 0.0
            self._cache[trigram] = prob
            return prob
    def discount(self):
        """
        Return the value by which counts are discounted. By default set to 0.75.

        :rtype: float
        """
        return self._D
    def set_discount(self, discount):
        """
        Set the value by which counts are discounted to the value of discount.

        :param discount: the new value to discount counts by
        :type discount: float (preferred, but int possible)
        :rtype: None
        """
        self._D = discount
    def samples(self):
        """Return the trigrams observed during training."""
        return self._trigrams.keys()
    def max(self):
        """Return the trigram with the greatest training count."""
        return self._trigrams.max()
    def __repr__(self):
        '''
        Return a string representation of this ProbDist

        :rtype: str
        '''
        # Bug fix: the closing '>' was missing from the repr template,
        # unlike every other ProbDist __repr__ in this module.
        return '<KneserNeyProbDist based on {0} trigrams>'.format(self._trigrams.N())
##//////////////////////////////////////////////////////
## Probability Distribution Operations
##//////////////////////////////////////////////////////
def log_likelihood(test_pdist, actual_pdist):
    """
    Return the average base-2 log probability assigned by ``test_pdist``,
    weighted by ``actual_pdist``:
    ``sum(actual_pdist.prob(s) * log2(test_pdist.prob(s)))`` over the
    samples of ``actual_pdist``.

    :raises ValueError: if either argument is not a ``ProbDistI``.
    """
    if not (isinstance(test_pdist, ProbDistI) and
            isinstance(actual_pdist, ProbDistI)):
        raise ValueError('expected a ProbDist.')
    # Is this right?
    weighted_logprobs = (actual_pdist.prob(s) * math.log(test_pdist.prob(s), 2)
                         for s in actual_pdist)
    return sum(weighted_logprobs)
def entropy(pdist):
    """
    Return the Shannon entropy (in bits) of ``pdist`` over its samples:
    ``-sum(p * log2(p))``.
    """
    contributions = []
    for sample in pdist.samples():
        p = pdist.prob(sample)
        contributions.append(p * math.log(p, 2))
    return -sum(contributions)
##//////////////////////////////////////////////////////
## Conditional Distributions
##//////////////////////////////////////////////////////
@compat.python_2_unicode_compatible
class ConditionalFreqDist(defaultdict):
    """
    A collection of frequency distributions for a single experiment
    run under different conditions.  Conditional frequency
    distributions are used to record the number of times each sample
    occurred, given the condition under which the experiment was run.
    For example, a conditional frequency distribution could be used to
    record the frequency of each word (type) in a document, given its
    length.  Formally, a conditional frequency distribution can be
    defined as a function that maps from each condition to the
    FreqDist for the experiment under that condition.

    Conditional frequency distributions are typically constructed by
    repeatedly running an experiment under a variety of conditions,
    and incrementing the sample outcome counts for the appropriate
    conditions.  For example, the following code will produce a
    conditional frequency distribution that encodes how often each
    word type occurs, given the length of that word type:

    >>> from nltk.probability import ConditionalFreqDist
    >>> from nltk.tokenize import word_tokenize
    >>> sent = "the the the dog dog some other words that we do not care about"
    >>> cfdist = ConditionalFreqDist()
    >>> for word in word_tokenize(sent):
    ...     condition = len(word)
    ...     cfdist[condition][word] += 1

    An equivalent way to do this is with the initializer:

    >>> cfdist = ConditionalFreqDist((len(word), word) for word in word_tokenize(sent))

    The frequency distribution for each condition is accessed using
    the indexing operator:

    >>> cfdist[3]
    FreqDist({'the': 3, 'dog': 2, 'not': 1})
    >>> cfdist[3].freq('the')
    0.5
    >>> cfdist[3]['dog']
    2

    When the indexing operator is used to access the frequency
    distribution for a condition that has not been accessed before,
    ``ConditionalFreqDist`` creates a new empty FreqDist for that
    condition.
    """
    def __init__(self, cond_samples=None):
        """
        Construct a new empty conditional frequency distribution.  In
        particular, the count for every sample, under every condition,
        is zero.

        :param cond_samples: The samples to initialize the conditional
            frequency distribution with
        :type cond_samples: Sequence of (condition, sample) tuples
        """
        # FreqDist is the defaultdict factory: indexing an unseen
        # condition creates an empty FreqDist for it.
        defaultdict.__init__(self, FreqDist)
        if cond_samples:
            for (cond, sample) in cond_samples:
                self[cond][sample] += 1
    def __reduce__(self):
        # Support copying/pickling: rebuild from (condition, FreqDist)
        # pairs rather than relying on defaultdict's default behavior.
        kv_pairs = ((cond, self[cond]) for cond in self.conditions())
        return (self.__class__, (), None, None, kv_pairs)
    def conditions(self):
        """
        Return a list of the conditions that have been accessed for
        this ``ConditionalFreqDist``.  Use the indexing operator to
        access the frequency distribution for a given condition.
        Note that the frequency distributions for some conditions
        may contain zero sample outcomes.

        :rtype: list
        """
        return list(self.keys())
    def N(self):
        """
        Return the total number of sample outcomes that have been
        recorded by this ``ConditionalFreqDist``.

        :rtype: int
        """
        return sum(fdist.N() for fdist in compat.itervalues(self))
    def plot(self, *args, **kwargs):
        """
        Plot the given samples from the conditional frequency distribution.
        For a cumulative plot, specify cumulative=True.
        (Requires Matplotlib to be installed.)

        :param samples: The samples to plot
        :type samples: list
        :param title: The title for the graph
        :type title: str
        :param conditions: The conditions to plot (default is all)
        :type conditions: list
        """
        try:
            from matplotlib import pylab
        except ImportError:
            raise ValueError('The plot function requires matplotlib to be installed.'
                         'See http://matplotlib.org/')
        # Pop our own keyword arguments so the rest can be forwarded to
        # pylab.plot() unchanged.
        cumulative = _get_kwarg(kwargs, 'cumulative', False)
        conditions = _get_kwarg(kwargs, 'conditions', sorted(self.conditions()))
        title = _get_kwarg(kwargs, 'title', '')
        samples = _get_kwarg(kwargs, 'samples',
                             sorted(set(v for c in conditions for v in self[c])))  # this computation could be wasted
        if not "linewidth" in kwargs:
            kwargs["linewidth"] = 2
        for condition in conditions:
            if cumulative:
                freqs = list(self[condition]._cumulative_frequencies(samples))
                ylabel = "Cumulative Counts"
                legend_loc = 'lower right'
            else:
                freqs = [self[condition][sample] for sample in samples]
                ylabel = "Counts"
                legend_loc = 'upper right'
            # percents = [f * 100 for f in freqs] only in ConditionalProbDist?
            kwargs['label'] = "%s" % condition
            pylab.plot(freqs, *args, **kwargs)
        pylab.legend(loc=legend_loc)
        pylab.grid(True, color="silver")
        pylab.xticks(range(len(samples)), [compat.text_type(s) for s in samples], rotation=90)
        if title:
            pylab.title(title)
        pylab.xlabel("Samples")
        pylab.ylabel(ylabel)
        pylab.show()
    def tabulate(self, *args, **kwargs):
        """
        Tabulate the given samples from the conditional frequency distribution.

        :param samples: The samples to plot
        :type samples: list
        :param conditions: The conditions to plot (default is all)
        :type conditions: list
        :param cumulative: A flag to specify whether the freqs are cumulative (default = False)
        :type cumulative: bool
        """
        cumulative = _get_kwarg(kwargs, 'cumulative', False)
        conditions = _get_kwarg(kwargs, 'conditions', sorted(self.conditions()))
        samples = _get_kwarg(kwargs, 'samples',
                             sorted(set(v for c in conditions for v in self[c])))  # this computation could be wasted
        # Column width: wide enough for every sample label and count.
        width = max(len("%s" % s) for s in samples)
        freqs = dict()
        for c in conditions:
            if cumulative:
                freqs[c] = list(self[c]._cumulative_frequencies(samples))
            else:
                freqs[c] = [self[c][sample] for sample in samples]
            width = max(width, max(len("%d" % f) for f in freqs[c]))
        condition_size = max(len("%s" % c) for c in conditions)
        # Header row: sample labels.
        print(' ' * condition_size, end=' ')
        for s in samples:
            print("%*s" % (width, s), end=' ')
        print()
        # One row of counts per condition.
        for c in conditions:
            print("%*s" % (condition_size, c), end=' ')
            for f in freqs[c]:
                print("%*d" % (width, f), end=' ')
            print()
    # Mathematical operators
    def __add__(self, other):
        """
        Add counts from two ConditionalFreqDists.
        """
        if not isinstance(other, ConditionalFreqDist):
            return NotImplemented
        result = ConditionalFreqDist()
        for cond in self.conditions():
            newfreqdist = self[cond] + other[cond]
            if newfreqdist:
                result[cond] = newfreqdist
        # Conditions only present in `other` are copied over directly.
        for cond in other.conditions():
            if cond not in self.conditions():
                for elem, count in other[cond].items():
                    if count > 0:
                        result[cond][elem] = count
        return result
    def __sub__(self, other):
        """
        Subtract count, but keep only results with positive counts.
        """
        if not isinstance(other, ConditionalFreqDist):
            return NotImplemented
        result = ConditionalFreqDist()
        for cond in self.conditions():
            newfreqdist = self[cond] - other[cond]
            if newfreqdist:
                result[cond] = newfreqdist
        # Conditions only in `other`: keep the negated negative counts.
        for cond in other.conditions():
            if cond not in self.conditions():
                for elem, count in other[cond].items():
                    if count < 0:
                        result[cond][elem] = 0 - count
        return result
    def __or__(self, other):
        """
        Union is the maximum of value in either of the input counters.
        """
        if not isinstance(other, ConditionalFreqDist):
            return NotImplemented
        result = ConditionalFreqDist()
        for cond in self.conditions():
            newfreqdist = self[cond] | other[cond]
            if newfreqdist:
                result[cond] = newfreqdist
        for cond in other.conditions():
            if cond not in self.conditions():
                for elem, count in other[cond].items():
                    if count > 0:
                        result[cond][elem] = count
        return result
    def __and__(self, other):
        """
        Intersection is the minimum of corresponding counts.
        """
        if not isinstance(other, ConditionalFreqDist):
            return NotImplemented
        result = ConditionalFreqDist()
        for cond in self.conditions():
            newfreqdist = self[cond] & other[cond]
            if newfreqdist:
                result[cond] = newfreqdist
        return result
    # @total_ordering doesn't work here, since the class inherits from a builtin class
    def __le__(self, other):
        if not isinstance(other, ConditionalFreqDist):
            raise_unorderable_types("<=", self, other)
        return set(self.conditions()).issubset(other.conditions()) \
               and all(self[c] <= other[c] for c in self.conditions())
    def __lt__(self, other):
        if not isinstance(other, ConditionalFreqDist):
            raise_unorderable_types("<", self, other)
        return self <= other and self != other
    def __ge__(self, other):
        if not isinstance(other, ConditionalFreqDist):
            raise_unorderable_types(">=", self, other)
        return other <= self
    def __gt__(self, other):
        if not isinstance(other, ConditionalFreqDist):
            raise_unorderable_types(">", self, other)
        return other < self
    def __repr__(self):
        """
        Return a string representation of this ``ConditionalFreqDist``.

        :rtype: str
        """
        return '<ConditionalFreqDist with %d conditions>' % len(self)
@compat.python_2_unicode_compatible
class ConditionalProbDistI(dict):
    """
    Abstract interface: a collection of probability distributions for a
    single experiment run under different conditions.  Conditional
    probability distributions are used to estimate the likelihood of
    each sample, given the condition under which the experiment was
    run.  Formally, a conditional probability distribution maps each
    condition to the ``ProbDist`` for the experiment under that
    condition.
    """

    def __init__(self):
        raise NotImplementedError("Interfaces can't be instantiated")

    def conditions(self):
        """
        Return a list of the conditions that are represented by
        this ``ConditionalProbDist``.  Use the indexing operator to
        access the probability distribution for a given condition.

        :rtype: list
        """
        # Iterating a dict yields its keys, i.e. the conditions.
        return list(self)

    def __repr__(self):
        """
        Return a string representation of this ``ConditionalProbDist``.

        :rtype: str
        """
        return '<%s with %d conditions>' % (type(self).__name__, len(self))
class ConditionalProbDist(ConditionalProbDistI):
    """
    A conditional probability distribution modeling the experiments that
    were used to generate a conditional frequency distribution.  A
    ConditionalProbDist is constructed from a ``ConditionalFreqDist``
    and a ``ProbDist`` factory:

    - The ``ConditionalFreqDist`` specifies the frequency distribution
      for each condition.
    - The ``ProbDist`` factory is a function that takes a condition's
      frequency distribution and returns its probability distribution.
      A ``ProbDist`` class's name (such as ``MLEProbDist`` or
      ``HeldoutProbDist``) can be used to specify that class's
      constructor.

    The first argument to the ``ProbDist`` factory is the frequency
    distribution that it should model; the remaining arguments are given
    by the ``factory_args`` parameter to the ``ConditionalProbDist``
    constructor.  For example, the following code constructs a
    ``ConditionalProbDist``, where the probability distribution for each
    condition is an ``ELEProbDist`` with 10 bins:

    >>> from nltk.corpus import brown
    >>> from nltk.probability import ConditionalFreqDist
    >>> from nltk.probability import ConditionalProbDist, ELEProbDist
    >>> cfdist = ConditionalFreqDist(brown.tagged_words()[:5000])
    >>> cpdist = ConditionalProbDist(cfdist, ELEProbDist, 10)
    >>> cpdist['passed'].max()
    'VBD'
    >>> cpdist['passed'].prob('VBD')
    0.423...
    """

    def __init__(self, cfdist, probdist_factory,
                 *factory_args, **factory_kw_args):
        """
        Construct a new conditional probability distribution from the
        given conditional frequency distribution and ``ProbDist``
        factory.

        :type cfdist: ConditionalFreqDist
        :param cfdist: The ``ConditionalFreqDist`` specifying the
            frequency distribution for each condition.
        :type probdist_factory: class or function
        :param probdist_factory: The function or class that maps a
            condition's frequency distribution to its probability
            distribution.  It is called with the frequency distribution
            as its first argument, ``factory_args`` as its remaining
            positional arguments, and ``factory_kw_args`` as keyword
            arguments.
        :type factory_args: (any)
        :param factory_args: Extra arguments for ``probdist_factory``,
            usually used to specify extra properties of the individual
            distributions, such as the number of bins they contain.
        :type factory_kw_args: (any)
        :param factory_kw_args: Extra keyword arguments for
            ``probdist_factory``.
        """
        self._probdist_factory = probdist_factory
        self._factory_args = factory_args
        self._factory_kw_args = factory_kw_args
        for condition in cfdist:
            self[condition] = self._make_probdist(cfdist[condition])

    def _make_probdist(self, freqdist):
        # Apply the stored factory (plus its extra arguments) to one
        # condition's frequency distribution.
        return self._probdist_factory(freqdist,
                                      *self._factory_args,
                                      **self._factory_kw_args)

    def __missing__(self, key):
        # A condition that was never seen: model it with an empty
        # frequency distribution, exactly as if it had zero counts.
        self[key] = self._make_probdist(FreqDist())
        return self[key]
class DictionaryConditionalProbDist(ConditionalProbDistI):
    """
    An alternative ConditionalProbDist that simply wraps a dictionary
    of ProbDists rather than creating these from FreqDists.
    """

    def __init__(self, probdist_dict):
        """
        :param probdist_dict: a dictionary containing the probdists indexed
            by the conditions
        :type probdist_dict: dict any -> probdist
        """
        self.update(probdist_dict)

    def __missing__(self, key):
        # Unseen condition: lazily install a default DictionaryProbDist().
        probdist = DictionaryProbDist()
        self[key] = probdist
        return probdist
##//////////////////////////////////////////////////////
## Adding in log-space.
##//////////////////////////////////////////////////////

# If the difference is bigger than this, then just take the bigger one:
_ADD_LOGS_MAX_DIFF = math.log(1e-30, 2)

def add_logs(logx, logy):
    """
    Given two numbers ``logx`` = *log(x)* and ``logy`` = *log(y)*, return
    *log(x+y)*.  Conceptually this is ``log(2**logx + 2**logy)``, but the
    implementation avoids the overflow that direct computation can cause.
    """
    # If one term is ~30 orders of magnitude smaller than the other, it
    # cannot affect the sum at double precision: return the larger log.
    if logx < logy + _ADD_LOGS_MAX_DIFF:
        return logy
    if logy < logx + _ADD_LOGS_MAX_DIFF:
        return logx
    # Factor out the smaller exponent so both powers stay representable.
    smaller = min(logx, logy)
    return smaller + math.log(2 ** (logx - smaller) + 2 ** (logy - smaller), 2)
def sum_logs(logs):
    """Fold ``add_logs`` over ``logs``; the empty sum is log(0) = ``_NINF``."""
    if len(logs) == 0:
        return _NINF
    total = logs[0]
    for logp in logs[1:]:
        total = add_logs(total, logp)
    return total
##//////////////////////////////////////////////////////
## Probabilistic Mix-in
##//////////////////////////////////////////////////////
class ProbabilisticMixIn(object):
    """
    A mix-in that associates a probability (stored either directly or as
    a base-2 log) with instances of other classes (trees, rules, etc.).

    Derive from an existing class and from ``ProbabilisticMixIn``, and
    call both parent constructors explicitly::

        class ProbabilisticA(A, ProbabilisticMixIn):
            def __init__(self, x, y, **prob_kwarg):
                A.__init__(self, x, y)
                ProbabilisticMixIn.__init__(self, **prob_kwarg)

    Subclasses should generally also redefine the string representation,
    comparison, and hashing methods.
    """

    def __init__(self, **kwargs):
        """
        Initialize this object's probability.  Should be called by
        subclass constructors.

        :param prob: The probability associated with the object.
        :type prob: float
        :param logprob: The log of the probability associated with
            the object.
        :type logprob: float
        """
        has_prob = 'prob' in kwargs
        has_logprob = 'logprob' in kwargs
        if has_prob and has_logprob:
            raise TypeError('Must specify either prob or logprob (not both)')
        if has_prob:
            # Call through the class (not self) so immutable subclasses,
            # which override the setters to raise, can still be initialized.
            ProbabilisticMixIn.set_prob(self, kwargs['prob'])
        elif has_logprob:
            ProbabilisticMixIn.set_logprob(self, kwargs['logprob'])
        else:
            self.__prob = None
            self.__logprob = None

    def set_prob(self, prob):
        """
        Set the probability associated with this object to ``prob``.

        :param prob: The new probability
        :type prob: float
        """
        # The cached log is invalidated and recomputed lazily.
        self.__prob = prob
        self.__logprob = None

    def set_logprob(self, logprob):
        """
        Set the log probability associated with this object to
        ``logprob``, i.e. set the probability to ``2**logprob``.

        :param logprob: The new log probability
        :type logprob: float
        """
        self.__logprob = logprob
        self.__prob = None

    def prob(self):
        """
        Return the probability associated with this object.

        :rtype: float
        """
        if self.__prob is not None:
            return self.__prob
        if self.__logprob is None:
            return None
        self.__prob = 2 ** self.__logprob
        return self.__prob

    def logprob(self):
        """
        Return ``log(p)``, where ``p`` is the probability associated
        with this object.

        :rtype: float
        """
        if self.__logprob is not None:
            return self.__logprob
        if self.__prob is None:
            return None
        self.__logprob = math.log(self.__prob, 2)
        return self.__logprob
class ImmutableProbabilisticMixIn(ProbabilisticMixIn):
    """A ``ProbabilisticMixIn`` whose probability may not be modified."""

    def set_prob(self, prob):
        """Always raise: the probability of an immutable object is fixed."""
        msg = '%s is immutable' % self.__class__.__name__
        raise ValueError(msg)

    def set_logprob(self, prob):
        """Always raise: the log-probability of an immutable object is fixed."""
        msg = '%s is immutable' % self.__class__.__name__
        raise ValueError(msg)
## Helper function for processing keyword arguments
def _get_kwarg(kwargs, key, default):
if key in kwargs:
arg = kwargs[key]
del kwargs[key]
else:
arg = default
return arg
##//////////////////////////////////////////////////////
## Demonstration
##//////////////////////////////////////////////////////
def _create_rand_fdist(numsamples, numoutcomes):
    """
    Create a new frequency distribution, with random samples.  The
    samples are numbers from 1 to ``numsamples``, each generated by
    summing two numbers with uniform distributions.
    """
    import random
    fdist = FreqDist()
    upper_first = (1 + numsamples) // 2
    upper_second = numsamples // 2
    for _ in range(numoutcomes):
        outcome = (random.randint(1, upper_first) +
                   random.randint(0, upper_second))
        fdist[outcome] += 1
    return fdist
def _create_sum_pdist(numsamples):
    """
    Return the true probability distribution for the experiment
    ``_create_rand_fdist(numsamples, x)``.
    """
    fdist = FreqDist()
    # Enumerate every (first, second) pair the random experiment can draw
    # and count each possible sum once.
    for first in range(1, (1 + numsamples) // 2 + 1):
        for second in range(numsamples // 2 + 1):
            fdist[first + second] += 1
    return MLEProbDist(fdist)
def demo(numsamples=6, numoutcomes=500):
    """
    A demonstration of frequency distributions and probability
    distributions.  This demonstration creates three frequency
    distributions with, and uses them to sample a random process with
    ``numsamples`` samples.  Each frequency distribution is sampled
    ``numoutcomes`` times.  These three frequency distributions are
    then used to build six probability distributions.  Finally, the
    probability estimates of these distributions are compared to the
    actual probability of each sample.

    :type numsamples: int
    :param numsamples: The number of samples to use in each demo
        frequency distributions.
    :type numoutcomes: int
    :param numoutcomes: The total number of outcomes for each
        demo frequency distribution.  These outcomes are divided into
        ``numsamples`` bins.
    :rtype: None
    """
    # Randomly sample a stochastic process three times.
    fdist1 = _create_rand_fdist(numsamples, numoutcomes)
    fdist2 = _create_rand_fdist(numsamples, numoutcomes)
    fdist3 = _create_rand_fdist(numsamples, numoutcomes)

    # Use our samples to create probability distributions.
    pdists = [
        MLEProbDist(fdist1),
        LidstoneProbDist(fdist1, 0.5, numsamples),
        HeldoutProbDist(fdist1, fdist2, numsamples),
        HeldoutProbDist(fdist2, fdist1, numsamples),
        CrossValidationProbDist([fdist1, fdist2, fdist3], numsamples),
        SimpleGoodTuringProbDist(fdist1),
        SimpleGoodTuringProbDist(fdist1, 7),
        _create_sum_pdist(numsamples),  # the true (analytic) distribution
    ]

    # Find the probability of each sample: one tuple per sample value,
    # holding the sample, its empirical frequency, and every estimate.
    vals = []
    for n in range(1, numsamples+1):
        vals.append(tuple([n, fdist1.freq(n)] +
                          [pdist.prob(n) for pdist in pdists]))

    # Print the results in a formatted table.
    print(('%d samples (1-%d); %d outcomes were sampled for each FreqDist' %
           (numsamples, numsamples, numoutcomes)))
    print('='*9*(len(pdists)+2))
    FORMATSTR = ' FreqDist '+ '%8s '*(len(pdists)-1) + '| Actual'
    print(FORMATSTR % tuple(repr(pdist)[1:9] for pdist in pdists[:-1]))
    print('-'*9*(len(pdists)+2))
    FORMATSTR = '%3d %8.6f ' + '%8.6f '*(len(pdists)-1) + '| %8.6f'
    for val in vals:
        print(FORMATSTR % val)

    # Print the totals for each column (should all be 1.0)
    zvals = list(zip(*vals))
    sums = [sum(val) for val in zvals[1:]]
    print('-'*9*(len(pdists)+2))
    FORMATSTR = 'Total ' + '%8.6f '*(len(pdists)) + '| %8.6f'
    print(FORMATSTR % tuple(sums))
    print('='*9*(len(pdists)+2))

    # Display the distributions themselves, if they're short enough.
    if len("%s" % fdist1) < 70:
        print(' fdist1: %s' % fdist1)
        print(' fdist2: %s' % fdist2)
        print(' fdist3: %s' % fdist3)
    print()

    # Draw 5000 samples from each estimated distribution and show the
    # resulting empirical counts.
    print('Generating:')
    for pdist in pdists:
        fdist = FreqDist(pdist.generate() for i in range(5000))
        print('%20s %s' % (pdist.__class__.__name__[:20], ("%s" % fdist)[:55]))
    print()
def gt_demo():
    """Print Simple Good-Turing estimates for the vocabulary of Austen's Emma."""
    from nltk import corpus
    emma_words = corpus.gutenberg.words('austen-emma.txt')
    fd = FreqDist(emma_words)
    sgt = SimpleGoodTuringProbDist(fd)
    print('%18s %8s %14s' % ("word", "freqency", "SimpleGoodTuring"))
    # Walk the vocabulary from most to least frequent.
    for key, _count in sorted(fd.items(), key=lambda item: item[1], reverse=True):
        print('%18s %8d %14e' % (key, fd[key], sgt.prob(key)))
if __name__ == '__main__':
    # Run the estimator-comparison demo twice (small and large sample
    # counts), then the Simple Good-Turing demonstration on the Emma corpus.
    demo(6, 10)
    demo(5, 5000)
    gt_demo()
# Public names exported by ``from nltk.probability import *``.
__all__ = ['ConditionalFreqDist', 'ConditionalProbDist',
           'ConditionalProbDistI', 'CrossValidationProbDist',
           'DictionaryConditionalProbDist', 'DictionaryProbDist', 'ELEProbDist',
           'FreqDist', 'SimpleGoodTuringProbDist', 'HeldoutProbDist',
           'ImmutableProbabilisticMixIn', 'LaplaceProbDist', 'LidstoneProbDist',
           'MLEProbDist', 'MutableProbDist', 'KneserNeyProbDist', 'ProbDistI', 'ProbabilisticMixIn',
           'UniformProbDist', 'WittenBellProbDist', 'add_logs',
           'log_likelihood', 'sum_logs', 'entropy']
| mit |
RichardTMR/homework | week3/Codelab2/tool_show_size.py | 1 | 1681 |
import tensorflow as tf
import matplotlib.pyplot as plt
import tools
# Load the demo image (uint8), display it, then cast to float32 and add
# a leading batch dimension so it can be fed through conv layers.
# NOTE(review): the [360, 300, 3] shape comments assume this particular
# cat.jpg -- confirm for other images.
cat = plt.imread('cat.jpg') #unit8
plt.imshow(cat)
cat = tf.cast(cat, tf.float32) #[360, 300, 3]
x = tf.reshape(cat, [1, 360, 300, 3]) #[1, 360, 300, 3]

# First conv: 3x3 kernel, 3 -> 16 channels, ReLU, then max pooling.
with tf.variable_scope('conv1'):
    w = tools.weight([3,3,3,16], is_uniform=True)
    x_w = tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='SAME')
    b = tools.bias([16])
    x_b = tf.nn.bias_add(x_w, b)
    x_relu = tf.nn.relu(x_b)
    x_pool = tools.pool('test1', x_relu, kernel=[1,2,2,1], stride=[1,2,2,1],is_max_pool=True)

# Second conv: 3x3 kernel, 16 -> 32 channels, ReLU, then average
# pooling (is_max_pool=False), followed by batch normalisation.
with tf.variable_scope('conv2'):
    w2 = tools.weight([3,3,16,32], is_uniform=True)
    x_w2 = tf.nn.conv2d(x_pool, w2, strides=[1, 1, 1, 1], padding='SAME')
    b2 = tools.bias([32])
    x_b2 = tf.nn.bias_add(x_w2, b2)
    x_relu2 = tf.nn.relu(x_b2)
    x_pool2 = tools.pool('test2',x_relu2, kernel=[1,2,2,1],stride=[1,2,2,1], is_max_pool=False)

x_BN = tools.batch_norm(x_pool2)
#
def shape(x):
    """Return the static shape of tensor ``x`` as a string (for printing)."""
    return '{}'.format(x.get_shape())
# Report the static shape of every intermediate tensor in each layer.
# First conv
print('\n')
print('** First conv: **\n')
print('input size: ', shape(x))
print('w size:', shape(w))
print('x_w size: ', shape(x_w))
print('b size: ', shape(b))
print('x_b size: ', shape(x_b))
print('x_relu size: ', shape(x_relu))
print('x_pool size: ', shape(x_pool))
print('\n')

# Second conv
print('** Second conv: **\n')
print('input size: ', shape(x_pool))
print('w2 size:', shape(w2))
print('x_w2 size: ', shape(x_w2))
print('b2 size: ', shape(b2))
print('x_b2 size: ', shape(x_b2))
print('x_relu2 size: ', shape(x_relu2))
print('x_pool2 size: ', shape(x_pool2))
print('x_BN size: ', shape(x_BN))
print('\n')
| apache-2.0 |
kyleabeauchamp/HMCNotes | code/correctness/run_samples_grid_ljbox.py | 1 | 3519 | import lb_loader
import pandas as pd
import simtk.openmm.app as app
import numpy as np
import simtk.openmm as mm
from simtk import unit as u
from openmmtools import hmc_integrators, testsystems
from collections import OrderedDict
def get_grid(sysname, temperature, timestep, langevin_timestep, groups, steps_per_hmc=100, extra_chances=5):
    """Build an OrderedDict mapping formatted run names -> integrator instances.

    One entry is created per (integrator type, timestep) combination:
    two Langevin runs, two GHMC runs, one XCGHMC run, plus one XCGHMC
    run with hard-coded tuned parameters.

    NOTE(review): the ``for timestep in [...]`` loops below deliberately
    reuse -- and therefore overwrite -- the ``timestep`` argument; each
    section starts from the last value left by the previous loop.
    Confirm this chaining is intended (the single-element loops suggest
    leftover scaffolding).  ``groups``, ``steps_per_hmc`` and
    ``extra_chances`` are only partially used.
    """
    integrators = OrderedDict()

    # Plain Langevin dynamics at 1x and 4x the reference timestep.
    for timestep in [1.0 * langevin_timestep, 4.0 * langevin_timestep]:
        collision_rate = 1.0 / u.picoseconds
        integrator = mm.LangevinIntegrator(temperature, collision_rate, timestep)
        itype = type(integrator).__name__
        prms = dict(sysname=sysname, itype=itype, timestep=timestep / u.femtoseconds, collision=lb_loader.fixunits(collision_rate))
        fmt_string = lb_loader.format_name(prms)
        integrators[fmt_string] = integrator

    # GHMC without a thermostat collision rate, at the last Langevin
    # timestep and at 20 fs.
    collision_rate = None
    for timestep in [timestep, 20 * u.femtoseconds]:
        integrator = hmc_integrators.GHMCIntegrator(temperature=temperature, steps_per_hmc=steps_per_hmc, timestep=timestep, collision_rate=collision_rate)
        itype = type(integrator).__name__
        prms = dict(sysname=sysname, itype=itype, timestep=timestep / u.femtoseconds, collision=lb_loader.fixunits(collision_rate))
        fmt_string = lb_loader.format_name(prms)
        integrators[fmt_string] = integrator

    # Extra-chances GHMC at the last timestep from the loop above.
    collision_rate = None
    for timestep in [timestep]:
        integrator = hmc_integrators.XCGHMCIntegrator(temperature=temperature, steps_per_hmc=steps_per_hmc, timestep=timestep, extra_chances=extra_chances, collision_rate=collision_rate)
        itype = type(integrator).__name__
        prms = dict(sysname=sysname, itype=itype, timestep=timestep / u.femtoseconds, collision=lb_loader.fixunits(collision_rate))
        fmt_string = lb_loader.format_name(prms)
        integrators[fmt_string] = integrator

    # One additional XCGHMC run with hand-tuned parameters.
    xcghmc_parms = dict(timestep=32.235339 * u.femtoseconds, steps_per_hmc=16, extra_chances=1, collision_rate=None)
    integrator = hmc_integrators.XCGHMCIntegrator(temperature=temperature, **xcghmc_parms)
    itype = type(integrator).__name__
    prms = dict(sysname=sysname, itype=itype, timestep=integrator.timestep / u.femtoseconds, collision=lb_loader.fixunits(None))
    fmt_string = lb_loader.format_name(prms)
    integrators[fmt_string] = integrator

    return integrators
# Wall-clock budget per integrator run and the benchmark system name.
walltime = 9.0 * u.hours
sysname = "switchedljbox"

# Load and equilibrate the test system before benchmarking.
system, positions, groups, temperature, timestep, langevin_timestep, testsystem, equil_steps, steps_per_hmc = lb_loader.load(sysname)
positions, boxes = lb_loader.equilibrate(testsystem, temperature, timestep, steps=equil_steps, minimize=True)

# Run each integrator in the grid for the same wall-clock time, writing
# per-run CSV state data and DCD trajectories under ./data/.
for fmt_string, integrator in get_grid(sysname, temperature, timestep, langevin_timestep, groups).items():
    itype = type(integrator).__name__
    print("%s %s" % (fmt_string, itype))
    csv_filename = "./data/%s.csv" % fmt_string
    pdb_filename = "./data/%s.pdb" % fmt_string
    dcd_filename = "./data/%s.dcd" % fmt_string
    simulation = lb_loader.build(testsystem, integrator, temperature)
    simulation.step(5)
    # Langevin steps are cheap, so report less frequently; HMC-style
    # integrators report every 2 steps.  Kinetic energy only for MJHMC.
    output_frequency = 100 if "Langevin" in itype else 2
    kineticEnergy = True if "MJHMC" in itype else False
    simulation.reporters.append(app.StateDataReporter(csv_filename, output_frequency, step=True, time=True, potentialEnergy=True, kineticEnergy=kineticEnergy, temperature=True, density=True, elapsedTime=True))
    simulation.reporters.append(app.DCDReporter(dcd_filename, output_frequency))
    simulation.runForClockTime(walltime)
| gpl-2.0 |
bchappet/dnfpy | src/dnfpy/cellular/rsdnf2LayerConvolutionTest.py | 1 | 5065 | from dnfpy.core.constantMap import ConstantMap
import unittest
import numpy as np
import dnfpy.view.staticViewMatplotlib as view
from dnfpy.cellular.rsdnf2LayerConvolution import Rsdnf2LayerConvolution
import matplotlib.pyplot as plt
class Rsdnf2LayerConvolutionTest(unittest.TestCase):
    """Unit tests for the two-layer RSDNF convolution map.

    Each test places activation spikes on a 100x100 map and checks how
    excitation spreads through the excitatory layer (``excMap``) after a
    number of ``compute()`` iterations.  Several exploratory tests are
    kept commented out below for reference.
    """

    def setUp(self):
        """Build a fresh map with a single centre activation spike."""
        self.size = 100
        self.activation = np.zeros((self.size,self.size),np.intc)
        self.uut = Rsdnf2LayerConvolution("uut",self.size,activation=self.activation)
        self.uut.reset()
        self.activation[self.size//2,self.size//2] = 1
        self.uut.setParams(pExc=1.0,pInh=1.0,nspike=20)

    # def testConvolution1(self):
    #     self.uut.setParams(shift=1)
    #     self.uut.setParams(nspike=200,iExc=1.25,iInh=0.7,pExc=0.0043,pInh=0.4)
    #     for i in range(200):
    #         self.uut.compute()
    #     data = self.uut.getData()
    #     view.plotArray(data)
    #     view.show()

    def testResetLat(self):
        """
        Check that everything is reset after call to reset lat
        """
        self.uut.setParams(nspike=2000,iExc=1.25,iInh=0.7,pExc=0.0043,pInh=0.4)
        for i in range(120):
            self.uut.compute()
        data = self.uut.getData()
        # Clear the input and reset lateral state: nothing should remain.
        self.activation[...] = 0
        self.uut.resetLat()
        for i in range(120):
            self.uut.compute()
        data = self.uut.getData()
        assert(np.sum(data) == 0)

    def testComputeP1(self):
        """With p=1, the diagonal neighbour must receive all 20 spikes."""
        for i in range(120):
            self.uut.compute()
        data = self.uut.excMap
        self.assertEqual(data[self.size//2+1,self.size//2+1],20)

    # def testWorstCaseScenario(self):
    #     self.activation[self.size//2-5:self.size//2+5,self.size//2-5:self.size//2+5] = 1
    #     self.uut.setParams(nspike=20)
    #
    #     for i in range(100*20 + 200):
    #         self.uut.compute()
    #     data = self.uut.excMap
    #     self.assertEqual(np.sum(data),100*100*100*20 - 100*20)

    def testComputeActivationNspike1(self):
        """With nspike=1, every cell except the source gets one spike."""
        self.uut.setParams(nspike=1)
        self.activation[self.size//2,self.size//2] = 1
        for i in range(102):
            self.uut.compute()
        data = self.uut.excMap
        self.assertEqual(np.sum(data),self.size**2-1)

    def testComputeActivationNspike10(self):
        """With nspike=10, every cell except the source gets ten spikes."""
        self.uut.setParams(nspike=10)
        self.activation[self.size//2,self.size//2] = 1
        for i in range(140):
            self.uut.compute()
        data = self.uut.excMap
        self.assertEqual(np.sum(data),10*(self.size**2)-10)

    # def testComputeReset(self):
    #     self.uut.setParams(nspike=1)
    #     self.activation[self.size//2,self.size//2] = 1
    #     self.uut.setParams(proba=1.0)
    #
    #     for i in range(6):
    #         self.uut.compute()
    #     data = self.uut.excMap
    #     self.assertEqual(data[self.size//2+4,self.size//2],1)
    #     self.assertEqual(data[self.size//2+5,self.size//2],0)
    #
    #     self.uut.resetLat()
    #
    #     for i in range(5):
    #         self.uut.compute()
    #     data = self.uut.excMap
    #     self.assertEqual(data[self.size//2+4,self.size//2],1)
    #     self.assertEqual(data[self.size//2+5,self.size//2],0)
    #
    #
    # def testMultiActivation(self):
    #     self.uut.setParams(nspike=9)
    #     self.activation[self.size//2,self.size//2] = 1
    #     self.activation[self.size//2,self.size//2+1] = 1
    #     self.activation[self.size//2+1,self.size//2+1] = 1
    #     self.activation[self.size//2+1,self.size//2] = 1
    #     self.uut.compute()
    #     self.activation[...] = 0
    #     for i in range(30):
    #         self.uut.compute()
    #     data = self.uut.excMap
    #
    # def testReset(self):
    #     self.uut.setParams(nspike=1)
    #     self.activation[self.size//2,self.size//2] = 1
    #     self.uut.compute()
    #     for i in range(20):
    #         self.uut.compute()
    #     data = self.uut.excMap
    #     self.uut.reset()
    #     data2 = self.uut.getData()
    #     self.assertEqual(np.sum(data2),0)
    #
    # def testComputeP2(self):
    #     self.activation[self.size//2,self.size//2] = 1
    #     self.uut.setParams(proba=0.99)
    #     for i in range(100):
    #         self.uut.compute()
    #     data = self.uut.excMap
    #
    #
    # def tes_ComputePrecision(self):#TODO
    #     self.activation[self.size//2,self.size//2] = 1
    #     self.uut.setParams(proba=0.99)
    #     self.uut.setParams(precision=1)
    #     for i in range(100):
    #         self.uut.compute()
    #     data = self.uut.excMap
    #     view.plotArray(data)
    #     view.show()
if __name__ == "__main__":
    # Run all tests in this module via the standard unittest runner.
    unittest.main()
| gpl-2.0 |
muxiaobai/CourseExercises | python/kaggle/learn/RandomForestRegressor.py | 1 | 2390 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
#https://www.kaggle.com/dansbecker/random-forests
# vary trees
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.externals import joblib
# input data: training data with the target, test data for submission.
melbourne_file_path = 'train.csv'
test = pd.read_csv('test.csv')
melbourne_data = pd.read_csv(melbourne_file_path)
#melbourne_data = melbourne_data.dropna()
# data describe
'''
print (melbourne_data.describe())
print (melbourne_data.columns)
melbourne_price_data = melbourne_data.Price
print (melbourne_price_data.head())
'''
# Target column and the four predictor columns used by the model.
y = melbourne_data.SalePrice
melbourne_predictors = ['LotArea', 'OverallQual', 'YearBuilt', 'TotRmsAbvGrd']
X = melbourne_data[melbourne_predictors]
#print (X.head())
#print (X.isnull())

# split data into train and validation
# how to know test_size and random_state?
train_x, val_x, train_y, val_y = train_test_split(X, y, test_size=0.25, random_state=0)

# find max_leaf_nodes, then get 400
'''
def getmea(max_leaf_nodes,mea_train_x,mea_test_x,mea_train_y,mea_test_y):
    model = DecisionTreeRegressor(max_leaf_nodes = max_leaf_nodes,random_state = 0)
    model.fit(mea_train_x,mea_train_y)
    predicted_test = model.predict(mea_test_x)
    return mean_absolute_error(mea_test_y,predicted_test)

for max_leaf_nodes in [300,350,400,450,500,550,600,650,700,750]:
    mea = getmea(max_leaf_nodes,train_x,val_x,train_y,val_y)
    print("Max_leaf_nodes: %d ,mea: %d" %(max_leaf_nodes,mea))
'''
# model and train
forest_model = RandomForestRegressor()
forest_model.fit(train_x, train_y)

# validation: report MAE on the hold-out split.
predicted_home_prices = forest_model.predict(val_x)
print("The mean_absolute_error are")
# BUG FIX: the original used a Python 2 print *statement*
# (`print mean_absolute_error(...)`), which is a SyntaxError on
# Python 3 and inconsistent with every other print() in this script.
print(mean_absolute_error(val_y, predicted_home_prices))

# predict and save output
#print ("Making predictions for the following 5 houses")
#print (val_x.head())
#print ("The predictions are")
predicted_test_prices = forest_model.predict(test[melbourne_predictors])
#print (predicted_home_prices)
my_submission = pd.DataFrame({'Id': test.Id, 'SalePrice': predicted_test_prices})
my_submission.to_csv('submission.csv', index=False, header=True)
my_submission.to_csv('result.txt', index=False, header=False, sep='\t')

#save model
#joblib.dump(melbourne_model,'model.pickle')
#load model
#model = joblib.load('model.pickle')
| gpl-2.0 |
live-clones/dolfin-adjoint | examples/time-distributed-control/time-distributed-control.py | 1 | 6656 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# .. _klein:
#
# .. py:currentmodule:: dolfin_adjoint
#
# Time-distributed controls
# =========================
#
# .. sectionauthor:: Simon W. Funke <simon@simula.no>
#
#
# Background
# **********
# Some time-dependent problems have control variables that are distributed over
# all (or some) time-levels. The following example demonstrates how this can be
# implemented in dolfin-adjoint.
#
# One important aspect to consider is the regularisation term. For
# time-distributed controls, one typically uses wishes to enforce smoothness
# of the control variables in time. We will also discuss how such a
# regularisation term is implemented.
#
# Problem definition
# ******************
# We consider the heat equation with a time-dependent source term :math:`f`, which will be
# our control variable:
#
# .. math::
# \frac{\partial u}{\partial t} - \nu \nabla^{2} u= f(t)
# \quad & \textrm{in } \Omega \times (0, T), \\
# u = 0 \quad & \textrm{for } \Omega \times \{0\} \\
# u = 0 \quad & \textrm{for } \partial \Omega \times (0, T).
#
#
# where :math:`\Omega` is the unit square, :math:`T` is the final time, :math:`u`
# is the unkown temperature variation, :math:`\nu` is the thermal diffusivity, and
# :math:`g` is the initial temperature.
#
# The objective value, the model output of interest, is the norm of the
# temperature variable integrated over time, plus a regularisation term that
# enforces smoothness of the control in time:
#
# .. math::
# J(u, f) := \int_0^T \int_\Omega (u-d)^2 \textrm{d} \Omega \text{d}t +
# \frac{\alpha}{2} \int_0^T \int_\Omega \dot f^2 \textrm{d} \Omega \text{d}t
#
# The aim of this example is to solve the minimization problem :math:`\min_f J`
# for some given data :math:`d`.
# Implementation
# **************
# We start by importing the needed FEniCS and dolfin-adjoint modules (note that
# `fenics_adjoint` is an alias for `dolfin_adjoint`):
from fenics import *
from fenics_adjoint import *
from collections import OrderedDict
dt_meas = dt  # Keep a reference to dt, the time-measure of dolfin-adjoint

# Next, we define the expressions for observational data :math:`d` and the
# viscosity :math:`\nu`.

data = Expression("16*x[0]*(x[0]-1)*x[1]*(x[1]-1)*sin(pi*t)", t=0, degree=4)
nu = Constant(1e-5)

# Next, we define the discretization space:

mesh = UnitSquareMesh(8, 8)
V = FunctionSpace(mesh, "CG", 1)

# ... and time:

dt = Constant(0.1)
T = 2

# We are considering a time-distributed forcing as control. In the next step,
# we create one control function for each timestep in the model, and store all
# controls in a dictionary that maps timestep to control function:

ctrls = OrderedDict()
t = float(dt)
while t <= T:
    # One independent control per time level; `annotate=True` makes the
    # function part of the dolfin-adjoint tape.
    ctrls[t] = Function(V, annotate=True)
    t += float(dt)

# The following function implements a heat equation solver in FEniCS. The
# only `dolfin-adjoint` specific functions are `adj_start_timestep` and
# `adj_inc_timestep` to communicate the time-levels to `dolfin_adjoint`, and the
# `annotate` flag in the assignment to enforce that the update of the forcing
# function is captured in the `dolfin-adjoint` tape:
def solve_heat(ctrls):
    """Solve the forward heat equation driven by the time-level controls.

    :param ctrls: dict mapping timestep -> control Function (the source f)
    :returns: (u_0, d) -- the final solution and the final data function
    """
    u = TrialFunction(V)
    v = TestFunction(V)

    f = Function(V, name="source")
    u_0 = Function(V, name="solution")
    d = Function(V, name="data")

    # Backward-Euler discretisation of u_t - nu*laplace(u) = f.
    F = ( (u - u_0)/dt*v + nu*inner(grad(u), grad(v)) - f*v)*dx
    a, L = lhs(F), rhs(F)
    bc = DirichletBC(V, 0, "on_boundary")

    t = float(dt)
    adj_start_timestep(time=t)

    while t <= T:
        # Update source term from control array
        f.assign(ctrls[t])

        # Update data function
        data.t = t
        d.assign(interpolate(data, V), annotate=True)

        # Solve PDE
        solve(a == L, u_0, bc)

        # Update time
        t += float(dt)
        adj_inc_timestep(time=t, finished=t>T)

    return u_0, d
u, d = solve_heat(ctrls)

# With these preparation steps, we are now ready to define the functional.
# First we discretise the regularisation term
#
# .. math::
#     \frac{\alpha}{2} \int_0^T \int_\Omega \dot f^2 \textrm{d} \Omega \text{d}t
#
# Note, that :math:`f` is a piecewise linear function in time over the time
# intervals :math:`K = [(0, \delta t), (\delta t, 2 \delta t), \dots,
# (T-\delta t, T)]`. Thus, we can write the integral as a sum over all
# intervals
#
# .. math::
#     \frac{\alpha}{2} \sum_{a_k, b_k \in K} \int_{a_k}^{b_k}
#     \int_\Omega \dot f(t)^2 \textrm{d} \Omega\text{d}t
#
# Discretising the time-derivative yields:
#
# .. math::
#     \frac{\alpha}{2} \sum_K \int_{a_k}^{b_k}
#     \int_\Omega \left(\frac{f(b_k)-
#     f(a_k)}{b_k-a_k}\right)^2\textrm{d}\Omega \\
#     = \frac{\alpha}{2} \sum_K (b_k-a_k)^{-1}
#     \int_\Omega \left(f(b_k)- f(a_k)\right)^2\textrm{d}\Omega
#
# In code this translates to:

alpha = Constant(1e-3)
regularisation = alpha/2*sum([1/dt*(fb-fa)**2*dx for fb, fa in
                              zip(list(ctrls.values())[1:], list(ctrls.values())[:-1])])

# By default, dolfin-adjoint integrates functionals over the entire
# time-interval.  Since we have manually discretised the regularisation,
# it is sufficient to tell dolfin-adjoint to evaluate it at the beginning:

regularisation = regularisation*dt_meas[START_TIME]

# Next, we define the remaining functional terms and controls:

J = Functional((u-d)**2*dx*dt_meas + regularisation)
m = [Control(c) for c in ctrls.values()]

# Finally, we define the reduced functional and solve the optimisation problem:

rf = ReducedFunctional(J, m)
opt_ctrls = minimize(rf, options={"maxiter": 50})

# If we solve this optimisation problem with varying :math:`\alpha` parameters,
# we observe that we get different behaviour in the controls: the higher the
# alpha value, the "smoother" the control function becomes. The following plots
# show the optimised control evaluated at the middle point :math:`(0.5, 0.5)`
# over time for different :math:`\alpha` values:

# .. image:: control_alpha=0.0001.png
#     :scale: 50
#     :align: left
# .. image:: control_alpha=0.001.png
#     :scale: 50
#     :align: right
# .. image:: control_alpha=0.01.png
#     :scale: 50
#     :align: left
# .. image:: control_alpha=0.1.png
#     :scale: 50
#     :align: right

# The following code creates these plots:

from matplotlib import pyplot, rc
rc('text', usetex=True)

# Sample each optimised control at the domain midpoint and plot the
# resulting time series.
x = [c((0.5, 0.5)) for c in opt_ctrls]
pyplot.plot(x, label="$\\alpha={}$".format(float(alpha)))
pyplot.ylim([-3, 3])
pyplot.legend()
pyplot.savefig("control_alpha={}.png".format(float(alpha)))
| lgpl-3.0 |
lnls-fac/apsuite | apsuite/dynap/dynap_xy.py | 1 | 10945 | """Calculate dynamic aperture."""
import numpy as _np
import matplotlib.pyplot as _mpyplot
import matplotlib.gridspec as _mgridspec
import matplotlib.colors as _mcolors
import matplotlib.text as _mtext
import pyaccel.tracking as _pytrack
from .base import BaseClass as _BaseClass
class DynapXYParams():
    """Configuration for an on-energy x-y dynamic-aperture scan."""

    def __init__(self):
        """Initialise every parameter with its default value."""
        # Tracking configuration.
        self.nrturns = 512
        self.turn_by_turn = True
        # Launch-grid limits [m] and number of points per axis.
        self.x_min = -12.0e-3
        self.x_max = 0.0
        self.y_min = 0.0
        self.y_max = 4.0e-3
        self.x_nrpts = 70
        self.y_nrpts = 30
        # Energy offset and small angle kicks added to every launch point.
        self.de_offset = 0.0
        self.xl_off = 1e-5
        self.yl_off = 1e-5
        # Integer parts of the tunes (used only for plotting).
        self.intnux = 49.0
        self.intnuy = 14.0

    def __str__(self):
        """Return one formatted line per parameter."""
        fields = [
            'nrturns : {:d}'.format(self.nrturns),
            'turn_by_turn : {:s}'.format(str(self.turn_by_turn)),
            'x_nrpts : {:d}'.format(self.x_nrpts),
            'y_nrpts : {:d}'.format(self.y_nrpts),
            'x_min [m] : {:.2g}'.format(self.x_min),
            'x_max [m] : {:.2g}'.format(self.x_max),
            'y_min [m] : {:.2g}'.format(self.y_min),
            'y_max [m] : {:.2g}'.format(self.y_max),
            'de_offset : {:.3g}'.format(self.de_offset),
            'xl_off [rad] : {:.2g}'.format(self.xl_off),
            'yl_off [rad] : {:.2g}'.format(self.yl_off),
            'intnux : {:.2f} (for graphs)'.format(self.intnux),
            'intnuy : {:.2f} (for graphs)'.format(self.intnuy),
        ]
        return '\n'.join(fields) + '\n'
class DynapXY(_BaseClass):
"""."""
def __init__(self, accelerator):
    """Store the accelerator model and allocate empty result containers.

    :param accelerator: pyaccel accelerator model used for tracking.
    """
    super().__init__()
    self._acc = accelerator
    self.params = DynapXYParams()
    # Raw tracking results (filled by do_tracking).
    self.data['x_in'] = _np.array([], dtype=float)
    self.data['y_in'] = _np.array([], dtype=float)
    self.data['rout'] = _np.array([], dtype=float)
    self.data['lost_turn'] = _np.array([], dtype=int)
    self.data['lost_element'] = _np.array([], dtype=int)
    self.data['lost_plane'] = _np.array([], dtype=int)
    # Processed results (filled by process_data).
    self.x_dynap = _np.array([], dtype=float)
    self.y_dynap = _np.array([], dtype=float)
    self.x_freq_ini = _np.array([], dtype=float)
    self.x_freq_fin = _np.array([], dtype=float)
    self.y_freq_ini = _np.array([], dtype=float)
    self.y_freq_fin = _np.array([], dtype=float)
    self.x_diffusion = _np.array([], dtype=float)
    self.y_diffusion = _np.array([], dtype=float)
    self.diffusion = _np.array([], dtype=float)
def do_tracking(self):
    """Track a grid of x-y initial conditions and store the raw results."""
    # Rectangular launch grid in the transverse plane.
    x_in, y_in = _np.meshgrid(
        _np.linspace(
            self.params.x_min, self.params.x_max, self.params.x_nrpts),
        _np.linspace(
            self.params.y_min, self.params.y_max, self.params.y_nrpts))

    # Reference orbit: 6D closed orbit when the RF cavity is on,
    # otherwise the 4D closed orbit at the requested energy offset.
    if self._acc.cavity_on:
        orb = _np.squeeze(_pytrack.find_orbit6(self._acc))
    else:
        orb = _np.zeros(6)
        orb[5] = self.params.de_offset
        orb[:4] = _np.squeeze(_pytrack.find_orbit4(
            self._acc, energy_offset=self.params.de_offset))

    # One phase-space column per grid point: the grid offsets and the
    # small angle kicks are added on top of the reference orbit.
    rin = _np.tile(orb, (x_in.size, 1)).T
    rin[0, :] += x_in.ravel()
    rin[1, :] += self.params.xl_off
    rin[2, :] += y_in.ravel()
    rin[3, :] += self.params.yl_off

    out = _pytrack.ring_pass(
        self._acc, rin, nr_turns=self.params.nrturns,
        turn_by_turn=self.params.turn_by_turn)

    # ring_pass returns (rout, ..., lost_turn, lost_element, lost_plane).
    self.data['x_in'] = x_in
    self.data['y_in'] = y_in
    self.data['rout'] = out[0]
    self.data['lost_turn'] = out[2]
    self.data['lost_element'] = out[3]
    self.data['lost_plane'] = out[4]
def process_data(self):
    """Post-process the tracking data: dynamic aperture and frequency map."""
    self.calc_dynap()
    self.calc_fmap()
def calc_dynap(self):
    """Compute the dynamic-aperture border from the stored tracking data."""
    self.x_dynap, self.y_dynap = self._calc_dynap(
        self.data['x_in'], self.data['y_in'], self.data['lost_plane'])
def calc_fmap(self):
    """Run the frequency-map analysis and store tunes and diffusion."""
    fmap = super()._calc_fmap(self.data['rout'], self.data['lost_plane'])
    (self.x_freq_ini, self.x_freq_fin,
     self.y_freq_ini, self.y_freq_fin,
     self.x_diffusion, self.y_diffusion,
     self.diffusion) = fmap
def map_resons2real_plane(self, resons, maxdist=1e-5, min_diffusion=1e-3):
    """Map resonance lines in tune space back onto the x-y launch grid."""
    nux = self.params.intnux + self.x_freq_ini
    nuy = self.params.intnuy + self.y_freq_ini
    return super()._map_resons2real_plane(
        nux, nuy, self.diffusion, resons,
        maxdist=maxdist, mindiff=min_diffusion)
# Make figures
def make_figure_diffusion(
        self, contour=True, resons=None, orders=3, symmetry=1,
        maxdist=1e-5, min_diffusion=1e-3):
    """Plot log10(diffusion) over the x-y grid and over tune space.

    Returns (figure, xy_axes, tune_axes).  Resonance lines are drawn on
    the tune plot, the grid points close to them are highlighted in the
    real-space plot, and interactive click/hover callbacks are attached.
    """
    fig = _mpyplot.figure(figsize=(7, 7))
    grid = _mgridspec.GridSpec(2, 20)
    grid.update(
        left=0.15, right=0.86, top=0.97, bottom=0.1,
        hspace=0.25, wspace=0.25)
    axx = _mpyplot.subplot(grid[0, :19])
    ayy = _mpyplot.subplot(grid[1, :19])
    cbaxes = _mpyplot.subplot(grid[:, -1])
    # Tag the axes so the mouse callbacks can tell them apart.
    axx.name = 'XY'
    ayy.name = 'Tune'

    # Color scale: log10 of the diffusion, clipped to [-10, -2].
    diff = self.diffusion
    diff = _np.log10(diff)
    norm = _mcolors.Normalize(vmin=-10, vmax=-2)

    # Real-space panel: filled contours or a scatter, in millimetres.
    if contour:
        axx.contourf(
            self.data['x_in']*1e3,
            self.data['y_in']*1e3,
            diff.reshape(self.data['x_in'].shape),
            norm=norm, cmap='jet')
    else:
        axx.scatter(
            self.data['x_in'].ravel()*1e3,
            self.data['y_in'].ravel()*1e3,
            c=diff, norm=norm, cmap='jet')
    axx.grid(False)
    axx.set_xlabel('X [mm]')
    axx.set_ylabel('Y [mm]')

    # Tune-space panel, colored by the same diffusion scale.
    freqx = self.params.intnux + self.x_freq_ini
    freqy = self.params.intnuy + self.y_freq_ini
    line = ayy.scatter(
        freqx, freqy, c=diff, norm=norm, cmap='jet')
    ayy.set_xlabel(r'$\nu_x$')
    ayy.set_ylabel(r'$\nu_y$')

    if resons is None:
        bounds = ayy.axis()
        resons = self.calc_resonances_for_bounds(
            bounds, orders=orders, symmetry=symmetry)

    # Highlight, in real space, the launch points whose tunes fall close
    # to each resonance (one color per resonance order).
    map2xy = self.map_resons2real_plane(
        resons=resons, maxdist=maxdist, min_diffusion=min_diffusion)
    xdata = self.data['x_in'].ravel()*1e3
    ydata = self.data['y_in'].ravel()*1e3
    for (coefx, coefy, _), ind in zip(resons, map2xy):
        order = int(_np.abs(coefx) + _np.abs(coefy))
        idx = order - 1
        axx.scatter(
            xdata[ind], ydata[ind],
            c=self.COLORS[idx % len(self.COLORS)])
    self.add_resonances_to_axis(ayy, resons=resons)

    cbar = fig.colorbar(line, cax=cbaxes)
    cbar.set_label('Diffusion')

    # Interactive helpers: double-click marks a point in both panels;
    # hovering a resonance line shows its label in an annotation.
    fig.canvas.mpl_connect('button_press_event', self._onclick)
    ann = ayy.annotate(
        '', xy=(0, 0), xycoords='data',
        xytext=(20, 20), textcoords='offset points',
        arrowprops={'arrowstyle': '->'},
        bbox={'boxstyle': 'round', 'fc': 'w'})
    ann.set_visible(False)
    fig.canvas.mpl_connect('motion_notify_event', self._onhover)
    return fig, axx, ayy
    def _onclick(self, event):
        """Double-click handler: mark the nearest data point.

        Finds the sample closest to the click (in whichever plane was
        clicked) and highlights it in black on both the real-space ('XY')
        and tune-space ('Tune') axes.
        """
        if not event.dblclick or event.inaxes is None:
            return
        fig = event.inaxes.figure
        # Recover the two tagged axes created by make_figure_diffusion.
        ax_nu = [ax for ax in fig.axes if ax.name == 'Tune'][0]
        ax_xy = [ax for ax in fig.axes if ax.name == 'XY'][0]
        xdata = self.data['x_in'].ravel()*1e3
        ydata = self.data['y_in'].ravel()*1e3
        xfreq = self.params.intnux + self.x_freq_ini
        yfreq = self.params.intnuy + self.y_freq_ini
        # Distance from the click to every sample, in the clicked plane's
        # own coordinates.
        # NOTE(review): a double-click inside any other axes (e.g. the
        # colorbar) leaves xdiff/ydiff undefined and raises NameError —
        # consider an early return for that case.
        if event.inaxes == ax_nu:
            xdiff = xfreq - event.xdata
            ydiff = yfreq - event.ydata
        if event.inaxes == ax_xy:
            xdiff = xdata - event.xdata
            ydiff = ydata - event.ydata
        # nanargmin skips samples with NaN tunes/positions.
        ind = _np.nanargmin(_np.sqrt(xdiff*xdiff + ydiff*ydiff))
        ax_xy.scatter(xdata[ind], ydata[ind], c='k')
        ax_nu.scatter(xfreq[ind], yfreq[ind], c='k')
        fig.canvas.draw()
    def _onhover(self, event):
        """Hover handler: show the resonance label under the cursor.

        When the mouse is over a resonance line in the tune-space axes,
        the pre-created annotation is moved to the cursor and filled with
        the line's label; otherwise it is hidden.
        """
        if event.inaxes is None:
            return
        fig = event.inaxes.figure
        ax_nu = [ax for ax in fig.axes if ax.name == 'Tune'][0]
        # The (single) annotation artist created in make_figure_diffusion.
        chil = ax_nu.get_children()
        ann = [c for c in chil if isinstance(c, _mtext.Annotation)][0]
        if event.inaxes != ax_nu:
            ann.set_visible(False)
            fig.canvas.draw()
            return
        # Resonance lines are expected to carry a name like
        # 'reson:<label>' (presumably set by add_resonances_to_axis —
        # TODO confirm lines without a .name cannot appear here).
        for line in ax_nu.lines:
            if line.contains(event)[0] and line.name.startswith('reson'):
                ann.set_text(line.name.split(':')[1])
                ann.xy = (event.xdata, event.ydata)
                ann.set_visible(True)
                break
        else:
            # No line under the cursor: hide the annotation.
            ann.set_visible(False)
        fig.canvas.draw()
    def make_figure_map_real2tune_planes(
            self, resons=None, orders=3, symmetry=1):
        """Plot tune-space maps colored by the initial x and y positions.

        Two tune-plane scatter plots are drawn: the top one colored by the
        initial x coordinate and the bottom one by the initial y
        coordinate, each with its own colorbar and with resonance lines
        overlaid.

        Args:
            resons (list, optional): resonance coefficients to overlay;
                computed from the first plot's bounds when None.
            orders (int): maximum resonance order (when ``resons`` is None).
            symmetry (int): symmetry for the resonance calculation
                (when ``resons`` is None).

        Returns:
            tuple: ``(fig, axx, ayy)`` — figure, x-colored axes, y-colored
            axes.

        """
        fig = _mpyplot.figure(figsize=(7, 7))
        # 2 rows x 20 columns: 19 columns per map, last column split into
        # one colorbar per row.
        grid = _mgridspec.GridSpec(2, 20)
        grid.update(
            left=0.15, right=0.86, top=0.97, bottom=0.1,
            hspace=0.25, wspace=0.25)
        axx = _mpyplot.subplot(grid[0, :19])
        ayy = _mpyplot.subplot(grid[1, :19])
        cbx = _mpyplot.subplot(grid[0, -1])
        cby = _mpyplot.subplot(grid[1, -1])
        freqx = self.params.intnux + self.x_freq_ini
        freqy = self.params.intnuy + self.y_freq_ini
        # X: tune plane colored by initial horizontal position [mm].
        norm = _mcolors.Normalize(
            vmin=self.params.x_min*1e3,
            vmax=self.params.x_max*1e3)
        line = axx.scatter(
            freqx, freqy, c=self.data['x_in'].ravel()*1e3,
            norm=norm, cmap='jet')
        axx.set_xlabel(r'$\nu_x$')
        axx.set_ylabel(r'$\nu_y$')
        if resons is None:
            bounds = axx.axis()
            resons = self.calc_resonances_for_bounds(
                bounds, orders=orders, symmetry=symmetry)
        self.add_resonances_to_axis(axx, resons=resons)
        cbar = fig.colorbar(line, cax=cbx)
        cbar.set_label('X [mm]')
        # Y: same tune plane, colored by initial vertical position [mm].
        norm = _mcolors.Normalize(
            vmin=self.params.y_min*1e3,
            vmax=self.params.y_max*1e3)
        line = ayy.scatter(
            freqx, freqy, c=self.data['y_in'].ravel()*1e3,
            norm=norm, cmap='jet')
        ayy.set_xlabel(r'$\nu_x$')
        ayy.set_ylabel(r'$\nu_y$')
        self.add_resonances_to_axis(ayy, resons=resons)
        cbar = fig.colorbar(line, cax=cby)
        cbar.set_label('Y [mm]')
        return fig, axx, ayy
| mit |
jmmease/pandas | pandas/core/algorithms.py | 6 | 51528 | """
Generic data algorithms. This module is experimental at the moment and not
intended for public consumption
"""
from __future__ import division
from warnings import warn, catch_warnings
import numpy as np
from pandas.core.dtypes.cast import maybe_promote
from pandas.core.dtypes.generic import (
ABCSeries, ABCIndex,
ABCIndexClass, ABCCategorical)
from pandas.core.dtypes.common import (
is_unsigned_integer_dtype, is_signed_integer_dtype,
is_integer_dtype, is_complex_dtype,
is_object_dtype,
is_categorical_dtype, is_sparse,
is_period_dtype,
is_numeric_dtype, is_float_dtype,
is_bool_dtype, needs_i8_conversion,
is_categorical, is_datetimetz,
is_datetime64_any_dtype, is_datetime64tz_dtype,
is_timedelta64_dtype, is_interval_dtype,
is_scalar, is_list_like,
_ensure_platform_int, _ensure_object,
_ensure_float64, _ensure_uint64,
_ensure_int64)
from pandas.compat.numpy import _np_version_under1p10
from pandas.core.dtypes.missing import isna
from pandas.core import common as com
from pandas._libs import algos, lib, hashtable as htable
from pandas._libs.tslib import iNaT
# --------------- #
# dtype access #
# --------------- #
def _ensure_data(values, dtype=None):
    """
    routine to ensure that our data is of the correct
    input dtype for lower-level routines
    This will coerce:
    - ints -> int64
    - uint -> uint64
    - bool -> uint64 (TODO this should be uint8)
    - datetimelike -> i8
    - datetime64tz -> i8 (in local tz)
    - categorical -> codes
    Parameters
    ----------
    values : array-like
    dtype : pandas_dtype, optional
        coerce to this dtype
    Returns
    -------
    (ndarray, pandas_dtype, algo dtype as a string)
    """
    # we check some simple dtypes first
    try:
        if is_object_dtype(dtype):
            # explicit object request wins over whatever `values` holds
            return _ensure_object(np.asarray(values)), 'object', 'object'
        if is_bool_dtype(values) or is_bool_dtype(dtype):
            # we are actually coercing to uint64
            # until our algos suppport uint8 directly (see TODO)
            return np.asarray(values).astype('uint64'), 'bool', 'uint64'
        elif is_signed_integer_dtype(values) or is_signed_integer_dtype(dtype):
            return _ensure_int64(values), 'int64', 'int64'
        elif (is_unsigned_integer_dtype(values) or
              is_unsigned_integer_dtype(dtype)):
            return _ensure_uint64(values), 'uint64', 'uint64'
        elif is_float_dtype(values) or is_float_dtype(dtype):
            return _ensure_float64(values), 'float64', 'float64'
        elif is_object_dtype(values) and dtype is None:
            return _ensure_object(np.asarray(values)), 'object', 'object'
        elif is_complex_dtype(values) or is_complex_dtype(dtype):
            # ignore the fact that we are casting to float
            # which discards complex parts
            with catch_warnings(record=True):
                values = _ensure_float64(values)
            return values, 'float64', 'float64'
    except (TypeError, ValueError):
        # if we are trying to coerce to a dtype
        # and it is incompat this will fall thru to here
        return _ensure_object(values), 'object', 'object'
    # datetimelike: represent as i8 (nanoseconds since epoch / timedelta
    # nanoseconds), reconstructed later by _reconstruct_data
    if (needs_i8_conversion(values) or
            is_period_dtype(dtype) or
            is_datetime64_any_dtype(dtype) or
            is_timedelta64_dtype(dtype)):
        if is_period_dtype(values) or is_period_dtype(dtype):
            from pandas import PeriodIndex
            values = PeriodIndex(values)
            dtype = values.dtype
        elif is_timedelta64_dtype(values) or is_timedelta64_dtype(dtype):
            from pandas import TimedeltaIndex
            values = TimedeltaIndex(values)
            dtype = values.dtype
        else:
            # Datetime
            from pandas import DatetimeIndex
            values = DatetimeIndex(values)
            dtype = values.dtype
        return values.asi8, dtype, 'int64'
    elif (is_categorical_dtype(values) and
          (is_categorical_dtype(dtype) or dtype is None)):
        # operate on the integer category codes, not the categories
        values = getattr(values, 'values', values)
        values = values.codes
        dtype = 'category'
        # we are actually coercing to int64
        # until our algos suppport int* directly (not all do)
        values = _ensure_int64(values)
        return values, dtype, 'int64'
    # we have failed, return object
    values = np.asarray(values)
    return _ensure_object(values), 'object', 'object'
def _reconstruct_data(values, dtype, original):
"""
reverse of _ensure_data
Parameters
----------
values : ndarray
dtype : pandas_dtype
original : ndarray-like
Returns
-------
Index for extension types, otherwise ndarray casted to dtype
"""
from pandas import Index
if is_categorical_dtype(dtype):
pass
elif is_datetime64tz_dtype(dtype) or is_period_dtype(dtype):
values = Index(original)._shallow_copy(values, name=None)
elif is_bool_dtype(dtype):
values = values.astype(dtype)
# we only support object dtypes bool Index
if isinstance(original, Index):
values = values.astype(object)
elif dtype is not None:
values = values.astype(dtype)
return values
def _ensure_arraylike(values):
    """
    ensure that we are arraylike if not already
    """
    if not isinstance(values, (np.ndarray, ABCCategorical,
                               ABCIndexClass, ABCSeries)):
        inferred = lib.infer_dtype(values)
        if inferred in ['mixed', 'string', 'unicode']:
            # object-ish contents: build an object array element-wise so
            # np.asarray cannot, e.g., turn a list of tuples into a 2-D
            # array
            if isinstance(values, tuple):
                values = list(values)
            values = lib.list_to_object_array(values)
        else:
            # homogeneous scalar contents: let numpy pick the dtype
            values = np.asarray(values)
    return values
# algo-dtype string -> (hashtable class, growable vector class) used by
# _get_hashtable_algo / factorize
_hashtables = {
    'float64': (htable.Float64HashTable, htable.Float64Vector),
    'uint64': (htable.UInt64HashTable, htable.UInt64Vector),
    'int64': (htable.Int64HashTable, htable.Int64Vector),
    'string': (htable.StringHashTable, htable.ObjectVector),
    'object': (htable.PyObjectHashTable, htable.ObjectVector)
}
def _get_hashtable_algo(values):
    """
    Select the hashtable/vector implementations matching `values`.

    Parameters
    ----------
    values : arraylike

    Returns
    -------
    tuples(hashtable class,
           vector class,
           values,
           dtype,
           ndtype)
    """
    values, dtype, ndtype = _ensure_data(values)

    # its cheaper to use a String Hash Table than Object when the object
    # array actually holds only strings
    if ndtype == 'object' and lib.infer_dtype(values) in ['string']:
        ndtype = 'string'

    htable, table = _hashtables[ndtype]
    return (htable, table, values, dtype, ndtype)
def _get_data_algo(values, func_map):
    # Pick the implementation from `func_map` that matches the algo-dtype
    # of `values` (after coercion by _ensure_data); returns (func, values).
    if is_categorical_dtype(values):
        # categoricals are ranked on their ordering codes
        values = values._values_for_rank()
    values, dtype, ndtype = _ensure_data(values)
    if ndtype == 'object':
        # its cheaper to use a String Hash Table than Object
        if lib.infer_dtype(values) in ['string']:
            ndtype = 'string'
    # fall back to the object implementation for unknown ndtypes
    f = func_map.get(ndtype, func_map['object'])
    return f, values
# --------------- #
# top-level algos #
# --------------- #
def match(to_match, values, na_sentinel=-1):
    """
    Compute locations of to_match into values
    Parameters
    ----------
    to_match : array-like
        values to find positions of
    values : array-like
        Unique set of values
    na_sentinel : int, default -1
        Value to mark "not found"
    Examples
    --------
    Returns
    -------
    match : ndarray of integers
    """
    values = com._asarray_tuplesafe(values)
    htable, _, values, dtype, ndtype = _get_hashtable_algo(values)
    # coerce to_match to the same algo-dtype so the lookup compares like
    # with like
    to_match, _, _ = _ensure_data(to_match, dtype)
    # size hint only; capped at 1M buckets
    table = htable(min(len(to_match), 1000000))
    table.map_locations(values)
    result = table.lookup(to_match)
    if na_sentinel != -1:
        # replace but return a numpy array
        # use a Series because it handles dtype conversions properly
        from pandas import Series
        result = Series(result.ravel()).replace(-1, na_sentinel).values.\
            reshape(result.shape)
    return result
def unique(values):
    """
    Hash table-based unique. Uniques are returned in order
    of appearance. This does NOT sort.
    Significantly faster than numpy.unique. Includes NA values.
    Parameters
    ----------
    values : 1d array-like
    Returns
    -------
    unique values.
      - If the input is an Index, the return is an Index
      - If the input is a Categorical dtype, the return is a Categorical
      - If the input is a Series/ndarray, the return will be an ndarray
    Examples
    --------
    >>> pd.unique(pd.Series([2, 1, 3, 3]))
    array([2, 1, 3])
    >>> pd.unique(pd.Series([2] + [1] * 5))
    array([2, 1])
    >>> pd.unique(Series([pd.Timestamp('20160101'),
    ...                  pd.Timestamp('20160101')]))
    array(['2016-01-01T00:00:00.000000000'], dtype='datetime64[ns]')
    >>> pd.unique(pd.Series([pd.Timestamp('20160101', tz='US/Eastern'),
    ...                      pd.Timestamp('20160101', tz='US/Eastern')]))
    array([Timestamp('2016-01-01 00:00:00-0500', tz='US/Eastern')],
          dtype=object)
    >>> pd.unique(pd.Index([pd.Timestamp('20160101', tz='US/Eastern'),
    ...                     pd.Timestamp('20160101', tz='US/Eastern')]))
    DatetimeIndex(['2016-01-01 00:00:00-05:00'],
    ...           dtype='datetime64[ns, US/Eastern]', freq=None)
    >>> pd.unique(list('baabc'))
    array(['b', 'a', 'c'], dtype=object)
    An unordered Categorical will return categories in the
    order of appearance.
    >>> pd.unique(Series(pd.Categorical(list('baabc'))))
    [b, a, c]
    Categories (3, object): [b, a, c]
    >>> pd.unique(Series(pd.Categorical(list('baabc'),
    ...                                 categories=list('abc'))))
    [b, a, c]
    Categories (3, object): [b, a, c]
    An ordered Categorical preserves the category ordering.
    >>> pd.unique(Series(pd.Categorical(list('baabc'),
    ...                                 categories=list('abc'),
    ...                                 ordered=True)))
    [b, a, c]
    Categories (3, object): [a < b < c]
    An array of tuples
    >>> pd.unique([('a', 'b'), ('b', 'a'), ('a', 'c'), ('b', 'a')])
    array([('a', 'b'), ('b', 'a'), ('a', 'c')], dtype=object)
    See Also
    --------
    pandas.Index.unique
    pandas.Series.unique
    """
    values = _ensure_arraylike(values)
    # categorical is a fast-path
    # this will coerce Categorical, CategoricalIndex,
    # and category dtypes Series to same return of Category
    if is_categorical_dtype(values):
        # BUG FIX: the attribute name was misspelled as '.values' (with a
        # leading dot), so getattr always fell back to the default and a
        # categorical Series was never unwrapped.  The result was the same
        # (Series.unique() also returns a Categorical), but the unwrap now
        # works as intended and no dead getattr remains.
        values = getattr(values, 'values', values)
        return values.unique()
    original = values
    htable, _, values, dtype, ndtype = _get_hashtable_algo(values)
    table = htable(len(values))
    uniques = table.unique(values)
    # restore the caller-visible dtype/container (e.g. datetime64, Index)
    uniques = _reconstruct_data(uniques, dtype, original)
    if isinstance(original, ABCSeries) and is_datetime64tz_dtype(dtype):
        # we are special casing datetime64tz_dtype
        # to return an object array of tz-aware Timestamps
        # TODO: it must return DatetimeArray with tz in pandas 2.0
        uniques = uniques.asobject.values
    return uniques
# backward-compatible alias
unique1d = unique
def isin(comps, values):
    """
    Compute the isin boolean array
    Parameters
    ----------
    comps: array-like
    values: array-like
    Returns
    -------
    boolean array same length as comps
    """
    if not is_list_like(comps):
        raise TypeError("only list-like objects are allowed to be passed"
                        " to isin(), you passed a [{comps_type}]"
                        .format(comps_type=type(comps).__name__))
    if not is_list_like(values):
        raise TypeError("only list-like objects are allowed to be passed"
                        " to isin(), you passed a [{values_type}]"
                        .format(values_type=type(values).__name__))
    if not isinstance(values, (ABCIndex, ABCSeries, np.ndarray)):
        values = lib.list_to_object_array(list(values))
    # coerce both sides to a common algo-dtype before comparing
    comps, dtype, _ = _ensure_data(comps)
    values, _, _ = _ensure_data(values, dtype=dtype)
    # default: object hashtable membership test
    f = lambda x, y: htable.ismember_object(x, values)
    # GH16012
    # Ensure np.in1d doesn't get object types or it *may* throw an exception
    # faster for larger cases to use np.in1d
    if len(comps) > 1000000 and not is_object_dtype(comps):
        f = lambda x, y: np.in1d(x, y)
    elif is_integer_dtype(comps):
        try:
            values = values.astype('int64', copy=False)
            comps = comps.astype('int64', copy=False)
            f = lambda x, y: htable.ismember_int64(x, y)
        except (TypeError, ValueError):
            # mixed/incompatible ints: fall back to object membership
            values = values.astype(object)
            comps = comps.astype(object)
    elif is_float_dtype(comps):
        try:
            values = values.astype('float64', copy=False)
            comps = comps.astype('float64', copy=False)
            # NaN != NaN, so membership of NaN needs special handling
            checknull = isna(values).any()
            f = lambda x, y: htable.ismember_float64(x, y, checknull)
        except (TypeError, ValueError):
            values = values.astype(object)
            comps = comps.astype(object)
    return f(comps, values)
def factorize(values, sort=False, order=None, na_sentinel=-1, size_hint=None):
    """
    Encode input values as an enumerated type or categorical variable
    Parameters
    ----------
    values : ndarray (1-d)
        Sequence
    sort : boolean, default False
        Sort by values
    na_sentinel : int, default -1
        Value to mark "not found"
    size_hint : hint to the hashtable sizer
    Returns
    -------
    labels : the indexer to the original array
    uniques : ndarray (1-d) or Index
        the unique values. Index is returned when passed values is Index or
        Series
    note: an array of Periods will ignore sort as it returns an always sorted
    PeriodIndex
    """
    values = _ensure_arraylike(values)
    original = values
    values, dtype, _ = _ensure_data(values)
    (hash_klass, vec_klass), values = _get_data_algo(values, _hashtables)
    table = hash_klass(size_hint or len(values))
    uniques = vec_klass()
    # integer arrays cannot hold NaN, so the null check can be skipped
    check_nulls = not is_integer_dtype(original)
    labels = table.get_labels(values, uniques, 0, na_sentinel, check_nulls)
    labels = _ensure_platform_int(labels)
    uniques = uniques.to_array()
    if sort and len(uniques) > 0:
        from pandas.core.sorting import safe_sort
        # sort uniques and remap labels accordingly
        uniques, labels = safe_sort(uniques, labels, na_sentinel=na_sentinel,
                                    assume_unique=True)
    uniques = _reconstruct_data(uniques, dtype, original)
    # return original tenor
    if isinstance(original, ABCIndexClass):
        uniques = original._shallow_copy(uniques, name=None)
    elif isinstance(original, ABCSeries):
        from pandas import Index
        uniques = Index(uniques)
    return labels, uniques
def value_counts(values, sort=True, ascending=False, normalize=False,
                 bins=None, dropna=True):
    """
    Compute a histogram of the counts of non-null values.
    Parameters
    ----------
    values : ndarray (1-d)
    sort : boolean, default True
        Sort by values
    ascending : boolean, default False
        Sort in ascending order
    normalize: boolean, default False
        If True then compute a relative histogram
    bins : integer, optional
        Rather than count values, group them into half-open bins,
        convenience for pd.cut, only works with numeric data
    dropna : boolean, default True
        Don't include counts of NaN
    Returns
    -------
    value_counts : Series
    """
    from pandas.core.series import Series, Index
    name = getattr(values, 'name', None)
    if bins is not None:
        try:
            from pandas.core.reshape.tile import cut
            values = Series(values)
            ii = cut(values, bins, include_lowest=True)
        except TypeError:
            raise TypeError("bins argument only works with numeric data.")
        # count, remove nulls (from the index), and but the bins
        result = ii.value_counts(dropna=dropna)
        result = result[result.index.notna()]
        result.index = result.index.astype('interval')
        result = result.sort_index()
        # if we are dropna and we have NO values
        if dropna and (result.values == 0).all():
            result = result.iloc[0:0]
        # normalizing is by len of all (regardless of dropna)
        counts = np.array([len(ii)])
    else:
        if is_categorical_dtype(values) or is_sparse(values):
            # handle Categorical and sparse,
            # these types implement value_counts themselves
            result = Series(values).values.value_counts(dropna=dropna)
            result.name = name
            counts = result.values
        else:
            keys, counts = _value_counts_arraylike(values, dropna)
            if not isinstance(keys, Index):
                keys = Index(keys)
            result = Series(counts, index=keys, name=name)
    if sort:
        result = result.sort_values(ascending=ascending)
    if normalize:
        # relative frequencies: divide by the total count
        result = result / float(counts.sum())
    return result
def _value_counts_arraylike(values, dropna):
    """
    Parameters
    ----------
    values : arraylike
    dropna : boolean
    Returns
    -------
    (uniques, counts)
    """
    values = _ensure_arraylike(values)
    original = values
    values, dtype, ndtype = _ensure_data(values)
    if needs_i8_conversion(dtype):
        # i8: datetimelike values travel as int64 with iNaT marking NaT
        keys, counts = htable.value_count_int64(values, dropna)
        if dropna:
            msk = keys != iNaT
            keys, counts = keys[msk], counts[msk]
    else:
        # ndarray like
        # TODO: handle uint8
        # dispatch to the dtype-specialized hashtable counter
        f = getattr(htable, "value_count_{dtype}".format(dtype=ndtype))
        keys, counts = f(values, dropna)
        mask = isna(values)
        if not dropna and mask.any():
            # the hashtable may not have emitted a NaN key; prepend one
            # with the total null count
            if not isna(keys).any():
                keys = np.insert(keys, 0, np.NaN)
                counts = np.insert(counts, 0, mask.sum())
    keys = _reconstruct_data(keys, original.dtype, original)
    return keys, counts
def duplicated(values, keep='first'):
    """
    Return boolean ndarray denoting duplicate values.
    .. versionadded:: 0.19.0
    Parameters
    ----------
    values : ndarray-like
        Array over which to check for duplicate values.
    keep : {'first', 'last', False}, default 'first'
        - ``first`` : Mark duplicates as ``True`` except for the first
          occurrence.
        - ``last`` : Mark duplicates as ``True`` except for the last
          occurrence.
        - False : Mark all duplicates as ``True``.
    Returns
    -------
    duplicated : ndarray
    """
    # normalize to one of the algo dtypes, then dispatch to the matching
    # hashtable routine (e.g. ``htable.duplicated_int64``)
    values, _, ndtype = _ensure_data(values)
    checker = getattr(htable, 'duplicated_{dtype}'.format(dtype=ndtype))
    return checker(values, keep=keep)
def mode(values):
    """
    Returns the mode(s) of an array.
    Parameters
    ----------
    values : array-like
        Array over which to check for duplicate values.
    Returns
    -------
    mode : Series
    """
    from pandas import Series
    values = _ensure_arraylike(values)
    original = values
    # categorical is a fast-path
    if is_categorical_dtype(values):
        if isinstance(values, Series):
            return Series(values.values.mode(), name=values.name)
        return values.mode()
    values, dtype, ndtype = _ensure_data(values)
    # TODO: this should support float64
    if ndtype not in ['int64', 'uint64', 'object']:
        # no specialized mode routine for this dtype: go through object
        ndtype = 'object'
        values = _ensure_object(values)
    f = getattr(htable, "mode_{dtype}".format(dtype=ndtype))
    result = f(values)
    try:
        result = np.sort(result)
    except TypeError as e:
        # unorderable (e.g. mixed) values: return modes unsorted
        warn("Unable to sort modes: {error}".format(error=e))
    result = _reconstruct_data(result, original.dtype, original)
    return Series(result)
def rank(values, axis=0, method='average', na_option='keep',
         ascending=True, pct=False):
    """
    Rank the values along a given axis.
    Parameters
    ----------
    values : array-like
        Array whose values will be ranked. The number of dimensions in this
        array must not exceed 2.
    axis : int, default 0
        Axis over which to perform rankings.
    method : {'average', 'min', 'max', 'first', 'dense'}, default 'average'
        The method by which tiebreaks are broken during the ranking.
    na_option : {'keep', 'top'}, default 'keep'
        The method by which NaNs are placed in the ranking.
        - ``keep``: rank each NaN value with a NaN ranking
        - ``top``: replace each NaN with either +/- inf so that they
                   there are ranked at the top
    ascending : boolean, default True
        Whether or not the elements should be ranked in ascending order.
    pct : boolean, default False
        Whether or not to the display the returned rankings in integer form
        (e.g. 1, 2, 3) or in percentile form (e.g. 0.333..., 0.666..., 1).
    Returns
    -------
    ranks : ndarray of the same shape as `values`
    """
    # dispatch to the dtype-specialized 1-d or 2-d cython ranker
    if values.ndim == 1:
        f, values = _get_data_algo(values, _rank1d_functions)
        ranks = f(values, ties_method=method, ascending=ascending,
                  na_option=na_option, pct=pct)
    elif values.ndim == 2:
        f, values = _get_data_algo(values, _rank2d_functions)
        ranks = f(values, axis=axis, ties_method=method,
                  ascending=ascending, na_option=na_option, pct=pct)
    else:
        raise TypeError("Array with ndim > 2 are not supported.")
    return ranks
def checked_add_with_arr(arr, b, arr_mask=None, b_mask=None):
    """
    Perform array addition that checks for underflow and overflow.
    Performs the addition of an int64 array and an int64 integer (or array)
    but checks that they do not result in overflow first. For elements that
    are indicated to be NaN, whether or not there is overflow for that element
    is automatically ignored.
    Parameters
    ----------
    arr : array addend.
    b : array or scalar addend.
    arr_mask : boolean array or None
        array indicating which elements to exclude from checking
    b_mask : boolean array or boolean or None
        array or scalar indicating which element(s) to exclude from checking
    Returns
    -------
    sum : An array for elements x + b for each element x in arr if b is
          a scalar or an array for elements x + y for each element pair
          (x, y) in (arr, b).
    Raises
    ------
    OverflowError if any x + y exceeds the maximum or minimum int64 value.
    """
    def _broadcast(arr_or_scalar, shape):
        """
        Helper function to broadcast arrays / scalars to the desired shape.
        """
        if _np_version_under1p10:
            # np.broadcast_to is not available before numpy 1.10
            if lib.isscalar(arr_or_scalar):
                out = np.empty(shape)
                out.fill(arr_or_scalar)
            else:
                out = arr_or_scalar
        else:
            out = np.broadcast_to(arr_or_scalar, shape)
        return out
    # For performance reasons, we broadcast 'b' to the new array 'b2'
    # so that it has the same size as 'arr'.
    b2 = _broadcast(b, arr.shape)
    if b_mask is not None:
        # We do the same broadcasting for b_mask as well.
        b2_mask = _broadcast(b_mask, arr.shape)
    else:
        b2_mask = None
    # For elements that are NaN, regardless of their value, we should
    # ignore whether they overflow or not when doing the checked add.
    if arr_mask is not None and b2_mask is not None:
        not_nan = np.logical_not(arr_mask | b2_mask)
    elif arr_mask is not None:
        not_nan = np.logical_not(arr_mask)
    elif b_mask is not None:
        not_nan = np.logical_not(b2_mask)
    else:
        not_nan = np.empty(arr.shape, dtype=bool)
        not_nan.fill(True)
    # gh-14324: For each element in 'arr' and its corresponding element
    # in 'b2', we check the sign of the element in 'b2'. If it is positive,
    # we then check whether its sum with the element in 'arr' exceeds
    # np.iinfo(np.int64).max. If so, we have an overflow error. If it
    # it is negative, we then check whether its sum with the element in
    # 'arr' exceeds np.iinfo(np.int64).min. If so, we have an overflow
    # error as well.
    mask1 = b2 > 0
    mask2 = b2 < 0
    if not mask1.any():
        # all addends <= 0: only underflow is possible
        to_raise = ((np.iinfo(np.int64).min - b2 > arr) & not_nan).any()
    elif not mask2.any():
        # all addends >= 0: only overflow is possible
        to_raise = ((np.iinfo(np.int64).max - b2 < arr) & not_nan).any()
    else:
        # mixed signs: check overflow on the positive side and underflow
        # on the negative side separately
        to_raise = (((np.iinfo(np.int64).max -
                      b2[mask1] < arr[mask1]) & not_nan[mask1]).any() or
                    ((np.iinfo(np.int64).min -
                      b2[mask2] > arr[mask2]) & not_nan[mask2]).any())
    if to_raise:
        raise OverflowError("Overflow in int64 addition")
    return arr + b
# algo-dtype -> cython 1-d rank implementation (used by rank via
# _get_data_algo)
_rank1d_functions = {
    'float64': algos.rank_1d_float64,
    'int64': algos.rank_1d_int64,
    'uint64': algos.rank_1d_uint64,
    'object': algos.rank_1d_object
}
# algo-dtype -> cython 2-d rank implementation
_rank2d_functions = {
    'float64': algos.rank_2d_float64,
    'int64': algos.rank_2d_int64,
    'uint64': algos.rank_2d_uint64,
    'object': algos.rank_2d_object
}
def quantile(x, q, interpolation_method='fraction'):
    """
    Compute sample quantile or quantiles of the input array. For example, q=0.5
    computes the median.
    The `interpolation_method` parameter supports three values, namely
    `fraction` (default), `lower` and `higher`. Interpolation is done only,
    if the desired quantile lies between two data points `i` and `j`. For
    `fraction`, the result is an interpolated value between `i` and `j`;
    for `lower`, the result is `i`, for `higher` the result is `j`.
    Parameters
    ----------
    x : ndarray
        Values from which to extract score.
    q : scalar or array
        Percentile at which to extract score.
    interpolation_method : {'fraction', 'lower', 'higher'}, optional
        This optional parameter specifies the interpolation method to use,
        when the desired quantile lies between two data points `i` and `j`:
        - fraction: `i + (j - i)*fraction`, where `fraction` is the
                    fractional part of the index surrounded by `i` and `j`.
        - lower: `i`.
        - higher: `j`.
    Returns
    -------
    score : float
        Score at percentile.
    Examples
    --------
    >>> from scipy import stats
    >>> a = np.arange(100)
    >>> stats.scoreatpercentile(a, 50)
    49.5
    """
    x = np.asarray(x)
    # drop missing values before sorting
    mask = isna(x)
    x = x[~mask]
    values = np.sort(x)

    def _interpolate(a, b, fraction):
        """Returns the point at the given fraction between a and b, where
        'fraction' must be between 0 and 1.
        """
        return a + (b - a) * fraction

    def _get_score(at):
        if len(values) == 0:
            return np.nan
        idx = at * (len(values) - 1)
        if idx % 1 == 0:
            score = values[int(idx)]
        else:
            if interpolation_method == 'fraction':
                score = _interpolate(values[int(idx)], values[int(idx) + 1],
                                     idx % 1)
            elif interpolation_method == 'lower':
                # BUG FIX: np.floor/np.ceil return floats, and indexing an
                # ndarray with a float raises IndexError; cast to int.
                score = values[int(np.floor(idx))]
            elif interpolation_method == 'higher':
                score = values[int(np.ceil(idx))]
            else:
                raise ValueError("interpolation_method can only be 'fraction' "
                                 ", 'lower' or 'higher'")
        return score

    if is_scalar(q):
        return _get_score(q)
    else:
        q = np.asarray(q, np.float64)
        return algos.arrmap_float64(q, _get_score)
# --------------- #
# select n #
# --------------- #
class SelectN(object):
    # Base class for nlargest/nsmallest; subclasses implement compute().

    def __init__(self, obj, n, keep):
        # obj: the Series/DataFrame to select from
        # n: number of rows/values to select
        # keep: which duplicates to keep ('first' or 'last')
        self.obj = obj
        self.n = n
        self.keep = keep
        if self.keep not in ('first', 'last'):
            raise ValueError('keep must be either "first", "last"')

    def nlargest(self):
        return self.compute('nlargest')

    def nsmallest(self):
        return self.compute('nsmallest')

    @staticmethod
    def is_valid_dtype_n_method(dtype):
        """
        Helper function to determine if dtype is valid for
        nsmallest/nlargest methods
        """
        # numeric (but not complex, which has no total order) or
        # datetimelike dtypes only
        return ((is_numeric_dtype(dtype) and not is_complex_dtype(dtype)) or
                needs_i8_conversion(dtype))
class SelectNSeries(SelectN):
    """
    Implement n largest/smallest for Series
    Parameters
    ----------
    obj : Series
    n : int
    keep : {'first', 'last'}, default 'first'
    Returns
    -------
    nordered : Series
    """

    def compute(self, method):
        # method is 'nlargest' or 'nsmallest'
        n = self.n
        dtype = self.obj.dtype
        if not self.is_valid_dtype_n_method(dtype):
            raise TypeError("Cannot use method '{method}' with "
                            "dtype {dtype}".format(method=method,
                                                   dtype=dtype))
        if n <= 0:
            return self.obj[[]]
        dropped = self.obj.dropna()
        # slow method: when asking for at least as many values as exist,
        # a full sort is as good as a partial selection
        if n >= len(self.obj):
            reverse_it = (self.keep == 'last' or method == 'nlargest')
            ascending = method == 'nsmallest'
            slc = np.s_[::-1] if reverse_it else np.s_[:]
            return dropped[slc].sort_values(ascending=ascending).head(n)
        # fast method: partial selection via kth_smallest
        arr, _, _ = _ensure_data(dropped.values)
        if method == 'nlargest':
            # negate so that nlargest reduces to nsmallest
            arr = -arr
        if self.keep == 'last':
            arr = arr[::-1]
        narr = len(arr)
        n = min(n, narr)
        # kth_smallest partially sorts arr in place, hence the copy
        kth_val = algos.kth_smallest(arr.copy(), n - 1)
        ns, = np.nonzero(arr <= kth_val)
        # stable (mergesort) ordering keeps ties in original order
        inds = ns[arr[ns].argsort(kind='mergesort')][:n]
        if self.keep == 'last':
            # reverse indices
            inds = narr - 1 - inds
        return dropped.iloc[inds]
class SelectNFrame(SelectN):
    """
    Implement n largest/smallest for DataFrame
    Parameters
    ----------
    obj : DataFrame
    n : int
    keep : {'first', 'last'}, default 'first'
    columns : list or str
    Returns
    -------
    nordered : DataFrame
    """

    def __init__(self, obj, n, keep, columns):
        super(SelectNFrame, self).__init__(obj, n, keep)
        # normalize a single column label into a one-element list
        if not is_list_like(columns):
            columns = [columns]
        columns = list(columns)
        self.columns = columns

    def compute(self, method):
        # method is 'nlargest' or 'nsmallest'
        from pandas import Int64Index
        n = self.n
        frame = self.obj
        columns = self.columns
        # validate every ranking column up front
        for column in columns:
            dtype = frame[column].dtype
            if not self.is_valid_dtype_n_method(dtype):
                raise TypeError((
                    "Column {column!r} has dtype {dtype}, cannot use method "
                    "{method!r} with this dtype"
                ).format(column=column, dtype=dtype, method=method))

        def get_indexer(current_indexer, other_indexer):
            """Helper function to concat `current_indexer` and `other_indexer`
            depending on `method`
            """
            if method == 'nsmallest':
                return current_indexer.append(other_indexer)
            else:
                return other_indexer.append(current_indexer)
        # Below we save and reset the index in case index contains duplicates
        original_index = frame.index
        cur_frame = frame = frame.reset_index(drop=True)
        cur_n = n
        indexer = Int64Index([])
        for i, column in enumerate(columns):
            # For each column we apply method to cur_frame[column].
            # If it is the last column in columns, or if the values
            # returned are unique in frame[column] we save this index
            # and break
            # Otherwise we must save the index of the non duplicated values
            # and set the next cur_frame to cur_frame filtered on all
            # duplcicated values (#GH15297)
            series = cur_frame[column]
            values = getattr(series, method)(cur_n, keep=self.keep)
            is_last_column = len(columns) - 1 == i
            if is_last_column or values.nunique() == series.isin(values).sum():
                # Last column in columns or values are unique in
                # series => values
                # is all that matters
                indexer = get_indexer(indexer, values.index)
                break
            duplicated_filter = series.duplicated(keep=False)
            duplicated = values[duplicated_filter]
            non_duplicated = values[~duplicated_filter]
            indexer = get_indexer(indexer, non_duplicated.index)
            # Must set cur frame to include all duplicated values
            # to consider for the next column, we also can reduce
            # cur_n by the current length of the indexer
            cur_frame = cur_frame[series.isin(duplicated)]
            cur_n = n - len(indexer)
        frame = frame.take(indexer)
        # Restore the index on frame
        frame.index = original_index.take(indexer)
        return frame
# ------- ## ---- #
# take #
# ---- #
def _view_wrapper(f, arr_dtype=None, out_dtype=None, fill_wrap=None):
def wrapper(arr, indexer, out, fill_value=np.nan):
if arr_dtype is not None:
arr = arr.view(arr_dtype)
if out_dtype is not None:
out = out.view(out_dtype)
if fill_wrap is not None:
fill_value = fill_wrap(fill_value)
f(arr, indexer, out, fill_value=fill_value)
return wrapper
def _convert_wrapper(f, conv_dtype):
def wrapper(arr, indexer, out, fill_value=np.nan):
arr = arr.astype(conv_dtype)
f(arr, indexer, out, fill_value=fill_value)
return wrapper
def _take_2d_multi_object(arr, indexer, out, fill_value, mask_info):
# this is not ideal, performance-wise, but it's better than raising
# an exception (best to optimize in Cython to avoid getting here)
row_idx, col_idx = indexer
if mask_info is not None:
(row_mask, col_mask), (row_needs, col_needs) = mask_info
else:
row_mask = row_idx == -1
col_mask = col_idx == -1
row_needs = row_mask.any()
col_needs = col_mask.any()
if fill_value is not None:
if row_needs:
out[row_mask, :] = fill_value
if col_needs:
out[:, col_mask] = fill_value
for i in range(len(row_idx)):
u_ = row_idx[i]
for j in range(len(col_idx)):
v = col_idx[j]
out[i, j] = arr[u_, v]
def _take_nd_object(arr, indexer, out, axis, fill_value, mask_info):
    """Object-dtype fallback for take_nd.

    Takes from ``arr`` along ``axis`` into ``out`` and writes
    ``fill_value`` wherever ``indexer`` is -1.
    """
    if mask_info is None:
        mask = indexer == -1
        needs_masking = mask.any()
    else:
        mask, needs_masking = mask_info
    if arr.dtype != out.dtype:
        arr = arr.astype(out.dtype)
    if arr.shape[axis] > 0:
        arr.take(_ensure_platform_int(indexer), axis=axis, out=out)
    if needs_masking:
        # build a selector that applies the mask only along `axis`
        fill_sel = [slice(None)] * arr.ndim
        fill_sel[axis] = mask
        out[tuple(fill_sel)] = fill_value
# Dispatch table mapping (arr dtype name, out dtype name) -> specialized
# Cython 1-D take routine.  bool/datetime entries re-view the buffers as
# uint8/int64 via _view_wrapper so the integer specializations can be reused.
_take_1d_dict = {
    ('int8', 'int8'): algos.take_1d_int8_int8,
    ('int8', 'int32'): algos.take_1d_int8_int32,
    ('int8', 'int64'): algos.take_1d_int8_int64,
    ('int8', 'float64'): algos.take_1d_int8_float64,
    ('int16', 'int16'): algos.take_1d_int16_int16,
    ('int16', 'int32'): algos.take_1d_int16_int32,
    ('int16', 'int64'): algos.take_1d_int16_int64,
    ('int16', 'float64'): algos.take_1d_int16_float64,
    ('int32', 'int32'): algos.take_1d_int32_int32,
    ('int32', 'int64'): algos.take_1d_int32_int64,
    ('int32', 'float64'): algos.take_1d_int32_float64,
    ('int64', 'int64'): algos.take_1d_int64_int64,
    ('int64', 'float64'): algos.take_1d_int64_float64,
    ('float32', 'float32'): algos.take_1d_float32_float32,
    ('float32', 'float64'): algos.take_1d_float32_float64,
    ('float64', 'float64'): algos.take_1d_float64_float64,
    ('object', 'object'): algos.take_1d_object_object,
    ('bool', 'bool'): _view_wrapper(algos.take_1d_bool_bool, np.uint8,
                                    np.uint8),
    ('bool', 'object'): _view_wrapper(algos.take_1d_bool_object, np.uint8,
                                      None),
    ('datetime64[ns]', 'datetime64[ns]'): _view_wrapper(
        algos.take_1d_int64_int64, np.int64, np.int64, np.int64)
}
# Dispatch table for 2-D take along axis 0 (rows); same layout as
# _take_1d_dict.
_take_2d_axis0_dict = {
    ('int8', 'int8'): algos.take_2d_axis0_int8_int8,
    ('int8', 'int32'): algos.take_2d_axis0_int8_int32,
    ('int8', 'int64'): algos.take_2d_axis0_int8_int64,
    ('int8', 'float64'): algos.take_2d_axis0_int8_float64,
    ('int16', 'int16'): algos.take_2d_axis0_int16_int16,
    ('int16', 'int32'): algos.take_2d_axis0_int16_int32,
    ('int16', 'int64'): algos.take_2d_axis0_int16_int64,
    ('int16', 'float64'): algos.take_2d_axis0_int16_float64,
    ('int32', 'int32'): algos.take_2d_axis0_int32_int32,
    ('int32', 'int64'): algos.take_2d_axis0_int32_int64,
    ('int32', 'float64'): algos.take_2d_axis0_int32_float64,
    ('int64', 'int64'): algos.take_2d_axis0_int64_int64,
    ('int64', 'float64'): algos.take_2d_axis0_int64_float64,
    ('float32', 'float32'): algos.take_2d_axis0_float32_float32,
    ('float32', 'float64'): algos.take_2d_axis0_float32_float64,
    ('float64', 'float64'): algos.take_2d_axis0_float64_float64,
    ('object', 'object'): algos.take_2d_axis0_object_object,
    ('bool', 'bool'): _view_wrapper(algos.take_2d_axis0_bool_bool, np.uint8,
                                    np.uint8),
    ('bool', 'object'): _view_wrapper(algos.take_2d_axis0_bool_object,
                                      np.uint8, None),
    ('datetime64[ns]', 'datetime64[ns]'):
    _view_wrapper(algos.take_2d_axis0_int64_int64, np.int64, np.int64,
                  fill_wrap=np.int64)
}
# Dispatch table for 2-D take along axis 1 (columns); same layout as
# _take_1d_dict.
_take_2d_axis1_dict = {
    ('int8', 'int8'): algos.take_2d_axis1_int8_int8,
    ('int8', 'int32'): algos.take_2d_axis1_int8_int32,
    ('int8', 'int64'): algos.take_2d_axis1_int8_int64,
    ('int8', 'float64'): algos.take_2d_axis1_int8_float64,
    ('int16', 'int16'): algos.take_2d_axis1_int16_int16,
    ('int16', 'int32'): algos.take_2d_axis1_int16_int32,
    ('int16', 'int64'): algos.take_2d_axis1_int16_int64,
    ('int16', 'float64'): algos.take_2d_axis1_int16_float64,
    ('int32', 'int32'): algos.take_2d_axis1_int32_int32,
    ('int32', 'int64'): algos.take_2d_axis1_int32_int64,
    ('int32', 'float64'): algos.take_2d_axis1_int32_float64,
    ('int64', 'int64'): algos.take_2d_axis1_int64_int64,
    ('int64', 'float64'): algos.take_2d_axis1_int64_float64,
    ('float32', 'float32'): algos.take_2d_axis1_float32_float32,
    ('float32', 'float64'): algos.take_2d_axis1_float32_float64,
    ('float64', 'float64'): algos.take_2d_axis1_float64_float64,
    ('object', 'object'): algos.take_2d_axis1_object_object,
    ('bool', 'bool'): _view_wrapper(algos.take_2d_axis1_bool_bool, np.uint8,
                                    np.uint8),
    ('bool', 'object'): _view_wrapper(algos.take_2d_axis1_bool_object,
                                      np.uint8, None),
    ('datetime64[ns]', 'datetime64[ns]'):
    _view_wrapper(algos.take_2d_axis1_int64_int64, np.int64, np.int64,
                  fill_wrap=np.int64)
}
# Dispatch table for 2-D take over both axes simultaneously (used by
# take_2d_multi); same layout as _take_1d_dict.
_take_2d_multi_dict = {
    ('int8', 'int8'): algos.take_2d_multi_int8_int8,
    ('int8', 'int32'): algos.take_2d_multi_int8_int32,
    ('int8', 'int64'): algos.take_2d_multi_int8_int64,
    ('int8', 'float64'): algos.take_2d_multi_int8_float64,
    ('int16', 'int16'): algos.take_2d_multi_int16_int16,
    ('int16', 'int32'): algos.take_2d_multi_int16_int32,
    ('int16', 'int64'): algos.take_2d_multi_int16_int64,
    ('int16', 'float64'): algos.take_2d_multi_int16_float64,
    ('int32', 'int32'): algos.take_2d_multi_int32_int32,
    ('int32', 'int64'): algos.take_2d_multi_int32_int64,
    ('int32', 'float64'): algos.take_2d_multi_int32_float64,
    ('int64', 'int64'): algos.take_2d_multi_int64_int64,
    ('int64', 'float64'): algos.take_2d_multi_int64_float64,
    ('float32', 'float32'): algos.take_2d_multi_float32_float32,
    ('float32', 'float64'): algos.take_2d_multi_float32_float64,
    ('float64', 'float64'): algos.take_2d_multi_float64_float64,
    ('object', 'object'): algos.take_2d_multi_object_object,
    ('bool', 'bool'): _view_wrapper(algos.take_2d_multi_bool_bool, np.uint8,
                                    np.uint8),
    ('bool', 'object'): _view_wrapper(algos.take_2d_multi_bool_object,
                                      np.uint8, None),
    ('datetime64[ns]', 'datetime64[ns]'):
    _view_wrapper(algos.take_2d_multi_int64_int64, np.int64, np.int64,
                  fill_wrap=np.int64)
}
def _get_take_nd_function(ndim, arr_dtype, out_dtype, axis=0, mask_info=None):
    """Return a take implementation for the given ndim/dtypes/axis.

    Resolution order:
    1. exact (arr dtype, out dtype) specialization from the dispatch tables;
    2. an (out, out) specialization, with the input cast on the way in;
    3. the generic object-dtype fallback (_take_nd_object).

    The two table lookups previously duplicated the same ndim/axis
    branching; it is factored into a local helper here.
    """
    def _lookup(tup):
        # Resolve the specialized Cython routine for a dtype-name pair,
        # or None when no specialization exists.
        if ndim == 1:
            return _take_1d_dict.get(tup, None)
        if axis == 0:
            return _take_2d_axis0_dict.get(tup, None)
        return _take_2d_axis1_dict.get(tup, None)
    if ndim <= 2:
        # try an exact (arr, out) dtype match first
        func = _lookup((arr_dtype.name, out_dtype.name))
        if func is not None:
            return func
        # otherwise look for an out->out routine and cast arr on entry
        func = _lookup((out_dtype.name, out_dtype.name))
        if func is not None:
            return _convert_wrapper(func, out_dtype)
    def func(arr, indexer, out, fill_value=np.nan):
        # generic fallback: works for any dtype but runs at Python speed
        indexer = _ensure_int64(indexer)
        _take_nd_object(arr, indexer, out, axis=axis, fill_value=fill_value,
                        mask_info=mask_info)
    return func
def take_nd(arr, indexer, axis=0, out=None, fill_value=np.nan, mask_info=None,
            allow_fill=True):
    """
    Specialized Cython take which sets NaN values in one pass
    Parameters
    ----------
    arr : ndarray
        Input array
    indexer : ndarray
        1-D array of indices to take, subarrays corresponding to -1 value
        indices are filled with fill_value
    axis : int, default 0
        Axis to take from
    out : ndarray or None, default None
        Optional output array, must be appropriate type to hold input and
        fill_value together, if indexer has any -1 value entries; call
        _maybe_promote to determine this type for any fill_value
    fill_value : any, default np.nan
        Fill value to replace -1 values with
    mask_info : tuple of (ndarray, boolean)
        If provided, value should correspond to:
            (indexer == -1, (indexer == -1).any())
        If not provided, it will be computed internally if necessary
    allow_fill : boolean, default True
        If False, indexer is assumed to contain no -1 values so no filling
        will be done. This short-circuits computation of a mask. Result is
        undefined if allow_fill == False and -1 is present in indexer.
    Returns
    -------
    ndarray
        ``out`` if it was provided, otherwise a newly allocated array of
        the (possibly promoted) result dtype.
    """
    # dispatch to internal type takes
    if is_categorical(arr):
        return arr.take_nd(indexer, fill_value=fill_value,
                           allow_fill=allow_fill)
    elif is_datetimetz(arr):
        return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill)
    elif is_interval_dtype(arr):
        return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill)
    if indexer is None:
        # no indexer: take everything along `axis` in order
        indexer = np.arange(arr.shape[axis], dtype=np.int64)
        dtype, fill_value = arr.dtype, arr.dtype.type()
    else:
        indexer = _ensure_int64(indexer, copy=False)
        if not allow_fill:
            dtype, fill_value = arr.dtype, arr.dtype.type()
            mask_info = None, False
        else:
            # check for promotion based on types only (do this first because
            # it's faster than computing a mask)
            dtype, fill_value = maybe_promote(arr.dtype, fill_value)
            if dtype != arr.dtype and (out is None or out.dtype != dtype):
                # check if promotion is actually required based on indexer
                if mask_info is not None:
                    mask, needs_masking = mask_info
                else:
                    mask = indexer == -1
                    needs_masking = mask.any()
                    mask_info = mask, needs_masking
                if needs_masking:
                    if out is not None and out.dtype != dtype:
                        raise TypeError('Incompatible type for fill_value')
                else:
                    # if not, then depromote, set fill_value to dummy
                    # (it won't be used but we don't want the cython code
                    # to crash when trying to cast it to dtype)
                    dtype, fill_value = arr.dtype, arr.dtype.type()
    # work on the transpose of Fortran-ordered 2-D arrays so the take runs
    # over contiguous memory; transposed back before returning
    flip_order = False
    if arr.ndim == 2:
        if arr.flags.f_contiguous:
            flip_order = True
    if flip_order:
        arr = arr.T
        axis = arr.ndim - axis - 1
        if out is not None:
            out = out.T
    # at this point, it's guaranteed that dtype can hold both the arr values
    # and the fill_value
    if out is None:
        out_shape = list(arr.shape)
        out_shape[axis] = len(indexer)
        out_shape = tuple(out_shape)
        if arr.flags.f_contiguous and axis == arr.ndim - 1:
            # minor tweak that can make an order-of-magnitude difference
            # for dataframes initialized directly from 2-d ndarrays
            # (s.t. df.values is c-contiguous and df._data.blocks[0] is its
            # f-contiguous transpose)
            out = np.empty(out_shape, dtype=dtype, order='F')
        else:
            out = np.empty(out_shape, dtype=dtype)
    func = _get_take_nd_function(arr.ndim, arr.dtype, out.dtype, axis=axis,
                                 mask_info=mask_info)
    func(arr, indexer, out, fill_value)
    if flip_order:
        out = out.T
    return out
# alias kept for callers of the 1-D case; identical signature/behavior
take_1d = take_nd
def take_2d_multi(arr, indexer, out=None, fill_value=np.nan, mask_info=None,
                  allow_fill=True):
    """
    Specialized Cython take which sets NaN values in one pass

    Takes from a 2-D ``arr`` along both axes at once.  ``indexer`` is a
    ``(row_idx, col_idx)`` tuple; either entry (or the whole tuple) may be
    None to take everything along that axis.  Positions where an index is
    -1 are set to ``fill_value`` after dtype promotion, mirroring
    ``take_nd``.  ``mask_info``, if given, is
    ``((row_idx == -1, col_idx == -1), (row_needs, col_needs))``.
    """
    if indexer is None or (indexer[0] is None and indexer[1] is None):
        row_idx = np.arange(arr.shape[0], dtype=np.int64)
        col_idx = np.arange(arr.shape[1], dtype=np.int64)
        indexer = row_idx, col_idx
        dtype, fill_value = arr.dtype, arr.dtype.type()
    else:
        row_idx, col_idx = indexer
        if row_idx is None:
            row_idx = np.arange(arr.shape[0], dtype=np.int64)
        else:
            row_idx = _ensure_int64(row_idx)
        if col_idx is None:
            col_idx = np.arange(arr.shape[1], dtype=np.int64)
        else:
            col_idx = _ensure_int64(col_idx)
        indexer = row_idx, col_idx
        if not allow_fill:
            dtype, fill_value = arr.dtype, arr.dtype.type()
            mask_info = None, False
        else:
            # check for promotion based on types only (do this first because
            # it's faster than computing a mask)
            dtype, fill_value = maybe_promote(arr.dtype, fill_value)
            if dtype != arr.dtype and (out is None or out.dtype != dtype):
                # check if promotion is actually required based on indexer
                if mask_info is not None:
                    (row_mask, col_mask), (row_needs, col_needs) = mask_info
                else:
                    row_mask = row_idx == -1
                    col_mask = col_idx == -1
                    row_needs = row_mask.any()
                    col_needs = col_mask.any()
                    mask_info = (row_mask, col_mask), (row_needs, col_needs)
                if row_needs or col_needs:
                    if out is not None and out.dtype != dtype:
                        raise TypeError('Incompatible type for fill_value')
                else:
                    # if not, then depromote, set fill_value to dummy
                    # (it won't be used but we don't want the cython code
                    # to crash when trying to cast it to dtype)
                    dtype, fill_value = arr.dtype, arr.dtype.type()
    # at this point, it's guaranteed that dtype can hold both the arr values
    # and the fill_value
    if out is None:
        out_shape = len(row_idx), len(col_idx)
        out = np.empty(out_shape, dtype=dtype)
    # prefer a dtype-specialized Cython routine; fall back to the generic
    # object-dtype implementation when none applies
    func = _take_2d_multi_dict.get((arr.dtype.name, out.dtype.name), None)
    if func is None and arr.dtype != out.dtype:
        func = _take_2d_multi_dict.get((out.dtype.name, out.dtype.name), None)
        if func is not None:
            func = _convert_wrapper(func, out.dtype)
    if func is None:
        def func(arr, indexer, out, fill_value=np.nan):
            _take_2d_multi_object(arr, indexer, out, fill_value=fill_value,
                                  mask_info=mask_info)
    func(arr, indexer, out=out, fill_value=fill_value)
    return out
# ---- #
# diff #
# ---- #
# dtype-name -> specialized Cython 2-D diff routine used by diff() below
_diff_special = {
    'float64': algos.diff_2d_float64,
    'float32': algos.diff_2d_float32,
    'int64': algos.diff_2d_int64,
    'int32': algos.diff_2d_int32,
    'int16': algos.diff_2d_int16,
    'int8': algos.diff_2d_int8,
}
def diff(arr, n, axis=0):
    """
    difference of n between self,
    analogous to s-s.shift(n)
    Parameters
    ----------
    arr : ndarray
    n : int
        number of periods
    axis : int
        axis to shift on
    Returns
    -------
    shifted
    """
    n = int(n)
    na = np.nan
    dtype = arr.dtype
    is_timedelta = False
    if needs_i8_conversion(arr):
        # datetimelike: operate on the int64 view; the result is converted
        # to timedelta64[ns] at the end
        dtype = np.float64
        arr = arr.view('i8')
        na = iNaT
        is_timedelta = True
    elif is_bool_dtype(dtype):
        dtype = np.object_
    elif is_integer_dtype(dtype):
        # integer diffs may produce NaN at the boundary, so go to float
        dtype = np.float64
    dtype = np.dtype(dtype)
    out_arr = np.empty(arr.shape, dtype=dtype)
    # the first (or last, for negative n) |n| positions have no lag -> NA
    na_indexer = [slice(None)] * arr.ndim
    na_indexer[axis] = slice(None, n) if n >= 0 else slice(n, None)
    out_arr[tuple(na_indexer)] = na
    if arr.ndim == 2 and arr.dtype.name in _diff_special:
        f = _diff_special[arr.dtype.name]
        f(arr, out_arr, n, axis)
    else:
        res_indexer = [slice(None)] * arr.ndim
        res_indexer[axis] = slice(n, None) if n >= 0 else slice(None, n)
        res_indexer = tuple(res_indexer)
        lag_indexer = [slice(None)] * arr.ndim
        lag_indexer[axis] = slice(None, -n) if n > 0 else slice(-n, None)
        lag_indexer = tuple(lag_indexer)
        # need to make sure that we account for na for datelike/timedelta
        # we don't actually want to subtract these i8 numbers
        if is_timedelta:
            res = arr[res_indexer]
            lag = arr[lag_indexer]
            mask = (arr[res_indexer] == na) | (arr[lag_indexer] == na)
            if mask.any():
                res = res.copy()
                res[mask] = 0
                lag = lag.copy()
                lag[mask] = 0
            result = res - lag
            result[mask] = na
            out_arr[res_indexer] = result
        else:
            out_arr[res_indexer] = arr[res_indexer] - arr[lag_indexer]
    if is_timedelta:
        from pandas import TimedeltaIndex
        out_arr = TimedeltaIndex(out_arr.ravel().astype('int64')).asi8.reshape(
            out_arr.shape).astype('timedelta64[ns]')
    return out_arr
| bsd-3-clause |
Odingod/mne-python | mne/epochs.py | 1 | 85604 | """Tools for working with epoched data"""
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Matti Hamalainen <msh@nmr.mgh.harvard.edu>
# Daniel Strohmeier <daniel.strohmeier@tu-ilmenau.de>
# Denis Engemann <denis.engemann@gmail.com>
# Mainak Jas <mainak@neuro.hut.fi>
#
# License: BSD (3-clause)
from .externals.six import string_types
import copy as cp
import warnings
import json
import numpy as np
from .io.write import (start_file, start_block, end_file, end_block,
write_int, write_float_matrix, write_float,
write_id, write_string)
from .io.meas_info import read_meas_info, write_meas_info, _merge_info
from .io.open import fiff_open
from .io.tree import dir_tree_find
from .io.tag import read_tag
from .io.constants import FIFF
from .io.pick import (pick_types, channel_indices_by_type, channel_type,
pick_channels)
from .io.proj import setup_proj, ProjMixin, _proj_equal
from .io.base import _BaseRaw, ToDataFrameMixin
from .evoked import EvokedArray, aspect_rev
from .baseline import rescale
from .channels.channels import (ContainsMixin, PickDropChannelsMixin,
SetChannelsMixin, InterpolationMixin)
from .filter import resample, detrend, FilterMixin
from .event import _read_events_fif
from .fixes import in1d
from .viz import (plot_epochs, _drop_log_stats, plot_epochs_psd,
plot_epochs_psd_topomap)
from .utils import (check_fname, logger, verbose, _check_type_picks,
_time_mask, check_random_state, object_hash)
from .externals.six import iteritems
from .externals.six.moves import zip
class _BaseEpochs(ProjMixin, ContainsMixin, PickDropChannelsMixin,
                  SetChannelsMixin, InterpolationMixin, FilterMixin):
    """Abstract base class for Epochs-type classes
    This class provides basic functionality and should never be instantiated
    directly. See Epochs below for an explanation of the parameters.
    """
    def __init__(self, info, event_id, tmin, tmax, baseline=(None, 0),
                 picks=None, name='Unknown', reject=None, flat=None,
                 decim=1, reject_tmin=None, reject_tmax=None, detrend=None,
                 add_eeg_ref=True, verbose=None):
        # NOTE(review): add_eeg_ref is unused in this base __init__ --
        # presumably consumed by subclasses; verify against callers
        self.verbose = verbose
        self.name = name
        # normalize event_id into a dict of {name (str): event code (int)}
        if isinstance(event_id, dict):
            if not all(isinstance(v, int) for v in event_id.values()):
                raise ValueError('Event IDs must be of type integer')
            if not all(isinstance(k, string_types) for k in event_id):
                raise ValueError('Event names must be of type str')
            self.event_id = event_id
        elif isinstance(event_id, list):
            if not all(isinstance(v, int) for v in event_id):
                raise ValueError('Event IDs must be of type integer')
            self.event_id = dict(zip((str(i) for i in event_id), event_id))
        elif isinstance(event_id, int):
            self.event_id = {str(event_id): event_id}
        else:
            raise ValueError('event_id must be dict or int.')
        # check reject_tmin and reject_tmax
        if (reject_tmin is not None) and (reject_tmin < tmin):
            raise ValueError("reject_tmin needs to be None or >= tmin")
        if (reject_tmax is not None) and (reject_tmax > tmax):
            raise ValueError("reject_tmax needs to be None or <= tmax")
        if (reject_tmin is not None) and (reject_tmax is not None):
            if reject_tmin >= reject_tmax:
                raise ValueError('reject_tmin needs to be < reject_tmax')
        if detrend not in [None, 0, 1]:
            raise ValueError('detrend must be None, 0, or 1')
        # check that baseline is in available data
        if baseline is not None:
            baseline_tmin, baseline_tmax = baseline
            tstep = 1. / info['sfreq']
            if baseline_tmin is not None:
                if baseline_tmin < tmin - tstep:
                    err = ("Baseline interval (tmin = %s) is outside of epoch "
                           "data (tmin = %s)" % (baseline_tmin, tmin))
                    raise ValueError(err)
            if baseline_tmax is not None:
                if baseline_tmax > tmax + tstep:
                    err = ("Baseline interval (tmax = %s) is outside of epoch "
                           "data (tmax = %s)" % (baseline_tmax, tmax))
                    raise ValueError(err)
        self.tmin = tmin
        self.tmax = tmax
        self.baseline = baseline
        self.reject = reject
        self.reject_tmin = reject_tmin
        self.reject_tmax = reject_tmax
        self.flat = flat
        self.decim = decim = int(decim)
        self._bad_dropped = False
        self.drop_log = None
        self.selection = None
        self.detrend = detrend
        # Handle measurement info
        self.info = info
        if picks is None:
            picks = list(range(len(self.info['ch_names'])))
        else:
            # restrict the measurement info to the picked channels
            self.info['chs'] = [self.info['chs'][k] for k in picks]
            self.info['ch_names'] = [self.info['ch_names'][k] for k in picks]
            self.info['nchan'] = len(picks)
        self.picks = _check_type_picks(picks)
        if len(picks) == 0:
            raise ValueError("Picks cannot be empty.")
        # Handle times
        if tmin >= tmax:
            raise ValueError('tmin has to be smaller than tmax')
        sfreq = float(self.info['sfreq'])
        start_idx = int(round(tmin * sfreq))
        self._raw_times = np.arange(start_idx,
                                    int(round(tmax * sfreq)) + 1) / sfreq
        self._epoch_stop = ep_len = len(self._raw_times)
        # decimation: precompute the sample picker and warn about aliasing
        if decim > 1:
            new_sfreq = sfreq / decim
            lowpass = self.info['lowpass']
            if lowpass is None:
                msg = ('The measurement information indicates data is not '
                       'low-pass filtered. The decim=%i parameter will '
                       'result in a sampling frequency of %g Hz, which can '
                       'cause aliasing artifacts.'
                       % (decim, new_sfreq))
                warnings.warn(msg)
            elif new_sfreq < 2.5 * lowpass:
                msg = ('The measurement information indicates a low-pass '
                       'frequency of %g Hz. The decim=%i parameter will '
                       'result in a sampling frequency of %g Hz, which can '
                       'cause aliasing artifacts.'
                       % (lowpass, decim, new_sfreq))  # 50% over nyquist limit
                warnings.warn(msg)
            i_start = start_idx % decim
            self._decim_idx = slice(i_start, ep_len, decim)
            self.times = self._raw_times[self._decim_idx]
            self.info['sfreq'] = new_sfreq
        else:
            self.times = self._raw_times
        self.preload = False
        self._data = None
        self._offset = None
        # setup epoch rejection
        self._reject_setup()
    def _reject_setup(self):
        """Sets self._reject_time and self._channel_type_idx (called from
        __init__)
        """
        if self.reject is None and self.flat is None:
            return
        idx = channel_indices_by_type(self.info)
        for key in idx.keys():
            if (self.reject is not None and key in self.reject) \
                    or (self.flat is not None and key in self.flat):
                if len(idx[key]) == 0:
                    raise ValueError("No %s channel found. Cannot reject based"
                                     " on %s." % (key.upper(), key.upper()))
        self._channel_type_idx = idx
        if (self.reject_tmin is None) and (self.reject_tmax is None):
            self._reject_time = None
        else:
            # translate the rejection window (s) to a sample slice
            if self.reject_tmin is None:
                reject_imin = None
            else:
                idxs = np.nonzero(self.times >= self.reject_tmin)[0]
                reject_imin = idxs[0]
            if self.reject_tmax is None:
                reject_imax = None
            else:
                idxs = np.nonzero(self.times <= self.reject_tmax)[0]
                reject_imax = idxs[-1]
            self._reject_time = slice(reject_imin, reject_imax)
    @verbose
    def _is_good_epoch(self, data, verbose=None):
        """Determine if epoch is good"""
        if data is None:
            return False, ['NO_DATA']
        n_times = len(self.times)
        if data.shape[1] < n_times:
            # epoch is too short ie at the end of the data
            return False, ['TOO_SHORT']
        if self.reject is None and self.flat is None:
            return True, None
        else:
            if self._reject_time is not None:
                data = data[:, self._reject_time]
            return _is_good(data, self.ch_names, self._channel_type_idx,
                            self.reject, self.flat, full_report=True,
                            ignore_chs=self.info['bads'])
    @verbose
    def _preprocess(self, epoch, verbose=None):
        """Detrend, baseline-correct, offset and decimate one epoch array."""
        # Detrend
        if self.detrend is not None:
            picks = pick_types(self.info, meg=True, eeg=True, stim=False,
                               ref_meg=False, eog=False, ecg=False,
                               emg=False, exclude=[])
            epoch[picks] = detrend(epoch[picks], self.detrend, axis=1)
        # Baseline correct
        picks = pick_types(self.info, meg=True, eeg=True, stim=False,
                           ref_meg=True, eog=True, ecg=True,
                           emg=True, exclude=[])
        epoch[picks] = rescale(epoch[picks], self._raw_times, self.baseline,
                               'mean', copy=False, verbose=verbose)
        # handle offset
        if self._offset is not None:
            epoch += self._offset
        # Decimate
        if self.decim > 1:
            epoch = epoch[:, self._decim_idx]
        return epoch
    def get_data(self):
        """Get all epochs as a 3D array
        Returns
        -------
        data : array of shape (n_epochs, n_channels, n_times)
            The epochs data
        """
        if self.preload:
            return self._data
        else:
            data = self._get_data_from_disk()
            return data
    def iter_evoked(self):
        """Iterate over Evoked objects with nave=1
        """
        self._current = 0
        while True:
            data, event_id = self.next(True)
            tmin = self.times[0]
            info = cp.deepcopy(self.info)
            yield EvokedArray(data, info, tmin, comment=str(event_id))
    def subtract_evoked(self, evoked=None):
        """Subtract an evoked response from each epoch
        Can be used to exclude the evoked response when analyzing induced
        activity, see e.g. [1].
        References
        ----------
        [1] David et al. "Mechanisms of evoked and induced responses in
        MEG/EEG", NeuroImage, vol. 31, no. 4, pp. 1580-1591, July 2006.
        Parameters
        ----------
        evoked : instance of Evoked | None
            The evoked response to subtract. If None, the evoked response
            is computed from Epochs itself.
        Returns
        -------
        self : instance of Epochs
            The modified instance (instance is also modified inplace).
        """
        logger.info('Subtracting Evoked from Epochs')
        if evoked is None:
            picks = pick_types(self.info, meg=True, eeg=True,
                               stim=False, eog=False, ecg=False,
                               emg=False, exclude=[])
            evoked = self.average(picks)
        # find the indices of the channels to use
        picks = pick_channels(evoked.ch_names, include=self.ch_names)
        # make sure the omitted channels are not data channels
        if len(picks) < len(self.ch_names):
            sel_ch = [evoked.ch_names[ii] for ii in picks]
            diff_ch = list(set(self.ch_names).difference(sel_ch))
            diff_idx = [self.ch_names.index(ch) for ch in diff_ch]
            diff_types = [channel_type(self.info, idx) for idx in diff_idx]
            bad_idx = [diff_types.index(t) for t in diff_types if t in
                       ['grad', 'mag', 'eeg']]
            if len(bad_idx) > 0:
                bad_str = ', '.join([diff_ch[ii] for ii in bad_idx])
                raise ValueError('The following data channels are missing '
                                 'in the evoked response: %s' % bad_str)
            logger.info(' The following channels are not included in the '
                        'subtraction: %s' % ', '.join(diff_ch))
        # make sure the times match
        if (len(self.times) != len(evoked.times) or
                np.max(np.abs(self.times - evoked.times)) >= 1e-7):
            raise ValueError('Epochs and Evoked object do not contain '
                             'the same time points.')
        # handle SSPs
        if not self.proj and evoked.proj:
            warnings.warn('Evoked has SSP applied while Epochs has not.')
        if self.proj and not evoked.proj:
            evoked = evoked.copy().apply_proj()
        # find the indices of the channels to use in Epochs
        ep_picks = [self.ch_names.index(evoked.ch_names[ii]) for ii in picks]
        # do the subtraction
        if self.preload:
            self._data[:, ep_picks, :] -= evoked.data[picks][None, :, :]
        else:
            # data not in memory: accumulate the subtraction into an offset
            # that _preprocess applies when epochs are read from disk
            if self._offset is None:
                self._offset = np.zeros((len(self.ch_names), len(self.times)),
                                        dtype=np.float)
            self._offset[ep_picks] -= evoked.data[picks]
        logger.info('[done]')
        return self
    def _get_data_from_disk(self, out=True, verbose=None):
        # abstract: subclasses implement the actual I/O
        raise NotImplementedError('_get_data_from_disk() must be implemented '
                                  'in derived class.')
    def __iter__(self):
        """To make iteration over epochs easy.
        """
        self._current = 0
        return self
    def next(self, return_event_id=False):
        # abstract: subclasses yield (data[, event_id]) per epoch
        raise NotImplementedError('next() must be implemented in derived '
                                  'class.')
    def __next__(self, *args, **kwargs):
        """Wrapper for Py3k"""
        return self.next(*args, **kwargs)
    def __hash__(self):
        if not self.preload:
            raise RuntimeError('Cannot hash epochs unless preloaded')
        return object_hash(dict(info=self.info, data=self._data))
    def average(self, picks=None):
        """Compute average of epochs
        Parameters
        ----------
        picks : array-like of int | None
            If None only MEG and EEG channels are kept
            otherwise the channels indices in picks are kept.
        Returns
        -------
        evoked : instance of Evoked
            The averaged epochs.
        """
        return self._compute_mean_or_stderr(picks, 'ave')
    def standard_error(self, picks=None):
        """Compute standard error over epochs
        Parameters
        ----------
        picks : array-like of int | None
            If None only MEG and EEG channels are kept
            otherwise the channels indices in picks are kept.
        Returns
        -------
        evoked : instance of Evoked
            The standard error over epochs.
        """
        return self._compute_mean_or_stderr(picks, 'stderr')
    def _compute_mean_or_stderr(self, picks, mode='ave'):
        """Compute the mean or std over epochs and return Evoked"""
        _do_std = True if mode == 'stderr' else False
        n_channels = len(self.ch_names)
        n_times = len(self.times)
        if self.preload:
            n_events = len(self.events)
            if not _do_std:
                data = np.mean(self._data, axis=0)
            else:
                data = np.std(self._data, axis=0)
            assert len(self.events) == len(self._data)
        else:
            # data on disk: accumulate across epochs without loading all
            data = np.zeros((n_channels, n_times))
            n_events = 0
            for e in self:
                data += e
                n_events += 1
            if n_events > 0:
                data /= n_events
            else:
                data.fill(np.nan)
            # convert to stderr if requested, could do in one pass but do in
            # two (slower) in case there are large numbers
            if _do_std:
                data_mean = cp.copy(data)
                data.fill(0.)
                for e in self:
                    data += (e - data_mean) ** 2
                data = np.sqrt(data / n_events)
        if not _do_std:
            _aspect_kind = FIFF.FIFFV_ASPECT_AVERAGE
        else:
            _aspect_kind = FIFF.FIFFV_ASPECT_STD_ERR
            data /= np.sqrt(n_events)
        kind = aspect_rev.get(str(_aspect_kind), 'Unknown')
        info = cp.deepcopy(self.info)
        evoked = EvokedArray(data, info, tmin=self.times[0],
                             comment=self.name, nave=n_events, kind=kind,
                             verbose=self.verbose)
        # XXX: above constructor doesn't recreate the times object precisely
        evoked.times = self.times.copy()
        evoked._aspect_kind = _aspect_kind
        # pick channels
        if picks is None:
            picks = pick_types(evoked.info, meg=True, eeg=True, ref_meg=True,
                               stim=False, eog=False, ecg=False,
                               emg=False, exclude=[])
        ch_names = [evoked.ch_names[p] for p in picks]
        evoked.pick_channels(ch_names)
        if len(evoked.info['ch_names']) == 0:
            raise ValueError('No data channel found when averaging.')
        if evoked.nave < 1:
            warnings.warn('evoked object is empty (based on less '
                          'than 1 epoch)', RuntimeWarning)
        return evoked
    @property
    def ch_names(self):
        """Channel names"""
        return self.info['ch_names']
    def plot(self, epoch_idx=None, picks=None, scalings=None,
             title_str='#%003i', show=True, block=False):
        """Visualize single trials using Trellis plot.
        Parameters
        ----------
        epoch_idx : array-like | int | None
            The epochs to visualize. If None, the first 20 epochs are shown.
            Defaults to None.
        picks : array-like of int | None
            Channels to be included. If None only good data channels are used.
            Defaults to None
        scalings : dict | None
            Scale factors for the traces. If None, defaults to
            ``dict(mag=1e-12, grad=4e-11, eeg=20e-6, eog=150e-6, ecg=5e-4,
            emg=1e-3, ref_meg=1e-12, misc=1e-3, stim=1, resp=1, chpi=1e-4)``.
        title_str : None | str
            The string formatting to use for axes titles. If None, no titles
            will be shown. Defaults expand to ``#001, #002, ...``.
        show : bool
            Whether to show the figure or not.
        block : bool
            Whether to halt program execution until the figure is closed.
            Useful for rejecting bad trials on the fly by clicking on a
            sub plot.
        Returns
        -------
        fig : Instance of matplotlib.figure.Figure
            The figure.
        """
        return plot_epochs(self, epoch_idx=epoch_idx, picks=picks,
                           scalings=scalings, title_str=title_str,
                           show=show, block=block)
    def plot_psd(self, fmin=0, fmax=np.inf, proj=False, n_fft=256,
                 picks=None, ax=None, color='black', area_mode='std',
                 area_alpha=0.33, n_overlap=0, dB=True,
                 n_jobs=1, verbose=None, show=True):
        """Plot the power spectral density across epochs
        Parameters
        ----------
        fmin : float
            Start frequency to consider.
        fmax : float
            End frequency to consider.
        proj : bool
            Apply projection.
        n_fft : int
            Number of points to use in Welch FFT calculations.
        picks : array-like of int | None
            List of channels to use.
        ax : instance of matplotlib Axes | None
            Axes to plot into. If None, axes will be created.
        color : str | tuple
            A matplotlib-compatible color to use.
        area_mode : str | None
            Mode for plotting area. If 'std', the mean +/- 1 STD (across
            channels) will be plotted. If 'range', the min and max (across
            channels) will be plotted. Bad channels will be excluded from
            these calculations. If None, no area will be plotted.
        area_alpha : float
            Alpha for the area.
        n_overlap : int
            The number of points of overlap between blocks.
        dB : bool
            If True, transform data to decibels.
        n_jobs : int
            Number of jobs to run in parallel.
        verbose : bool, str, int, or None
            If not None, override default verbose level (see mne.verbose).
        show : bool
            Show figure if True.
        Returns
        -------
        fig : instance of matplotlib figure
            Figure distributing one image per channel across sensor topography.
        """
        return plot_epochs_psd(self, fmin=fmin, fmax=fmax, proj=proj,
                               n_fft=n_fft, picks=picks, ax=ax,
                               color=color, area_mode=area_mode,
                               area_alpha=area_alpha,
                               n_overlap=n_overlap, dB=dB, n_jobs=n_jobs,
                               verbose=None, show=show)
    def plot_psd_topomap(self, bands=None, vmin=None, vmax=None, proj=False,
                         n_fft=256, ch_type=None,
                         n_overlap=0, layout=None, cmap='RdBu_r',
                         agg_fun=None, dB=True, n_jobs=1, normalize=False,
                         cbar_fmt='%0.3f', outlines='head', show=True,
                         verbose=None):
        """Plot the topomap of the power spectral density across epochs
        Parameters
        ----------
        bands : list of tuple | None
            The lower and upper frequency and the name for that band. If None,
            (default) expands to:
            bands = [(0, 4, 'Delta'), (4, 8, 'Theta'), (8, 12, 'Alpha'),
                     (12, 30, 'Beta'), (30, 45, 'Gamma')]
        vmin : float | callable
            The value specfying the lower bound of the color range.
            If None, and vmax is None, -vmax is used. Else np.min(data).
            If callable, the output equals vmin(data).
        vmax : float | callable
            The value specfying the upper bound of the color range.
            If None, the maximum absolute value is used. If vmin is None,
            but vmax is not, defaults to np.min(data).
            If callable, the output equals vmax(data).
        proj : bool
            Apply projection.
        n_fft : int
            Number of points to use in Welch FFT calculations.
        ch_type : {None, 'mag', 'grad', 'planar1', 'planar2', 'eeg'}
            The channel type to plot. For 'grad', the gradiometers are
            collected in
            pairs and the RMS for each pair is plotted. If None, defaults to
            'mag' if MEG data are present and to 'eeg' if only EEG data are
            present.
        n_overlap : int
            The number of points of overlap between blocks.
        layout : None | Layout
            Layout instance specifying sensor positions (does not need to
            be specified for Neuromag data). If possible, the correct layout
            file is inferred from the data; if no appropriate layout file was
            found, the layout is automatically generated from the sensor
            locations.
        cmap : matplotlib colormap
            Colormap. For magnetometers and eeg defaults to 'RdBu_r', else
            'Reds'.
        agg_fun : callable
            The function used to aggregate over frequencies.
            Defaults to np.sum. if normalize is True, else np.mean.
        dB : bool
            If True, transform data to decibels (with ``10 * np.log10(data)``)
            following the application of `agg_fun`. Only valid if normalize
            is False.
        n_jobs : int
            Number of jobs to run in parallel.
        normalize : bool
            If True, each band will be devided by the total power. Defaults to
            False.
        cbar_fmt : str
            The colorbar format. Defaults to '%0.3f'.
        outlines : 'head' | dict | None
            The outlines to be drawn. If 'head', a head scheme will be drawn.
            If dict, each key refers to a tuple of x and y positions.
            The values in 'mask_pos' will serve as image mask. If None, nothing
            will be drawn. Defaults to 'head'. If dict, the 'autoshrink' (bool)
            field will trigger automated shrinking of the positions due to
            points outside the outline. Moreover, a matplotlib patch object can
            be passed for advanced masking options, either directly or as a
            function that returns patches (required for multi-axis plots).
        show : bool
            Show figure if True.
        verbose : bool, str, int, or None
            If not None, override default verbose level (see mne.verbose).
        Returns
        -------
        fig : instance of matplotlib figure
            Figure distributing one image per channel across sensor topography.
        """
        return plot_epochs_psd_topomap(
            self, bands=bands, vmin=vmin, vmax=vmax, proj=proj, n_fft=n_fft,
            ch_type=ch_type, n_overlap=n_overlap, layout=layout, cmap=cmap,
            agg_fun=agg_fun, dB=dB, n_jobs=n_jobs, normalize=normalize,
            cbar_fmt=cbar_fmt, outlines=outlines, show=show, verbose=None)
class Epochs(_BaseEpochs, ToDataFrameMixin):
    """List of Epochs

    Parameters
    ----------
    raw : Raw object
        An instance of Raw.
    events : array, shape (n_events, 3)
        The events typically returned by the read_events function.
        If some events don't match the events of interest as specified
        by event_id, they will be marked as 'IGNORED' in the drop log.
    event_id : int | list of int | dict | None
        The id of the event to consider. If dict,
        the keys can later be used to access associated events. Example:
        dict(auditory=1, visual=3). If int, a dict will be created with
        the id as string. If a list, all events with the IDs specified
        in the list are used. If None, all events will be used with
        and a dict is created with string integer names corresponding
        to the event id integers.
    tmin : float
        Start time before event.
    tmax : float
        End time after event.
    baseline : None or tuple of length 2 (default (None, 0))
        The time interval to apply baseline correction.
        If None do not apply it. If baseline is (a, b)
        the interval is between "a (s)" and "b (s)".
        If a is None the beginning of the data is used
        and if b is None then b is set to the end of the interval.
        If baseline is equal to (None, None) all the time
        interval is used.
        The baseline (a, b) includes both endpoints, i.e. all
        timepoints t such that a <= t <= b.
    picks : array-like of int | None (default)
        Indices of channels to include (if None, all channels are used).
    name : string
        Comment that describes the Evoked data created.
    preload : boolean
        Load all epochs from disk when creating the object
        or wait before accessing each epoch (more memory
        efficient but can be slower).
    reject : dict | None
        Rejection parameters based on peak-to-peak amplitude.
        Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'.
        If reject is None then no rejection is done. Example::

            reject = dict(grad=4000e-13, # T / m (gradiometers)
                          mag=4e-12, # T (magnetometers)
                          eeg=40e-6, # uV (EEG channels)
                          eog=250e-6 # uV (EOG channels)
                          )

    flat : dict | None
        Rejection parameters based on flatness of signal.
        Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg', and values
        are floats that set the minimum acceptable peak-to-peak amplitude.
        If flat is None then no rejection is done.
    proj : bool | 'delayed'
        Apply SSP projection vectors. If proj is 'delayed' and reject is not
        None the single epochs will be projected before the rejection
        decision, but used in unprojected state if they are kept.
        This way deciding which projection vectors are good can be postponed
        to the evoked stage without resulting in lower epoch counts and
        without producing results different from early SSP application
        given comparable parameters. Note that in this case baselining,
        detrending and temporal decimation will be postponed.
        If proj is False no projections will be applied which is the
        recommended value if SSPs are not used for cleaning the data.
    decim : int
        Factor by which to downsample the data from the raw file upon import.
        Warning: This simply selects every nth sample, data is not filtered
        here. If data is not properly filtered, aliasing artifacts may occur.
    reject_tmin : scalar | None
        Start of the time window used to reject epochs (with the default None,
        the window will start with tmin).
    reject_tmax : scalar | None
        End of the time window used to reject epochs (with the default None,
        the window will end with tmax).
    detrend : int | None
        If 0 or 1, the data channels (MEG and EEG) will be detrended when
        loaded. 0 is a constant (DC) detrend, 1 is a linear detrend. None
        is no detrending. Note that detrending is performed before baseline
        correction. If no DC offset is preferred (zeroth order detrending),
        either turn off baseline correction, as this may introduce a DC
        shift, or set baseline correction to use the entire time interval
        (will yield equivalent results but be slower).
    add_eeg_ref : bool
        If True, an EEG average reference will be added (unless one
        already exists).
    on_missing : str
        What to do if one or several event ids are not found in the recording.
        Valid keys are 'error' | 'warning' | 'ignore'
        Default is 'error'. If on_missing is 'warning' it will proceed but
        warn, if 'ignore' it will proceed silently. Note.
        If none of the event ids are found in the data, an error will be
        automatically generated irrespective of this parameter.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).
        Defaults to raw.verbose.

    Attributes
    ----------
    info: dict
        Measurement info.
    event_id : dict
        Names of conditions corresponding to event_ids.
    ch_names : list of string
        List of channels' names.
    selection : array
        List of indices of selected events (not dropped or ignored etc.). For
        example, if the original event array had 4 events and the second event
        has been dropped, this attribute would be np.array([0, 2, 3]).
    preload : bool
        Indicates whether epochs are in memory.
    drop_log : list of lists
        A list of the same length as the event array used to initialize the
        Epochs object. If the i-th original event is still part of the
        selection, drop_log[i] will be an empty list; otherwise it will be
        a list of the reasons the event is no longer in the selection, e.g.:
        'IGNORED' if it isn't part of the current subset defined by the user;
        'NO DATA' or 'TOO SHORT' if epoch didn't contain enough data;
        names of channels that exceeded the amplitude threshold;
        'EQUALIZED_COUNT' (see equalize_event_counts);
        or user-defined reasons (see drop_epochs).
    verbose : bool, str, int, or None
        See above.

    Notes
    -----
    For indexing and slicing:

    epochs[idx] : Epochs
        Return Epochs object with a subset of epochs (supports single
        index and python-style slicing)

    For subset selection using categorial labels:

    epochs['name'] : Epochs
        Return Epochs object with a subset of epochs corresponding to an
        experimental condition as specified by 'name'.
        If conditions are tagged by names separated by '/' (e.g. 'audio/left',
        'audio/right'), and 'name' is not in itself an event key, this selects
        every event whose condition contains the 'name' tag (e.g., 'left'
        matches 'audio/left' and 'visual/left'; but not 'audio_left'). Note
        that tags like 'auditory/left' and 'left/auditory' will be treated the
        same way when accessed using tags.

    epochs[['name_1', 'name_2', ... ]] : Epochs
        Return Epochs object with a subset of epochs corresponding to multiple
        experimental conditions as specified by 'name_1', 'name_2', ... .
        If conditions are separated by '/', selects every item containing every
        list tag (e.g. ['audio', 'left'] selects 'audio/left' and
        'audio/center/left', but not 'audio/right').

    See Also
    --------
    mne.epochs.combine_event_ids
    mne.Epochs.equalize_event_counts
    """
    @verbose
    def __init__(self, raw, events, event_id, tmin, tmax, baseline=(None, 0),
                 picks=None, name='Unknown', preload=False, reject=None,
                 flat=None, proj=True, decim=1, reject_tmin=None,
                 reject_tmax=None, detrend=None, add_eeg_ref=True,
                 on_missing='error', verbose=None):
        # raw=None creates an uninitialized shell -- presumably used by
        # readers that fill in attributes afterwards. TODO confirm callers.
        if raw is None:
            return
        elif not isinstance(raw, _BaseRaw):
            raise ValueError('The first argument to `Epochs` must be `None` '
                             'or an instance of `mne.io.Raw`')
        if on_missing not in ['error', 'warning', 'ignore']:
            raise ValueError('on_missing must be one of: error, '
                             'warning, ignore. Got: %s' % on_missing)

        # prepare for calling the base constructor

        # Handle measurement info
        info = cp.deepcopy(raw.info)
        # make sure projs are really copied.
        info['projs'] = [cp.deepcopy(p) for p in info['projs']]

        if event_id is None:  # convert to int to make typing-checks happy
            event_id = dict((str(e), int(e)) for e in np.unique(events[:, 2]))

        # call _BaseEpochs constructor
        super(Epochs, self).__init__(info, event_id, tmin, tmax,
                                     baseline=baseline, picks=picks, name=name,
                                     reject=reject, flat=flat, decim=decim,
                                     reject_tmin=reject_tmin,
                                     reject_tmax=reject_tmax, detrend=detrend,
                                     add_eeg_ref=add_eeg_ref, verbose=verbose)

        # do the rest
        self.raw = raw
        if proj not in [True, 'delayed', False]:
            raise ValueError(r"'proj' must either be 'True', 'False' or "
                             "'delayed'")
        proj = proj or raw.proj  # proj is on when applied in Raw
        if self._check_delayed(proj):
            logger.info('Entering delayed SSP mode.')
        # in delayed mode the projector is set up but not activated yet
        activate = False if self._check_delayed() else proj
        self._projector, self.info = setup_proj(self.info, add_eeg_ref,
                                                activate=activate)

        # warn (or raise) about requested event ids absent from the recording
        for key, val in self.event_id.items():
            if val not in events[:, 2]:
                msg = ('No matching events found for %s '
                       '(event id %i)' % (key, val))
                if on_missing == 'error':
                    raise ValueError(msg)
                elif on_missing == 'warning':
                    logger.warning(msg)
                    warnings.warn(msg)
                else:  # on_missing == 'ignore':
                    pass

        # Select the desired events
        values = list(self.event_id.values())
        selected = in1d(events[:, 2], values)
        self.events = events[selected]

        n_events = len(self.events)
        if n_events > 1:
            if np.diff(self.events.astype(np.int64)[:, 0]).min() <= 0:
                warnings.warn('The events passed to the Epochs constructor '
                              'are not chronologically ordered.',
                              RuntimeWarning)

        if n_events > 0:
            logger.info('%d matching events found' % n_events)
        else:
            raise ValueError('No desired events found.')

        # events not in event_id are kept in drop_log as 'IGNORED'
        self.selection = np.where(selected)[0]
        self.drop_log = []
        for k in range(len(events)):
            if events[k, 2] in values:
                self.drop_log.append([])
            else:
                self.drop_log.append(['IGNORED'])

        self.preload = preload
        if self.preload:
            self._data = self._get_data_from_disk()
            self.raw = None  # all data cached; the Raw handle is released
        else:
            self._data = None
    def drop_bad_epochs(self):
        """Drop bad epochs without retaining the epochs data.

        Should be used before slicing operations.

        .. Warning:: Operation is slow since all epochs have to be read from
            disk. To avoid reading epochs from disk multiple times, initialize
            Epochs object with preload=True.
        """
        # out=False runs the rejection pass (updating drop_log/selection)
        # without caching the epoch data
        self._get_data_from_disk(out=False)
    def drop_log_stats(self, ignore=['IGNORED']):
        # NOTE(review): mutable default argument; never mutated here so it is
        # safe in practice, but a tuple would be more defensive.
        """Compute the channel stats based on a drop_log from Epochs.

        Parameters
        ----------
        ignore : list
            The drop reasons to ignore.

        Returns
        -------
        perc : float
            Total percentage of epochs dropped.
        """
        return _drop_log_stats(self.drop_log, ignore)
    def plot_drop_log(self, threshold=0, n_max_plot=20, subject='Unknown',
                      color=(0.9, 0.9, 0.9), width=0.8, ignore=['IGNORED'],
                      show=True):
        """Show the channel stats based on a drop_log from Epochs

        Parameters
        ----------
        threshold : float
            The percentage threshold to use to decide whether or not to
            plot. Default is zero (always plot).
        n_max_plot : int
            Maximum number of channels to show stats for.
        subject : str
            The subject name to use in the title of the plot.
        color : tuple | str
            Color to use for the bars.
        width : float
            Width of the bars.
        ignore : list
            The drop reasons to ignore.
        show : bool
            Show figure if True.

        Returns
        -------
        perc : float
            Total percentage of epochs dropped.
        fig : Instance of matplotlib.figure.Figure
            The figure.
        """
        # NOTE(review): silently returns None (after printing) when bad
        # epochs have not been dropped yet, instead of raising.
        if not self._bad_dropped:
            print("Bad epochs have not yet been dropped.")
            return

        # lazy import to avoid a hard matplotlib dependency at module load
        from .viz import plot_drop_log
        return plot_drop_log(self.drop_log, threshold, n_max_plot, subject,
                             color=color, width=width, ignore=ignore,
                             show=show)
def _check_delayed(self, proj=None):
""" Aux method
"""
is_delayed = False
if proj == 'delayed':
if self.reject is None:
raise RuntimeError('The delayed SSP mode was requested '
'but no rejection parameters are present. '
'Please add rejection parameters before '
'using this option.')
self._delayed_proj = True
is_delayed = True
elif hasattr(self, '_delayed_proj'):
is_delayed = self._delayed_proj
return is_delayed
    @verbose
    def drop_epochs(self, indices, reason='USER', verbose=None):
        """Drop epochs based on indices or boolean mask

        Note that the indices refer to the current set of undropped epochs
        rather than the complete set of dropped and undropped epochs.
        They are therefore not necessarily consistent with any external indices
        (e.g., behavioral logs). To drop epochs based on external criteria,
        do not use the preload=True flag when constructing an Epochs object,
        and call this method before calling the drop_bad_epochs method.

        Parameters
        ----------
        indices : array of ints or bools
            Set epochs to remove by specifying indices to remove or a boolean
            mask to apply (where True values get removed). Events are
            correspondingly modified.
        reason : str
            Reason for dropping the epochs ('ECG', 'timeout', 'blink' etc).
            Default: 'USER'.
        verbose : bool, str, int, or None
            If not None, override default verbose level (see mne.verbose).
            Defaults to raw.verbose.
        """
        indices = np.atleast_1d(indices)

        if indices.ndim > 1:
            raise ValueError("indices must be a scalar or a 1-d array")

        # convert a boolean mask into positional indices
        if indices.dtype == bool:
            indices = np.where(indices)[0]

        out_of_bounds = (indices < 0) | (indices >= len(self.events))
        if out_of_bounds.any():
            first = indices[out_of_bounds][0]
            raise IndexError("Epoch index %d is out of bounds" % first)

        # record the reason against the *original* event positions
        for ii in indices:
            self.drop_log[self.selection[ii]].append(reason)

        self.selection = np.delete(self.selection, indices)
        self.events = np.delete(self.events, indices, axis=0)
        if self.preload:
            self._data = np.delete(self._data, indices, axis=0)

        count = len(indices)
        logger.info('Dropped %d epoch%s' % (count, '' if count == 1 else 's'))
    @verbose
    def _get_epoch_from_disk(self, idx, verbose=None):
        """Load one epoch from disk.

        Returns a 2-element list ``[epoch, epoch_raw]`` where ``epoch`` is
        the preprocessed (and possibly projected) data and ``epoch_raw`` is
        the untouched copy kept for delayed-SSP mode (None when delayed SSP
        is not active). Returns ``(None, None)`` if the requested segment
        starts before the beginning of the raw data.
        """
        # in delayed mode the candidate epoch is always projected first;
        # the rejection decision is made on the projected data
        proj = True if self._check_delayed() else self.proj
        if self.raw is None:
            # This should never happen, as raw=None only if preload=True
            raise ValueError('An error has occurred, no valid raw file found.'
                             ' Please report this to the mne-python '
                             'developers.')
        sfreq = self.raw.info['sfreq']

        if self.events.ndim == 1:
            # single event
            event_samp = self.events[0]
        else:
            event_samp = self.events[idx, 0]

        # Read a data segment (converting event sample to raw-file offset)
        first_samp = self.raw.first_samp
        start = int(round(event_samp + self.tmin * sfreq)) - first_samp
        stop = start + self._epoch_stop
        if start < 0:
            return None, None

        epoch_raw, _ = self.raw[self.picks, start:stop]

        # setup list of epochs to handle delayed SSP
        epochs = []
        # whenever requested, the first epoch is being projected.
        if self._projector is not None and proj is True:
            epochs += [np.dot(self._projector, epoch_raw)]
        else:
            epochs += [epoch_raw]

        # if has delayed SSP append another unprojected epoch
        if self._check_delayed():
            epochs += [epoch_raw.copy()]

        # only preprocess first candidate, to make delayed SSP working
        # we need to postpone the preprocessing since projection comes
        # first.
        epochs[0] = self._preprocess(epochs[0])

        # return a second None if nothing is projected
        if len(epochs) == 1:
            epochs += [None]

        return epochs
    @verbose
    def _get_data_from_disk(self, out=True, verbose=None):
        """Load all data from disk

        Also performs bad-epoch rejection (updating ``drop_log``,
        ``selection`` and ``events``) the first time it runs.

        Parameters
        ----------
        out : bool
            Return the data. Setting this to False is used to reject bad
            epochs without caching all the data, which saves memory.
        verbose : bool, str, int, or None
            If not None, override default verbose level (see mne.verbose).
            Defaults to self.verbose.
        """
        n_events = len(self.events)
        data = np.array([])
        if self._bad_dropped:
            # rejection already done: every remaining epoch is good
            if not out:
                return
            for idx in range(n_events):
                # faster to pre-allocate memory here
                epoch, epoch_raw = self._get_epoch_from_disk(idx)
                if idx == 0:
                    data = np.empty((n_events, epoch.shape[0],
                                     epoch.shape[1]), dtype=epoch.dtype)
                if self._check_delayed():
                    # delayed SSP: keep the unprojected ('virgin') data
                    epoch = epoch_raw
                data[idx] = epoch
        else:
            good_events = []
            n_out = 0
            for idx, sel in zip(range(n_events), self.selection):
                epoch, epoch_raw = self._get_epoch_from_disk(idx)
                is_good, offenders = self._is_good_epoch(epoch)
                if not is_good:
                    self.drop_log[sel] += offenders
                    continue
                good_events.append(idx)
                if self._check_delayed():
                    epoch = epoch_raw
                if out:
                    # faster to pre-allocate, then trim as necessary
                    if n_out == 0:
                        data = np.empty((n_events, epoch.shape[0],
                                         epoch.shape[1]),
                                        dtype=epoch.dtype, order='C')
                    data[n_out] = epoch
                    n_out += 1

            self.selection = self.selection[good_events]
            self.events = np.atleast_2d(self.events[good_events])
            self._bad_dropped = True
            logger.info("%d bad epochs dropped"
                        % (n_events - len(good_events)))
            if not out:
                return
            # just take the good events
            assert len(good_events) == n_out
            if n_out > 0:
                # slicing won't free the space, so we resize
                # we have ensured the C-contiguity of the array in allocation
                # so this operation will be safe unless np is very broken
                data.resize((n_out,) + data.shape[1:], refcheck=False)

        return data
def get_data(self):
"""Get all epochs as a 3D array
Returns
-------
data : array of shape (n_epochs, n_channels, n_times)
The epochs data
"""
if self.preload:
data_ = self._data
else:
data_ = self._get_data_from_disk()
if self._check_delayed():
data = np.zeros_like(data_)
for ii, e in enumerate(data_):
data[ii] = self._preprocess(e.copy(), self.verbose)
else:
data = data_
return data
def __len__(self):
"""Number of epochs.
"""
if not self._bad_dropped:
err = ("Since bad epochs have not been dropped, the length of the "
"Epochs is not known. Load the Epochs with preload=True, "
"or call Epochs.drop_bad_epochs(). To find the number of "
"events in the Epochs, use len(Epochs.events).")
raise RuntimeError(err)
return len(self.events)
    def __iter__(self):
        """To make iteration over epochs easy.
        """
        # reset the cursor used by next()
        self._current = 0
        return self
def next(self, return_event_id=False):
"""To make iteration over epochs easy.
Parameters
----------
return_event_id : bool
If True, return both an epoch and and event_id.
Returns
-------
epoch : instance of Epochs
The epoch.
event_id : int
The event id. Only returned if ``return_event_id`` is ``True``.
"""
if self.preload:
if self._current >= len(self._data):
raise StopIteration
epoch = self._data[self._current]
if self._check_delayed():
epoch = self._preprocess(epoch.copy(), self.verbose)
self._current += 1
else:
is_good = False
while not is_good:
if self._current >= len(self.events):
raise StopIteration
epoch, epoch_raw = self._get_epoch_from_disk(self._current)
self._current += 1
is_good, _ = self._is_good_epoch(epoch)
# If delayed-ssp mode, pass 'virgin' data after rejection decision.
if self._check_delayed():
epoch = self._preprocess(epoch_raw, self.verbose)
if not return_event_id:
return epoch
else:
return epoch, self.events[self._current - 1][-1]
return epoch if not return_event_id else epoch, self.event_id
def __repr__(self):
""" Build string representation
"""
if not self._bad_dropped:
s = 'n_events : %s (good & bad)' % len(self.events)
else:
s = 'n_events : %s (all good)' % len(self.events)
s += ', tmin : %s (s)' % self.tmin
s += ', tmax : %s (s)' % self.tmax
s += ', baseline : %s' % str(self.baseline)
if len(self.event_id) > 1:
counts = ['%r: %i' % (k, sum(self.events[:, 2] == v))
for k, v in sorted(self.event_id.items())]
s += ',\n %s' % ', '.join(counts)
return '<%s | %s>' % (self.__class__.__name__, s)
def _key_match(self, key):
"""Helper function for event dict use"""
if key not in self.event_id:
raise KeyError('Event "%s" is not in Epochs.' % key)
return self.events[:, 2] == self.event_id[key]
    def __getitem__(self, key):
        """Return an Epochs object with a subset of epochs

        ``key`` may be an int/slice/index-array, a condition name, or a list
        of condition names (with '/'-tag partial matching; see class notes).
        """
        # temporarily detach the data so copy() does not deep-copy it;
        # both objects then share the same underlying array
        data = self._data
        del self._data
        epochs = self.copy()
        self._data, epochs._data = data, data

        if isinstance(key, string_types):
            key = [key]

        if isinstance(key, (list, tuple)) and isinstance(key[0], string_types):
            if any('/' in k_i for k_i in epochs.event_id.keys()):
                if any(k_e not in epochs.event_id for k_e in key):
                    # Select a given key if the requested set of
                    # '/'-separated types are a subset of the types in that key
                    key = [k for k in epochs.event_id.keys()
                           if all(set(k_i.split('/')).issubset(k.split('/'))
                                  for k_i in key)]
                    if len(key) == 0:
                        raise KeyError('Attempting selection of events via '
                                       'multiple/partial matching, but no '
                                       'event matches all criteria.')
            # an epoch is kept if it matches any of the requested keys
            select = np.any(np.atleast_2d([epochs._key_match(k)
                                           for k in key]), axis=0)
            epochs.name = '+'.join(key)
        else:
            select = key if isinstance(key, slice) else np.atleast_1d(key)

        # mark deselected epochs as IGNORED in the (full-length) drop log
        key_selection = epochs.selection[select]
        for k in np.setdiff1d(epochs.selection, key_selection):
            epochs.drop_log[k] = ['IGNORED']
        epochs.selection = key_selection
        epochs.events = np.atleast_2d(epochs.events[select])

        if epochs.preload:
            epochs._data = epochs._data[select]

        # update event id to reflect new content of epochs
        epochs.event_id = dict((k, v) for k, v in epochs.event_id.items()
                               if v in epochs.events[:, 2])
        return epochs
def crop(self, tmin=None, tmax=None, copy=False):
"""Crops a time interval from epochs object.
Parameters
----------
tmin : float | None
Start time of selection in seconds.
tmax : float | None
End time of selection in seconds.
copy : bool
If False epochs is cropped in place.
Returns
-------
epochs : Epochs instance
The cropped epochs.
Notes
-----
Unlike Python slices, MNE time intervals include both their end points;
crop(tmin, tmax) returns the interval tmin <= t <= tmax.
"""
if not self.preload:
raise RuntimeError('Modifying data of epochs is only supported '
'when preloading is used. Use preload=True '
'in the constructor.')
if tmin is None:
tmin = self.tmin
elif tmin < self.tmin:
warnings.warn("tmin is not in epochs' time interval."
"tmin is set to epochs.tmin")
tmin = self.tmin
if tmax is None:
tmax = self.tmax
elif tmax > self.tmax:
warnings.warn("tmax is not in epochs' time interval."
"tmax is set to epochs.tmax")
tmax = self.tmax
tmask = _time_mask(self.times, tmin, tmax)
tidx = np.where(tmask)[0]
this_epochs = self if not copy else self.copy()
this_epochs.tmin = this_epochs.times[tidx[0]]
this_epochs.tmax = this_epochs.times[tidx[-1]]
this_epochs.times = this_epochs.times[tmask]
this_epochs._data = this_epochs._data[:, :, tmask]
return this_epochs
    @verbose
    def resample(self, sfreq, npad=100, window='boxcar', n_jobs=1,
                 verbose=None):
        """Resample preloaded data

        Operates in place: updates ``_data``, ``info['sfreq']`` and
        ``times``.

        Parameters
        ----------
        sfreq : float
            New sample rate to use
        npad : int
            Amount to pad the start and end of the data.
        window : string or tuple
            Window to use in resampling. See scipy.signal.resample.
        n_jobs : int
            Number of jobs to run in parallel.
        verbose : bool, str, int, or None
            If not None, override default verbose level (see mne.verbose).
            Defaults to self.verbose.

        Notes
        -----
        For some data, it may be more accurate to use npad=0 to reduce
        artifacts. This is dataset dependent -- check your data!
        """
        # NOTE(review): the `window` argument is accepted but not forwarded
        # to resample() here -- confirm whether this is intentional.
        if self.preload:
            o_sfreq = self.info['sfreq']
            self._data = resample(self._data, sfreq, o_sfreq, npad,
                                  n_jobs=n_jobs)
            # adjust indirectly affected variables
            self.info['sfreq'] = sfreq
            self.times = (np.arange(self._data.shape[2], dtype=np.float) /
                          sfreq + self.times[0])
        else:
            raise RuntimeError('Can only resample preloaded data')
    def copy(self):
        """Return copy of Epochs instance"""
        # temporarily detach the raw object so it is not deep-copied;
        # the copy shares the same Raw handle as the original
        raw = self.raw
        del self.raw
        new = cp.deepcopy(self)
        self.raw = raw
        new.raw = raw
        return new
    def save(self, fname):
        """Save epochs in a fif file

        Parameters
        ----------
        fname : str
            The name of the file, which should end with -epo.fif or
            -epo.fif.gz.
        """
        check_fname(fname, 'epochs', ('-epo.fif', '-epo.fif.gz'))

        # Create the file and save the essentials
        fid = start_file(fname)

        start_block(fid, FIFF.FIFFB_MEAS)
        write_id(fid, FIFF.FIFF_BLOCK_ID)
        if self.info['meas_id'] is not None:
            write_id(fid, FIFF.FIFF_PARENT_BLOCK_ID, self.info['meas_id'])

        # Write measurement info
        write_meas_info(fid, self.info)

        # One or more evoked data sets
        start_block(fid, FIFF.FIFFB_PROCESSED_DATA)
        start_block(fid, FIFF.FIFFB_EPOCHS)

        # write events out after getting data to ensure bad events are dropped
        data = self.get_data()
        start_block(fid, FIFF.FIFFB_MNE_EVENTS)
        write_int(fid, FIFF.FIFF_MNE_EVENT_LIST, self.events.T)
        mapping_ = ';'.join([k + ':' + str(v) for k, v in
                             self.event_id.items()])
        write_string(fid, FIFF.FIFF_DESCRIPTION, mapping_)
        end_block(fid, FIFF.FIFFB_MNE_EVENTS)

        # First and last sample
        first = int(self.times[0] * self.info['sfreq'])
        last = first + len(self.times) - 1
        write_int(fid, FIFF.FIFF_FIRST_SAMPLE, first)
        write_int(fid, FIFF.FIFF_LAST_SAMPLE, last)

        # save baseline
        if self.baseline is not None:
            bmin, bmax = self.baseline
            bmin = self.times[0] if bmin is None else bmin
            bmax = self.times[-1] if bmax is None else bmax
            write_float(fid, FIFF.FIFF_MNE_BASELINE_MIN, bmin)
            write_float(fid, FIFF.FIFF_MNE_BASELINE_MAX, bmax)

        # The epochs itself: undo the per-channel calibration before writing
        decal = np.empty(self.info['nchan'])
        for k in range(self.info['nchan']):
            decal[k] = 1.0 / (self.info['chs'][k]['cal'] *
                              self.info['chs'][k].get('scale', 1.0))

        data *= decal[np.newaxis, :, np.newaxis]

        write_float_matrix(fid, FIFF.FIFF_EPOCH, data)

        # undo modifications to data
        data /= decal[np.newaxis, :, np.newaxis]
        write_string(fid, FIFF.FIFFB_MNE_EPOCHS_DROP_LOG,
                     json.dumps(self.drop_log))
        write_int(fid, FIFF.FIFFB_MNE_EPOCHS_SELECTION,
                  self.selection)
        end_block(fid, FIFF.FIFFB_EPOCHS)

        end_block(fid, FIFF.FIFFB_PROCESSED_DATA)
        end_block(fid, FIFF.FIFFB_MEAS)
        end_file(fid)
    def equalize_event_counts(self, event_ids, method='mintime', copy=True):
        """Equalize the number of trials in each condition

        It tries to make the remaining epochs occurring as close as possible in
        time. This method works based on the idea that if there happened to be
        some time-varying (like on the scale of minutes) noise characteristics
        during a recording, they could be compensated for (to some extent) in
        the equalization process. This method thus seeks to reduce any of
        those effects by minimizing the differences in the times of the events
        in the two sets of epochs. For example, if one had event times
        [1, 2, 3, 4, 120, 121] and the other one had [3.5, 4.5, 120.5, 121.5],
        it would remove events at times [1, 2] in the first epochs and not
        [120, 121].

        Parameters
        ----------
        event_ids : list
            The event types to equalize. Each entry in the list can either be
            a str (single event) or a list of str. In the case where one of
            the entries is a list of str, event_ids in that list will be
            grouped together before equalizing trial counts across conditions.
        method : str
            If 'truncate', events will be truncated from the end of each event
            list. If 'mintime', timing differences between each event list will
            be minimized.
        copy : bool
            If True, a copy of epochs will be returned. Otherwise, the
            function will operate in-place.

        Returns
        -------
        epochs : instance of Epochs
            The modified Epochs instance.
        indices : array of int
            Indices from the original events list that were dropped.

        Notes
        -----
        For example (if epochs.event_id was {'Left': 1, 'Right': 2,
        'Nonspatial':3}:

            epochs.equalize_event_counts([['Left', 'Right'], 'Nonspatial'])

        would equalize the number of trials in the 'Nonspatial' condition with
        the total number of trials in the 'Left' and 'Right' conditions.
        """
        if copy is True:
            epochs = self.copy()
        else:
            epochs = self
        if len(event_ids) == 0:
            raise ValueError('event_ids must have at least one element')
        if not epochs._bad_dropped:
            epochs.drop_bad_epochs()
        # figure out how to equalize
        eq_inds = list()
        for eq in event_ids:
            eq = np.atleast_1d(eq)
            # eq is now a list of types
            key_match = np.zeros(epochs.events.shape[0])
            for key in eq:
                key_match = np.logical_or(key_match, epochs._key_match(key))
            eq_inds.append(np.where(key_match)[0])

        event_times = [epochs.events[e, 0] for e in eq_inds]
        indices = _get_drop_indices(event_times, method)
        # need to re-index indices
        indices = np.concatenate([e[idx] for e, idx in zip(eq_inds, indices)])
        # NOTE(review): the drop reason is 'EQUALIZED_COUNT' (singular) --
        # keep in sync with any documentation describing drop_log reasons.
        epochs.drop_epochs(indices, reason='EQUALIZED_COUNT')
        # actually remove the indices
        return epochs, indices
class EpochsArray(Epochs):
    """Epochs object from numpy array

    Parameters
    ----------
    data : array, shape (n_epochs, n_channels, n_times)
        The channels' time series for each epoch.
    info : instance of Info
        Info dictionary. Consider using ``create_info`` to populate
        this structure.
    events : array, shape (n_events, 3)
        The events typically returned by the read_events function.
        If some events don't match the events of interest as specified
        by event_id, they will be marked as 'IGNORED' in the drop log.
    tmin : float
        Start time before event.
    event_id : int | list of int | dict | None
        The id of the event to consider. If dict,
        the keys can later be used to access associated events. Example:
        dict(auditory=1, visual=3). If int, a dict will be created with
        the id as string. If a list, all events with the IDs specified
        in the list are used. If None, all events will be used with
        and a dict is created with string integer names corresponding
        to the event id integers.
    reject : dict | None
        Rejection parameters based on peak-to-peak amplitude.
        Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'.
        If reject is None then no rejection is done. Example::

            reject = dict(grad=4000e-13, # T / m (gradiometers)
                          mag=4e-12, # T (magnetometers)
                          eeg=40e-6, # uV (EEG channels)
                          eog=250e-6 # uV (EOG channels)
                          )

    flat : dict | None
        Rejection parameters based on flatness of signal.
        Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg', and values
        are floats that set the minimum acceptable peak-to-peak amplitude.
        If flat is None then no rejection is done.
    reject_tmin : scalar | None
        Start of the time window used to reject epochs (with the default None,
        the window will start with tmin).
    reject_tmax : scalar | None
        End of the time window used to reject epochs (with the default None,
        the window will end with tmax).
    baseline : None or tuple of length 2 (default: None)
        The time interval to apply baseline correction.
        If None do not apply it. If baseline is (a, b)
        the interval is between "a (s)" and "b (s)".
        If a is None the beginning of the data is used
        and if b is None then b is set to the end of the interval.
        If baseline is equal to (None, None) all the time
        interval is used.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).
        Defaults to raw.verbose.
    """

    @verbose
    def __init__(self, data, info, events, tmin=0, event_id=None,
                 reject=None, flat=None, reject_tmin=None,
                 reject_tmax=None, baseline=None, verbose=None):
        dtype = np.complex128 if np.any(np.iscomplex(data)) else np.float64
        data = np.asanyarray(data, dtype=dtype)

        if data.ndim != 3:
            raise ValueError('Data must be a 3D array of shape (n_epochs, '
                             'n_channels, n_samples)')

        if len(info['ch_names']) != np.shape(data)[1]:
            raise ValueError('Info and data must have same number of '
                             'channels.')

        self.info = info
        self._data = data
        if event_id is None:  # convert to int to make typing-checks happy
            event_id = dict((str(e), int(e)) for e in np.unique(events[:, 2]))
        self.event_id = event_id
        self.events = events

        for key, val in self.event_id.items():
            if val not in events[:, 2]:
                msg = ('No matching events found for %s '
                       '(event id %i)' % (key, val))
                raise ValueError(msg)

        self.baseline = baseline
        self.preload = True
        self.reject = None
        self.decim = 1
        self._decim_idx = slice(0, data.shape[-1], self.decim)
        self.raw = None
        self.drop_log = [[] for _ in range(len(events))]
        self._bad_dropped = True
        self.selection = np.arange(len(events))
        self.picks = None
        self.times = (np.arange(data.shape[-1], dtype=np.float) /
                      info['sfreq'] + tmin)
        self.tmin = self.times[0]
        self.tmax = self.times[-1]
        self.verbose = verbose
        self.name = 'Unknown'
        self._projector = None
        self.reject = reject
        self.flat = flat
        self.reject_tmin = reject_tmin
        self.reject_tmax = reject_tmax
        self._reject_setup()

        # run amplitude-based rejection on the supplied epochs, if requested
        drop_inds = list()
        if self.reject is not None or self.flat is not None:
            for i_epoch, epoch in enumerate(self):
                is_good, chan = self._is_good_epoch(epoch,
                                                   verbose=self.verbose)
                if not is_good:
                    drop_inds.append(i_epoch)
                    self.drop_log[i_epoch].extend(chan)
        if drop_inds:
            select = np.ones(len(events), dtype=np.bool)
            select[drop_inds] = False
            self.events = self.events[select]
            self._data = self._data[select]
            # fix: this was previously a bare `self.selection[select]`
            # expression (a no-op), leaving `selection` stale after drops
            self.selection = self.selection[select]
        if baseline is not None:
            rescale(self._data, self.times, baseline, mode='mean', copy=False)
def combine_event_ids(epochs, old_event_ids, new_event_id, copy=True):
    """Collapse event_ids from an epochs instance into a new event_id

    Parameters
    ----------
    epochs : instance of Epochs
        The epochs to operate on.
    old_event_ids : str, or list
        Conditions to collapse together.
    new_event_id : dict, or int
        A one-element dict (or a single integer) for the new
        condition. Note that for safety, this cannot be any
        existing id (in epochs.event_id.values()).
    copy : bool
        If True, a copy of epochs will be returned. Otherwise, the
        function will operate in-place.

    Returns
    -------
    epochs : instance of Epochs
        The modified epochs (a copy if ``copy`` is True).

    Notes
    -----
    For example (if epochs.event_id was {'Left': 1, 'Right': 2}:

        combine_event_ids(epochs, ['Left', 'Right'], {'Directional': 12})

    would create a 'Directional' entry in epochs.event_id replacing
    'Left' and 'Right' (combining their trials).
    """
    if copy:
        epochs = epochs.copy()
    old_event_ids = np.asanyarray(old_event_ids)
    if isinstance(new_event_id, int):
        new_event_id = {str(new_event_id): new_event_id}
    else:
        if not isinstance(new_event_id, dict):
            raise ValueError('new_event_id must be a dict or int')
        if not len(list(new_event_id.keys())) == 1:
            raise ValueError('new_event_id dict must have one entry')
    new_event_num = list(new_event_id.values())[0]
    if not isinstance(new_event_num, int):
        raise ValueError('new_event_id value must be an integer')
    if new_event_num in epochs.event_id.values():
        raise ValueError('new_event_id value must not already exist')
    # could use .pop() here, but if a latter one doesn't exist, we're
    # in trouble, so run them all here and pop() later
    old_event_nums = np.array([epochs.event_id[key] for key in old_event_ids])
    # find the ones to replace
    inds = np.any(epochs.events[:, 2][:, np.newaxis] ==
                  old_event_nums[np.newaxis, :], axis=1)
    # replace the event numbers in the events list
    epochs.events[inds, 2] = new_event_num
    # delete old entries (plain loop; a list comprehension run only for its
    # side effects was a readability/perf smell)
    for key in old_event_ids:
        epochs.event_id.pop(key)
    # add the new entry
    epochs.event_id.update(new_event_id)
    return epochs
def equalize_epoch_counts(epochs_list, method='mintime'):
    """Equalize the number of trials in multiple Epoch instances

    Drops trials so that every instance in ``epochs_list`` retains the same
    number of epochs. With ``method='mintime'`` the retained epochs are
    chosen to occur as close as possible in time across instances, so that
    slowly time-varying noise characteristics (on the scale of minutes)
    affect all instances similarly. For example, with event times
    [1, 2, 3, 4, 120, 121] in one instance and [3.5, 4.5, 120.5, 121.5] in
    the other, the events at times [1, 2] would be removed rather than
    those at [120, 121]. With ``method='truncate'`` trials are simply cut
    from the end of each event list.

    Note that this operates on the Epochs instances in-place.

    Example:

        equalize_epoch_counts(epochs1, epochs2)

    Parameters
    ----------
    epochs_list : list of Epochs instances
        The Epochs instances to equalize trial counts for.
    method : str
        If 'truncate', events will be truncated from the end of each event
        list. If 'mintime', timing differences between each event list will
        be minimized.
    """
    for inst in epochs_list:
        if not isinstance(inst, Epochs):
            raise ValueError('All inputs must be Epochs instances')
    # make sure bad epochs are dropped before counting events
    for inst in epochs_list:
        if not inst._bad_dropped:
            inst.drop_bad_epochs()
    sample_points = [inst.events[:, 0] for inst in epochs_list]
    drop_inds = _get_drop_indices(sample_points, method)
    for inst, inds in zip(epochs_list, drop_inds):
        inst.drop_epochs(inds, reason='EQUALIZED_COUNT')
def _get_drop_indices(event_times, method):
"""Helper to get indices to drop from multiple event timing lists"""
small_idx = np.argmin([e.shape[0] for e in event_times])
small_e_times = event_times[small_idx]
if method not in ['mintime', 'truncate']:
raise ValueError('method must be either mintime or truncate, not '
'%s' % method)
indices = list()
for e in event_times:
if method == 'mintime':
mask = _minimize_time_diff(small_e_times, e)
else:
mask = np.ones(e.shape[0], dtype=bool)
mask[small_e_times.shape[0]:] = False
indices.append(np.where(np.logical_not(mask))[0])
return indices
def _minimize_time_diff(t_shorter, t_longer):
"""Find a boolean mask to minimize timing differences"""
keep = np.ones((len(t_longer)), dtype=bool)
scores = np.ones((len(t_longer)))
for iter in range(len(t_longer) - len(t_shorter)):
scores.fill(np.inf)
# Check every possible removal to see if it minimizes
for idx in np.where(keep)[0]:
keep[idx] = False
scores[idx] = _area_between_times(t_shorter, t_longer[keep])
keep[idx] = True
keep[np.argmin(scores)] = False
return keep
def _area_between_times(t1, t2):
"""Quantify the difference between two timing sets"""
x1 = list(range(len(t1)))
x2 = list(range(len(t2)))
xs = np.concatenate((x1, x2))
return np.sum(np.abs(np.interp(xs, x1, t1) - np.interp(xs, x2, t2)))
@verbose
def _is_good(e, ch_names, channel_type_idx, reject, flat, full_report=False,
             ignore_chs=[], verbose=None):
    """Test if data segment e is good according to the criteria
    defined in reject and flat. If full_report=True, it will give
    True/False as well as a list of all offending channels.
    """
    # NOTE(review): ignore_chs=[] is a mutable default argument; it is only
    # read here, never mutated, so this is safe but fragile.
    bad_list = list()
    has_printed = False
    # channels in ignore_chs are excluded from the rejection checks
    checkable = np.ones(len(ch_names), dtype=bool)
    checkable[np.array([c in ignore_chs
                        for c in ch_names], dtype=bool)] = False
    # two passes: peak-to-peak above the `reject` thresholds (np.greater),
    # then below the `flat` thresholds (np.less)
    for refl, f, t in zip([reject, flat], [np.greater, np.less], ['', 'flat']):
        if refl is not None:
            for key, thresh in iteritems(refl):
                idx = channel_type_idx[key]
                name = key.upper()
                if len(idx) > 0:
                    e_idx = e[idx]
                    # peak-to-peak amplitude per channel
                    deltas = np.max(e_idx, axis=1) - np.min(e_idx, axis=1)
                    checkable_idx = checkable[idx]
                    # channels whose delta crosses the threshold AND that
                    # are allowed to trigger rejection
                    idx_deltas = np.where(np.logical_and(f(deltas, thresh),
                                                         checkable_idx))[0]
                    if len(idx_deltas) > 0:
                        ch_name = [ch_names[idx[i]] for i in idx_deltas]
                        # log only the first offending group
                        if (not has_printed):
                            logger.info(' Rejecting %s epoch based on %s : '
                                        '%s' % (t, name, ch_name))
                            has_printed = True
                        if not full_report:
                            # early exit: one bad channel is enough
                            return False
                        else:
                            bad_list.extend(ch_name)
    if not full_report:
        return True
    else:
        # full report: (is_good, offending channel names or None)
        if bad_list == []:
            return True, None
        else:
            return False, bad_list
@verbose
def read_epochs(fname, proj=True, add_eeg_ref=True, verbose=None):
    """Read epochs from a fif file

    Parameters
    ----------
    fname : str
        The name of the file, which should end with -epo.fif or -epo.fif.gz.
    proj : bool | 'delayed'
        Apply SSP projection vectors. If proj is 'delayed' and reject is not
        None the single epochs will be projected before the rejection
        decision, but used in unprojected state if they are kept.
        This way deciding which projection vectors are good can be postponed
        to the evoked stage without resulting in lower epoch counts and
        without producing results different from early SSP application
        given comparable parameters. Note that in this case baselining,
        detrending and temporal decimation will be postponed.
        If proj is False no projections will be applied which is the
        recommended value if SSPs are not used for cleaning the data.
    add_eeg_ref : bool
        If True, an EEG average reference will be added (unless one
        already exists).
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).
        Defaults to raw.verbose.

    Returns
    -------
    epochs : instance of Epochs
        The epochs
    """
    check_fname(fname, 'epochs', ('-epo.fif', '-epo.fif.gz'))
    # build an empty shell Epochs; all attributes are filled in below
    epochs = Epochs(None, None, None, None, None)
    logger.info('Reading %s ...' % fname)
    fid, tree, _ = fiff_open(fname)
    # Read the measurement info
    info, meas = read_meas_info(fid, tree)
    info['filename'] = fname
    events, mappings = _read_events_fif(fid, tree)
    # Locate the data of interest
    processed = dir_tree_find(meas, FIFF.FIFFB_PROCESSED_DATA)
    if len(processed) == 0:
        fid.close()
        raise ValueError('Could not find processed data')
    epochs_node = dir_tree_find(tree, FIFF.FIFFB_EPOCHS)
    if len(epochs_node) == 0:
        fid.close()
        raise ValueError('Could not find epochs data')
    my_epochs = epochs_node[0]
    # Now find the data in the block: walk the directory entries and pick
    # out the tags we care about
    comment = None
    data = None
    bmin, bmax = None, None
    baseline = None
    selection = None
    drop_log = None
    for k in range(my_epochs['nent']):
        kind = my_epochs['directory'][k].kind
        pos = my_epochs['directory'][k].pos
        if kind == FIFF.FIFF_FIRST_SAMPLE:
            tag = read_tag(fid, pos)
            first = int(tag.data)
        elif kind == FIFF.FIFF_LAST_SAMPLE:
            tag = read_tag(fid, pos)
            last = int(tag.data)
        elif kind == FIFF.FIFF_COMMENT:
            tag = read_tag(fid, pos)
            comment = tag.data
        elif kind == FIFF.FIFF_EPOCH:
            tag = read_tag(fid, pos)
            data = tag.data.astype(np.float)
        elif kind == FIFF.FIFF_MNE_BASELINE_MIN:
            tag = read_tag(fid, pos)
            bmin = float(tag.data)
        elif kind == FIFF.FIFF_MNE_BASELINE_MAX:
            tag = read_tag(fid, pos)
            bmax = float(tag.data)
        elif kind == FIFF.FIFFB_MNE_EPOCHS_SELECTION:
            tag = read_tag(fid, pos)
            selection = np.array(tag.data)
        elif kind == FIFF.FIFFB_MNE_EPOCHS_DROP_LOG:
            tag = read_tag(fid, pos)
            drop_log = json.loads(tag.data)
    if bmin is not None or bmax is not None:
        baseline = (bmin, bmax)
    # NOTE(review): `first` and `last` are only bound if the corresponding
    # FIFF tags were present; a malformed file raises NameError here rather
    # than a descriptive error.
    nsamp = last - first + 1
    logger.info(' Found the data of interest:')
    logger.info(' t = %10.2f ... %10.2f ms (%s)'
                % (1000 * first / info['sfreq'],
                   1000 * last / info['sfreq'], comment))
    if info['comps'] is not None:
        logger.info(' %d CTF compensation matrices available'
                    % len(info['comps']))
    # Read the data
    if data is None:
        # NOTE(review): fid is not closed on this error path, unlike the
        # other error paths above/below -- possible file-handle leak.
        raise ValueError('Epochs data not found')
    if data.shape[2] != nsamp:
        fid.close()
        raise ValueError('Incorrect number of samples (%d instead of %d)'
                         % (data.shape[2], nsamp))
    # Calibrate: scale raw values to physical units per channel
    cals = np.array([info['chs'][k]['cal'] * info['chs'][k].get('scale', 1.0)
                     for k in range(info['nchan'])])
    data *= cals[np.newaxis, :, np.newaxis]
    times = np.arange(first, last + 1, dtype=np.float) / info['sfreq']
    tmin = times[0]
    tmax = times[-1]
    # Put it all together
    epochs.preload = True
    epochs.raw = None
    epochs.picks = np.arange(data.shape[1])
    epochs._bad_dropped = True
    epochs.events = events
    epochs._data = data
    epochs.info = info
    epochs.tmin = tmin
    epochs.tmax = tmax
    epochs.name = comment
    epochs.times = times
    # NOTE(review): redundant -- _data was already assigned above
    epochs._data = data
    activate = False if epochs._check_delayed() else proj
    epochs._projector, epochs.info = setup_proj(info, add_eeg_ref,
                                                activate=activate)
    epochs.baseline = baseline
    # fall back to stringified event numbers when no mapping was stored
    epochs.event_id = (dict((str(e), e) for e in np.unique(events[:, 2]))
                       if mappings is None else mappings)
    epochs.verbose = verbose
    # In case epochs didn't have a FIFF.FIFFB_MNE_EPOCHS_SELECTION tag
    # (version < 0.8):
    if selection is None:
        selection = np.arange(len(epochs))
    if drop_log is None:
        drop_log = [[] for _ in range(len(epochs))]  # noqa, analysis:ignore
    epochs.selection = selection
    epochs.drop_log = drop_log
    fid.close()
    return epochs
def bootstrap(epochs, random_state=None):
    """Compute epochs selected by bootstrapping

    Draws ``len(epochs)`` trials with replacement from the input.

    Parameters
    ----------
    epochs : Epochs instance
        epochs data to be bootstrapped
    random_state : None | int | np.random.RandomState
        To specify the random generator state

    Returns
    -------
    epochs : Epochs instance
        The bootstrap samples
    """
    if not epochs.preload:
        raise RuntimeError('Modifying data of epochs is only supported '
                           'when preloading is used. Use preload=True '
                           'in the constructor.')
    rng = check_random_state(random_state)
    resampled = epochs.copy()
    n_events = len(resampled.events)
    draws = rng.randint(0, n_events, n_events)
    return resampled[draws]
def _check_merge_epochs(epochs_list):
"""Aux function"""
event_ids = set(tuple(epochs.event_id.items()) for epochs in epochs_list)
if len(event_ids) == 1:
event_id = dict(event_ids.pop())
else:
raise NotImplementedError("Epochs with unequal values for event_id")
tmins = set(epochs.tmin for epochs in epochs_list)
if len(tmins) == 1:
tmin = tmins.pop()
else:
raise NotImplementedError("Epochs with unequal values for tmin")
tmaxs = set(epochs.tmax for epochs in epochs_list)
if len(tmaxs) == 1:
tmax = tmaxs.pop()
else:
raise NotImplementedError("Epochs with unequal values for tmax")
baselines = set(epochs.baseline for epochs in epochs_list)
if len(baselines) == 1:
baseline = baselines.pop()
else:
raise NotImplementedError("Epochs with unequal values for baseline")
return event_id, tmin, tmax, baseline
@verbose
def add_channels_epochs(epochs_list, name='Unknown', add_eeg_ref=True,
                        verbose=None):
    """Concatenate channels, info and data from two Epochs objects

    Parameters
    ----------
    epochs_list : list of Epochs
        Epochs object to concatenate.
    name : str
        Comment that describes the Evoked data created.
    add_eeg_ref : bool
        If True, an EEG average reference will be added (unless there is no
        EEG in the data).
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).
        Defaults to True if any of the input epochs have verbose=True.

    Returns
    -------
    epochs : Epochs
        Concatenated epochs.
    """
    if not all(e.preload for e in epochs_list):
        raise ValueError('All epochs must be preloaded.')
    # merge channel-level metadata from all inputs
    info = _merge_info([epochs.info for epochs in epochs_list])
    data = [epochs.get_data() for epochs in epochs_list]
    # inputs must describe identical trials/timing to merge channels
    event_id, tmin, tmax, baseline = _check_merge_epochs(epochs_list)
    for d in data:
        if len(d) != len(data[0]):
            raise ValueError('all epochs must be of the same length')
    # stack along the channel axis (epochs x channels x times)
    data = np.concatenate(data, axis=1)
    if len(info['chs']) != data.shape[1]:
        err = "Data shape does not match channel number in measurement info"
        raise RuntimeError(err)
    events = epochs_list[0].events.copy()
    all_same = all(np.array_equal(events, epochs.events)
                   for epochs in epochs_list[1:])
    if not all_same:
        raise ValueError('Events must be the same.')
    # keep projections active if any input had them active
    proj = any(e.proj for e in epochs_list) or add_eeg_ref
    if verbose is None:
        verbose = any(e.verbose for e in epochs_list)
    # reuse the first input as a template and overwrite its attributes
    epochs = epochs_list[0].copy()
    epochs.info = info
    epochs.event_id = event_id
    epochs.tmin = tmin
    epochs.tmax = tmax
    epochs.baseline = baseline
    epochs.picks = None
    epochs.name = name
    epochs.verbose = verbose
    epochs.events = events
    epochs.preload = True
    epochs._bad_dropped = True
    epochs._data = data
    epochs._projector, epochs.info = setup_proj(epochs.info, add_eeg_ref,
                                                activate=proj)
    return epochs
def _compare_epochs_infos(info1, info2, ind):
"""Compare infos"""
if not info1['nchan'] == info2['nchan']:
raise ValueError('epochs[%d][\'info\'][\'nchan\'] must match' % ind)
if not info1['bads'] == info2['bads']:
raise ValueError('epochs[%d][\'info\'][\'bads\'] must match' % ind)
if not info1['sfreq'] == info2['sfreq']:
raise ValueError('epochs[%d][\'info\'][\'sfreq\'] must match' % ind)
if not set(info1['ch_names']) == set(info2['ch_names']):
raise ValueError('epochs[%d][\'info\'][\'ch_names\'] must match' % ind)
if len(info2['projs']) != len(info1['projs']):
raise ValueError('SSP projectors in epochs files must be the same')
if not all(_proj_equal(p1, p2) for p1, p2 in
zip(info2['projs'], info1['projs'])):
raise ValueError('SSP projectors in epochs files must be the same')
def concatenate_epochs(epochs_list):
    """Concatenate a list of epochs into one epochs object

    Parameters
    ----------
    epochs_list : list
        list of Epochs instances to concatenate (in order).

    Returns
    -------
    epochs : instance of Epochs
        The result of the concatenation (first Epochs instance passed in).

    Notes
    -----
    .. versionadded:: 0.9.0
    """
    out = epochs_list[0]
    data = [out.get_data()]
    events = [out.events]
    drop_log = cp.deepcopy(out.drop_log)
    event_id = cp.deepcopy(out.event_id)
    # start=1 so that error messages report the real position within
    # epochs_list (previously the second element was reported as epochs[0])
    for ii, epochs in enumerate(epochs_list[1:], 1):
        _compare_epochs_infos(epochs.info, epochs_list[0].info, ii)
        if not np.array_equal(epochs.times, epochs_list[0].times):
            raise ValueError('Epochs must have same times')
        data.append(epochs.get_data())
        events.append(epochs.events)
        drop_log.extend(epochs.drop_log)
        event_id.update(epochs.event_id)
    events = np.concatenate(events, axis=0)
    events[:, 0] = np.arange(len(events))  # arbitrary after concat
    out = EpochsArray(
        data=np.concatenate(data, axis=0), info=out.info,
        events=events,
        event_id=event_id, tmin=out.tmin)
    out.raw = None
    out.preload = True
    out.drop_log = drop_log
    return out
| bsd-3-clause |
ual/urbansim | urbansim/developer/developer.py | 1 | 10647 | import pandas as pd
import numpy as np
class Developer(object):
    """
    Pass the dataframe that is returned by feasibility here

    Can also be a dictionary where keys are building forms and values are
    the individual data frames returned by the proforma lookup routine.
    """

    def __init__(self, feasibility):
        if isinstance(feasibility, dict):
            # list() guards against dict views; keys align the column level
            feasibility = pd.concat(list(feasibility.values()),
                                    keys=list(feasibility.keys()), axis=1)
        self.feasibility = feasibility

    @staticmethod
    def _max_form(f, colname):
        """
        Assumes dataframe with hierarchical columns with first index equal to the
        use and second index equal to the attribute.

        e.g. f.columns equal to::

            mixedoffice   building_cost
                          building_revenue
                          building_size
                          max_profit
                          max_profit_far
                          total_cost
            industrial    building_cost
                          building_revenue
                          building_size
                          max_profit
                          max_profit_far
                          total_cost

        Returns, per parcel, the form (first-level column) whose ``colname``
        value is largest.
        """
        df = f.stack(level=0)[[colname]].stack().unstack(level=1).reset_index(level=1, drop=True)
        return df.idxmax(axis=1)

    def keep_form_with_max_profit(self, forms=None):
        """
        This converts the dataframe, which shows all profitable forms,
        to the form with the greatest profit, so that more profitable
        forms outcompete less profitable forms.

        Parameters
        ----------
        forms: list of strings
            List of forms which compete which other.  Can leave some out.

        Returns
        -------
        Nothing.  Goes from a multi-index to a single index with only the
        most profitable form.
        """
        f = self.feasibility

        if forms is not None:
            f = f[forms]

        # winning form per parcel, then select those (parcel, form) rows
        mu = self._max_form(f, "max_profit")
        indexes = [tuple(x) for x in mu.reset_index().values]
        df = f.stack(level=0).loc[indexes]
        df.index.names = ["parcel_id", "form"]
        df = df.reset_index(level=1)
        return df

    @staticmethod
    def compute_units_to_build(num_agents, num_units, target_vacancy):
        """
        Compute number of units to build to match target vacancy.

        Parameters
        ----------
        num_agents : int
            number of agents that need units in the region
        num_units : int
            number of units in buildings
        target_vacancy : float (0-1.0)
            target vacancy rate

        Returns
        -------
        number_of_units : int
            the number of units that need to be built
        """
        # print() calls (single-argument) work identically on py2 and py3
        print("Number of agents: {:,}".format(num_agents))
        print("Number of agent spaces: {:,}".format(int(num_units)))
        assert target_vacancy < 1.0
        target_units = int(max(num_agents / (1 - target_vacancy) - num_units, 0))
        print("Current vacancy = {:.2f}".format(1 - num_agents / float(num_units)))
        print("Target vacancy = {:.2f}, target of new units = {:,}".format(
            target_vacancy, target_units))
        return target_units

    def pick(self, form, target_units, parcel_size, ave_unit_size,
             current_units, max_parcel_size=200000, min_unit_size=400,
             drop_after_build=True, residential=True, bldg_sqft_per_job=400.0,
             profit_to_prob_func=None):
        """
        Choose the buildings from the list that are feasible to build in
        order to match the specified demand.

        Parameters
        ----------
        form : string or list
            One or more of the building forms from the pro forma specification -
            e.g. "residential" or "mixedresidential" - these are configuration
            parameters passed previously to the pro forma.  If more than one form
            is passed the forms compete with each other (based on profitability)
            for which one gets built in order to meet demand.
        target_units : int
            The number of units to build.  For non-residential buildings this
            should be passed as the number of job spaces that need to be created.
        parcel_size : series
            The size of the parcels.  This was passed to feasibility as well,
            but should be passed here as well.  Index should be parcel_ids.
        ave_unit_size : series
            The average residential unit size around each parcel - this is
            indexed by parcel, but is usually a disaggregated version of a
            zonal or accessibility aggregation.
        bldg_sqft_per_job : float (default 400.0)
            The average square feet per job for this building form.
        min_unit_size : float
            Values less than this number in ave_unit_size will be set to this
            number.  Deals with cases where units are currently not built.
        current_units : series
            The current number of units on the parcel.  Is used to compute the
            net number of units produced by the developer model.  Many times
            the developer model is redeveloping units (demolishing them) and
            is trying to meet a total number of net units produced.
        max_parcel_size : float
            Parcels larger than this size will not be considered for
            development - usually large parcels should be specified manually
            in a development projects table.
        drop_after_build : bool
            Whether or not to drop parcels from consideration after they
            have been chosen for development.  Usually this is true so as
            to not develop the same parcel twice.
        residential: bool
            If creating non-residential buildings set this to false and
            developer will fill in job_spaces rather than residential_units
        profit_to_prob_func: function
            As there are so many ways to turn the development feasibility
            into a probability to select it for building, the user may pass
            a function which takes the feasibility dataframe and returns
            a series of probabilities.  If no function is passed, the behavior
            of this method will not change

        Returns
        -------
        None if thar are no feasible buildings
        new_buildings : dataframe
            DataFrame of buildings to add.  These buildings are rows from the
            DataFrame that is returned from feasibility.
        """

        if len(self.feasibility) == 0:
            # no feasible buildings, might as well bail
            return

        if form is None:
            df = self.feasibility
        elif isinstance(form, list):
            df = self.keep_form_with_max_profit(form)
        else:
            df = self.feasibility[form]

        # feasible buildings only for this building type; .copy() so the
        # column assignments below operate on an owned frame rather than a
        # view of self.feasibility (avoids SettingWithCopy issues)
        df = df[df.max_profit_far > 0].copy()
        # work on a copy so the caller's series is not clamped in place
        ave_unit_size = ave_unit_size.copy()
        ave_unit_size[ave_unit_size < min_unit_size] = min_unit_size
        df["ave_unit_size"] = ave_unit_size
        df["parcel_size"] = parcel_size
        df['current_units'] = current_units
        df = df[df.parcel_size < max_parcel_size].copy()

        df['residential_units'] = (df.residential_sqft / df.ave_unit_size).round()
        df['job_spaces'] = (df.non_residential_sqft / bldg_sqft_per_job).round()

        if residential:
            df['net_units'] = df.residential_units - df.current_units
        else:
            df['net_units'] = df.job_spaces - df.current_units
        df = df[df.net_units > 0].copy()

        if len(df) == 0:
            print("WARNING THERE ARE NO FEASIBLE BUILDING TO CHOOSE FROM")
            return

        # print "Describe of net units\n", df.net_units.describe()
        print("Sum of net units that are profitable: {:,}".format(
            int(df.net_units.sum())))

        if profit_to_prob_func:
            p = profit_to_prob_func(df)
        else:
            # default: probability proportional to profit per square foot
            df['max_profit_per_size'] = df.max_profit / df.parcel_size
            p = df.max_profit_per_size.values / df.max_profit_per_size.sum()

        if df.net_units.sum() < target_units:
            print("WARNING THERE WERE NOT ENOUGH PROFITABLE UNITS TO "
                  "MATCH DEMAND")
            build_idx = df.index.values
        elif target_units <= 0:
            build_idx = []
        else:
            # we don't know how many developments we will need, as they differ in net_units.
            # If all developments have net_units of 1 than we need target_units of them.
            # So we choose the smaller of available developments and target_units.
            choices = np.random.choice(df.index.values, size=min(len(df.index), target_units),
                                       replace=False, p=p)
            tot_units = df.net_units.loc[choices].values.cumsum()
            ind = int(np.searchsorted(tot_units, target_units, side="left")) + 1
            build_idx = choices[:ind]

        if drop_after_build:
            self.feasibility = self.feasibility.drop(build_idx)

        new_df = df.loc[build_idx]
        new_df.index.name = "parcel_id"
        return new_df.reset_index()

    @staticmethod
    def merge(old_df, new_df, return_index=False):
        """
        Merge two dataframes of buildings.  The old dataframe is
        usually the buildings dataset and the new dataframe is a modified
        (by the user) version of what is returned by the pick method.

        Parameters
        ----------
        old_df : dataframe
            Current set of buildings
        new_df : dataframe
            New buildings to add, usually comes from this module
        return_index : bool
            If return_index is true, this method will return the new
            index of new_df (which changes in order to create a unique
            index after the merge)

        Returns
        -------
        df : dataframe
            Combined DataFrame of buildings, makes sure indexes don't overlap
        index : pd.Index
            If and only if return_index is True, return the new index for the
            new_df dataframe (which changes in order to create a unique index
            after the merge)
        """
        # shift the new index past the old one so indexes never collide
        maxind = np.max(old_df.index.values)
        new_df = new_df.reset_index(drop=True)
        new_df.index = new_df.index + maxind + 1
        concat_df = pd.concat([old_df, new_df], verify_integrity=True)
        concat_df.index.name = 'building_id'

        if return_index:
            return concat_df, new_df.index

        return concat_df
| bsd-3-clause |
vkscool/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/collections.py | 69 | 39876 | """
Classes for the efficient drawing of large collections of objects that
share most properties, e.g. a large number of line segments or
polygons.
The classes are not meant to be as flexible as their single element
counterparts (e.g. you may not be able to select all line styles) but
they are meant to be fast for common use cases (e.g. a bunch of solid
line segemnts)
"""
import copy, math, warnings
import numpy as np
from numpy import ma
import matplotlib as mpl
import matplotlib.cbook as cbook
import matplotlib.colors as _colors # avoid conflict with kwarg
import matplotlib.cm as cm
import matplotlib.transforms as transforms
import matplotlib.artist as artist
import matplotlib.backend_bases as backend_bases
import matplotlib.path as mpath
import matplotlib.mlab as mlab
class Collection(artist.Artist, cm.ScalarMappable):
"""
Base class for Collections. Must be subclassed to be usable.
All properties in a collection must be sequences or scalars;
if scalars, they will be converted to sequences. The
property of the ith element of the collection is::
prop[i % len(props)]
Keyword arguments and default values:
* *edgecolors*: None
* *facecolors*: None
* *linewidths*: None
* *antialiaseds*: None
* *offsets*: None
* *transOffset*: transforms.IdentityTransform()
* *norm*: None (optional for
:class:`matplotlib.cm.ScalarMappable`)
* *cmap*: None (optional for
:class:`matplotlib.cm.ScalarMappable`)
*offsets* and *transOffset* are used to translate the patch after
rendering (default no offsets).
If any of *edgecolors*, *facecolors*, *linewidths*, *antialiaseds*
are None, they default to their :data:`matplotlib.rcParams` patch
setting, in sequence form.
The use of :class:`~matplotlib.cm.ScalarMappable` is optional. If
the :class:`~matplotlib.cm.ScalarMappable` matrix _A is not None
(ie a call to set_array has been made), at draw time a call to
scalar mappable will be made to set the face colors.
"""
_offsets = np.array([], np.float_)
_transOffset = transforms.IdentityTransform()
_transforms = []
zorder = 1
    def __init__(self,
                 edgecolors=None,
                 facecolors=None,
                 linewidths=None,
                 linestyles='solid',
                 antialiaseds = None,
                 offsets = None,
                 transOffset = None,
                 norm = None,  # optional for ScalarMappable
                 cmap = None,  # ditto
                 pickradius = 5.0,
                 urls = None,
                 **kwargs
                 ):
        """
        Create a Collection

        %(Collection)s
        """
        artist.Artist.__init__(self)
        cm.ScalarMappable.__init__(self, norm, cmap)

        # property setters normalize scalars to sequences and substitute
        # rc defaults for None
        self.set_edgecolor(edgecolors)
        self.set_facecolor(facecolors)
        self.set_linewidth(linewidths)
        self.set_linestyle(linestyles)
        self.set_antialiased(antialiaseds)
        self.set_urls(urls)

        # offsets go to _offsets only when a transOffset was supplied;
        # otherwise they wait in _uniform_offsets (see get_offsets)
        self._uniform_offsets = None
        self._offsets = np.array([], np.float_)
        if offsets is not None:
            offsets = np.asarray(offsets)
            if len(offsets.shape) == 1:
                offsets = offsets[np.newaxis,:]  # Make it Nx2.
            if transOffset is not None:
                self._offsets = offsets
                self._transOffset = transOffset
            else:
                self._uniform_offsets = offsets

        # NOTE(review): stored as _pickradius here, but set_pickradius /
        # get_pickradius use the attribute `pickradius` -- confirm which
        # one is intended.
        self._pickradius = pickradius
        self.update(kwargs)
def _get_value(self, val):
try: return (float(val), )
except TypeError:
if cbook.iterable(val) and len(val):
try: float(val[0])
except TypeError: pass # raise below
else: return val
raise TypeError('val must be a float or nonzero sequence of floats')
def _get_bool(self, val):
try: return (bool(val), )
except TypeError:
if cbook.iterable(val) and len(val):
try: bool(val[0])
except TypeError: pass # raise below
else: return val
raise TypeError('val must be a bool or nonzero sequence of them')
    def get_paths(self):
        # Abstract: subclasses must return the list of Paths to draw.
        raise NotImplementedError
    def get_transforms(self):
        # Per-path transforms (may be empty, meaning identity for all).
        return self._transforms
    def get_datalim(self, transData):
        # Bounding box of the collection in data coordinates (for
        # autoscaling). Non-affine transform parts are applied eagerly;
        # the affine remainders are handed to the extent helper.
        transform = self.get_transform()
        transOffset = self._transOffset
        offsets = self._offsets
        paths = self.get_paths()
        if not transform.is_affine:
            paths = [transform.transform_path_non_affine(p) for p in paths]
            transform = transform.get_affine()
        if not transOffset.is_affine:
            offsets = transOffset.transform_non_affine(offsets)
            transOffset = transOffset.get_affine()
        offsets = np.asarray(offsets, np.float_)
        result = mpath.get_path_collection_extents(
            transform.frozen(), paths, self.get_transforms(),
            offsets, transOffset.frozen())
        # map the extents back into data space
        result = result.inverse_transformed(transData)
        return result
    def get_window_extent(self, renderer):
        # Extent is computed from the data limits with an identity
        # transform (i.e. returned in the collection's own coordinates).
        bbox = self.get_datalim(transforms.IdentityTransform())
        #TODO:check to ensure that this does not fail for
        #cases other than scatter plot legend
        return bbox
def _prepare_points(self):
"""Point prep for drawing and hit testing"""
transform = self.get_transform()
transOffset = self._transOffset
offsets = self._offsets
paths = self.get_paths()
if self.have_units():
paths = []
for path in self.get_paths():
vertices = path.vertices
xs, ys = vertices[:, 0], vertices[:, 1]
xs = self.convert_xunits(xs)
ys = self.convert_yunits(ys)
paths.append(mpath.Path(zip(xs, ys), path.codes))
if len(self._offsets):
xs = self.convert_xunits(self._offsets[:0])
ys = self.convert_yunits(self._offsets[:1])
offsets = zip(xs, ys)
offsets = np.asarray(offsets, np.float_)
if not transform.is_affine:
paths = [transform.transform_path_non_affine(path) for path in paths]
transform = transform.get_affine()
if not transOffset.is_affine:
offsets = transOffset.transform_non_affine(offsets)
transOffset = transOffset.get_affine()
return transform, transOffset, offsets, paths
    def draw(self, renderer):
        # Render every path in a single backend call; no-op when hidden.
        if not self.get_visible(): return
        renderer.open_group(self.__class__.__name__)
        # map scalar data (set_array) to face colors, if any was set
        self.update_scalarmappable()
        clippath, clippath_trans = self.get_transformed_clip_path_and_affine()
        if clippath_trans is not None:
            clippath_trans = clippath_trans.frozen()
        transform, transOffset, offsets, paths = self._prepare_points()
        renderer.draw_path_collection(
            transform.frozen(), self.clipbox, clippath, clippath_trans,
            paths, self.get_transforms(),
            offsets, transOffset,
            self.get_facecolor(), self.get_edgecolor(), self._linewidths,
            self._linestyles, self._antialiaseds, self._urls)
        renderer.close_group(self.__class__.__name__)
    def contains(self, mouseevent):
        """
        Test whether the mouse event occurred in the collection.

        Returns True | False, ``dict(ind=itemlist)``, where every
        item in itemlist contains the event.
        """
        # a user-installed containment test takes precedence
        if callable(self._contains): return self._contains(self,mouseevent)
        if not self.get_visible(): return False,{}
        transform, transOffset, offsets, paths = self._prepare_points()
        # hit-test each path, padded by the pick radius (in points);
        # the last argument enables filled-area testing when faces exist
        ind = mpath.point_in_path_collection(
            mouseevent.x, mouseevent.y, self._pickradius,
            transform.frozen(), paths, self.get_transforms(),
            offsets, transOffset, len(self._facecolors)>0)
        return len(ind)>0,dict(ind=ind)
def set_pickradius(self,pickradius): self.pickradius = 5
def get_pickradius(self): return self.pickradius
def set_urls(self, urls):
if urls is None:
self._urls = [None,]
else:
self._urls = urls
def get_urls(self): return self._urls
    def set_offsets(self, offsets):
        """
        Set the offsets for the collection.  *offsets* can be a scalar
        or a sequence.

        ACCEPTS: float or sequence of floats
        """
        offsets = np.asarray(offsets, np.float_)
        if len(offsets.shape) == 1:
            offsets = offsets[np.newaxis,:]  # Make it Nx2.
        #This decision is based on how they are initialized above
        # (offsets live in _offsets only when a transOffset was supplied
        # at construction; otherwise in _uniform_offsets)
        if self._uniform_offsets is None:
            self._offsets = offsets
        else:
            self._uniform_offsets = offsets
def get_offsets(self):
"""
Return the offsets for the collection.
"""
#This decision is based on how they are initialized above in __init__()
if self._uniform_offsets is None:
return self._offsets
else:
return self._uniform_offsets
    def set_linewidth(self, lw):
        """
        Set the linewidth(s) for the collection.  *lw* can be a scalar
        or a sequence; if it is a sequence the patches will cycle
        through the sequence

        ACCEPTS: float or sequence of floats
        """
        # fall back to the rc default for patches, then normalize to a
        # sequence of floats
        if lw is None: lw = mpl.rcParams['patch.linewidth']
        self._linewidths = self._get_value(lw)
    def set_linewidths(self, lw):
        """alias for set_linewidth"""
        # thin delegation; kept for API compatibility
        return self.set_linewidth(lw)
    def set_lw(self, lw):
        """alias for set_linewidth"""
        # thin delegation; kept for API compatibility
        return self.set_linewidth(lw)
    def set_linestyle(self, ls):
        """
        Set the linestyle(s) for the collection.

        ACCEPTS: ['solid' | 'dashed', 'dashdot', 'dotted' |
                   (offset, on-off-dash-seq) ]
        """
        try:
            # Named styles are resolved through the backend dash table;
            # cbook.ls_mapper additionally maps the short forms
            # ('--', '-.', ':', ...) onto the long names.
            dashd = backend_bases.GraphicsContextBase.dashd
            if cbook.is_string_like(ls):
                if ls in dashd:
                    dashes = [dashd[ls]]
                elif ls in cbook.ls_mapper:
                    dashes = [dashd[cbook.ls_mapper[ls]]]
                else:
                    raise ValueError()
            elif cbook.iterable(ls):
                try:
                    # A sequence may mix style names and explicit
                    # (offset, on-off-seq) dash tuples.
                    dashes = []
                    for x in ls:
                        if cbook.is_string_like(x):
                            if x in dashd:
                                dashes.append(dashd[x])
                            elif x in cbook.ls_mapper:
                                dashes.append(dashd[cbook.ls_mapper[x]])
                            else:
                                raise ValueError()
                        elif cbook.iterable(x) and len(x) == 2:
                            dashes.append(x)
                        else:
                            raise ValueError()
                except ValueError:
                    # Fallback: a bare 2-tuple is treated as a single
                    # (offset, on-off-seq) spec rather than a sequence
                    # of specs.
                    if len(ls)==2:
                        dashes = ls
                    else:
                        raise ValueError()
            else:
                raise ValueError()
        except ValueError:
            raise ValueError('Do not know how to convert %s to dashes'%ls)
        self._linestyles = dashes
def set_linestyles(self, ls):
"""alias for set_linestyle"""
return self.set_linestyle(ls)
def set_dashes(self, ls):
"""alias for set_linestyle"""
return self.set_linestyle(ls)
def set_antialiased(self, aa):
"""
Set the antialiasing state for rendering.
ACCEPTS: Boolean or sequence of booleans
"""
if aa is None:
aa = mpl.rcParams['patch.antialiased']
self._antialiaseds = self._get_bool(aa)
def set_antialiaseds(self, aa):
"""alias for set_antialiased"""
return self.set_antialiased(aa)
def set_color(self, c):
"""
Set both the edgecolor and the facecolor.
ACCEPTS: matplotlib color arg or sequence of rgba tuples
.. seealso::
:meth:`set_facecolor`, :meth:`set_edgecolor`
"""
self.set_facecolor(c)
self.set_edgecolor(c)
def set_facecolor(self, c):
"""
Set the facecolor(s) of the collection. *c* can be a
matplotlib color arg (all patches have same color), or a
sequence or rgba tuples; if it is a sequence the patches will
cycle through the sequence
ACCEPTS: matplotlib color arg or sequence of rgba tuples
"""
if c is None: c = mpl.rcParams['patch.facecolor']
self._facecolors_original = c
self._facecolors = _colors.colorConverter.to_rgba_array(c, self._alpha)
def set_facecolors(self, c):
"""alias for set_facecolor"""
return self.set_facecolor(c)
def get_facecolor(self):
return self._facecolors
get_facecolors = get_facecolor
def get_edgecolor(self):
if self._edgecolors == 'face':
return self.get_facecolors()
else:
return self._edgecolors
get_edgecolors = get_edgecolor
def set_edgecolor(self, c):
"""
Set the edgecolor(s) of the collection. *c* can be a
matplotlib color arg (all patches have same color), or a
sequence or rgba tuples; if it is a sequence the patches will
cycle through the sequence.
If *c* is 'face', the edge color will always be the same as
the face color.
ACCEPTS: matplotlib color arg or sequence of rgba tuples
"""
if c == 'face':
self._edgecolors = 'face'
self._edgecolors_original = 'face'
else:
if c is None: c = mpl.rcParams['patch.edgecolor']
self._edgecolors_original = c
self._edgecolors = _colors.colorConverter.to_rgba_array(c, self._alpha)
def set_edgecolors(self, c):
"""alias for set_edgecolor"""
return self.set_edgecolor(c)
    def set_alpha(self, alpha):
        """
        Set the alpha transparencies of the collection.  *alpha* must be
        a float.

        ACCEPTS: float
        """
        try: float(alpha)
        except TypeError: raise TypeError('alpha must be a float')
        else:
            artist.Artist.set_alpha(self, alpha)
            # Re-derive the rgba arrays from the originally supplied color
            # specs so the new alpha is baked in.  The broad except clauses
            # tolerate instances whose colors were never set (or whose
            # originals cannot be converted) and leave them unchanged.
            try:
                self._facecolors = _colors.colorConverter.to_rgba_array(
                    self._facecolors_original, self._alpha)
            except (AttributeError, TypeError, IndexError):
                pass
            try:
                # 'face' is a sentinel, not a color spec; leave it alone.
                if self._edgecolors_original != 'face':
                    self._edgecolors = _colors.colorConverter.to_rgba_array(
                        self._edgecolors_original, self._alpha)
            except (AttributeError, TypeError, IndexError):
                pass
def get_linewidths(self):
return self._linewidths
get_linewidth = get_linewidths
def get_linestyles(self):
return self._linestyles
get_dashes = get_linestyle = get_linestyles
def update_scalarmappable(self):
"""
If the scalar mappable array is not none, update colors
from scalar data
"""
if self._A is None: return
if self._A.ndim > 1:
raise ValueError('Collections can only map rank 1 arrays')
if len(self._facecolors):
self._facecolors = self.to_rgba(self._A, self._alpha)
else:
self._edgecolors = self.to_rgba(self._A, self._alpha)
def update_from(self, other):
'copy properties from other to self'
artist.Artist.update_from(self, other)
self._antialiaseds = other._antialiaseds
self._edgecolors_original = other._edgecolors_original
self._edgecolors = other._edgecolors
self._facecolors_original = other._facecolors_original
self._facecolors = other._facecolors
self._linewidths = other._linewidths
self._linestyles = other._linestyles
self._pickradius = other._pickradius
# these are not available for the object inspector until after the
# class is built so we define an initial set here for the init
# function and they will be overridden after object defn
artist.kwdocd['Collection'] = """\
Valid Collection keyword arguments:
* *edgecolors*: None
* *facecolors*: None
* *linewidths*: None
* *antialiaseds*: None
* *offsets*: None
* *transOffset*: transforms.IdentityTransform()
* *norm*: None (optional for
:class:`matplotlib.cm.ScalarMappable`)
* *cmap*: None (optional for
:class:`matplotlib.cm.ScalarMappable`)
*offsets* and *transOffset* are used to translate the patch after
rendering (default no offsets)
If any of *edgecolors*, *facecolors*, *linewidths*, *antialiaseds*
are None, they default to their :data:`matplotlib.rcParams` patch
setting, in sequence form.
"""
class QuadMesh(Collection):
    """
    Class for the efficient drawing of a quadrilateral mesh.

    A quadrilateral mesh consists of a grid of vertices. The
    dimensions of this array are (*meshWidth* + 1, *meshHeight* +
    1). Each vertex in the mesh has a different set of "mesh
    coordinates" representing its position in the topology of the
    mesh. For any values (*m*, *n*) such that 0 <= *m* <= *meshWidth*
    and 0 <= *n* <= *meshHeight*, the vertices at mesh coordinates
    (*m*, *n*), (*m*, *n* + 1), (*m* + 1, *n* + 1), and (*m* + 1, *n*)
    form one of the quadrilaterals in the mesh. There are thus
    (*meshWidth* * *meshHeight*) quadrilaterals in the mesh.  The mesh
    need not be regular and the polygons need not be convex.

    A quadrilateral mesh is represented by a (2 x ((*meshWidth* + 1) *
    (*meshHeight* + 1))) numpy array *coordinates*, where each row is
    the *x* and *y* coordinates of one of the vertices.  To define the
    function that maps from a data point to its corresponding color,
    use the :meth:`set_cmap` method.  Each of these arrays is indexed in
    row-major order by the mesh coordinates of the vertex (or the mesh
    coordinates of the lower left vertex, in the case of the
    colors).

    For example, the first entry in *coordinates* is the
    coordinates of the vertex at mesh coordinates (0, 0), then the one
    at (0, 1), then at (0, 2) .. (0, meshWidth), (1, 0), (1, 1), and
    so on.
    """
    def __init__(self, meshWidth, meshHeight, coordinates, showedges, antialiased=True):
        Collection.__init__(self)
        self._meshWidth = meshWidth
        self._meshHeight = meshHeight
        self._coordinates = coordinates
        self._showedges = showedges
        self._antialiased = antialiased

        # Paths are built lazily in get_paths; most backends draw the
        # mesh directly and never need them.
        self._paths = None

        # Cache the data bounding box from the flat (N, 2) vertex list.
        self._bbox = transforms.Bbox.unit()
        self._bbox.update_from_data_xy(coordinates.reshape(
            ((meshWidth + 1) * (meshHeight + 1), 2)))

        # By converting to floats now, we can avoid that on every draw.
        self._coordinates = self._coordinates.reshape((meshHeight + 1, meshWidth + 1, 2))
        self._coordinates = np.array(self._coordinates, np.float_)

    def get_paths(self, dataTrans=None):
        # Lazy conversion for backends without native quadmesh support.
        if self._paths is None:
            self._paths = self.convert_mesh_to_paths(
                self._meshWidth, self._meshHeight, self._coordinates)
        return self._paths

    #@staticmethod
    def convert_mesh_to_paths(meshWidth, meshHeight, coordinates):
        """
        Converts a given mesh into a sequence of
        :class:`matplotlib.path.Path` objects for easier rendering by
        backends that do not directly support quadmeshes.

        This function is primarily of use to backend implementers.
        """
        Path = mpath.Path

        if ma.isMaskedArray(coordinates):
            c = coordinates.data
        else:
            c = coordinates

        # Stack the four corners of every cell (plus a repeat of the
        # first corner to close the quad) along a new trailing axis.
        points = np.concatenate((
                    c[0:-1, 0:-1],
                    c[0:-1, 1:],
                    c[1:, 1:],
                    c[1:, 0:-1],
                    c[0:-1, 0:-1]
                    ), axis=2)
        points = points.reshape((meshWidth * meshHeight, 5, 2))
        return [Path(x) for x in points]
    convert_mesh_to_paths = staticmethod(convert_mesh_to_paths)

    def get_datalim(self, transData):
        # The bbox was computed in data space at construction time.
        return self._bbox

    def draw(self, renderer):
        if not self.get_visible(): return
        renderer.open_group(self.__class__.__name__)
        transform = self.get_transform()
        transOffset = self._transOffset
        offsets = self._offsets

        if self.have_units():
            if len(self._offsets):
                # Bug fix: these slices were previously written as
                # self._offsets[:0] and self._offsets[:1] -- *row* slices
                # (an empty slice and the first row) -- instead of the x
                # and y columns of the Nx2 offsets array.
                xs = self.convert_xunits(self._offsets[:, 0])
                ys = self.convert_yunits(self._offsets[:, 1])
                offsets = zip(xs, ys)

        offsets = np.asarray(offsets, np.float_)

        if self.check_update('array'):
            self.update_scalarmappable()

        clippath, clippath_trans = self.get_transformed_clip_path_and_affine()
        if clippath_trans is not None:
            clippath_trans = clippath_trans.frozen()

        if not transform.is_affine:
            # Apply the non-affine part of the transform here so the
            # renderer only has to handle an affine transform.
            coordinates = self._coordinates.reshape(
                (self._coordinates.shape[0] *
                 self._coordinates.shape[1],
                 2))
            coordinates = transform.transform(coordinates)
            coordinates = coordinates.reshape(self._coordinates.shape)
            transform = transforms.IdentityTransform()
        else:
            coordinates = self._coordinates

        if not transOffset.is_affine:
            offsets = transOffset.transform_non_affine(offsets)
            transOffset = transOffset.get_affine()

        renderer.draw_quad_mesh(
            transform.frozen(), self.clipbox, clippath, clippath_trans,
            self._meshWidth, self._meshHeight, coordinates,
            offsets, transOffset, self.get_facecolor(), self._antialiased,
            self._showedges)
        renderer.close_group(self.__class__.__name__)
class PolyCollection(Collection):
    def __init__(self, verts, sizes = None, closed = True, **kwargs):
        """
        *verts* is a sequence of ( *verts0*, *verts1*, ...) where
        *verts_i* is a sequence of *xy* tuples of vertices, or an
        equivalent :mod:`numpy` array of shape (*nv*, 2).

        *sizes* is *None* (default) or a sequence of floats that
        scale the corresponding *verts_i*.  The scaling is applied
        before the Artist master transform; if the latter is an identity
        transform, then the overall scaling is such that if
        *verts_i* specify a unit square, then *sizes_i* is the area
        of that square in points^2.
        If len(*sizes*) < *nv*, the additional values will be
        taken cyclically from the array.

        *closed*, when *True*, will explicitly close the polygon.

        %(Collection)s
        """
        Collection.__init__(self,**kwargs)
        self._sizes = sizes
        self.set_verts(verts, closed)
    __init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd

    def set_verts(self, verts, closed=True):
        '''This allows one to delay initialization of the vertices.'''
        if closed:
            self._paths = []
            for xy in verts:
                # Close each ring by repeating its first vertex, unless it
                # is already closed; masked arrays get the mask-preserving
                # concatenate.
                if np.ma.isMaskedArray(xy):
                    if len(xy) and (xy[0] != xy[-1]).any():
                        xy = np.ma.concatenate([xy, [xy[0]]])
                else:
                    xy = np.asarray(xy)
                    if len(xy) and (xy[0] != xy[-1]).any():
                        xy = np.concatenate([xy, [xy[0]]])
                self._paths.append(mpath.Path(xy))
        else:
            self._paths = [mpath.Path(xy) for xy in verts]

    def get_paths(self):
        return self._paths

    def draw(self, renderer):
        # Per-polygon sizes (area in points^2) depend on the current
        # figure dpi, so the scale transforms are rebuilt at each draw.
        if self._sizes is not None:
            self._transforms = [
                transforms.Affine2D().scale(
                    (np.sqrt(x) * self.figure.dpi / 72.0))
                for x in self._sizes]
        return Collection.draw(self, renderer)
class BrokenBarHCollection(PolyCollection):
    """
    A collection of horizontal bars spanning *yrange* with a sequence of
    *xranges*.
    """
    def __init__(self, xranges, yrange, **kwargs):
        """
        *xranges*
            sequence of (*xmin*, *xwidth*)

        *yrange*
            *ymin*, *ywidth*

        %(Collection)s
        """
        ymin, ywidth = yrange
        ymax = ymin + ywidth
        # Each (xmin, xwidth) pair becomes one explicitly-closed rectangle.
        verts = [ [(xmin, ymin), (xmin, ymax), (xmin+xwidth, ymax), (xmin+xwidth, ymin), (xmin, ymin)] for xmin, xwidth in xranges]
        PolyCollection.__init__(self, verts, **kwargs)
    __init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd

    @staticmethod
    def span_where(x, ymin, ymax, where, **kwargs):
        """
        Create a BrokenBarHCollection to plot horizontal bars from
        over the regions in *x* where *where* is True.  The bars range
        on the y-axis from *ymin* to *ymax*

        A :class:`BrokenBarHCollection` is returned.
        *kwargs* are passed on to the collection
        """
        xranges = []
        # mlab.contiguous_regions yields [ind0, ind1) index runs where
        # *where* is True; each run becomes one bar.
        for ind0, ind1 in mlab.contiguous_regions(where):
            xslice = x[ind0:ind1]
            if not len(xslice):
                continue
            xranges.append((xslice[0], xslice[-1]-xslice[0]))

        collection = BrokenBarHCollection(xranges, [ymin, ymax-ymin], **kwargs)
        return collection
class RegularPolyCollection(Collection):
    """Draw a collection of regular polygons with *numsides*."""
    # Subclasses override this to draw stars or asterisks instead.
    _path_generator = mpath.Path.unit_regular_polygon

    def __init__(self,
                 numsides,
                 rotation = 0 ,
                 sizes = (1,),
                 **kwargs):
        """
        *numsides*
            the number of sides of the polygon

        *rotation*
            the rotation of the polygon in radians

        *sizes*
            gives the area of the circle circumscribing the
            regular polygon in points^2

        %(Collection)s

        Example: see :file:`examples/dynamic_collection.py` for
        complete example::

            offsets = np.random.rand(20,2)
            facecolors = [cm.jet(x) for x in np.random.rand(20)]
            black = (0,0,0,1)

            collection = RegularPolyCollection(
                numsides=5, # a pentagon
                rotation=0, sizes=(50,),
                facecolors = facecolors,
                edgecolors = (black,),
                linewidths = (1,),
                offsets = offsets,
                transOffset = ax.transData,
                )
        """
        Collection.__init__(self,**kwargs)
        self._sizes = sizes
        self._numsides = numsides
        self._paths = [self._path_generator(numsides)]
        self._rotation = rotation
        self.set_transform(transforms.IdentityTransform())
    __init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd

    def draw(self, renderer):
        # Turn each circumscribed-circle area (points^2) into a pixel
        # scale factor: radius = sqrt(area/pi) points, times dpi/72
        # pixels per point; the rotation is applied in the same affine.
        self._transforms = [
            transforms.Affine2D().rotate(-self._rotation).scale(
                (np.sqrt(x) * self.figure.dpi / 72.0) / np.sqrt(np.pi))
            for x in self._sizes]
        return Collection.draw(self, renderer)

    def get_paths(self):
        return self._paths

    def get_numsides(self):
        return self._numsides

    def get_rotation(self):
        return self._rotation

    def get_sizes(self):
        return self._sizes
class StarPolygonCollection(RegularPolyCollection):
    """
    Draw a collection of regular stars with *numsides* points.
    """
    # Only the path generator differs from RegularPolyCollection.
    _path_generator = mpath.Path.unit_regular_star
class AsteriskPolygonCollection(RegularPolyCollection):
    """
    Draw a collection of regular asterisks with *numsides* points.
    """
    # Only the path generator differs from RegularPolyCollection.
    _path_generator = mpath.Path.unit_regular_asterisk
class LineCollection(Collection):
    """
    All parameters must be sequences or scalars; if scalars, they will
    be converted to sequences.  The property of the ith line
    segment is::

       prop[i % len(props)]

    i.e., the properties cycle if the ``len`` of props is less than the
    number of segments.
    """
    zorder = 2   # lines are drawn above the default patch zorder

    def __init__(self, segments,     # Can be None.
                 linewidths    = None,
                 colors       = None,
                 antialiaseds  = None,
                 linestyles = 'solid',
                 offsets = None,
                 transOffset = None,
                 norm = None,
                 cmap = None,
                 pickradius = 5,
                 **kwargs
                 ):
        """
        *segments*
            a sequence of (*line0*, *line1*, *line2*), where::

                linen = (x0, y0), (x1, y1), ... (xm, ym)

            or the equivalent numpy array with two columns. Each line
            can be a different length.

        *colors*
            must be a sequence of RGBA tuples (eg arbitrary color
            strings, etc, not allowed).

        *antialiaseds*
            must be a sequence of ones or zeros

        *linestyles* [ 'solid' | 'dashed' | 'dashdot' | 'dotted' ]
            a string or dash tuple. The dash tuple is::

                (offset, onoffseq),

            where *onoffseq* is an even length tuple of on and off ink
            in points.

        If *linewidths*, *colors*, or *antialiaseds* is None, they
        default to their rcParams setting, in sequence form.

        If *offsets* and *transOffset* are not None, then
        *offsets* are transformed by *transOffset* and applied after
        the segments have been transformed to display coordinates.

        If *offsets* is not None but *transOffset* is None, then the
        *offsets* are added to the segments before any transformation.
        In this case, a single offset can be specified as::

            offsets=(xo,yo)

        and this value will be added cumulatively to each successive
        segment, so as to produce a set of successively offset curves.

        *norm*
            None (optional for :class:`matplotlib.cm.ScalarMappable`)
        *cmap*
            None (optional for :class:`matplotlib.cm.ScalarMappable`)

        *pickradius* is the tolerance for mouse clicks picking a line.
        The default is 5 pt.

        The use of :class:`~matplotlib.cm.ScalarMappable` is optional.
        If the :class:`~matplotlib.cm.ScalarMappable` matrix
        :attr:`~matplotlib.cm.ScalarMappable._A` is not None (ie a call to
        :meth:`~matplotlib.cm.ScalarMappable.set_array` has been made), at
        draw time a call to scalar mappable will be made to set the colors.
        """
        if colors is None: colors = mpl.rcParams['lines.color']
        if linewidths is None: linewidths = (mpl.rcParams['lines.linewidth'],)
        if antialiaseds is None: antialiaseds = (mpl.rcParams['lines.antialiased'],)
        self.set_linestyles(linestyles)

        colors = _colors.colorConverter.to_rgba_array(colors)

        Collection.__init__(
            self,
            edgecolors=colors,
            linewidths=linewidths,
            linestyles=linestyles,
            antialiaseds=antialiaseds,
            offsets=offsets,
            transOffset=transOffset,
            norm=norm,
            cmap=cmap,
            pickradius=pickradius,
            **kwargs)

        # Lines have no fill: colors are carried as edgecolors only.
        self.set_facecolors([])
        self.set_segments(segments)

    def get_paths(self):
        return self._paths

    def set_segments(self, segments):
        if segments is None: return
        _segments = []
        for seg in segments:
            # Preserve masked arrays; coerce everything else to float.
            if not np.ma.isMaskedArray(seg):
                seg = np.asarray(seg, np.float_)
            _segments.append(seg)
        if self._uniform_offsets is not None:
            _segments = self._add_offsets(_segments)
        self._paths = [mpath.Path(seg) for seg in _segments]
    set_verts = set_segments # for compatibility with PolyCollection

    def _add_offsets(self, segs):
        offsets = self._uniform_offsets
        Nsegs = len(segs)
        Noffs = offsets.shape[0]
        if Noffs == 1:
            # A single offset is applied cumulatively: segment i is
            # shifted by i * offset, producing successively offset curves.
            for i in range(Nsegs):
                segs[i] = segs[i] + i * offsets
        else:
            # Multiple offsets cycle over the segments.
            for i in range(Nsegs):
                io = i%Noffs
                segs[i] = segs[i] + offsets[io:io+1]
        return segs

    def set_color(self, c):
        """
        Set the color(s) of the line collection.  *c* can be a
        matplotlib color arg (all patches have same color), or a
        sequence or rgba tuples; if it is a sequence the patches will
        cycle through the sequence

        ACCEPTS: matplotlib color arg or sequence of rgba tuples
        """
        self._edgecolors = _colors.colorConverter.to_rgba_array(c)

    def color(self, c):
        """
        Set the color(s) of the line collection.  *c* can be a
        matplotlib color arg (all patches have same color), or a
        sequence or rgba tuples; if it is a sequence the patches will
        cycle through the sequence

        ACCEPTS: matplotlib color arg or sequence of rgba tuples
        """
        # Deprecated entry point kept for backwards compatibility.
        warnings.warn('LineCollection.color deprecated; use set_color instead')
        return self.set_color(c)

    def get_color(self):
        return self._edgecolors
    get_colors = get_color  # for compatibility with old versions
class CircleCollection(Collection):
    """
    A collection of circles, drawn using splines.
    """
    def __init__(self, sizes, **kwargs):
        """
        *sizes*
            Gives the area of the circle in points^2

        %(Collection)s
        """
        Collection.__init__(self,**kwargs)
        self._sizes = sizes
        self.set_transform(transforms.IdentityTransform())
        # A single shared unit-circle path; per-circle sizing is done
        # with the scale transforms built in draw().
        self._paths = [mpath.Path.unit_circle()]
    __init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd

    def draw(self, renderer):
        # sizes is the area of the circle circumscribing the polygon
        # in points^2; radius = sqrt(area/pi) points, times dpi/72
        # pixels per point.
        self._transforms = [
            transforms.Affine2D().scale(
                (np.sqrt(x) * self.figure.dpi / 72.0) / np.sqrt(np.pi))
            for x in self._sizes]
        return Collection.draw(self, renderer)

    def get_paths(self):
        return self._paths
class EllipseCollection(Collection):
    """
    A collection of ellipses, drawn using splines.
    """
    def __init__(self, widths, heights, angles, units='points', **kwargs):
        """
        *widths*: sequence
            half-lengths of first axes (e.g., semi-major axis lengths)

        *heights*: sequence
            half-lengths of second axes

        *angles*: sequence
            angles of first axes, degrees CCW from the X-axis

        *units*: ['points' | 'inches' | 'dots' | 'width' | 'height' | 'x' | 'y']
            units in which majors and minors are given; 'width' and 'height'
            refer to the dimensions of the axes, while 'x' and 'y'
            refer to the *offsets* data units.

        Additional kwargs inherited from the base :class:`Collection`:

        %(Collection)s
        """
        Collection.__init__(self,**kwargs)
        self._widths = np.asarray(widths).ravel()
        self._heights = np.asarray(heights).ravel()
        self._angles = np.asarray(angles).ravel() *(np.pi/180.0)  # degrees -> radians
        self._units = units
        self.set_transform(transforms.IdentityTransform())
        self._transforms = []
        # One shared unit-circle path; each ellipse is produced by its
        # own scale+rotate transform built in set_transforms().
        self._paths = [mpath.Path.unit_circle()]
        self._initialized = False
    __init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd

    def _init(self):
        # Invalidate the cached transforms whenever the figure dpi changes.
        def on_dpi_change(fig):
            self._transforms = []
        self.figure.callbacks.connect('dpi_changed', on_dpi_change)
        self._initialized = True

    def set_transforms(self):
        if not self._initialized:
            self._init()
        self._transforms = []
        ax = self.axes
        fig = self.figure
        if self._units in ('x', 'y'):
            # Data units: scale by the current pixels-per-data-unit ratio.
            if self._units == 'x':
                dx0 = ax.viewLim.width
                dx1 = ax.bbox.width
            else:
                dx0 = ax.viewLim.height
                dx1 = ax.bbox.height
            sc = dx1/dx0
        else:
            if self._units == 'inches':
                sc = fig.dpi
            elif self._units == 'points':
                sc = fig.dpi / 72.0
            elif self._units == 'width':
                sc = ax.bbox.width
            elif self._units == 'height':
                sc = ax.bbox.height
            elif self._units == 'dots':
                sc = 1.0
            else:
                raise ValueError('unrecognized units: %s' % self._units)

        _affine = transforms.Affine2D
        for x, y, a in zip(self._widths, self._heights, self._angles):
            trans = _affine().scale(x * sc, y * sc).rotate(a)
            self._transforms.append(trans)

    def draw(self, renderer):
        # NOTE(review): the cache test is disabled ('if True'), so the
        # transforms are rebuilt on every draw; confirm whether the
        # commented-out condition was meant to be restored.
        if True: ###not self._transforms:
            self.set_transforms()
        return Collection.draw(self, renderer)

    def get_paths(self):
        return self._paths
class PatchCollection(Collection):
    """
    A generic collection of patches.

    This makes it easier to assign a color map to a heterogeneous
    collection of patches.

    This also may improve plotting speed, since PatchCollection will
    draw faster than a large number of patches.
    """
    def __init__(self, patches, match_original=False, **kwargs):
        """
        *patches*
            a sequence of Patch objects.  This list may include
            a heterogeneous assortment of different patch types.

        *match_original*
            If True, use the colors and linewidths of the original
            patches.  If False, new colors may be assigned by
            providing the standard collection arguments, facecolor,
            edgecolor, linewidths, norm or cmap.

        If any of *edgecolors*, *facecolors*, *linewidths*,
        *antialiaseds* are None, they default to their
        :data:`matplotlib.rcParams` patch setting, in sequence form.

        The use of :class:`~matplotlib.cm.ScalarMappable` is optional.
        If the :class:`~matplotlib.cm.ScalarMappable` matrix _A is not
        None (ie a call to set_array has been made), at draw time a
        call to scalar mappable will be made to set the face colors.
        """
        if match_original:
            # Harvest the visual properties from the individual patches so
            # the collection reproduces them exactly.
            def determine_facecolor(patch):
                if patch.fill:
                    return patch.get_facecolor()
                return [0, 0, 0, 0]  # unfilled -> fully transparent face

            facecolors = [determine_facecolor(p) for p in patches]
            edgecolors = [p.get_edgecolor() for p in patches]
            # NOTE(review): Patch conventionally defines get_linewidth();
            # verify that get_linewidths exists on the patch classes used.
            linewidths = [p.get_linewidths() for p in patches]
            antialiaseds = [p.get_antialiased() for p in patches]

            Collection.__init__(
                self,
                edgecolors=edgecolors,
                facecolors=facecolors,
                linewidths=linewidths,
                linestyles='solid',
                antialiaseds = antialiaseds)
        else:
            Collection.__init__(self, **kwargs)

        # Flatten each patch into its already-transformed Path; the patch
        # objects themselves are not retained.
        paths = [p.get_transform().transform_path(p.get_path())
                 for p in patches]
        self._paths = paths

    def get_paths(self):
        return self._paths
# Now that the Collection class is fully defined, regenerate its kwdoc
# entry and share it with the subclasses' docstring %-interpolation.
artist.kwdocd['Collection'] = patchstr = artist.kwdoc(Collection)
for k in ('QuadMesh', 'PolyCollection', 'BrokenBarHCollection', 'RegularPolyCollection',
          'StarPolygonCollection', 'PatchCollection', 'CircleCollection'):
    artist.kwdocd[k] = patchstr
artist.kwdocd['LineCollection'] = artist.kwdoc(LineCollection)
| gpl-3.0 |
rubikloud/scikit-learn | sklearn/metrics/__init__.py | 214 | 3440 | """
The :mod:`sklearn.metrics` module includes score functions, performance metrics
and pairwise metrics and distance computations.
"""
from .ranking import auc
from .ranking import average_precision_score
from .ranking import coverage_error
from .ranking import label_ranking_average_precision_score
from .ranking import label_ranking_loss
from .ranking import precision_recall_curve
from .ranking import roc_auc_score
from .ranking import roc_curve
from .classification import accuracy_score
from .classification import classification_report
from .classification import cohen_kappa_score
from .classification import confusion_matrix
from .classification import f1_score
from .classification import fbeta_score
from .classification import hamming_loss
from .classification import hinge_loss
from .classification import jaccard_similarity_score
from .classification import log_loss
from .classification import matthews_corrcoef
from .classification import precision_recall_fscore_support
from .classification import precision_score
from .classification import recall_score
from .classification import zero_one_loss
from .classification import brier_score_loss
from . import cluster
from .cluster import adjusted_mutual_info_score
from .cluster import adjusted_rand_score
from .cluster import completeness_score
from .cluster import consensus_score
from .cluster import homogeneity_completeness_v_measure
from .cluster import homogeneity_score
from .cluster import mutual_info_score
from .cluster import normalized_mutual_info_score
from .cluster import silhouette_samples
from .cluster import silhouette_score
from .cluster import v_measure_score
from .pairwise import euclidean_distances
from .pairwise import pairwise_distances
from .pairwise import pairwise_distances_argmin
from .pairwise import pairwise_distances_argmin_min
from .pairwise import pairwise_kernels
from .regression import explained_variance_score
from .regression import mean_absolute_error
from .regression import mean_squared_error
from .regression import median_absolute_error
from .regression import r2_score
from .scorer import make_scorer
from .scorer import SCORERS
from .scorer import get_scorer
# Public API of sklearn.metrics.
# Fixes: removed a duplicated 'pairwise_distances_argmin_min' entry, and
# added 'cohen_kappa_score', which is imported above but was missing from
# the star-export list.
__all__ = [
    'accuracy_score',
    'adjusted_mutual_info_score',
    'adjusted_rand_score',
    'auc',
    'average_precision_score',
    'classification_report',
    'cluster',
    'cohen_kappa_score',
    'completeness_score',
    'confusion_matrix',
    'consensus_score',
    'coverage_error',
    'euclidean_distances',
    'explained_variance_score',
    'f1_score',
    'fbeta_score',
    'get_scorer',
    'hamming_loss',
    'hinge_loss',
    'homogeneity_completeness_v_measure',
    'homogeneity_score',
    'jaccard_similarity_score',
    'label_ranking_average_precision_score',
    'label_ranking_loss',
    'log_loss',
    'make_scorer',
    'matthews_corrcoef',
    'mean_absolute_error',
    'mean_squared_error',
    'median_absolute_error',
    'mutual_info_score',
    'normalized_mutual_info_score',
    'pairwise_distances',
    'pairwise_distances_argmin',
    'pairwise_distances_argmin_min',
    'pairwise_kernels',
    'precision_recall_curve',
    'precision_recall_fscore_support',
    'precision_score',
    'r2_score',
    'recall_score',
    'roc_auc_score',
    'roc_curve',
    'SCORERS',
    'silhouette_samples',
    'silhouette_score',
    'v_measure_score',
    'zero_one_loss',
    'brier_score_loss',
]
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.