repo_name stringlengths 6 67 | path stringlengths 5 185 | copies stringlengths 1 3 | size stringlengths 4 6 | content stringlengths 1.02k 962k | license stringclasses 15 values |
|---|---|---|---|---|---|
crazy-cat/incubator-mxnet | example/autoencoder/data.py | 27 | 1272 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import numpy as np
from sklearn.datasets import fetch_mldata
def get_mnist():
    """Load MNIST, shuffle it with a fixed seed and rescale the pixels.

    Returns
    -------
    X : np.ndarray of float32
        Flattened pixel values scaled by 0.02.
    Y : np.ndarray
        Digit labels, permuted with the same ordering as X.
    """
    np.random.seed(1234)  # set seed for deterministic ordering
    # Cache the download under <repo>/data (two levels above this file).
    data_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
    data_path = os.path.join(data_path, '../../data')
    # NOTE(review): fetch_mldata was removed from scikit-learn (0.22) after
    # mldata.org went offline; fetch_openml('mnist_784') is the usual
    # replacement -- confirm against the installed sklearn version.
    mnist = fetch_mldata('MNIST original', data_home=data_path)
    # Shuffle samples and targets with one shared permutation.
    p = np.random.permutation(mnist.data.shape[0])
    X = mnist.data[p].astype(np.float32)*0.02
    Y = mnist.target[p]
    return X, Y
| apache-2.0 |
ssaeger/scikit-learn | examples/neural_networks/plot_mnist_filters.py | 57 | 2195 | """
=====================================
Visualization of MLP weights on MNIST
=====================================
Sometimes looking at the learned coefficients of a neural network can provide
insight into the learning behavior. For example if weights look unstructured,
maybe some were not used at all, or if very large coefficients exist, maybe
regularization was too low or the learning rate too high.
This example shows how to plot some of the first layer weights in a
MLPClassifier trained on the MNIST dataset.
The input data consists of 28x28 pixel handwritten digits, leading to 784
features in the dataset. Therefore the first layer weight matrix have the shape
(784, hidden_layer_sizes[0]). We can therefore visualize a single column of
the weight matrix as a 28x28 pixel image.
To make the example run faster, we use very few hidden units, and train only
for a very short time. Training longer would result in weights with a much
smoother spatial appearance.
"""
print(__doc__)

import matplotlib.pyplot as plt
from sklearn.datasets import fetch_mldata
from sklearn.neural_network import MLPClassifier

# NOTE(review): fetch_mldata was removed from scikit-learn (0.22) because
# mldata.org is offline; fetch_openml('mnist_784') is the modern
# replacement -- confirm against the installed sklearn version.
mnist = fetch_mldata("MNIST original")
# rescale the data, use the traditional train/test split
X, y = mnist.data / 255., mnist.target
X_train, X_test = X[:60000], X[60000:]
y_train, y_test = y[:60000], y[60000:]

# mlp = MLPClassifier(hidden_layer_sizes=(100, 100), max_iter=400, alpha=1e-4,
#                     algorithm='sgd', verbose=10, tol=1e-4, random_state=1)
# Deliberately small network and few iterations: this is a fast demo,
# not a tuned model (see the module docstring).
# NOTE(review): the 'algorithm' parameter was renamed 'solver' in later
# scikit-learn releases -- confirm which name the installed version expects.
mlp = MLPClassifier(hidden_layer_sizes=(50,), max_iter=10, alpha=1e-4,
                    algorithm='sgd', verbose=10, tol=1e-4, random_state=1,
                    learning_rate_init=.1)
mlp.fit(X_train, y_train)
print("Training set score: %f" % mlp.score(X_train, y_train))
print("Test set score: %f" % mlp.score(X_test, y_test))

# One image per hidden unit: a 4x4 grid shows 16 of the 50 first-layer
# weight columns, each reshaped back to the 28x28 input geometry.
fig, axes = plt.subplots(4, 4)
# use global min / max to ensure all weights are shown on the same scale
vmin, vmax = mlp.coefs_[0].min(), mlp.coefs_[0].max()
for coef, ax in zip(mlp.coefs_[0].T, axes.ravel()):
    ax.matshow(coef.reshape(28, 28), cmap=plt.cm.gray, vmin=.5 * vmin,
               vmax=.5 * vmax)
    ax.set_xticks(())
    ax.set_yticks(())
plt.show()
| bsd-3-clause |
flightgong/scikit-learn | sklearn/gaussian_process/tests/test_gaussian_process.py | 3 | 5073 | """
Testing for Gaussian Process module (sklearn.gaussian_process)
"""
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# Licence: BSD 3 clause
from nose.tools import raises
from nose.tools import assert_true
import numpy as np
from sklearn.gaussian_process import GaussianProcess
from sklearn.gaussian_process import regression_models as regression
from sklearn.gaussian_process import correlation_models as correlation
def f(x):
    """Target function f(x) = x * sin(x).

    Defined with ``def`` rather than a lambda assignment (PEP 8 E731);
    the public name ``f`` is unchanged.
    """
    return x * np.sin(x)


# Training inputs (column vector), held-out inputs, and training targets
# shared by the test functions below.
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T
y = f(X).ravel()
def test_1d(regr=regression.constant, corr=correlation.squared_exponential,
            random_start=10, beta0=None):
    """
    MLE estimation of a one-dimensional Gaussian Process model.
    Check random start optimization.
    Test the interpolating property.
    """
    model = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
                            theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
                            random_start=random_start, verbose=False)
    model.fit(X, y)
    pred_train, mse_train = model.predict(X, eval_MSE=True)
    pred_new, mse_new = model.predict(X2, eval_MSE=True)
    # The GP must interpolate its training data exactly, and stay
    # within a loose error bound on the held-out points X2.
    interpolates = np.allclose(pred_train, y) and np.allclose(mse_train, 0.)
    assert_true(interpolates and np.allclose(mse_new, 0., atol=10))
def test_2d(regr=regression.constant, corr=correlation.squared_exponential,
            random_start=10, beta0=None):
    """
    MLE estimation of a two-dimensional Gaussian Process model accounting for
    anisotropy. Check random start optimization.
    Test the interpolating property.
    """
    # Anisotropic paraboloid target, evaluated on 8 scattered 2-D points.
    b, kappa, e = 5., .5, .1
    g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
    X = np.array([[-4.61611719, -6.00099547],
                  [4.10469096, 5.32782448],
                  [0.00000000, -0.50000000],
                  [-6.17289014, -4.6984743],
                  [1.3109306, -6.93271427],
                  [-5.03823144, 3.10584743],
                  [-2.87600388, 6.74310541],
                  [5.21301203, 4.26386883]])
    y = g(X).ravel()
    # One (thetaL, thetaU) bound pair per input dimension -> anisotropic fit.
    thetaL = [1e-4] * 2
    thetaU = [1e-1] * 2
    gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
                         theta0=[1e-2] * 2, thetaL=thetaL,
                         thetaU=thetaU,
                         random_start=random_start, verbose=False)
    gp.fit(X, y)
    y_pred, MSE = gp.predict(X, eval_MSE=True)
    # Interpolation: zero error and zero predictive variance at training data.
    assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))
    assert_true(np.all(gp.theta_ >= thetaL))  # Lower bounds of hyperparameters
    assert_true(np.all(gp.theta_ <= thetaU))  # Upper bounds of hyperparameters
def test_2d_2d(regr=regression.constant, corr=correlation.squared_exponential,
               random_start=10, beta0=None):
    """
    MLE estimation of a two-dimensional Gaussian Process model accounting for
    anisotropy. Check random start optimization.
    Test the GP interpolation for 2D output
    """
    # Same anisotropic paraboloid as test_2d, but duplicated into a
    # two-column target so that the multi-output code path is exercised.
    b, kappa, e = 5., .5, .1
    g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
    f = lambda x: np.vstack((g(x), g(x))).T
    X = np.array([[-4.61611719, -6.00099547],
                  [4.10469096, 5.32782448],
                  [0.00000000, -0.50000000],
                  [-6.17289014, -4.6984743],
                  [1.3109306, -6.93271427],
                  [-5.03823144, 3.10584743],
                  [-2.87600388, 6.74310541],
                  [5.21301203, 4.26386883]])
    y = f(X)
    gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
                         theta0=[1e-2] * 2, thetaL=[1e-4] * 2,
                         thetaU=[1e-1] * 2,
                         random_start=random_start, verbose=False)
    gp.fit(X, y)
    y_pred, MSE = gp.predict(X, eval_MSE=True)
    # Both output columns must be interpolated exactly.
    assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))
@raises(ValueError)
def test_wrong_number_of_outputs():
    """fit() must reject X and y with mismatched sample counts."""
    # 2 samples in X vs. 3 targets in y -> ValueError expected.
    GaussianProcess().fit([[1, 2, 3], [4, 5, 6]], [1, 2, 3])
def test_more_builtin_correlation_models(random_start=1):
    """
    Repeat test_1d and test_2d for several built-in correlation
    models specified as strings.
    """
    # Same models, same order as before -- only the loop shape differs.
    for corr_name in ('absolute_exponential', 'squared_exponential',
                      'cubic', 'linear'):
        test_1d(regr='constant', corr=corr_name, random_start=random_start)
        test_2d(regr='constant', corr=corr_name, random_start=random_start)
        test_2d_2d(regr='constant', corr=corr_name,
                   random_start=random_start)
def test_ordinary_kriging():
    """
    Repeat test_1d and test_2d with given regression weights (beta0) for
    different regression models (Ordinary Kriging).
    """
    # Fixed regression weights; lengths match the basis size of each model.
    beta0_lin_2d = [0., 0.5, 0.5]
    beta0_quad = [0., 0.5, 0.5, 0.5, 0.5, 0.5]
    test_1d(regr='linear', beta0=[0., 0.5])
    test_1d(regr='quadratic', beta0=beta0_quad)
    test_2d(regr='linear', beta0=beta0_lin_2d)
    test_2d(regr='quadratic', beta0=beta0_quad)
    test_2d_2d(regr='linear', beta0=beta0_lin_2d)
    test_2d_2d(regr='quadratic', beta0=beta0_quad)
def test_no_normalize():
    """A GP fitted with normalize=False must still interpolate X, y."""
    model = GaussianProcess(normalize=False)
    model.fit(X, y)
    assert_true(np.allclose(model.predict(X), y))
| bsd-3-clause |
siou83/trading-with-python | spreadApp/makeDist.py | 77 | 1720 | from distutils.core import setup
import py2exe
# Windows side-by-side manifest embedded into the executable so that the
# modern Common-Controls (v6) theme is used. %(prog)s is filled in below.
manifest_template = '''
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
<assemblyIdentity
version="5.0.0.0"
processorArchitecture="x86"
name="%(prog)s"
type="win32"
/>
<description>%(prog)s Program</description>
<dependency>
<dependentAssembly>
<assemblyIdentity
type="win32"
name="Microsoft.Windows.Common-Controls"
version="6.0.0.0"
processorArchitecture="X86"
publicKeyToken="6595b64144ccf1df"
language="*"
/>
</dependentAssembly>
</dependency>
</assembly>
'''
# Win32 resource type id for an embedded manifest resource.
RT_MANIFEST = 24
import matplotlib

# py2exe build options: bundle into few files, include the Qt4/Tk matplotlib
# backends explicitly, and exclude backends/DLLs that are not needed.
opts = {
    'py2exe': {
        "compressed": 1,
        "bundle_files": 3,
        "includes": ["sip",
                     "matplotlib.backends",
                     "matplotlib.backends.backend_qt4agg",
                     "pylab", "numpy",
                     "matplotlib.backends.backend_tkagg"],
        'excludes': ['_gtkagg', '_tkagg', '_agg2',
                     '_cairo', '_cocoaagg',
                     '_fltkagg', '_gtk', '_gtkcairo', ],
        'dll_excludes': ['libgdk-win32-2.0-0.dll',
                         'libgobject-2.0-0.dll']
    }
}
setup(name="triton",
version = "0.1",
scripts=["spreadScanner.pyw"],
windows=[{"script": "spreadScanner.pyw"}],
options=opts,
data_files=matplotlib.get_py2exe_datafiles(),
other_resources = [(RT_MANIFEST, 1, manifest_template % dict(prog="spreadDetective"))],
zipfile = None) | bsd-3-clause |
ChristophSchranz/ecg_via_raspberrypi | basic-ecg.py | 1 | 3093 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import time  # import the module called "time"
# import the grafic-module as plt
import matplotlib.pyplot as plt
import RPi.GPIO as GPIO  # import GPIO to use the Pins

# set the sample rate (measurements per seconds)
# and its duration
sample_rate = 200  # samples per second
duration = 3  # seconds

# initialize MCP 3208 with its resolution and pins:
channel = 0  # measure voltage on pin 0
# MCP 3208 has a resolution of 12 bits, that means
# there are 2^12 states starting with state 0
resolution = 4095  # maximum raw count: 2**12 - 1

# set pins to connect the MCP 3208 to the rpi (BCM numbering, see setmode)
CLK = 11  # Clock
MISO = 9  # Master (rpi) in, Slave (MCP) out
MOSI = 10  # Master out, Slave in
CS = 7  # Chip Select (rpi can activate MCP)

# initialize the GPIOs (General Purpose Input and Output)
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
# set the pins as output or input
GPIO.setup(CLK, GPIO.OUT)
GPIO.setup(MOSI, GPIO.OUT)
GPIO.setup(MISO, GPIO.IN)
GPIO.setup(CS, GPIO.OUT)
def read_adc(adcnum, clockpin = CLK, mosipin = MOSI, misopin = MISO, cspin = CS):
    '''Bit-bang one conversion from the MCP3208 ADC.

    Returns the raw 12-bit count (0..4095) read from channel ``adcnum``
    (0-7), or -1 if the channel number is out of range. The caller is
    responsible for converting the count to a voltage.
    '''
    if ((adcnum > 7) or (adcnum < 0)):
        return -1
    # Frame the transaction: CS high, clock low, then CS low to start.
    GPIO.output(cspin, True)
    GPIO.output(clockpin, False)  # start clock low
    GPIO.output(cspin, False)  # bring CS low
    commandout = adcnum
    commandout |= 0x18  # start bit + single-ended bit
    commandout <<= 3  # we only need to send 5 bits here
    # Shift the command out MSB-first, one bit per clock pulse.
    for i in range(5):
        if (commandout & 0x80):
            GPIO.output(mosipin, True)
        else:
            GPIO.output(mosipin, False)
        commandout <<= 1
        GPIO.output(clockpin, True)
        GPIO.output(clockpin, False)
    adcout = 0
    # read in one empty bit, one null bit and 12 ADC data bits
    for i in range(14):
        GPIO.output(clockpin, True)
        GPIO.output(clockpin, False)
        adcout <<= 1
        if (GPIO.input(misopin)):
            adcout |= 0x1
    GPIO.output(cspin, True)  # end the transaction
    adcout >>= 1  # first bit is 'null' so drop it
    return adcout  # raw ADC count (not yet a voltage)
def measure_voltage(channel):
    """Sample ``channel`` for sample_rate*duration points, then plot them.

    Uses the module-level ``sample_rate``, ``duration`` and ``resolution``;
    blocks for roughly ``duration`` seconds and finally shows a matplotlib
    figure of voltage over time.
    """
    # create empty lists which will be appended
    ekg_data = list()
    time_stamps = list()
    # This is a for loop. The code below a for loop will be
    # executed sample_rate*duration times.
    for i in range(sample_rate*duration):
        # read_adc returns an integer between 0 and 2^12-1 = 4095
        # 2^12-1 = 4095, where 0 means 0 V, 4095 means 5 V.
        # We need to scale this raw integer like this:
        voltage = read_adc(channel) / resolution * 5
        # TODO: try to comment out this print statement:
        #print(voltage)
        # add each data point the lists
        ekg_data.append(voltage)
        time_stamps.append(time.time())
        # wait 1/sample_rate seconds
        # NOTE(review): the sleep ignores the time spent reading the ADC,
        # so the effective rate is slightly below sample_rate -- confirm
        # whether that matters for the ECG analysis.
        time.sleep(1/sample_rate)
    print("Finished EKG measurement")
    # This routine plots (creates a grafic) the data
    plt.plot(time_stamps, ekg_data)
    plt.title("EKG Measurement")
    plt.xlabel("Time in Seconds")
    plt.ylabel("Voltage in Volt")
    plt.show()
# The code will be started from this line on (script entry point; nothing
# below runs when this file is imported as a module).
if __name__ == "__main__":
    print("Starting EKG measurement")
    # The function measure_voltage will be executed (called)
    # with the argument "channel" which was defined at the
    # top of the code.
    measure_voltage(channel)
| gpl-3.0 |
tammoippen/nest-simulator | topology/doc/user_manual_scripts/connections.py | 8 | 18562 | # -*- coding: utf-8 -*-
#
# connections.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
# create connectivity figures for topology manual
import nest
import nest.topology as tp
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d.axes3d import Axes3D
import numpy as np
# seed NumPy RNG to ensure identical results for runs with random placement
np.random.seed(7654321)
def beautify_layer(l, fig=plt.gcf(), xlabel=None, ylabel=None,
                   xlim=None, ylim=None, xticks=None, yticks=None, dx=0, dy=0):
    """Assume either x and ylims/ticks given or none.

    Reads the layer's topology status (center/extent) and configures the
    current axes of ``fig``: equal aspect, grid, ticks at node positions
    for grid-based layers, and limits padded by half a node spacing.
    """
    top = nest.GetStatus(l)[0]['topology']
    ctr = top['center']
    ext = top['extent']
    if xticks is None:
        # Grid-based layer: one tick per row/column of node centers.
        if 'rows' in top:
            dx = float(ext[0]) / top['columns']
            dy = float(ext[1]) / top['rows']
            xticks = ctr[0] - ext[0] / 2. + dx / 2. + dx * np.arange(
                top['columns'])
            yticks = ctr[1] - ext[1] / 2. + dy / 2. + dy * np.arange(
                top['rows'])
    if xlim is None:
        xlim = [ctr[0] - ext[0] / 2. - dx / 2., ctr[0] + ext[
            0] / 2. + dx / 2.]  # extra space so extent is visible
        ylim = [ctr[1] - ext[1] / 2. - dy / 2., ctr[1] + ext[1] / 2. + dy / 2.]
    else:
        ext = [xlim[1] - xlim[0], ylim[1] - ylim[0]]
    ax = fig.gca()
    ax.set_xlim(xlim)
    ax.set_ylim(ylim)
    ax.set_aspect('equal', 'box')
    ax.set_xticks(xticks)
    ax.set_yticks(yticks)
    ax.grid(True)
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    return


def conn_figure(fig, layer, connd, targets=None, showmask=True, showkern=False,
                xticks=range(-5, 6), yticks=range(-5, 6),
                xlim=[-5.5, 5.5], ylim=[-5.5, 5.5]):
    """Plot ``layer`` plus the targets (and optionally mask/kernel) of each
    source node in ``targets`` for the connection dictionary ``connd``."""
    # Default: highlight the center element in red.
    if targets is None:
        targets = ((tp.FindCenterElement(layer), 'red'),)
    tp.PlotLayer(layer, fig=fig, nodesize=60)
    for src, clr in targets:
        if showmask:
            mask = connd['mask']
        else:
            mask = None
        if showkern:
            kern = connd['kernel']
        else:
            kern = None
        tp.PlotTargets(src, layer, fig=fig, mask=mask, kernel=kern,
                       src_size=250, tgt_color=clr, tgt_size=20,
                       kernel_color='green')
    beautify_layer(layer, fig,
                   xlim=xlim, ylim=ylim, xticks=xticks, yticks=yticks,
                   xlabel='', ylabel='')
    fig.gca().grid(False)


# -----------------------------------------------
# Simple connection
# (the #{ ... #} / #{ end #} markers delimit snippets extracted verbatim
# into the topology user manual -- do not edit inside them)
#{ conn1 #}
l = tp.CreateLayer({'rows': 11, 'columns': 11, 'extent': [11., 11.],
                    'elements': 'iaf_psc_alpha'})
conndict = {'connection_type': 'divergent',
            'mask': {'rectangular': {'lower_left': [-2., -1.],
                                     'upper_right': [2., 1.]}}}
tp.ConnectLayers(l, l, conndict)
#{ end #}
fig = plt.figure()
fig.add_subplot(121)
conn_figure(fig, l, conndict,
            targets=((tp.FindCenterElement(l), 'red'),
                     (tp.FindNearestElement(l, [4., 5.]), 'yellow')))
# same another time, with periodic bcs
lpbc = tp.CreateLayer({'rows': 11, 'columns': 11, 'extent': [11., 11.],
                       'elements': 'iaf_psc_alpha', 'edge_wrap': True})
tp.ConnectLayers(lpbc, lpbc, conndict)
fig.add_subplot(122)
conn_figure(fig, lpbc, conndict, showmask=False,
            targets=((tp.FindCenterElement(lpbc), 'red'),
                     (tp.FindNearestElement(lpbc, [4., 5.]), 'yellow')))
plt.savefig('../user_manual_figures/conn1.png', bbox_inches='tight')
# -----------------------------------------------
# free masks


def free_mask_fig(fig, loc, cdict):
    """Build a fresh 11x11 layer, connect it with ``cdict`` and draw the
    result into subplot ``loc`` of ``fig``."""
    nest.ResetKernel()
    l = tp.CreateLayer({'rows': 11, 'columns': 11, 'extent': [11., 11.],
                        'elements': 'iaf_psc_alpha'})
    tp.ConnectLayers(l, l, cdict)
    fig.add_subplot(loc)
    conn_figure(fig, l, cdict, xticks=range(-5, 6, 2), yticks=range(-5, 6, 2))


fig = plt.figure()

# Six mask variants (rectangular/circular/doughnut, centered and anchored),
# one subplot each; the marked snippets feed the user manual.
#{ conn2r #}
conndict = {'connection_type': 'divergent',
            'mask': {'rectangular': {'lower_left': [-2., -1.],
                                     'upper_right': [2., 1.]}}}
#{ end #}
free_mask_fig(fig, 231, conndict)

#{ conn2ro #}
conndict = {'connection_type': 'divergent',
            'mask': {'rectangular': {'lower_left': [-2., -1.],
                                     'upper_right': [2., 1.]},
                     'anchor': [-1.5, -1.5]}}
#{ end #}
free_mask_fig(fig, 234, conndict)

#{ conn2c #}
conndict = {'connection_type': 'divergent',
            'mask': {'circular': {'radius': 2.0}}}
#{ end #}
free_mask_fig(fig, 232, conndict)

#{ conn2co #}
conndict = {'connection_type': 'divergent',
            'mask': {'circular': {'radius': 2.0},
                     'anchor': [-2.0, 0.0]}}
#{ end #}
free_mask_fig(fig, 235, conndict)

#{ conn2d #}
conndict = {'connection_type': 'divergent',
            'mask': {'doughnut': {'inner_radius': 1.5,
                                  'outer_radius': 3.}}}
#{ end #}
free_mask_fig(fig, 233, conndict)

#{ conn2do #}
conndict = {'connection_type': 'divergent',
            'mask': {'doughnut': {'inner_radius': 1.5,
                                  'outer_radius': 3.},
                     'anchor': [1.5, 1.5]}}
#{ end #}
free_mask_fig(fig, 236, conndict)
plt.savefig('../user_manual_figures/conn2.png', bbox_inches='tight')
# -----------------------------------------------
# 3d masks


def conn_figure_3d(fig, layer, connd, targets=None, showmask=True,
                   showkern=False,
                   xticks=range(-5, 6), yticks=range(-5, 6),
                   xlim=[-5.5, 5.5], ylim=[-5.5, 5.5]):
    """3-D variant of conn_figure: smaller nodes, no axis beautification
    beyond an equal aspect ratio."""
    if targets is None:
        targets = ((tp.FindCenterElement(layer), 'red'),)
    tp.PlotLayer(layer, fig=fig, nodesize=20, nodecolor=(.5, .5, 1.))
    for src, clr in targets:
        if showmask:
            mask = connd['mask']
        else:
            mask = None
        if showkern:
            kern = connd['kernel']
        else:
            kern = None
        tp.PlotTargets(src, layer, fig=fig, mask=mask, kernel=kern,
                       src_size=250, tgt_color=clr, tgt_size=60,
                       kernel_color='green')
    ax = fig.gca()
    ax.set_aspect('equal', 'box')
    plt.draw()


def free_mask_3d_fig(fig, loc, cdict):
    """Like free_mask_fig, but with an 11x11x11 layer and a 3-D subplot."""
    nest.ResetKernel()
    l = tp.CreateLayer(
        {'rows': 11, 'columns': 11, 'layers': 11, 'extent': [11., 11., 11.],
         'elements': 'iaf_psc_alpha'})
    tp.ConnectLayers(l, l, cdict)
    fig.add_subplot(loc, projection='3d')
    conn_figure_3d(fig, l, cdict, xticks=range(-5, 6, 2),
                   yticks=range(-5, 6, 2))


fig = plt.figure()

#{ conn_3d_a #}
conndict = {'connection_type': 'divergent',
            'mask': {'box': {'lower_left': [-2., -1., -1.],
                             'upper_right': [2., 1., 1.]}}}
#{ end #}
free_mask_3d_fig(fig, 121, conndict)

#{ conn_3d_b #}
conndict = {'connection_type': 'divergent',
            'mask': {'spherical': {'radius': 2.5}}}
#{ end #}
free_mask_3d_fig(fig, 122, conndict)
plt.savefig('../user_manual_figures/conn_3d.png', bbox_inches='tight')
# -----------------------------------------------
# grid masks


def grid_mask_fig(fig, loc, cdict):
    """Draw the targets selected by a grid mask; the mask outline itself is
    not drawn (showmask=False)."""
    nest.ResetKernel()
    l = tp.CreateLayer({'rows': 11, 'columns': 11, 'extent': [11., 11.],
                        'elements': 'iaf_psc_alpha'})
    tp.ConnectLayers(l, l, cdict)
    fig.add_subplot(loc)
    conn_figure(fig, l, cdict, xticks=range(-5, 6, 2), yticks=range(-5, 6, 2),
                showmask=False)


fig = plt.figure()

#{ conn3 #}
conndict = {'connection_type': 'divergent',
            'mask': {'grid': {'rows': 3, 'columns': 5}}}
#{ end #}
grid_mask_fig(fig, 131, conndict)

#{ conn3c #}
conndict = {'connection_type': 'divergent',
            'mask': {'grid': {'rows': 3, 'columns': 5},
                     'anchor': {'row': 1, 'column': 2}}}
#{ end #}
grid_mask_fig(fig, 132, conndict)

#{ conn3x #}
conndict = {'connection_type': 'divergent',
            'mask': {'grid': {'rows': 3, 'columns': 5},
                     'anchor': {'row': -1, 'column': 2}}}
#{ end #}
grid_mask_fig(fig, 133, conndict)
plt.savefig('../user_manual_figures/conn3.png', bbox_inches='tight')
# -----------------------------------------------
# connection kernels (probabilistic connection rules)


def kernel_fig(fig, loc, cdict, showkern=True):
    """Draw targets produced by a probabilistic kernel; optionally overlay
    the kernel's iso-probability contours."""
    nest.ResetKernel()
    l = tp.CreateLayer({'rows': 11, 'columns': 11, 'extent': [11., 11.],
                        'elements': 'iaf_psc_alpha'})
    tp.ConnectLayers(l, l, cdict)
    fig.add_subplot(loc)
    conn_figure(fig, l, cdict, xticks=range(-5, 6, 2), yticks=range(-5, 6, 2),
                showkern=showkern)


fig = plt.figure()

#{ conn4cp #}
conndict = {'connection_type': 'divergent',
            'mask': {'circular': {'radius': 4.}},
            'kernel': 0.5}
#{ end #}
kernel_fig(fig, 231, conndict)

#{ conn4g #}
conndict = {'connection_type': 'divergent',
            'mask': {'circular': {'radius': 4.}},
            'kernel': {'gaussian': {'p_center': 1.0, 'sigma': 1.}}}
#{ end #}
kernel_fig(fig, 232, conndict)

#{ conn4gx #}
conndict = {'connection_type': 'divergent',
            'mask': {'circular': {'radius': 4.}, 'anchor': [1.5, 1.5]},
            'kernel': {'gaussian': {'p_center': 1.0, 'sigma': 1.,
                                    'anchor': [1.5, 1.5]}}}
#{ end #}
kernel_fig(fig, 233, conndict)
plt.draw()

#{ conn4cut #}
conndict = {'connection_type': 'divergent',
            'mask': {'circular': {'radius': 4.}},
            'kernel': {'gaussian': {'p_center': 1.0, 'sigma': 1.,
                                    'cutoff': 0.5}}}
#{ end #}
kernel_fig(fig, 234, conndict)

#{ conn42d #}
conndict = {'connection_type': 'divergent',
            'mask': {'circular': {'radius': 4.}},
            'kernel': {'gaussian2D': {'p_center': 1.0,
                                      'sigma_x': 1., 'sigma_y': 3.}}}
#{ end #}
kernel_fig(fig, 235, conndict, showkern=False)
plt.savefig('../user_manual_figures/conn4.png', bbox_inches='tight')
# -----------------------------------------------
# distance-dependent weights and delays


def wd_fig(fig, loc, ldict, cdict, what, rpos=None,
           xlim=[-1, 51], ylim=[0, 1], xticks=range(0, 51, 5),
           yticks=np.arange(0., 1.1, 0.2), clr='blue',
           label=''):
    """Scatter-plot the connection property ``what`` ('weight' or 'delay')
    of every outgoing connection of one reference node against the target's
    x-position. The reference node is the layer's first node, or the node
    nearest ``rpos`` if given."""
    nest.ResetKernel()
    l = tp.CreateLayer(ldict)
    tp.ConnectLayers(l, l, cdict)
    ax = fig.add_subplot(loc)
    if rpos is None:
        rn = nest.GetLeaves(l)[0][:1]  # first node
    else:
        rn = tp.FindNearestElement(l, rpos)
    conns = nest.GetConnections(rn)
    cstat = nest.GetStatus(conns)
    vals = np.array([sd[what] for sd in cstat])
    tgts = [sd['target'] for sd in cstat]
    locs = np.array(tp.GetPosition(tgts))
    ax.plot(locs[:, 0], vals, 'o', mec='none', mfc=clr, label=label)
    ax.set_xlim(xlim)
    ax.set_ylim(ylim)
    ax.set_xticks(xticks)
    ax.set_yticks(yticks)


fig = plt.figure()

# Linear weight/delay profile on an open (non-wrapped) 1x51 line of nodes.
#{ conn5lin #}
ldict = {'rows': 1, 'columns': 51,
         'extent': [51., 1.], 'center': [25., 0.],
         'elements': 'iaf_psc_alpha'}
cdict = {'connection_type': 'divergent',
         'mask': {'rectangular': {'lower_left': [-25.5, -0.5],
                                  'upper_right': [25.5, 0.5]}},
         'weights': {'linear': {'c': 1.0, 'a': -0.05, 'cutoff': 0.0}},
         'delays': {'linear': {'c': 0.1, 'a': 0.02}}}
#{ end #}
wd_fig(fig, 311, ldict, cdict, 'weight', label='Weight')
wd_fig(fig, 311, ldict, cdict, 'delay', label='Delay', clr='red')
fig.gca().legend()

# Same profile with periodic boundary conditions (edge_wrap).
lpdict = {'rows': 1, 'columns': 51, 'extent': [51., 1.], 'center': [25., 0.],
          'elements': 'iaf_psc_alpha', 'edge_wrap': True}
#{ conn5linpbc #}
cdict = {'connection_type': 'divergent',
         'mask': {'rectangular': {'lower_left': [-25.5, -0.5],
                                  'upper_right': [25.5, 0.5]}},
         'weights': {'linear': {'c': 1.0, 'a': -0.05, 'cutoff': 0.0}},
         'delays': {'linear': {'c': 0.1, 'a': 0.02}}}
#{ end #}
wd_fig(fig, 312, lpdict, cdict, 'weight', label='Weight')
wd_fig(fig, 312, lpdict, cdict, 'delay', label='Delay', clr='red')
fig.gca().legend()

# Third panel: compare the four weight profiles from a center reference node.
cdict = {'connection_type': 'divergent',
         'mask': {'rectangular': {'lower_left': [-25.5, -0.5],
                                  'upper_right': [25.5, 0.5]}},
         'weights': {'linear': {'c': 1.0, 'a': -0.05, 'cutoff': 0.0}}}
wd_fig(fig, 313, ldict, cdict, 'weight', label='Linear',
       rpos=[25., 0.], clr='orange')

#{ conn5exp #}
cdict = {'connection_type': 'divergent',
         'mask': {'rectangular': {'lower_left': [-25.5, -0.5],
                                  'upper_right': [25.5, 0.5]}},
         'weights': {'exponential': {'a': 1., 'tau': 5.}}}
#{ end #}
wd_fig(fig, 313, ldict, cdict, 'weight', label='Exponential',
       rpos=[25., 0.])

#{ conn5gauss #}
cdict = {'connection_type': 'divergent',
         'mask': {'rectangular': {'lower_left': [-25.5, -0.5],
                                  'upper_right': [25.5, 0.5]}},
         'weights': {'gaussian': {'p_center': 1., 'sigma': 5.}}}
#{ end #}
wd_fig(fig, 313, ldict, cdict, 'weight', label='Gaussian', clr='green',
       rpos=[25., 0.])

#{ conn5uniform #}
cdict = {'connection_type': 'divergent',
         'mask': {'rectangular': {'lower_left': [-25.5, -0.5],
                                  'upper_right': [25.5, 0.5]}},
         'weights': {'uniform': {'min': 0.2, 'max': 0.8}}}
#{ end #}
wd_fig(fig, 313, ldict, cdict, 'weight', label='Uniform', clr='red',
       rpos=[25., 0.])
fig.gca().legend()
plt.savefig('../user_manual_figures/conn5.png', bbox_inches='tight')
# --------------------------------
# fixed number of connections: distance distribution


def pn_fig(fig, loc, ldict, cdict,
           xlim=[0., .5], ylim=[0, 3.5], xticks=range(0, 51, 5),
           yticks=np.arange(0., 1.1, 0.2), clr='blue',
           label=''):
    """Histogram of source-target distances over all connections, overlaid
    with the analytically expected distance density (red curve)."""
    nest.ResetKernel()
    l = tp.CreateLayer(ldict)
    tp.ConnectLayers(l, l, cdict)
    ax = fig.add_subplot(loc)
    rn = nest.GetLeaves(l)[0]
    conns = nest.GetConnections(rn)
    cstat = nest.GetStatus(conns)
    srcs = [sd['source'] for sd in cstat]
    tgts = [sd['target'] for sd in cstat]
    dist = np.array(tp.Distance(srcs, tgts))
    # NOTE(review): hist()'s `normed` argument was removed in matplotlib 3.x
    # in favor of `density=True` -- confirm the targeted matplotlib version.
    ax.hist(dist, bins=50, histtype='stepfilled', normed=True)
    r = np.arange(0., 0.51, 0.01)
    plt.plot(r, 2 * np.pi * r * (1 - 2 * r) * 12 / np.pi, 'r-', lw=3,
             zorder=-10)
    ax.set_xlim(xlim)
    ax.set_ylim(ylim)
    """ax.set_xticks(xticks)
    ax.set_yticks(yticks)"""
    # ax.set_aspect(100, 'box')
    ax.set_xlabel('Source-target distance d')
    ax.set_ylabel('Connection probability pconn(d)')


fig = plt.figure()

#{ conn6 #}
pos = [[np.random.uniform(-1., 1.), np.random.uniform(-1., 1.)]
       for j in range(1000)]
ldict = {'positions': pos, 'extent': [2., 2.],
         'elements': 'iaf_psc_alpha', 'edge_wrap': True}
cdict = {'connection_type': 'divergent',
         'mask': {'circular': {'radius': 1.0}},
         'kernel': {'linear': {'c': 1., 'a': -2., 'cutoff': 0.0}},
         'number_of_connections': 50,
         'allow_multapses': True, 'allow_autapses': False}
#{ end #}
pn_fig(fig, 111, ldict, cdict)
plt.savefig('../user_manual_figures/conn6.png', bbox_inches='tight')
# -----------------------------
# manual snippets only from here on: each #{ connN #} block is extracted
# verbatim into the topology user manual; no figures are produced.

# Composite layer with pyramidal ('pyr') and interneuron ('in') elements,
# connected pyr->in and in->pyr with different masks.
#{ conn7 #}
nest.ResetKernel()
nest.CopyModel('iaf_psc_alpha', 'pyr')
nest.CopyModel('iaf_psc_alpha', 'in')
ldict = {'rows': 10, 'columns': 10, 'elements': ['pyr', 'in']}
cdict_p2i = {'connection_type': 'divergent',
             'mask': {'circular': {'radius': 0.5}},
             'kernel': 0.8,
             'sources': {'model': 'pyr'},
             'targets': {'model': 'in'}}
cdict_i2p = {'connection_type': 'divergent',
             'mask': {'rectangular': {'lower_left': [-0.2, -0.2],
                                      'upper_right': [0.2, 0.2]}},
             'sources': {'model': 'in'},
             'targets': {'model': 'pyr'}}
l = tp.CreateLayer(ldict)
tp.ConnectLayers(l, l, cdict_p2i)
tp.ConnectLayers(l, l, cdict_i2p)
#{ end #}

# ----------------------------

# Same network, now with explicit excitatory/inhibitory synapse models.
#{ conn8 #}
nest.ResetKernel()
nest.CopyModel('iaf_psc_alpha', 'pyr')
nest.CopyModel('iaf_psc_alpha', 'in')
nest.CopyModel('static_synapse', 'exc', {'weight': 2.0})
nest.CopyModel('static_synapse', 'inh', {'weight': -8.0})
ldict = {'rows': 10, 'columns': 10, 'elements': ['pyr', 'in']}
cdict_p2i = {'connection_type': 'divergent',
             'mask': {'circular': {'radius': 0.5}},
             'kernel': 0.8,
             'sources': {'model': 'pyr'},
             'targets': {'model': 'in'},
             'synapse_model': 'exc'}
cdict_i2p = {'connection_type': 'divergent',
             'mask': {'rectangular': {'lower_left': [-0.2, -0.2],
                                      'upper_right': [0.2, 0.2]}},
             'sources': {'model': 'in'},
             'targets': {'model': 'pyr'},
             'synapse_model': 'inh'}
l = tp.CreateLayer(ldict)
tp.ConnectLayers(l, l, cdict_p2i)
tp.ConnectLayers(l, l, cdict_i2p)
#{ end #}

# ----------------------------

# Stimulation: a single poisson_generator drives a small region of the layer.
#{ conn9 #}
nrn_layer = tp.CreateLayer({'rows': 20,
                            'columns': 20,
                            'elements': 'iaf_psc_alpha'})
stim = tp.CreateLayer({'rows': 1,
                       'columns': 1,
                       'elements': 'poisson_generator'})
cdict_stim = {'connection_type': 'divergent',
              'mask': {'circular': {'radius': 0.1},
                       'anchor': [0.2, 0.2]}}
tp.ConnectLayers(stim, nrn_layer, cdict_stim)
#{ end #}

# ----------------------------

# Recording from a region via a spike_detector layer (convergent connection).
#{ conn10 #}
rec = tp.CreateLayer({'rows': 1,
                      'columns': 1,
                      'elements': 'spike_detector'})
cdict_rec = {'connection_type': 'convergent',
             'mask': {'circular': {'radius': 0.1},
                      'anchor': [-0.2, 0.2]}}
tp.ConnectLayers(nrn_layer, rec, cdict_rec)
#{ end #}

# ----------------------------

# Alternative: record all local layer nodes with plain nest.Connect.
#{ conn11 #}
rec = nest.Create('spike_detector')
nrns = nest.GetLeaves(nrn_layer, local_only=True)[0]
nest.Connect(nrns, rec)
#{ end #}
| gpl-2.0 |
murali-munna/scikit-learn | examples/plot_kernel_ridge_regression.py | 230 | 6222 | """
=============================================
Comparison of kernel ridge regression and SVR
=============================================
Both kernel ridge regression (KRR) and SVR learn a non-linear function by
employing the kernel trick, i.e., they learn a linear function in the space
induced by the respective kernel which corresponds to a non-linear function in
the original space. They differ in the loss functions (ridge versus
epsilon-insensitive loss). In contrast to SVR, fitting a KRR can be done in
closed-form and is typically faster for medium-sized datasets. On the other
hand, the learned model is non-sparse and thus slower than SVR at
prediction-time.
This example illustrates both methods on an artificial dataset, which
consists of a sinusoidal target function and strong noise added to every fifth
datapoint. The first figure compares the learned model of KRR and SVR when both
complexity/regularization and bandwidth of the RBF kernel are optimized using
grid-search. The learned functions are very similar; however, fitting KRR is
approx. seven times faster than fitting SVR (both with grid-search). However,
prediction of 100000 target values is more than tree times faster with SVR
since it has learned a sparse model using only approx. 1/3 of the 100 training
datapoints as support vectors.
The next figure compares the time for fitting and prediction of KRR and SVR for
different sizes of the training set. Fitting KRR is faster than SVR for medium-
sized training sets (less than 1000 samples); however, for larger training sets
SVR scales better. With regard to prediction time, SVR is faster than
KRR for all sizes of the training set because of the learned sparse
solution. Note that the degree of sparsity and thus the prediction time depends
on the parameters epsilon and C of the SVR.
"""
# Authors: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD 3 clause
from __future__ import division
import time
import numpy as np
from sklearn.svm import SVR
from sklearn.grid_search import GridSearchCV
from sklearn.learning_curve import learning_curve
from sklearn.kernel_ridge import KernelRidge
import matplotlib.pyplot as plt
rng = np.random.RandomState(0)
#############################################################################
# Generate sample data
X = 5 * rng.rand(10000, 1)
y = np.sin(X).ravel()
# Add noise to targets
y[::5] += 3 * (0.5 - rng.rand(X.shape[0]/5))
X_plot = np.linspace(0, 5, 100000)[:, None]
#############################################################################
# Fit regression model
train_size = 100
svr = GridSearchCV(SVR(kernel='rbf', gamma=0.1), cv=5,
param_grid={"C": [1e0, 1e1, 1e2, 1e3],
"gamma": np.logspace(-2, 2, 5)})
kr = GridSearchCV(KernelRidge(kernel='rbf', gamma=0.1), cv=5,
param_grid={"alpha": [1e0, 0.1, 1e-2, 1e-3],
"gamma": np.logspace(-2, 2, 5)})
t0 = time.time()
svr.fit(X[:train_size], y[:train_size])
svr_fit = time.time() - t0
print("SVR complexity and bandwidth selected and model fitted in %.3f s"
% svr_fit)
t0 = time.time()
kr.fit(X[:train_size], y[:train_size])
kr_fit = time.time() - t0
print("KRR complexity and bandwidth selected and model fitted in %.3f s"
% kr_fit)
sv_ratio = svr.best_estimator_.support_.shape[0] / train_size
print("Support vector ratio: %.3f" % sv_ratio)
t0 = time.time()
y_svr = svr.predict(X_plot)
svr_predict = time.time() - t0
print("SVR prediction for %d inputs in %.3f s"
% (X_plot.shape[0], svr_predict))
t0 = time.time()
y_kr = kr.predict(X_plot)
kr_predict = time.time() - t0
print("KRR prediction for %d inputs in %.3f s"
% (X_plot.shape[0], kr_predict))
#############################################################################
# look at the results
sv_ind = svr.best_estimator_.support_
plt.scatter(X[sv_ind], y[sv_ind], c='r', s=50, label='SVR support vectors')
plt.scatter(X[:100], y[:100], c='k', label='data')
plt.hold('on')
plt.plot(X_plot, y_svr, c='r',
label='SVR (fit: %.3fs, predict: %.3fs)' % (svr_fit, svr_predict))
plt.plot(X_plot, y_kr, c='g',
label='KRR (fit: %.3fs, predict: %.3fs)' % (kr_fit, kr_predict))
plt.xlabel('data')
plt.ylabel('target')
plt.title('SVR versus Kernel Ridge')
plt.legend()
# Visualize training and prediction time
plt.figure()
# Generate sample data
X = 5 * rng.rand(10000, 1)
y = np.sin(X).ravel()
y[::5] += 3 * (0.5 - rng.rand(X.shape[0]/5))
sizes = np.logspace(1, 4, 7)
for name, estimator in {"KRR": KernelRidge(kernel='rbf', alpha=0.1,
gamma=10),
"SVR": SVR(kernel='rbf', C=1e1, gamma=10)}.items():
train_time = []
test_time = []
for train_test_size in sizes:
t0 = time.time()
estimator.fit(X[:train_test_size], y[:train_test_size])
train_time.append(time.time() - t0)
t0 = time.time()
estimator.predict(X_plot[:1000])
test_time.append(time.time() - t0)
plt.plot(sizes, train_time, 'o-', color="r" if name == "SVR" else "g",
label="%s (train)" % name)
plt.plot(sizes, test_time, 'o--', color="r" if name == "SVR" else "g",
label="%s (test)" % name)
plt.xscale("log")
plt.yscale("log")
plt.xlabel("Train size")
plt.ylabel("Time (seconds)")
plt.title('Execution Time')
plt.legend(loc="best")
# Visualize learning curves
plt.figure()
svr = SVR(kernel='rbf', C=1e1, gamma=0.1)
kr = KernelRidge(kernel='rbf', alpha=0.1, gamma=0.1)
train_sizes, train_scores_svr, test_scores_svr = \
learning_curve(svr, X[:100], y[:100], train_sizes=np.linspace(0.1, 1, 10),
scoring="mean_squared_error", cv=10)
train_sizes_abs, train_scores_kr, test_scores_kr = \
learning_curve(kr, X[:100], y[:100], train_sizes=np.linspace(0.1, 1, 10),
scoring="mean_squared_error", cv=10)
plt.plot(train_sizes, test_scores_svr.mean(1), 'o-', color="r",
label="SVR")
plt.plot(train_sizes, test_scores_kr.mean(1), 'o-', color="g",
label="KRR")
plt.xlabel("Train size")
plt.ylabel("Mean Squared Error")
plt.title('Learning curves')
plt.legend(loc="best")
plt.show()
| bsd-3-clause |
strongh/GPy | GPy/core/parameterization/priors.py | 2 | 26352 | # Copyright (c) 2012 - 2014, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
from scipy.special import gammaln, digamma
from ...util.linalg import pdinv
from domains import _REAL, _POSITIVE
import warnings
import weakref
class Prior(object):
    # Abstract base class for priors.  Subclasses implement lnpdf
    # (log-density), lnpdf_grad (its gradient) and rvs (sampling), and set
    # `domain` to the support of the distribution (_REAL or _POSITIVE).
    domain = None
    _instance = None
    def __new__(cls, *args, **kwargs):
        # Per-class singleton: reuse the cached instance unless a different
        # subclass is being instantiated.
        if not cls._instance or cls._instance.__class__ is not cls:
            cls._instance = super(Prior, cls).__new__(cls, *args, **kwargs)
        return cls._instance
    def pdf(self, x):
        # Density recovered by exponentiating the subclass log-density.
        return np.exp(self.lnpdf(x))
    def plot(self):
        # Lazy plotting hook; requires matplotlib to already be imported.
        import sys
        assert "matplotlib" in sys.modules, "matplotlib package has not been imported."
        from ...plotting.matplot_dep import priors_plots
        priors_plots.univariate_plot(self)
    def __repr__(self, *args, **kwargs):
        return self.__str__()
class Gaussian(Prior):
    """
    Implementation of the univariate Gaussian probability function, coupled with random variables.
    :param mu: mean
    :param sigma: standard deviation
    .. Note:: Bishop 2006 notation is used throughout the code
    """
    domain = _REAL
    _instances = []
    def __new__(cls, mu=0, sigma=1): # Singleton:
        # Prune dead weakrefs, then reuse any live instance whose parameters
        # match; otherwise create a new one and cache a weak reference.
        if cls._instances:
            cls._instances[:] = [instance for instance in cls._instances if instance()]
        for instance in cls._instances:
            if instance().mu == mu and instance().sigma == sigma:
                return instance()
        o = super(Prior, cls).__new__(cls, mu, sigma)
        cls._instances.append(weakref.ref(o))
        return cls._instances[-1]()
    def __init__(self, mu, sigma):
        self.mu = float(mu)
        self.sigma = float(sigma)
        # Cache sigma^2 and the log-normalisation constant of the density.
        self.sigma2 = np.square(self.sigma)
        self.constant = -0.5 * np.log(2 * np.pi * self.sigma2)
    def __str__(self):
        return "N({:.2g}, {:.2g})".format(self.mu, self.sigma)
    def lnpdf(self, x):
        # Log-density: constant - (x - mu)^2 / (2 sigma^2).
        return self.constant - 0.5 * np.square(x - self.mu) / self.sigma2
    def lnpdf_grad(self, x):
        # Derivative of lnpdf with respect to x.
        return -(x - self.mu) / self.sigma2
    def rvs(self, n):
        # n samples via the standard-normal affine transform.
        return np.random.randn(n) * self.sigma + self.mu
    # def __getstate__(self):
    # return self.mu, self.sigma
    #
    # def __setstate__(self, state):
    # self.mu = state[0]
    # self.sigma = state[1]
    # self.sigma2 = np.square(self.sigma)
    # self.constant = -0.5 * np.log(2 * np.pi * self.sigma2)
class Uniform(Prior):
    # Uniform prior on the closed interval [lower, upper].
    domain = _REAL
    _instances = []
    def __new__(cls, lower=0, upper=1): # Singleton:
        # Prune dead weakrefs, then reuse any live instance with the same
        # bounds; otherwise create and cache a new one.
        if cls._instances:
            cls._instances[:] = [instance for instance in cls._instances if instance()]
        for instance in cls._instances:
            if instance().lower == lower and instance().upper == upper:
                return instance()
        o = super(Prior, cls).__new__(cls, lower, upper)
        cls._instances.append(weakref.ref(o))
        return cls._instances[-1]()
    def __init__(self, lower, upper):
        self.lower = float(lower)
        self.upper = float(upper)
    def __str__(self):
        return "[{:.2g}, {:.2g}]".format(self.lower, self.upper)
    def lnpdf(self, x):
        # NOTE(review): this returns the 0/1 support indicator rather than a
        # proper log-density (no -log(upper - lower) term and no -inf outside
        # the support); callers appear to rely on this -- confirm before
        # changing.
        region = (x >= self.lower) * (x <= self.upper)
        return region
    def lnpdf_grad(self, x):
        # The density is flat inside the support, so the gradient is zero.
        return np.zeros(x.shape)
    def rvs(self, n):
        return np.random.uniform(self.lower, self.upper, size=n)
    # def __getstate__(self):
    # return self.lower, self.upper
    #
    # def __setstate__(self, state):
    # self.lower = state[0]
    # self.upper = state[1]
class LogGaussian(Gaussian):
    """
    Implementation of the univariate *log*-Gaussian probability function, coupled with random variables.
    :param mu: mean
    :param sigma: standard deviation
    .. Note:: Bishop 2006 notation is used throughout the code
    """
    domain = _POSITIVE
    _instances = []
    def __new__(cls, mu=0, sigma=1): # Singleton:
        # Prune dead weakrefs, then reuse any live instance whose parameters
        # match; otherwise create and cache a new one.
        if cls._instances:
            cls._instances[:] = [instance for instance in cls._instances if instance()]
        for instance in cls._instances:
            if instance().mu == mu and instance().sigma == sigma:
                return instance()
        o = super(Prior, cls).__new__(cls, mu, sigma)
        cls._instances.append(weakref.ref(o))
        return cls._instances[-1]()
    def __init__(self, mu, sigma):
        self.mu = float(mu)
        self.sigma = float(sigma)
        # Cache sigma^2 and the Gaussian log-normalisation constant.
        self.sigma2 = np.square(self.sigma)
        self.constant = -0.5 * np.log(2 * np.pi * self.sigma2)
    def __str__(self):
        return "lnN({:.2g}, {:.2g})".format(self.mu, self.sigma)
    def lnpdf(self, x):
        # Lognormal log-density: Gaussian in log(x) plus the -log(x)
        # Jacobian of the change of variables.
        return self.constant - 0.5 * np.square(np.log(x) - self.mu) / self.sigma2 - np.log(x)
    def lnpdf_grad(self, x):
        return -((np.log(x) - self.mu) / self.sigma2 + 1.) / x
    def rvs(self, n):
        # Exponentiate Gaussian draws.
        return np.exp(np.random.randn(n) * self.sigma + self.mu)
class MultivariateGaussian(Prior):
    """
    Implementation of the multivariate Gaussian probability function, coupled with random variables.
    :param mu: mean (N-dimensional array)
    :param var: covariance matrix (NxN)
    .. Note:: Bishop 2006 notation is used throughout the code
    """
    domain = _REAL
    _instances = []
    def __new__(cls, mu=0, var=1): # Singleton:
        # Prune dead weakrefs, then reuse any live instance whose parameters
        # match elementwise; otherwise create and cache a new one.
        if cls._instances:
            cls._instances[:] = [instance for instance in cls._instances if instance()]
        for instance in cls._instances:
            if np.all(instance().mu == mu) and np.all(instance().var == var):
                return instance()
        o = super(Prior, cls).__new__(cls, mu, var)
        cls._instances.append(weakref.ref(o))
        return cls._instances[-1]()
    def __init__(self, mu, var):
        self.mu = np.array(mu).flatten()
        self.var = np.array(var)
        # Covariance must be a square matrix conforming to the mean's size.
        assert len(self.var.shape) == 2
        assert self.var.shape[0] == self.var.shape[1]
        assert self.var.shape[0] == self.mu.size
        self.input_dim = self.mu.size
        # Cache the inverse covariance and half log-determinant for lnpdf.
        self.inv, self.hld = pdinv(self.var)
        self.constant = -0.5 * self.input_dim * np.log(2 * np.pi) - self.hld
    def summary(self):
        raise NotImplementedError
    def pdf(self, x):
        return np.exp(self.lnpdf(x))
    def lnpdf(self, x):
        # Quadratic form evaluated row-wise for a batch of points.
        d = x - self.mu
        return self.constant - 0.5 * np.sum(d * np.dot(d, self.inv), 1)
    def lnpdf_grad(self, x):
        d = x - self.mu
        return -np.dot(self.inv, d)
    def rvs(self, n):
        return np.random.multivariate_normal(self.mu, self.var, n)
    def plot(self):
        import sys
        assert "matplotlib" in sys.modules, "matplotlib package has not been imported."
        # BUGFIX: this module lives three packages deep
        # (GPy/core/parameterization), so reaching GPy.plotting needs a
        # three-dot relative import, as in Prior.plot; the former two-dot
        # import pointed at the non-existent GPy.core.plotting.
        from ...plotting.matplot_dep import priors_plots
        priors_plots.multivariate_plot(self)
    def __getstate__(self):
        # Only the parameters are pickled; derived state is rebuilt below.
        return self.mu, self.var
    def __setstate__(self, state):
        self.mu = state[0]
        self.var = state[1]
        assert len(self.var.shape) == 2
        assert self.var.shape[0] == self.var.shape[1]
        assert self.var.shape[0] == self.mu.size
        self.input_dim = self.mu.size
        self.inv, self.hld = pdinv(self.var)
        self.constant = -0.5 * self.input_dim * np.log(2 * np.pi) - self.hld
def gamma_from_EV(E, V):
    # Deprecated module-level shim kept for backward compatibility; new code
    # should call Gamma.from_EV directly.
    warnings.warn("use Gamma.from_EV to create Gamma Prior", FutureWarning)
    return Gamma.from_EV(E, V)
class Gamma(Prior):
    """
    Implementation of the Gamma probability function, coupled with random variables.
    :param a: shape parameter
    :param b: rate parameter (warning: it's the *inverse* of the scale)
    .. Note:: Bishop 2006 notation is used throughout the code
    """
    domain = _POSITIVE
    _instances = []
    def __new__(cls, a=1, b=.5): # Singleton:
        # Prune dead weakrefs, then reuse any live instance whose parameters
        # match; otherwise create and cache a new one.
        if cls._instances:
            cls._instances[:] = [instance for instance in cls._instances if instance()]
        for instance in cls._instances:
            if instance().a == a and instance().b == b:
                return instance()
        o = super(Prior, cls).__new__(cls, a, b)
        cls._instances.append(weakref.ref(o))
        return cls._instances[-1]()
    def __init__(self, a, b):
        self.a = float(a)
        self.b = float(b)
        # Log-normalisation constant: a*log(b) - ln Gamma(a).
        self.constant = -gammaln(self.a) + a * np.log(b)
    def __str__(self):
        return "Ga({:.2g}, {:.2g})".format(self.a, self.b)
    def summary(self):
        """Return a dict of summary statistics of the distribution."""
        ret = {"E[x]": self.a / self.b, \
               "E[ln x]": digamma(self.a) - np.log(self.b), \
               "var[x]": self.a / self.b / self.b, \
               "Entropy": gammaln(self.a) - (self.a - 1.) * digamma(self.a) - np.log(self.b) + self.a}
        if self.a > 1:
            ret['Mode'] = (self.a - 1.) / self.b
        else:
            # BUGFIX: use the same 'Mode' key as the a > 1 branch (was the
            # inconsistently-cased 'mode', so lookups could miss).
            ret['Mode'] = np.nan
        return ret
    def lnpdf(self, x):
        return self.constant + (self.a - 1) * np.log(x) - self.b * x
    def lnpdf_grad(self, x):
        return (self.a - 1.) / x - self.b
    def rvs(self, n):
        # numpy parameterises the Gamma by scale = 1/rate.
        return np.random.gamma(scale=1. / self.b, shape=self.a, size=n)
    @staticmethod
    def from_EV(E, V):
        """
        Creates an instance of a Gamma Prior by specifying the Expected value(s)
        and Variance(s) of the distribution.
        :param E: expected value
        :param V: variance
        """
        # Moment matching: E = a/b and V = a/b^2  =>  a = E^2/V, b = E/V.
        a = np.square(E) / V
        b = E / V
        return Gamma(a, b)
    def __getstate__(self):
        return self.a, self.b
    def __setstate__(self, state):
        self.a = state[0]
        self.b = state[1]
        self.constant = -gammaln(self.a) + self.a * np.log(self.b)
class InverseGamma(Gamma):
    """
    Implementation of the inverse-Gamma probability function, coupled with random variables.
    :param a: shape parameter
    :param b: rate parameter (warning: it's the *inverse* of the scale)
    .. Note:: Bishop 2006 notation is used throughout the code
    """
    domain = _POSITIVE
    _instances = []
    def __new__(cls, a=1, b=.5): # Singleton:
        # Prune dead weakrefs, then reuse any live instance whose parameters
        # match; otherwise create and cache a new one.
        if cls._instances:
            cls._instances[:] = [instance for instance in cls._instances if instance()]
        for instance in cls._instances:
            if instance().a == a and instance().b == b:
                return instance()
        o = super(Prior, cls).__new__(cls, a, b)
        cls._instances.append(weakref.ref(o))
        return cls._instances[-1]()
    def __init__(self, a, b):
        self.a = float(a)
        self.b = float(b)
        # Log-normalisation constant b^a / Gamma(a), shared form with Gamma.
        self.constant = -gammaln(self.a) + a * np.log(b)
    def __str__(self):
        return "iGa({:.2g}, {:.2g})".format(self.a, self.b)
    def lnpdf(self, x):
        return self.constant - (self.a + 1) * np.log(x) - self.b / x
    def lnpdf_grad(self, x):
        return -(self.a + 1.) / x + self.b / x ** 2
    def rvs(self, n):
        # Inverse-gamma draws are reciprocals of Gamma draws.
        return 1. / np.random.gamma(scale=1. / self.b, shape=self.a, size=n)
class DGPLVM_KFDA(Prior):
    """
    Implementation of the Discriminative Gaussian Process Latent Variable function using
    Kernel Fisher Discriminant Analysis by Seung-Jean Kim for implementing Face paper
    by Chaochao Lu.
    :param lambdaa: regularisation constant of the KFDA objective
    :param sigma2: constant scaling the strength of the prior
    .. Note:: Surpassing Human-Level Face paper dgplvm implementation
    """
    domain = _REAL
    # _instances = []
    # def __new__(cls, lambdaa, sigma2): # Singleton:
    # if cls._instances:
    # cls._instances[:] = [instance for instance in cls._instances if instance()]
    # for instance in cls._instances:
    # if instance().mu == mu and instance().sigma == sigma:
    # return instance()
    # o = super(Prior, cls).__new__(cls, mu, sigma)
    # cls._instances.append(weakref.ref(o))
    # return cls._instances[-1]()
    def __init__(self, lambdaa, sigma2, lbl, kern, x_shape):
        """Cache the one-hot labels `lbl`, kernel `kern` and latent shape
        `x_shape`, and precompute the KFDA helper vector `a` and matrix `A`."""
        self.datanum = lbl.shape[0]
        self.classnum = lbl.shape[1]
        self.lambdaa = lambdaa
        self.sigma2 = sigma2
        self.lbl = lbl
        self.kern = kern
        lst_ni = self.compute_lst_ni()
        self.a = self.compute_a(lst_ni)
        self.A = self.compute_A(lst_ni)
        self.x_shape = x_shape
    def get_class_label(self, y):
        # Index of the hot entry in a one-hot row, or -1 if none is set.
        for idx, v in enumerate(y):
            if v == 1:
                return idx
        return -1
    # This function assigns each data point to its own class
    # and returns the dictionary which contains the class name and parameters.
    def compute_cls(self, x):
        cls = {}
        # Appending each data point to its proper class
        for j in xrange(self.datanum):
            class_label = self.get_class_label(self.lbl[j])
            if class_label not in cls:
                cls[class_label] = []
            cls[class_label].append(x[j])
        # This criterion is binary: only the first two classes are kept.
        if len(cls) > 2:
            for i in range(2, self.classnum):
                del cls[i]
        return cls
    def x_reduced(self, cls):
        # Stack the two per-class sample lists back into one array.
        x1 = cls[0]
        x2 = cls[1]
        x = np.concatenate((x1, x2), axis=0)
        return x
    def compute_lst_ni(self):
        # Per-class sample counts [N_0, N_1] read off the one-hot columns.
        lst_ni = []
        lst_ni1 = []
        lst_ni2 = []
        f1 = (np.where(self.lbl[:, 0] == 1)[0])
        f2 = (np.where(self.lbl[:, 1] == 1)[0])
        for idx in f1:
            lst_ni1.append(idx)
        for idx in f2:
            lst_ni2.append(idx)
        lst_ni.append(len(lst_ni1))
        lst_ni.append(len(lst_ni2))
        return lst_ni
    def compute_a(self, lst_ni):
        # Signed indicator vector: +1/N_0 entries for the first class and
        # -1/N_1 entries for the second.
        a = np.ones((self.datanum, 1))
        count = 0
        for N_i in lst_ni:
            if N_i == lst_ni[0]:
                a[count:count + N_i] = (float(1) / N_i) * a[count]
                count += N_i
            else:
                if N_i == lst_ni[1]:
                    a[count: count + N_i] = -(float(1) / N_i) * a[count]
                    count += N_i
        return a
    def compute_A(self, lst_ni):
        # Block-diagonal within-class centring matrix.
        A = np.zeros((self.datanum, self.datanum))
        idx = 0
        for N_i in lst_ni:
            B = float(1) / np.sqrt(N_i) * (np.eye(N_i) - ((float(1) / N_i) * np.ones((N_i, N_i))))
            A[idx:idx + N_i, idx:idx + N_i] = B
            idx += N_i
        return A
    # Here log function
    def lnpdf(self, x):
        # Log-prior: -J*/sigma2 where J* is the regularised KFDA
        # discriminant score of the latent points.
        x = x.reshape(self.x_shape)
        K = self.kern.K(x)
        a_trans = np.transpose(self.a)
        paran = self.lambdaa * np.eye(x.shape[0]) + self.A.dot(K).dot(self.A)
        inv_part = pdinv(paran)[0]
        J = a_trans.dot(K).dot(self.a) - a_trans.dot(K).dot(self.A).dot(inv_part).dot(self.A).dot(K).dot(self.a)
        J_star = (1. / self.lambdaa) * J
        return (-1. / self.sigma2) * J_star
    # Here gradient function
    def lnpdf_grad(self, x):
        # Chain rule: dJ*/dK propagated through the kernel's gradients_X.
        x = x.reshape(self.x_shape)
        K = self.kern.K(x)
        paran = self.lambdaa * np.eye(x.shape[0]) + self.A.dot(K).dot(self.A)
        inv_part = pdinv(paran)[0]
        b = self.A.dot(inv_part).dot(self.A).dot(K).dot(self.a)
        a_Minus_b = self.a - b
        a_b_trans = np.transpose(a_Minus_b)
        DJ_star_DK = (1. / self.lambdaa) * (a_Minus_b.dot(a_b_trans))
        DJ_star_DX = self.kern.gradients_X(DJ_star_DK, x)
        return (-1. / self.sigma2) * DJ_star_DX
    def rvs(self, n):
        return np.random.rand(n) # A WRONG implementation
    def __str__(self):
        return 'DGPLVM_prior'
    def __getstate__(self):
        # BUGFIX: the method name was misspelled `__getstate___` (three
        # trailing underscores), so pickle never used it and __setstate__
        # then crashed unpacking seven values from a five-tuple.  Only the
        # constructor inputs are saved; `a` and `A` are recomputed below.
        return self.lbl, self.lambdaa, self.sigma2, self.kern, self.x_shape
    def __setstate__(self, state):
        # BUGFIX: unpack exactly the five values that __getstate__ saves
        # (previously expected seven, including the recomputed a and A).
        lbl, lambdaa, sigma2, kern, x_shape = state
        self.datanum = lbl.shape[0]
        self.classnum = lbl.shape[1]
        self.lambdaa = lambdaa
        self.sigma2 = sigma2
        self.lbl = lbl
        self.kern = kern
        lst_ni = self.compute_lst_ni()
        self.a = self.compute_a(lst_ni)
        self.A = self.compute_A(lst_ni)
        self.x_shape = x_shape
class DGPLVM(Prior):
    """
    Implementation of the Discriminative Gaussian Process Latent Variable model paper, by Raquel.
    :param sigma2: constant
    .. Note:: DGPLVM for Classification paper implementation
    """
    domain = _REAL
    # _instances = []
    # def __new__(cls, mu, sigma): # Singleton:
    # if cls._instances:
    # cls._instances[:] = [instance for instance in cls._instances if instance()]
    # for instance in cls._instances:
    # if instance().mu == mu and instance().sigma == sigma:
    # return instance()
    # o = super(Prior, cls).__new__(cls, mu, sigma)
    # cls._instances.append(weakref.ref(o))
    # return cls._instances[-1]()
    def __init__(self, sigma2, lbl, x_shape):
        # sigma2 scales the strength of the prior; lbl is a one-hot
        # (datanum x classnum) label matrix; x_shape is the latent X shape.
        self.sigma2 = sigma2
        # self.x = x
        self.lbl = lbl
        self.classnum = lbl.shape[1]
        self.datanum = lbl.shape[0]
        self.x_shape = x_shape
        self.dim = x_shape[1]
    def get_class_label(self, y):
        # Index of the hot entry in a one-hot row, or -1 if none is set.
        for idx, v in enumerate(y):
            if v == 1:
                return idx
        return -1
    # This function assigns each data point to its own class
    # and returns the dictionary which contains the class name and parameters.
    def compute_cls(self, x):
        cls = {}
        # Appending each data point to its proper class
        for j in xrange(self.datanum):
            class_label = self.get_class_label(self.lbl[j])
            if class_label not in cls:
                cls[class_label] = []
            cls[class_label].append(x[j])
        return cls
    # This function computes mean of each class. The mean is calculated through each dimension
    def compute_Mi(self, cls):
        M_i = np.zeros((self.classnum, self.dim))
        for i in cls:
            # Mean of each class
            M_i[i] = np.mean(cls[i], axis=0)
        return M_i
    # Adding data points as tuple to the dictionary so that we can access indices
    def compute_indices(self, x):
        data_idx = {}
        for j in xrange(self.datanum):
            class_label = self.get_class_label(self.lbl[j])
            if class_label not in data_idx:
                data_idx[class_label] = []
            t = (j, x[j])
            data_idx[class_label].append(t)
        return data_idx
    # Adding indices to the list so we can access whole the indices
    def compute_listIndices(self, data_idx):
        lst_idx = []
        lst_idx_all = []
        for i in data_idx:
            if len(lst_idx) == 0:
                pass
                #Do nothing, because it is the first time list is created so is empty
            else:
                lst_idx = []
            # Here we put indices of each class in to the list called lst_idx_all
            for m in xrange(len(data_idx[i])):
                lst_idx.append(data_idx[i][m][0])
            lst_idx_all.append(lst_idx)
        return lst_idx_all
    # This function calculates between classes variances
    def compute_Sb(self, cls, M_i, M_0):
        Sb = np.zeros((self.dim, self.dim))
        for i in cls:
            # Outer product of the class-mean offset, weighted by class size.
            B = (M_i[i] - M_0).reshape(self.dim, 1)
            B_trans = B.transpose()
            Sb += (float(len(cls[i])) / self.datanum) * B.dot(B_trans)
        return Sb
    # This function calculates within classes variances
    def compute_Sw(self, cls, M_i):
        Sw = np.zeros((self.dim, self.dim))
        for i in cls:
            N_i = float(len(cls[i]))
            W_WT = np.zeros((self.dim, self.dim))
            for xk in cls[i]:
                W = (xk - M_i[i])
                W_WT += np.outer(W, W)
            Sw += (N_i / self.datanum) * ((1. / N_i) * W_WT)
        return Sw
    # Calculating beta and Bi for Sb
    def compute_sig_beta_Bi(self, data_idx, M_i, M_0, lst_idx_all):
        # import pdb
        # pdb.set_trace()
        B_i = np.zeros((self.classnum, self.dim))
        Sig_beta_B_i_all = np.zeros((self.datanum, self.dim))
        for i in data_idx:
            # pdb.set_trace()
            # Calculating Bi
            B_i[i] = (M_i[i] - M_0).reshape(1, self.dim)
        for k in xrange(self.datanum):
            for i in data_idx:
                N_i = float(len(data_idx[i]))
                # beta depends on whether point k belongs to class i.
                if k in lst_idx_all[i]:
                    beta = (float(1) / N_i) - (float(1) / self.datanum)
                    Sig_beta_B_i_all[k] += float(N_i) / self.datanum * (beta * B_i[i])
                else:
                    beta = -(float(1) / self.datanum)
                    Sig_beta_B_i_all[k] += float(N_i) / self.datanum * (beta * B_i[i])
        Sig_beta_B_i_all = Sig_beta_B_i_all.transpose()
        return Sig_beta_B_i_all
    # Calculating W_j s separately so we can access all the W_j s anytime
    def compute_wj(self, data_idx, M_i):
        W_i = np.zeros((self.datanum, self.dim))
        for i in data_idx:
            N_i = float(len(data_idx[i]))
            for tpl in data_idx[i]:
                xj = tpl[1]
                j = tpl[0]
                W_i[j] = (xj - M_i[i])
        return W_i
    # Calculating alpha and Wj for Sw
    def compute_sig_alpha_W(self, data_idx, lst_idx_all, W_i):
        Sig_alpha_W_i = np.zeros((self.datanum, self.dim))
        for i in data_idx:
            N_i = float(len(data_idx[i]))
            for tpl in data_idx[i]:
                k = tpl[0]
                for j in lst_idx_all[i]:
                    if k == j:
                        alpha = 1 - (float(1) / N_i)
                        Sig_alpha_W_i[k] += (alpha * W_i[j])
                    else:
                        alpha = 0 - (float(1) / N_i)
                        Sig_alpha_W_i[k] += (alpha * W_i[j])
        Sig_alpha_W_i = (1. / self.datanum) * np.transpose(Sig_alpha_W_i)
        return Sig_alpha_W_i
    # This function calculates log of our prior
    def lnpdf(self, x):
        x = x.reshape(self.x_shape)
        cls = self.compute_cls(x)
        M_0 = np.mean(x, axis=0)
        M_i = self.compute_Mi(cls)
        Sb = self.compute_Sb(cls, M_i, M_0)
        Sw = self.compute_Sw(cls, M_i)
        # Sb is jittered with a fraction of its smallest diagonal entry
        # before inversion, to keep pdinv numerically stable.
        # Sb_inv_N = np.linalg.inv(Sb + np.eye(Sb.shape[0]) * (np.diag(Sb).min() * 0.1))
        #Sb_inv_N = np.linalg.inv(Sb+np.eye(Sb.shape[0])*0.1)
        Sb_inv_N = pdinv(Sb+ np.eye(Sb.shape[0]) * (np.diag(Sb).min() * 0.1))[0]
        return (-1 / self.sigma2) * np.trace(Sb_inv_N.dot(Sw))
    # This function calculates derivative of the log of prior function
    def lnpdf_grad(self, x):
        x = x.reshape(self.x_shape)
        cls = self.compute_cls(x)
        M_0 = np.mean(x, axis=0)
        M_i = self.compute_Mi(cls)
        Sb = self.compute_Sb(cls, M_i, M_0)
        Sw = self.compute_Sw(cls, M_i)
        data_idx = self.compute_indices(x)
        lst_idx_all = self.compute_listIndices(data_idx)
        Sig_beta_B_i_all = self.compute_sig_beta_Bi(data_idx, M_i, M_0, lst_idx_all)
        W_i = self.compute_wj(data_idx, M_i)
        Sig_alpha_W_i = self.compute_sig_alpha_W(data_idx, lst_idx_all, W_i)
        # Calculating inverse of Sb and its transpose and minus
        # Sb_inv_N = np.linalg.inv(Sb + np.eye(Sb.shape[0]) * (np.diag(Sb).min() * 0.1))
        # Sb_inv_N = np.linalg.inv(Sb+np.eye(Sb.shape[0])*0.1)
        Sb_inv_N = pdinv(Sb+ np.eye(Sb.shape[0]) * (np.diag(Sb).min() * 0.1))[0]
        Sb_inv_N_trans = np.transpose(Sb_inv_N)
        Sb_inv_N_trans_minus = -1 * Sb_inv_N_trans
        Sw_trans = np.transpose(Sw)
        # Calculating DJ/DXk
        DJ_Dxk = 2 * (
            Sb_inv_N_trans_minus.dot(Sw_trans).dot(Sb_inv_N_trans).dot(Sig_beta_B_i_all) + Sb_inv_N_trans.dot(
                Sig_alpha_W_i))
        # Calculating derivative of the log of the prior
        DPx_Dx = ((-1 / self.sigma2) * DJ_Dxk)
        return DPx_Dx.T
    # def frb(self, x):
    # from functools import partial
    # from GPy.models import GradientChecker
    # f = partial(self.lnpdf)
    # df = partial(self.lnpdf_grad)
    # grad = GradientChecker(f, df, x, 'X')
    # grad.checkgrad(verbose=1)
    def rvs(self, n):
        return np.random.rand(n) # A WRONG implementation
    def __str__(self):
        return 'DGPLVM_prior'
class HalfT(Prior):
    """
    Implementation of the half student t probability function, coupled with random variables.
    :param A: scale parameter
    :param nu: degrees of freedom
    """
    domain = _POSITIVE
    _instances = []
    def __new__(cls, A, nu): # Singleton:
        # Prune dead weakrefs, then reuse any live instance whose parameters
        # match; otherwise create and cache a new one.
        if cls._instances:
            cls._instances[:] = [instance for instance in cls._instances if instance()]
        for instance in cls._instances:
            if instance().A == A and instance().nu == nu:
                return instance()
        o = super(Prior, cls).__new__(cls, A, nu)
        cls._instances.append(weakref.ref(o))
        return cls._instances[-1]()
    def __init__(self, A, nu):
        self.A = float(A)
        self.nu = float(nu)
        # Log-normalisation constant of the Student-t density.
        self.constant = gammaln(.5*(self.nu+1.)) - gammaln(.5*self.nu) - .5*np.log(np.pi*self.A*self.nu)
    def __str__(self):
        return "hT({:.2g}, {:.2g})".format(self.A, self.nu)
    def lnpdf(self,theta):
        # NOTE(review): the (theta>0) factor zeroes (not -inf) the result
        # outside the positive support -- confirm callers expect that.
        return (theta>0) * ( self.constant -.5*(self.nu+1) * np.log( 1.+ (1./self.nu) * (theta/self.A)**2 ) )
        #theta = theta if isinstance(theta,np.ndarray) else np.array([theta])
        #lnpdfs = np.zeros_like(theta)
        #theta = np.array([theta])
        #above_zero = theta.flatten()>1e-6
        #v = self.nu
        #sigma2=self.A
        #stop
        #lnpdfs[above_zero] = (+ gammaln((v + 1) * 0.5)
        # - gammaln(v * 0.5)
        # - 0.5*np.log(sigma2 * v * np.pi)
        # - 0.5*(v + 1)*np.log(1 + (1/np.float(v))*((theta[above_zero][0]**2)/sigma2))
        #)
        #return lnpdfs
    def lnpdf_grad(self,theta):
        theta = theta if isinstance(theta,np.ndarray) else np.array([theta])
        grad = np.zeros_like(theta)
        above_zero = theta>1e-6
        v = self.nu
        sigma2=self.A
        # NOTE(review): the denominator uses theta[above_zero][0]**2 -- only
        # the *first* above-threshold element -- while the numerator is
        # vectorised; this looks like an indexing bug for array-valued theta.
        # Confirm before relying on these gradients.
        grad[above_zero] = -0.5*(v+1)*(2*theta[above_zero])/(v*sigma2 + theta[above_zero][0]**2)
        return grad
    def rvs(self, n):
        #return np.random.randn(n) * self.sigma + self.mu
        from scipy.stats import t
        #[np.abs(x) for x in t.rvs(df=4,loc=0,scale=50, size=10000)])
        ret = t.rvs(self.nu,loc=0,scale=self.A, size=n)
        # NOTE(review): negative draws are clamped to 0 rather than
        # reflected (abs); a true half-t sampler would fold them back --
        # confirm intent.
        ret[ret<0] = 0
        return ret
| bsd-3-clause |
basilfx/RIOT | tests/pkg_tensorflow-lite/mnist/generate_digit.py | 19 | 1164 | #!/usr/bin/env python3
"""Generate a binary file from a sample image of the MNIST dataset.
Pixels of the sample are stored as uint8; images have size 28x28.
"""
import os
import argparse
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.datasets import mnist
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
def main(args):
    """Dump one MNIST test digit to a raw binary file.

    The digit at ``args.index`` is written as 28x28 uint8 pixels to
    ``args.output`` (relative to this script's directory), and optionally
    displayed with matplotlib unless ``--no-plot`` was given.
    """
    _, (mnist_test, _) = mnist.load_data()
    data = mnist_test[args.index]
    data = data.astype('uint8')
    output_path = os.path.join(SCRIPT_DIR, args.output)
    # Raw dump, no header: the target firmware expects bare pixel bytes.
    np.ndarray.tofile(data, output_path)
    # Idiomatic truthiness test instead of the former `is False` identity
    # check on the boolean flag.
    if not args.no_plot:
        plt.gray()
        plt.imshow(data.reshape(28, 28))
        plt.show()
if __name__ == '__main__':
    # CLI: choose which test digit to dump, where to write it, and whether
    # to preview it with matplotlib.
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--index", type=int, default=0,
                        help="Image index in MNIST test dataset")
    parser.add_argument("-o", "--output", type=str, default='digit',
                        help="Output filename")
    parser.add_argument("--no-plot", default=False, action='store_true',
                        help="Disable image display in matplotlib")
    main(parser.parse_args())
| lgpl-2.1 |
clemkoa/scikit-learn | sklearn/ensemble/tests/test_voting_classifier.py | 15 | 17696 | """Testing for the VotingClassifier"""
import numpy as np
from sklearn.utils.testing import assert_almost_equal, assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal, assert_true, assert_false
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_warns_message
from sklearn.exceptions import NotFittedError
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
from sklearn.model_selection import GridSearchCV
from sklearn import datasets
from sklearn.model_selection import cross_val_score
from sklearn.datasets import make_multilabel_classification
from sklearn.svm import SVC
from sklearn.multiclass import OneVsRestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.base import BaseEstimator, ClassifierMixin
# Load the iris dataset and keep two features (no permutation is applied,
# despite what the original comment claimed).
iris = datasets.load_iris()
X, y = iris.data[:, 1:3], iris.target
def test_estimator_init():
    """Validation errors raised by VotingClassifier.fit for bad arguments."""
    eclf = VotingClassifier(estimators=[])
    msg = ('Invalid `estimators` attribute, `estimators` should be'
           ' a list of (string, estimator) tuples')
    assert_raise_message(AttributeError, msg, eclf.fit, X, y)
    clf = LogisticRegression(random_state=1)
    # Invalid voting scheme.
    eclf = VotingClassifier(estimators=[('lr', clf)], voting='error')
    msg = ('Voting must be \'soft\' or \'hard\'; got (voting=\'error\')')
    assert_raise_message(ValueError, msg, eclf.fit, X, y)
    # Mismatched lengths of weights and estimators.
    eclf = VotingClassifier(estimators=[('lr', clf)], weights=[1, 2])
    msg = ('Number of classifiers and weights must be equal'
           '; got 2 weights, 1 estimators')
    assert_raise_message(ValueError, msg, eclf.fit, X, y)
    # Duplicate estimator names.
    eclf = VotingClassifier(estimators=[('lr', clf), ('lr', clf)],
                            weights=[1, 2])
    msg = "Names provided are not unique: ['lr', 'lr']"
    assert_raise_message(ValueError, msg, eclf.fit, X, y)
    # Double underscore is reserved for parameter routing.
    eclf = VotingClassifier(estimators=[('lr__', clf)])
    msg = "Estimator names must not contain __: got ['lr__']"
    assert_raise_message(ValueError, msg, eclf.fit, X, y)
    # Names must not shadow constructor arguments.
    eclf = VotingClassifier(estimators=[('estimators', clf)])
    msg = "Estimator names conflict with constructor arguments: ['estimators']"
    assert_raise_message(ValueError, msg, eclf.fit, X, y)
def test_predictproba_hardvoting():
    """predict_proba must not be available when voting='hard'."""
    pair = [('lr1', LogisticRegression()),
            ('lr2', LogisticRegression())]
    eclf = VotingClassifier(estimators=pair, voting='hard')
    expected_msg = "predict_proba is not available when voting='hard'"
    assert_raise_message(AttributeError, expected_msg, eclf.predict_proba, X)
def test_notfitted():
    """Calling predict_proba before fit raises NotFittedError."""
    pair = [('lr1', LogisticRegression()),
            ('lr2', LogisticRegression())]
    eclf = VotingClassifier(estimators=pair, voting='soft')
    expected_msg = ("This VotingClassifier instance is not fitted yet. "
                    "Call 'fit' with appropriate arguments before using "
                    "this method.")
    assert_raise_message(NotFittedError, expected_msg, eclf.predict_proba, X)
def test_majority_label_iris():
    """Check classification by majority label on dataset iris."""
    voters = [('lr', LogisticRegression(random_state=123)),
              ('rf', RandomForestClassifier(random_state=123)),
              ('gnb', GaussianNB())]
    eclf = VotingClassifier(estimators=voters, voting='hard')
    mean_accuracy = cross_val_score(eclf, X, y, cv=5,
                                    scoring='accuracy').mean()
    assert_almost_equal(mean_accuracy, 0.95, decimal=2)
def test_tie_situation():
    """Check voting classifier selects smaller class label in tie situation."""
    lr = LogisticRegression(random_state=123)
    rf = RandomForestClassifier(random_state=123)
    eclf = VotingClassifier(estimators=[('lr', lr), ('rf', rf)],
                            voting='hard')
    # The two base classifiers disagree on sample 73 ...
    assert_equal(lr.fit(X, y).predict(X)[73], 2)
    assert_equal(rf.fit(X, y).predict(X)[73], 1)
    # ... and the 1-1 tie is broken in favour of the smaller label.
    assert_equal(eclf.fit(X, y).predict(X)[73], 1)
def test_weights_iris():
    """Check classification by average probabilities on dataset iris."""
    voters = [('lr', LogisticRegression(random_state=123)),
              ('rf', RandomForestClassifier(random_state=123)),
              ('gnb', GaussianNB())]
    eclf = VotingClassifier(estimators=voters,
                            voting='soft',
                            weights=[1, 2, 10])
    mean_accuracy = cross_val_score(eclf, X, y, cv=5,
                                    scoring='accuracy').mean()
    assert_almost_equal(mean_accuracy, 0.93, decimal=2)
def test_predict_on_toy_problem():
    """Manually check predicted class labels for toy dataset."""
    clf1 = LogisticRegression(random_state=123)
    clf2 = RandomForestClassifier(random_state=123)
    clf3 = GaussianNB()
    X = np.array([[-1.1, -1.5],
                  [-1.2, -1.4],
                  [-3.4, -2.2],
                  [1.1, 1.2],
                  [2.1, 1.4],
                  [3.1, 2.3]])
    y = np.array([1, 1, 1, 2, 2, 2])
    # BUGFIX: the previous assertions compared ``all(pred)`` with
    # ``all(expected)`` -- both are simply True for non-zero labels, so the
    # test could never fail.  Compare the full prediction arrays instead.
    assert_array_equal(clf1.fit(X, y).predict(X), [1, 1, 1, 2, 2, 2])
    assert_array_equal(clf2.fit(X, y).predict(X), [1, 1, 1, 2, 2, 2])
    assert_array_equal(clf3.fit(X, y).predict(X), [1, 1, 1, 2, 2, 2])
    eclf = VotingClassifier(estimators=[
        ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
        voting='hard',
        weights=[1, 1, 1])
    assert_array_equal(eclf.fit(X, y).predict(X), [1, 1, 1, 2, 2, 2])
    eclf = VotingClassifier(estimators=[
        ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
        voting='soft',
        weights=[1, 1, 1])
    assert_array_equal(eclf.fit(X, y).predict(X), [1, 1, 1, 2, 2, 2])
def test_predict_proba_on_toy_problem():
    """Calculate predicted probabilities on toy dataset."""
    clf1 = LogisticRegression(random_state=123)
    clf2 = RandomForestClassifier(random_state=123)
    clf3 = GaussianNB()
    X = np.array([[-1.1, -1.5], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
    y = np.array([1, 1, 2, 2])
    # Reference per-classifier probabilities (precomputed for this seed).
    clf1_res = np.array([[0.59790391, 0.40209609],
                         [0.57622162, 0.42377838],
                         [0.50728456, 0.49271544],
                         [0.40241774, 0.59758226]])
    clf2_res = np.array([[0.8, 0.2],
                         [0.8, 0.2],
                         [0.2, 0.8],
                         [0.3, 0.7]])
    clf3_res = np.array([[0.9985082, 0.0014918],
                         [0.99845843, 0.00154157],
                         [0., 1.],
                         [0., 1.]])
    # Expected soft-vote probabilities: weighted average, weights [2, 1, 1].
    t00 = (2*clf1_res[0][0] + clf2_res[0][0] + clf3_res[0][0]) / 4
    t11 = (2*clf1_res[1][1] + clf2_res[1][1] + clf3_res[1][1]) / 4
    t21 = (2*clf1_res[2][1] + clf2_res[2][1] + clf3_res[2][1]) / 4
    t31 = (2*clf1_res[3][1] + clf2_res[3][1] + clf3_res[3][1]) / 4
    eclf = VotingClassifier(estimators=[
        ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
        voting='soft',
        weights=[2, 1, 1])
    eclf_res = eclf.fit(X, y).predict_proba(X)
    assert_almost_equal(t00, eclf_res[0][0], decimal=1)
    assert_almost_equal(t11, eclf_res[1][1], decimal=1)
    assert_almost_equal(t21, eclf_res[2][1], decimal=1)
    assert_almost_equal(t31, eclf_res[3][1], decimal=1)
    # predict_proba must raise AttributeError under hard voting.
    try:
        eclf = VotingClassifier(estimators=[
            ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
            voting='hard')
        eclf.fit(X, y).predict_proba(X)
    except AttributeError:
        pass
    else:
        raise AssertionError('AttributeError for voting == "hard"'
                             ' and with predict_proba not raised')
def test_multilabel():
    """Check if error is raised for multilabel classification."""
    # VotingClassifier does not support multi-label targets; fitting one
    # must raise NotImplementedError.
    features, targets = make_multilabel_classification(
        n_classes=2, n_labels=1, allow_unlabeled=False, random_state=123)
    base = OneVsRestClassifier(SVC(kernel='linear'))
    voter = VotingClassifier(estimators=[('ovr', base)], voting='hard')
    try:
        voter.fit(features, targets)
    except NotImplementedError:
        return
def test_gridsearch():
    """Check GridSearch support."""
    # Both the ensemble's own parameters (voting, weights) and nested
    # estimator parameters (lr__C) must be searchable.
    named_estimators = [('lr', LogisticRegression(random_state=1)),
                        ('rf', RandomForestClassifier(random_state=1)),
                        ('gnb', GaussianNB())]
    voter = VotingClassifier(estimators=named_estimators, voting='soft')
    param_grid = {'lr__C': [1.0, 100.0],
                  'voting': ['soft', 'hard'],
                  'weights': [[0.5, 0.5, 0.5], [1.0, 0.5, 0.5]]}
    search = GridSearchCV(estimator=voter, param_grid=param_grid, cv=5)
    search.fit(iris.data, iris.target)
def test_parallel_fit():
    """Check parallel backend of VotingClassifier on toy dataset."""
    # Fitting with n_jobs=1 and n_jobs=2 must give identical models.
    named_estimators = [('lr', LogisticRegression(random_state=123)),
                        ('rf', RandomForestClassifier(random_state=123)),
                        ('gnb', GaussianNB())]
    data = np.array([[-1.1, -1.5], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
    target = np.array([1, 1, 2, 2])
    serial = VotingClassifier(estimators=named_estimators, voting='soft',
                              n_jobs=1).fit(data, target)
    parallel = VotingClassifier(estimators=named_estimators, voting='soft',
                                n_jobs=2).fit(data, target)
    assert_array_equal(serial.predict(data), parallel.predict(data))
    assert_array_almost_equal(serial.predict_proba(data),
                              parallel.predict_proba(data))
def test_sample_weight():
    """Tests sample_weight parameter of VotingClassifier"""
    clf1 = LogisticRegression(random_state=123)
    clf2 = RandomForestClassifier(random_state=123)
    clf3 = SVC(probability=True, random_state=123)
    # Uniform sample weights must be equivalent to passing no weights.
    eclf1 = VotingClassifier(estimators=[
        ('lr', clf1), ('rf', clf2), ('svc', clf3)],
        voting='soft').fit(X, y, sample_weight=np.ones((len(y),)))
    eclf2 = VotingClassifier(estimators=[
        ('lr', clf1), ('rf', clf2), ('svc', clf3)],
        voting='soft').fit(X, y)
    assert_array_equal(eclf1.predict(X), eclf2.predict(X))
    assert_array_almost_equal(eclf1.predict_proba(X), eclf2.predict_proba(X))
    # A single-estimator ensemble fitted with weights must match the bare
    # estimator fitted directly with the same weights.
    sample_weight = np.random.RandomState(123).uniform(size=(len(y),))
    eclf3 = VotingClassifier(estimators=[('lr', clf1)], voting='soft')
    eclf3.fit(X, y, sample_weight)
    clf1.fit(X, y, sample_weight)
    assert_array_equal(eclf3.predict(X), clf1.predict(X))
    assert_array_almost_equal(eclf3.predict_proba(X), clf1.predict_proba(X))
    # KNeighborsClassifier.fit takes no sample_weight, so the ensemble must
    # refuse weighted fitting with a helpful error message.
    clf4 = KNeighborsClassifier()
    eclf3 = VotingClassifier(estimators=[
        ('lr', clf1), ('svc', clf3), ('knn', clf4)],
        voting='soft')
    msg = ('Underlying estimator \'knn\' does not support sample weights.')
    assert_raise_message(ValueError, msg, eclf3.fit, X, y, sample_weight)
def test_sample_weight_kwargs():
    """Check that VotingClassifier passes sample_weight as kwargs"""
    class MockClassifier(BaseEstimator, ClassifierMixin):
        """Mock Classifier to check that sample_weight is received as kwargs"""
        def fit(self, X, y, *args, **sample_weight):
            # The ensemble must forward sample_weight by keyword.
            assert_true('sample_weight' in sample_weight)
    voter = VotingClassifier(estimators=[('mock', MockClassifier())],
                             voting='soft')
    # Should not raise an error.
    voter.fit(X, y, sample_weight=np.ones((len(y),)))
def test_set_params():
    """set_params should be able to set estimators"""
    clf1 = LogisticRegression(random_state=123, C=1.0)
    clf2 = RandomForestClassifier(random_state=123, max_depth=None)
    clf3 = GaussianNB()
    eclf1 = VotingClassifier([('lr', clf1), ('rf', clf2)], voting='soft',
                             weights=[1, 2])
    # Before fitting, named_estimators exposes the *unfitted* estimators.
    assert_true('lr' in eclf1.named_estimators)
    assert_true(eclf1.named_estimators.lr is eclf1.estimators[0][1])
    assert_true(eclf1.named_estimators.lr is eclf1.named_estimators['lr'])
    eclf1.fit(X, y)
    # After fitting, named_estimators_ exposes the *fitted* clones.
    assert_true('lr' in eclf1.named_estimators_)
    assert_true(eclf1.named_estimators_.lr is eclf1.estimators_[0])
    assert_true(eclf1.named_estimators_.lr is eclf1.named_estimators_['lr'])
    # Replacing the 'nb' slot with clf2 makes eclf2 equivalent to eclf1.
    eclf2 = VotingClassifier([('lr', clf1), ('nb', clf3)], voting='soft',
                             weights=[1, 2])
    eclf2.set_params(nb=clf2).fit(X, y)
    # set_params must not create an instance attribute for the slot name.
    assert_false(hasattr(eclf2, 'nb'))
    assert_array_equal(eclf1.predict(X), eclf2.predict(X))
    assert_array_almost_equal(eclf1.predict_proba(X), eclf2.predict_proba(X))
    assert_equal(eclf2.estimators[0][1].get_params(), clf1.get_params())
    assert_equal(eclf2.estimators[1][1].get_params(), clf2.get_params())
    # Nested parameter setting via the '<name>__<param>' syntax.
    eclf1.set_params(lr__C=10.0)
    eclf2.set_params(nb__max_depth=5)
    assert_true(eclf1.estimators[0][1].get_params()['C'] == 10.0)
    assert_true(eclf2.estimators[1][1].get_params()['max_depth'] == 5)
    assert_equal(eclf1.get_params()["lr__C"],
                 eclf1.get_params()["lr"].get_params()['C'])
def test_set_estimator_none():
    """VotingClassifier set_params should be able to set estimators as None"""
    # Test predict
    clf1 = LogisticRegression(random_state=123)
    clf2 = RandomForestClassifier(random_state=123)
    clf3 = GaussianNB()
    # Dropping 'rf' via set_params(rf=None) must match giving it zero weight.
    eclf1 = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2),
                                         ('nb', clf3)],
                             voting='hard', weights=[1, 0, 0.5]).fit(X, y)
    eclf2 = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2),
                                         ('nb', clf3)],
                             voting='hard', weights=[1, 1, 0.5])
    eclf2.set_params(rf=None).fit(X, y)
    assert_array_equal(eclf1.predict(X), eclf2.predict(X))
    # The None slot stays listed in `estimators` but is excluded from the
    # fitted `estimators_`.
    assert_true(dict(eclf2.estimators)["rf"] is None)
    assert_true(len(eclf2.estimators_) == 2)
    assert_true(all([not isinstance(est, RandomForestClassifier) for est in
                     eclf2.estimators_]))
    assert_true(eclf2.get_params()["rf"] is None)
    # Same equivalence must hold for soft voting.
    eclf1.set_params(voting='soft').fit(X, y)
    eclf2.set_params(voting='soft').fit(X, y)
    assert_array_equal(eclf1.predict(X), eclf2.predict(X))
    assert_array_almost_equal(eclf1.predict_proba(X), eclf2.predict_proba(X))
    # Setting every estimator to None must be rejected at fit time.
    msg = ('All estimators are None. At least one is required'
           ' to be a classifier!')
    assert_raise_message(
        ValueError, msg, eclf2.set_params(lr=None, rf=None, nb=None).fit, X, y)
    # Test soft voting transform
    X1 = np.array([[1], [2]])
    y1 = np.array([1, 2])
    eclf1 = VotingClassifier(estimators=[('rf', clf2), ('nb', clf3)],
                             voting='soft', weights=[0, 0.5]).fit(X1, y1)
    eclf2 = VotingClassifier(estimators=[('rf', clf2), ('nb', clf3)],
                             voting='soft', weights=[1, 0.5])
    eclf2.set_params(rf=None).fit(X1, y1)
    # With 'rf' dropped, transform returns one probability block, not two.
    assert_array_almost_equal(eclf1.transform(X1),
                              np.array([[[0.7, 0.3], [0.3, 0.7]],
                                        [[1., 0.], [0., 1.]]]))
    assert_array_almost_equal(eclf2.transform(X1),
                              np.array([[[1., 0.],
                                         [0., 1.]]]))
    eclf1.set_params(voting='hard')
    eclf2.set_params(voting='hard')
    assert_array_equal(eclf1.transform(X1), np.array([[0, 0], [1, 1]]))
    assert_array_equal(eclf2.transform(X1), np.array([[0], [1]]))
def test_estimator_weights_format():
    # Estimator weights given as a Python list and as a NumPy array must
    # produce identical soft-voting probabilities.
    named_estimators = [('lr', LogisticRegression(random_state=123)),
                        ('rf', RandomForestClassifier(random_state=123))]
    weights_as_list = VotingClassifier(estimators=named_estimators,
                                       weights=[1, 2],
                                       voting='soft')
    weights_as_array = VotingClassifier(estimators=named_estimators,
                                        weights=np.array((1, 2)),
                                        voting='soft')
    weights_as_list.fit(X, y)
    weights_as_array.fit(X, y)
    assert_array_almost_equal(weights_as_list.predict_proba(X),
                              weights_as_array.predict_proba(X))
def test_transform():
    """Check transform method of VotingClassifier on toy dataset."""
    clf1 = LogisticRegression(random_state=123)
    clf2 = RandomForestClassifier(random_state=123)
    clf3 = GaussianNB()
    X = np.array([[-1.1, -1.5], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
    y = np.array([1, 1, 2, 2])
    # Three variants: flatten_transform unset (deprecated default),
    # explicitly True, and explicitly False.
    eclf1 = VotingClassifier(estimators=[
        ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
        voting='soft').fit(X, y)
    eclf2 = VotingClassifier(estimators=[
        ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
        voting='soft',
        flatten_transform=True).fit(X, y)
    eclf3 = VotingClassifier(estimators=[
        ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
        voting='soft',
        flatten_transform=False).fit(X, y)
    # Leaving flatten_transform unset must warn about the upcoming
    # default change and keep the old (n_classifiers, n_samples, n_classes)
    # output shape.
    warn_msg = ("'flatten_transform' default value will be "
                "changed to True in 0.21."
                "To silence this warning you may"
                " explicitly set flatten_transform=False.")
    res = assert_warns_message(DeprecationWarning, warn_msg,
                               eclf1.transform, X)
    assert_array_equal(res.shape, (3, 4, 2))
    # flatten_transform=True yields (n_samples, n_classifiers * n_classes).
    assert_array_equal(eclf2.transform(X).shape, (4, 6))
    assert_array_equal(eclf3.transform(X).shape, (3, 4, 2))
    # The flattened output is just the 3-D output with the first two axes
    # swapped and collapsed.
    assert_array_almost_equal(res.swapaxes(0, 1).reshape((4, 6)),
                              eclf2.transform(X))
    assert_array_almost_equal(
        eclf3.transform(X).swapaxes(0, 1).reshape((4, 6)),
        eclf2.transform(X)
    )
| bsd-3-clause |
pratapvardhan/scikit-learn | examples/cluster/plot_mean_shift.py | 351 | 1793 | """
=============================================
A demo of the mean-shift clustering algorithm
=============================================
Reference:
Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
feature space analysis". IEEE Transactions on Pattern Analysis and
Machine Intelligence. 2002. pp. 603-619.
"""
print(__doc__)
import numpy as np
from sklearn.cluster import MeanShift, estimate_bandwidth
from sklearn.datasets.samples_generator import make_blobs
###############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, _ = make_blobs(n_samples=10000, centers=centers, cluster_std=0.6)
###############################################################################
# Compute clustering with MeanShift
# The following bandwidth can be automatically detected using
# estimate_bandwidth (quantile controls how local the estimate is).
bandwidth = estimate_bandwidth(X, quantile=0.2, n_samples=500)
# bin_seeding speeds up convergence by seeding from a discretized grid
# of the data rather than from every point.
ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
ms.fit(X)
labels = ms.labels_
cluster_centers = ms.cluster_centers_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
print("number of estimated clusters : %d" % n_clusters_)
###############################################################################
# Plot result
import matplotlib.pyplot as plt
from itertools import cycle
plt.figure(1)
plt.clf()
# Cycle through colors so more clusters than colors still get drawn.
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
    my_members = labels == k
    cluster_center = cluster_centers[k]
    plt.plot(X[my_members, 0], X[my_members, 1], col + '.')
    plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
             markeredgecolor='k', markersize=14)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| bsd-3-clause |
ecrc/girih | scripts/sisc/paper_plot_thread_scaling_naive_out_cache.py | 2 | 6303 | #!/usr/bin/env python
def main():
    """Read the benchmark CSV named on the command line and plot each
    stencil operator found in it (double precision only)."""
    import sys
    raw_data = load_csv(sys.argv[1])
    # Collect the distinct stencil-operator ids present in the data.
    stencils = list({get_stencil_num(row) for row in raw_data})
    # for ts in ['Naive', 'Dynamic-Intra-Diamond']
    for stencil in stencils:
        for is_dp in [1]:
            plot_lines(raw_data, stencil, is_dp)
def get_stencil_num(k):
    """Map one benchmark CSV row (a dict) to the internal stencil id 0-5."""
    coeffs = k['Stencil Kernel coefficients']
    # NOTE(review): `coeffs in 'constant'` is a substring test of *coeffs*
    # inside the word 'constant' (kept as-is; it matches the exact tag the
    # CSV uses, but e.g. an empty string would also match — confirm).
    if coeffs in 'constant':
        # Constant-coefficient star stencils: radius 4 -> 25-pt, else 7-pt.
        return 0 if int(k['Stencil Kernel semi-bandwidth']) == 4 else 1
    if 'no-symmetry' in coeffs:
        return 5
    if 'sym' in coeffs:
        # Axis-symmetric variable coefficients: radius 1 -> 7-pt, else 25-pt.
        return 3 if int(k['Stencil Kernel semi-bandwidth']) == 1 else 4
    return 2
def plot_lines(raw_data, stencil_kernel, is_dp):
    """Plot performance (GLUP/s) and sustained memory bandwidth versus
    thread count for one stencil operator, and save PNG + PDF figures.

    raw_data: list of CSV-row dicts; stencil_kernel: id from
    get_stencil_num(); is_dp: 1 for double precision.
    """
    from operator import itemgetter
    import matplotlib.pyplot as plt
    import matplotlib
    import pylab
    from pylab import arange,pi,sin,cos,sqrt
    # Figure geometry: widths chosen in cm (converted to inches).
    if stencil_kernel == 1:
        fig_width = 3.4*0.393701 # inches
    else:
        fig_width = 3.0*0.393701 # inches
    fig_height = 1.8*fig_width #* 210.0/280.0#433.62/578.16
    fig_size = [fig_width,fig_height]
    params = {
           'axes.labelsize': 7,
           'axes.linewidth': 0.5,
           'lines.linewidth': 0.75,
           'text.fontsize': 7,
           'legend.fontsize': 5,
           'xtick.labelsize': 7,
           'ytick.labelsize': 7,
           'lines.markersize': 3,
           'text.usetex': True,
           'figure.figsize': fig_size}
    pylab.rcParams.update(params)
    # Distinct time-stepper names and thread counts present in the data.
    ts_l = set()
    for k in raw_data:
        ts_l.add(k['Time stepper orig name'])
    ts_l = list(ts_l)
    th = set()
    for k in raw_data:
        th.add(int(k['OpenMP Threads']))
    th = list(th)
    #tb_l = [3, 7]
    tb_l = set()
    for k in raw_data:
        tb_l.add(k['Time unroll'])
    tb_l = list(tb_l)
    # NOTE(review): map() returning a list with .sort() is Python 2 only.
    tb_l = map(int,tb_l)
    tb_l.sort()
    #print tb_l
    # Project each row onto the fields (with types) this plot needs.
    req_fields = [('Thread group size', int), ('WD main-loop RANK0 MStencil/s  MAX', float), ('Time stepper orig name', str), ('OpenMP Threads', int), ('MStencil/s  MAX', float), ('Time unroll',int), ('Sustained Memory BW', float)]
    data = []
    for k in raw_data:
        tup = {}
        # add the general fileds
        for f in req_fields:
            tup[f[0]] = map(f[1], [k[f[0]]] )[0]
        # add the stencil operato
        tup['stencil'] = get_stencil_num(k)
        # add the precision information
        if k['Precision'] in 'DP':
            p = 1
        else:
            p = 0
        tup['Precision'] = p
        data.append(tup)
    data = sorted(data, key=itemgetter('Time stepper orig name', 'Time unroll', 'Thread group size', 'OpenMP Threads'))
    #for i in data: print i
    max_single = 0
    fig, ax1 = plt.subplots()
    lns = []
    col = 'g'
    ts2 = 'Spatial blk.'
    # Gather the naive time stepper's perf and bandwidth per thread count.
    x = []
    y = []
    y_m = []
    for k in data:
        if ( ('Naive' in k['Time stepper orig name']) and (k['stencil']==stencil_kernel) and (k['Precision']==is_dp)):
            if k['OpenMP Threads'] == 1 and max_single < k['MStencil/s  MAX']/10**3: max_single = k['MStencil/s  MAX']/10**3
            y_m.append(k['Sustained Memory BW']/10**3)
            x.append(k['OpenMP Threads'])
            y.append(k['MStencil/s  MAX']/10**3)
    if(x):
        lns = lns + ax1.plot(x, y, color=col, marker='o', linestyle='-', label='Perf. (GLUP/s)')
    if(y_m):
        # Bandwidth goes on a secondary y-axis.
        ax2 = ax1.twinx()
        lns = lns + ax2.plot(x, y_m, color='r', marker='^', linestyle='-', label='BW (GBytes/s)')
    # add ideal scaling
    ideal = [i*max_single for i in th]
    lns2 = ax1.plot(th, ideal, color='g', linestyle='--', label='Ideal scaling')
    # add limits
    mem_limit=0
    # Memory-bandwidth roofline per stencil (bytes/LUP in the divisor).
    #    sus_mem_bw = 36500 #SB
    sus_mem_bw = 40.0 #IB
    if stencil_kernel == 0:
        mem_limit = sus_mem_bw/(4*4)
    elif stencil_kernel == 1:
        mem_limit = sus_mem_bw/(4*3)
    elif stencil_kernel == 4:
        mem_limit = sus_mem_bw/(4*(3+13))
    elif stencil_kernel == 5:
        mem_limit = sus_mem_bw/(4*(3+7))
    if is_dp == 1: mem_limit = mem_limit / 2
    #if stencil_kernel ==1:
    lns3 = ax1.plot([5, len(th)], [mem_limit, mem_limit], color='k', linestyle='-', label='Perf. limit')
    #        lns = lns + ax2.plot([6, len(th)], [sus_mem_bw, sus_mem_bw], color='m', linestyle='-', label='STREAM BW')
    #title = 'Thread scaling of'
    title = ''
    if stencil_kernel == 0:
        title = '25_pt_const_naive_large'
    #        title = title + ' 25_pt constant coeff. star stencil'
    elif stencil_kernel == 1:
        title = '7_pt_const_naive_large'
    #        title = title + ' 7_pt constant coeff. star stencil'
    elif stencil_kernel == 2:
        title = title + ' 7_pt varialbe. coeff. star stencil'
    elif stencil_kernel == 3:
        title = title + ' 7_pt varialbe. coeff. axis symm star stencil'
    elif stencil_kernel == 4:
        title = '25_pt_var_naive_large'
    #        title = title + ' 25_pt varialbe. coeff. axis symm star stencil'
    elif stencil_kernel == 5:
        title = '7_pt_var_naive_large'
    #        title = title + ' 7_pt varialbe. coeff. no symm star stencil'
    #if is_dp == 1:
    #    title = title + ' in double precision'
    #else:
    #    title = title + ' in single precision'
    f_name = title.replace(' ', '_')
    ax1.set_xlabel('Threads')
    plt.xticks(range(2,11,2))
    if stencil_kernel == 1:
        ax1.set_ylabel('GLUP/s', color='g', labelpad=0)
        ax2.set_ylabel('GBytes/s', color='r', labelpad=0)
    #    plt.title(title)
    lns = lns2 + lns3 + lns
    labs = [l.get_label() for l in lns]
    if stencil_kernel == 1:
        ax1.legend(lns, labs, loc='lower right')
        ax2.set_ylim(ymin=0, ymax=42)
    ax1.grid()
    pylab.savefig(f_name+'.png', bbox_inches="tight", pad_inches=0.04)
    pylab.savefig(f_name+'.pdf', format='pdf', bbox_inches="tight", pad_inches=0)
    #plt.show()
    plt.clf()
def load_csv(data_file):
    """Read *data_file* as CSV and return a list of per-row dicts."""
    from csv import DictReader
    # NOTE(review): binary mode is the Python 2 csv convention this script
    # uses throughout; under Python 3 it would need mode 'r', newline=''.
    with open(data_file, 'rb') as csv_handle:
        rows = list(DictReader(csv_handle))
    return rows
# Script entry point: expects the benchmark CSV path as the first argument.
if __name__ == "__main__":
    main()
| bsd-3-clause |
annayqho/TheCannon | code/lamost/tutorial/run_tutorial.py | 1 | 4224 | import numpy as np
from astropy.table import Table
from TheCannon.lamost import load_spectra
import matplotlib.pyplot as plt
from TheCannon import dataset
from TheCannon import model
# The Cannon tutorial: train a quadratic spectral model on 1000 LAMOST
# spectra with APOGEE-derived labels, then infer labels for the rest.
labdir = "/Users/annaho/Github_Repositories/TheCannon/docs/source"
data = Table.read("%s/lamost_labels.fits" %labdir)
print(data.colnames)
filename = data['LAMOST_ID'][0].strip()
print(filename)
specdir = "/Users/annaho/Github_Repositories/TheCannon/docs/source/spectra"
wl, flux, ivar = load_spectra("%s/" %specdir + filename)
# Quick look at one raw spectrum.
plt.step(wl, flux, where='mid', linewidth=0.5, color='k')
plt.xlabel("Wavelength (Angstroms)")
plt.ylabel("Flux")
plt.savefig("sample_spec.png")
plt.close()
# Now, do all of the files
filenames = np.array([val.strip() for val in data['LAMOST_ID']])
filenames_full = np.array([specdir+"/"+val.strip() for val in filenames])
wl, flux, ivar = load_spectra(filenames_full)
#plt.hist(SNR)
#plt.show()
# Use 1000 stars as a training set
tr_flux = flux[0:1000]
tr_ivar = ivar[0:1000]
tr_ID = filenames[0:1000]
# Now you need to get the training labels.
inds = np.array([np.where(filenames==val)[0][0] for val in tr_ID])
tr_teff = data['TEFF'][inds]
tr_logg = data['LOGG'][inds]
tr_mh = data['PARAM_M_H'][inds]
tr_alpham = data['PARAM_ALPHA_M'][inds]
# Take a look at the teff-logg diagram, color-coded by metallicity
plt.scatter(tr_teff, tr_logg, c=tr_mh, lw=0, s=7, cmap="viridis")
plt.gca().invert_xaxis()
plt.xlabel("Teff")
plt.ylabel("logg")
plt.colorbar(label="M/H")
plt.savefig("teff_logg_training.png")
plt.close()
# Note that there are very few stars at low metallicity,
# so it will probably be challenging to do as good of a job
# or get as precise results here.
# OK, so now we need to get our stuff into the format specified on the website
# Wavelength grid with shape [num_pixels]
print(wl.shape)
# So, there are 3626 pixels.
# Block of flux values, inverse variance values with shape
print(tr_ID.shape)
print(tr_flux.shape)
print(tr_ivar.shape)
# [num_training_objects, num_pixels]
# (1339, 3626)
# Fine. Not normalized yet, but we will do that later.
# Now we need a block of training labels
# [num_training_objects, num_labels]
# Right now we have them separate, combine into an array of this shape:
tr_label = np.vstack((tr_teff, tr_logg, tr_mh, tr_alpham))
# Note that that gives us (4,1339) which is (num_labels, num_tr_obj)
# So we need to take the transpose
tr_label = np.vstack((tr_teff, tr_logg, tr_mh, tr_alpham)).T
# Now we need to define our "test set": a bunch of other
# spectra whose labels we want to determine and don't know yet.
# Let's use some of the other spectra in the dataset
# Say, the ones with 80 < SNR < 100
test_ID = filenames[1000:]
test_flux = flux[1000:]
test_ivar = ivar[1000:]
# Check the sizes
print(test_ID.shape)
print(test_flux.shape)
print(test_ivar.shape)
# OK, excellent. Now we're ready!
ds = dataset.Dataset(
    wl, tr_ID, tr_flux, tr_ivar, tr_label, test_ID, test_flux, test_ivar)
ds.set_label_names(['T_{eff}', '\log g', '[M/H]', '[\\alpha/M]'])
fig = ds.diagnostics_SNR()
plt.savefig("SNR_hist.png")
plt.close()
fig = ds.diagnostics_ref_labels()
plt.savefig("ref_labels.png")
plt.close()
# Pseudo-continuum normalization with a running Gaussian of width L pixels.
ds.continuum_normalize_gaussian_smoothing(L=50)
plt.step(ds.wl, ds.tr_flux[0], where='mid', linewidth=0.5, color='k')
plt.xlabel("Wavelength (Angstroms)")
plt.ylabel("Flux")
plt.savefig("norm_spec.png")
plt.close()
# Now, fit for a polynomial model of order 2 (quadratic)
m = model.CannonModel(2, useErrors=False)
m.fit(ds)
fig = m.diagnostics_leading_coeffs(ds)
plt.savefig("leading_coeffs.png")
plt.close()
# Test
print(ds.tr_label.shape)
print(m.pivots)
# Start the label optimization from the training-set mean (pivot-shifted).
starting_guess = np.mean(ds.tr_label,axis=0)-m.pivots
print(starting_guess)
errs, chisq = m.infer_labels(ds, starting_guess)
ds.diagnostics_survey_labels()
plt.savefig("survey_labels.png")
plt.close()
# Compare inferred labels against the catalog values for the test stars.
inds = np.array([np.where(filenames==val)[0][0] for val in ds.test_ID])
test_teff = data['TEFF'][inds]
test_logg = data['LOGG'][inds]
test_mh = data['PARAM_M_H'][inds]
test_alpham = data['PARAM_ALPHA_M'][inds]
test_label = np.vstack((test_teff, test_logg, test_mh, test_alpham)).T
ds.tr_label = test_label
ds.diagnostics_1to1()
plt.savefig("survey_ref_comparison.png")
plt.close()
| mit |
fzalkow/scikit-learn | examples/tree/plot_iris.py | 271 | 2186 | """
================================================================
Plot the decision surface of a decision tree on the iris dataset
================================================================
Plot the decision surface of a decision tree trained on pairs
of features of the iris dataset.
See :ref:`decision tree <tree>` for more information on the estimator.
For each pair of iris features, the decision tree learns decision
boundaries made of combinations of simple thresholding rules inferred from
the training samples.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
# Parameters
n_classes = 3
plot_colors = "bry"
plot_step = 0.02
# Load data
iris = load_iris()
# One subplot per pair of the four iris features.
for pairidx, pair in enumerate([[0, 1], [0, 2], [0, 3],
                                [1, 2], [1, 3], [2, 3]]):
    # We only take the two corresponding features
    X = iris.data[:, pair]
    y = iris.target
    # Shuffle
    idx = np.arange(X.shape[0])
    np.random.seed(13)
    np.random.shuffle(idx)
    X = X[idx]
    y = y[idx]
    # Standardize
    mean = X.mean(axis=0)
    std = X.std(axis=0)
    X = (X - mean) / std
    # Train
    clf = DecisionTreeClassifier().fit(X, y)
    # Plot the decision boundary
    plt.subplot(2, 3, pairidx + 1)
    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
                         np.arange(y_min, y_max, plot_step))
    # Evaluate the tree over the mesh and color the regions by class.
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
    plt.xlabel(iris.feature_names[pair[0]])
    plt.ylabel(iris.feature_names[pair[1]])
    plt.axis("tight")
    # Plot the training points
    for i, color in zip(range(n_classes), plot_colors):
        idx = np.where(y == i)
        plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
                    cmap=plt.cm.Paired)
    plt.axis("tight")
plt.suptitle("Decision surface of a decision tree using paired features")
plt.legend()
plt.show()
| bsd-3-clause |
blueburningcoder/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/projections/geo.py | 69 | 19738 | import math
import numpy as np
import numpy.ma as ma
import matplotlib
rcParams = matplotlib.rcParams
from matplotlib.artist import kwdocd
from matplotlib.axes import Axes
from matplotlib import cbook
from matplotlib.patches import Circle
from matplotlib.path import Path
from matplotlib.ticker import Formatter, Locator, NullLocator, FixedLocator, NullFormatter
from matplotlib.transforms import Affine2D, Affine2DBase, Bbox, \
BboxTransformTo, IdentityTransform, Transform, TransformWrapper
class GeoAxes(Axes):
    """
    An abstract base class for geographic projections

    Subclasses supply ``_get_core_transform`` returning the projection's
    (longitude, latitude) -> (x, y) transform; this class builds the full
    transform stack, the tick machinery and the degree formatting.
    """
    class ThetaFormatter(Formatter):
        """
        Used to format the theta tick labels. Converts the native
        unit of radians into degrees and adds a degree symbol.
        """
        def __init__(self, round_to=1.0):
            # Granularity (in degrees) to which tick values are rounded.
            self._round_to = round_to
        def __call__(self, x, pos=None):
            degrees = (x / np.pi) * 180.0
            degrees = round(degrees / self._round_to) * self._round_to
            # usetex without unicode cannot render u"\u00b0"; emit TeX math.
            if rcParams['text.usetex'] and not rcParams['text.latex.unicode']:
                return r"$%0.0f^\circ$" % degrees
            else:
                return u"%0.0f\u00b0" % degrees
    # Number of interpolation steps per line segment in the projection.
    RESOLUTION = 75
    def cla(self):
        """Clear the axes and restore the projection's default grid/limits."""
        Axes.cla(self)
        self.set_longitude_grid(30)
        self.set_latitude_grid(15)
        self.set_longitude_grid_ends(75)
        self.xaxis.set_minor_locator(NullLocator())
        self.yaxis.set_minor_locator(NullLocator())
        self.xaxis.set_ticks_position('none')
        self.yaxis.set_ticks_position('none')
        self.grid(rcParams['axes.grid'])
        # Data limits are fixed to the full sphere in radians.
        Axes.set_xlim(self, -np.pi, np.pi)
        Axes.set_ylim(self, -np.pi / 2.0, np.pi / 2.0)
    def _set_lim_and_transforms(self):
        # A (possibly non-linear) projection on the (already scaled) data
        self.transProjection = self._get_core_transform(self.RESOLUTION)
        self.transAffine = self._get_affine_transform()
        self.transAxes = BboxTransformTo(self.bbox)
        # The complete data transformation stack -- from data all the
        # way to display coordinates
        self.transData = \
            self.transProjection + \
            self.transAffine + \
            self.transAxes
        # This is the transform for longitude ticks.
        self._xaxis_pretransform = \
            Affine2D() \
            .scale(1.0, self._longitude_cap * 2.0) \
            .translate(0.0, -self._longitude_cap)
        self._xaxis_transform = \
            self._xaxis_pretransform + \
            self.transData
        self._xaxis_text1_transform = \
            Affine2D().scale(1.0, 0.0) + \
            self.transData + \
            Affine2D().translate(0.0, 4.0)
        self._xaxis_text2_transform = \
            Affine2D().scale(1.0, 0.0) + \
            self.transData + \
            Affine2D().translate(0.0, -4.0)
        # This is the transform for latitude ticks.
        yaxis_stretch = Affine2D().scale(np.pi * 2.0, 1.0).translate(-np.pi, 0.0)
        yaxis_space = Affine2D().scale(1.0, 1.1)
        self._yaxis_transform = \
            yaxis_stretch + \
            self.transData
        yaxis_text_base = \
            yaxis_stretch + \
            self.transProjection + \
            (yaxis_space + \
             self.transAffine + \
             self.transAxes)
        self._yaxis_text1_transform = \
            yaxis_text_base + \
            Affine2D().translate(-8.0, 0.0)
        self._yaxis_text2_transform = \
            yaxis_text_base + \
            Affine2D().translate(8.0, 0.0)
    def _get_affine_transform(self):
        # Normalize projected coordinates into the unit square, using the
        # projection's extreme points to find the scale.
        transform = self._get_core_transform(1)
        xscale, _ = transform.transform_point((np.pi, 0))
        _, yscale = transform.transform_point((0, np.pi / 2.0))
        return Affine2D() \
            .scale(0.5 / xscale, 0.5 / yscale) \
            .translate(0.5, 0.5)
    def get_xaxis_transform(self):
        return self._xaxis_transform
    def get_xaxis_text1_transform(self, pad):
        return self._xaxis_text1_transform, 'bottom', 'center'
    def get_xaxis_text2_transform(self, pad):
        return self._xaxis_text2_transform, 'top', 'center'
    def get_yaxis_transform(self):
        return self._yaxis_transform
    def get_yaxis_text1_transform(self, pad):
        return self._yaxis_text1_transform, 'center', 'right'
    def get_yaxis_text2_transform(self, pad):
        return self._yaxis_text2_transform, 'center', 'left'
    def _gen_axes_patch(self):
        # The visible boundary of the axes is a circle in axes coordinates.
        return Circle((0.5, 0.5), 0.5)
    def set_yscale(self, *args, **kwargs):
        # Only linear scales make sense on a geographic projection.
        if args[0] != 'linear':
            raise NotImplementedError
    set_xscale = set_yscale
    def set_xlim(self, *args, **kwargs):
        # Limits are fixed; any requested values are ignored.
        Axes.set_xlim(self, -np.pi, np.pi)
        Axes.set_ylim(self, -np.pi / 2.0, np.pi / 2.0)
    set_ylim = set_xlim
    def format_coord(self, long, lat):
        'return a format string formatting the coordinate'
        long = long * (180.0 / np.pi)
        lat = lat * (180.0 / np.pi)
        if lat >= 0.0:
            ns = 'N'
        else:
            ns = 'S'
        if long >= 0.0:
            ew = 'E'
        else:
            ew = 'W'
        return u'%f\u00b0%s, %f\u00b0%s' % (abs(lat), ns, abs(long), ew)
    def set_longitude_grid(self, degrees):
        """
        Set the number of degrees between each longitude grid.
        """
        number = (360.0 / degrees) + 1
        self.xaxis.set_major_locator(
            FixedLocator(
                np.linspace(-np.pi, np.pi, number, True)[1:-1]))
        # Fixed typo: this attribute was previously stored as
        # "_logitude_degrees", inconsistent with _latitude_degrees below.
        self._longitude_degrees = degrees
        self.xaxis.set_major_formatter(self.ThetaFormatter(degrees))
    def set_latitude_grid(self, degrees):
        """
        Set the number of degrees between each latitude grid.
        """
        number = (180.0 / degrees) + 1
        self.yaxis.set_major_locator(
            FixedLocator(
                np.linspace(-np.pi / 2.0, np.pi / 2.0, number, True)[1:-1]))
        self._latitude_degrees = degrees
        self.yaxis.set_major_formatter(self.ThetaFormatter(degrees))
    def set_longitude_grid_ends(self, degrees):
        """
        Set the latitude(s) at which to stop drawing the longitude grids.
        """
        self._longitude_cap = degrees * (np.pi / 180.0)
        self._xaxis_pretransform \
            .clear() \
            .scale(1.0, self._longitude_cap * 2.0) \
            .translate(0.0, -self._longitude_cap)
    def get_data_ratio(self):
        '''
        Return the aspect ratio of the data itself.
        '''
        return 1.0
    ### Interactive panning
    def can_zoom(self):
        """
        Return True if this axes support the zoom box
        """
        return False
    def start_pan(self, x, y, button):
        pass
    def end_pan(self):
        pass
    def drag_pan(self, button, key, x, y):
        pass
class AitoffAxes(GeoAxes):
    # Registered projection name (used as projection='aitoff').
    name = 'aitoff'
    class AitoffTransform(Transform):
        """
        The base Aitoff transform.
        """
        input_dims = 2
        output_dims = 2
        is_separable = False
        def __init__(self, resolution):
            """
            Create a new Aitoff transform. Resolution is the number of steps
            to interpolate between each input line segment to approximate its
            path in curved Aitoff space.
            """
            Transform.__init__(self)
            self._resolution = resolution
        def transform(self, ll):
            # Columns of ll are (longitude, latitude) in radians.
            longitude = ll[:, 0:1]
            latitude = ll[:, 1:2]
            # Pre-compute some values
            half_long = longitude / 2.0
            cos_latitude = np.cos(latitude)
            alpha = np.arccos(cos_latitude * np.cos(half_long))
            # Mask this array, or we'll get divide-by-zero errors
            alpha = ma.masked_where(alpha == 0.0, alpha)
            # We want unnormalized sinc. numpy.sinc gives us normalized
            sinc_alpha = ma.sin(alpha) / alpha
            x = (cos_latitude * np.sin(half_long)) / sinc_alpha
            y = (np.sin(latitude) / sinc_alpha)
            # Masked points (alpha == 0, i.e. the map center) project to 0.
            x.set_fill_value(0.0)
            y.set_fill_value(0.0)
            return np.concatenate((x.filled(), y.filled()), 1)
        transform.__doc__ = Transform.transform.__doc__
        transform_non_affine = transform
        transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
        def transform_path(self, path):
            vertices = path.vertices
            # Densify the path so straight segments follow the curvature.
            ipath = path.interpolated(self._resolution)
            return Path(self.transform(ipath.vertices), ipath.codes)
        transform_path.__doc__ = Transform.transform_path.__doc__
        transform_path_non_affine = transform_path
        transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
        def inverted(self):
            return AitoffAxes.InvertedAitoffTransform(self._resolution)
        inverted.__doc__ = Transform.inverted.__doc__
    class InvertedAitoffTransform(Transform):
        input_dims = 2
        output_dims = 2
        is_separable = False
        def __init__(self, resolution):
            Transform.__init__(self)
            self._resolution = resolution
        def transform(self, xy):
            # MGDTODO: Math is hard ;(
            # NOTE(review): identity placeholder — inverse Aitoff is not
            # implemented, so interactive coordinate readout is approximate.
            return xy
        transform.__doc__ = Transform.transform.__doc__
        def inverted(self):
            return AitoffAxes.AitoffTransform(self._resolution)
        inverted.__doc__ = Transform.inverted.__doc__
    def __init__(self, *args, **kwargs):
        # Longitude grid lines are drawn up to +/- 90 degrees latitude.
        self._longitude_cap = np.pi / 2.0
        GeoAxes.__init__(self, *args, **kwargs)
        self.set_aspect(0.5, adjustable='box', anchor='C')
        self.cla()
    def _get_core_transform(self, resolution):
        return self.AitoffTransform(resolution)
class HammerAxes(GeoAxes):
    # Registered projection name (used as projection='hammer').
    name = 'hammer'
    class HammerTransform(Transform):
        """
        The base Hammer transform.
        """
        input_dims = 2
        output_dims = 2
        is_separable = False
        def __init__(self, resolution):
            """
            Create a new Hammer transform. Resolution is the number of steps
            to interpolate between each input line segment to approximate its
            path in curved Hammer space.
            """
            Transform.__init__(self)
            self._resolution = resolution
        def transform(self, ll):
            # Columns of ll are (longitude, latitude) in radians.
            longitude = ll[:, 0:1]
            latitude = ll[:, 1:2]
            # Pre-compute some values
            half_long = longitude / 2.0
            cos_latitude = np.cos(latitude)
            sqrt2 = np.sqrt(2.0)
            alpha = 1.0 + cos_latitude * np.cos(half_long)
            x = (2.0 * sqrt2) * (cos_latitude * np.sin(half_long)) / alpha
            y = (sqrt2 * np.sin(latitude)) / alpha
            return np.concatenate((x, y), 1)
        transform.__doc__ = Transform.transform.__doc__
        transform_non_affine = transform
        transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
        def transform_path(self, path):
            vertices = path.vertices
            # Densify the path so straight segments follow the curvature.
            ipath = path.interpolated(self._resolution)
            return Path(self.transform(ipath.vertices), ipath.codes)
        transform_path.__doc__ = Transform.transform_path.__doc__
        transform_path_non_affine = transform_path
        transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
        def inverted(self):
            return HammerAxes.InvertedHammerTransform(self._resolution)
        inverted.__doc__ = Transform.inverted.__doc__
    class InvertedHammerTransform(Transform):
        input_dims = 2
        output_dims = 2
        is_separable = False
        def __init__(self, resolution):
            Transform.__init__(self)
            self._resolution = resolution
        def transform(self, xy):
            # Closed-form inverse of the Hammer equations via the
            # intermediate variable z.
            x = xy[:, 0:1]
            y = xy[:, 1:2]
            quarter_x = 0.25 * x
            half_y = 0.5 * y
            z = np.sqrt(1.0 - quarter_x*quarter_x - half_y*half_y)
            longitude = 2 * np.arctan((z*x) / (2.0 * (2.0*z*z - 1.0)))
            latitude = np.arcsin(y*z)
            return np.concatenate((longitude, latitude), 1)
        transform.__doc__ = Transform.transform.__doc__
        def inverted(self):
            return HammerAxes.HammerTransform(self._resolution)
        inverted.__doc__ = Transform.inverted.__doc__
    def __init__(self, *args, **kwargs):
        # Longitude grid lines are drawn up to +/- 90 degrees latitude.
        self._longitude_cap = np.pi / 2.0
        GeoAxes.__init__(self, *args, **kwargs)
        self.set_aspect(0.5, adjustable='box', anchor='C')
        self.cla()
    def _get_core_transform(self, resolution):
        return self.HammerTransform(resolution)
class MollweideAxes(GeoAxes):
    # Registered projection name (used as projection='mollweide').
    name = 'mollweide'
    class MollweideTransform(Transform):
        """
        The base Mollweide transform.

        Maps (longitude, latitude) in radians to Mollweide (x, y) by
        solving the projection's defining equation
        2*theta + sin(2*theta) = pi*sin(latitude) for the auxiliary
        angle theta.
        """
        input_dims = 2
        output_dims = 2
        is_separable = False
        def __init__(self, resolution):
            """
            Create a new Mollweide transform. Resolution is the number of steps
            to interpolate between each input line segment to approximate its
            path in curved Mollweide space.
            """
            Transform.__init__(self)
            self._resolution = resolution
        def transform(self, ll):
            def d(theta):
                # One Newton step on f(theta) = theta + sin(theta) - pi*sin(lat)
                # (written in terms of 2*theta); returns the update and a
                # mask of points still outside tolerance.
                delta = (-(theta + np.sin(theta) - pi_sin_l)
                         / (1 + np.cos(theta)))
                return delta, np.abs(delta) > 0.001
            longitude = ll[:, 0:1]
            latitude = ll[:, 1:2]
            # Bug fix: the auxiliary angle must satisfy
            # 2*theta + sin(2*theta) = pi*sin(latitude).  The previous
            # closed form 2*arcsin(2*latitude/pi) is not a solution of that
            # equation, so the projection was distorted.
            clat = np.pi / 2 - np.abs(latitude)
            ihigh = clat < 0.087  # within 5 degrees of a pole
            ilow = ~ihigh
            aux = np.empty(latitude.shape, dtype=np.float64)
            if ilow.any():
                # Newton-Raphson iteration away from the poles.
                pi_sin_l = np.pi * np.sin(latitude[ilow])
                theta = 2.0 * latitude[ilow]
                delta, large_delta = d(theta)
                while np.any(large_delta):
                    theta[large_delta] += delta[large_delta]
                    delta, large_delta = d(theta)
                aux[ilow] = theta / 2
            if ihigh.any():
                # Newton converges poorly near the poles; use the series
                # expansion about the pole instead.
                e = clat[ihigh]
                d_pole = 0.5 * (3 * np.pi * e ** 2) ** (1.0 / 3)
                aux[ihigh] = (np.pi / 2 - d_pole) * np.sign(latitude[ihigh])
            x = (2.0 * np.sqrt(2.0) / np.pi) * longitude * np.cos(aux)
            y = np.sqrt(2.0) * np.sin(aux)
            return np.concatenate((x, y), 1)
        transform.__doc__ = Transform.transform.__doc__
        transform_non_affine = transform
        transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
        def transform_path(self, path):
            vertices = path.vertices
            # Densify the path so straight segments follow the curvature.
            ipath = path.interpolated(self._resolution)
            return Path(self.transform(ipath.vertices), ipath.codes)
        transform_path.__doc__ = Transform.transform_path.__doc__
        transform_path_non_affine = transform_path
        transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
        def inverted(self):
            return MollweideAxes.InvertedMollweideTransform(self._resolution)
        inverted.__doc__ = Transform.inverted.__doc__
    class InvertedMollweideTransform(Transform):
        input_dims = 2
        output_dims = 2
        is_separable = False
        def __init__(self, resolution):
            Transform.__init__(self)
            self._resolution = resolution
        def transform(self, xy):
            x = xy[:, 0:1]
            y = xy[:, 1:2]
            # Bug fix: this used to return *xy* unchanged ("Math is hard"),
            # which made interactive coordinate readout wrong.  The inverse
            # is closed-form: y = sqrt(2)*sin(theta) gives theta, then the
            # defining equation yields the latitude and x gives longitude.
            theta = np.arcsin(y / np.sqrt(2))
            longitude = (np.pi / (2 * np.sqrt(2))) * x / np.cos(theta)
            latitude = np.arcsin((2 * theta + np.sin(2 * theta)) / np.pi)
            return np.concatenate((longitude, latitude), 1)
        transform.__doc__ = Transform.transform.__doc__
        def inverted(self):
            return MollweideAxes.MollweideTransform(self._resolution)
        inverted.__doc__ = Transform.inverted.__doc__
    def __init__(self, *args, **kwargs):
        # Longitude grid lines are drawn up to +/- 90 degrees latitude.
        self._longitude_cap = np.pi / 2.0
        GeoAxes.__init__(self, *args, **kwargs)
        self.set_aspect(0.5, adjustable='box', anchor='C')
        self.cla()
    def _get_core_transform(self, resolution):
        return self.MollweideTransform(resolution)
class LambertAxes(GeoAxes):
    # Registered projection name (used as ``projection='lambert'``).
    name = 'lambert'
    class LambertTransform(Transform):
        """
        The base Lambert transform.
        """
        input_dims = 2
        output_dims = 2
        is_separable = False
        def __init__(self, center_longitude, center_latitude, resolution):
            """
            Create a new Lambert transform. Resolution is the number of steps
            to interpolate between each input line segment to approximate its
            path in curved Lambert space.
            """
            Transform.__init__(self)
            self._resolution = resolution
            self._center_longitude = center_longitude
            self._center_latitude = center_latitude
        def transform(self, ll):
            # Project (longitude, latitude) columns to (x, y) about the
            # configured center point.
            longitude = ll[:, 0:1]
            latitude = ll[:, 1:2]
            clong = self._center_longitude
            clat = self._center_latitude
            cos_lat = np.cos(latitude)
            sin_lat = np.sin(latitude)
            diff_long = longitude - clong
            cos_diff_long = np.cos(diff_long)
            inner_k = (1.0 +
                       np.sin(clat)*sin_lat +
                       np.cos(clat)*cos_lat*cos_diff_long)
            # Prevent divide-by-zero problems
            inner_k = np.where(inner_k == 0.0, 1e-15, inner_k)
            k = np.sqrt(2.0 / inner_k)
            x = k*cos_lat*np.sin(diff_long)
            y = k*(np.cos(clat)*sin_lat -
                   np.sin(clat)*cos_lat*cos_diff_long)
            return np.concatenate((x, y), 1)
        transform.__doc__ = Transform.transform.__doc__
        transform_non_affine = transform
        transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
        def transform_path(self, path):
            # NOTE(review): ``vertices`` is unused.
            vertices = path.vertices
            ipath = path.interpolated(self._resolution)
            return Path(self.transform(ipath.vertices), ipath.codes)
        transform_path.__doc__ = Transform.transform_path.__doc__
        transform_path_non_affine = transform_path
        transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
        def inverted(self):
            return LambertAxes.InvertedLambertTransform(
                self._center_longitude,
                self._center_latitude,
                self._resolution)
        inverted.__doc__ = Transform.inverted.__doc__
    class InvertedLambertTransform(Transform):
        input_dims = 2
        output_dims = 2
        is_separable = False
        def __init__(self, center_longitude, center_latitude, resolution):
            Transform.__init__(self)
            self._resolution = resolution
            self._center_longitude = center_longitude
            self._center_latitude = center_latitude
        def transform(self, xy):
            # Inverse mapping from projected (x, y) back to (lon, lat).
            x = xy[:, 0:1]
            y = xy[:, 1:2]
            clong = self._center_longitude
            clat = self._center_latitude
            p = np.sqrt(x*x + y*y)
            # Avoid division by zero at the projection center.
            p = np.where(p == 0.0, 1e-9, p)
            c = 2.0 * np.arcsin(0.5 * p)
            sin_c = np.sin(c)
            cos_c = np.cos(c)
            lat = np.arcsin(cos_c*np.sin(clat) +
                            ((y*sin_c*np.cos(clat)) / p))
            # NOTE(review): ``long`` shadows the Python 2 builtin; also
            # np.arctan (not arctan2) may pick the wrong quadrant far from
            # the center -- confirm the intended domain.
            long = clong + np.arctan(
                (x*sin_c) / (p*np.cos(clat)*cos_c - y*np.sin(clat)*sin_c))
            return np.concatenate((long, lat), 1)
        transform.__doc__ = Transform.transform.__doc__
        def inverted(self):
            return LambertAxes.LambertTransform(
                self._center_longitude,
                self._center_latitude,
                self._resolution)
        inverted.__doc__ = Transform.inverted.__doc__
    def __init__(self, *args, **kwargs):
        self._longitude_cap = np.pi / 2.0
        # Projection center is configurable via keyword arguments.
        self._center_longitude = kwargs.pop("center_longitude", 0.0)
        self._center_latitude = kwargs.pop("center_latitude", 0.0)
        GeoAxes.__init__(self, *args, **kwargs)
        self.set_aspect('equal', adjustable='box', anchor='C')
        self.cla()
    def cla(self):
        GeoAxes.cla(self)
        # Hide latitude tick labels for this projection.
        self.yaxis.set_major_formatter(NullFormatter())
    def _get_core_transform(self, resolution):
        return self.LambertTransform(
            self._center_longitude,
            self._center_latitude,
            resolution)
    def _get_affine_transform(self):
        # Scale/translate the projected disk into the [0, 1] axes box.
        return Affine2D() \
            .scale(0.25) \
            .translate(0.5, 0.5)
| agpl-3.0 |
snurk/meta-strains | scripts/accuracy.py | 1 | 5070 | import pandas as pd
import numpy as np
from collections import Counter
from itertools import permutations
import networkx as nx
def print_edges(edges):
    """Print the given edge ids on one line, separated by commas."""
    as_text = map(str, edges)
    print(','.join(as_text))
# Read the assembly graph (GFA format).
# Keep only the largest weakly connected component.
G = nx.DiGraph()
with open("assembly/K127/prelim_graph.gfa") as f:
    for line in f:
        line = line.split()
        line_type = line[0]
        # Segment record, e.g.: S 238024 ACCAATTAT KC:i:37210
        if line_type == "S":
            v_name = int(line[1])
            v_length = len(line[2])
            G.add_node(v_name, length=v_length)
        # Link record, e.g.: L 238322 + 19590 - 55M
        if line_type == "L":
            v1 = int(line[1])
            v2 = int(line[3])
            G.add_edge(v1, v2)
# keep only the largest component
new_G = nx.DiGraph()
for g in nx.weakly_connected_component_subgraphs(G):
    #print(g.number_of_nodes())
    if new_G.number_of_nodes() < g.number_of_nodes():
        new_G = g.copy()
G = new_G.copy()
# Reference (ground-truth) table.
# Read the answer file as-is.
df_ref = pd.read_csv("refs/refs_edges.txt", header=None, names=["e"])
df_ref = df_ref["e"].str.split('\t', 1, expand=True)
df_ref.columns = ["e_id", "strains"]
df_ref = df_ref.set_index("e_id")
df_ref.index = df_ref.index.astype("int")
# Keep only edges from the large component:
df_ref = df_ref.loc[list(G.nodes)]
# Drop edges that belong to no strain from the table:
nobody_edges = df_ref[df_ref["strains"].isnull()].index
print("{n} edges are Nobody".format(n=len(nobody_edges)), '\n')
df_ref = df_ref.loc[set(df_ref.index) - set(nobody_edges)]
# Split the reference list; "strain_contig" -> strain name:
df_ref["strains"] = df_ref["strains"].str.split('\t')
df_ref["strains"] = df_ref["strains"].apply(lambda x: [s.rpartition('_')[0] for s in x])
df_ref["strains"] = df_ref["strains"].apply(Counter)
# Compute copy number of each edge (single-copy iff the most frequent
# strain occurs exactly once):
df_ref["single_copy"] = df_ref["strains"].apply(lambda x: x.most_common(1)[0][1] == 1)
# Read edge lengths:
df_length = pd.read_csv("edges_lengths.tsv", sep='\t', header=None, names=["length"])
df_length = df_length.astype("int")
df_ref = df_ref.join(df_length, how='inner')
# Read abundance profiles.
ref_profile = pd.read_csv("refs/profile.csv", header=None, index_col=0)
# Normalize each of the 10 sample columns to sum to 1.
for i in range(1, 11):
    ref_profile[i] = ref_profile[i] / ref_profile[i].sum()
desman_profile = pd.read_csv("desman_results/%s_0/desman_freqs.csv" % len(ref_profile),
                             header=None, index_col=0, dtype=float)
desman_profile.index = desman_profile.index.astype(int)
# Find the correspondence between reference and DESMAN profiles by
# brute-force search over row permutations, minimizing squared error.
ref_freqs = ref_profile.as_matrix()
ans_error = float("Inf")
ans_permut = None
for cur_permut in permutations(desman_profile.index):
    desman_freqs = desman_profile.loc[cur_permut, :].as_matrix()
    #print(cur_error, cur_permut)
    cur_error = ((ref_freqs - desman_freqs) ** 2).sum()
    if cur_error < ans_error:
        ans_error = cur_error
        ans_permut = cur_permut
print("Error:", ans_error)
def invert_permutation(permutation):
    """Return indices of *permutation* ordered by value (a stable argsort)."""
    return sorted(range(len(permutation)), key=lambda idx: permutation[idx])
strains = list('s' + ref_profile.iloc[invert_permutation(ans_permut), :].index.astype(str))
# DESMAN answers table.
df_desman = pd.read_csv("desman_results/%s_0/gene_assignment_etaS_df.csv" % len(strains), skiprows=1,
                        names=["e_id"] + strains)
# Edge ids carry a leading character; strip it and convert to int.
df_desman['e_id'] = df_desman['e_id'].str[1:].astype("int")
df_desman = df_desman.set_index('e_id')
df_desman[strains] = df_desman[strains].astype('int')
#df_desman = df_desman.join(df_length, how='inner')
df_desman = df_desman.loc[set(df_ref.index) - set(nobody_edges)]
# One 0/1 indicator column per strain in the reference table.
for cur_s in strains:
    df_ref[cur_s] = df_ref['strains'].apply(lambda x: int(cur_s in x))
# DESMAN accuracy.
df_ref.sort_index(inplace=True)
df_desman.sort_index(inplace=True)
# An edge counts as correct only if every strain assignment matches.
right_answers = (df_ref[strains] == df_desman[strains]).sum(axis=1) == len(strains)
print("Accuracy on all edges: %.2f" % (right_answers.sum() / len(df_ref)))
long_edges = df_ref['length'] > 500
print("Percent of long edges: %.2f" % (long_edges.sum() / len(df_ref)))
print("Accuracy on long edges: %.2f" % ((right_answers & long_edges).sum() / long_edges.sum()))
print()
# Per-strain accuracy.
for cur_s in strains:
    print(cur_s)
    right_answers = df_ref[cur_s] == df_desman[cur_s]
    print("Accuracy on all edges: %.2f" % (right_answers.sum() / len(df_ref)))
    print("Accuracy on long edges: %.2f" % ((right_answers & long_edges).sum() / long_edges.sum()))
    print()
# Count graph tips: nodes with no incoming (tips_1) or no outgoing
# (tips_2) edges.
tips_1 = 0
tips_2 = 0
for v in G.nodes:
    if G.in_degree(v) == 0:
        tips_1 += 1
    if G.out_degree(v) == 0:
        tips_2 += 1
print("in_degree == 0, out_degree == 0, all_nodes")
print(tips_1, tips_2, len(G.nodes))
| mit |
larsmans/scikit-learn | examples/decomposition/plot_pca_vs_lda.py | 182 | 1743 | """
=======================================================
Comparison of LDA and PCA 2D projection of Iris dataset
=======================================================
The Iris dataset represents 3 kinds of Iris flowers (Setosa, Versicolour
and Virginica) with 4 attributes: sepal length, sepal width, petal length
and petal width.
Principal Component Analysis (PCA) applied to this data identifies the
combination of attributes (principal components, or directions in the
feature space) that account for the most variance in the data. Here we
plot the different samples on the 2 first principal components.
Linear Discriminant Analysis (LDA) tries to identify attributes that
account for the most variance *between classes*. In particular,
LDA, in contrast to PCA, is a supervised method, using known class labels.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.decomposition import PCA
# NOTE(review): old-style import; recent scikit-learn moved LDA to
# sklearn.discriminant_analysis -- fine for the version pinned here.
from sklearn.lda import LDA
iris = datasets.load_iris()
X = iris.data
y = iris.target
target_names = iris.target_names
# Unsupervised: project onto the 2 directions of maximal variance.
pca = PCA(n_components=2)
X_r = pca.fit(X).transform(X)
# Supervised: project onto the 2 most class-discriminative directions.
lda = LDA(n_components=2)
X_r2 = lda.fit(X, y).transform(X)
# Percentage of variance explained for each component
print('explained variance ratio (first two components): %s'
      % str(pca.explained_variance_ratio_))
plt.figure()
for c, i, target_name in zip("rgb", [0, 1, 2], target_names):
    plt.scatter(X_r[y == i, 0], X_r[y == i, 1], c=c, label=target_name)
plt.legend()
plt.title('PCA of IRIS dataset')
plt.figure()
for c, i, target_name in zip("rgb", [0, 1, 2], target_names):
    plt.scatter(X_r2[y == i, 0], X_r2[y == i, 1], c=c, label=target_name)
plt.legend()
plt.title('LDA of IRIS dataset')
plt.show()
| bsd-3-clause |
eg-zhang/scikit-learn | benchmarks/bench_plot_lasso_path.py | 301 | 4003 | """Benchmarks of Lasso regularization path computation using Lars and CD
The input data is mostly low rank but has a fat, infinite tail.
"""
from __future__ import print_function
from collections import defaultdict
import gc
import sys
from time import time
import numpy as np
from sklearn.linear_model import lars_path
from sklearn.linear_model import lasso_path
from sklearn.datasets.samples_generator import make_regression
def compute_bench(samples_range, features_range):
    """Benchmark lasso path computation via LARS and coordinate descent.

    For every (n_samples, n_features) combination, generates a regression
    problem and times four variants: ``lars_path`` and ``lasso_path``, each
    with and without a precomputed Gram matrix.

    Parameters
    ----------
    samples_range : sequence of int
        Sample counts to benchmark.
    features_range : sequence of int
        Feature counts to benchmark.

    Returns
    -------
    dict
        Maps method label -> list of elapsed seconds, one entry per
        combination, in iteration order.
    """
    it = 0
    results = defaultdict(list)
    max_it = len(samples_range) * len(features_range)
    for n_samples in samples_range:
        for n_features in features_range:
            it += 1
            print('====================')
            print('Iteration %03d of %03d' % (it, max_it))
            print('====================')
            dataset_kwargs = {
                'n_samples': n_samples,
                # Use floor division: these parameters must be ints, and
                # under Python 3 true division would yield floats.
                'n_informative': n_features // 10,
                'effective_rank': min(n_samples, n_features) // 10,
                #'effective_rank': None,
                'bias': 0.0,
            }
            print("n_samples: %d" % n_samples)
            print("n_features: %d" % n_features)
            X, y = make_regression(**dataset_kwargs)
            gc.collect()
            print("benchmarking lars_path (with Gram):", end='')
            sys.stdout.flush()
            tstart = time()
            G = np.dot(X.T, X)  # precomputed Gram matrix
            Xy = np.dot(X.T, y)
            lars_path(X, y, Xy=Xy, Gram=G, method='lasso')
            delta = time() - tstart
            print("%0.3fs" % delta)
            results['lars_path (with Gram)'].append(delta)
            gc.collect()
            print("benchmarking lars_path (without Gram):", end='')
            sys.stdout.flush()
            tstart = time()
            lars_path(X, y, method='lasso')
            delta = time() - tstart
            print("%0.3fs" % delta)
            results['lars_path (without Gram)'].append(delta)
            gc.collect()
            print("benchmarking lasso_path (with Gram):", end='')
            sys.stdout.flush()
            tstart = time()
            lasso_path(X, y, precompute=True)
            delta = time() - tstart
            print("%0.3fs" % delta)
            results['lasso_path (with Gram)'].append(delta)
            gc.collect()
            print("benchmarking lasso_path (without Gram):", end='')
            sys.stdout.flush()
            tstart = time()
            lasso_path(X, y, precompute=False)
            delta = time() - tstart
            print("%0.3fs" % delta)
            results['lasso_path (without Gram)'].append(delta)
    return results
if __name__ == '__main__':
    from mpl_toolkits.mplot3d import axes3d  # register the 3d projection
    import matplotlib.pyplot as plt
    # 5 sizes from 10 to 2000 on each axis of the benchmark grid.
    samples_range = np.linspace(10, 2000, 5).astype(np.int)
    features_range = np.linspace(10, 2000, 5).astype(np.int)
    results = compute_bench(samples_range, features_range)
    max_time = max(max(t) for t in results.values())
    fig = plt.figure('scikit-learn Lasso path benchmark results')
    i = 1
    # One 3D timing surface per benchmarked method.
    for c, (label, timings) in zip('bcry', sorted(results.items())):
        ax = fig.add_subplot(2, 2, i, projection='3d')
        X, Y = np.meshgrid(samples_range, features_range)
        Z = np.asarray(timings).reshape(samples_range.shape[0],
                                        features_range.shape[0])
        # plot the actual surface
        ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.8)
        # dummy point plot to stick the legend to since surface plot do not
        # support legends (yet?)
        #ax.plot([1], [1], [1], color=c, label=label)
        ax.set_xlabel('n_samples')
        ax.set_ylabel('n_features')
        ax.set_zlabel('Time (s)')
        ax.set_zlim3d(0.0, max_time * 1.1)
        ax.set_title(label)
        #ax.legend()
        i += 1
    plt.show()
| bsd-3-clause |
arabenjamin/scikit-learn | sklearn/feature_extraction/tests/test_text.py | 110 | 34127 | from __future__ import unicode_literals
import warnings
from sklearn.feature_extraction.text import strip_tags
from sklearn.feature_extraction.text import strip_accents_unicode
from sklearn.feature_extraction.text import strip_accents_ascii
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
from sklearn.cross_validation import train_test_split
from sklearn.cross_validation import cross_val_score
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.base import clone
import numpy as np
from nose import SkipTest
from nose.tools import assert_equal
from nose.tools import assert_false
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_almost_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_raises
from sklearn.utils.testing import (assert_in, assert_less, assert_greater,
assert_warns_message, assert_raise_message,
clean_warning_registry)
from collections import defaultdict, Mapping
from functools import partial
import pickle
from io import StringIO
# Toy corpora shared by the tests below: 6 "junk food" documents and
# 5 "not junk food" documents.
JUNK_FOOD_DOCS = (
    "the pizza pizza beer copyright",
    "the pizza burger beer copyright",
    "the the pizza beer beer copyright",
    "the burger beer beer copyright",
    "the coke burger coke copyright",
    "the coke burger burger",
)
NOTJUNK_FOOD_DOCS = (
    "the salad celeri copyright",
    "the salad salad sparkling water copyright",
    "the the celeri celeri copyright",
    "the tomato tomato salad water",
    "the tomato salad water copyright",
)
ALL_FOOD_DOCS = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
def uppercase(s):
    """Custom test preprocessor: accent-stripped, upper-cased text."""
    without_accents = strip_accents_unicode(s)
    return without_accents.upper()
def strip_eacute(s):
    """Replace every e-acute (U+00E9) with a plain ASCII 'e'."""
    return 'e'.join(s.split('\xe9'))
def split_tokenize(s):
    """Custom test tokenizer: split on runs of whitespace."""
    tokens = s.split()
    return tokens
def lazy_analyze(s):
    """Analyzer that ignores its input and emits one constant feature."""
    del s  # unused on purpose
    return ['the_ultimate_feature']
def test_strip_accents():
    """strip_accents_unicode on Latin accents, Arabic, and mixed text."""
    # check some classical latin accentuated symbols
    a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
    expected = 'aaaaaaceeee'
    assert_equal(strip_accents_unicode(a), expected)
    a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
    expected = 'iiiinooooouuuuy'
    assert_equal(strip_accents_unicode(a), expected)
    # check some arabic
    a = '\u0625'  # halef with a hamza below
    expected = '\u0627'  # simple halef
    assert_equal(strip_accents_unicode(a), expected)
    # mix letters accentuated and not
    a = "this is \xe0 test"
    expected = 'this is a test'
    assert_equal(strip_accents_unicode(a), expected)
def test_to_ascii():
    """strip_accents_ascii: like unicode variant, but drops non-ASCII."""
    # check some classical latin accentuated symbols
    a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
    expected = 'aaaaaaceeee'
    assert_equal(strip_accents_ascii(a), expected)
    a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
    expected = 'iiiinooooouuuuy'
    assert_equal(strip_accents_ascii(a), expected)
    # check some arabic
    a = '\u0625'  # halef with a hamza below
    expected = ''  # halef has no direct ascii match
    assert_equal(strip_accents_ascii(a), expected)
    # mix letters accentuated and not
    a = "this is \xe0 test"
    expected = 'this is a test'
    assert_equal(strip_accents_ascii(a), expected)
def test_word_analyzer_unigrams():
    """Word-level analyzer: accent stripping, custom preprocessor/tokenizer."""
    for Vectorizer in (CountVectorizer, HashingVectorizer):
        wa = Vectorizer(strip_accents='ascii').build_analyzer()
        text = ("J'ai mang\xe9 du kangourou ce midi, "
                "c'\xe9tait pas tr\xeas bon.")
        expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
                    'etait', 'pas', 'tres', 'bon']
        assert_equal(wa(text), expected)
        text = "This is a test, really.\n\n I met Harry yesterday."
        expected = ['this', 'is', 'test', 'really', 'met', 'harry',
                    'yesterday']
        assert_equal(wa(text), expected)
        wa = Vectorizer(input='file').build_analyzer()
        text = StringIO("This is a test with a file-like object!")
        expected = ['this', 'is', 'test', 'with', 'file', 'like',
                    'object']
        assert_equal(wa(text), expected)
        # with custom preprocessor
        wa = Vectorizer(preprocessor=uppercase).build_analyzer()
        text = ("J'ai mang\xe9 du kangourou ce midi, "
                " c'\xe9tait pas tr\xeas bon.")
        expected = ['AI', 'MANGE', 'DU', 'KANGOUROU', 'CE', 'MIDI',
                    'ETAIT', 'PAS', 'TRES', 'BON']
        assert_equal(wa(text), expected)
        # with custom tokenizer
        wa = Vectorizer(tokenizer=split_tokenize,
                        strip_accents='ascii').build_analyzer()
        text = ("J'ai mang\xe9 du kangourou ce midi, "
                "c'\xe9tait pas tr\xeas bon.")
        expected = ["j'ai", 'mange', 'du', 'kangourou', 'ce', 'midi,',
                    "c'etait", 'pas', 'tres', 'bon.']
        assert_equal(wa(text), expected)
def test_word_analyzer_unigrams_and_bigrams():
    """Word analyzer with ngram_range=(1, 2): unigrams then bigrams."""
    wa = CountVectorizer(analyzer="word", strip_accents='unicode',
                         ngram_range=(1, 2)).build_analyzer()
    text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
    expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
                'etait', 'pas', 'tres', 'bon', 'ai mange', 'mange du',
                'du kangourou', 'kangourou ce', 'ce midi', 'midi etait',
                'etait pas', 'pas tres', 'tres bon']
    assert_equal(wa(text), expected)
def test_unicode_decode_error():
    """Analyzers raise UnicodeDecodeError on a wrongly-declared encoding."""
    # decode_error default to strict, so this should fail
    # First, encode (as bytes) a unicode string.
    text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
    text_bytes = text.encode('utf-8')
    # Then let the Analyzer try to decode it as ascii. It should fail,
    # because we have given it an incorrect encoding.
    wa = CountVectorizer(ngram_range=(1, 2), encoding='ascii').build_analyzer()
    assert_raises(UnicodeDecodeError, wa, text_bytes)
    ca = CountVectorizer(analyzer='char', ngram_range=(3, 6),
                         encoding='ascii').build_analyzer()
    assert_raises(UnicodeDecodeError, ca, text_bytes)
def test_char_ngram_analyzer():
    """Character n-gram analyzer: check first/last n-grams of samples."""
    cnga = CountVectorizer(analyzer='char', strip_accents='unicode',
                           ngram_range=(3, 6)).build_analyzer()
    text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon"
    expected = ["j'a", "'ai", 'ai ', 'i m', ' ma']
    assert_equal(cnga(text)[:5], expected)
    expected = ['s tres', ' tres ', 'tres b', 'res bo', 'es bon']
    assert_equal(cnga(text)[-5:], expected)
    text = "This \n\tis a test, really.\n\n I met Harry yesterday"
    expected = ['thi', 'his', 'is ', 's i', ' is']
    assert_equal(cnga(text)[:5], expected)
    expected = [' yeste', 'yester', 'esterd', 'sterda', 'terday']
    assert_equal(cnga(text)[-5:], expected)
    cnga = CountVectorizer(input='file', analyzer='char',
                           ngram_range=(3, 6)).build_analyzer()
    text = StringIO("This is a test with a file-like object!")
    expected = ['thi', 'his', 'is ', 's i', ' is']
    assert_equal(cnga(text)[:5], expected)
def test_char_wb_ngram_analyzer():
    """char_wb analyzer: n-grams padded at word boundaries."""
    cnga = CountVectorizer(analyzer='char_wb', strip_accents='unicode',
                           ngram_range=(3, 6)).build_analyzer()
    text = "This \n\tis a test, really.\n\n I met Harry yesterday"
    expected = [' th', 'thi', 'his', 'is ', ' thi']
    assert_equal(cnga(text)[:5], expected)
    expected = ['yester', 'esterd', 'sterda', 'terday', 'erday ']
    assert_equal(cnga(text)[-5:], expected)
    cnga = CountVectorizer(input='file', analyzer='char_wb',
                           ngram_range=(3, 6)).build_analyzer()
    text = StringIO("A test with a file-like object!")
    expected = [' a ', ' te', 'tes', 'est', 'st ', ' tes']
    assert_equal(cnga(text)[:6], expected)
def test_countvectorizer_custom_vocabulary():
    """A user-supplied vocabulary may be dict, list, iterable or mapping."""
    vocab = {"pizza": 0, "beer": 1}
    terms = set(vocab.keys())
    # Try a few of the supported types.
    for typ in [dict, list, iter, partial(defaultdict, int)]:
        v = typ(vocab)
        vect = CountVectorizer(vocabulary=v)
        vect.fit(JUNK_FOOD_DOCS)
        if isinstance(v, Mapping):
            assert_equal(vect.vocabulary_, vocab)
        else:
            assert_equal(set(vect.vocabulary_), terms)
        X = vect.transform(JUNK_FOOD_DOCS)
        assert_equal(X.shape[1], len(terms))
def test_countvectorizer_custom_vocabulary_pipeline():
    """Custom vocabulary survives inside a Pipeline."""
    what_we_like = ["pizza", "beer"]
    pipe = Pipeline([
        ('count', CountVectorizer(vocabulary=what_we_like)),
        ('tfidf', TfidfTransformer())])
    X = pipe.fit_transform(ALL_FOOD_DOCS)
    assert_equal(set(pipe.named_steps['count'].vocabulary_),
                 set(what_we_like))
    assert_equal(X.shape[1], len(what_we_like))
def test_countvectorizer_custom_vocabulary_repeated_indeces():
    """Repeated indices in a custom vocabulary raise ValueError."""
    vocab = {"pizza": 0, "beer": 0}
    try:
        CountVectorizer(vocabulary=vocab)
    except ValueError as e:
        assert_in("vocabulary contains repeated indices", str(e).lower())
def test_countvectorizer_custom_vocabulary_gap_index():
    """Gaps in custom vocabulary indices raise ValueError."""
    vocab = {"pizza": 1, "beer": 2}
    try:
        CountVectorizer(vocabulary=vocab)
    except ValueError as e:
        assert_in("doesn't contain index", str(e).lower())
def test_countvectorizer_stop_words():
    """get_stop_words: 'english' preset, bad names, explicit lists."""
    cv = CountVectorizer()
    cv.set_params(stop_words='english')
    assert_equal(cv.get_stop_words(), ENGLISH_STOP_WORDS)
    cv.set_params(stop_words='_bad_str_stop_')
    assert_raises(ValueError, cv.get_stop_words)
    cv.set_params(stop_words='_bad_unicode_stop_')
    assert_raises(ValueError, cv.get_stop_words)
    stoplist = ['some', 'other', 'words']
    cv.set_params(stop_words=stoplist)
    assert_equal(cv.get_stop_words(), set(stoplist))
def test_countvectorizer_empty_vocabulary():
    """An empty (explicit or effectively-empty) vocabulary raises."""
    try:
        vect = CountVectorizer(vocabulary=[])
        vect.fit(["foo"])
        assert False, "we shouldn't get here"
    except ValueError as e:
        assert_in("empty vocabulary", str(e).lower())
    try:
        v = CountVectorizer(max_df=1.0, stop_words="english")
        # fit on stopwords only
        v.fit(["to be or not to be", "and me too", "and so do you"])
        assert False, "we shouldn't get here"
    except ValueError as e:
        assert_in("empty vocabulary", str(e).lower())
def test_fit_countvectorizer_twice():
    """Refitting on different data rebuilds the vocabulary."""
    cv = CountVectorizer()
    X1 = cv.fit_transform(ALL_FOOD_DOCS[:5])
    X2 = cv.fit_transform(ALL_FOOD_DOCS[5:])
    assert_not_equal(X1.shape[1], X2.shape[1])
def test_tf_idf_smoothing():
    """smooth_idf=True keeps tf-idf non-negative and l2-normalized."""
    X = [[1, 1, 1],
         [1, 1, 0],
         [1, 0, 0]]
    tr = TfidfTransformer(smooth_idf=True, norm='l2')
    tfidf = tr.fit_transform(X).toarray()
    assert_true((tfidf >= 0).all())
    # check normalization
    assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
    # this is robust to features with only zeros
    X = [[1, 1, 0],
         [1, 1, 0],
         [1, 0, 0]]
    tr = TfidfTransformer(smooth_idf=True, norm='l2')
    tfidf = tr.fit_transform(X).toarray()
    assert_true((tfidf >= 0).all())
def test_tfidf_no_smoothing():
    """smooth_idf=False warns (divide by zero) on all-zero features."""
    X = [[1, 1, 1],
         [1, 1, 0],
         [1, 0, 0]]
    tr = TfidfTransformer(smooth_idf=False, norm='l2')
    tfidf = tr.fit_transform(X).toarray()
    assert_true((tfidf >= 0).all())
    # check normalization
    assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
    # the lack of smoothing make IDF fragile in the presence of feature with
    # only zeros
    X = [[1, 1, 0],
         [1, 1, 0],
         [1, 0, 0]]
    tr = TfidfTransformer(smooth_idf=False, norm='l2')
    clean_warning_registry()
    with warnings.catch_warnings(record=True) as w:
        1. / np.array([0.])
        numpy_provides_div0_warning = len(w) == 1
    in_warning_message = 'divide by zero'
    tfidf = assert_warns_message(RuntimeWarning, in_warning_message,
                                 tr.fit_transform, X).toarray()
    if not numpy_provides_div0_warning:
        raise SkipTest("Numpy does not provide div 0 warnings.")
def test_sublinear_tf():
    """sublinear_tf applies 1 + log(tf): monotone, sub-linear growth."""
    X = [[1], [2], [3]]
    tr = TfidfTransformer(sublinear_tf=True, use_idf=False, norm=None)
    tfidf = tr.fit_transform(X).toarray()
    assert_equal(tfidf[0], 1)
    assert_greater(tfidf[1], tfidf[0])
    assert_greater(tfidf[2], tfidf[1])
    assert_less(tfidf[1], 2)
    assert_less(tfidf[2], 3)
def test_vectorizer():
    """End-to-end CountVectorizer + TfidfTransformer / TfidfVectorizer."""
    # raw documents as an iterator
    train_data = iter(ALL_FOOD_DOCS[:-1])
    test_data = [ALL_FOOD_DOCS[-1]]
    n_train = len(ALL_FOOD_DOCS) - 1
    # test without vocabulary
    v1 = CountVectorizer(max_df=0.5)
    counts_train = v1.fit_transform(train_data)
    if hasattr(counts_train, 'tocsr'):
        counts_train = counts_train.tocsr()
    assert_equal(counts_train[0, v1.vocabulary_["pizza"]], 2)
    # build a vectorizer v2 with the same vocabulary as the one fitted by v1
    v2 = CountVectorizer(vocabulary=v1.vocabulary_)
    # compare that the two vectorizer give the same output on the test sample
    for v in (v1, v2):
        counts_test = v.transform(test_data)
        if hasattr(counts_test, 'tocsr'):
            counts_test = counts_test.tocsr()
        vocabulary = v.vocabulary_
        assert_equal(counts_test[0, vocabulary["salad"]], 1)
        assert_equal(counts_test[0, vocabulary["tomato"]], 1)
        assert_equal(counts_test[0, vocabulary["water"]], 1)
        # stop word from the fixed list
        assert_false("the" in vocabulary)
        # stop word found automatically by the vectorizer DF thresholding
        # words that are high frequent across the complete corpus are likely
        # to be not informative (either real stop words of extraction
        # artifacts)
        assert_false("copyright" in vocabulary)
        # not present in the sample
        assert_equal(counts_test[0, vocabulary["coke"]], 0)
        assert_equal(counts_test[0, vocabulary["burger"]], 0)
        assert_equal(counts_test[0, vocabulary["beer"]], 0)
        assert_equal(counts_test[0, vocabulary["pizza"]], 0)
    # test tf-idf
    t1 = TfidfTransformer(norm='l1')
    tfidf = t1.fit(counts_train).transform(counts_train).toarray()
    assert_equal(len(t1.idf_), len(v1.vocabulary_))
    assert_equal(tfidf.shape, (n_train, len(v1.vocabulary_)))
    # test tf-idf with new data
    tfidf_test = t1.transform(counts_test).toarray()
    assert_equal(tfidf_test.shape, (len(test_data), len(v1.vocabulary_)))
    # test tf alone
    t2 = TfidfTransformer(norm='l1', use_idf=False)
    tf = t2.fit(counts_train).transform(counts_train).toarray()
    assert_equal(t2.idf_, None)
    # test idf transform with unlearned idf vector
    t3 = TfidfTransformer(use_idf=True)
    assert_raises(ValueError, t3.transform, counts_train)
    # test idf transform with incompatible n_features
    X = [[1, 1, 5],
         [1, 1, 0]]
    t3.fit(X)
    X_incompt = [[1, 3],
                 [1, 3]]
    assert_raises(ValueError, t3.transform, X_incompt)
    # L1-normalized term frequencies sum to one
    assert_array_almost_equal(np.sum(tf, axis=1), [1.0] * n_train)
    # test the direct tfidf vectorizer
    # (equivalent to term count vectorizer + tfidf transformer)
    train_data = iter(ALL_FOOD_DOCS[:-1])
    tv = TfidfVectorizer(norm='l1')
    tv.max_df = v1.max_df
    tfidf2 = tv.fit_transform(train_data).toarray()
    assert_false(tv.fixed_vocabulary_)
    assert_array_almost_equal(tfidf, tfidf2)
    # test the direct tfidf vectorizer with new data
    tfidf_test2 = tv.transform(test_data).toarray()
    assert_array_almost_equal(tfidf_test, tfidf_test2)
    # test transform on unfitted vectorizer with empty vocabulary
    v3 = CountVectorizer(vocabulary=None)
    assert_raises(ValueError, v3.transform, train_data)
    # ascii preprocessor?
    v3.set_params(strip_accents='ascii', lowercase=False)
    assert_equal(v3.build_preprocessor(), strip_accents_ascii)
    # error on bad strip_accents param
    v3.set_params(strip_accents='_gabbledegook_', preprocessor=None)
    assert_raises(ValueError, v3.build_preprocessor)
    # error with bad analyzer type
    # NOTE(review): this overwrites the bound ``set_params`` method with a
    # string instead of calling set_params(analyzer=...) -- likely a bug,
    # though build_analyzer still raises below.
    v3.set_params = '_invalid_analyzer_type_'
    assert_raises(ValueError, v3.build_analyzer)
def test_tfidf_vectorizer_setters():
tv = TfidfVectorizer(norm='l2', use_idf=False, smooth_idf=False,
sublinear_tf=False)
tv.norm = 'l1'
assert_equal(tv._tfidf.norm, 'l1')
tv.use_idf = True
assert_true(tv._tfidf.use_idf)
tv.smooth_idf = True
assert_true(tv._tfidf.smooth_idf)
tv.sublinear_tf = True
assert_true(tv._tfidf.sublinear_tf)
def test_hashing_vectorizer():
v = HashingVectorizer()
X = v.transform(ALL_FOOD_DOCS)
token_nnz = X.nnz
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# By default the hashed values receive a random sign and l2 normalization
# makes the feature values bounded
assert_true(np.min(X.data) > -1)
assert_true(np.min(X.data) < 0)
assert_true(np.max(X.data) > 0)
assert_true(np.max(X.data) < 1)
# Check that the rows are normalized
for i in range(X.shape[0]):
assert_almost_equal(np.linalg.norm(X[0].data, 2), 1.0)
# Check vectorization with some non-default parameters
v = HashingVectorizer(ngram_range=(1, 2), non_negative=True, norm='l1')
X = v.transform(ALL_FOOD_DOCS)
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# ngrams generate more non zeros
ngrams_nnz = X.nnz
assert_true(ngrams_nnz > token_nnz)
assert_true(ngrams_nnz < 2 * token_nnz)
# makes the feature values bounded
assert_true(np.min(X.data) > 0)
assert_true(np.max(X.data) < 1)
# Check that the rows are normalized
for i in range(X.shape[0]):
assert_almost_equal(np.linalg.norm(X[0].data, 1), 1.0)
def test_feature_names():
cv = CountVectorizer(max_df=0.5)
# test for Value error on unfitted/empty vocabulary
assert_raises(ValueError, cv.get_feature_names)
X = cv.fit_transform(ALL_FOOD_DOCS)
n_samples, n_features = X.shape
assert_equal(len(cv.vocabulary_), n_features)
feature_names = cv.get_feature_names()
assert_equal(len(feature_names), n_features)
assert_array_equal(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'],
feature_names)
for idx, name in enumerate(feature_names):
assert_equal(idx, cv.vocabulary_.get(name))
def test_vectorizer_max_features():
vec_factories = (
CountVectorizer,
TfidfVectorizer,
)
expected_vocabulary = set(['burger', 'beer', 'salad', 'pizza'])
expected_stop_words = set([u'celeri', u'tomato', u'copyright', u'coke',
u'sparkling', u'water', u'the'])
for vec_factory in vec_factories:
# test bounded number of extracted features
vectorizer = vec_factory(max_df=0.6, max_features=4)
vectorizer.fit(ALL_FOOD_DOCS)
assert_equal(set(vectorizer.vocabulary_), expected_vocabulary)
assert_equal(vectorizer.stop_words_, expected_stop_words)
def test_count_vectorizer_max_features():
    """Regression test: max_features didn't work correctly in 0.14."""
    vectorizers = [CountVectorizer(max_features=mf) for mf in (1, 3, None)]
    counts = [v.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
              for v in vectorizers]
    names = [v.get_feature_names() for v in vectorizers]

    # "the" is the most common token (frequency 7) and must always be kept,
    # whatever the value of max_features.
    for c in counts:
        assert_equal(7, c.max())
    for c, feature_names in zip(counts, names):
        assert_equal("the", feature_names[np.argmax(c)])
def test_vectorizer_max_df():
    """Terms above max_df must move from the vocabulary to stop_words_."""
    test_data = ['abc', 'dea', 'eat']
    vect = CountVectorizer(analyzer='char', max_df=1.0)
    vect.fit(test_data)
    assert_true('a' in vect.vocabulary_.keys())
    assert_equal(len(vect.vocabulary_.keys()), 6)
    assert_equal(len(vect.stop_words_), 0)

    # A float ratio (0.5 * 3 documents -> max_doc_count == 1.5) and an
    # absolute count (1) prune the same two too-frequent chars {a, e}.
    for max_df in (0.5, 1):
        vect.max_df = max_df
        vect.fit(test_data)
        assert_true('a' not in vect.vocabulary_.keys())  # {ae} ignored
        assert_equal(len(vect.vocabulary_.keys()), 4)    # {bcdt} remain
        assert_true('a' in vect.stop_words_)
        assert_equal(len(vect.stop_words_), 2)
def test_vectorizer_min_df():
    """Terms below min_df must move from the vocabulary to stop_words_."""
    test_data = ['abc', 'dea', 'eat']
    vect = CountVectorizer(analyzer='char', min_df=1)
    vect.fit(test_data)
    assert_true('a' in vect.vocabulary_.keys())
    assert_equal(len(vect.vocabulary_.keys()), 6)
    assert_equal(len(vect.stop_words_), 0)

    # An absolute count of 2 removes every char seen in a single document.
    vect.min_df = 2
    vect.fit(test_data)
    assert_true('c' not in vect.vocabulary_.keys())  # {bcdt} ignored
    assert_equal(len(vect.vocabulary_.keys()), 2)    # {ae} remain
    assert_true('c' in vect.stop_words_)
    assert_equal(len(vect.stop_words_), 4)

    # A ratio of 0.8 (-> min_doc_count == 2.4) is stricter still.
    vect.min_df = 0.8
    vect.fit(test_data)
    assert_true('c' not in vect.vocabulary_.keys())  # {bcdet} ignored
    assert_equal(len(vect.vocabulary_.keys()), 1)    # {a} remains
    assert_true('c' in vect.stop_words_)
    assert_equal(len(vect.stop_words_), 5)
def test_count_binary_occurrences():
    """binary=True must clip counts to 0/1, and dtype must be honored."""
    test_data = ['aaabc', 'abbde']

    # Default behaviour: true term counts.
    vect = CountVectorizer(analyzer='char', max_df=1.0)
    X = vect.fit_transform(test_data).toarray()
    assert_array_equal(['a', 'b', 'c', 'd', 'e'], vect.get_feature_names())
    assert_array_equal([[3, 1, 1, 0, 0],
                        [1, 2, 0, 1, 1]], X)

    # With binary=True only presence/absence is reported.
    vect = CountVectorizer(analyzer='char', max_df=1.0, binary=True)
    X = vect.fit_transform(test_data).toarray()
    assert_array_equal([[1, 1, 1, 0, 0],
                        [1, 1, 0, 1, 1]], X)

    # The output dtype is configurable.
    vect = CountVectorizer(analyzer='char', max_df=1.0,
                           binary=True, dtype=np.float32)
    assert_equal(vect.fit_transform(test_data).dtype, np.float32)
def test_hashed_binary_occurrences():
    """HashingVectorizer: binary mode and dtype handling."""
    test_data = ['aaabc', 'abbde']

    # Default behaviour: real counts survive the hashing trick.
    vect = HashingVectorizer(analyzer='char', non_negative=True,
                             norm=None)
    X = vect.transform(test_data)
    assert_equal(np.max(X[0:1].data), 3)
    assert_equal(np.max(X[1:2].data), 2)
    assert_equal(X.dtype, np.float64)

    # binary=True clips every count down to one.
    vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
                             norm=None)
    X = vect.transform(test_data)
    assert_equal(np.max(X.data), 1)
    assert_equal(X.dtype, np.float64)

    # The dtype parameter is forwarded to the output matrix.
    vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
                             norm=None, dtype=np.float64)
    assert_equal(vect.transform(test_data).dtype, np.float64)
def test_vectorizer_inverse_transform():
    """inverse_transform must recover the analyzed terms of each document."""
    data = ALL_FOOD_DOCS
    for vectorizer in (TfidfVectorizer(), CountVectorizer()):
        transformed_data = vectorizer.fit_transform(data)
        inversed_data = vectorizer.inverse_transform(transformed_data)
        analyze = vectorizer.build_analyzer()
        for doc, inversed_terms in zip(data, inversed_data):
            # Compare as sorted unique term arrays: order is not guaranteed.
            terms = np.sort(np.unique(analyze(doc)))
            inversed_terms = np.sort(np.unique(inversed_terms))
            assert_array_equal(terms, inversed_terms)

        # Dense numpy input must behave exactly like the sparse original.
        inversed_dense = vectorizer.inverse_transform(
            transformed_data.toarray())
        for terms, terms2 in zip(inversed_data, inversed_dense):
            assert_array_equal(np.sort(terms), np.sort(terms2))
def test_count_vectorizer_pipeline_grid_selection():
    """Grid search over a CountVectorizer + LinearSVC pipeline."""
    data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
    # label junk food as -1, the others as +1
    target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)

    # Hold out 20% of the data for the final evaluation.
    train_data, test_data, target_train, target_test = train_test_split(
        data, target, test_size=.2, random_state=0)

    pipeline = Pipeline([('vect', CountVectorizer()),
                         ('svc', LinearSVC())])
    parameters = {
        'vect__ngram_range': [(1, 1), (1, 2)],
        'svc__loss': ('hinge', 'squared_hinge')
    }
    # Jointly tune the feature extraction and the classifier.
    grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)

    # The best model found by grid search must be 100% correct on the
    # held out evaluation set.
    pred = grid_search.fit(train_data, target_train).predict(test_data)
    assert_array_equal(pred, target_test)

    # Every candidate converges to a 100% accurate model on this toy data,
    # so the first (unigram) configuration is retained as best.
    assert_equal(grid_search.best_score_, 1.0)
    best_vectorizer = grid_search.best_estimator_.named_steps['vect']
    assert_equal(best_vectorizer.ngram_range, (1, 1))
def test_vectorizer_pipeline_grid_selection():
    """Grid search over a TfidfVectorizer + LinearSVC pipeline."""
    data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
    # label junk food as -1, the others as +1
    target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)

    # Hold out 10% of the data for the final evaluation.
    train_data, test_data, target_train, target_test = train_test_split(
        data, target, test_size=.1, random_state=0)

    pipeline = Pipeline([('vect', TfidfVectorizer()),
                         ('svc', LinearSVC())])
    parameters = {
        'vect__ngram_range': [(1, 1), (1, 2)],
        'vect__norm': ('l1', 'l2'),
        'svc__loss': ('hinge', 'squared_hinge'),
    }
    # Jointly tune the feature extraction and the classifier.
    grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)

    # The best model found by grid search must be 100% correct on the
    # held out evaluation set.
    pred = grid_search.fit(train_data, target_train).predict(test_data)
    assert_array_equal(pred, target_test)

    # Every candidate converges to a 100% accurate model on this toy data,
    # so the first configuration is retained as best.
    assert_equal(grid_search.best_score_, 1.0)
    best_vectorizer = grid_search.best_estimator_.named_steps['vect']
    assert_equal(best_vectorizer.ngram_range, (1, 1))
    assert_equal(best_vectorizer.norm, 'l2')
    assert_false(best_vectorizer.fixed_vocabulary_)
def test_vectorizer_pipeline_cross_validation():
    """A Tfidf + LinearSVC pipeline must score perfectly on the toy corpus."""
    data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
    # label junk food as -1, the others as +1
    target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)

    pipeline = Pipeline([('vect', TfidfVectorizer()),
                         ('svc', LinearSVC())])
    cv_scores = cross_val_score(pipeline, data, target, cv=3)
    assert_array_equal(cv_scores, [1., 1., 1.])
def test_vectorizer_unicode():
    # tests that the count vectorizer works with cyrillic.
    # The document below is a UTF-8 encoded Russian sentence about
    # machine learning (15 word tokens once analyzed).
    document = (
        "\xd0\x9c\xd0\xb0\xd1\x88\xd0\xb8\xd0\xbd\xd0\xbd\xd0\xbe\xd0"
        "\xb5 \xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb5\xd0\xbd\xd0\xb8\xd0"
        "\xb5 \xe2\x80\x94 \xd0\xbe\xd0\xb1\xd1\x88\xd0\xb8\xd1\x80\xd0\xbd"
        "\xd1\x8b\xd0\xb9 \xd0\xbf\xd0\xbe\xd0\xb4\xd1\x80\xd0\xb0\xd0\xb7"
        "\xd0\xb4\xd0\xb5\xd0\xbb \xd0\xb8\xd1\x81\xd0\xba\xd1\x83\xd1\x81"
        "\xd1\x81\xd1\x82\xd0\xb2\xd0\xb5\xd0\xbd\xd0\xbd\xd0\xbe\xd0\xb3"
        "\xd0\xbe \xd0\xb8\xd0\xbd\xd1\x82\xd0\xb5\xd0\xbb\xd0\xbb\xd0"
        "\xb5\xd0\xba\xd1\x82\xd0\xb0, \xd0\xb8\xd0\xb7\xd1\x83\xd1\x87"
        "\xd0\xb0\xd1\x8e\xd1\x89\xd0\xb8\xd0\xb9 \xd0\xbc\xd0\xb5\xd1\x82"
        "\xd0\xbe\xd0\xb4\xd1\x8b \xd0\xbf\xd0\xbe\xd1\x81\xd1\x82\xd1\x80"
        "\xd0\xbe\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f \xd0\xb0\xd0\xbb\xd0\xb3"
        "\xd0\xbe\xd1\x80\xd0\xb8\xd1\x82\xd0\xbc\xd0\xbe\xd0\xb2, \xd1\x81"
        "\xd0\xbf\xd0\xbe\xd1\x81\xd0\xbe\xd0\xb1\xd0\xbd\xd1\x8b\xd1\x85 "
        "\xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb0\xd1\x82\xd1\x8c\xd1\x81\xd1"
        "\x8f.")

    vect = CountVectorizer()
    X_counted = vect.fit_transform([document])
    # 15 distinct tokens in the sentence.
    assert_equal(X_counted.shape, (1, 15))

    vect = HashingVectorizer(norm=None, non_negative=True)
    X_hashed = vect.transform([document])
    # Default hashing space is 2 ** 20 features.
    assert_equal(X_hashed.shape, (1, 2 ** 20))

    # No collisions on such a small dataset
    assert_equal(X_counted.nnz, X_hashed.nnz)

    # When norm is None and non_negative, the tokens are counted up to
    # collisions
    assert_array_equal(np.sort(X_counted.data), np.sort(X_hashed.data))
def test_tfidf_vectorizer_with_fixed_vocabulary():
    """Non regression smoke test for inheritance issues with a fixed vocab."""
    vocabulary = ['pizza', 'celeri']
    vect = TfidfVectorizer(vocabulary=vocabulary)
    # fit_transform and a subsequent transform must produce the same matrix.
    X_1 = vect.fit_transform(ALL_FOOD_DOCS)
    X_2 = vect.transform(ALL_FOOD_DOCS)
    assert_array_almost_equal(X_1.toarray(), X_2.toarray())
    assert_true(vect.fixed_vocabulary_)
def test_pickling_vectorizer():
    """Every vectorizer flavour must round-trip through pickle."""
    instances = [
        HashingVectorizer(),
        HashingVectorizer(norm='l1'),
        HashingVectorizer(binary=True),
        HashingVectorizer(ngram_range=(1, 2)),
        CountVectorizer(),
        CountVectorizer(preprocessor=strip_tags),
        CountVectorizer(analyzer=lazy_analyze),
        CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
        CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS),
        TfidfVectorizer(),
        TfidfVectorizer(analyzer=lazy_analyze),
        TfidfVectorizer().fit(JUNK_FOOD_DOCS),
    ]
    for orig in instances:
        copy = pickle.loads(pickle.dumps(orig))
        # The unpickled clone must preserve class, params and behaviour.
        assert_equal(type(copy), orig.__class__)
        assert_equal(copy.get_params(), orig.get_params())
        assert_array_equal(
            copy.fit_transform(JUNK_FOOD_DOCS).toarray(),
            orig.fit_transform(JUNK_FOOD_DOCS).toarray())
def test_stop_words_removal():
    """Clearing or deleting stop_words_ must not affect transform output."""
    fitted_vectorizers = (
        TfidfVectorizer().fit(JUNK_FOOD_DOCS),
        CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
        CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS)
    )
    for vect in fitted_vectorizers:
        reference = vect.transform(JUNK_FOOD_DOCS).toarray()

        # Setting the attribute to None must be harmless...
        vect.stop_words_ = None
        assert_array_equal(vect.transform(JUNK_FOOD_DOCS).toarray(),
                           reference)

        # ... and so must deleting it entirely.
        delattr(vect, 'stop_words_')
        assert_array_equal(vect.transform(JUNK_FOOD_DOCS).toarray(),
                           reference)
def test_pickling_transformer():
    """A fitted TfidfTransformer must survive a pickle round-trip."""
    X = CountVectorizer().fit_transform(JUNK_FOOD_DOCS)
    orig = TfidfTransformer().fit(X)
    copy = pickle.loads(pickle.dumps(orig))
    assert_equal(type(copy), orig.__class__)
    assert_array_equal(
        copy.fit_transform(X).toarray(),
        orig.fit_transform(X).toarray())
def test_non_unique_vocab():
    """Duplicate entries in a user-supplied vocabulary must be rejected."""
    vect = CountVectorizer(vocabulary=['a', 'b', 'c', 'a', 'a'])
    assert_raises(ValueError, vect.fit, [])
def test_hashingvectorizer_nan_in_docs():
    """A np.nan document (e.g. a pandas missing value) must raise ValueError."""
    # np.nan can appear when using pandas to load text fields from a csv file
    # with missing values.
    message = "np.nan is an invalid document, expected byte or unicode string."

    def fit_nan_doc():
        hv = HashingVectorizer()
        hv.fit_transform(['hello world', np.nan, 'hello hello'])

    assert_raise_message(ValueError, message, fit_nan_doc)
def test_tfidfvectorizer_binary():
    """Non-regression test: TfidfVectorizer used to ignore its "binary" param."""
    v = TfidfVectorizer(binary=True, use_idf=False, norm=None)
    assert_true(v.binary)

    # Both fit_transform and transform must clip counts to 0/1.
    docs = ['hello world', 'hello hello']
    expected = [1, 1, 1, 0]
    assert_array_equal(v.fit_transform(docs).toarray().ravel(), expected)
    assert_array_equal(v.transform(docs).toarray().ravel(), expected)
def test_tfidfvectorizer_export_idf():
    """The public idf_ property must mirror the inner transformer's idf_."""
    vect = TfidfVectorizer(use_idf=True)
    vect.fit(JUNK_FOOD_DOCS)
    assert_array_almost_equal(vect.idf_, vect._tfidf.idf_)
def test_vectorizer_vocab_clone():
    """clone() must preserve a user-fixed vocabulary."""
    vect_vocab = TfidfVectorizer(vocabulary=["the"])
    vect_vocab_clone = clone(vect_vocab)
    vect_vocab.fit(ALL_FOOD_DOCS)
    vect_vocab_clone.fit(ALL_FOOD_DOCS)
    # Fitting the clone independently must yield the same vocabulary_.
    assert_equal(vect_vocab_clone.vocabulary_, vect_vocab.vocabulary_)
| bsd-3-clause |
pratapvardhan/scikit-learn | examples/applications/wikipedia_principal_eigenvector.py | 16 | 7821 | """
===============================
Wikipedia principal eigenvector
===============================
A classical way to assert the relative importance of vertices in a
graph is to compute the principal eigenvector of the adjacency matrix
so as to assign to each vertex the values of the components of the first
eigenvector as a centrality score:
https://en.wikipedia.org/wiki/Eigenvector_centrality
On the graph of webpages and links those values are called the PageRank
scores by Google.
The goal of this example is to analyze the graph of links inside
wikipedia articles to rank articles by relative importance according to
this eigenvector centrality.
The traditional way to compute the principal eigenvector is to use the
power iteration method:
https://en.wikipedia.org/wiki/Power_iteration
Here the computation is achieved thanks to Martinsson's Randomized SVD
algorithm implemented in the scikit.
The graph data is fetched from the DBpedia dumps. DBpedia is an extraction
of the latent structured data of the Wikipedia content.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from __future__ import print_function
from bz2 import BZ2File
import os
from datetime import datetime
from pprint import pprint
from time import time
import numpy as np
from scipy import sparse
from sklearn.decomposition import randomized_svd
from sklearn.externals.joblib import Memory
from sklearn.externals.six.moves.urllib.request import urlopen
from sklearn.externals.six import iteritems
print(__doc__)
###############################################################################
# Where to download the data, if not already on disk
redirects_url = "http://downloads.dbpedia.org/3.5.1/en/redirects_en.nt.bz2"
redirects_filename = redirects_url.rsplit("/", 1)[1]
page_links_url = "http://downloads.dbpedia.org/3.5.1/en/page_links_en.nt.bz2"
page_links_filename = page_links_url.rsplit("/", 1)[1]
resources = [
(redirects_url, redirects_filename),
(page_links_url, page_links_filename),
]
for url, filename in resources:
if not os.path.exists(filename):
print("Downloading data from '%s', please wait..." % url)
opener = urlopen(url)
open(filename, 'wb').write(opener.read())
print()
###############################################################################
# Loading the redirect files
memory = Memory(cachedir=".")
def index(redirects, index_map, k):
    """Find the index of an article name after redirect resolution.

    The article name is first resolved through the *redirects* mapping;
    unseen names are assigned the next free integer index, memoized in
    *index_map*.
    """
    resolved = redirects.get(k, k)
    if resolved not in index_map:
        index_map[resolved] = len(index_map)
    return index_map[resolved]
DBPEDIA_RESOURCE_PREFIX_LEN = len("http://dbpedia.org/resource/")
SHORTNAME_SLICE = slice(DBPEDIA_RESOURCE_PREFIX_LEN + 1, -1)
def short_name(nt_uri):
    """Remove the < and > URI markers and the common URI prefix"""
    # SHORTNAME_SLICE drops the leading "<http://dbpedia.org/resource/"
    # and the trailing ">" in one slicing operation.
    return nt_uri[SHORTNAME_SLICE]
def get_redirects(redirects_filename):
    """Parse the redirections and build a transitively closed map out of it.

    Parameters
    ----------
    redirects_filename : str
        Path to the bzip2-compressed NT dump of the DBpedia redirects.

    Returns
    -------
    dict
        Maps each source article short name to its final redirect target
        (chains of redirects are collapsed to their last element).
    """
    redirects = {}
    print("Parsing the NT redirect file")
    # Use a context manager so the file handle is released even on error
    # (the original code leaked the BZ2File).
    with BZ2File(redirects_filename) as redirect_file:
        for l, line in enumerate(redirect_file):
            split = line.split()
            if len(split) != 4:
                print("ignoring malformed line: " + line)
                continue
            redirects[short_name(split[0])] = short_name(split[2])
            if l % 1000000 == 0:
                print("[%s] line: %08d" % (datetime.now().isoformat(), l))

    # compute the transitive closure
    print("Computing the transitive closure of the redirect relation")
    for l, source in enumerate(redirects.keys()):
        transitive_target = None
        target = redirects[source]
        # Track visited names to terminate on redirect cycles.
        seen = set([source])
        while True:
            transitive_target = target
            target = redirects.get(target)
            if target is None or target in seen:
                break
            seen.add(target)
        redirects[source] = transitive_target
        if l % 1000000 == 0:
            print("[%s] line: %08d" % (datetime.now().isoformat(), l))

    return redirects
# disabling joblib as the pickling of large dicts seems much too slow
#@memory.cache
def get_adjacency_matrix(redirects_filename, page_links_filename, limit=None):
    """Extract the adjacency graph as a scipy sparse matrix

    Redirects are resolved first.

    Returns X, the scipy sparse adjacency matrix, redirects as python
    dict from article names to article names and index_map a python dict
    from article names to python int (article indexes).
    """
    print("Computing the redirect map")
    redirects = get_redirects(redirects_filename)

    print("Computing the integer index map")
    index_map = dict()
    links = list()
    for l, line in enumerate(BZ2File(page_links_filename)):
        split = line.split()
        if len(split) != 4:
            print("ignoring malformed line: " + line)
            continue
        # Resolve both link endpoints through the redirect map before
        # assigning integer indices.
        i = index(redirects, index_map, short_name(split[0]))
        j = index(redirects, index_map, short_name(split[2]))
        links.append((i, j))
        if l % 1000000 == 0:
            print("[%s] line: %08d" % (datetime.now().isoformat(), l))

        # Optional cap on the number of parsed lines to bound memory use.
        if limit is not None and l >= limit - 1:
            break

    print("Computing the adjacency matrix")
    # LIL format supports efficient incremental assignment; converted to
    # CSR below for fast arithmetic.
    X = sparse.lil_matrix((len(index_map), len(index_map)), dtype=np.float32)
    for i, j in links:
        X[i, j] = 1.0
    del links
    print("Converting to CSR representation")
    X = X.tocsr()
    print("CSR conversion done")
    return X, redirects, index_map
# stop after 5M links to make it possible to work in RAM
X, redirects, index_map = get_adjacency_matrix(
redirects_filename, page_links_filename, limit=5000000)
names = dict((i, name) for name, i in iteritems(index_map))
print("Computing the principal singular vectors using randomized_svd")
t0 = time()
U, s, V = randomized_svd(X, 5, n_iter=3)
print("done in %0.3fs" % (time() - t0))
# print the names of the wikipedia related strongest components of the the
# principal singular vector which should be similar to the highest eigenvector
print("Top wikipedia pages according to principal singular vectors")
pprint([names[i] for i in np.abs(U.T[0]).argsort()[-10:]])
pprint([names[i] for i in np.abs(V[0]).argsort()[-10:]])
def centrality_scores(X, alpha=0.85, max_iter=100, tol=1e-10):
    """Power iteration computation of the principal eigenvector

    This method is also known as Google PageRank and the implementation
    is based on the one from the NetworkX project (BSD licensed too)
    with copyrights by:

      Aric Hagberg <hagberg@lanl.gov>
      Dan Schult <dschult@colgate.edu>
      Pieter Swart <swart@lanl.gov>
    """
    # NOTE(review): the row-normalization below indexes X.data via X.indptr,
    # so X is assumed to be in CSR format — confirm with callers.
    n = X.shape[0]
    X = X.copy()
    incoming_counts = np.asarray(X.sum(axis=1)).ravel()

    print("Normalizing the graph")
    # Scale each non-empty row so it sums to one.
    for i in incoming_counts.nonzero()[0]:
        X.data[X.indptr[i]:X.indptr[i + 1]] *= 1.0 / incoming_counts[i]
    # Rows with no links at all get a uniform weight of 1/n.
    dangle = np.asarray(np.where(X.sum(axis=1) == 0, 1.0 / n, 0)).ravel()

    scores = np.ones(n, dtype=np.float32) / n  # initial guess
    for i in range(max_iter):
        print("power iteration #%d" % i)
        prev_scores = scores
        scores = (alpha * (scores * X + np.dot(dangle, prev_scores))
                  + (1 - alpha) * prev_scores.sum() / n)
        # check convergence: normalized l_inf norm
        scores_max = np.abs(scores).max()
        if scores_max == 0.0:
            scores_max = 1.0
        err = np.abs(scores - prev_scores).max() / scores_max
        print("error: %0.6f" % err)
        if err < n * tol:
            return scores

    # Maximum number of iterations reached without convergence.
    return scores
print("Computing principal eigenvector score using a power iteration method")
t0 = time()
scores = centrality_scores(X, max_iter=100, tol=1e-10)
print("done in %0.3fs" % (time() - t0))
# Highest-scoring pages according to the PageRank-style centrality.
pprint([names[i] for i in np.abs(scores).argsort()[-10:]])
| bsd-3-clause |
SpaceKatt/CSPLN | apps/scaffolding/mac/web2py/web2py.app/Contents/Resources/lib/python2.7/matplotlib/axis.py | 2 | 72371 | """
Classes for the ticks and x and y axis
"""
from __future__ import division
from matplotlib import rcParams
import matplotlib.artist as artist
from matplotlib.artist import allow_rasterization
import matplotlib.cbook as cbook
import matplotlib.font_manager as font_manager
import matplotlib.lines as mlines
import matplotlib.patches as mpatches
import matplotlib.scale as mscale
import matplotlib.text as mtext
import matplotlib.ticker as mticker
import matplotlib.transforms as mtransforms
import matplotlib.units as munits
import numpy as np
GRIDLINE_INTERPOLATION_STEPS = 180
class Tick(artist.Artist):
    """
    Abstract base class for the axis ticks, grid lines and labels

    1 refers to the bottom of the plot for xticks and the left for yticks
    2 refers to the top of the plot for xticks and the right for yticks

    Publicly accessible attributes:

      :attr:`tick1line`
          a Line2D instance

      :attr:`tick2line`
          a Line2D instance

      :attr:`gridline`
          a Line2D instance

      :attr:`label1`
          a Text instance

      :attr:`label2`
          a Text instance

      :attr:`gridOn`
          a boolean which determines whether to draw the tickline

      :attr:`tick1On`
          a boolean which determines whether to draw the 1st tickline

      :attr:`tick2On`
          a boolean which determines whether to draw the 2nd tickline

      :attr:`label1On`
          a boolean which determines whether to draw tick label

      :attr:`label2On`
          a boolean which determines whether to draw tick label
    """
    def __init__(self, axes, loc, label,
                 size = None,  # points
                 width = None,
                 color = None,
                 tickdir = None,
                 pad = None,
                 labelsize = None,
                 labelcolor = None,
                 zorder = None,
                 gridOn = None, # defaults to axes.grid
                 tick1On = True,
                 tick2On = True,
                 label1On = True,
                 label2On = False,
                 major = True,
                 ):
        """
        bbox is the Bound2D bounding box in display coords of the Axes
        loc is the tick location in data coords
        size is the tick size in points
        """
        artist.Artist.__init__(self)

        if gridOn is None:
            gridOn = rcParams['axes.grid']

        self.set_figure(axes.figure)
        self.axes = axes

        # __name__ ('xtick'/'ytick') selects the rcParams family below.
        name = self.__name__.lower()
        self._name = name

        self._loc = loc

        # Each unspecified style parameter falls back to the matching
        # major/minor rcParams entry.
        if size is None:
            if major:
                size = rcParams['%s.major.size'%name]
            else:
                size = rcParams['%s.minor.size'%name]
        self._size = size

        if width is None:
            if major:
                width = rcParams['%s.major.width'%name]
            else:
                width = rcParams['%s.minor.width'%name]
        self._width = width

        if color is None:
            color = rcParams['%s.color' % name]
        self._color = color

        if pad is None:
            if major:
                pad = rcParams['%s.major.pad'%name]
            else:
                pad = rcParams['%s.minor.pad'%name]
        self._base_pad = pad

        if labelcolor is None:
            labelcolor = rcParams['%s.color' % name]
        self._labelcolor = labelcolor

        if labelsize is None:
            labelsize = rcParams['%s.labelsize' % name]
        self._labelsize = labelsize

        if zorder is None:
            # Major ticks get a slightly higher zorder than minor ones.
            if major:
                zorder = mlines.Line2D.zorder + 0.01
            else:
                zorder = mlines.Line2D.zorder
        self._zorder = zorder

        # Sets self._pad and self._tickmarkers (subclass responsibility).
        self.apply_tickdir(tickdir)

        self.tick1line = self._get_tick1line()
        self.tick2line = self._get_tick2line()
        self.gridline = self._get_gridline()

        self.label1 = self._get_text1()
        self.label = self.label1  # legacy name
        self.label2 = self._get_text2()

        self.gridOn = gridOn
        self.tick1On = tick1On
        self.tick2On = tick2On
        self.label1On = label1On
        self.label2On = label2On

        self.update_position(loc)

    def apply_tickdir(self, tickdir):
        """
        Calculate self._pad and self._tickmarkers
        """
        # Overridden by XTick/YTick.
        pass

    def get_children(self):
        # All sub-artists owned by this tick.
        children = [self.tick1line, self.tick2line, self.gridline, self.label1, self.label2]
        return children

    def set_clip_path(self, clippath, transform=None):
        artist.Artist.set_clip_path(self, clippath, transform)
        # Only the grid line is clipped; the tick marks themselves are not.
        #self.tick1line.set_clip_path(clippath, transform)
        #self.tick2line.set_clip_path(clippath, transform)
        self.gridline.set_clip_path(clippath, transform)
    set_clip_path.__doc__ = artist.Artist.set_clip_path.__doc__

    def get_pad_pixels(self):
        # Convert the base pad from points to pixels at the figure dpi.
        return self.figure.dpi * self._base_pad / 72.0

    def contains(self, mouseevent):
        """
        Test whether the mouse event occurred in the Tick marks.

        This function always returns false.  It is more useful to test if the
        axis as a whole contains the mouse rather than the set of tick marks.
        """
        if callable(self._contains): return self._contains(self,mouseevent)
        return False,{}

    def set_pad(self, val):
        """
        Set the tick label pad in points

        ACCEPTS: float
        """
        self._apply_params(pad=val)

    def get_pad(self):
        'Get the value of the tick label pad in points'
        return self._base_pad

    def _get_text1(self):
        'Get the default Text 1 instance'
        pass

    def _get_text2(self):
        'Get the default Text 2 instance'
        pass

    def _get_tick1line(self):
        'Get the default line2D instance for tick1'
        pass

    def _get_tick2line(self):
        'Get the default line2D instance for tick2'
        pass

    def _get_gridline(self):
        'Get the default grid Line2d instance for this tick'
        pass

    def get_loc(self):
        'Return the tick location (data coords) as a scalar'
        return self._loc

    @allow_rasterization
    def draw(self, renderer):
        if not self.get_visible(): return
        renderer.open_group(self.__name__)
        # Only render artists when the tick lies inside the view interval.
        midPoint = mtransforms.interval_contains(self.get_view_interval(), self.get_loc())

        if midPoint:
            if self.gridOn:
                self.gridline.draw(renderer)
            if self.tick1On:
                self.tick1line.draw(renderer)
            if self.tick2On:
                self.tick2line.draw(renderer)

            if self.label1On:
                self.label1.draw(renderer)
            if self.label2On:
                self.label2.draw(renderer)

        renderer.close_group(self.__name__)

    def set_label1(self, s):
        """
        Set the text of ticklabel

        ACCEPTS: str
        """
        self.label1.set_text(s)
    set_label = set_label1

    def set_label2(self, s):
        """
        Set the text of ticklabel2

        ACCEPTS: str
        """
        self.label2.set_text(s)

    def _set_artist_props(self, a):
        a.set_figure(self.figure)
        #if isinstance(a, mlines.Line2D): a.set_clip_box(self.axes.bbox)

    def get_view_interval(self):
        'return the view Interval instance for the axis this tick is ticking'
        raise NotImplementedError('Derived must override')

    def _apply_params(self, **kw):
        # Boolean visibility switches are plain attributes; pop them first.
        switchkw = ['gridOn', 'tick1On', 'tick2On', 'label1On', 'label2On']
        switches = [k for k in kw if k in switchkw]
        for k in switches:
            setattr(self, k, kw.pop(k))
        # Pad/tickdir changes require rebuilding transforms and markers.
        dirpad = [k for k in kw if k in ['pad', 'tickdir']]
        if dirpad:
            self._base_pad = kw.pop('pad', self._base_pad)
            self.apply_tickdir(kw.pop('tickdir', self._tickdir))
            trans = self._get_text1_transform()[0]
            self.label1.set_transform(trans)
            trans = self._get_text2_transform()[0]
            self.label2.set_transform(trans)
            self.tick1line.set_marker(self._tickmarkers[0])
            self.tick2line.set_marker(self._tickmarkers[1])
        # Forward color/zorder to both tick lines and cache on self.
        tick_kw = dict([kv for kv in kw.items()
                        if kv[0] in ['color', 'zorder']])
        if tick_kw:
            self.tick1line.set(**tick_kw)
            self.tick2line.set(**tick_kw)
            for k, v in tick_kw.items():
                setattr(self, '_'+k, v)
        # Size maps to marker size, width to marker edge width.
        tick_list = [kv for kv in kw.items() if kv[0] in ['size', 'width']]
        for k, v in tick_list:
            setattr(self, '_'+k, v)
            if k == 'size':
                self.tick1line.set_markersize(v)
                self.tick2line.set_markersize(v)
            else:
                self.tick1line.set_markeredgewidth(v)
                self.tick2line.set_markeredgewidth(v)
        # labelsize/labelcolor: strip the 'label' prefix and forward to
        # both Text instances.
        label_list = [k for k in kw.items()
                      if k[0] in ['labelsize', 'labelcolor']]
        if label_list:
            label_kw = dict([(k[5:], v) for (k, v) in label_list])
            self.label1.set(**label_kw)
            self.label2.set(**label_kw)
            for k, v in label_kw.items():
                setattr(self, '_'+k, v)
class XTick(Tick):
    """
    Contains all the Artists needed to make an x tick - the tick line,
    the label text and the grid line
    """
    __name__ = 'xtick'

    def _get_text1_transform(self):
        # Returns (transform, vertical alignment, horizontal alignment).
        return self.axes.get_xaxis_text1_transform(self._pad)

    def _get_text2_transform(self):
        return self.axes.get_xaxis_text2_transform(self._pad)

    def apply_tickdir(self, tickdir):
        # Resolve the direction from rcParams when unspecified.
        if tickdir is None:
            tickdir = rcParams['%s.direction' % self._name]
        self._tickdir = tickdir

        if self._tickdir == 'in':
            self._tickmarkers = (mlines.TICKUP, mlines.TICKDOWN)
            self._pad = self._base_pad
        else:
            # Outward ticks push the label out by the tick size as well.
            self._tickmarkers = (mlines.TICKDOWN, mlines.TICKUP)
            self._pad = self._base_pad + self._size

    def _get_text1(self):
        'Get the default Text instance'
        # the y loc is 3 points below the min of y axis
        # get the affine as an a,b,c,d,tx,ty list
        # x in data coords, y in axes coords
        #t = mtext.Text(
        trans, vert, horiz = self._get_text1_transform()
        t = mtext.Text(
            x=0, y=0,
            fontproperties=font_manager.FontProperties(size=self._labelsize),
            color=self._labelcolor,
            verticalalignment=vert,
            horizontalalignment=horiz,
            )
        t.set_transform(trans)
        self._set_artist_props(t)
        return t

    def _get_text2(self):
        'Get the default Text 2 instance'
        # x in data coords, y in axes coords
        #t = mtext.Text(
        trans, vert, horiz = self._get_text2_transform()
        t = mtext.Text(
            x=0, y=1,
            fontproperties=font_manager.FontProperties(size=self._labelsize),
            color=self._labelcolor,
            verticalalignment=vert,
            horizontalalignment=horiz,
            )
        t.set_transform(trans)
        self._set_artist_props(t)
        return t

    def _get_tick1line(self):
        'Get the default line2D instance'
        # x in data coords, y in axes coords
        l = mlines.Line2D(xdata=(0,), ydata=(0,),
                          color=self._color,
                          linestyle = 'None',
                          marker = self._tickmarkers[0],
                          markersize=self._size,
                          markeredgewidth=self._width,
                          zorder=self._zorder,
                          )
        l.set_transform(self.axes.get_xaxis_transform(which='tick1'))
        self._set_artist_props(l)
        return l

    def _get_tick2line(self):
        'Get the default line2D instance'
        # x in data coords, y in axes coords
        l = mlines.Line2D( xdata=(0,), ydata=(1,),
                           color=self._color,
                           linestyle = 'None',
                           marker = self._tickmarkers[1],
                           markersize=self._size,
                           markeredgewidth=self._width,
                           zorder=self._zorder,
                           )
        l.set_transform(self.axes.get_xaxis_transform(which='tick2'))
        self._set_artist_props(l)
        return l

    def _get_gridline(self):
        'Get the default line2D instance'
        # x in data coords, y in axes coords
        l = mlines.Line2D(xdata=(0.0, 0.0), ydata=(0, 1.0),
                          color=rcParams['grid.color'],
                          linestyle=rcParams['grid.linestyle'],
                          linewidth=rcParams['grid.linewidth'],
                          )
        l.set_transform(self.axes.get_xaxis_transform(which='grid'))
        l.get_path()._interpolation_steps = GRIDLINE_INTERPOLATION_STEPS
        self._set_artist_props(l)
        return l

    def update_position(self, loc):
        'Set the location of tick in data coords with scalar *loc*'
        x = loc
        # True when either axis uses a non-linear scale.
        nonlinear = (hasattr(self.axes, 'yaxis') and
                     self.axes.yaxis.get_scale() != 'linear' or
                     hasattr(self.axes, 'xaxis') and
                     self.axes.xaxis.get_scale() != 'linear')

        if self.tick1On:
            self.tick1line.set_xdata((x,))
        if self.tick2On:
            self.tick2line.set_xdata((x,))
        if self.gridOn:
            self.gridline.set_xdata((x,))
        if self.label1On:
            self.label1.set_x(x)
        if self.label2On:
            self.label2.set_x(x)

        if nonlinear:
            # NOTE(review): _invalid appears to force the cached transformed
            # paths to be recomputed on the next draw — confirm.
            self.tick1line._invalid = True
            self.tick2line._invalid = True
            self.gridline._invalid = True

        self._loc = loc

    def get_view_interval(self):
        'return the Interval instance for this axis view limits'
        return self.axes.viewLim.intervalx
class YTick(Tick):
    """
    Contains all the Artists needed to make a Y tick - the tick line,
    the label text and the grid line
    """
    __name__ = 'ytick'

    def _get_text1_transform(self):
        # Returns (transform, vertical alignment, horizontal alignment).
        return self.axes.get_yaxis_text1_transform(self._pad)

    def _get_text2_transform(self):
        return self.axes.get_yaxis_text2_transform(self._pad)

    def apply_tickdir(self, tickdir):
        # Resolve the direction from rcParams when unspecified.
        if tickdir is None:
            tickdir = rcParams['%s.direction' % self._name]
        self._tickdir = tickdir

        if self._tickdir == 'in':
            self._tickmarkers = (mlines.TICKRIGHT, mlines.TICKLEFT)
            self._pad = self._base_pad
        else:
            # Outward ticks push the label out by the tick size as well.
            self._tickmarkers = (mlines.TICKLEFT, mlines.TICKRIGHT)
            self._pad = self._base_pad + self._size

    # how far from the y axis line the right of the ticklabel are
    def _get_text1(self):
        'Get the default Text instance'
        # x in axes coords, y in data coords
        trans, vert, horiz = self._get_text1_transform()
        t = mtext.Text(
            x=0, y=0,
            fontproperties=font_manager.FontProperties(size=self._labelsize),
            color=self._labelcolor,
            verticalalignment=vert,
            horizontalalignment=horiz,
            )
        t.set_transform(trans)
        #t.set_transform( self.axes.transData )
        self._set_artist_props(t)
        return t

    def _get_text2(self):
        'Get the default Text instance'
        # x in axes coords, y in data coords
        trans, vert, horiz = self._get_text2_transform()
        t = mtext.Text(
            x=1, y=0,
            fontproperties=font_manager.FontProperties(size=self._labelsize),
            color=self._labelcolor,
            verticalalignment=vert,
            horizontalalignment=horiz,
            )
        t.set_transform(trans)
        self._set_artist_props(t)
        return t

    def _get_tick1line(self):
        'Get the default line2D instance'
        # x in axes coords, y in data coords
        l = mlines.Line2D( (0,), (0,),
                           color=self._color,
                           marker = self._tickmarkers[0],
                           linestyle = 'None',
                           markersize=self._size,
                           markeredgewidth=self._width,
                           zorder=self._zorder,
                           )
        l.set_transform(self.axes.get_yaxis_transform(which='tick1'))
        self._set_artist_props(l)
        return l

    def _get_tick2line(self):
        'Get the default line2D instance'
        # x in axes coords, y in data coords
        l = mlines.Line2D( (1,), (0,),
                           color=self._color,
                           marker = self._tickmarkers[1],
                           linestyle = 'None',
                           markersize=self._size,
                           markeredgewidth=self._width,
                           zorder=self._zorder,
                           )
        l.set_transform(self.axes.get_yaxis_transform(which='tick2'))
        self._set_artist_props(l)
        return l

    def _get_gridline(self):
        'Get the default line2D instance'
        # x in axes coords, y in data coords
        l = mlines.Line2D( xdata=(0,1), ydata=(0, 0),
                           color=rcParams['grid.color'],
                           linestyle=rcParams['grid.linestyle'],
                           linewidth=rcParams['grid.linewidth'],
                           )
        l.set_transform(self.axes.get_yaxis_transform(which='grid'))
        l.get_path()._interpolation_steps = GRIDLINE_INTERPOLATION_STEPS
        self._set_artist_props(l)
        return l

    def update_position(self, loc):
        'Set the location of tick in data coords with scalar loc'
        y = loc
        # True when either axis uses a non-linear scale.
        nonlinear = (hasattr(self.axes, 'yaxis') and
                     self.axes.yaxis.get_scale() != 'linear' or
                     hasattr(self.axes, 'xaxis') and
                     self.axes.xaxis.get_scale() != 'linear')

        if self.tick1On:
            self.tick1line.set_ydata((y,))
        if self.tick2On:
            self.tick2line.set_ydata((y,))
        if self.gridOn:
            self.gridline.set_ydata((y, ))
        if self.label1On:
            self.label1.set_y( y )
        if self.label2On:
            self.label2.set_y( y )

        if nonlinear:
            # NOTE(review): _invalid appears to force the cached transformed
            # paths to be recomputed on the next draw — confirm.
            self.tick1line._invalid = True
            self.tick2line._invalid = True
            self.gridline._invalid = True

        self._loc = loc

    def get_view_interval(self):
        'return the Interval instance for this axis view limits'
        return self.axes.viewLim.intervaly
class Ticker:
    """Simple container pairing a tick Locator with its Formatter."""
    # Both slots are populated later via Axis.set_major_locator /
    # set_major_formatter (and their minor counterparts).
    locator = None
    formatter = None
class Axis(artist.Artist):
    """
    Base class for XAxis and YAxis; owns the ticks, tick labels, axis
    label, offset text, locators/formatters and unit conversion state.

    Public attributes
    * :attr:`axes.transData` - transform data coords to display coords
    * :attr:`axes.transAxes` - transform axis coords to display coords
    * :attr:`labelpad` - number of points between the axis and its label
    """
    # padding, in points, between the tick labels and the offset text
    OFFSETTEXTPAD = 3
    def __str__(self):
        # identify the axis by its display-space origin, for debugging
        return self.__class__.__name__ \
            + "(%f,%f)"%tuple(self.axes.transAxes.transform_point((0,0)))
    def __init__(self, axes, pickradius=15):
        """
        Init the axis with the parent Axes instance
        """
        artist.Artist.__init__(self)
        self.set_figure(axes.figure)
        # Keep track of setting to the default value, this allows use to know
        # if any of the following values is explicitly set by the user, so as
        # to not overwrite their settings with any of our 'auto' settings.
        self.isDefault_majloc = True
        self.isDefault_minloc = True
        self.isDefault_majfmt = True
        self.isDefault_minfmt = True
        self.isDefault_label = True
        self.axes = axes
        # major/minor Ticker containers (locator + formatter pairs)
        self.major = Ticker()
        self.minor = Ticker()
        self.callbacks = cbook.CallbackRegistry()
        #class dummy:
        #    locator = None
        #    formatter = None
        #self.major = dummy()
        #self.minor = dummy()
        self._autolabelpos = True
        self._smart_bounds = False
        self.label = self._get_label()
        # distance, in points, between the axis and its label
        self.labelpad = 5
        self.offsetText = self._get_offset_text()
        self.majorTicks = []
        self.minorTicks = []
        self.pickradius = pickradius
        # Initialize here for testing; later add API
        self._major_tick_kw = dict()
        self._minor_tick_kw = dict()
        self.cla()
        self.set_scale('linear')
    def set_label_coords(self, x, y, transform=None):
        """
        Set the coordinates of the label. By default, the x
        coordinate of the y label is determined by the tick label
        bounding boxes, but this can lead to poor alignment of
        multiple ylabels if there are multiple axes. Ditto for the y
        coodinate of the x label.
        You can also specify the coordinate system of the label with
        the transform. If None, the default coordinate system will be
        the axes coordinate system (0,0) is (left,bottom), (0.5, 0.5)
        is middle, etc
        """
        # an explicit position disables the automatic repositioning
        # performed in _update_label_position at draw time
        self._autolabelpos = False
        if transform is None:
            transform = self.axes.transAxes
        self.label.set_transform(transform)
        self.label.set_position((x, y))
    def get_transform(self):
        'return the Transform instance used by this axis scale'
        return self._scale.get_transform()
    def get_scale(self):
        'return the name of this axis scale, eg "linear" or "log"'
        return self._scale.name
    def set_scale(self, value, **kwargs):
        'set the axis scale by name; resets locators/formatters to the scale defaults'
        self._scale = mscale.scale_factory(value, self, **kwargs)
        self._scale.set_default_locators_and_formatters(self)
        self.isDefault_majloc = True
        self.isDefault_minloc = True
        self.isDefault_majfmt = True
        self.isDefault_minfmt = True
    def limit_range_for_scale(self, vmin, vmax):
        'clip (vmin, vmax) to a range the current scale can represent'
        return self._scale.limit_range_for_scale(vmin, vmax, self.get_minpos())
    def get_children(self):
        'return the child artists: axis label, offset text and all ticks'
        children = [self.label, self.offsetText]
        majorticks = self.get_major_ticks()
        minorticks = self.get_minor_ticks()
        children.extend(majorticks)
        children.extend(minorticks)
        return children
    def cla(self):
        'clear the current axis'
        self.set_major_locator(mticker.AutoLocator())
        self.set_major_formatter(mticker.ScalarFormatter())
        self.set_minor_locator(mticker.NullLocator())
        self.set_minor_formatter(mticker.NullFormatter())
        self.set_label_text('')
        self._set_artist_props(self.label)
        # Keep track of setting to the default value, this allows use to know
        # if any of the following values is explicitly set by the user, so as
        # to not overwrite their settings with any of our 'auto' settings.
        self.isDefault_majloc = True
        self.isDefault_minloc = True
        self.isDefault_majfmt = True
        self.isDefault_minfmt = True
        self.isDefault_label = True
        # Clear the callback registry for this axis, or it may "leak"
        self.callbacks = cbook.CallbackRegistry()
        # whether the grids are on
        self._gridOnMajor = rcParams['axes.grid']
        self._gridOnMinor = False
        self.label.set_text('')
        self._set_artist_props(self.label)
        self.reset_ticks()
        # unit-conversion state; see update_units / set_units
        self.converter = None
        self.units = None
        self.set_units(None)
    def reset_ticks(self):
        # build a few default ticks; grow as necessary later; only
        # define 1 so properties set on ticks will be copied as they
        # grow
        cbook.popall(self.majorTicks)
        cbook.popall(self.minorTicks)
        self.majorTicks.extend([self._get_tick(major=True)])
        self.minorTicks.extend([self._get_tick(major=False)])
        self._lastNumMajorTicks = 1
        self._lastNumMinorTicks = 1
    def set_tick_params(self, which='major', reset=False, **kw):
        """
        Set appearance parameters for ticks and ticklabels.
        For documentation of keyword arguments, see
        :meth:`matplotlib.axes.Axes.tick_params`.
        """
        dicts = []
        if which == 'major' or which == 'both':
            dicts.append(self._major_tick_kw)
        if which == 'minor' or which == 'both':
            dicts.append(self._minor_tick_kw)
        kwtrans = self._translate_tick_kw(kw, to_init_kw=True)
        for d in dicts:
            if reset:
                d.clear()
            d.update(kwtrans)
        if reset:
            # rebuild ticks from scratch so only the new params apply
            self.reset_ticks()
        else:
            # push the merged params onto the already-existing ticks
            if which == 'major' or which == 'both':
                for tick in self.majorTicks:
                    tick._apply_params(**self._major_tick_kw)
            if which == 'minor' or which == 'both':
                for tick in self.minorTicks:
                    tick._apply_params(**self._minor_tick_kw)
    @staticmethod
    def _translate_tick_kw(kw, to_init_kw=True):
        # We may want to move the following function to
        # a more visible location; or maybe there already
        # is something like this.
        def _bool(arg):
            # accept 'on'/'off' strings as well as truthy values
            if cbook.is_string_like(arg):
                if arg.lower() == 'on':
                    return True
                if arg.lower() == 'off':
                    return False
                raise ValueError('String "%s" should be "on" or "off"' % arg)
            return bool(arg)
        # The following lists may be moved to a more
        # accessible location.
        kwkeys0 = ['size', 'width', 'color', 'tickdir', 'pad',
                   'labelsize', 'labelcolor', 'zorder', 'gridOn',
                   'tick1On', 'tick2On', 'label1On', 'label2On']
        kwkeys1 = ['length', 'direction', 'left', 'bottom', 'right', 'top',
                   'labelleft', 'labelbottom', 'labelright', 'labeltop']
        kwkeys = kwkeys0 + kwkeys1
        kwtrans = dict()
        if to_init_kw:
            # map user-facing tick_params() names to Tick.__init__ kwargs
            if 'length' in kw:
                kwtrans['size'] = kw.pop('length')
            if 'direction' in kw:
                kwtrans['tickdir'] = kw.pop('direction')
            if 'left' in kw:
                kwtrans['tick1On'] = _bool(kw.pop('left'))
            if 'bottom' in kw:
                kwtrans['tick1On'] = _bool(kw.pop('bottom'))
            if 'right' in kw:
                kwtrans['tick2On'] = _bool(kw.pop('right'))
            if 'top' in kw:
                kwtrans['tick2On'] = _bool(kw.pop('top'))
            if 'labelleft' in kw:
                kwtrans['label1On'] = _bool(kw.pop('labelleft'))
            if 'labelbottom' in kw:
                kwtrans['label1On'] = _bool(kw.pop('labelbottom'))
            if 'labelright' in kw:
                kwtrans['label2On'] = _bool(kw.pop('labelright'))
            if 'labeltop' in kw:
                kwtrans['label2On'] = _bool(kw.pop('labeltop'))
            if 'colors' in kw:
                # 'colors' sets both the tick and the label color
                c = kw.pop('colors')
                kwtrans['color'] = c
                kwtrans['labelcolor'] = c
            # Maybe move the checking up to the caller of this method.
            for key in kw:
                if key not in kwkeys:
                    raise ValueError(
                        "keyword %s is not recognized; valid keywords are %s"
                        % (key, kwkeys))
            kwtrans.update(kw)
        else:
            raise NotImplementedError("Inverse translation is deferred")
        return kwtrans
    def set_clip_path(self, clippath, transform=None):
        'propagate the clip path to the axis and all of its ticks'
        artist.Artist.set_clip_path(self, clippath, transform)
        # NOTE(review): majorticks/minorticks below are assigned but never
        # used; the loop iterates the raw tick lists directly.
        majorticks = self.get_major_ticks()
        minorticks = self.get_minor_ticks()
        for child in self.majorTicks + self.minorTicks:
            child.set_clip_path(clippath, transform)
    def get_view_interval(self):
        'return the Interval instance for this axis view limits'
        raise NotImplementedError('Derived must override')
    def set_view_interval(self, vmin, vmax, ignore=False):
        'set the axis view limits; derived classes define *ignore* semantics'
        raise NotImplementedError('Derived must override')
    def get_data_interval(self):
        'return the Interval instance for this axis data limits'
        raise NotImplementedError('Derived must override')
    def set_data_interval(self):
        'set the axis data limits'
        raise NotImplementedError('Derived must override')
    def set_default_intervals(self):
        'set the default limits for the axis data and view interval if they are not mutated'
        # this is mainly in support of custom object plotting. For
        # example, if someone passes in a datetime object, we do not
        # know automagically how to set the default min/max of the
        # data and view limits. The unit conversion AxisInfo
        # interface provides a hook for custom types to register
        # default limits through the AxisInfo.default_limits
        # attribute, and the derived code below will check for that
        # and use it if is available (else just use 0..1)
        pass
    def _set_artist_props(self, a):
        # attach our figure to child artist *a*; tolerates None
        if a is None: return
        a.set_figure(self.figure)
    def iter_ticks(self):
        """
        Iterate through all of the major and minor ticks.

        Yields (tick, location, label-string) triples, majors first.
        """
        majorLocs = self.major.locator()
        majorTicks = self.get_major_ticks(len(majorLocs))
        self.major.formatter.set_locs(majorLocs)
        majorLabels = [self.major.formatter(val, i) for i, val in enumerate(majorLocs)]
        minorLocs = self.minor.locator()
        minorTicks = self.get_minor_ticks(len(minorLocs))
        self.minor.formatter.set_locs(minorLocs)
        minorLabels = [self.minor.formatter(val, i) for i, val in enumerate(minorLocs)]
        major_minor = [
            (majorTicks, majorLocs, majorLabels),
            (minorTicks, minorLocs, minorLabels)]
        for group in major_minor:
            for tick in zip(*group):
                yield tick
    def get_ticklabel_extents(self, renderer):
        """
        Get the extents of the tick labels on either side
        of the axes.
        """
        ticks_to_draw = self._update_ticks(renderer)
        ticklabelBoxes, ticklabelBoxes2 = self._get_tick_bboxes(ticks_to_draw, renderer)
        if len(ticklabelBoxes):
            bbox = mtransforms.Bbox.union(ticklabelBoxes)
        else:
            # no labels on this side: degenerate zero-size bbox
            bbox = mtransforms.Bbox.from_extents(0, 0, 0, 0)
        if len(ticklabelBoxes2):
            bbox2 = mtransforms.Bbox.union(ticklabelBoxes2)
        else:
            bbox2 = mtransforms.Bbox.from_extents(0, 0, 0, 0)
        return bbox, bbox2
    def set_smart_bounds(self,value):
        """set the axis to have smart bounds"""
        self._smart_bounds = value
    def get_smart_bounds(self):
        """get whether the axis has smart bounds"""
        return self._smart_bounds
    def _update_ticks(self, renderer):
        """
        Update ticks (position and labels) using the current data
        interval of the axes. Returns a list of ticks that will be
        drawn.
        """
        interval = self.get_view_interval()
        tick_tups = [ t for t in self.iter_ticks()]
        if self._smart_bounds:
            # with smart bounds, clip the drawn ticks to the span of
            # the data rather than the full view interval
            # handle inverted limits
            view_low, view_high = min(*interval), max(*interval)
            data_low, data_high = self.get_data_interval()
            if data_low > data_high:
                data_low, data_high = data_high, data_low
            locs = [ti[1] for ti in tick_tups]
            locs.sort()
            locs = np.array(locs)
            if len(locs):
                if data_low <= view_low:
                    # data extends beyond view, take view as limit
                    ilow = view_low
                else:
                    # data stops within view, take best tick
                    cond = locs <= data_low
                    good_locs = locs[cond]
                    if len(good_locs) > 0:
                        # last tick prior or equal to first data point
                        ilow = good_locs[-1]
                    else:
                        # No ticks (why not?), take first tick
                        ilow = locs[0]
                if data_high >= view_high:
                    # data extends beyond view, take view as limit
                    ihigh = view_high
                else:
                    # data stops within view, take best tick
                    cond = locs >= data_high
                    good_locs = locs[cond]
                    if len(good_locs) > 0:
                        # first tick after or equal to last data point
                        ihigh = good_locs[0]
                    else:
                        # No ticks (why not?), take last tick
                        ihigh = locs[-1]
                tick_tups = [ ti for ti in tick_tups
                              if (ti[1] >= ilow) and (ti[1] <= ihigh)]
        ticks_to_draw = []
        for tick, loc, label in tick_tups:
            if tick is None: continue
            # skip ticks that fall outside the current view interval
            if not mtransforms.interval_contains(interval, loc): continue
            tick.update_position(loc)
            tick.set_label1(label)
            tick.set_label2(label)
            ticks_to_draw.append(tick)
        return ticks_to_draw
    def _get_tick_bboxes(self, ticks, renderer):
        """
        Given the list of ticks, return two lists of bboxes. One for
        tick label1's and another for tick label2's.
        """
        ticklabelBoxes = []
        ticklabelBoxes2 = []
        for tick in ticks:
            if tick.label1On and tick.label1.get_visible():
                extent = tick.label1.get_window_extent(renderer)
                ticklabelBoxes.append(extent)
            if tick.label2On and tick.label2.get_visible():
                extent = tick.label2.get_window_extent(renderer)
                ticklabelBoxes2.append(extent)
        return ticklabelBoxes, ticklabelBoxes2
    def get_tightbbox(self, renderer):
        """
        Return a bounding box that encloses the axis. It only accounts
        tick labels, axis label, and offsetText.

        Returns None when the axis is not visible or has no finite extent.
        """
        if not self.get_visible(): return
        ticks_to_draw = self._update_ticks(renderer)
        ticklabelBoxes, ticklabelBoxes2 = self._get_tick_bboxes(ticks_to_draw, renderer)
        self._update_label_position(ticklabelBoxes, ticklabelBoxes2)
        self._update_offset_text_position(ticklabelBoxes, ticklabelBoxes2)
        self.offsetText.set_text( self.major.formatter.get_offset() )
        bb = []
        for a in [self.label, self.offsetText]:
            if a.get_visible():
                bb.append(a.get_window_extent(renderer))
        bb.extend(ticklabelBoxes)
        bb.extend(ticklabelBoxes2)
        #self.offsetText
        # drop degenerate (zero-area) boxes before taking the union
        bb = [b for b in bb if b.width!=0 or b.height!=0]
        if bb:
            _bbox = mtransforms.Bbox.union(bb)
            return _bbox
        else:
            return None
    @allow_rasterization
    def draw(self, renderer, *args, **kwargs):
        'Draw the axis lines, grid lines, tick lines and labels'
        if not self.get_visible(): return
        renderer.open_group(__name__)
        ticks_to_draw = self._update_ticks(renderer)
        ticklabelBoxes, ticklabelBoxes2 = self._get_tick_bboxes(ticks_to_draw, renderer)
        for tick in ticks_to_draw:
            tick.draw(renderer)
        # scale up the axis label box to also find the neighbors, not
        # just the tick labels that actually overlap note we need a
        # *copy* of the axis label box because we don't wan't to scale
        # the actual bbox
        self._update_label_position(ticklabelBoxes, ticklabelBoxes2)
        self.label.draw(renderer)
        self._update_offset_text_position(ticklabelBoxes, ticklabelBoxes2)
        self.offsetText.set_text( self.major.formatter.get_offset() )
        self.offsetText.draw(renderer)
        # NOTE(review): dead debug code below -- 'majorTicks' is not
        # defined in this scope, so enabling the block would raise
        # NameError; consider deleting it.
        if 0: # draw the bounding boxes around the text for debug
            for tick in majorTicks:
                label = tick.label1
                mpatches.bbox_artist(label, renderer)
            mpatches.bbox_artist(self.label, renderer)
        renderer.close_group(__name__)
    def _get_label(self):
        'build the axis label Text artist (derived classes position it)'
        raise NotImplementedError('Derived must override')
    def _get_offset_text(self):
        'build the offset-text Text artist (derived classes position it)'
        raise NotImplementedError('Derived must override')
    def get_gridlines(self):
        'Return the grid lines as a list of Line2D instance'
        ticks = self.get_major_ticks()
        return cbook.silent_list('Line2D gridline', [tick.gridline for tick in ticks])
    def get_label(self):
        'Return the axis label as a Text instance'
        return self.label
    def get_offset_text(self):
        'Return the axis offsetText as a Text instance'
        return self.offsetText
    def get_pickradius(self):
        'Return the depth of the axis used by the picker'
        return self.pickradius
    def get_majorticklabels(self):
        'Return a list of Text instances for the major ticklabels'
        ticks = self.get_major_ticks()
        labels1 = [tick.label1 for tick in ticks if tick.label1On]
        labels2 = [tick.label2 for tick in ticks if tick.label2On]
        return cbook.silent_list('Text major ticklabel', labels1+labels2)
    def get_minorticklabels(self):
        'Return a list of Text instances for the minor ticklabels'
        ticks = self.get_minor_ticks()
        labels1 = [tick.label1 for tick in ticks if tick.label1On]
        labels2 = [tick.label2 for tick in ticks if tick.label2On]
        return cbook.silent_list('Text minor ticklabel', labels1+labels2)
    def get_ticklabels(self, minor=False):
        'Return a list of Text instances for ticklabels'
        if minor:
            return self.get_minorticklabels()
        return self.get_majorticklabels()
    def get_majorticklines(self):
        'Return the major tick lines as a list of Line2D instances'
        lines = []
        ticks = self.get_major_ticks()
        for tick in ticks:
            lines.append(tick.tick1line)
            lines.append(tick.tick2line)
        return cbook.silent_list('Line2D ticklines', lines)
    def get_minorticklines(self):
        'Return the minor tick lines as a list of Line2D instances'
        lines = []
        ticks = self.get_minor_ticks()
        for tick in ticks:
            lines.append(tick.tick1line)
            lines.append(tick.tick2line)
        return cbook.silent_list('Line2D ticklines', lines)
    def get_ticklines(self, minor=False):
        'Return the tick lines as a list of Line2D instances'
        if minor:
            return self.get_minorticklines()
        return self.get_majorticklines()
    def get_majorticklocs(self):
        "Get the major tick locations in data coordinates as a numpy array"
        return self.major.locator()
    def get_minorticklocs(self):
        "Get the minor tick locations in data coordinates as a numpy array"
        return self.minor.locator()
    def get_ticklocs(self, minor=False):
        "Get the tick locations in data coordinates as a numpy array"
        if minor:
            return self.minor.locator()
        return self.major.locator()
    def _get_tick(self, major):
        'return the default tick instance'
        raise NotImplementedError('derived must override')
    def _copy_tick_props(self, src, dest):
        'Copy the props from src tick to dest tick'
        if src is None or dest is None: return
        dest.label1.update_from(src.label1)
        dest.label2.update_from(src.label2)
        dest.tick1line.update_from(src.tick1line)
        dest.tick2line.update_from(src.tick2line)
        dest.gridline.update_from(src.gridline)
        dest.tick1On = src.tick1On
        dest.tick2On = src.tick2On
        dest.label1On = src.label1On
        dest.label2On = src.label2On
    def get_label_text(self):
        'Get the text of the label'
        return self.label.get_text()
    def get_major_locator(self):
        'Get the locator of the major ticker'
        return self.major.locator
    def get_minor_locator(self):
        'Get the locator of the minor ticker'
        return self.minor.locator
    def get_major_formatter(self):
        'Get the formatter of the major ticker'
        return self.major.formatter
    def get_minor_formatter(self):
        'Get the formatter of the minor ticker'
        return self.minor.formatter
    def get_major_ticks(self, numticks=None):
        'get the tick instances; grow as necessary'
        if numticks is None:
            numticks = len(self.get_major_locator()())
        if len(self.majorTicks) < numticks:
            # update the new tick label properties from the old
            for i in range(numticks - len(self.majorTicks)):
                tick = self._get_tick(major=True)
                self.majorTicks.append(tick)
        if self._lastNumMajorTicks < numticks:
            # copy the prototype tick's properties onto the new ticks
            protoTick = self.majorTicks[0]
            for i in range(self._lastNumMajorTicks, len(self.majorTicks)):
                tick = self.majorTicks[i]
                if self._gridOnMajor: tick.gridOn = True
                self._copy_tick_props(protoTick, tick)
        self._lastNumMajorTicks = numticks
        ticks = self.majorTicks[:numticks]
        return ticks
    def get_minor_ticks(self, numticks=None):
        'get the minor tick instances; grow as necessary'
        if numticks is None:
            numticks = len(self.get_minor_locator()())
        if len(self.minorTicks) < numticks:
            # update the new tick label properties from the old
            for i in range(numticks - len(self.minorTicks)):
                tick = self._get_tick(major=False)
                self.minorTicks.append(tick)
        if self._lastNumMinorTicks < numticks:
            # copy the prototype tick's properties onto the new ticks
            protoTick = self.minorTicks[0]
            for i in range(self._lastNumMinorTicks, len(self.minorTicks)):
                tick = self.minorTicks[i]
                if self._gridOnMinor: tick.gridOn = True
                self._copy_tick_props(protoTick, tick)
        self._lastNumMinorTicks = numticks
        ticks = self.minorTicks[:numticks]
        return ticks
    def grid(self, b=None, which='major', **kwargs):
        """
        Set the axis grid on or off; b is a boolean. Use *which* =
        'major' | 'minor' | 'both' to set the grid for major or minor ticks.
        If *b* is *None* and len(kwargs)==0, toggle the grid state. If
        *kwargs* are supplied, it is assumed you want the grid on and *b*
        will be set to True.
        *kwargs* are used to set the line properties of the grids, eg,
        xax.grid(color='r', linestyle='-', linewidth=2)
        """
        if len(kwargs): b = True
        which = which.lower()
        if which in ['minor', 'both']:
            if b is None:
                self._gridOnMinor = not self._gridOnMinor
            else:
                self._gridOnMinor = b
            for tick in self.minorTicks:  # don't use get_ticks here!
                if tick is None: continue
                tick.gridOn = self._gridOnMinor
                if len(kwargs): artist.setp(tick.gridline,**kwargs)
            self._minor_tick_kw['gridOn'] = self._gridOnMinor
        if which in ['major', 'both']:
            if b is None:
                self._gridOnMajor = not self._gridOnMajor
            else:
                self._gridOnMajor = b
            for tick in self.majorTicks:  # don't use get_ticks here!
                if tick is None: continue
                tick.gridOn = self._gridOnMajor
                if len(kwargs): artist.setp(tick.gridline,**kwargs)
            self._major_tick_kw['gridOn'] = self._gridOnMajor
    def update_units(self, data):
        """
        introspect *data* for units converter and update the
        axis.converter instance if necessary. Return *True*
        if *data* is registered for unit conversion.
        """
        converter = munits.registry.get_converter(data)
        if converter is None:
            return False
        neednew = self.converter!=converter
        self.converter = converter
        default = self.converter.default_units(data, self)
        #print 'update units: default=%s, units=%s'%(default, self.units)
        if default is not None and self.units is None:
            self.set_units(default)
        if neednew:
            self._update_axisinfo()
        return True
    def _update_axisinfo(self):
        """
        check the axis converter for the stored units to see if the
        axis info needs to be updated
        """
        if self.converter is None:
            return
        info = self.converter.axisinfo(self.units, self)
        if info is None:
            return
        # only override locators/formatters/label the user has not
        # explicitly set (the isDefault_* flags track this)
        if info.majloc is not None and self.major.locator!=info.majloc and self.isDefault_majloc:
            self.set_major_locator(info.majloc)
            self.isDefault_majloc = True
        if info.minloc is not None and self.minor.locator!=info.minloc and self.isDefault_minloc:
            self.set_minor_locator(info.minloc)
            self.isDefault_minloc = True
        if info.majfmt is not None and self.major.formatter!=info.majfmt and self.isDefault_majfmt:
            self.set_major_formatter(info.majfmt)
            self.isDefault_majfmt = True
        if info.minfmt is not None and self.minor.formatter!=info.minfmt and self.isDefault_minfmt:
            self.set_minor_formatter(info.minfmt)
            self.isDefault_minfmt = True
        if info.label is not None and self.isDefault_label:
            self.set_label_text(info.label)
            self.isDefault_label = True
        self.set_default_intervals()
    def have_units(self):
        'return True if a converter or units are set on this axis'
        return self.converter is not None or self.units is not None
    def convert_units(self, x):
        'convert *x* via the registered units converter, if any'
        if self.converter is None:
            self.converter = munits.registry.get_converter(x)
        if self.converter is None:
            #print 'convert_units returning identity: units=%s, converter=%s'%(self.units, self.converter)
            return x
        ret = self.converter.convert(x, self.units, self)
        #print 'convert_units converting: axis=%s, units=%s, converter=%s, in=%s, out=%s'%(self, self.units, self.converter, x, ret)
        return ret
    def set_units(self, u):
        """
        set the units for axis
        ACCEPTS: a units tag
        """
        pchanged = False
        if u is None:
            self.units = None
            pchanged = True
        else:
            if u!=self.units:
                self.units = u
                #print 'setting units', self.converter, u, munits.registry.get_converter(u)
                pchanged = True
        if pchanged:
            self._update_axisinfo()
            # notify observers that units have (finally) changed
            self.callbacks.process('units')
            self.callbacks.process('units finalize')
    def get_units(self):
        'return the units for axis'
        return self.units
    def set_label_text(self, label, fontdict = None, **kwargs):
        """ Sets the text value of the axis label
        ACCEPTS: A string value for the label
        """
        self.isDefault_label = False
        self.label.set_text(label)
        if fontdict is not None: self.label.update(fontdict)
        self.label.update(kwargs)
        return self.label
    def set_major_formatter(self, formatter):
        """
        Set the formatter of the major ticker
        ACCEPTS: A :class:`~matplotlib.ticker.Formatter` instance
        """
        self.isDefault_majfmt = False
        self.major.formatter = formatter
        formatter.set_axis(self)
    def set_minor_formatter(self, formatter):
        """
        Set the formatter of the minor ticker
        ACCEPTS: A :class:`~matplotlib.ticker.Formatter` instance
        """
        self.isDefault_minfmt = False
        self.minor.formatter = formatter
        formatter.set_axis(self)
    def set_major_locator(self, locator):
        """
        Set the locator of the major ticker
        ACCEPTS: a :class:`~matplotlib.ticker.Locator` instance
        """
        self.isDefault_majloc = False
        self.major.locator = locator
        locator.set_axis(self)
    def set_minor_locator(self, locator):
        """
        Set the locator of the minor ticker
        ACCEPTS: a :class:`~matplotlib.ticker.Locator` instance
        """
        self.isDefault_minloc = False
        self.minor.locator = locator
        locator.set_axis(self)
    def set_pickradius(self, pickradius):
        """
        Set the depth of the axis used by the picker
        ACCEPTS: a distance in points
        """
        self.pickradius = pickradius
    def set_ticklabels(self, ticklabels, *args, **kwargs):
        """
        Set the text values of the tick labels. Return a list of Text
        instances. Use *kwarg* *minor=True* to select minor ticks.
        All other kwargs are used to update the text object properties.
        As for get_ticklabels, label1 (left or bottom) is
        affected for a given tick only if its label1On attribute
        is True, and similarly for label2. The list of returned
        label text objects consists of all such label1 objects followed
        by all such label2 objects.
        The input *ticklabels* is assumed to match the set of
        tick locations, regardless of the state of label1On and
        label2On.
        ACCEPTS: sequence of strings
        """
        #ticklabels = [str(l) for l in ticklabels]
        minor = kwargs.pop('minor', False)
        if minor:
            self.set_minor_formatter(mticker.FixedFormatter(ticklabels))
            ticks = self.get_minor_ticks()
        else:
            self.set_major_formatter(mticker.FixedFormatter(ticklabels))
            ticks = self.get_major_ticks()
        ret1 = []
        ret2 = []
        for i, tick in enumerate(ticks):
            if i<len(ticklabels):
                if tick.label1On:
                    tick.label1.set_text(ticklabels[i])
                    tick.label1.update(kwargs)
                    ret1.append(tick.label1)
                if tick.label2On:
                    tick.label2.set_text(ticklabels[i])
                    ret2.append(tick.label2)
                    tick.label2.update(kwargs)
        return ret1 + ret2
    def set_ticks(self, ticks, minor=False):
        """
        Set the locations of the tick marks from sequence ticks
        ACCEPTS: sequence of floats
        """
        ### XXX if the user changes units, the information will be lost here
        ticks = self.convert_units(ticks)
        if len(ticks) > 1:
            # grow the view interval to contain all requested ticks,
            # preserving the current orientation (inverted or not)
            xleft, xright = self.get_view_interval()
            if xright > xleft:
                self.set_view_interval(min(ticks), max(ticks))
            else:
                self.set_view_interval(max(ticks), min(ticks))
        if minor:
            self.set_minor_locator(mticker.FixedLocator(ticks))
            return self.get_minor_ticks(len(ticks))
        else:
            self.set_major_locator( mticker.FixedLocator(ticks) )
            return self.get_major_ticks(len(ticks))
    def _update_label_position(self, bboxes, bboxes2):
        """
        Update the label position based on the sequence of bounding
        boxes of all the ticklabels
        """
        raise NotImplementedError('Derived must override')
    def _update_offset_text_postion(self, bboxes, bboxes2):
        """
        Update the label position based on the sequence of bounding
        boxes of all the ticklabels
        """
        # NOTE(review): this stub's name is misspelled ("postion");
        # draw() and get_tightbbox() call _update_offset_text_position,
        # which subclasses define directly, so this stub is never hit
        # through that path. Consider renaming it.
        raise NotImplementedError('Derived must override')
    def pan(self, numsteps):
        'Pan *numsteps* (can be positive or negative)'
        self.major.locator.pan(numsteps)
    def zoom(self, direction):
        "Zoom in/out on axis; if *direction* is >0 zoom in, else zoom out"
        self.major.locator.zoom(direction)
    def axis_date(self, tz=None):
        """
        Sets up x-axis ticks and labels that treat the x data as dates.
        *tz* is a :class:`tzinfo` instance or a timezone string.
        This timezone is used to create date labels.
        """
        # By providing a sample datetime instance with the desired
        # timezone, the registered converter can be selected,
        # and the "units" attribute, which is the timezone, can
        # be set.
        import datetime
        # NOTE(review): `unicode` exists only on Python 2; on Python 3
        # this isinstance check raises NameError when *tz* is a string.
        if isinstance(tz, (str, unicode)):
            import pytz
            tz = pytz.timezone(tz)
        self.update_units(datetime.datetime(2009,1,1,0,0,0,0,tz))
class XAxis(Axis):
__name__ = 'xaxis'
axis_name = 'x'
def contains(self,mouseevent):
"""Test whether the mouse event occured in the x axis.
"""
if callable(self._contains): return self._contains(self,mouseevent)
x,y = mouseevent.x,mouseevent.y
try:
trans = self.axes.transAxes.inverted()
xaxes,yaxes = trans.transform_point((x,y))
except ValueError:
return False, {}
l,b = self.axes.transAxes.transform_point((0,0))
r,t = self.axes.transAxes.transform_point((1,1))
inaxis = xaxes>=0 and xaxes<=1 and (
(y<b and y>b-self.pickradius) or
(y>t and y<t+self.pickradius))
return inaxis, {}
def _get_tick(self, major):
if major:
tick_kw = self._major_tick_kw
else:
tick_kw = self._minor_tick_kw
return XTick(self.axes, 0, '', major=major, **tick_kw)
def _get_label(self):
# x in axes coords, y in display coords (to be updated at draw
# time by _update_label_positions)
label = mtext.Text(x=0.5, y=0,
fontproperties = font_manager.FontProperties(
size=rcParams['axes.labelsize'],
weight=rcParams['axes.labelweight']),
color = rcParams['axes.labelcolor'],
verticalalignment='top',
horizontalalignment='center',
)
label.set_transform( mtransforms.blended_transform_factory(
self.axes.transAxes, mtransforms.IdentityTransform() ))
self._set_artist_props(label)
self.label_position='bottom'
return label
def _get_offset_text(self):
# x in axes coords, y in display coords (to be updated at draw time)
offsetText = mtext.Text(x=1, y=0,
fontproperties = font_manager.FontProperties(
size=rcParams['xtick.labelsize']),
color = rcParams['xtick.color'],
verticalalignment='top',
horizontalalignment='right',
)
offsetText.set_transform( mtransforms.blended_transform_factory(
self.axes.transAxes, mtransforms.IdentityTransform() ))
self._set_artist_props(offsetText)
self.offset_text_position='bottom'
return offsetText
def get_label_position(self):
"""
Return the label position (top or bottom)
"""
return self.label_position
def set_label_position(self, position):
"""
Set the label position (top or bottom)
ACCEPTS: [ 'top' | 'bottom' ]
"""
assert position == 'top' or position == 'bottom'
if position == 'top':
self.label.set_verticalalignment('baseline')
else:
self.label.set_verticalalignment('top')
self.label_position=position
    def _update_label_position(self, bboxes, bboxes2):
        """
        Update the label position based on the sequence of bounding
        boxes of all the ticklabels

        *bboxes* are the label1 (bottom) boxes, *bboxes2* the label2
        (top) boxes, both in display (pixel) coordinates.
        """
        # respect an explicit position set via set_label_coords
        if not self._autolabelpos: return
        x,y = self.label.get_position()
        if self.label_position == 'bottom':
            if not len(bboxes):
                # no tick labels: hang the label off the axes box itself
                bottom = self.axes.bbox.ymin
            else:
                bbox = mtransforms.Bbox.union(bboxes)
                bottom = bbox.y0
            # labelpad is in points; convert to pixels via dpi/72
            self.label.set_position( (x, bottom - self.labelpad*self.figure.dpi / 72.0))
        else:
            if not len(bboxes2):
                top = self.axes.bbox.ymax
            else:
                bbox = mtransforms.Bbox.union(bboxes2)
                top = bbox.y1
            self.label.set_position( (x, top+self.labelpad*self.figure.dpi / 72.0))
    def _update_offset_text_position(self, bboxes, bboxes2):
        """
        Update the offset_text position based on the sequence of bounding
        boxes of all the ticklabels

        Only *bboxes* (the bottom, label1 boxes) are used; the offset
        text always sits below the bottom tick labels. *bboxes2* is
        accepted for interface symmetry but ignored here.
        """
        x,y = self.offsetText.get_position()
        if not len(bboxes):
            bottom = self.axes.bbox.ymin
        else:
            bbox = mtransforms.Bbox.union(bboxes)
            bottom = bbox.y0
        # OFFSETTEXTPAD is in points; convert to pixels via dpi/72
        self.offsetText.set_position((x, bottom-self.OFFSETTEXTPAD*self.figure.dpi/72.0))
    def get_text_heights(self, renderer):
        """
        Returns the amount of space one should reserve for text
        above and below the axes. Returns a tuple (above, below)

        Heights are in display (pixel) units and include the tick
        padding plus the axis label on whichever side it occupies.
        """
        # bbox: bottom (label1) labels; bbox2: top (label2) labels
        bbox, bbox2 = self.get_ticklabel_extents(renderer)
        # MGDTODO: Need a better way to get the pad
        padPixels = self.majorTicks[0].get_pad_pixels()
        above = 0.0
        if bbox2.height:
            above += bbox2.height + padPixels
        below = 0.0
        if bbox.height:
            below += bbox.height + padPixels
        if self.get_label_position() == 'top':
            above += self.label.get_window_extent(renderer).height + padPixels
        else:
            below += self.label.get_window_extent(renderer).height + padPixels
        return above, below
def set_ticks_position(self, position):
"""
Set the ticks position (top, bottom, both, default or none)
both sets the ticks to appear on both positions, but does not
change the tick labels. 'default' resets the tick positions to
the default: ticks on both positions, labels at bottom. 'none'
can be used if you don't want any ticks. 'none' and 'both'
affect only the ticks, not the labels.
ACCEPTS: [ 'top' | 'bottom' | 'both' | 'default' | 'none' ]
"""
if position == 'top':
self.set_tick_params(which='both', top=True, labeltop=True,
bottom=False, labelbottom=False)
elif position == 'bottom':
self.set_tick_params(which='both', top=False, labeltop=False,
bottom=True, labelbottom=True)
elif position == 'both':
self.set_tick_params(which='both', top=True,
bottom=True)
elif position == 'none':
self.set_tick_params(which='both', top=False,
bottom=False)
elif position == 'default':
self.set_tick_params(which='both', top=True, labeltop=False,
bottom=True, labelbottom=True)
else:
raise ValueError("invalid position: %s" % position)
def tick_top(self):
'use ticks only on top'
self.set_ticks_position('top')
def tick_bottom(self):
'use ticks only on bottom'
self.set_ticks_position('bottom')
def get_ticks_position(self):
"""
Return the ticks position (top, bottom, default or unknown)
"""
majt=self.majorTicks[0]
mT=self.minorTicks[0]
majorTop=(not majt.tick1On) and majt.tick2On and (not majt.label1On) and majt.label2On
minorTop=(not mT.tick1On) and mT.tick2On and (not mT.label1On) and mT.label2On
if majorTop and minorTop: return 'top'
MajorBottom=majt.tick1On and (not majt.tick2On) and majt.label1On and (not majt.label2On)
MinorBottom=mT.tick1On and (not mT.tick2On) and mT.label1On and (not mT.label2On)
if MajorBottom and MinorBottom: return 'bottom'
majorDefault=majt.tick1On and majt.tick2On and majt.label1On and (not majt.label2On)
minorDefault=mT.tick1On and mT.tick2On and mT.label1On and (not mT.label2On)
if majorDefault and minorDefault: return 'default'
return 'unknown'
def get_view_interval(self):
'return the Interval instance for this axis view limits'
return self.axes.viewLim.intervalx
def set_view_interval(self, vmin, vmax, ignore=False):
"""
If *ignore* is *False*, the order of vmin, vmax
does not matter; the original axis orientation will
be preserved. In addition, the view limits can be
expanded, but will not be reduced. This method is
for mpl internal use; for normal use, see
:meth:`~matplotlib.axes.Axes.set_xlim`.
"""
if ignore:
self.axes.viewLim.intervalx = vmin, vmax
else:
Vmin, Vmax = self.get_view_interval()
if Vmin < Vmax:
self.axes.viewLim.intervalx = (min(vmin, vmax, Vmin),
max(vmin, vmax, Vmax))
else:
self.axes.viewLim.intervalx = (max(vmin, vmax, Vmin),
min(vmin, vmax, Vmax))
def get_minpos(self):
return self.axes.dataLim.minposx
def get_data_interval(self):
'return the Interval instance for this axis data limits'
return self.axes.dataLim.intervalx
def set_data_interval(self, vmin, vmax, ignore=False):
'set the axis data limits'
if ignore:
self.axes.dataLim.intervalx = vmin, vmax
else:
Vmin, Vmax = self.get_data_interval()
self.axes.dataLim.intervalx = min(vmin, Vmin), max(vmax, Vmax)
def set_default_intervals(self):
'set the default limits for the axis interval if they are not mutated'
xmin, xmax = 0., 1.
dataMutated = self.axes.dataLim.mutatedx()
viewMutated = self.axes.viewLim.mutatedx()
if not dataMutated or not viewMutated:
if self.converter is not None:
info = self.converter.axisinfo(self.units, self)
if info.default_limits is not None:
valmin, valmax = info.default_limits
xmin = self.converter.convert(valmin, self.units, self)
xmax = self.converter.convert(valmax, self.units, self)
if not dataMutated:
self.axes.dataLim.intervalx = xmin, xmax
if not viewMutated:
self.axes.viewLim.intervalx = xmin, xmax
class YAxis(Axis):
    """
    The y-axis specialization of :class:`Axis`: labels and ticks live on
    the left/right edges of the axes, and limits are stored in the
    ``intervaly`` / ``minposy`` attributes of the data and view limits.
    """
    __name__ = 'yaxis'
    axis_name = 'y'
    def contains(self,mouseevent):
        """Test whether the mouse event occurred in the y axis.
        Returns *True* | *False*
        """
        # Delegate to a user-installed picking callable when one is set.
        if callable(self._contains): return self._contains(self,mouseevent)
        x,y = mouseevent.x,mouseevent.y
        try:
            trans = self.axes.transAxes.inverted()
            xaxes,yaxes = trans.transform_point((x,y))
        except ValueError:
            # Transform was not invertible; treat as a miss.
            return False, {}
        l,b = self.axes.transAxes.transform_point((0,0))
        r,t = self.axes.transAxes.transform_point((1,1))
        # Hit when vertically inside the axes and horizontally within
        # ``pickradius`` pixels just outside the left or right edge.
        inaxis = yaxes>=0 and yaxes<=1 and (
            (x<l and x>l-self.pickradius) or
            (x>r and x<r+self.pickradius))
        return inaxis, {}
    def _get_tick(self, major):
        # Build a fresh YTick carrying the stored major/minor keyword
        # overrides (set via set_tick_params).
        if major:
            tick_kw = self._major_tick_kw
        else:
            tick_kw = self._minor_tick_kw
        return YTick(self.axes, 0, '', major=major, **tick_kw)
    def _get_label(self):
        # x in display coords (updated by _update_label_position)
        # y in axes coords
        label = mtext.Text(x=0, y=0.5,
            # todo: get the label position
            fontproperties=font_manager.FontProperties(
                           size=rcParams['axes.labelsize'],
                           weight=rcParams['axes.labelweight']),
            color = rcParams['axes.labelcolor'],
            verticalalignment='center',
            horizontalalignment='right',
            rotation='vertical',
            )
        # Blend identity-x (display) with axes-fraction y.
        label.set_transform( mtransforms.blended_transform_factory(
            mtransforms.IdentityTransform(), self.axes.transAxes) )
        self._set_artist_props(label)
        self.label_position='left'
        return label
    def _get_offset_text(self):
        # x in display coords, y in axes coords (to be updated at draw time)
        offsetText = mtext.Text(x=0, y=0.5,
            fontproperties = font_manager.FontProperties(
                             size=rcParams['ytick.labelsize']),
            color = rcParams['ytick.color'],
            verticalalignment = 'baseline',
            horizontalalignment = 'left',
            )
        offsetText.set_transform(mtransforms.blended_transform_factory(
            self.axes.transAxes, mtransforms.IdentityTransform()) )
        self._set_artist_props(offsetText)
        self.offset_text_position='left'
        return offsetText
    def get_label_position(self):
        """
        Return the label position (left or right)
        """
        return self.label_position
    def set_label_position(self, position):
        """
        Set the label position (left or right)
        ACCEPTS: [ 'left' | 'right' ]
        """
        # NOTE(review): ``assert`` is stripped under -O; a ValueError would
        # be more robust here.
        assert position == 'left' or position == 'right'
        if position == 'right':
            self.label.set_horizontalalignment('left')
        else:
            self.label.set_horizontalalignment('right')
        self.label_position=position
    def _update_label_position(self, bboxes, bboxes2):
        """
        Update the label position based on the sequence of bounding
        boxes of all the ticklabels
        """
        if not self._autolabelpos: return
        x,y = self.label.get_position()
        if self.label_position == 'left':
            # Push the label left of the leftmost left-side tick label,
            # or the axes edge when there are no tick labels.
            if not len(bboxes):
                left = self.axes.bbox.xmin
            else:
                bbox = mtransforms.Bbox.union(bboxes)
                left = bbox.x0
            self.label.set_position( (left-self.labelpad*self.figure.dpi/72.0, y))
        else:
            # Right-side label: mirror of the above using bboxes2.
            if not len(bboxes2):
                right = self.axes.bbox.xmax
            else:
                bbox = mtransforms.Bbox.union(bboxes2)
                right = bbox.x1
            self.label.set_position( (right+self.labelpad*self.figure.dpi/72.0, y))
    def _update_offset_text_position(self, bboxes, bboxes2):
        """
        Update the offset_text position based on the sequence of bounding
        boxes of all the ticklabels
        """
        # For the y axis the offset text always sits just above the axes
        # top edge; the tick label boxes are not consulted.
        x,y = self.offsetText.get_position()
        top = self.axes.bbox.ymax
        self.offsetText.set_position((x, top+self.OFFSETTEXTPAD*self.figure.dpi/72.0))
    def set_offset_position(self, position):
        """Put the offset text at the 'left' or 'right' edge of the axes."""
        # NOTE(review): ``assert`` is stripped under -O.
        assert position == 'left' or position == 'right'
        x,y = self.offsetText.get_position()
        # x is in axes-fraction coordinates: 0 is the left edge, 1 the right.
        if position == 'left': x = 0
        else: x = 1
        self.offsetText.set_ha(position)
        self.offsetText.set_position((x,y))
    def get_text_widths(self, renderer):
        """Return the pixel widths to reserve for axis text as
        ``(left, right)``."""
        bbox, bbox2 = self.get_ticklabel_extents(renderer)
        # MGDTODO: Need a better way to get the pad
        padPixels = self.majorTicks[0].get_pad_pixels()
        left = 0.0
        if bbox.width:
            left += bbox.width + padPixels
        right = 0.0
        if bbox2.width:
            right += bbox2.width + padPixels
        # The axis label widens whichever side it currently sits on.
        if self.get_label_position() == 'left':
            left += self.label.get_window_extent(renderer).width + padPixels
        else:
            right += self.label.get_window_extent(renderer).width + padPixels
        return left, right
    def set_ticks_position(self, position):
        """
        Set the ticks position (left, right, both, default or none)
        'both' sets the ticks to appear on both positions, but does not
        change the tick labels.  'default' resets the tick positions to
        the default: ticks on both positions, labels at left.  'none'
        can be used if you don't want any ticks. 'none' and 'both'
        affect only the ticks, not the labels.
        ACCEPTS: [ 'left' | 'right' | 'both' | 'default' | 'none' ]
        """
        if position == 'right':
            self.set_tick_params(which='both', right=True, labelright=True,
                                 left=False, labelleft=False)
        elif position == 'left':
            self.set_tick_params(which='both', right=False, labelright=False,
                                 left=True, labelleft=True)
        elif position == 'both':
            # Ticks on both sides; labels untouched.
            self.set_tick_params(which='both', right=True,
                                 left=True)
        elif position == 'none':
            self.set_tick_params(which='both', right=False,
                                 left=False)
        elif position == 'default':
            self.set_tick_params(which='both', right=True, labelright=False,
                                 left=True, labelleft=True)
        else:
            raise ValueError("invalid position: %s" % position)
    def tick_right(self):
        'use ticks only on right'
        self.set_ticks_position('right')
    def tick_left(self):
        'use ticks only on left'
        self.set_ticks_position('left')
    def get_ticks_position(self):
        """
        Return the ticks position (left, right, both or unknown)
        """
        # Compare the first major/minor ticks' visibility flags against the
        # patterns produced by set_ticks_position; tick1/label1 are the
        # left side, tick2/label2 the right side.
        majt=self.majorTicks[0]
        mT=self.minorTicks[0]
        majorRight=(not majt.tick1On) and majt.tick2On and (not majt.label1On) and majt.label2On
        minorRight=(not mT.tick1On) and mT.tick2On and (not mT.label1On) and mT.label2On
        if majorRight and minorRight: return 'right'
        majorLeft=majt.tick1On and (not majt.tick2On) and majt.label1On and (not majt.label2On)
        minorLeft=mT.tick1On and (not mT.tick2On) and mT.label1On and (not mT.label2On)
        if majorLeft and minorLeft: return 'left'
        majorDefault=majt.tick1On and majt.tick2On and majt.label1On and (not majt.label2On)
        minorDefault=mT.tick1On and mT.tick2On and mT.label1On and (not mT.label2On)
        if majorDefault and minorDefault: return 'default'
        return 'unknown'
    def get_view_interval(self):
        'return the Interval instance for this axis view limits'
        return self.axes.viewLim.intervaly
    def set_view_interval(self, vmin, vmax, ignore=False):
        """
        If *ignore* is *False*, the order of vmin, vmax
        does not matter; the original axis orientation will
        be preserved. In addition, the view limits can be
        expanded, but will not be reduced.  This method is
        for mpl internal use; for normal use, see
        :meth:`~matplotlib.axes.Axes.set_ylim`.
        """
        if ignore:
            self.axes.viewLim.intervaly = vmin, vmax
        else:
            Vmin, Vmax = self.get_view_interval()
            if Vmin < Vmax:
                # Normal orientation: interval may only grow outward.
                self.axes.viewLim.intervaly = (min(vmin, vmax, Vmin),
                                               max(vmin, vmax, Vmax))
            else:
                # Inverted axis: keep the larger endpoint first.
                self.axes.viewLim.intervaly = (max(vmin, vmax, Vmin),
                                               min(vmin, vmax, Vmax))
    def get_minpos(self):
        # Minimum positive value from the y data limits.
        return self.axes.dataLim.minposy
    def get_data_interval(self):
        'return the Interval instance for this axis data limits'
        return self.axes.dataLim.intervaly
    def set_data_interval(self, vmin, vmax, ignore=False):
        'set the axis data limits'
        if ignore:
            self.axes.dataLim.intervaly = vmin, vmax
        else:
            # Data limits are only ever widened, never shrunk.
            Vmin, Vmax = self.get_data_interval()
            self.axes.dataLim.intervaly = min(vmin, Vmin), max(vmax, Vmax)
    def set_default_intervals(self):
        'set the default limits for the axis interval if they are not mutated'
        ymin, ymax = 0., 1.
        dataMutated = self.axes.dataLim.mutatedy()
        viewMutated = self.axes.viewLim.mutatedy()
        if not dataMutated or not viewMutated:
            # A unit converter may override the (0, 1) defaults.
            if self.converter is not None:
                info = self.converter.axisinfo(self.units, self)
                if info.default_limits is not None:
                    valmin, valmax = info.default_limits
                    ymin = self.converter.convert(valmin, self.units, self)
                    ymax = self.converter.convert(valmax, self.units, self)
            if not dataMutated:
                self.axes.dataLim.intervaly = ymin, ymax
            if not viewMutated:
                self.axes.viewLim.intervaly = ymin, ymax
| gpl-3.0 |
bollu/sandhi | modules/gr36/gr-filter/examples/interpolate.py | 13 | 8584 | #!/usr/bin/env python
#
# Copyright 2009,2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import filter
import sys, time
try:
import scipy
from scipy import fftpack
except ImportError:
print "Error: Program requires scipy (see: www.scipy.org)."
sys.exit(1)
try:
import pylab
from pylab import mlab
except ImportError:
print "Error: Program requires matplotlib (see: matplotlib.sourceforge.net)."
sys.exit(1)
class pfb_top_block(gr.top_block):
    """Flow graph that sums two sine tones and feeds the result through
    both a PFB interpolator and a PFB arbitrary resampler, recording the
    input and both outputs in vector sinks for later plotting."""
    def __init__(self):
        gr.top_block.__init__(self)
        self._N = 100000         # number of samples to use
        self._fs = 2000          # initial sampling rate
        self._interp = 5         # Interpolation rate for PFB interpolator
        self._ainterp = 5.5      # Resampling rate for the PFB arbitrary resampler
        # Frequencies of the signals we construct
        freq1 = 100
        freq2 = 200
        # Create a set of taps for the PFB interpolator
        # This is based on the post-interpolation sample rate
        self._taps = filter.firdes.low_pass_2(self._interp,
                                              self._interp*self._fs,
                                              freq2+50, 50,
                                              attenuation_dB=120,
                                              window=filter.firdes.WIN_BLACKMAN_hARRIS)
        # Create a set of taps for the PFB arbitrary resampler
        # The filter size is the number of filters in the filterbank; 32 will give very low side-lobes,
        # and larger numbers will reduce these even further
        # The taps in this filter are based on a sampling rate of the filter size since it acts
        # internally as an interpolator.
        flt_size = 32
        self._taps2 = filter.firdes.low_pass_2(flt_size,
                                               flt_size*self._fs,
                                               freq2+50, 150,
                                               attenuation_dB=120,
                                               window=filter.firdes.WIN_BLACKMAN_hARRIS)
        # Calculate the number of taps per channel for our own information
        tpc = scipy.ceil(float(len(self._taps)) / float(self._interp))
        print "Number of taps: ", len(self._taps)
        print "Number of filters: ", self._interp
        print "Taps per channel: ", tpc
        # Create a couple of signals at different frequencies
        self.signal1 = gr.sig_source_c(self._fs, gr.GR_SIN_WAVE, freq1, 0.5)
        self.signal2 = gr.sig_source_c(self._fs, gr.GR_SIN_WAVE, freq2, 0.5)
        self.signal = gr.add_cc()
        # head caps the run at self._N samples so tb.run() terminates.
        self.head = gr.head(gr.sizeof_gr_complex, self._N)
        # Construct the PFB interpolator filter
        self.pfb = filter.pfb.interpolator_ccf(self._interp, self._taps)
        # Construct the PFB arbitrary resampler filter
        self.pfb_ar = filter.pfb.arb_resampler_ccf(self._ainterp, self._taps2, flt_size)
        self.snk_i = gr.vector_sink_c()
        #self.pfb_ar.pfb.print_taps()
        #self.pfb.pfb.print_taps()
        # Connect the blocks
        self.connect(self.signal1, self.head, (self.signal,0))
        self.connect(self.signal2, (self.signal,1))
        self.connect(self.signal, self.pfb)
        self.connect(self.signal, self.pfb_ar)
        self.connect(self.signal, self.snk_i)
        # Create the sink for the interpolated signals
        self.snk1 = gr.vector_sink_c()
        self.snk2 = gr.vector_sink_c()
        self.connect(self.pfb, self.snk1)
        self.connect(self.pfb_ar, self.snk2)
def main():
    """Run the flow graph, then plot the input and both resampled outputs
    in the frequency (PSD) and time domains across three figures."""
    tb = pfb_top_block()
    # Time the flow-graph execution for the report below.
    tstart = time.time()
    tb.run()
    tend = time.time()
    print "Run time: %f" % (tend - tstart)
    if 1:
        fig1 = pylab.figure(1, figsize=(12,10), facecolor="w")
        fig2 = pylab.figure(2, figsize=(12,10), facecolor="w")
        fig3 = pylab.figure(3, figsize=(12,10), facecolor="w")
        # Plot a 10k-sample window starting at sample 10k.
        Ns = 10000
        Ne = 10000
        fftlen = 8192
        winfunc = scipy.blackman
        # Plot input signal
        fs = tb._fs
        d = tb.snk_i.data()[Ns:Ns+Ne]
        sp1_f = fig1.add_subplot(2, 1, 1)
        X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
                          window = lambda d: d*winfunc(fftlen),
                          scale_by_freq=True)
        # Convert the two-sided PSD to dB and center it about 0 Hz.
        X_in = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
        f_in = scipy.arange(-fs/2.0, fs/2.0, fs/float(X_in.size))
        p1_f = sp1_f.plot(f_in, X_in, "b")
        sp1_f.set_xlim([min(f_in), max(f_in)+1])
        sp1_f.set_ylim([-200.0, 50.0])
        sp1_f.set_title("Input Signal", weight="bold")
        sp1_f.set_xlabel("Frequency (Hz)")
        sp1_f.set_ylabel("Power (dBW)")
        Ts = 1.0/fs
        Tmax = len(d)*Ts
        t_in = scipy.arange(0, Tmax, Ts)
        x_in = scipy.array(d)
        sp1_t = fig1.add_subplot(2, 1, 2)
        p1_t = sp1_t.plot(t_in, x_in.real, "b-o")
        #p1_t = sp1_t.plot(t_in, x_in.imag, "r-o")
        sp1_t.set_ylim([-2.5, 2.5])
        sp1_t.set_title("Input Signal", weight="bold")
        sp1_t.set_xlabel("Time (s)")
        sp1_t.set_ylabel("Amplitude")
        # Plot output of PFB interpolator
        fs_int = tb._fs*tb._interp
        sp2_f = fig2.add_subplot(2, 1, 1)
        # The interpolator produced _interp times as many samples.
        d = tb.snk1.data()[Ns:Ns+(tb._interp*Ne)]
        X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
                          window = lambda d: d*winfunc(fftlen),
                          scale_by_freq=True)
        X_o = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
        f_o = scipy.arange(-fs_int/2.0, fs_int/2.0, fs_int/float(X_o.size))
        p2_f = sp2_f.plot(f_o, X_o, "b")
        sp2_f.set_xlim([min(f_o), max(f_o)+1])
        sp2_f.set_ylim([-200.0, 50.0])
        sp2_f.set_title("Output Signal from PFB Interpolator", weight="bold")
        sp2_f.set_xlabel("Frequency (Hz)")
        sp2_f.set_ylabel("Power (dBW)")
        Ts_int = 1.0/fs_int
        Tmax = len(d)*Ts_int
        t_o = scipy.arange(0, Tmax, Ts_int)
        x_o1 = scipy.array(d)
        sp2_t = fig2.add_subplot(2, 1, 2)
        p2_t = sp2_t.plot(t_o, x_o1.real, "b-o")
        #p2_t = sp2_t.plot(t_o, x_o.imag, "r-o")
        sp2_t.set_ylim([-2.5, 2.5])
        sp2_t.set_title("Output Signal from PFB Interpolator", weight="bold")
        sp2_t.set_xlabel("Time (s)")
        sp2_t.set_ylabel("Amplitude")
        # Plot output of PFB arbitrary resampler
        fs_aint = tb._fs * tb._ainterp
        sp3_f = fig3.add_subplot(2, 1, 1)
        d = tb.snk2.data()[Ns:Ns+(tb._interp*Ne)]
        X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
                          window = lambda d: d*winfunc(fftlen),
                          scale_by_freq=True)
        X_o = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
        f_o = scipy.arange(-fs_aint/2.0, fs_aint/2.0, fs_aint/float(X_o.size))
        p3_f = sp3_f.plot(f_o, X_o, "b")
        sp3_f.set_xlim([min(f_o), max(f_o)+1])
        sp3_f.set_ylim([-200.0, 50.0])
        sp3_f.set_title("Output Signal from PFB Arbitrary Resampler", weight="bold")
        sp3_f.set_xlabel("Frequency (Hz)")
        sp3_f.set_ylabel("Power (dBW)")
        Ts_aint = 1.0/fs_aint
        Tmax = len(d)*Ts_aint
        t_o = scipy.arange(0, Tmax, Ts_aint)
        x_o2 = scipy.array(d)
        sp3_f = fig3.add_subplot(2, 1, 2)
        # Overlay both resampled outputs in time for direct comparison.
        p3_f = sp3_f.plot(t_o, x_o2.real, "b-o")
        p3_f = sp3_f.plot(t_o, x_o1.real, "m-o")
        #p3_f = sp3_f.plot(t_o, x_o2.imag, "r-o")
        sp3_f.set_ylim([-2.5, 2.5])
        sp3_f.set_title("Output Signal from PFB Arbitrary Resampler", weight="bold")
        sp3_f.set_xlabel("Time (s)")
        sp3_f.set_ylabel("Amplitude")
        pylab.show()
# Script entry point: run the demo; swallow Ctrl-C so an interrupted run
# exits quietly instead of printing a traceback.
if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        pass
| gpl-3.0 |
boomsbloom/dtm-fmri | DTM/for_gensim/lib/python2.7/site-packages/sklearn/learning_curve.py | 35 | 15441 | """Utilities to evaluate models with respect to a variable
"""
# Author: Alexander Fabisch <afabisch@informatik.uni-bremen.de>
#
# License: BSD 3 clause
import warnings
import numpy as np
from .base import is_classifier, clone
from .cross_validation import check_cv
from .externals.joblib import Parallel, delayed
from .cross_validation import _safe_split, _score, _fit_and_score
from .metrics.scorer import check_scoring
from .utils import indexable
from .utils.fixes import astype
# Importing this legacy module immediately emits a DeprecationWarning
# pointing users at sklearn.model_selection.
warnings.warn("This module was deprecated in version 0.18 in favor of the "
              "model_selection module into which all the functions are moved."
              " This module will be removed in 0.20",
              DeprecationWarning)
# Public API of this module.
__all__ = ['learning_curve', 'validation_curve']
def learning_curve(estimator, X, y, train_sizes=np.linspace(0.1, 1.0, 5),
                   cv=None, scoring=None, exploit_incremental_learning=False,
                   n_jobs=1, pre_dispatch="all", verbose=0,
                   error_score='raise'):
    """Learning curve.
    .. deprecated:: 0.18
        This module will be removed in 0.20.
        Use :func:`sklearn.model_selection.learning_curve` instead.
    Determines cross-validated training and test scores for different training
    set sizes.
    A cross-validation generator splits the whole dataset k times in training
    and test data. Subsets of the training set with varying sizes will be used
    to train the estimator and a score for each training subset size and the
    test set will be computed. Afterwards, the scores will be averaged over
    all k runs for each training subset size.
    Read more in the :ref:`User Guide <learning_curves>`.
    Parameters
    ----------
    estimator : object type that implements the "fit" and "predict" methods
        An object of that type which is cloned for each validation.
    X : array-like, shape (n_samples, n_features)
        Training vector, where n_samples is the number of samples and
        n_features is the number of features.
    y : array-like, shape (n_samples) or (n_samples, n_features), optional
        Target relative to X for classification or regression;
        None for unsupervised learning.
    train_sizes : array-like, shape (n_ticks,), dtype float or int
        Relative or absolute numbers of training examples that will be used to
        generate the learning curve. If the dtype is float, it is regarded as a
        fraction of the maximum size of the training set (that is determined
        by the selected validation method), i.e. it has to be within (0, 1].
        Otherwise it is interpreted as absolute sizes of the training sets.
        Note that for classification the number of samples usually have to
        be big enough to contain at least one sample from each class.
        (default: np.linspace(0.1, 1.0, 5))
    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
        - None, to use the default 3-fold cross-validation,
        - integer, to specify the number of folds.
        - An object to be used as a cross-validation generator.
        - An iterable yielding train/test splits.
        For integer/None inputs, if the estimator is a classifier and ``y`` is
        either binary or multiclass,
        :class:`sklearn.model_selection.StratifiedKFold` is used. In all
        other cases, :class:`sklearn.model_selection.KFold` is used.
        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.
    scoring : string, callable or None, optional, default: None
        A string (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.
    exploit_incremental_learning : boolean, optional, default: False
        If the estimator supports incremental learning, this will be
        used to speed up fitting for different training set sizes.
    n_jobs : integer, optional
        Number of jobs to run in parallel (default 1).
    pre_dispatch : integer or string, optional
        Number of predispatched jobs for parallel execution (default is
        all). The option can reduce the allocated memory. The string can
        be an expression like '2*n_jobs'.
    verbose : integer, optional
        Controls the verbosity: the higher, the more messages.
    error_score : 'raise' (default) or numeric
        Value to assign to the score if an error occurs in estimator fitting.
        If set to 'raise', the error is raised. If a numeric value is given,
        FitFailedWarning is raised. This parameter does not affect the refit
        step, which will always raise the error.
    Returns
    -------
    train_sizes_abs : array, shape = (n_unique_ticks,), dtype int
        Numbers of training examples that has been used to generate the
        learning curve. Note that the number of ticks might be less
        than n_ticks because duplicate entries will be removed.
    train_scores : array, shape (n_ticks, n_cv_folds)
        Scores on training sets.
    test_scores : array, shape (n_ticks, n_cv_folds)
        Scores on test set.
    Notes
    -----
    See :ref:`examples/model_selection/plot_learning_curve.py
    <sphx_glr_auto_examples_model_selection_plot_learning_curve.py>`
    """
    if exploit_incremental_learning and not hasattr(estimator, "partial_fit"):
        raise ValueError("An estimator must support the partial_fit interface "
                         "to exploit incremental learning")
    X, y = indexable(X, y)
    # Make a list since we will be iterating multiple times over the folds
    cv = list(check_cv(cv, X, y, classifier=is_classifier(estimator)))
    scorer = check_scoring(estimator, scoring=scoring)
    # HACK as long as boolean indices are allowed in cv generators
    if cv[0][0].dtype == bool:
        # Convert boolean masks to integer index arrays.
        new_cv = []
        for i in range(len(cv)):
            new_cv.append((np.nonzero(cv[i][0])[0], np.nonzero(cv[i][1])[0]))
        cv = new_cv
    n_max_training_samples = len(cv[0][0])
    # Because the lengths of folds can be significantly different, it is
    # not guaranteed that we use all of the available training data when we
    # use the first 'n_max_training_samples' samples.
    train_sizes_abs = _translate_train_sizes(train_sizes,
                                             n_max_training_samples)
    n_unique_ticks = train_sizes_abs.shape[0]
    if verbose > 0:
        print("[learning_curve] Training set sizes: " + str(train_sizes_abs))
    parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
                        verbose=verbose)
    if exploit_incremental_learning:
        # One job per fold; each job walks all training sizes via partial_fit.
        classes = np.unique(y) if is_classifier(estimator) else None
        out = parallel(delayed(_incremental_fit_estimator)(
            clone(estimator), X, y, classes, train, test, train_sizes_abs,
            scorer, verbose) for train, test in cv)
    else:
        # One job per (fold, training size) pair; each fits from scratch on
        # the first n_train_samples indices of the fold's training split.
        out = parallel(delayed(_fit_and_score)(
            clone(estimator), X, y, scorer, train[:n_train_samples], test,
            verbose, parameters=None, fit_params=None, return_train_score=True,
            error_score=error_score)
            for train, test in cv for n_train_samples in train_sizes_abs)
        # Keep only (train_score, test_score) and group by fold.
        out = np.array(out)[:, :2]
        n_cv_folds = out.shape[0] // n_unique_ticks
        out = out.reshape(n_cv_folds, n_unique_ticks, 2)
    # Reorder to (score_kind, tick, fold) so out[0]/out[1] are the
    # train/test score matrices.
    out = np.asarray(out).transpose((2, 1, 0))
    return train_sizes_abs, out[0], out[1]
def _translate_train_sizes(train_sizes, n_max_training_samples):
"""Determine absolute sizes of training subsets and validate 'train_sizes'.
Examples:
_translate_train_sizes([0.5, 1.0], 10) -> [5, 10]
_translate_train_sizes([5, 10], 10) -> [5, 10]
Parameters
----------
train_sizes : array-like, shape (n_ticks,), dtype float or int
Numbers of training examples that will be used to generate the
learning curve. If the dtype is float, it is regarded as a
fraction of 'n_max_training_samples', i.e. it has to be within (0, 1].
n_max_training_samples : int
Maximum number of training samples (upper bound of 'train_sizes').
Returns
-------
train_sizes_abs : array, shape (n_unique_ticks,), dtype int
Numbers of training examples that will be used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
"""
train_sizes_abs = np.asarray(train_sizes)
n_ticks = train_sizes_abs.shape[0]
n_min_required_samples = np.min(train_sizes_abs)
n_max_required_samples = np.max(train_sizes_abs)
if np.issubdtype(train_sizes_abs.dtype, np.float):
if n_min_required_samples <= 0.0 or n_max_required_samples > 1.0:
raise ValueError("train_sizes has been interpreted as fractions "
"of the maximum number of training samples and "
"must be within (0, 1], but is within [%f, %f]."
% (n_min_required_samples,
n_max_required_samples))
train_sizes_abs = astype(train_sizes_abs * n_max_training_samples,
dtype=np.int, copy=False)
train_sizes_abs = np.clip(train_sizes_abs, 1,
n_max_training_samples)
else:
if (n_min_required_samples <= 0 or
n_max_required_samples > n_max_training_samples):
raise ValueError("train_sizes has been interpreted as absolute "
"numbers of training samples and must be within "
"(0, %d], but is within [%d, %d]."
% (n_max_training_samples,
n_min_required_samples,
n_max_required_samples))
train_sizes_abs = np.unique(train_sizes_abs)
if n_ticks > train_sizes_abs.shape[0]:
warnings.warn("Removed duplicate entries from 'train_sizes'. Number "
"of ticks will be less than the size of "
"'train_sizes' %d instead of %d)."
% (train_sizes_abs.shape[0], n_ticks), RuntimeWarning)
return train_sizes_abs
def _incremental_fit_estimator(estimator, X, y, classes, train, test,
                               train_sizes, scorer, verbose):
    """Train estimator on training subsets incrementally and compute scores."""
    train_scores = []
    test_scores = []
    # np.split(train, train_sizes) yields, for each boundary in train_sizes,
    # the slice of indices since the previous boundary; the trailing chunk
    # beyond the last boundary is discarded.
    increments = np.split(train, train_sizes)[:-1]
    for n_samples, new_indices in zip(train_sizes, increments):
        seen_so_far = train[:n_samples]
        X_seen, y_seen = _safe_split(estimator, X, y, seen_so_far)
        X_new, y_new = _safe_split(estimator, X, y, new_indices)
        X_test, y_test = _safe_split(estimator, X, y, test, seen_so_far)
        # Feed only the newly revealed samples to partial_fit.
        if y_new is None:
            estimator.partial_fit(X_new, classes=classes)
        else:
            estimator.partial_fit(X_new, y_new, classes=classes)
        # Score against everything seen so far and against the test split.
        train_scores.append(_score(estimator, X_seen, y_seen, scorer))
        test_scores.append(_score(estimator, X_test, y_test, scorer))
    return np.array((train_scores, test_scores)).T
def validation_curve(estimator, X, y, param_name, param_range, cv=None,
                     scoring=None, n_jobs=1, pre_dispatch="all", verbose=0):
    """Validation curve.
    .. deprecated:: 0.18
        This module will be removed in 0.20.
        Use :func:`sklearn.model_selection.validation_curve` instead.
    Determine training and test scores for varying parameter values.
    Compute scores for an estimator with different values of a specified
    parameter. This is similar to grid search with one parameter. However, this
    will also compute training scores and is merely a utility for plotting the
    results.
    Read more in the :ref:`User Guide <validation_curve>`.
    Parameters
    ----------
    estimator : object type that implements the "fit" and "predict" methods
        An object of that type which is cloned for each validation.
    X : array-like, shape (n_samples, n_features)
        Training vector, where n_samples is the number of samples and
        n_features is the number of features.
    y : array-like, shape (n_samples) or (n_samples, n_features), optional
        Target relative to X for classification or regression;
        None for unsupervised learning.
    param_name : string
        Name of the parameter that will be varied.
    param_range : array-like, shape (n_values,)
        The values of the parameter that will be evaluated.
    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
        - None, to use the default 3-fold cross-validation,
        - integer, to specify the number of folds.
        - An object to be used as a cross-validation generator.
        - An iterable yielding train/test splits.
        For integer/None inputs, if the estimator is a classifier and ``y`` is
        either binary or multiclass,
        :class:`sklearn.model_selection.StratifiedKFold` is used. In all
        other cases, :class:`sklearn.model_selection.KFold` is used.
        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.
    scoring : string, callable or None, optional, default: None
        A string (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.
    n_jobs : integer, optional
        Number of jobs to run in parallel (default 1).
    pre_dispatch : integer or string, optional
        Number of predispatched jobs for parallel execution (default is
        all). The option can reduce the allocated memory. The string can
        be an expression like '2*n_jobs'.
    verbose : integer, optional
        Controls the verbosity: the higher, the more messages.
    Returns
    -------
    train_scores : array, shape (n_ticks, n_cv_folds)
        Scores on training sets.
    test_scores : array, shape (n_ticks, n_cv_folds)
        Scores on test set.
    Notes
    -----
    See
    :ref:`examples/model_selection/plot_validation_curve.py
    <sphx_glr_auto_examples_model_selection_plot_validation_curve.py>`
    """
    X, y = indexable(X, y)
    cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
    scorer = check_scoring(estimator, scoring=scoring)
    parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
                        verbose=verbose)
    # One job per (fold, parameter value) pair; the estimator is refit each
    # time with the candidate value installed via ``parameters``.
    out = parallel(delayed(_fit_and_score)(
        estimator, X, y, scorer, train, test, verbose,
        parameters={param_name: v}, fit_params=None, return_train_score=True)
        for train, test in cv for v in param_range)
    # Keep only (train_score, test_score), then reshape to
    # (score_kind, param_value, fold) so out[0]/out[1] are the matrices.
    out = np.asarray(out)[:, :2]
    n_params = len(param_range)
    n_cv_folds = out.shape[0] // n_params
    out = out.reshape(n_cv_folds, n_params, 2).transpose((2, 1, 0))
    return out[0], out[1]
| mit |
CalebBell/thermo | tests/test_datasheet.py | 1 | 8705 | # -*- coding: utf-8 -*-
'''Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2016, Caleb Bell <Caleb.Andrew.Bell@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.'''
import pytest
import pandas as pd
from thermo.datasheet import *
from thermo.chemical import Chemical
# These tests slow down implementation of new methods too much.
#@pytest.mark.meta_Chemical
#def test_tabulate_solid():
# df = tabulate_solid('sodium hydroxide', pts=2)
# df_as_dict = {'Constant-pressure heat capacity, J/kg/K': {496.14999999999998: 1267.9653086278533, 596.14999999999998: 1582.2714391628249}, 'Density, kg/m^3': {496.14999999999998: 2130.0058046853483, 596.14999999999998: 2130.0058046853483}}
# pd.util.testing.assert_frame_equal(pd.DataFrame(df_as_dict), pd.DataFrame(df.to_dict()))
#
#
#@pytest.mark.meta_Chemical
#def test_tabulate_gas():
# df = tabulate_gas('hexane', pts=2)
# df_as_dict = {'Constant-pressure heat capacity, J/kg/K': {178.07499999999999: 1206.4098393032568, 507.60000000000002: 2551.4044899160472}, 'Viscosity, Pa*S': {178.07499999999999: 3.6993265691382959e-06, 507.60000000000002: 1.0598974706090609e-05}, 'Isentropic exponent': {178.07499999999999: 1.0869273799268073, 507.60000000000002: 1.0393018803424154}, 'Joule-Thompson expansion coefficient, K/Pa': {178.07499999999999: 0.00016800664986363302, 507.60000000000002: 7.8217064543503734e-06}, 'Isobaric expansion, 1/K': {178.07499999999999: 0.015141550023997695, 507.60000000000002: 0.0020523335027846585}, 'Prandtl number': {178.07499999999999: 0.69678226644585661, 507.60000000000002: 0.74170212695888871}, 'Density, kg/m^3': {178.07499999999999: 8.3693048957953522, 507.60000000000002: 2.0927931856300876}, 'Constant-volume heat capacity, J/kg/K': {178.07499999999999: 1109.9268098154776, 507.60000000000002: 2454.9214604282679}, 'Thermal diffusivity, m^2/s': {178.07499999999999: 6.3436058798806709e-07, 507.60000000000002: 6.8282280730497638e-06}, 'Thermal conductivity, W/m/K': {178.07499999999999: 0.0064050194540236464, 507.60000000000002: 0.036459746670141478}}
# pd.util.testing.assert_frame_equal(pd.DataFrame(df_as_dict), pd.DataFrame(df.to_dict()))
#
#
#@pytest.mark.meta_Chemical
#def test_tabulate_liq():
# df = tabulate_liq('hexane', Tmin=280, Tmax=350, pts=2)
# df_as_dict = {'Constant-pressure heat capacity, J/kg/K': {280.0: 2199.5376248501448, 350.0: 2509.3959378687496}, 'Viscosity, Pa*S': {280.0: 0.0003595695325135477, 350.0: 0.00018618849649397316}, 'Saturation pressure, Pa': {280.0: 8624.370564055087, 350.0: 129801.09838575375}, 'Joule-Thompson expansion coefficient, K/Pa': {280.0: 3.4834926941752087e-05, 350.0: 3.066272687922139e-05}, 'Surface tension, N/m': {280.0: 0.019794991465879444, 350.0: 0.01261221127458579}, 'Prandtl number': {280.0: 6.2861632870484234, 350.0: 4.5167171403747597}, 'Isobaric expansion, 1/K': {280.0: 0.001340989794772991, 350.0: 0.0016990766161286714}, 'Density, kg/m^3': {280.0: 671.28561912698535, 350.0: 606.36768482956563}, 'Thermal diffusivity, m^2/s': {280.0: 8.5209866345631262e-08, 350.0: 6.7981994628212491e-08}, 'Heat of vaporization, J/kg': {280.0: 377182.42886698805, 350.0: 328705.97080247721}, 'PermittivityLiquid': {280.0: 1.8865000000000001, 350.0: 1.802808}, 'Thermal conductivity, W/m/K': {280.0: 0.12581389941664639, 350.0: 0.10344253187860687}}
# pd.util.testing.assert_frame_equal(pd.DataFrame(df_as_dict), pd.DataFrame(df.to_dict()))
#
#
#@pytest.mark.meta_Chemical
#def test_constants():
# # TODO: Hsub again so that works
# df = tabulate_constants('hexane')
# df_as_dict = {'Heat of vaporization at Tb, J/mol': {'hexane': 28862.311605415733}, 'Time-weighted average exposure limit': {'hexane': "(50.0, 'ppm')"}, 'Tc, K': {'hexane': 507.60000000000002}, 'Short-term exposure limit': {'hexane': 'None'}, 'Molecular Diameter, Angstrom': {'hexane': 5.6184099999999999}, 'Zc': {'hexane': 0.26376523052422041}, 'Tm, K': {'hexane': 178.07499999999999}, 'Heat of fusion, J/mol': {'hexane': 13080.0}, 'Tb, K': {'hexane': 341.87}, 'Stockmayer parameter, K': {'hexane': 434.75999999999999}, 'MW, g/mol': {'hexane': 86.175359999999998}, 'Refractive index': {'hexane': 1.3727}, 'rhoC, kg/m^3': {'hexane': 234.17217391304345}, 'Heat of formation, J/mol': {'hexane': -166950.0}, 'Pc, Pa': {'hexane': 3025000.0}, 'Lower flammability limit, fraction': {'hexane': 0.01}, 'logP': {'hexane': 4.0}, 'Upper flammability limit, fraction': {'hexane': 0.08900000000000001}, 'Dipole moment, debye': {'hexane': 0.0}, 'Triple temperature, K': {'hexane': 177.84}, 'Acentric factor': {'hexane': 0.29749999999999999}, 'Triple pressure, Pa': {'hexane': 1.1747772750450831}, 'Autoignition temperature, K': {'hexane': 498.14999999999998}, 'Vc, m^3/mol': {'hexane': 0.000368}, 'CAS': {'hexane': '110-54-3'}, 'Formula': {'hexane': 'C6H14'}, 'Flash temperature, K': {'hexane': 251.15000000000001}, 'Heat of sublimation, J/mol': {'hexane': None}}
#
# pd.util.testing.assert_frame_equal(pd.DataFrame(df_as_dict), pd.DataFrame(df.to_dict()))
#
# df = tabulate_constants(['hexane', 'toluene'], full=True, vertical=True)
# df_as_dict = {'hexane': {'Electrical conductivity, S/m': 1e-16, 'Global warming potential': None, 'InChI key': 'VLKZOEOYAKHREP-UHFFFAOYSA-N', 'Heat of vaporization at Tb, J/mol': 28862.311605415733, 'Time-weighted average exposure limit': "(50.0, 'ppm')", 'Tc, K': 507.6, 'Short-term exposure limit': 'None', 'Molecular Diameter, Angstrom': 5.61841, 'Formula': 'C6H14', 'InChI': 'C6H14/c1-3-5-6-4-2/h3-6H2,1-2H3', 'Parachor': 272.1972168105559, 'Heat of fusion, J/mol': 13080.0, 'Tb, K': 341.87, 'Stockmayer parameter, K': 434.76, 'IUPAC name': 'hexane', 'Refractive index': 1.3727, 'Tm, K': 178.075, 'solubility parameter, Pa^0.5': 14848.17694628013, 'Heat of formation, J/mol': -166950.0, 'Pc, Pa': 3025000.0, 'Lower flammability limit, fraction': 0.01, 'Vc, m^3/mol': 0.000368, 'Upper flammability limit, fraction': 0.08900000000000001, 'Dipole moment, debye': 0.0, 'MW, g/mol': 86.17536, 'Acentric factor': 0.2975, 'rhoC, kg/m^3': 234.17217391304345, 'Zc': 0.2637652305242204, 'Triple pressure, Pa': 1.1747772750450831, 'Autoignition temperature, K': 498.15, 'CAS': '110-54-3', 'smiles': 'CCCCCC', 'Flash temperature, K': 251.15, 'Ozone depletion potential': None, 'logP': 4.0, 'Heat of sublimation, J/mol': None, 'Triple temperature, K': 177.84}, 'toluene': {'Electrical conductivity, S/m': 1e-12, 'Global warming potential': None, 'InChI key': 'YXFVVABEGXRONW-UHFFFAOYSA-N', 'Heat of vaporization at Tb, J/mol': 33233.94544167449, 'Time-weighted average exposure limit': "(20.0, 'ppm')", 'Tc, K': 591.75, 'Short-term exposure limit': 'None', 'Molecular Diameter, Angstrom': 5.4545, 'Formula': 'C7H8', 'InChI': 'C7H8/c1-7-5-3-2-4-6-7/h2-6H,1H3', 'Parachor': 246.76008384965857, 'Heat of fusion, J/mol': 6639.9999999999991, 'Tb, K': 383.75, 'Stockmayer parameter, K': 350.74, 'IUPAC name': 'methylbenzene', 'Refractive index': 1.4941, 'Tm, K': 179.2, 'solubility parameter, Pa^0.5': 18242.232319337778, 'Heat of formation, J/mol': 50170.0, 'Pc, Pa': 4108000.0, 'Lower flammability limit, 
fraction': 0.01, 'Vc, m^3/mol': 0.00031600000000000004, 'Upper flammability limit, fraction': 0.078, 'Dipole moment, debye': 0.33, 'MW, g/mol': 92.13842, 'Acentric factor': 0.257, 'rhoC, kg/m^3': 291.5772784810126, 'Zc': 0.26384277925843774, 'Triple pressure, Pa': 0.04217711401906639, 'Autoignition temperature, K': 803.15, 'CAS': '108-88-3', 'smiles': 'CC1=CC=CC=C1', 'Flash temperature, K': 277.15, 'Ozone depletion potential': None, 'logP': 2.73, 'Heat of sublimation, J/mol': None, 'Triple temperature, K': 179.2}}
# pd.util.testing.assert_frame_equal(pd.DataFrame(df_as_dict), pd.DataFrame(df.to_dict()))
| mit |
readevalprint/zipline | tests/finance/test_slippage.py | 3 | 16080 | #
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for finance.slippage
"""
import datetime
import pytz
from unittest import TestCase
from nose_parameterized import parameterized
import pandas as pd
from zipline.finance.slippage import VolumeShareSlippage
from zipline.protocol import Event, DATASOURCE_TYPE
from zipline.finance.blotter import Order
class SlippageTestCase(TestCase):
    """Unit tests for the ``VolumeShareSlippage`` transaction model.

    Every test drives the model with hand-built trade ``Event``s and open
    ``Order``s and verifies the transactions (if any) that come out.
    """

    def test_volume_share_slippage(self):
        """A plain market order fills at a volume-adjusted price."""
        event = Event({
            'volume': 200,
            'type': 4,
            'price': 3.0,
            'datetime': datetime.datetime(
                2006, 1, 5, 14, 31, tzinfo=pytz.utc),
            'high': 3.15,
            'low': 2.85,
            'sid': 133,
            'source_id': 'test_source',
            'close': 3.0,
            'dt': datetime.datetime(2006, 1, 5, 14, 31, tzinfo=pytz.utc),
            'open': 3.0,
        })

        slippage_model = VolumeShareSlippage()

        open_orders = [
            Order(dt=datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
                  amount=100,
                  filled=0,
                  sid=133)
        ]

        orders_txns = list(slippage_model.simulate(event, open_orders))

        # The 100-share order only partially fills (50 shares) against the
        # bar's 200-share volume, at a price pushed up by the fill.
        self.assertEqual(len(orders_txns), 1)
        _, txn = orders_txns[0]

        expected_txn = {
            'price': float(3.01875),
            'dt': datetime.datetime(2006, 1, 5, 14, 31, tzinfo=pytz.utc),
            'amount': int(50),
            'sid': int(133),
            'commission': None,
            'type': DATASOURCE_TYPE.TRANSACTION,
            'order_id': open_orders[0].id,
        }

        self.assertIsNotNone(txn)
        # TODO: Make expected_txn a Transaction object and ensure there
        # is a __eq__ for that class.
        self.assertEqual(expected_txn, txn.__dict__)

    def test_orders_limit(self):
        """Limit orders fill only when the event price satisfies the limit."""
        events = self.gen_trades()
        slippage_model = VolumeShareSlippage()

        # Long limit at 3.5: no fill while the bar trades at 4.0.
        open_orders = [
            Order(**{
                'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
                'amount': 100,
                'filled': 0,
                'sid': 133,
                'limit': 3.5})
        ]

        orders_txns = list(slippage_model.simulate(events[2], open_orders))
        self.assertEqual(len(orders_txns), 0)

        # Long limit at 3.5: fills once the price comes down to 3.5.
        open_orders = [
            Order(**{
                'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
                'amount': 100,
                'filled': 0,
                'sid': 133,
                'limit': 3.5})
        ]

        orders_txns = list(slippage_model.simulate(events[3], open_orders))
        self.assertEqual(len(orders_txns), 1)
        txn = orders_txns[0][1]

        expected_txn = {
            'price': float(3.500875),
            'dt': datetime.datetime(2006, 1, 5, 14, 34, tzinfo=pytz.utc),
            'amount': int(100),
            'sid': int(133),
            'order_id': open_orders[0].id,
        }

        self.assertIsNotNone(txn)
        for key, value in expected_txn.items():
            self.assertEqual(value, txn[key])

        # Short limit at 3.5: no fill while the bar trades at 3.0.
        open_orders = [
            Order(**{
                'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
                'amount': -100,
                'filled': 0,
                'sid': 133,
                'limit': 3.5})
        ]

        orders_txns = list(slippage_model.simulate(events[0], open_orders))
        self.assertEqual(len(orders_txns), 0)

        # Short limit at 3.5: fills once the price rises to 3.5.
        open_orders = [
            Order(**{
                'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
                'amount': -100,
                'filled': 0,
                'sid': 133,
                'limit': 3.5})
        ]

        orders_txns = list(slippage_model.simulate(events[1], open_orders))
        self.assertEqual(len(orders_txns), 1)
        _, txn = orders_txns[0]

        expected_txn = {
            'price': float(3.499125),
            'dt': datetime.datetime(2006, 1, 5, 14, 32, tzinfo=pytz.utc),
            'amount': int(-100),
            'sid': int(133),
        }

        self.assertIsNotNone(txn)
        for key, value in expected_txn.items():
            self.assertEqual(value, txn[key])

    STOP_ORDER_CASES = {
        # Stop orders can be long/short and have their price greater or
        # less than the stop.
        #
        # A stop being reached is conditional on the order direction.
        # Long orders reach the stop when the price is greater than the stop.
        # Short orders reach the stop when the price is less than the stop.
        #
        # Which leads to the following 4 cases:
        #
        #                    |   long   |   short  |
        # | price > stop     |          |          |
        # | price < stop     |          |          |
        #
        # Currently the slippage module acts according to the following table,
        # where 'X' represents triggering a transaction
        #                    |   long   |   short  |
        # | price > stop     |          |     X    |
        # | price < stop     |    X     |          |
        #
        # However, the following behavior *should* be followed.
        #
        #                    |   long   |   short  |
        # | price > stop     |    X     |          |
        # | price < stop     |          |     X    |
        'long | price gt stop': {
            'order': {
                'dt': pd.Timestamp('2006-01-05 14:30', tz='UTC'),
                'amount': 100,
                'filled': 0,
                'sid': 133,
                'stop': 3.5
            },
            'event': {
                'dt': pd.Timestamp('2006-01-05 14:31', tz='UTC'),
                'volume': 2000,
                'price': 4.0,
                'high': 3.15,
                'low': 2.85,
                'sid': 133,
                'close': 4.0,
                'open': 3.5
            },
            'expected': {
                'transaction': {
                    'price': 4.001,
                    'dt': pd.Timestamp('2006-01-05 14:31', tz='UTC'),
                    'amount': 100,
                    'sid': 133,
                }
            }
        },
        'long | price lt stop': {
            'order': {
                'dt': pd.Timestamp('2006-01-05 14:30', tz='UTC'),
                'amount': 100,
                'filled': 0,
                'sid': 133,
                'stop': 3.6
            },
            'event': {
                'dt': pd.Timestamp('2006-01-05 14:31', tz='UTC'),
                'volume': 2000,
                'price': 3.5,
                'high': 3.15,
                'low': 2.85,
                'sid': 133,
                'close': 3.5,
                'open': 4.0
            },
            'expected': {
                'transaction': None
            }
        },
        'short | price gt stop': {
            'order': {
                'dt': pd.Timestamp('2006-01-05 14:30', tz='UTC'),
                'amount': -100,
                'filled': 0,
                'sid': 133,
                'stop': 3.4
            },
            'event': {
                'dt': pd.Timestamp('2006-01-05 14:31', tz='UTC'),
                'volume': 2000,
                'price': 3.5,
                'high': 3.15,
                'low': 2.85,
                'sid': 133,
                'close': 3.5,
                'open': 3.0
            },
            'expected': {
                'transaction': None
            }
        },
        'short | price lt stop': {
            'order': {
                'dt': pd.Timestamp('2006-01-05 14:30', tz='UTC'),
                'amount': -100,
                'filled': 0,
                'sid': 133,
                'stop': 3.5
            },
            'event': {
                'dt': pd.Timestamp('2006-01-05 14:31', tz='UTC'),
                'volume': 2000,
                'price': 3.0,
                'high': 3.15,
                'low': 2.85,
                'sid': 133,
                'close': 3.0,
                'open': 3.0
            },
            'expected': {
                'transaction': {
                    'price': 2.99925,
                    'dt': pd.Timestamp('2006-01-05 14:31', tz='UTC'),
                    'amount': -100,
                    'sid': 133,
                }
            }
        },
    }

    @parameterized.expand([
        (name, case['order'], case['event'], case['expected'])
        for name, case in STOP_ORDER_CASES.items()
    ])
    def test_orders_stop(self, name, order_data, event_data, expected):
        """Stop orders trigger (or not) per the STOP_ORDER_CASES table."""
        order = Order(**order_data)
        event = Event(initial_values=event_data)

        slippage_model = VolumeShareSlippage()

        try:
            # next() works on both Python 2 and 3, unlike the removed
            # Python-2-only generator .next() method.
            _, txn = next(iter(slippage_model.simulate(event, [order])))
        except StopIteration:
            txn = None

        if expected['transaction'] is None:
            self.assertIsNone(txn)
        else:
            self.assertIsNotNone(txn)

            for key, value in expected['transaction'].items():
                self.assertEqual(value, txn[key])

    def test_orders_stop_limit(self):
        """Stop-limit orders require both the stop and the limit to be met."""
        events = self.gen_trades()
        slippage_model = VolumeShareSlippage()

        # Long stop 4.0 / limit 3.0: neither the 4.0 bar nor the 3.5 bar
        # produces a fill.
        open_orders = [
            Order(**{
                'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
                'amount': 100,
                'filled': 0,
                'sid': 133,
                'stop': 4.0,
                'limit': 3.0})
        ]

        orders_txns = list(slippage_model.simulate(events[2], open_orders))
        self.assertEqual(len(orders_txns), 0)

        orders_txns = list(slippage_model.simulate(events[3], open_orders))
        self.assertEqual(len(orders_txns), 0)

        # Long stop 4.0 / limit 3.5: no fill on the 4.0 bar, fills on the
        # following 3.5 bar.
        open_orders = [
            Order(**{
                'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
                'amount': 100,
                'filled': 0,
                'sid': 133,
                'stop': 4.0,
                'limit': 3.5})
        ]

        orders_txns = list(slippage_model.simulate(events[2], open_orders))
        self.assertEqual(len(orders_txns), 0)

        orders_txns = list(slippage_model.simulate(events[3], open_orders))
        self.assertEqual(len(orders_txns), 1)
        _, txn = orders_txns[0]

        expected_txn = {
            'price': float(3.500875),
            'dt': datetime.datetime(2006, 1, 5, 14, 34, tzinfo=pytz.utc),
            'amount': int(100),
            'sid': int(133),
        }

        for key, value in expected_txn.items():
            self.assertEqual(value, txn[key])

        # Short stop 3.0 / limit 4.0: no fill on either of the first two bars.
        open_orders = [
            Order(**{
                'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
                'amount': -100,
                'filled': 0,
                'sid': 133,
                'stop': 3.0,
                'limit': 4.0})
        ]

        orders_txns = list(slippage_model.simulate(events[0], open_orders))
        self.assertEqual(len(orders_txns), 0)

        orders_txns = list(slippage_model.simulate(events[1], open_orders))
        self.assertEqual(len(orders_txns), 0)

        # Short stop 3.0 / limit 3.5: no fill on the 3.0 bar, fills on the
        # following 3.5 bar.
        open_orders = [
            Order(**{
                'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
                'amount': -100,
                'filled': 0,
                'sid': 133,
                'stop': 3.0,
                'limit': 3.5})
        ]

        orders_txns = list(slippage_model.simulate(events[0], open_orders))
        self.assertEqual(len(orders_txns), 0)

        orders_txns = list(slippage_model.simulate(events[1], open_orders))
        self.assertEqual(len(orders_txns), 1)
        _, txn = orders_txns[0]

        expected_txn = {
            'price': float(3.499125),
            'dt': datetime.datetime(2006, 1, 5, 14, 32, tzinfo=pytz.utc),
            'amount': int(-100),
            'sid': int(133),
        }

        for key, value in expected_txn.items():
            self.assertEqual(value, txn[key])

    def gen_trades(self):
        """Build five one-minute trade events for sid 133.

        Prices walk 3.0 -> 3.5 -> 4.0 -> 3.5 -> 3.0 starting at
        2006-01-05 14:31 UTC; every bar has volume 2000, high 3.15 and
        low 2.85, and 'close' mirrors 'price'.
        """
        # (minute, price/close, open)
        bar_specs = [
            (31, 3.0, 3.0),
            (32, 3.5, 3.0),
            (33, 4.0, 3.5),
            (34, 3.5, 4.0),
            (35, 3.0, 3.5),
        ]

        events = []
        for minute, price, open_px in bar_specs:
            bar_dt = datetime.datetime(2006, 1, 5, 14, minute,
                                       tzinfo=pytz.utc)
            events.append(Event({
                'volume': 2000,
                'type': 4,
                'price': price,
                'datetime': bar_dt,
                'high': 3.15,
                'low': 2.85,
                'sid': 133,
                'source_id': 'test_source',
                'close': price,
                'dt': bar_dt,
                'open': open_px,
            }))
        return events
| apache-2.0 |
MPIBGC-TEE/CompartmentalSystems | src/CompartmentalSystems/bins/TsMassFieldsPerTimeStep.py | 1 | 3427 | # vim: set ff=unix expandtab ts=4 sw=4:
import numpy as np
from sympy import latex
from .FieldsPerTimeStep import FieldsPerTimeStep
from matplotlib import cm
import matplotlib.pyplot as plt
class TsMassFieldsPerTimeStep(FieldsPerTimeStep):
    """A sequence of per-time-step mass fields binned by system age (Ts).

    ``tss`` (the Ts bin width), ``times``, ``t_min`` and ``t_max`` are
    provided by the ``FieldsPerTimeStep`` base class; each element of the
    sequence exposes its binned masses as ``arr``.
    """

    @property
    def max_number_of_Ts_entries(self):
        # Largest number of system-age bins found in any time step.
        return max(v.number_of_Ts_entries for v in self)

    @property
    def max_Ts(self):
        # Largest representable system age: bin width times maximum bin count.
        return self.tss * self.max_number_of_Ts_entries

    # fixme: treatment of units
    def plot_bins(self, ax, mr=None, pool=None):
        """Plot every time step's age distribution as 3D bins on ``ax``."""
        z_max = max(vec.arr.max() for vec in self)
        for i, vec in enumerate(self):
            vec.plot_bins(ax, self.times[i])
        ax.set_ylim(self.t_min, self.t_max * 1.05)
        ax.set_xlim(0, self.max_Ts * 1.05)
        ax.set_zlim(0, z_max)
        ax.invert_xaxis()
        # fixme mm 31.01.2018: the model objects of CompartmentalSystems
        # have no units, therefore axis labelling via set_ticks_and_labels
        # does not work here and is left disabled.
        # self.set_ticks_and_labels(ax, mr, pool)

    def plot_surface(self, ax, mr=None, pool=None):
        """Plot the whole collection as one surface over (Ts, time) on ``ax``."""
        times = self.times
        tss = self.tss
        Ts_max_index = max(vec.shape[0] for vec in self)
        z_max = max(vec.arr.max() for vec in self)

        system_ages = np.arange(Ts_max_index) * tss
        X, Y = np.meshgrid(
            system_ages,
            times,
            indexing="ij"  # see help of meshgrid
        )

        # Pad ragged fields with NaN so that time steps with fewer age
        # bins simply leave gaps in the surface.  (np.nan instead of the
        # np.NaN alias, which was removed in NumPy 2.0.)
        Z = np.full((Ts_max_index, len(times)), np.nan)
        for i, vec in enumerate(self):
            Z[:vec.shape[0], i] = vec.arr

        ax.plot_surface(
            X, Y, Z,
            rstride=1,
            cstride=1,
            linewidth=0.0,
            cmap=cm.coolwarm,
            norm=plt.Normalize(0, z_max),
            antialiased=False
        )
        ax.set_xlim(0, self.max_Ts * 1.05)
        ax.set_ylim(self.t_min, (self.t_max + tss) * 1.05)
        ax.invert_yaxis()
        # NOTE(review): the local definition of set_ticks_and_labels was
        # commented out in this class, so this call relies on the base
        # class providing it -- confirm against FieldsPerTimeStep.
        self.set_ticks_and_labels(ax, mr, pool)
| mit |
anntzer/scikit-learn | asv_benchmarks/benchmarks/model_selection.py | 12 | 2511 | from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV, cross_val_score
from .common import Benchmark, Estimator, Predictor
from .datasets import _synth_classification_dataset
from .utils import make_gen_classif_scorers
class CrossValidationBenchmark(Benchmark):
    """
    Benchmarks for Cross Validation.
    """

    timeout = 20000

    param_names = ['n_jobs']
    params = (Benchmark.n_jobs_vals,)

    def setup(self, *params):
        (n_jobs,) = params

        # Synthetic classification problem shared by all benchmark methods.
        self.X, self.X_val, self.y, self.y_val = _synth_classification_dataset(
            n_samples=50000, n_features=100)

        self.clf = RandomForestClassifier(
            n_estimators=50, max_depth=10, random_state=0)

        # More folds for the large benchmark configuration.
        if Benchmark.data_size == 'large':
            n_folds = 16
        else:
            n_folds = 4
        self.cv_params = {'n_jobs': n_jobs, 'cv': n_folds}

    def time_crossval(self, *args):
        cross_val_score(self.clf, self.X, self.y, **self.cv_params)

    def peakmem_crossval(self, *args):
        cross_val_score(self.clf, self.X, self.y, **self.cv_params)

    def track_crossval(self, *args):
        # Track the mean CV score as a plain float for asv.
        scores = cross_val_score(self.clf, self.X, self.y, **self.cv_params)
        return float(scores.mean())
class GridSearchBenchmark(Predictor, Estimator, Benchmark):
    """
    Benchmarks for GridSearch.
    """

    timeout = 20000

    param_names = ['n_jobs']
    params = (Benchmark.n_jobs_vals,)

    def setup_cache(self):
        super().setup_cache()

    def make_data(self, params):
        # Synthetic classification problem used for the grid search.
        return _synth_classification_dataset(n_samples=10000, n_features=100)

    def make_estimator(self, params):
        (n_jobs,) = params

        clf = RandomForestClassifier(random_state=0)

        # Wider grid for the large benchmark configuration.
        if Benchmark.data_size == 'large':
            param_grid = {'n_estimators': [10, 25, 50, 100, 500],
                          'max_depth': [5, 10, None],
                          'max_features': [0.1, 0.4, 0.8, 1.0]}
        else:
            param_grid = {'n_estimators': [10, 25, 50],
                          'max_depth': [5, 10],
                          'max_features': [0.1, 0.4, 0.8]}

        return GridSearchCV(clf, param_grid, n_jobs=n_jobs, cv=4)

    def make_scorers(self):
        make_gen_classif_scorers(self)
| bsd-3-clause |
jorik041/scikit-learn | examples/mixture/plot_gmm_sin.py | 248 | 2747 | """
=================================
Gaussian Mixture Model Sine Curve
=================================
This example highlights the advantages of the Dirichlet Process:
complexity control and dealing with sparse data. The dataset is formed
by 100 points loosely spaced following a noisy sine curve. The fit by
the GMM class, using the expectation-maximization algorithm to fit a
mixture of 10 Gaussian components, finds too-small components and very
little structure. The fits by the Dirichlet process, however, show
that the model can either learn a global structure for the data (small
alpha) or easily interpolate to finding relevant local structure
(large alpha), never falling into the problems shown by the GMM class.
"""
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
from sklearn.externals.six.moves import xrange
# Number of samples per component
n_samples = 100

# Generate a random sample loosely following a noisy sine curve.
np.random.seed(0)
X = np.zeros((n_samples, 2))
step = 4 * np.pi / n_samples

for i in xrange(X.shape[0]):
    x = i * step - 6
    X[i, 0] = x + np.random.normal(0, 0.1)
    X[i, 1] = 3 * (np.sin(x) + np.random.normal(0, .2))

color_iter = itertools.cycle(['r', 'g', 'b', 'c', 'm'])

for plot_num, (clf, title) in enumerate([
        (mixture.GMM(n_components=10, covariance_type='full', n_iter=100),
         "Expectation-maximization"),
        (mixture.DPGMM(n_components=10, covariance_type='full', alpha=0.01,
                       n_iter=100),
         "Dirichlet Process,alpha=0.01"),
        (mixture.DPGMM(n_components=10, covariance_type='diag', alpha=100.,
                       n_iter=100),
         "Dirichlet Process,alpha=100.")]):

    clf.fit(X)
    splot = plt.subplot(3, 1, 1 + plot_num)
    Y_ = clf.predict(X)
    # `comp` indexes mixture components; the previous code reused `i`
    # here, shadowing the outer subplot loop variable.
    for comp, (mean, covar, color) in enumerate(zip(
            clf.means_, clf._get_covars(), color_iter)):
        v, w = linalg.eigh(covar)
        u = w[0] / linalg.norm(w[0])
        # as the DP will not use every component it has access to
        # unless it needs it, we shouldn't plot the redundant
        # components.
        if not np.any(Y_ == comp):
            continue
        plt.scatter(X[Y_ == comp, 0], X[Y_ == comp, 1], .8, color=color)

        # Plot an ellipse to show the Gaussian component
        angle = np.arctan(u[1] / u[0])
        angle = 180 * angle / np.pi  # convert to degrees
        ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
        ell.set_clip_box(splot.bbox)
        ell.set_alpha(0.5)
        splot.add_artist(ell)

    plt.xlim(-6, 4 * np.pi - 6)
    plt.ylim(-5, 5)
    plt.title(title)
    plt.xticks(())
    plt.yticks(())
plt.show()
| bsd-3-clause |
costypetrisor/scikit-learn | sklearn/cluster/spectral.py | 18 | 18027 | # -*- coding: utf-8 -*-
"""Algorithms for spectral clustering"""
# Author: Gael Varoquaux gael.varoquaux@normalesup.org
# Brian Cheung
# Wei LI <kuantkid@gmail.com>
# License: BSD 3 clause
import warnings
import numpy as np
from ..base import BaseEstimator, ClusterMixin
from ..utils import check_random_state, as_float_array
from ..utils.validation import check_array
from ..utils.extmath import norm
from ..metrics.pairwise import pairwise_kernels
from ..neighbors import kneighbors_graph
from ..manifold import spectral_embedding
from .k_means_ import k_means
def discretize(vectors, copy=True, max_svd_restarts=30, n_iter_max=20,
               random_state=None):
    """Search for a partition matrix (clustering) which is closest to the
    eigenvector embedding.

    Parameters
    ----------
    vectors : array-like, shape: (n_samples, n_clusters)
        The embedding space of the samples.

    copy : boolean, optional, default: True
        Whether to copy vectors, or perform in-place normalization.

    max_svd_restarts : int, optional, default: 30
        Maximum number of attempts to restart SVD if convergence fails

    n_iter_max : int, optional, default: 30
        Maximum number of iterations to attempt in rotation and partition
        matrix search if machine precision convergence is not reached

    random_state: int seed, RandomState instance, or None (default)
        A pseudo random number generator used for the initialization of the
        of the rotation matrix

    Returns
    -------
    labels : array of integers, shape: n_samples
        The labels of the clusters.

    References
    ----------

    - Multiclass spectral clustering, 2003
      Stella X. Yu, Jianbo Shi
      http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf

    Notes
    -----

    The eigenvector embedding is used to iteratively search for the
    closest discrete partition.  First, the eigenvector embedding is
    normalized to the space of partition matrices. An optimal discrete
    partition matrix closest to this normalized embedding multiplied by
    an initial rotation is calculated.  Fixing this discrete partition
    matrix, an optimal rotation matrix is calculated.  These two
    calculations are performed until convergence.  The discrete partition
    matrix is returned as the clustering solution.  Used in spectral
    clustering, this method tends to be faster and more robust to random
    initialization than k-means.

    """

    from scipy.sparse import csc_matrix
    from scipy.linalg import LinAlgError

    random_state = check_random_state(random_state)

    vectors = as_float_array(vectors, copy=copy)

    eps = np.finfo(float).eps
    n_samples, n_components = vectors.shape

    # Normalize the eigenvectors to an equal length of a vector of ones.
    # Reorient the eigenvectors to point in the negative direction with
    # respect to the first element.  This may have to do with constraining
    # the eigenvectors to lie in a specific quadrant to make the
    # discretization search easier.
    norm_ones = np.sqrt(n_samples)
    for i in range(vectors.shape[1]):
        vectors[:, i] = (vectors[:, i] / norm(vectors[:, i])) \
            * norm_ones
        if vectors[0, i] != 0:
            vectors[:, i] = -1 * vectors[:, i] * np.sign(vectors[0, i])

    # Normalize the rows of the eigenvectors.  Samples should lie on the
    # unit hypersphere centered at the origin.  This transforms the samples
    # in the embedding space to the space of partition matrices.
    vectors = vectors / np.sqrt((vectors ** 2).sum(axis=1))[:, np.newaxis]

    svd_restarts = 0
    has_converged = False

    # If there is an exception we try to randomize and rerun SVD again
    # do this max_svd_restarts times.
    while (svd_restarts < max_svd_restarts) and not has_converged:

        # Initialize first column of rotation matrix with a row of the
        # eigenvectors
        rotation = np.zeros((n_components, n_components))
        rotation[:, 0] = vectors[random_state.randint(n_samples), :].T

        # To initialize the rest of the rotation matrix, find the rows
        # of the eigenvectors that are as orthogonal to each other as
        # possible
        c = np.zeros(n_samples)
        for j in range(1, n_components):
            # Accumulate c to ensure row is as orthogonal as possible to
            # previous picks as well as current one
            c += np.abs(np.dot(vectors, rotation[:, j - 1]))
            rotation[:, j] = vectors[c.argmin(), :].T

        last_objective_value = 0.0
        n_iter = 0

        while not has_converged:
            n_iter += 1

            t_discrete = np.dot(vectors, rotation)

            labels = t_discrete.argmax(axis=1)
            vectors_discrete = csc_matrix(
                (np.ones(len(labels)), (np.arange(0, n_samples), labels)),
                shape=(n_samples, n_components))

            t_svd = vectors_discrete.T * vectors

            try:
                U, S, Vh = np.linalg.svd(t_svd)
            except LinAlgError:
                # BUGFIX: count the restart when the SVD *fails*.  The
                # counter used to be incremented on the success path, so a
                # repeatedly failing SVD was never bounded by
                # max_svd_restarts and the outer loop could spin forever.
                svd_restarts += 1
                print("SVD did not converge, randomizing and trying again")
                break

            ncut_value = 2.0 * (n_samples - S.sum())
            if ((abs(ncut_value - last_objective_value) < eps) or
                    (n_iter > n_iter_max)):
                has_converged = True
            else:
                # otherwise calculate rotation and continue
                last_objective_value = ncut_value
                rotation = np.dot(Vh.T, U.T)

    if not has_converged:
        raise LinAlgError('SVD did not converge')
    return labels
def spectral_clustering(affinity, n_clusters=8, n_components=None,
                        eigen_solver=None, random_state=None, n_init=10,
                        eigen_tol=0.0, assign_labels='kmeans'):
    """Cluster samples via a projection onto the normalized graph Laplacian.

    Spectral clustering works well when individual clusters are highly
    non-convex (e.g. nested circles on the 2D plane), i.e. when a center
    and spread do not describe a cluster adequately.  If ``affinity`` is
    the adjacency matrix of a graph, this computes normalized graph cuts.

    Parameters
    ----------
    affinity : array-like or sparse matrix, shape: (n_samples, n_samples)
        Affinity matrix describing the relationship between samples.
        **Must be symmetric** (e.g. a graph adjacency matrix, a heat
        kernel of the pairwise distances, or a symmetric k-nearest
        neighbours connectivity matrix).
    n_clusters : integer, optional
        Number of clusters to extract.
    n_components : integer, optional, default is n_clusters
        Number of eigenvectors used for the spectral embedding.
    eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
        Eigenvalue decomposition strategy.  AMG requires pyamg; it can be
        faster on very large sparse problems but may be unstable.
    random_state : int seed, RandomState instance, or None (default)
        Seeds the lobpcg decomposition (when eigen_solver == 'amg') and
        the K-Means initialization.
    n_init : int, optional, default: 10
        Number of K-Means restarts; the run with the lowest inertia wins.
    eigen_tol : float, optional, default: 0.0
        Stopping criterion for the arpack eigendecomposition.
    assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
        Strategy used to turn the embedding into labels.  'discretize' is
        less sensitive to random initialization than 'kmeans' (see the
        'Multiclass spectral clustering' paper).

    Returns
    -------
    labels : array of integers, shape: n_samples
        The labels of the clusters.

    References
    ----------
    - Normalized cuts and image segmentation, 2000 (Shi, Malik)
    - A Tutorial on Spectral Clustering, 2007 (von Luxburg)
    - Multiclass spectral clustering, 2003 (Yu, Shi)

    Notes
    ------
    The graph should contain only one connected component; otherwise the
    results make little sense.  This solves the normalized cut for k=2.
    """
    valid_strategies = ('kmeans', 'discretize')
    if assign_labels not in valid_strategies:
        raise ValueError(
            "The 'assign_labels' parameter should be "
            "'kmeans' or 'discretize', but '%s' was given" % assign_labels)
    random_state = check_random_state(random_state)
    if n_components is None:
        n_components = n_clusters
    # Embed the samples using the first eigenvectors of the normalized
    # Laplacian, then assign cluster labels in that embedding space.
    embedding = spectral_embedding(affinity,
                                   n_components=n_components,
                                   eigen_solver=eigen_solver,
                                   random_state=random_state,
                                   eigen_tol=eigen_tol,
                                   drop_first=False)
    if assign_labels == 'kmeans':
        _, labels, _ = k_means(embedding, n_clusters,
                               random_state=random_state, n_init=n_init)
    else:
        labels = discretize(embedding, random_state=random_state)
    return labels
class SpectralClustering(BaseEstimator, ClusterMixin):
    """Spectral clustering: embed with the normalized Laplacian, then cluster.

    Very effective when clusters are highly non-convex (e.g. nested circles
    on the 2D plane), i.e. when a measure of center and spread does not
    describe the clusters well.  With a graph adjacency matrix as affinity,
    this method finds normalized graph cuts.

    ``fit`` constructs the affinity matrix from the data: either a kernel
    such as the Gaussian (RBF) kernel of the euclidean distances
    ``np.exp(-gamma * d(X, X) ** 2)``, a k-nearest neighbors connectivity
    matrix, or -- with ``affinity='precomputed'`` -- a user-supplied matrix.

    Parameters
    -----------
    n_clusters : integer, optional
        The dimension of the projection subspace.
    affinity : string, array-like or callable, default 'rbf'
        One of 'nearest_neighbors', 'precomputed', 'rbf' or any kernel
        supported by `sklearn.metrics.pairwise_kernels`.  Only kernels
        that produce similarity scores (non-negative values that increase
        with similarity) should be used; this property is not checked.
    gamma : float
        Scaling factor of RBF, polynomial, exponential chi^2 and sigmoid
        affinity kernels.  Ignored for ``affinity='nearest_neighbors'``.
    degree : float, default=3
        Degree of the polynomial kernel.  Ignored by other kernels.
    coef0 : float, default=1
        Zero coefficient for polynomial and sigmoid kernels.  Ignored by
        other kernels.
    n_neighbors : integer
        Number of neighbors for the nearest-neighbors affinity.  Ignored
        for ``affinity='rbf'``.
    eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
        Eigenvalue decomposition strategy.  AMG requires pyamg; it can be
        faster on very large sparse problems but may be unstable.
    random_state : int seed, RandomState instance, or None (default)
        Seeds the lobpcg decomposition (when eigen_solver == 'amg') and
        the K-Means initialization.
    n_init : int, optional, default: 10
        Number of K-Means restarts; the best run by inertia is kept.
    eigen_tol : float, optional, default: 0.0
        Stopping criterion for the arpack eigendecomposition.
    assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
        Label-assignment strategy in the embedding space; 'discretize' is
        less sensitive to random initialization than 'kmeans'.
    kernel_params : dictionary of string to any, optional
        Keyword arguments for a kernel passed as a callable.  Ignored by
        other kernels.

    Attributes
    ----------
    affinity_matrix_ : array-like, shape (n_samples, n_samples)
        Affinity matrix used for clustering.  Available only after
        calling ``fit``.
    labels_ :
        Labels of each point.

    Notes
    -----
    A distance matrix (0 = identical, large = very dissimilar) can be
    turned into a well-suited similarity matrix by applying the Gaussian
    (RBF, heat) kernel ``np.exp(- X ** 2 / (2. * delta ** 2))``, or by
    taking a symmetric k-nearest neighbors connectivity matrix.  If the
    pyamg package is installed it is used, which greatly speeds up
    computation.

    References
    ----------
    - Normalized cuts and image segmentation, 2000 (Shi, Malik)
    - A Tutorial on Spectral Clustering, 2007 (von Luxburg)
    - Multiclass spectral clustering, 2003 (Yu, Shi)
    """
    def __init__(self, n_clusters=8, eigen_solver=None, random_state=None,
                 n_init=10, gamma=1., affinity='rbf', n_neighbors=10,
                 eigen_tol=0.0, assign_labels='kmeans', degree=3, coef0=1,
                 kernel_params=None):
        # Store hyper-parameters verbatim (sklearn estimator contract:
        # attribute names must match constructor argument names).
        self.n_clusters = n_clusters
        self.eigen_solver = eigen_solver
        self.random_state = random_state
        self.n_init = n_init
        self.gamma = gamma
        self.affinity = affinity
        self.n_neighbors = n_neighbors
        self.eigen_tol = eigen_tol
        self.assign_labels = assign_labels
        self.degree = degree
        self.coef0 = coef0
        self.kernel_params = kernel_params

    def fit(self, X, y=None):
        """Build the affinity matrix for X, then spectrally cluster it.

        Parameters
        ----------
        X : array-like or sparse matrix, shape (n_samples, n_features)
            OR, if affinity==`precomputed`, a precomputed affinity
            matrix of shape (n_samples, n_samples)
        """
        X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
                        dtype=np.float64)
        if X.shape[0] == X.shape[1] and self.affinity != "precomputed":
            # A square input that is not declared precomputed is likely a
            # holdover from the old API, so warn the caller.
            warnings.warn("The spectral clustering API has changed. ``fit``"
                          "now constructs an affinity matrix from data. To use"
                          " a custom affinity matrix, "
                          "set ``affinity=precomputed``.")
        if self.affinity == 'precomputed':
            self.affinity_matrix_ = X
        elif self.affinity == 'nearest_neighbors':
            connectivity = kneighbors_graph(X, n_neighbors=self.n_neighbors,
                                            include_self=True)
            # Symmetrize the kNN connectivity graph.
            self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)
        else:
            params = self.kernel_params
            if params is None:
                params = {}
            if not callable(self.affinity):
                params['gamma'] = self.gamma
                params['degree'] = self.degree
                params['coef0'] = self.coef0
            self.affinity_matrix_ = pairwise_kernels(
                X, metric=self.affinity, filter_params=True, **params)
        random_state = check_random_state(self.random_state)
        self.labels_ = spectral_clustering(
            self.affinity_matrix_,
            n_clusters=self.n_clusters,
            eigen_solver=self.eigen_solver,
            random_state=random_state,
            n_init=self.n_init,
            eigen_tol=self.eigen_tol,
            assign_labels=self.assign_labels)
        return self

    @property
    def _pairwise(self):
        # Tells sklearn's meta-tools that X is sample-by-sample when the
        # affinity matrix is supplied directly.
        return self.affinity == "precomputed"
| bsd-3-clause |
johnbachman/ras_model | gxp_exchange.py | 6 | 2587 | from rasmodel.scenarios.default import model
import numpy as np
from matplotlib import pyplot as plt
from pysb.integrate import Solver
from pysb import *
from tbidbaxlipo.util import fitting

# Zero out all initial conditions
for ic in model.initial_conditions:
    ic[1].value = 0
KRAS = model.monomers['KRAS']
GDP = model.monomers['GDP']
GTP = model.monomers['GTP']
# Total mant-nucleotide bound to KRAS (mGTP- plus mGDP-bound observables).
Expression('KRAS_mGXP_', model.observables['KRAS_mGTP_closed_'] +
                         model.observables['KRAS_mGDP_closed_'])
# Add an initial condition for KRAS with GDP or GTP pre-bound
# (Concentration units in nM)
Initial(KRAS(gtp=1, gap=None, gef=None, p_loop=None, s1s2='closed', CAAX=None,
             mutant='WT') % GDP(p=1, label='n'),
        Parameter('KRAS_WT_GDP_0', 0.))
Initial(KRAS(gtp=1, gap=None, gef=None, p_loop=None, s1s2='closed', CAAX=None,
             mutant='G13D') % GDP(p=1, label='n'),
        Parameter('KRAS_G13D_GDP_0', 0.))
Initial(KRAS(gtp=1, gap=None, gef=None, p_loop=None, s1s2='closed', CAAX=None,
             mutant='WT') % GTP(p=1, label='n'),
        Parameter('KRAS_WT_GTP_0', 0.))
Initial(KRAS(gtp=1, gap=None, gef=None, p_loop=None, s1s2='closed', CAAX=None,
             mutant='G13D') % GTP(p=1, label='n'),
        Parameter('KRAS_G13D_GTP_0', 0.))
plt.ion()
# First simulate the data from Figure 1A (GDP exchange)
# WT, GDP:
model.parameters['mGDP_0'].value = 1500.
model.parameters['KRAS_WT_GDP_0'].value = 750.
t = np.linspace(0, 1000, 1000) # 1000 seconds
sol = Solver(model, t)
sol.run()
plt.figure()
plt.plot(t, sol.yexpr['KRAS_mGXP_'], label='WT')
# G13D, GDP:
model.parameters['KRAS_WT_GDP_0'].value = 0
model.parameters['KRAS_G13D_GDP_0'].value = 750.
sol.run()
plt.plot(t, sol.yexpr['KRAS_mGXP_'], label='G13D')
plt.legend(loc='lower right')
plt.title('GDP exchange')
plt.xlabel('Time (s)')
plt.ylabel('[Bound mGDP] (nM)')
plt.show()
# Now simulate the data from Figure 1B (GTP exchange)
# WT, GTP
# Reset GDP species and pre-load GTP instead before re-running.
model.parameters['mGDP_0'].value = 0.
model.parameters['mGTP_0'].value = 1500.
model.parameters['KRAS_WT_GDP_0'].value = 0.
model.parameters['KRAS_G13D_GDP_0'].value = 0.
model.parameters['KRAS_WT_GTP_0'].value = 750.
model.parameters['KRAS_G13D_GTP_0'].value = 0.
sol.run()
plt.figure()
plt.plot(t, sol.yexpr['KRAS_mGXP_'], label='WT')
# G13D, GTP
model.parameters['KRAS_WT_GTP_0'].value = 0.
model.parameters['KRAS_G13D_GTP_0'].value = 750.
sol.run()
plt.plot(t, sol.yexpr['KRAS_mGXP_'], label='G13D')
plt.legend(loc='lower right')
plt.title('GTP exchange')
plt.xlabel('Time (s)')
plt.ylabel('[Bound mGTP] (nM)')
plt.show()
| mit |
bsipocz/ginga | ginga/mplw/MplHelp.py | 2 | 2894 | #
# MplHelp.py -- help classes for Matplotlib drawing
#
# Eric Jeschke (eric@naoj.org)
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
from ginga import colors
import matplotlib.textpath as textpath
class MplContext(object):
    """Per-draw rendering state for Matplotlib-backed Ginga drawing.

    Accumulates Matplotlib keyword arguments (``kwdargs``) from Ginga
    canvas objects, and offers small color/font/text helpers.
    """

    def __init__(self, axes):
        self.axes = axes
        self.kwdargs = dict()

    def set_canvas(self, axes):
        """Point this context at a different set of axes."""
        self.axes = axes

    def init(self, **kwdargs):
        """Reset the accumulated keyword args to exactly `kwdargs`."""
        self.kwdargs = dict(kwdargs)

    def set(self, **kwdargs):
        """Merge `kwdargs` into the accumulated keyword args."""
        self.kwdargs.update(kwdargs)

    def update_fill(self, obj):
        """Pick up fill/facecolor settings from `obj`, if it has any."""
        if not hasattr(obj, 'fill'):
            return
        self.kwdargs['fill'] = obj.fill
        # When not filling, the face falls back to the outline color.
        source = obj.fillcolor if obj.fill else obj.color
        self.kwdargs['facecolor'] = self.get_color(source)

    def update_line(self, obj):
        """Pick up line color/width/style settings from `obj`."""
        self.kwdargs['color'] = self.get_color(obj.color)
        for attr in ('linewidth', 'linestyle'):
            if hasattr(obj, attr):
                self.kwdargs[attr] = getattr(obj, attr)

    def update_patch(self, obj):
        """Pick up patch (outline + fill) settings from `obj`."""
        self.kwdargs['edgecolor'] = self.get_color(obj.color)
        for attr in ('linewidth', 'linestyle'):
            if hasattr(obj, attr):
                self.kwdargs[attr] = getattr(obj, attr)
        self.update_fill(obj)

    def update_text(self, obj):
        """Pick up text color/font settings from `obj`."""
        self.kwdargs['color'] = self.get_color(obj.color)
        for attr, key in (('font', 'family'), ('fontsize', 'size')):
            if hasattr(obj, attr):
                self.kwdargs[key] = getattr(obj, attr)

    def get_color(self, color):
        """Normalize `color` (name or RGB 3-tuple) to an (r, g, b) tuple.

        Unrecognized inputs default to white.
        """
        if isinstance(color, str):
            rgb = colors.lookup_color(color)
        elif isinstance(color, tuple):
            # Assumed to be a 3-tuple of RGB floats between 0 and 1.
            rgb = color
        else:
            rgb = (1.0, 1.0, 1.0)
        r, g, b = rgb
        return (r, g, b)

    def get_font(self, name, size, color):
        """Build a Matplotlib font dict for family `name` at `size`."""
        return dict(color=self.get_color(color), family=name, size=size,
                    transform=None)

    def text_extents(self, text, font):
        """Approximate (width, height) of `text` rendered with `font`.

        Not completely accurate because it depends a lot on the renderer
        used, but that is complicated under Mpl.
        """
        size = font.get('size', 16)
        name = font.get('name', 'Sans')
        tpath = textpath.TextPath((0, 0), text, size=size, prop=name)
        bbox = tpath.get_extents()
        return (bbox.width, bbox.height)
#END
| bsd-3-clause |
tornadoalert/Retinet | models/basic_model.py | 1 | 17510 | import os
import numpy as np
import pandas as p
import theano.tensor as T
import lasagne as nn
from lasagne.layers import dnn
from lasagne.nonlinearities import LeakyRectify
from layers import ApplyNonlinearity
from utils import (oversample_set,
get_img_ids_from_dir,
softmax,
split_data)
from losses import (log_loss,
accuracy_loss,
quad_kappa_loss,
quad_kappa_log_hybrid_loss,
quad_kappa_log_hybrid_loss_clipped)
# Main dir used to load files.
base_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..')
output_size = 512 # 120
batch_size = 64 # * 2 # * 4
input_height, input_width = (output_size, output_size)
output_dim = 5
num_channels = 3
config_name = 'local_normal_' + str(output_size)
prefix_train = '/media/user/Extended_ext4/train_ds2_crop/'
prefix_test = '/media/user/Extended_ext4/test_ds2_crop/'
# ( image
# level
# 0 25810
# 1 2443
# 2 5292
# 3 873
# 4 708, image
# level
# 0 0.734783
# 1 0.069550
# 2 0.150658
# 3 0.024853
# 4 0.020156)
chunk_size = 128 # * 2 # * 2
num_chunks_train = 30000 // chunk_size * 200
validate_every = num_chunks_train // 50
output_every = num_chunks_train // 400
save_every = num_chunks_train // 200
buffer_size = 3
num_generators = 3
default_transfo_params = {'rotation': True, 'rotation_range': (0, 360),
'contrast': True, 'contrast_range': (0.7, 1.3),
'brightness': True, 'brightness_range': (0.7, 1.3),
'color': True, 'color_range': (0.7, 1.3),
'flip': True, 'flip_prob': 0.5,
'crop': True, 'crop_prob': 0.4,
'crop_w': 0.03, 'crop_h': 0.04,
'keep_aspect_ratio': False,
'resize_pad': False,
'zoom': True, 'zoom_prob': 0.5,
'zoom_range': (0.00, 0.05),
'paired_transfos': False,
'rotation_expand': False,
'crop_height': False,
'extra_width_crop': True,
'rotation_before_resize': False,
'crop_after_rotation': True}
no_transfo_params = {'keep_aspect_ratio':
default_transfo_params['keep_aspect_ratio'],
'resize_pad':
default_transfo_params['resize_pad'],
'extra_width_crop':
default_transfo_params['extra_width_crop'],
'rotation_before_resize':
default_transfo_params['rotation_before_resize'],
'crop_height':
default_transfo_params['crop_height'],
}
pixel_based_norm = False
paired_transfos = True
SEED = 1
sample_coefs = [0, 7, 3, 22, 25]
# [0, 7, 3, 22, 25] gives more even [0.25. 0.19. 0.20. 0.19. 0.18] distribution
switch_chunk = 60 * num_chunks_train // 100
leakiness = 0.5
obj_loss = 'kappalogclipped'
y_pow = 1
# Kappalog
log_scale = 0.50
log_offset = 0.50
# Kappalogclipped
log_cutoff = 0.80
lambda_reg = 0.0002
lr_scale = 6.00
LEARNING_RATE_SCHEDULE = {
1: 0.0010 * lr_scale,
num_chunks_train // 100 * 30: 0.00050 * lr_scale,
num_chunks_train // 100 * 50: 0.00010 * lr_scale,
num_chunks_train // 100 * 85: 0.00001 * lr_scale,
num_chunks_train // 100 * 95: 0.000001 * lr_scale,
}
momentum = 0.90
def build_model():
    """Assemble the two-eye retinopathy CNN as a Lasagne layer graph.

    VGG-style stack of convolutions (32 -> 64 -> 128 -> 256 filters) with
    3x3/stride-2 max-pooling between stages, followed by maxout dense
    layers.  The representations of the two eyes of one patient (adjacent
    rows in the batch) are concatenated together with the image dimensions
    before the final dense layers, and a 5-way softmax is applied per eye.

    Returns:
        (l_out, l_ins): the softmax output layer and the list of input
        layers [images, image dimensions].
    """
    layers = []
    # Auxiliary input: per-image (height, width) metadata.
    l_in_imgdim = nn.layers.InputLayer(
        shape=(batch_size, 2),
        name='imgdim'
    )
    l_in1 = nn.layers.InputLayer(
        shape=(batch_size, num_channels, input_width, input_height),
        name='images'
    )
    layers.append(l_in1)
    # cuDNN-backed convolution / pooling implementations.
    Conv2DLayer = dnn.Conv2DDNNLayer
    MaxPool2DLayer = dnn.MaxPool2DDNNLayer
    DenseLayer = nn.layers.DenseLayer
    # Stage 1: 7x7/stride-2 stem conv, pool, then two 3x3 convs (32 filters).
    l_conv = Conv2DLayer(layers[-1],
                         num_filters=32, filter_size=(7, 7), stride=(2, 2),
                         border_mode='same',
                         nonlinearity=LeakyRectify(leakiness),
                         W=nn.init.Orthogonal(1.0), b=nn.init.Constant(0.1),
                         untie_biases=True)
    layers.append(l_conv)
    l_pool = MaxPool2DLayer(layers[-1], pool_size=(3, 3), stride=(2, 2))
    layers.append(l_pool)
    l_conv = Conv2DLayer(layers[-1],
                         num_filters=32, filter_size=(3, 3), stride=(1, 1),
                         border_mode='same',
                         nonlinearity=LeakyRectify(leakiness),
                         W=nn.init.Orthogonal(1.0), b=nn.init.Constant(0.1),
                         untie_biases=True)
    layers.append(l_conv)
    l_conv = Conv2DLayer(layers[-1],
                         num_filters=32, filter_size=(3, 3), stride=(1, 1),
                         border_mode='same',
                         nonlinearity=LeakyRectify(leakiness),
                         W=nn.init.Orthogonal(1.0), b=nn.init.Constant(0.1),
                         untie_biases=True)
    layers.append(l_conv)
    # l_conv = Conv2DLayer(layers[-1],
    #                      num_filters=32, filter_size=(3, 3), stride=(1, 1),
    #                      border_mode='same',
    #                      nonlinearity=LeakyRectify(leakiness),
    #                      W=nn.init.Orthogonal(1.0), b=nn.init.Constant(0.1),
    #                      untie_biases=True,
    #                      learning_rate_scale=1.0)
    # layers.append(l_conv)
    l_pool = MaxPool2DLayer(layers[-1], pool_size=(3, 3), stride=(2, 2))
    layers.append(l_pool)
    # Stage 2: two 3x3 convs with 64 filters, then pool.
    l_conv = Conv2DLayer(layers[-1],
                         num_filters=64, filter_size=(3, 3), stride=(1, 1),
                         border_mode='same',
                         nonlinearity=LeakyRectify(leakiness),
                         W=nn.init.Orthogonal(1.0), b=nn.init.Constant(0.1),
                         untie_biases=True)
    layers.append(l_conv)
    l_conv = Conv2DLayer(layers[-1],
                         num_filters=64, filter_size=(3, 3), stride=(1, 1),
                         border_mode='same',
                         nonlinearity=LeakyRectify(leakiness),
                         W=nn.init.Orthogonal(1.0), b=nn.init.Constant(0.1),
                         untie_biases=True)
    layers.append(l_conv)
    # l_conv = Conv2DLayer(layers[-1],
    #                      num_filters=64, filter_size=(3, 3), stride=(1, 1),
    #                      border_mode='same',
    #                      nonlinearity=LeakyRectify(leakiness),
    #                      W=nn.init.Orthogonal(1.0), b=nn.init.Constant(0.1),
    #                      untie_biases=True,
    #                      learning_rate_scale=1.0)
    # layers.append(l_conv)
    l_pool = MaxPool2DLayer(layers[-1], pool_size=(3, 3), stride=(2, 2))
    layers.append(l_pool)
    # Stage 3: four 3x3 convs with 128 filters, then pool.
    l_conv = Conv2DLayer(layers[-1],
                         num_filters=128, filter_size=(3, 3), stride=(1, 1),
                         border_mode='same',
                         nonlinearity=LeakyRectify(leakiness),
                         W=nn.init.Orthogonal(1.0), b=nn.init.Constant(0.1),
                         untie_biases=True)
    layers.append(l_conv)
    l_conv = Conv2DLayer(layers[-1],
                         num_filters=128, filter_size=(3, 3), stride=(1, 1),
                         border_mode='same',
                         nonlinearity=LeakyRectify(leakiness),
                         W=nn.init.Orthogonal(1.0), b=nn.init.Constant(0.1),
                         untie_biases=True)
    layers.append(l_conv)
    l_conv = Conv2DLayer(layers[-1],
                         num_filters=128, filter_size=(3, 3), stride=(1, 1),
                         border_mode='same',
                         nonlinearity=LeakyRectify(leakiness),
                         W=nn.init.Orthogonal(1.0), b=nn.init.Constant(0.1),
                         untie_biases=True)
    layers.append(l_conv)
    l_conv = Conv2DLayer(layers[-1],
                         num_filters=128, filter_size=(3, 3), stride=(1, 1),
                         border_mode='same',
                         nonlinearity=LeakyRectify(leakiness),
                         W=nn.init.Orthogonal(1.0), b=nn.init.Constant(0.1),
                         untie_biases=True)
    layers.append(l_conv)
    l_pool = MaxPool2DLayer(layers[-1], pool_size=(3, 3), stride=(2, 2))
    layers.append(l_pool)
    # Stage 4: four 3x3 convs with 256 filters, then the final pool.
    l_conv = Conv2DLayer(layers[-1],
                         num_filters=256, filter_size=(3, 3), stride=(1, 1),
                         border_mode='same',
                         nonlinearity=LeakyRectify(leakiness),
                         W=nn.init.Orthogonal(1.0), b=nn.init.Constant(0.1),
                         untie_biases=True)
    layers.append(l_conv)
    l_conv = Conv2DLayer(layers[-1],
                         num_filters=256, filter_size=(3, 3), stride=(1, 1),
                         border_mode='same',
                         nonlinearity=LeakyRectify(leakiness),
                         W=nn.init.Orthogonal(1.0), b=nn.init.Constant(0.1),
                         untie_biases=True)
    layers.append(l_conv)
    l_conv = Conv2DLayer(layers[-1],
                         num_filters=256, filter_size=(3, 3), stride=(1, 1),
                         border_mode='same',
                         nonlinearity=LeakyRectify(leakiness),
                         W=nn.init.Orthogonal(1.0), b=nn.init.Constant(0.1),
                         untie_biases=True)
    layers.append(l_conv)
    l_conv = Conv2DLayer(layers[-1],
                         num_filters=256, filter_size=(3, 3), stride=(1, 1),
                         border_mode='same',
                         nonlinearity=LeakyRectify(leakiness),
                         W=nn.init.Orthogonal(1.0), b=nn.init.Constant(0.1),
                         untie_biases=True)
    layers.append(l_conv)
    l_pool = MaxPool2DLayer(layers[-1], pool_size=(3, 3), stride=(2, 2),
                            name='coarse_last_pool')
    layers.append(l_pool)
    # Per-eye dense representation with maxout (FeaturePoolLayer halves
    # the 1024 units to 512 by taking pairwise maxima).
    layers.append(nn.layers.DropoutLayer(layers[-1], p=0.5))
    layers.append(DenseLayer(layers[-1],
                             nonlinearity=None,
                             num_units=1024,
                             W=nn.init.Orthogonal(1.0),
                             b=nn.init.Constant(0.1),
                             name='first_fc_0'))
    l_pool = nn.layers.FeaturePoolLayer(layers[-1],
                                        pool_size=2,
                                        pool_function=T.max)
    layers.append(l_pool)
    l_first_repr = layers[-1]
    # Append the image-dimension metadata to the per-eye representation.
    l_coarse_repr = nn.layers.concat([l_first_repr,
                                      l_in_imgdim])
    layers.append(l_coarse_repr)
    # Combine representations of both eyes.
    layers.append(
        nn.layers.ReshapeLayer(layers[-1], shape=(batch_size // 2, -1)))
    layers.append(nn.layers.DropoutLayer(layers[-1], p=0.5))
    layers.append(nn.layers.DenseLayer(layers[-1],
                                       nonlinearity=None,
                                       num_units=1024,
                                       W=nn.init.Orthogonal(1.0),
                                       b=nn.init.Constant(0.1),
                                       name='combine_repr_fc'))
    l_pool = nn.layers.FeaturePoolLayer(layers[-1],
                                        pool_size=2,
                                        pool_function=T.max)
    layers.append(l_pool)
    # One output head per patient row: output_dim (5) logits per eye.
    l_hidden = nn.layers.DenseLayer(nn.layers.DropoutLayer(layers[-1], p=0.5),
                                    num_units=output_dim * 2,
                                    nonlinearity=None,  # No softmax yet!
                                    W=nn.init.Orthogonal(1.0),
                                    b=nn.init.Constant(0.1))
    layers.append(l_hidden)
    # Reshape back to 5.
    layers.append(nn.layers.ReshapeLayer(layers[-1],
                                         shape=(batch_size, 5)))
    # Apply softmax.
    l_out = ApplyNonlinearity(layers[-1],
                              nonlinearity=nn.nonlinearities.softmax)
    layers.append(l_out)
    l_ins = [l_in1, l_in_imgdim]
    return l_out, l_ins
config_name += '_' + obj_loss
if obj_loss == 'kappalog':
config_name += '_logscale_' + str(log_scale)
config_name += '_logoffset_' + str(log_offset)
elif 'kappalogclipped' in obj_loss:
config_name += '_logcutoff_' + str(log_cutoff)
config_name += '_reg_' + str(lambda_reg)
if obj_loss == 'log':
loss_function = log_loss
elif obj_loss == 'acc':
loss_function = accuracy_loss
elif obj_loss == 'kappa':
def loss(y, t):
return quad_kappa_loss(y, t,
y_pow=y_pow)
loss_function = loss
elif obj_loss == 'kappalog':
def loss(y, t):
return quad_kappa_log_hybrid_loss(y, t,
y_pow=y_pow,
log_scale=log_scale,
log_offset=log_offset)
loss_function = loss
elif obj_loss == 'kappalogclipped':
def loss(y, t):
return quad_kappa_log_hybrid_loss_clipped(y, t,
y_pow=y_pow,
log_cutoff=log_cutoff)
loss_function = loss
else:
raise ValueError("Need obj_loss param.")
def build_objective(l_out, loss_function=loss_function,
                    lambda_reg=lambda_reg):
    """Wrap `loss_function` with an L2 weight penalty into a Lasagne Objective.

    The penalty is the sum of squares of every regularizable parameter in
    the network rooted at `l_out`, scaled by `lambda_reg`.
    """
    regularizable_params = nn.layers.get_all_params(l_out, regularizable=True)
    # Symbolic L2 term, built once at graph-construction time.
    l2_penalty = sum(T.sum(p ** 2) for p in regularizable_params)

    def regularized_loss(y, t):
        return loss_function(y, t) + lambda_reg * l2_penalty

    return nn.objectives.Objective(l_out, loss_function=regularized_loss)
train_labels = p.read_csv(os.path.join(base_dir, 'data/trainLabels.csv'))
labels_split = p.DataFrame(list(train_labels.image.str.split('_')),
columns=['id', 'eye'])
labels_split['level'] = train_labels.level
labels_split['id'] = labels_split['id'].astype('int')
id_train, y_train, id_valid, y_valid = split_data(train_labels, labels_split,
valid_size=10,
SEED=SEED, pairs=True)
# Change train dataset to oversample other labels.
# Total sizes:
# ( image
# level
# 0 25810
# 1 2443
# 2 5292
# 3 873
# 4 708, image
# level
# 0 0.734783
# 1 0.069550
# 2 0.150658
# 3 0.024853
# 4 0.020156)
pl_enabled = True
pl_softmax_temp = 2
pl_train_coef = 5
pl_train_fn = ''
pl_test_fn = ''
pl_log = False
if pl_enabled:
pl_test_fn = '2015_07_14_072437_6_log_mean.npy'
test_preds = np.load(os.path.join(base_dir, 'preds/' + pl_test_fn))
if test_preds.shape[1] > 5:
test_preds = test_preds[:, -5:].astype('float32')
np.set_printoptions(precision=3)
np.set_printoptions(suppress=True)
print "Orig test preds:\n\n"
print test_preds[:10], '\n'
if np.mean(test_preds) > 0:
# These are not log probs, so can do log.
test_preds = np.log(1e-5 + test_preds)
test_probs = softmax(test_preds, temp=pl_softmax_temp)
# Double ids so only every other.
images_test_pl = sorted(set(get_img_ids_from_dir(prefix_test)))
labels_test_pl = test_probs.reshape((-1, 2, 5))
print "\nImages for test:\n\n"
print images_test_pl[:5], '\n'
print "\nLabels for test:\n\n"
print labels_test_pl[:5], '\n'
# Add only test PL for now.
id_train_oversample, labels_train_oversample = oversample_set(id_train,
y_train,
sample_coefs)
# First train set.
images_train_0 = list(id_train_oversample) + images_test_pl
labels_train_pl = np.eye(5)[
list(labels_train_oversample.flatten().astype('int32'))
].reshape((-1, 2, 5))
labels_train_0 = np.vstack([labels_train_pl,
labels_test_pl]).astype('float32')
# Second train set.
images_train_1 = list(id_train) * pl_train_coef + images_test_pl
labels_train_pl = np.eye(5)[
list(y_train.flatten().astype('int32')) * pl_train_coef
].reshape((-1, 2, 5))
labels_train_1 = np.vstack([labels_train_pl,
labels_test_pl]).astype('float32')
images_train_eval = id_train[:]
labels_train_eval = y_train[:].astype('int32')
images_valid_eval = id_valid[:]
labels_valid_eval = y_valid[:].astype('int32')
else:
id_train_oversample, labels_train_oversample = oversample_set(id_train,
y_train,
sample_coefs)
images_train_0 = id_train_oversample
labels_train_0 = labels_train_oversample.astype('int32')
images_train_1, labels_train_1 = id_train, y_train.astype('int32')
images_train_eval = id_train[:]
labels_train_eval = y_train[:].astype('int32')
images_valid_eval = id_valid[:]
labels_valid_eval = y_valid[:].astype('int32')
| mit |
ytsvetko/adjective_supersense_classifier | src/classify.py | 1 | 9656 | #!/usr/bin/env python2.7
from __future__ import division
from itertools import izip_longest
import sys
import codecs
import argparse
import json
from sklearn import svm
from sklearn import cross_validation
from sklearn import ensemble
from collections import Counter
import numpy as np
parser = argparse.ArgumentParser()
parser.add_argument("--train_features", required=True)
parser.add_argument("--train_labels", required=True)
parser.add_argument("--priors", default="{}",
help="A dictionary of {'Label':prior, ... } or a string 'balanced'. " +
"Asumes a prior of 1.0 for unspecified labels.")
parser.add_argument("--test_features")
parser.add_argument("--test_predicted_labels_out")
parser.add_argument("--golden_labels")
parser.add_argument("--classifier", default="RandomForest")
parser.add_argument("--num_cross_validation_folds", default=0, type=int)
parser.add_argument("--write_posterior_probabilities", default=False, action='store_true')
parser.add_argument("--write_class_confidence", default=False, action='store_true')
args = parser.parse_args()
class StrEnumerator(object):
  """Bidirectional mapping between string labels and dense integer ids."""

  def __init__(self):
    self.d = {}       # label -> id
    self.labels = []  # id -> label

  def __len__(self):
    return len(self.labels)

  def StrToNum(self, label):
    """Return the id for `label`, assigning the next free id on first use."""
    try:
      return self.d[label]
    except KeyError:
      num = len(self.labels)
      self.d[label] = num
      self.labels.append(label)
      return num

  def NumToStr(self, num):
    """Return the label registered under id `num`."""
    return self.labels[num]
class Classifier(object):
  def __init__(self):
    # Underlying sklearn estimator, chosen via the --classifier flag.
    self.classifier = self._NewClassifier()
    # Enumerators mapping string labels / feature names to dense integer ids.
    self.labels_enum = StrEnumerator()
    self.features_enum = StrEnumerator()
def _NewClassifier(self):
if args.classifier == "RandomForest":
return ensemble.RandomForestClassifier(n_estimators=400)
elif args.classifier == "SVM":
return svm.SVC(kernel='rbf', gamma=0.0001, C=1000000.0)
elif args.classifier == "GBRT":
return ensemble.GradientBoostingClassifier(
n_estimators=100, learning_rate=0.2, max_depth=2, random_state=0)
else:
assert False, ("Unknown classifier:", args.classifier)
def FromSparse(self, Xsparse):
X = []
for sparse in Xsparse:
full = [sparse.get(i, 0.0) for i in xrange(len(self.features_enum))]
X.append(full)
return X
  def Train(self, Xsparse, y, priors):
    """Fit the classifier on densified features with label-prior sample weights."""
    X = self.FromSparse(Xsparse)
    # Each sample is weighted by the prior of its label (1.0 if unspecified).
    label_weights = np.array([priors.get(self.labels_enum.NumToStr(l), 1.0) for l in y])
    self.classifier.fit(X, y, label_weights)
  def Predict(self, Xsparse):
    """Return (predicted label ids, per-class posterior probabilities)."""
    X = self.FromSparse(Xsparse)
    return self.classifier.predict(X), self.classifier.predict_proba(X)
def LoadCregFeatFile(self, feat_filename):
Xsparse = []
instances = []
for features_line in codecs.open(feat_filename, "r", "utf-8"):
instance, feat_str = features_line.strip().split("\t")
instances.append(instance)
features = json.loads(feat_str)
Xsparse.append(dict({(self.features_enum.StrToNum(feat), val) for feat, val in features.iteritems()}))
return Xsparse, instances
def LoadCregLabelsFile(self, labels_filename):
y = []
instances = []
for labels_line in codecs.open(labels_filename, "r", "utf-8"):
instance, label = labels_line.strip().split("\t")
instances.append(instance)
y.append(self.labels_enum.StrToNum(label))
return y, instances
def SaveCregFeat(self, instances, x_list, feat_filename):
feat_file = codecs.open(feat_filename, "w", "utf-8")
for instance, x in izip_longest(instances, x_list):
features = {}
for i, val in enumerate(x):
features[self.features_enum.NumToStr(i)] = val
features_str = json.dumps(features, sort_keys=True)
feat_file.write(u"{}\t{}\n".format(instance, features_str))
def SaveCregLabels(self, instances, y_list, y_probabilities, write_class_confidence, labels_filename):
labels_file = codecs.open(labels_filename, "w", "utf-8")
y_prob_iter = [] if y_probabilities is None else y_probabilities
for instance, y, y_prob in izip_longest(instances, y_list, y_prob_iter):
label = self.labels_enum.NumToStr(y)
if y_probabilities is None:
labels_file.write(u"{}\t{}\n".format(instance, label))
else:
prob_dict = {}
for i, prob in enumerate(y_prob):
prob_dict[self.labels_enum.NumToStr(i)] = prob
prob_str = json.dumps(prob_dict, sort_keys=True)
if not write_class_confidence:
labels_file.write(u"{}\t{}\t{}\n".format(instance, label, prob_str))
else:
first, second = sorted(prob_dict.itervalues(), reverse=True)[:2]
confidence = first - second
labels_file.write(u"{}\t{}\t{}\t{}\n".format(instance, label, confidence, prob_str))
def CalcAccuracy(self, predicted_labels, golden_labels):
correct = 0
miss = 0
for p, g in izip_longest(predicted_labels, golden_labels):
if p == g:
correct += 1
else:
miss += 1
return correct / (correct + miss)
def WriteConfusionMatrix(self, predicted_labels, golden_labels, out_file):
    """Render a tab-separated confusion matrix (rows = gold, columns = predicted)."""
    counts = Counter()
    for pred, gold in izip_longest(predicted_labels, golden_labels):
        counts[(self.labels_enum.NumToStr(pred), self.labels_enum.NumToStr(gold))] += 1
    out_file.write("Confusion Matrix:\nCorrect\\Predicted\n")
    ordered = sorted(self.labels_enum.labels)
    out_file.write(u"\t{}".format("\t".join(ordered)))
    for gold_label in ordered:
        row_cells = u"".join(u"\t{:3}".format(counts[(pred_label, gold_label)])
                             for pred_label in ordered)
        out_file.write(u"\n{}".format(gold_label))
        out_file.write(row_cells)
    out_file.write(u"\n")
def CrossValidate(self, Xsparse, y, num_folds, priors):
    """Stratified k-fold cross-validation with per-label training weights.

    Returns a numpy array with one accuracy value per fold.  ``priors`` maps
    label names to training-sample weights (labels absent from the dict get
    weight 1.0).
    """
    def GetAccuracy(test_posteriors, test_y):
        # Accuracy here is top-1 rank accuracy: the first entry of the
        # cumulative rank histogram (i.e. how often the expected label is
        # ranked first by posterior probability).
        def NormalizedHistogram(hist):
            # Cumulative histogram over ranks, normalized by the number of
            # test samples.
            normalized = []
            total = 0
            for index in xrange(len(self.labels_enum.labels)):
                total += hist[index]
                normalized.append(total / len(test_y))
            return normalized
        hist = Counter()
        for posteriors, expected in izip_longest(test_posteriors, test_y):
            # Rank labels by descending posterior probability.
            sorted_posteriors = sorted(enumerate(posteriors), key=lambda x: x[1], reverse=True)
            sorted_labels = [k for k, v in sorted_posteriors]
            try:
                hist[sorted_labels.index(expected)] += 1
            except ValueError:
                pass  # Did not see this label in training.
        normalized_hist = NormalizedHistogram(hist)
        print normalized_hist
        accuracy = normalized_hist[0]
        return accuracy
    accuracy = []
    X = np.array(self.FromSparse(Xsparse))
    for train, test in cross_validation.StratifiedKFold(y, num_folds):
        X_train, X_test, y_train, y_test = X[train], X[test], y[train], y[test]
        # Per-sample weights looked up from the priors by label name.
        label_weights = np.array([priors.get(self.labels_enum.NumToStr(l), 1.0) for l in y_train])
        classifier = self._NewClassifier()
        classifier.fit(X_train, y_train, label_weights)
        posteriors = classifier.predict_proba(X_test)
        accuracy.append(GetAccuracy(posteriors, y_test))
    return np.array(accuracy)
def LibraryCrossValidate(self, Xsparse, y, num_folds, priors):
    """Cross-validate via scikit-learn's built-in scorer.

    ``priors`` is accepted for interface parity with CrossValidate but is
    not honored yet (see TODO in the original).
    """
    # TODO: Support priors.
    dense_X = self.FromSparse(Xsparse)
    return cross_validation.cross_val_score(self.classifier, dense_X, y, cv=num_folds)
def CalculateBalancedPriors(self, y):
    """Return per-label weights inversely related to label frequency.

    Each label's raw weight is ``len(y) - count(label)``; weights are then
    normalized so that they sum to 1.  Keys are label names (via
    ``self.labels_enum``), values the normalized weights.
    """
    label_counts = Counter(y)
    priors = {}
    total_count = 0
    for label, count in label_counts.iteritems():
        weight = len(y) - count
        priors[self.labels_enum.NumToStr(label)] = weight
        total_count += weight
    for name in priors:
        priors[name] /= total_count
    return priors
def main():
    """Train, cross-validate and optionally test a classifier from CREG files.

    NOTE(review): reads configuration from a module-level ``args`` object --
    presumably an argparse namespace parsed before this chunk; confirm.
    """
    classifier = Classifier()
    # Training
    X, x_instances = classifier.LoadCregFeatFile(args.train_features)
    y, y_instances = classifier.LoadCregLabelsFile(args.train_labels)
    # Feature and label files must list instances in the same order; on
    # mismatch, report the first offending row (1-based).
    if x_instances != y_instances:
        for i, (x, y) in enumerate(izip_longest(x_instances, y_instances)):
            assert (x==y), (i+1, x, y)
    print "Num of labels:", len(classifier.labels_enum.labels)
    print "Num training samples:", len(y)
    print "Num of features:", len(classifier.features_enum.labels)
    if args.priors == "balanced":
        priors = classifier.CalculateBalancedPriors(y)
        print "Using balanced priors:", priors
    else:
        # Otherwise args.priors is a JSON dict of label -> weight.
        priors = json.loads(args.priors)
    print("Training")
    if args.num_cross_validation_folds > 0:
        scores = classifier.CrossValidate(X, np.array(y), args.num_cross_validation_folds, priors)
        print("CV Accuracy: {:0.2} (+/- {:0.2}) [{}..{}]".format(
            scores.mean(), scores.std() * 2, scores.min(), scores.max()))
    if args.test_features is None:
        return
    # Testing
    X_test, test_instances = classifier.LoadCregFeatFile(args.test_features)
    classifier.Train(X, y, priors)
    print("Testing")
    test_predicted_labels, test_predicted_probabilities = classifier.Predict(X_test)
    if args.test_predicted_labels_out:
        out_probabilities = test_predicted_probabilities
        if not args.write_posterior_probabilities:
            out_probabilities = None
        classifier.SaveCregLabels(test_instances, test_predicted_labels,
            out_probabilities, args.write_class_confidence, args.test_predicted_labels_out)
    if args.golden_labels:
        # Held-out evaluation against gold labels.
        golden_labels, golden_instances = classifier.LoadCregLabelsFile(args.golden_labels)
        assert (golden_instances == test_instances)
        classifier.WriteConfusionMatrix(test_predicted_labels, golden_labels, sys.stdout)
        accuracy = classifier.CalcAccuracy(test_predicted_labels, golden_labels)
        print "Held-out accuracy:", accuracy

if __name__ == '__main__':
    main()
| gpl-2.0 |
AkademieOlympia/sympy | sympy/interactive/session.py | 43 | 15119 | """Tools for setting up interactive sessions. """
from __future__ import print_function, division
from distutils.version import LooseVersion as V
from sympy.core.compatibility import range
from sympy.external import import_module
from sympy.interactive.printing import init_printing
# Code executed in the new session before handing control to the user:
# imports SymPy, predefines common symbols and enables pretty-printing.
preexec_source = """\
from __future__ import division
from sympy import *
x, y, z, t = symbols('x y z t')
k, m, n = symbols('k m n', integer=True)
f, g, h = symbols('f g h', cls=Function)
init_printing()
"""

# Banner fragment (shown unless quiet=True) echoing the preexec commands.
verbose_message = """\
These commands were executed:
%(source)s
Documentation can be found at http://docs.sympy.org/%(version)s
"""

# Warning printed when IPython was requested but could not be imported.
no_ipython = """\
Couldn't locate IPython. Having IPython installed is greatly recommended.
See http://ipython.scipy.org for more details. If you use Debian/Ubuntu,
just install the 'ipython' package and start isympy again.
"""
def _make_message(ipython=True, quiet=False, source=None):
    """Create a banner for an interactive session.

    The banner names the shell, SymPy/Python versions, architecture and
    runtime flags; unless ``quiet``, it also echoes the startup commands
    (``source``, defaulting to ``preexec_source``) and a documentation link.
    """
    from sympy import __version__ as sympy_version
    from sympy.polys.domains import GROUND_TYPES
    from sympy.utilities.misc import ARCH
    from sympy import SYMPY_DEBUG

    import sys
    import os

    python_version = "%d.%d.%d" % sys.version_info[:3]
    shell_name = "IPython" if ipython else "Python"

    info = ['ground types: %s' % GROUND_TYPES]

    cache = os.getenv('SYMPY_USE_CACHE')
    if cache is not None and cache.lower() == 'no':
        info.append('cache: off')

    if SYMPY_DEBUG:
        info.append('debugging: on')

    message = "%s console for SymPy %s (Python %s-%s) (%s)\n" % (
        shell_name, sympy_version, python_version, ARCH, ', '.join(info))

    if quiet:
        return message

    if source is None:
        source = preexec_source

    # Quote each non-empty startup line like a prompt transcript.
    _source = ''.join(('>>> ' + line + '\n') if line else '\n'
                      for line in source.split('\n')[:-1])

    doc_version = sympy_version
    if 'dev' in doc_version:
        doc_version = "dev"
    else:
        doc_version = "%s.%s.%s/" % tuple(doc_version.split('.')[:3])

    return message + '\n' + verbose_message % {'source': _source,
                                               'version': doc_version}
def int_to_Integer(s):
    """
    Wrap integer literals with Integer.

    This is based on the decistmt example from
    http://docs.python.org/library/tokenize.html.

    Only integer literals are converted.  Float literals are left alone.

    Examples
    ========

    >>> from __future__ import division
    >>> from sympy.interactive.session import int_to_Integer
    >>> from sympy import Integer
    >>> s = '1.2 + 1/2 - 0x12 + a1'
    >>> int_to_Integer(s)
    '1.2 +Integer (1 )/Integer (2 )-Integer (0x12 )+a1 '
    >>> s = 'print (1/2)'
    >>> int_to_Integer(s)
    'print (Integer (1 )/Integer (2 ))'
    >>> exec(s)
    0.5
    >>> exec(int_to_Integer(s))
    1/2
    """
    from tokenize import generate_tokens, untokenize, NUMBER, NAME, OP
    from sympy.core.compatibility import StringIO

    def _is_int(num):
        """True iff the NUMBER token string *num* is an integer literal."""
        # XXX: Is there something in the standard library that will do this?
        return not ('.' in num or 'j' in num.lower() or 'e' in num.lower())

    out_tokens = []
    for toknum, tokval, _, _, _ in generate_tokens(StringIO(s).readline):
        if toknum == NUMBER and _is_int(tokval):
            # Replace the bare literal with Integer(<literal>).
            out_tokens += [(NAME, 'Integer'), (OP, '('), (NUMBER, tokval), (OP, ')')]
        else:
            out_tokens.append((toknum, tokval))
    return untokenize(out_tokens)
def enable_automatic_int_sympification(app):
    """
    Allow IPython to automatically convert integer literals to Integer.
    """
    import ast
    # IPython >= 0.11 exposes the shell on app.shell; older versions are the
    # shell themselves.
    target = app.shell if hasattr(app, 'shell') else app
    old_run_cell = target.run_cell

    def my_run_cell(cell, *args, **kwargs):
        try:
            # Check the cell for syntax errors.  This way, the syntax error
            # will show the original input, not the transformed input.  The
            # downside here is that IPython magic like %timeit will not work
            # with transformed input (but on the other hand, IPython magic
            # that doesn't expect transformed input will continue to work).
            ast.parse(cell)
        except SyntaxError:
            pass
        else:
            cell = int_to_Integer(cell)
        old_run_cell(cell, *args, **kwargs)

    target.run_cell = my_run_cell
def enable_automatic_symbols(app):
    """Allow IPython to automatically create symbols (``isympy -a``).

    Installs a custom NameError handler that defines the missing name as a
    Symbol and re-runs the last input cell.
    """
    # XXX: This should perhaps use tokenize, like int_to_Integer() above.
    # This would avoid re-executing the code, which can lead to subtle
    # issues.  For example:
    #
    # In [1]: a = 1
    #
    # In [2]: for i in range(10):
    #    ...:     a += 1
    #    ...:
    #
    # In [3]: a
    # Out[3]: 11
    #
    # In [4]: a = 1
    #
    # In [5]: for i in range(10):
    #    ...:     a += 1
    #    ...:     print b
    #    ...:
    # b
    # b
    # b
    # b
    # b
    # b
    # b
    # b
    # b
    # b
    #
    # In [6]: a
    # Out[6]: 12
    #
    # Note how the for loop is executed again because `b` was not defined, but `a`
    # was already incremented once, so the result is that it is incremented
    # multiple times.

    import re
    re_nameerror = re.compile(
        "name '(?P<symbol>[A-Za-z_][A-Za-z0-9_]*)' is not defined")

    def _handler(self, etype, value, tb, tb_offset=None):
        """Handle :exc:`NameError` exception and allow injection of missing symbols. """
        # Only trigger on a NameError raised directly by user code (traceback
        # exactly two frames deep), not on NameErrors from deeper calls.
        if etype is NameError and tb.tb_next and not tb.tb_next.tb_next:
            match = re_nameerror.match(str(value))
            if match is not None:
                # XXX: Make sure Symbol is in scope. Otherwise you'll get infinite recursion.
                self.run_cell("%(symbol)s = Symbol('%(symbol)s')" %
                              {'symbol': match.group("symbol")}, store_history=False)

                try:
                    code = self.user_ns['In'][-1]
                except (KeyError, IndexError):
                    pass
                else:
                    # Re-run the original cell now that the symbol exists.
                    self.run_cell(code, store_history=False)
                    return None
                finally:
                    # Remove the auto-created symbol again so user namespaces
                    # are not silently polluted after the re-run.
                    self.run_cell("del %s" % match.group("symbol"),
                                  store_history=False)

        # Fall back to IPython's normal traceback display.
        stb = self.InteractiveTB.structured_traceback(
            etype, value, tb, tb_offset=tb_offset)
        self._showtraceback(etype, value, stb)

    if hasattr(app, 'shell'):
        app.shell.set_custom_exc((NameError,), _handler)
    else:
        # This was restructured in IPython 0.13
        app.set_custom_exc((NameError,), _handler)
def init_ipython_session(argv=[], auto_symbols=False, auto_int_to_Integer=False):
    """Construct new IPython session. """
    import IPython

    if V(IPython.__version__) < '0.11':
        # Legacy IPython: the old Shell API drives everything itself.
        from IPython.Shell import make_IPython
        return make_IPython(argv)

    # Modern IPython: use an app to parse the command line, and init config.
    # IPython 1.0 deprecates the frontend module, so we import directly from
    # the terminal module to prevent a deprecation message from being shown.
    if V(IPython.__version__) >= '1.0':
        from IPython.terminal import ipapp
    else:
        from IPython.frontend.terminal import ipapp

    app = ipapp.TerminalIPythonApp()

    # Don't draw the IPython banner during initialization; isympy prints its own.
    app.display_banner = False
    app.initialize(argv)

    if auto_symbols:
        if import_module("readline"):
            enable_automatic_symbols(app)
    if auto_int_to_Integer:
        enable_automatic_int_sympification(app)

    return app.shell
def init_python_session():
    """Construct new Python session. """
    from code import InteractiveConsole

    class SymPyConsole(InteractiveConsole):
        """An interactive console with readline support. """

        def __init__(self):
            InteractiveConsole.__init__(self)
            try:
                import readline
            except ImportError:
                # No readline (e.g. on Windows): plain console, no history.
                return
            import os
            import atexit

            readline.parse_and_bind('tab: complete')

            if hasattr(readline, 'read_history_file'):
                history = os.path.expanduser('~/.sympy-history')
                try:
                    readline.read_history_file(history)
                except IOError:
                    pass  # no history file yet
                atexit.register(readline.write_history_file, history)

    return SymPyConsole()
def init_session(ipython=None, pretty_print=True, order=None,
        use_unicode=None, use_latex=None, quiet=False, auto_symbols=False,
        auto_int_to_Integer=False, argv=[]):
    """
    Initialize an embedded IPython or Python session. The IPython session is
    initiated with the --pylab option, without the numpy imports, so that
    matplotlib plotting can be interactive.

    Parameters
    ==========

    pretty_print: boolean
        If True, use pretty_print to stringify;
        if False, use sstrrepr to stringify.
    order: string or None
        There are a few different settings for this parameter:
        lex (default), which is lexographic order;
        grlex, which is graded lexographic order;
        grevlex, which is reversed graded lexographic order;
        old, which is used for compatibility reasons and for long expressions;
        None, which sets it to lex.
    use_unicode: boolean or None
        If True, use unicode characters;
        if False, do not use unicode characters.
    use_latex: boolean or None
        If True, use latex rendering if IPython GUI's;
        if False, do not use latex rendering.
    quiet: boolean
        If True, init_session will not print messages regarding its status;
        if False, init_session will print messages regarding its status.
    auto_symbols: boolean
        If True, IPython will automatically create symbols for you.
        If False, it will not.
        The default is False.
    auto_int_to_Integer: boolean
        If True, IPython will automatically wrap int literals with Integer, so
        that things like 1/2 give Rational(1, 2).
        If False, it will not.
        The default is False.
    ipython: boolean or None
        If True, printing will initialize for an IPython console;
        if False, printing will initialize for a normal console;
        The default is None, which automatically determines whether we are in
        an ipython instance or not.
    argv: list of arguments for IPython
        See sympy.bin.isympy for options that can be used to initialize IPython.

    See Also
    ========

    sympy.interactive.printing.init_printing: for examples and the rest of the parameters.

    Examples
    ========

    >>> from sympy import init_session, Symbol, sin, sqrt
    >>> sin(x) #doctest: +SKIP
    NameError: name 'x' is not defined
    >>> init_session() #doctest: +SKIP
    >>> sin(x) #doctest: +SKIP
    sin(x)
    >>> sqrt(5) #doctest: +SKIP
      ___
    \/ 5
    >>> init_session(pretty_print=False) #doctest: +SKIP
    >>> sqrt(5) #doctest: +SKIP
    sqrt(5)
    >>> y + x + y**2 + x**2 #doctest: +SKIP
    x**2 + x + y**2 + y
    >>> init_session(order='grlex') #doctest: +SKIP
    >>> y + x + y**2 + x**2 #doctest: +SKIP
    x**2 + y**2 + x + y
    >>> init_session(order='grevlex') #doctest: +SKIP
    >>> y * x**2 + x * y**2 #doctest: +SKIP
    x**2*y + x*y**2
    >>> init_session(order='old') #doctest: +SKIP
    >>> x**2 + y**2 + x + y #doctest: +SKIP
    x + y + x**2 + y**2
    >>> theta = Symbol('theta') #doctest: +SKIP
    >>> theta #doctest: +SKIP
    theta
    >>> init_session(use_unicode=True) #doctest: +SKIP
    >>> theta # doctest: +SKIP
    \u03b8
    """
    import sys

    in_ipython = False

    # Detect whether we are already running inside an IPython instance, and
    # grab a handle on it if so.
    if ipython is not False:
        try:
            import IPython
        except ImportError:
            if ipython is True:
                raise RuntimeError("IPython is not available on this system")
            ip = None
        else:
            if V(IPython.__version__) >= '0.11':
                try:
                    ip = get_ipython()
                except NameError:
                    ip = None
            else:
                ip = IPython.ipapi.get()
                if ip:
                    ip = ip.IP
        in_ipython = bool(ip)
        if ipython is None:
            ipython = in_ipython

    if ipython is False:
        ip = init_python_session()
        mainloop = ip.interact
    else:
        if ip is None:
            ip = init_ipython_session(argv=argv, auto_symbols=auto_symbols,
                auto_int_to_Integer=auto_int_to_Integer)

        if V(IPython.__version__) >= '0.11':
            # runsource is gone, use run_cell instead, which doesn't
            # take a symbol arg.  The second arg is `store_history`,
            # and False means don't add the line to IPython's history.
            ip.runsource = lambda src, symbol='exec': ip.run_cell(src, False)

            #Enable interactive plotting using pylab.
            try:
                ip.enable_pylab(import_all=False)
            except Exception:
                # Causes an import error if matplotlib is not installed.
                # Causes other errors (depending on the backend) if there
                # is no display, or if there is some problem in the
                # backend, so we have a bare "except Exception" here
                pass
        if not in_ipython:
            mainloop = ip.mainloop

    # These options require a modern IPython with readline; fail loudly
    # rather than silently ignoring them.
    readline = import_module("readline")
    if auto_symbols and (not ipython or V(IPython.__version__) < '0.11' or not readline):
        raise RuntimeError("automatic construction of symbols is possible only in IPython 0.11 or above with readline support")
    if auto_int_to_Integer and (not ipython or V(IPython.__version__) < '0.11'):
        raise RuntimeError("automatic int to Integer transformation is possible only in IPython 0.11 or above")

    _preexec_source = preexec_source

    # Run the startup commands (symbol definitions etc.) in the session.
    ip.runsource(_preexec_source, symbol='exec')
    init_printing(pretty_print=pretty_print, order=order,
        use_unicode=use_unicode, use_latex=use_latex, ip=ip)

    message = _make_message(ipython, quiet, _preexec_source)

    if not in_ipython:
        # Fresh console: run it to completion, then exit the process.
        mainloop(message)
        sys.exit('Exiting ...')
    else:
        # Already embedded in IPython: just print the banner and register a
        # goodbye message for interpreter exit.
        ip.write(message)
        import atexit
        atexit.register(lambda ip: ip.write("Exiting ...\n"), ip)
| bsd-3-clause |
egorburlakov/CrisisModelingPython | Main.py | 1 | 11779 | import pandas as pd
import numpy as np
from os import listdir
from os.path import isfile, join
import matplotlib.pyplot as plt
################
#Pars
################
# Column dtypes for the raw simulation result CSVs ("Results*" files).
dat_types = {"SigCaught" : np.float32, "NSigs" : np.uint8, "NMissedSigs" : np.uint8, "NDecMade" : np.uint8, "CrT" : np.uint16, "NTp" : np.uint8, "NBr" : np.uint8, "MaxLev" : np.uint8, "NEmps" : np.uint8, "-1" : np.int16,
             "ImpVar" : np.float32, "NoiseDet" : np.float32, "CatchT" : np.uint16, "ActT" : np.uint16, "DirT" : np.uint16, "DecT" : np.uint16, "Sigma1" : np.float32, "Sigma2" : np.float32, "CrOcc" : np.float32}
# Column dtypes for the pre-aggregated "0Results.*.csv" files.
proc_dtypes = {"SigCaught" : np.float32, "NSigs" : np.uint8, "CrT" : np.uint16, "NEmps" : np.uint8, "TpT" : np.int16, "Mode" : np.int8, "TpLoad" : np.float32, "Cr" : np.int8}
#pars_pres = ["SigCaught", "NSigs", "NMissedSigs", "CrT", "NTp", "NBr", "MaxLev", "NEmps", "-1"]
# Columns actually loaded from the raw result files.
pars_pres = ["SigCaught", "NSigs", "CrT", "NEmps", "-1"]
# Organisations outside [min_emp, max_emp] employees are filtered out.
min_emp = 5
max_emp = 13
np.random.seed(1)  # deterministic resampling in dropExcessiveRaws
def getFileNames(dir_name):
    """List simulation result files in *dir_name* (file names starting with "Results").

    *dir_name* is expected to end with a path separator (paths are built by
    plain concatenation).
    """
    return [name for name in listdir(dir_name)
            if isfile(dir_name + name) and name[:7] == "Results"]
def decodePar(s):
    """Map a two-letter parameter code to its ordinal level.

    "Fl"/"Sm"/"Mi" -> 1, "Hi"/"Su"/"Me" -> 2, anything else -> 3.
    """
    if s in ("Fl", "Sm", "Mi"):
        return 1
    if s in ("Hi", "Su", "Me"):
        return 2
    return 3
def processFile(dir_name, f):
    """Load one raw result CSV and attach run parameters decoded from its name.

    File names contain '='-separated parameter codes; the two letters after
    each '=' encode a categorical level (see decodePar).  Rows with a missing
    SigCaught value are dropped.
    """
    frame = pd.read_csv(dir_name + f, sep=";", dtype=dat_types, usecols=pars_pres)
    frame = frame[pd.notnull(frame['SigCaught'])]
    name_parts = f.split('=')
    frame["OrgSt"] = decodePar(name_parts[1][:2])
    # The crisis-level code (name_parts[2]) is currently not attached here;
    # "Cr" is derived from NSigs later instead.
    frame["Mode"] = decodePar(name_parts[3][:2])
    return frame
def mutualDistCalc(y, x0, col):
    """Target integer counts for y[col] that reproduce x0[col]'s distribution.

    Scales x0's empirical distribution of *col* to the largest total that
    y's observed counts can support (min over values of n_y / p_x0), and
    returns the per-value counts truncated to integers.
    """
    observed = y[col].value_counts()
    reference = x0[col].value_counts()
    ref_dist = reference / reference.sum()
    scaled = min(observed / ref_dist) * ref_dist
    return scaled.astype(int)
def dropExcessiveRaws(y, x0, col): #drop from y raws to get the same distribution for col as in x0
    # Per-value keep probability: target count (distribution matched to x0)
    # divided by the current count in y.
    col_p = mutualDistCalc(y, x0, col) / y[col].value_counts() #bernulli distribution coefficient, i. e. probabilities of occurence for values in col
    col_p.name = col + "_p" #just for joining
    y = pd.merge(y, col_p.reset_index(), left_on = col, right_on = "index") #join probabilities to the target dataframe y
    # NOTE(review): "Drop" is really a keep-flag -- binomial(p) draws 1 with
    # probability p, and rows with Drop == 1 are the ones RETAINED below.
    y["Drop"] = np.random.binomial(n = 1, size = y.shape[0], p = y[col + "_p"]) #choosing the raws to drop
    y = y[y.Drop == 1] #dropping the raws
    # Remove the helper columns added by the merge/sampling steps.
    y.drop(["Drop", col + "_p", "index"], 1, inplace = True)
    return y
################
#Reading
################
def readInitialFiles(dir_name):
    """Load all raw "Results*" CSVs from *dir_name* and split/clean them.

    Returns (flat, hier) DataFrames: rows with OrgSt == 1 go to `flat`,
    the rest to `hier`.  The hierarchy set is filtered to the employee range
    [min_emp, max_emp] and subsampled so its NEmps distribution matches the
    flat set.
    """
    files = getFileNames(dir_name)
    flat = pd.DataFrame()
    hier = pd.DataFrame()
    for f in files:
        print(f)
        x1 = processFile(dir_name, f)
        if(x1["OrgSt"][0] == 1): #if flat
            flat = flat.append(x1.drop("OrgSt", 1), ignore_index = True)
        else: # if hierarchy
            hier = hier.append(x1.drop("OrgSt", 1), ignore_index = True)
    #Transforming
    hier = hier[(hier["NEmps"] >= min_emp) & (hier["NEmps"] <= max_emp)] #change to distribution of nemps in flat
    hier = dropExcessiveRaws(hier, flat, "NEmps")
    # "-1" holds top-manager time in the raw files; rename it.
    hier.rename(columns = {"-1" : "TpT"}, inplace = True)
    flat.rename(columns = {"-1" : "TpT"}, inplace = True)
    # Top-manager load = top-manager time / total crisis time.
    hier["TpLoad"] = hier["TpT"] / hier["CrT"]
    flat["TpLoad"] = flat["TpT"] / flat["CrT"]
    # Crisis severity: 2 when more than 7 signals, else 1.
    hier["Cr"] = np.where(hier.NSigs > 7, 2, 1)
    flat["Cr"] = np.where(flat.NSigs > 7, 2, 1)
    return flat, hier
def readProcessedFiles(dir_name):
    """Load the pre-aggregated flat/hierarchy result tables from *dir_name*.

    Returns (flat, hier) read from "0Results.Flat.csv" / "0Results.Hier.csv".
    """
    flat_frame = pd.read_csv(dir_name + "0Results.Flat.csv", sep=",", dtype=proc_dtypes)
    hier_frame = pd.read_csv(dir_name + "0Results.Hier.csv", sep=",", dtype=proc_dtypes)
    print("Processed Files read successfully")
    return flat_frame, hier_frame
# --- Load data (NOTE(review): `all` shadows the builtin of the same name). ---
dir_name = "C:\\PyCharm Community Edition 5.0.3\\Projects\\CrisisModeling\\"
all = readProcessedFiles("C:\\PyCharm Community Edition 5.0.3\\Projects\\CrisisModeling\\")
flat = all[0]
hier = all[1]
all = readInitialFiles(dir_name)
#flat = all[0]
#hier = all[1]
# NOTE(review): readInitialFiles returns (flat, hier), yet all[0] is appended
# to hier and all[1] to flat -- the indices look swapped; confirm intended.
hier = hier.append(all[0], ignore_index = True)
flat = flat.append(all[1], ignore_index = True)
#hier.to_csv(dir_name + "0Results.Hier.csv")
#flat.to_csv(dir_name + "0Results.Flat.csv")
#hier.to_csv(dir_name + "0Results.Hier.csv", mode = "a")
#flat.to_csv(dir_name + "0Results.Flat.csv", mode = "a")
# --- Aggregations: mean top-manager load / signal-catch rate, grouped by
# --- signals, employees, crisis level and mode, pivoted on Mode. ---
f_eta_empsig = flat["TpLoad"].groupby([flat["NSigs"], flat["NEmps"], flat["Mode"]]).mean().unstack()
h_eta_empsig = hier["TpLoad"].groupby([hier["NSigs"], hier["NEmps"], hier["Mode"]]).mean().unstack()
# NOTE(review): result of this expression is discarded -- leftover from
# interactive exploration?
f_eta_empsig .xs((8), level = ("NSigs"))[2]
h_nu_nsigs = hier["SigCaught"].groupby([hier["NSigs"], hier["Mode"]]).mean().unstack()
f_nu_nsigs = flat["SigCaught"].groupby([flat["NSigs"], flat["Mode"]]).mean().unstack()
h_nu_nemps = hier["SigCaught"].groupby([hier["NEmps"], hier["Cr"], hier["Mode"]]).mean().unstack().reset_index()
f_nu_nemps = flat["SigCaught"].groupby([flat["NEmps"], flat["Cr"], flat["Mode"]]).mean().unstack().reset_index()
h_eta_nsigs = hier["TpLoad"].groupby([hier["NSigs"], hier["Mode"]]).mean().unstack()
f_eta_nsigs = flat["TpLoad"].groupby([flat["NSigs"], flat["Mode"]]).mean().unstack()
h_eta_nemps = hier["TpLoad"].groupby([hier["NEmps"], hier["Mode"]]).mean().unstack()
f_eta_nemps = flat["TpLoad"].groupby([flat["NEmps"], flat["Mode"]]).mean().unstack()
#Printing
#, ticks = [3, 5, 7, 9, 11, 13, 15, 17, 19]
#fig.subplots_adjust(wspace = 0, hspace = 0)
###################
#1
###################
# Signal-catch rate vs number of signals (mode 2), centralized vs
# decentralized; the vertical line at 7.5 marks the crisis-level boundary.
fig = plt.figure(facecolor = "white")
ax0 = fig.add_subplot(1, 1, 1)
plt.plot(h_nu_nsigs.index[0:5], h_nu_nsigs[2][0:5], linestyle = "--", marker = "o", color = "green", label = "Centralized")
plt.plot(f_nu_nsigs.index[0:5], f_nu_nsigs[2][0:5], linestyle = "-", marker = "^" , color = "red", label = "Decentralized")
ax0.legend(loc = "best", fontsize = 11)
plt.plot(h_nu_nsigs.index[5:16], h_nu_nsigs[2][5:16], linestyle = "--", marker = "o", color = "green", label = "Centralized")
plt.plot(f_nu_nsigs.index[5:16], f_nu_nsigs[2][5:16], linestyle = "-", marker = "^" , color = "red", label = "Decentralized")
plt.axvline(7.5, color ='k', linestyle='--')
plt.grid()
plt.show()
###################
#2
###################
# Same comparison for all three modes; mode 2 is drawn with thicker,
# darker lines to stand out.
fig = plt.figure(facecolor = "white")
ax1 = fig.add_subplot(1, 1, 1)
plt.plot(h_nu_nsigs.index[5:16], h_nu_nsigs[3][5:16], linestyle = "--", marker = "o", color = "g", label = "Centralized")
plt.plot(f_nu_nsigs.index[5:16], f_nu_nsigs[3][5:16], linestyle = "-", marker = "^", color = "r", label = "Decentralized")
ax1.legend(loc = "upper left", fontsize = 11)
plt.plot(h_nu_nsigs.index[0:5], h_nu_nsigs[3][0:5], linestyle = "--", marker = "o", color = "g", label = "Centralized")
plt.plot(f_nu_nsigs.index[0:5], f_nu_nsigs[3][0:5], linestyle = "-", marker = "^", color = "r", label = "Decentralized")
plt.plot(h_nu_nsigs.index[0:5], h_nu_nsigs[1][0:5], linestyle = "--", marker = "o", color = "g", label = "Centralized")
plt.plot(h_nu_nsigs.index[5:16], h_nu_nsigs[1][5:16], linestyle = "--", marker = "o", color = "g", label = "Centralized")
plt.plot(f_nu_nsigs.index[0:5], f_nu_nsigs[1][0:5], linestyle = "-", marker = "^", color = "r", label = "Decentralized")
plt.plot(f_nu_nsigs.index[5:16], f_nu_nsigs[1][5:16], linestyle = "-", marker = "^", color = "r", label = "Decentralized")
plt.plot(h_nu_nsigs.index[0:5], h_nu_nsigs[2][0:5], linestyle = "--", marker = "o", color = (0, 0.5, 0), label = "Centralized", linewidth = 3)
plt.plot(h_nu_nsigs.index[5:16], h_nu_nsigs[2][5:16], linestyle = "--", marker = "o", color = (0, 0.5, 0), label = "Centralized", linewidth = 3)
plt.plot(f_nu_nsigs.index[0:5], f_nu_nsigs[2][0:5], linestyle = "-", marker = "^" , color = (0.5, 0, 0), label = "Decentralized", linewidth = 3)
plt.plot(f_nu_nsigs.index[5:16], f_nu_nsigs[2][5:16], linestyle = "-", marker = "^" , color = (0.5, 0, 0), label = "Decentralized", linewidth = 3)
#ax1.set_ylabel(r'$\eta$', fontsize = 20)
#ax1.set_xlabel("N of Signals", fontsize = 16)
plt.axvline(7.5, color ='k', linestyle='--')
plt.grid()
plt.show()
###################
#3
###################
# Signal-catch rate vs number of employees, split by crisis level (Cr).
# NOTE(review): the masks on f_nu_nemps are built from h_nu_nemps.Cr -- this
# only works if both frames share identical row ordering; confirm.
fig = plt.figure(facecolor = "white")
ax2 = fig.add_subplot(1, 1, 1)
plt.plot(h_nu_nemps[h_nu_nemps.Cr == 1].NEmps, h_nu_nemps[h_nu_nemps.Cr == 1].ix[:,3], linestyle = "--", marker = "o", color = "g", label = "Centralized")
plt.plot(f_nu_nemps[h_nu_nemps.Cr == 1].NEmps, f_nu_nemps[h_nu_nemps.Cr == 1].ix[:,3], linestyle = "-", marker = "^", color = "g", label = "Decentralized")
ax2.legend(loc = "upper left", fontsize = 11)
plt.plot(h_nu_nemps[h_nu_nemps.Cr == 2].NEmps, h_nu_nemps[h_nu_nemps.Cr == 2].ix[:,3], linestyle = "--", marker = "o", color = "r", label = "Centralized")
plt.plot(f_nu_nemps[h_nu_nemps.Cr == 2].NEmps, f_nu_nemps[h_nu_nemps.Cr == 2].ix[:,3], linestyle = "-", marker = "^", color = "r", label = "Decentralized")
#ax2.set_ylabel(r'$\eta$', fontsize = 20)
#ax2.set_xlabel("N of Employees", fontsize = 16)
plt.grid()
plt.show()
###################
#4
###################
# Top-manager load vs number of signals (mode 2 only).
fig = plt.figure(facecolor = "white")
ax3 = fig.add_subplot(1, 1, 1)
plt.plot(h_eta_nsigs.index[0:5], h_eta_nsigs[2][0:5], linestyle = "--", marker = "o", color = "g", label = "Centralized")
plt.plot(f_eta_nsigs.index[0:5], f_eta_nsigs[2][0:5], linestyle = "-", marker = "^", color = "r", label = "Decentralized")
ax3.legend(loc = "best", fontsize = 11)
plt.plot(h_eta_nsigs.index[5:16], h_eta_nsigs[2][5:16], linestyle = "--", marker = "o", color = "g", label = "Centralized")
plt.plot(f_eta_nsigs.index[5:16], f_eta_nsigs[2][5:16], linestyle = "-", marker = "^", color = "r", label = "Decentralized")
#ax3.set_ylabel(r'$\nu$', fontsize = 20)
#ax3.set_xlabel("N of Signals", fontsize = 16)
plt.axvline(7.5, color ='k', linestyle='--')
plt.grid()
plt.show()
###################
#5
###################
# Top-manager load vs number of signals for all three modes; mode 2 thicker.
fig = plt.figure(facecolor = "white")
ax4 = fig.add_subplot(1, 1, 1)
plt.plot(h_eta_nsigs.index[0:5], h_eta_nsigs[1][0:5], linestyle = "--", marker = "o", color = "g", label = "Centralized", linewidth = 1)
plt.plot(f_eta_nsigs.index[0:5], f_eta_nsigs[1][0:5], linestyle = "-", marker = "^", color = "r", label = "Decentralized", linewidth = 1)
ax4.legend(loc = "best", fontsize = 11)
plt.plot(h_eta_nsigs.index[5:16], h_eta_nsigs[1][5:16], linestyle = "--", marker = "o", color = "g", label = "Centralized", linewidth = 1)
plt.plot(f_eta_nsigs.index[5:16], f_eta_nsigs[1][5:16], linestyle = "-", marker = "^", color = "r", label = "Decentralized", linewidth = 1)
plt.plot(h_eta_nsigs.index[0:5], h_eta_nsigs[2][0:5], linestyle = "--", marker = "o", color = (0, 0.5, 0), label = "Centralized", linewidth = 3)
plt.plot(h_eta_nsigs.index[5:16], h_eta_nsigs[2][5:16], linestyle = "--", marker = "o", color = (0, 0.5, 0), label = "Centralized", linewidth = 3)
plt.plot(h_eta_nsigs.index[0:5], h_eta_nsigs[3][0:5], linestyle = "--", marker = "o", color = "g", label = "Centralized", linewidth = 1)
plt.plot(h_eta_nsigs.index[5:16], h_eta_nsigs[3][5:16], linestyle = "--", marker = "o", color = "g", label = "Centralized", linewidth = 1)
plt.plot(f_eta_nsigs.index[0:5], f_eta_nsigs[2][0:5], linestyle = "-", marker = "^", color = (0.5, 0, 0), label = "Decentralized", linewidth = 3)
plt.plot(f_eta_nsigs.index[5:16], f_eta_nsigs[2][5:16], linestyle = "-", marker = "^", color = (0.5, 0, 0), label = "Decentralized", linewidth = 3)
plt.plot(f_eta_nsigs.index[0:5], f_eta_nsigs[3][0:5], linestyle = "-", marker = "^", color = "r", label = "Decentralized", linewidth = 1)
plt.plot(f_eta_nsigs.index[5:16], f_eta_nsigs[3][5:16], linestyle = "-", marker = "^", color = "r", label = "Decentralized", linewidth = 1)
#ax4.set_ylabel(r'$\nu$', fontsize = 20)
#ax4.set_xlabel("N of Signals", fontsize = 16)
plt.axvline(7.5, color ='k', linestyle='--')
plt.grid()
plt.show()
#print(flat.describe())
#print(hier.describe())
#sys.modules[__name__].__dict__.clear() | gpl-2.0 |
lux-jwang/goodoos | drawnetwork.py | 1 | 2393 | #! /usr/bin/env python
# We probably will need some things from several places
import sys
# We need to import the graph_tool module itself
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from graph_tool.all import *
sys.path.append("./src")
from dataset import get_friends_data_index
def draw_edges(gx, src, dsts):
    """Add a directed edge from each vertex in *dsts* into *src*."""
    for neighbour in dsts:
        gx.add_edge(neighbour, src)
def map_indx_to_vertex(gx, indxs):
    """Translate a sequence of vertex indices into graph vertex objects."""
    return [gx.vertex(i) for i in indxs]
def construct_graphic():
    """Build the directed friendship graph from the dataset.

    Returns (graph, ui_mat), where ui_mat maps a user's vertex index to the
    indices of that user's friends; edges run friend -> user.
    """
    ui_mat, roster = get_friends_data_index()
    total_num = len(roster.keys())
    gx = Graph()
    gx.set_directed(True)
    # One vertex per roster entry; indices align with the dataset's indices.
    vlist = gx.add_vertex(total_num)
    print len(ui_mat.keys())
    for ky, val in ui_mat.iteritems():
        src_v = gx.vertex(ky)
        dsts_v = map_indx_to_vertex(gx,val)
        draw_edges(gx,src_v,dsts_v)
    return gx, ui_mat
def get_shortest_distance(gx,srcs):
    """Plot a histogram of shortest-path distances from each vertex in *srcs*.

    Distances >= 2000 are discarded as "unreachable" -- presumably filtering
    graph-tool's large sentinel value for disconnected vertices; TODO confirm
    the cutoff against the graph size.
    """
    valid_distance = []
    for src in srcs:
        dist = shortest_distance(gx,source=gx.vertex(src))
        for item in dist.a:
            if item < 2000:
                valid_distance.append(item)
    print " "
    print "average degree: ", np.mean(valid_distance)
    #hist
    matplotlib.rcParams.update({'font.size':28})
    title = "Six Degrees of Separation ( Avg: "+str(round(np.mean(valid_distance),4))+" )"
    plt.hist(valid_distance, bins=10, facecolor='grey')
    plt.title(title)
    plt.xlabel("Degrees")
    plt.ylabel("Frequency")
    plt.axis('tight')
    plt.show()
def get_shortest_distance_uni(gx):
    """All-pairs variant of get_shortest_distance (no source restriction).

    Uses graph-tool's whole-graph shortest_distance and the same < 2000
    "reachable" filter, then plots the distance histogram.
    """
    valid_distance = []
    dist = shortest_distance(gx)
    for item in dist.a:
        if item < 2000:
            valid_distance.append(item)
    print "average degree: ", np.mean(valid_distance)
    #hist
    title = "Six Degrees of Separation ( Avg: "+str(round(np.mean(valid_distance),4))+" )"
    plt.hist(valid_distance)
    plt.title(title)
    plt.xlabel("Degrees")
    plt.ylabel("Frequency")
    plt.axis('tight')
    plt.show()
def show_graphic(gx):
    """Render *gx* with an SFDP layout and save it as a large PNG image."""
    layout = sfdp_layout(gx)
    graph_draw(gx, pos=layout, vertex_text=gx.vertex_index, vertex_font_size=11,
               output_size=(4800, 4800), output="friends_relation_1010_3.png")
if __name__ == '__main__':
    # Build the friendship graph and report degree-of-separation statistics.
    g, ui_mat = construct_graphic()
    srcs = ui_mat.keys()
    get_shortest_distance(g,srcs)
    #get_shortest_distance_uni(g)
    #show_graphic(g)
| mit |
koobonil/Boss2D | Boss2D/addon/tensorflow-1.2.1_for_boss/tensorflow/contrib/learn/python/learn/estimators/estimators_test.py | 37 | 5114 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Custom optimizer tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn import datasets
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn.estimators import estimator as estimator_lib
from tensorflow.contrib.learn.python.learn.estimators._sklearn import accuracy_score
from tensorflow.contrib.learn.python.learn.estimators._sklearn import train_test_split
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import momentum as momentum_lib
class FeatureEngineeringFunctionTest(test.TestCase):
    """Tests feature_engineering_fn."""

    def testFeatureEngineeringFn(self):
        # Verifies that when a feature_engineering_fn is supplied, the model_fn
        # sees only the transformed features/labels, and that evaluation
        # metrics also observe the transformed labels.

        def input_fn():
            # Raw input pipeline: single feature "x" and label "y".
            return {
                "x": constant_op.constant([1.])
            }, {
                "y": constant_op.constant([11.])
            }

        def feature_engineering_fn(features, labels):
            # Replaces the raw tensors entirely with new keys/values.
            _, _ = features, labels
            return {
                "transformed_x": constant_op.constant([9.])
            }, {
                "transformed_y": constant_op.constant([99.])
            }

        def model_fn(features, labels):
            # dummy variable:
            _ = variables.Variable([0.])
            _ = labels
            predictions = features["transformed_x"]
            loss = constant_op.constant([2.])
            return predictions, loss, control_flow_ops.no_op()

        estimator = estimator_lib.Estimator(
            model_fn=model_fn, feature_engineering_fn=feature_engineering_fn)
        estimator.fit(input_fn=input_fn, steps=1)
        prediction = next(estimator.predict(input_fn=input_fn, as_iterable=True))
        # predictions = transformed_x (9)
        self.assertEqual(9., prediction)
        metrics = estimator.evaluate(
            input_fn=input_fn, steps=1,
            metrics={"label":
                     metric_spec.MetricSpec(lambda predictions, labels: labels)})
        # labels = transformed_y (99)
        self.assertEqual(99., metrics["label"])

    def testNoneFeatureEngineeringFn(self):
        # Verifies that omitting feature_engineering_fn leaves the raw input
        # untouched: two otherwise-identical estimators predict different
        # values depending on whether the transform was applied.

        def input_fn():
            return {
                "x": constant_op.constant([1.])
            }, {
                "y": constant_op.constant([11.])
            }

        def feature_engineering_fn(features, labels):
            # Overwrites the same keys ("x"/"y") with new constants.
            _, _ = features, labels
            return {
                "x": constant_op.constant([9.])
            }, {
                "y": constant_op.constant([99.])
            }

        def model_fn(features, labels):
            # dummy variable:
            _ = variables.Variable([0.])
            _ = labels
            predictions = features["x"]
            loss = constant_op.constant([2.])
            return predictions, loss, control_flow_ops.no_op()

        estimator_with_fe_fn = estimator_lib.Estimator(
            model_fn=model_fn, feature_engineering_fn=feature_engineering_fn)
        estimator_with_fe_fn.fit(input_fn=input_fn, steps=1)
        estimator_without_fe_fn = estimator_lib.Estimator(model_fn=model_fn)
        estimator_without_fe_fn.fit(input_fn=input_fn, steps=1)

        # predictions = x
        prediction_with_fe_fn = next(
            estimator_with_fe_fn.predict(
                input_fn=input_fn, as_iterable=True))
        self.assertEqual(9., prediction_with_fe_fn)
        prediction_without_fe_fn = next(
            estimator_without_fe_fn.predict(
                input_fn=input_fn, as_iterable=True))
        self.assertEqual(1., prediction_without_fe_fn)
class CustomOptimizer(test.TestCase):
  """Custom optimizer tests."""

  def testIrisMomentum(self):
    """DNNClassifier should train with a user-supplied optimizer factory."""
    random.seed(42)
    dataset = datasets.load_iris()
    x_train, x_test, y_train, y_test = train_test_split(
        dataset.data, dataset.target, test_size=0.2, random_state=42)

    def custom_optimizer():
      # Factory that builds a fresh momentum optimizer per call.
      return momentum_lib.MomentumOptimizer(learning_rate=0.01, momentum=0.9)

    clf = learn.DNNClassifier(
        hidden_units=[10, 20, 10],
        feature_columns=learn.infer_real_valued_columns_from_input(x_train),
        n_classes=3,
        optimizer=custom_optimizer,
        config=learn.RunConfig(tf_random_seed=1))
    clf.fit(x_train, y_train, steps=400)
    predicted = np.array(list(clf.predict_classes(x_test)))
    acc = accuracy_score(y_test, predicted)
    self.assertGreater(acc, 0.65, "Failed with score = {0}".format(acc))
# Run the TensorFlow test harness when this module is executed as a script.
if __name__ == "__main__":
  test.main()
| mit |
JohnKendrick/PDielec | PDielec/GUI/AnalysisTab.py | 1 | 21560 | import math
import numpy as np
import PDielec.Calculator as Calculator
from PyQt5.QtWidgets import QWidget, QApplication
from PyQt5.QtWidgets import QComboBox, QLabel, QLineEdit
from PyQt5.QtWidgets import QVBoxLayout, QHBoxLayout, QFormLayout
from PyQt5.QtWidgets import QSpinBox, QDoubleSpinBox
from PyQt5.QtWidgets import QSizePolicy, QTableWidgetItem
from PyQt5.QtCore import Qt, QCoreApplication
from PDielec.Constants import covalent_radii
# Import plotting requirements
import matplotlib
import matplotlib.figure
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
from PDielec.Utilities import Debug
from PDielec.GUI.SettingsTab import FixedQTableWidget
class AnalysisTab(QWidget):
    """Notebook tab that analyses vibrational modes.

    The tab partitions the unit cell into molecules (using user-adjustable
    covalent radii and a bonding tolerance), decomposes each normal mode's
    energy into centre-of-mass / rotational / vibrational and per-molecule
    contributions, and plots the result as stacked bar charts.
    """
    def __init__(self, parent, debug=False ):
        """Create all widgets, connect their signal handlers and lay out the tab.

        parent is the notebook that owns this tab; debug enables the
        module-level Debug printer.
        """
        # NOTE(review): super(QWidget, self) starts the MRO search *after*
        # QWidget rather than after AnalysisTab; presumably a project idiom -
        # confirm it is intentional.
        super(QWidget, self).__init__(parent)
        global debugger
        debugger = Debug(debug,'AnalysisTab')
        self.settings = {}
        self.subplot = None
        self.setWindowTitle('Analysis')
        self.settings['Minimum frequency'] = -1
        self.settings['Maximum frequency'] = 400
        self.settings['title'] = 'Analysis'
        self.settings['Covalent radius scaling'] = 1.1
        self.settings['Bonding tolerance'] = 0.1
        self.settings['Bar width'] = 0.5
        # dirty flags that the molecular analysis must be recalculated
        self.dirty = True
        self.plot_types = ['Internal vs External','Molecular Composition']
        self.plot_type_index = 0
        self.number_of_molecules = 0
        self.frequency_units = None
        self.cell_of_molecules = None
        self.original_atomic_order = None
        self.frequencies_cm1 = []
        self.mode_energies = []
        # NOTE: this aliases (does not copy) the imported covalent_radii dict,
        # so edits made through the radii table mutate the shared module data.
        self.element_radii = covalent_radii
        self.species = []
        # store the notebook
        self.notebook = parent
        # get the reader from the main tab
        self.reader = self.notebook.mainTab.reader
        # Create last tab - AnalysisTab
        vbox = QVBoxLayout()
        form = QFormLayout()
        #
        # The minimum frequency
        #
        self.vmin_sb = QSpinBox(self)
        self.vmin_sb.setRange(-100,9000)
        self.vmin_sb.setValue(self.settings['Minimum frequency'])
        self.vmin_sb.setToolTip('Set the minimum frequency to be considered)')
        self.vmin_sb.valueChanged.connect(self.on_vmin_changed)
        label = QLabel('Minimum frequency:', self)
        label.setToolTip('Set the minimum frequency to be considered)')
        form.addRow(label, self.vmin_sb)
        #
        # The maximum frequency
        #
        self.vmax_sb = QSpinBox(self)
        self.vmax_sb.setRange(0,9000)
        self.vmax_sb.setValue(self.settings['Maximum frequency'])
        self.vmax_sb.setToolTip('Set the maximum frequency to be considered)')
        self.vmax_sb.valueChanged.connect(self.on_vmax_changed)
        label = QLabel('Maximum frequency:', self)
        label.setToolTip('Set the maximum frequency to be considered)')
        form.addRow(label, self.vmax_sb)
        #
        # The bonding tolerance and scaling of radii frequency
        #
        hbox = QHBoxLayout()
        self.scale_sp = QDoubleSpinBox(self)
        self.scale_sp.setRange(0.01,10.0)
        self.scale_sp.setSingleStep(0.01)
        self.scale_sp.setDecimals(2)
        self.scale_sp.setValue(self.settings['Covalent radius scaling'])
        self.scale_sp.setToolTip('Scale the covalent radii to determine bonding')
        self.scale_sp.valueChanged.connect(self.on_scale_changed)
        hbox.addWidget(self.scale_sp)
        self.tolerance_sp = QDoubleSpinBox(self)
        self.tolerance_sp.setRange(0.01,2.0)
        self.tolerance_sp.setSingleStep(0.01)
        self.tolerance_sp.setDecimals(2)
        self.tolerance_sp.setValue(self.settings['Bonding tolerance'])
        self.tolerance_sp.setToolTip('Tolerance for bonding is determined from scale*(radi+radj)+toler')
        self.tolerance_sp.valueChanged.connect(self.on_tolerance_changed)
        hbox.addWidget(self.tolerance_sp)
        label = QLabel('Bonding scale and tolerance', self)
        label.setToolTip('Bonding is determined from scale*(radi+radj)+toler')
        form.addRow(label, hbox)
        # Add a table of covalent radii
        self.element_radii_tw = FixedQTableWidget(self)
        self.element_radii_tw.setToolTip('Individual covalent radii used to determine bonding can be set here')
        self.element_radii_tw.itemClicked.connect(self.on_element_radii_tw_itemClicked)
        self.element_radii_tw.itemChanged.connect(self.on_element_radii_tw_itemChanged)
        self.element_radii_tw.setRowCount(1)
        self.element_radii_tw.blockSignals(False)
        sizePolicy = QSizePolicy(QSizePolicy.Minimum,QSizePolicy.Minimum)
        self.element_radii_tw.setSizePolicy(sizePolicy)
        form.addRow(QLabel('Atomic radii', self), self.element_radii_tw)
        # Add number of molecules found (read-only display)
        self.molecules_le = QLineEdit(self)
        self.molecules_le.setEnabled(False)
        self.molecules_le.setText('{}'.format(self.number_of_molecules))
        self.molecules_le.setToolTip('The bonding tolerances can change the number of molecules found')
        label = QLabel('Number of molecules found', self)
        label.setToolTip('The bonding tolerances can change the number of molecules found')
        form.addRow(label, self.molecules_le)
        #
        # The plotting width of bar
        #
        self.width_sp = QDoubleSpinBox(self)
        self.width_sp.setRange(0.01,2.0)
        self.width_sp.setSingleStep(0.01)
        self.width_sp.setDecimals(2)
        self.width_sp.setValue(self.settings['Bar width'])
        self.width_sp.setToolTip('Change the width of the bars - should be between 0 and 1')
        self.width_sp.valueChanged.connect(self.on_width_changed)
        label = QLabel('Bar width', self)
        label.setToolTip('Change the width of the bars - should be between 0 and 1')
        form.addRow(label, self.width_sp)
        #
        # Set the plot title
        #
        self.title_le = QLineEdit(self)
        self.title_le.setToolTip('Set the plot title')
        self.title_le.setText(self.settings['title'])
        self.title_le.textChanged.connect(self.on_title_changed)
        label = QLabel('Plot title', self)
        label.setToolTip('Set the plot title')
        form.addRow(label, self.title_le)
        #
        # Add a comb box to select which type of plot
        #
        self.plottype_cb = QComboBox(self)
        self.plottype_cb.setToolTip('The energy can be decomposed either according to internal vs external motion or into a molecular based decompostion')
        self.plottype_cb.addItems(self.plot_types)
        self.plottype_cb.setCurrentIndex(self.plot_type_index)
        self.plottype_cb.currentIndexChanged.connect(self.on_plottype_cb_changed)
        label = QLabel('Choose the plot type', self)
        label.setToolTip('The energy can be decomposed either according to internal vs external motion or into a molecular based decompostion')
        form.addRow(label, self.plottype_cb)
        #
        # Add the matplotlib figure to the bottom
        #
        self.figure = matplotlib.figure.Figure()
        self.canvas = FigureCanvas(self.figure)
        self.canvas.setSizePolicy(QSizePolicy(QSizePolicy.Expanding,QSizePolicy.Expanding))
        self.toolbar = NavigationToolbar(self.canvas, self)
        self.toolbar.setSizePolicy(QSizePolicy(QSizePolicy.Fixed,QSizePolicy.Fixed))
        form.addRow(self.canvas)
        form.addRow(self.toolbar)
        vbox.addLayout(form)
        # finalise the layout
        self.setLayout(vbox)
        QCoreApplication.processEvents()
        #if self.notebook.spreadsheet is not None:
        #    self.write_spreadsheet()
        #QCoreApplication.processEvents()
    def on_element_radii_tw_itemClicked(self,item):
        """Re-enable change signals once the user starts editing the radii table."""
        self.element_radii_tw.blockSignals(False)
    def on_element_radii_tw_itemChanged(self,item):
        """Handle an edited covalent radius: store it, recalculate and replot."""
        if self.reader is None:
            return
        col = item.column()
        try:
            debugger.print('Changing the element radius',col,item.text())
            self.element_radii[self.species[col]] = float(item.text())
            self.calculate()
            self.plot()
        except:
            # Bare except deliberately swallows bad user input (e.g. a
            # non-numeric radius); only a debug message is emitted.
            debugger.print('Failed Changing the element radius',col,item.text())
            pass
    def set_radii_tw(self):
        """Populate the radii table with one column per species in the reader."""
        if self.reader is None:
            return
        # Block signals so programmatic population does not trigger recalculation
        self.element_radii_tw.blockSignals(True)
        self.species = self.reader.getSpecies()
        radii = [ self.element_radii[el] for el in self.species ]
        self.element_radii_tw.setColumnCount(len(self.species))
        self.element_radii_tw.setHorizontalHeaderLabels(self.species)
        self.element_radii_tw.setVerticalHeaderLabels([''])
        for i,(radius,element) in enumerate(zip(radii,self.species)):
            qw = QTableWidgetItem()
            qw.setText('{0:.6f}'.format(radius))
            qw.setTextAlignment(Qt.AlignHCenter | Qt.AlignVCenter)
            self.element_radii_tw.setItem(0,i, qw )
        self.element_radii_tw.blockSignals(False)
        # end if
        return
    def setCovalentRadius(self,element,radius):
        """Programmatically set one element's covalent radius and refresh everything."""
        self.element_radii[element] = radius
        self.set_radii_tw()
        self.calculate()
        self.plot()
    def write_spreadsheet(self):
        """Write the per-mode energy decomposition to the notebook's spreadsheet."""
        if self.notebook.spreadsheet is None:
            return
        sp = self.notebook.spreadsheet
        sp.selectWorkSheet('Analysis')
        sp.delete()
        sp.writeNextRow(['Analysis of the vibrational modes into percentage contributions for molecules and internal/external modes'], row=0,col=1)
        headers = ['Mode','Frequency (cm-1)', 'Centre of mass %','Rotational %', 'Vibrational %']
        for mol in range(self.number_of_molecules):
            headers.append('Molecule '+str(mol)+' %')
        #
        sp.writeNextRow(headers,col=1)
        for imode,(freq,energies) in enumerate(zip(self.frequencies_cm1,self.mode_energies)):
            # Each entry holds total, centre-of-mass, rotational, vibrational
            # and per-molecule energies; percentages are relative to the total.
            tote,cme,rote,vibe, molecular_energies = energies
            output = [ imode, freq, 100*cme/tote, 100*rote/tote, 100*vibe/tote ]
            for e in molecular_energies:
                output.append(100*e/tote)
            sp.writeNextRow(output,col=1,check=1)
    def on_width_changed(self,value):
        """Bar-width spin box handler: store the width and replot (no recalculation)."""
        debugger.print('on width changed ', value)
        self.settings['Bar width'] = value
        self.plot()
    def on_scale_changed(self,value):
        """Radius-scaling spin box handler: recalculate the analysis and replot."""
        debugger.print('on scale_le changed ', value)
        self.settings['Covalent radius scaling'] = value
        self.dirty = True
        self.calculate()
        self.plot()
    def on_tolerance_changed(self,value):
        """Bonding-tolerance spin box handler: recalculate the analysis and replot."""
        debugger.print('on_tolerance_le changed ', value)
        self.settings['Bonding tolerance'] = value
        self.dirty = True
        self.calculate()
        self.plot()
    def on_title_changed(self,text):
        """Title line-edit handler: update the plot title in place."""
        self.settings['title'] = text
        if self.subplot is not None:
            self.subplot.set_title(self.settings['title'])
            self.canvas.draw_idle()
        debugger.print('on title change ', self.settings['title'])
    def on_vmin_changed(self):
        """Minimum-frequency handler: replot only when the range stays valid."""
        self.settings['Minimum frequency'] = self.vmin_sb.value()
        debugger.print('on vmin change ', self.settings['Minimum frequency'])
        vmin = self.settings['Minimum frequency']
        vmax = self.settings['Maximum frequency']
        if vmax > vmin:
            self.plot()
    def on_vmax_changed(self):
        """Maximum-frequency handler: replot only when the range stays valid."""
        self.settings['Maximum frequency'] = self.vmax_sb.value()
        debugger.print('on vmax change ', self.settings['Maximum frequency'])
        vmin = self.settings['Minimum frequency']
        vmax = self.settings['Maximum frequency']
        if vmax > vmin:
            self.plot()
    def refresh(self, force=False):
        """Synchronise all widgets with the settings and redo the analysis.

        Does nothing unless the tab is dirty, force is True, or the notebook
        has flagged that a recalculation is required.
        """
        if not self.dirty and not force and not self.notebook.analysisCalculationRequired:
            debugger.print('return with no refresh', self.dirty, force, self.notebook.analysisCalculationRequired)
            return
        debugger.print('Refreshing widget')
        #
        # Block signals during refresh
        #
        for w in self.findChildren(QWidget):
            w.blockSignals(True)
        self.vmin_sb.setValue(self.settings['Minimum frequency'])
        self.vmax_sb.setValue(self.settings['Maximum frequency'])
        self.scale_sp.setValue(self.settings['Covalent radius scaling'])
        self.tolerance_sp.setValue(self.settings['Bonding tolerance'])
        self.molecules_le.setText('{}'.format(self.number_of_molecules))
        self.width_sp.setValue(self.settings['Bar width'])
        self.title_le.setText(self.settings['title'])
        self.plottype_cb.setCurrentIndex(self.plot_type_index)
        self.calculate()
        self.set_radii_tw()
        self.plot()
        self.notebook.analysisCalculationRequired = False
        #
        # Unlock signals after refresh
        #
        for w in self.findChildren(QWidget):
            w.blockSignals(False)
        return
    def on_plottype_cb_changed(self, index):
        """Plot-type combo handler: switch between the two decompositions and replot."""
        self.plot_type_index = index
        debugger.print('Plot type index changed to ', self.plot_type_index)
        self.plot()
    def calculate(self):
        """Partition the cell into molecules and decompose every mode's energy.

        Fills self.cell_of_molecules, self.new_normal_modes,
        self.new_mass_weighted_normal_modes and self.mode_energies, averaging
        the decomposition over degenerate frequencies.
        """
        debugger.print('calculate')
        # Assemble the mainTab settings
        settings = self.notebook.mainTab.settings
        program = settings['Program']
        filename = settings['Output file name']
        self.reader = self.notebook.mainTab.reader
        if self.reader is None:
            return
        if program == '':
            return
        if filename == '':
            return
        # Show a busy cursor: the molecular analysis can take a while
        QApplication.setOverrideCursor(Qt.WaitCursor)
        # Assemble the settingsTab settings
        settings = self.notebook.settingsTab.settings
        self.frequencies_cm1 = self.notebook.settingsTab.frequencies_cm1
        mass_weighted_normal_modes = self.notebook.settingsTab.mass_weighted_normal_modes
        scale = self.settings['Covalent radius scaling']
        tolerance = self.settings['Bonding tolerance']
        # Find the last unit cell read by the reader and its masses
        cell = self.reader.unit_cells[-1]
        atom_masses = self.reader.masses
        cell.set_atomic_masses(atom_masses)
        # NOTE(review): this passes the module-level covalent_radii dict, not
        # self.element_radii.  Because self.element_radii aliases the same
        # dict the table edits still take effect here, but the aliasing looks
        # accidental - confirm self.element_radii was intended.
        self.cell_of_molecules,nmols,self.original_atomic_order = cell.calculate_molecular_contents(scale, tolerance, covalent_radii)
        # if the number of molecules has changed then tell the viewerTab that the cell has changed
        if self.number_of_molecules != nmols:
            #self.notebook.viewerTab.refresh(force=True)
            # NOTE(review): attribute name looks misspelled
            # ('visualerCalculationRquired'); if other tabs test
            # 'visualerCalculationRequired' this flag is never seen - confirm.
            self.notebook.visualerCalculationRquired = True
            self.number_of_molecules = nmols
            self.molecules_le.setText('{}'.format(self.number_of_molecules))
        # get the normal modes from the mass weighted ones
        normal_modes = Calculator.normal_modes(atom_masses, mass_weighted_normal_modes)
        # Reorder the atoms so that the mass weighted normal modes order agrees with the ordering in the cell_of_molecules cell
        nmodes,nions,temp = np.shape(normal_modes)
        self.new_normal_modes = np.zeros( (nmodes,3*nions) )
        self.new_mass_weighted_normal_modes = np.zeros( (nmodes,3*nions) )
        masses = self.cell_of_molecules.atomic_masses
        for imode,mode in enumerate(mass_weighted_normal_modes):
            for index,old_index in enumerate(self.original_atomic_order):
                i = index*3
                j = old_index*3
                self.new_mass_weighted_normal_modes[imode,i+0] = mode[old_index][0]
                self.new_mass_weighted_normal_modes[imode,i+1] = mode[old_index][1]
                self.new_mass_weighted_normal_modes[imode,i+2] = mode[old_index][2]
                # un-mass-weight to recover the Cartesian displacement
                self.new_normal_modes[imode,i+0] = self.new_mass_weighted_normal_modes[imode,i+0] / math.sqrt(masses[index])
                self.new_normal_modes[imode,i+1] = self.new_mass_weighted_normal_modes[imode,i+1] / math.sqrt(masses[index])
                self.new_normal_modes[imode,i+2] = self.new_mass_weighted_normal_modes[imode,i+2] / math.sqrt(masses[index])
        # Calculate the distribution in energy for the normal modes
        mode_energies = Calculator.calculate_energy_distribution(self.cell_of_molecules, self.frequencies_cm1, self.new_mass_weighted_normal_modes)
        # Deal with degeneracies: group modes whose frequencies agree to 1e-5
        degenerate_list = [ [] for f in self.frequencies_cm1]
        for i,fi in enumerate(self.frequencies_cm1):
            for j,fj in enumerate(self.frequencies_cm1):
                if abs(fi-fj) < 1.0E-5:
                    degenerate_list[i].append(j)
        self.mode_energies = []
        for i,fi in enumerate(self.frequencies_cm1):
            tote,cme,rote,vibe,mole = mode_energies[i]
            sums = [0.0]*5
            sume = [0.0]*len(mole)
            degeneracy = len(degenerate_list[i])
            # Average each energy component over the degenerate set
            for j in degenerate_list[i]:
                tote,cme,rote,vibe,mole = mode_energies[j]
                sums[0] += tote / degeneracy
                sums[1] += cme / degeneracy
                sums[2] += rote / degeneracy
                sums[3] += vibe / degeneracy
                for k,e in enumerate(mole):
                    sume[k] += e / degeneracy
            sums[4] = sume
            self.mode_energies.append(sums)
        # Store the results in the spreadsheet
        # if self.notebook.spreadsheet is not None:
        #     self.write_spreadsheet()
        # Flag that a recalculation is not needed
        self.dirty = False
        QApplication.restoreOverrideCursor()
    def plot(self):
        """Dispatch to the plot routine selected by the plot-type combo box."""
        if self.reader is None:
            return
        if len(self.frequencies_cm1) <= 0:
            return
        if self.plot_type_index == 0:
            self.plot_internal_external()
        else:
            self.plot_molecular()
    def plot_molecular(self):
        """Draw a stacked bar chart of each mode's per-molecule energy share."""
        self.subplot = None
        self.figure.clf()
        self.subplot = self.figure.add_subplot(111)
        xlabel = 'Molecule'
        ylabel = 'Percentage energy'
        # Decide which modes to analyse
        mode_list = []
        mode_list_text = []
        vmin = self.settings['Minimum frequency']
        vmax = self.settings['Maximum frequency']
        tote,cme,rote,vibe,mole = self.mode_energies[0]
        mol_energies = None
        mol_energies = [ [] for _ in range(self.number_of_molecules) ]
        mol_bottoms = [ [] for _ in range(self.number_of_molecules) ]
        for imode, frequency in enumerate(self.frequencies_cm1):
            if frequency >= vmin and frequency <= vmax:
                mode_list.append(imode)
                mode_list_text.append(str(imode))
                tote,cme,rote,vibe,mole = self.mode_energies[imode]
                # Stack each molecule's bar on top of the previous ones
                for i,mol in enumerate(mole):
                    mol_energies[i].append(100.0*mol/tote)
                    if i == 0:
                        mol_bottoms[i].append(0.0)
                    else:
                        mol_bottoms[i].append(mol_bottoms[i-1][-1]+mol_energies[i-1][-1])
        width = self.settings['Bar width']
        plots = []
        colours = ['y','b','r','c','m','k']
        for i,(energies,bottoms) in enumerate(zip(mol_energies, mol_bottoms )):
            plots.append(self.subplot.bar(mode_list,energies,width, bottom=bottoms,color=colours[i%6]))
        legends = []
        for i in range(self.number_of_molecules):
            legends.append('Molecule '+str(i))
        self.subplot.set_xlabel(xlabel)
        self.subplot.set_ylabel(ylabel)
        self.subplot.legend( plots, legends)
        self.subplot.set_title('Molecular Composition of Vibrational Energy')
        self.canvas.draw_idle()
    def plot_internal_external(self):
        """Draw a stacked bar chart of translation/rotation/vibration percentages."""
        self.subplot = None
        self.figure.clf()
        self.subplot = self.figure.add_subplot(111)
        xlabel = 'Mode Number'
        ylabel = 'Percentage energy'
        # Decide which modes to analyse
        mode_list = []
        mode_list_text = []
        cme_energy = []
        rot_energy = []
        vib_energy = []
        vib_bottom = []
        mol_energy = []
        vmin = self.settings['Minimum frequency']
        vmax = self.settings['Maximum frequency']
        colours = ['y','b','r','c','m','k']
        for imode, frequency in enumerate(self.frequencies_cm1):
            if frequency >= vmin and frequency <= vmax:
                mode_list.append(imode)
                mode_list_text.append(str(imode))
                tote,cme,rote,vibe,molecular_energies = self.mode_energies[imode]
                cme_energy.append(cme/tote*100.0)
                rot_energy.append(rote/tote*100.0)
                vib_energy.append(vibe/tote*100.0)
                # vibration bars sit on top of translation + rotation
                vib_bottom.append( (cme+rote)/tote*100.0 )
                mol_energy.append(molecular_energies/tote*100)
        width = self.settings['Bar width']
        p1 = self.subplot.bar(mode_list,cme_energy,width,color=colours[0])
        p2 = self.subplot.bar(mode_list,rot_energy,width,bottom=cme_energy,color=colours[1])
        p3 = self.subplot.bar(mode_list,vib_energy,width,bottom=vib_bottom,color=colours[2])
        plots = ( p1[0], p2[0], p3[0] )
        legends = ('translation','rotation','vibration')
        self.subplot.set_xlabel(xlabel)
        self.subplot.set_ylabel(ylabel)
        self.subplot.legend( plots, legends)
        self.subplot.set_title('Internal-External Composition of Vibrational Energy')
        self.canvas.draw_idle()
| mit |
rexshihaoren/scikit-learn | sklearn/metrics/scorer.py | 211 | 13141 | """
The :mod:`sklearn.metrics.scorer` submodule implements a flexible
interface for model selection and evaluation using
arbitrary score functions.
A scorer object is a callable that can be passed to
:class:`sklearn.grid_search.GridSearchCV` or
:func:`sklearn.cross_validation.cross_val_score` as the ``scoring`` parameter,
to specify how a model should be evaluated.
The signature of the call is ``(estimator, X, y)`` where ``estimator``
is the model to be evaluated, ``X`` is the test data and ``y`` is the
ground truth labeling (or ``None`` in the case of unsupervised models).
"""
# Authors: Andreas Mueller <amueller@ais.uni-bonn.de>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Arnaud Joly <arnaud.v.joly@gmail.com>
# License: Simplified BSD
from abc import ABCMeta, abstractmethod
from functools import partial
import numpy as np
from . import (r2_score, median_absolute_error, mean_absolute_error,
mean_squared_error, accuracy_score, f1_score,
roc_auc_score, average_precision_score,
precision_score, recall_score, log_loss)
from .cluster import adjusted_rand_score
from ..utils.multiclass import type_of_target
from ..externals import six
from ..base import is_regressor
class _BaseScorer(six.with_metaclass(ABCMeta, object)):
    """Common state and repr machinery for scorers built by make_scorer."""

    def __init__(self, score_func, sign, kwargs):
        self._score_func = score_func
        self._sign = sign
        self._kwargs = kwargs

    @abstractmethod
    def __call__(self, estimator, X, y, sample_weight=None):
        pass

    def __repr__(self):
        extra = "".join(", %s=%s" % (str(k), str(v))
                        for k, v in self._kwargs.items())
        flip = "" if self._sign > 0 else ", greater_is_better=False"
        return ("make_scorer(%s%s%s%s)"
                % (self._score_func.__name__, flip,
                   self._factory_args(), extra))

    def _factory_args(self):
        """Return non-default make_scorer arguments for repr."""
        return ""
class _PredictScorer(_BaseScorer):
    def __call__(self, estimator, X, y_true, sample_weight=None):
        """Evaluate predicted target values for X relative to y_true.

        Parameters
        ----------
        estimator : object
            Trained estimator used for scoring; its ``predict`` output is
            compared against the ground truth.
        X : array-like or sparse matrix
            Test data that will be fed to estimator.predict.
        y_true : array-like
            Gold standard target values for X.
        sample_weight : array-like, optional (default=None)
            Sample weights forwarded to the score function when given.

        Returns
        -------
        score : float
            Sign-adjusted score of the estimator's predictions on X.
        """
        y_pred = estimator.predict(X)
        score_kwargs = dict(self._kwargs)
        if sample_weight is not None:
            score_kwargs['sample_weight'] = sample_weight
        return self._sign * self._score_func(y_true, y_pred, **score_kwargs)
class _ProbaScorer(_BaseScorer):
    def __call__(self, clf, X, y, sample_weight=None):
        """Evaluate predicted probabilities for X relative to y.

        Parameters
        ----------
        clf : object
            Trained classifier used for scoring; must provide
            ``predict_proba``, whose output feeds the score function.
        X : array-like or sparse matrix
            Test data that will be fed to clf.predict_proba.
        y : array-like
            Gold standard target values for X. These must be class labels,
            not probabilities.
        sample_weight : array-like, optional (default=None)
            Sample weights forwarded to the score function when given.

        Returns
        -------
        score : float
            Sign-adjusted score of the classifier's probabilities on X.
        """
        proba = clf.predict_proba(X)
        score_kwargs = dict(self._kwargs)
        if sample_weight is not None:
            score_kwargs['sample_weight'] = sample_weight
        return self._sign * self._score_func(y, proba, **score_kwargs)

    def _factory_args(self):
        return ", needs_proba=True"
class _ThresholdScorer(_BaseScorer):
def __call__(self, clf, X, y, sample_weight=None):
"""Evaluate decision function output for X relative to y_true.
Parameters
----------
clf : object
Trained classifier to use for scoring. Must have either a
decision_function method or a predict_proba method; the output of
that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to clf.decision_function or
clf.predict_proba.
y : array-like
Gold standard target values for X. These must be class labels,
not decision function values.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_type = type_of_target(y)
if y_type not in ("binary", "multilabel-indicator"):
raise ValueError("{0} format is not supported".format(y_type))
if is_regressor(clf):
y_pred = clf.predict(X)
else:
try:
y_pred = clf.decision_function(X)
# For multi-output multi-class estimator
if isinstance(y_pred, list):
y_pred = np.vstack(p for p in y_pred).T
except (NotImplementedError, AttributeError):
y_pred = clf.predict_proba(X)
if y_type == "binary":
y_pred = y_pred[:, 1]
elif isinstance(y_pred, list):
y_pred = np.vstack([p[:, -1] for p in y_pred]).T
if sample_weight is not None:
return self._sign * self._score_func(y, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y, y_pred, **self._kwargs)
def _factory_args(self):
return ", needs_threshold=True"
def get_scorer(scoring):
    """Resolve *scoring* to a scorer callable.

    A string is looked up in the SCORERS registry (raising ValueError for an
    unknown name); any other value is assumed to already be a scorer callable
    and is returned unchanged.
    """
    if not isinstance(scoring, six.string_types):
        return scoring
    try:
        return SCORERS[scoring]
    except KeyError:
        raise ValueError('%r is not a valid scoring value. '
                         'Valid options are %s'
                         % (scoring, sorted(SCORERS.keys())))
def _passthrough_scorer(estimator, *args, **kwargs):
"""Function that wraps estimator.score"""
return estimator.score(*args, **kwargs)
def check_scoring(estimator, scoring=None, allow_none=False):
    """Determine scorer from user options.

    A TypeError will be thrown if the estimator cannot be scored.

    Parameters
    ----------
    estimator : estimator object implementing 'fit'
        The object to use to fit the data.

    scoring : string, callable or None, optional, default: None
        A string (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.

    allow_none : boolean, optional, default: False
        If no scoring is specified and the estimator has no score function, we
        can either return None or raise an exception.

    Returns
    -------
    scoring : callable
        A scorer callable object / function with signature
        ``scorer(estimator, X, y)``.
    """
    if not hasattr(estimator, 'fit'):
        # Bug fix: the previous message read "should a be an estimator".
        raise TypeError("estimator should be an estimator implementing "
                        "'fit' method, %r was passed" % estimator)
    if scoring is not None:
        return get_scorer(scoring)
    if hasattr(estimator, 'score'):
        # Fall back to the estimator's own score method.
        return _passthrough_scorer
    if allow_none:
        return None
    raise TypeError(
        "If no scoring is specified, the estimator passed should "
        "have a 'score' method. The estimator %r does not." % estimator)
def make_scorer(score_func, greater_is_better=True, needs_proba=False,
                needs_threshold=False, **kwargs):
    """Make a scorer from a performance metric or loss function.

    This factory function wraps scoring functions for use in GridSearchCV
    and cross_val_score. It takes a score function, such as
    ``accuracy_score``, ``mean_squared_error``, ``adjusted_rand_index`` or
    ``average_precision`` and returns a callable that scores an estimator's
    output.

    Read more in the :ref:`User Guide <scoring>`.

    Parameters
    ----------
    score_func : callable,
        Score function (or loss function) with signature
        ``score_func(y, y_pred, **kwargs)``.

    greater_is_better : boolean, default=True
        Whether score_func is a score function (default, high is good) or a
        loss function (low is good). For a loss the scorer sign-flips the
        outcome of score_func.

    needs_proba : boolean, default=False
        Whether score_func requires predict_proba to get probability
        estimates out of a classifier.

    needs_threshold : boolean, default=False
        Whether score_func takes a continuous decision certainty. This only
        works for binary classification using estimators that have either a
        decision_function or predict_proba method. Mutually exclusive with
        ``needs_proba``.

    **kwargs : additional arguments
        Additional parameters to be passed to score_func.

    Returns
    -------
    scorer : callable
        Callable object that returns a scalar score; greater is better.

    Examples
    --------
    >>> from sklearn.metrics import fbeta_score, make_scorer
    >>> ftwo_scorer = make_scorer(fbeta_score, beta=2)
    >>> ftwo_scorer
    make_scorer(fbeta_score, beta=2)
    """
    if needs_proba and needs_threshold:
        raise ValueError("Set either needs_proba or needs_threshold to True,"
                         " but not both.")
    sign = 1 if greater_is_better else -1
    if needs_proba:
        scorer_cls = _ProbaScorer
    elif needs_threshold:
        scorer_cls = _ThresholdScorer
    else:
        scorer_cls = _PredictScorer
    return scorer_cls(score_func, sign, kwargs)
# Standard regression scores
r2_scorer = make_scorer(r2_score)
mean_squared_error_scorer = make_scorer(mean_squared_error,
                                        greater_is_better=False)
mean_absolute_error_scorer = make_scorer(mean_absolute_error,
                                         greater_is_better=False)
median_absolute_error_scorer = make_scorer(median_absolute_error,
                                           greater_is_better=False)
# Standard Classification Scores
accuracy_scorer = make_scorer(accuracy_score)
f1_scorer = make_scorer(f1_score)
# Score functions that need decision values
roc_auc_scorer = make_scorer(roc_auc_score, greater_is_better=True,
                             needs_threshold=True)
average_precision_scorer = make_scorer(average_precision_score,
                                       needs_threshold=True)
precision_scorer = make_scorer(precision_score)
recall_scorer = make_scorer(recall_score)
# Score function for probabilistic classification
log_loss_scorer = make_scorer(log_loss, greater_is_better=False,
                              needs_proba=True)
# Clustering scores
adjusted_rand_scorer = make_scorer(adjusted_rand_score)
# Registry mapping scoring-string names to scorer callables; this is the
# table consulted by get_scorer when a string is passed as ``scoring``.
SCORERS = dict(r2=r2_scorer,
               median_absolute_error=median_absolute_error_scorer,
               mean_absolute_error=mean_absolute_error_scorer,
               mean_squared_error=mean_squared_error_scorer,
               accuracy=accuracy_scorer, roc_auc=roc_auc_scorer,
               average_precision=average_precision_scorer,
               log_loss=log_loss_scorer,
               adjusted_rand_score=adjusted_rand_scorer)
# Register the plain and averaged variants (e.g. "f1_macro", "f1_micro")
# for the three threshold-free binary classification metrics.
for name, metric in [('precision', precision_score),
                     ('recall', recall_score), ('f1', f1_score)]:
    SCORERS[name] = make_scorer(metric)
    for average in ['macro', 'micro', 'samples', 'weighted']:
        qualified_name = '{0}_{1}'.format(name, average)
        SCORERS[qualified_name] = make_scorer(partial(metric, pos_label=None,
                                                      average=average))
| bsd-3-clause |
yyjiang/scikit-learn | sklearn/decomposition/tests/test_truncated_svd.py | 240 | 6055 | """Test truncated SVD transformer."""
import numpy as np
import scipy.sparse as sp
from sklearn.decomposition import TruncatedSVD
from sklearn.utils import check_random_state
from sklearn.utils.testing import (assert_array_almost_equal, assert_equal,
assert_raises, assert_greater,
assert_array_less)
# Make an X that looks somewhat like a small tf-idf matrix.
# XXX newer versions of SciPy have scipy.sparse.rand for this.
shape = 60, 55
n_samples, n_features = shape
rng = check_random_state(42)
# Bug fix: np.prod replaces the deprecated np.product alias, which was
# removed in NumPy 2.0; behaviour is identical.
X = rng.randint(-100, 20, np.prod(shape)).reshape(shape)
X = sp.csr_matrix(np.maximum(X, 0), dtype=np.float64)
X.data[:] = 1 + np.log(X.data)
Xdense = X.A
def test_algorithms():
    """ARPACK and randomized SVD should agree on this matrix."""
    svd_arpack = TruncatedSVD(30, algorithm="arpack")
    svd_random = TruncatedSVD(30, algorithm="randomized", random_state=42)

    Xa = svd_arpack.fit_transform(X)[:, :6]
    Xr = svd_random.fit_transform(X)[:, :6]
    assert_array_almost_equal(Xa, Xr)

    comp_a = np.abs(svd_arpack.components_)
    comp_r = np.abs(svd_random.components_)
    # All elements are equal, but some elements are more equal than others.
    assert_array_almost_equal(comp_a[:9], comp_r[:9])
    assert_array_almost_equal(comp_a[9:], comp_r[9:], decimal=3)
def test_attributes():
    """Fitted attributes must reflect the requested number of components."""
    for k in (10, 25, 41):
        model = TruncatedSVD(k).fit(X)
        assert_equal(model.n_components, k)
        assert_equal(model.components_.shape, (k, n_features))
def test_too_many_components():
    """Asking for n_features or more components must raise ValueError."""
    for algorithm in ("arpack", "randomized"):
        for k in (n_features, n_features + 1):
            estimator = TruncatedSVD(n_components=k, algorithm=algorithm)
            assert_raises(ValueError, estimator.fit, X)
def test_sparse_formats():
    """Fit/transform must accept dense arrays and common sparse formats.

    Bug fix: the original condition compared ``fmt`` against ``"dense"``,
    a value that never occurs in the iterated tuple, so the precomputed
    ``Xdense`` was dead code and every branch went through ``getattr``.
    Map ``"array"`` to the dense copy explicitly instead.
    """
    for fmt in ("array", "csr", "csc", "coo", "lil"):
        # "array" -> the precomputed dense copy; otherwise convert via the
        # scipy.sparse to<fmt>() methods (tocsr, tocsc, tocoo, tolil).
        Xfmt = Xdense if fmt == "array" else getattr(X, "to" + fmt)()
        tsvd = TruncatedSVD(n_components=11)
        Xtrans = tsvd.fit_transform(Xfmt)
        assert_equal(Xtrans.shape, (n_samples, 11))
        Xtrans = tsvd.transform(Xfmt)
        assert_equal(Xtrans.shape, (n_samples, 11))
def test_inverse_transform():
    """inverse_transform should approximately reconstruct the input.

    Bug fix: the loop variable ``algo`` was never passed to the estimator,
    so both iterations tested the default (randomized) solver and the
    arpack path was never exercised; thread it through ``algorithm=``.
    """
    for algo in ("arpack", "randomized"):
        # We need a lot of components for the reconstruction to be "almost
        # equal" in all positions. XXX Test means or sums instead?
        tsvd = TruncatedSVD(n_components=52, random_state=42, algorithm=algo)
        Xt = tsvd.fit_transform(X)
        Xinv = tsvd.inverse_transform(Xt)
        assert_array_almost_equal(Xinv, Xdense, decimal=1)
def test_integers():
    """Integer input is accepted and transformed to the requested width."""
    model = TruncatedSVD(n_components=6)
    transformed = model.fit_transform(X.astype(np.int64))
    assert_equal(transformed.shape, (n_samples, model.n_components))
def test_explained_variance():
    """Consistency checks for ``explained_variance_ratio_`` across solvers
    (arpack vs. randomized), component counts (10 vs. 20) and input
    representation (sparse vs. dense)."""
    # Test sparse data
    svd_a_10_sp = TruncatedSVD(10, algorithm="arpack")
    svd_r_10_sp = TruncatedSVD(10, algorithm="randomized", random_state=42)
    svd_a_20_sp = TruncatedSVD(20, algorithm="arpack")
    svd_r_20_sp = TruncatedSVD(20, algorithm="randomized", random_state=42)
    X_trans_a_10_sp = svd_a_10_sp.fit_transform(X)
    X_trans_r_10_sp = svd_r_10_sp.fit_transform(X)
    X_trans_a_20_sp = svd_a_20_sp.fit_transform(X)
    X_trans_r_20_sp = svd_r_20_sp.fit_transform(X)
    # Test dense data
    svd_a_10_de = TruncatedSVD(10, algorithm="arpack")
    svd_r_10_de = TruncatedSVD(10, algorithm="randomized", random_state=42)
    svd_a_20_de = TruncatedSVD(20, algorithm="arpack")
    svd_r_20_de = TruncatedSVD(20, algorithm="randomized", random_state=42)
    X_trans_a_10_de = svd_a_10_de.fit_transform(X.toarray())
    X_trans_r_10_de = svd_r_10_de.fit_transform(X.toarray())
    X_trans_a_20_de = svd_a_20_de.fit_transform(X.toarray())
    X_trans_r_20_de = svd_r_20_de.fit_transform(X.toarray())
    # helper arrays for tests below
    svds = (svd_a_10_sp, svd_r_10_sp, svd_a_20_sp, svd_r_20_sp, svd_a_10_de,
            svd_r_10_de, svd_a_20_de, svd_r_20_de)
    # (estimator, transformed data) pairs for the ratio-correctness check
    svds_trans = (
        (svd_a_10_sp, X_trans_a_10_sp),
        (svd_r_10_sp, X_trans_r_10_sp),
        (svd_a_20_sp, X_trans_a_20_sp),
        (svd_r_20_sp, X_trans_r_20_sp),
        (svd_a_10_de, X_trans_a_10_de),
        (svd_r_10_de, X_trans_r_10_de),
        (svd_a_20_de, X_trans_a_20_de),
        (svd_r_20_de, X_trans_r_20_de),
    )
    # same-solver, same-representation pairs differing only in n_components
    svds_10_v_20 = (
        (svd_a_10_sp, svd_a_20_sp),
        (svd_r_10_sp, svd_r_20_sp),
        (svd_a_10_de, svd_a_20_de),
        (svd_r_10_de, svd_r_20_de),
    )
    # same-solver, same-n_components pairs differing only in sparsity
    svds_sparse_v_dense = (
        (svd_a_10_sp, svd_a_10_de),
        (svd_a_20_sp, svd_a_20_de),
        (svd_r_10_sp, svd_r_10_de),
        (svd_r_20_sp, svd_r_20_de),
    )
    # The first 10 explained-variance ratios of a 20-component fit should
    # match the corresponding 10-component fit.
    for svd_10, svd_20 in svds_10_v_20:
        assert_array_almost_equal(
            svd_10.explained_variance_ratio_,
            svd_20.explained_variance_ratio_[:10],
            decimal=5,
        )
    # Assert that 20 components has higher explained variance than 10
    for svd_10, svd_20 in svds_10_v_20:
        assert_greater(
            svd_20.explained_variance_ratio_.sum(),
            svd_10.explained_variance_ratio_.sum(),
        )
    # Assert that all the values are greater than 0
    for svd in svds:
        assert_array_less(0.0, svd.explained_variance_ratio_)
    # Assert that total explained variance is less than 1
    for svd in svds:
        assert_array_less(svd.explained_variance_ratio_.sum(), 1.0)
    # Compare sparse vs. dense
    for svd_sparse, svd_dense in svds_sparse_v_dense:
        assert_array_almost_equal(svd_sparse.explained_variance_ratio_,
                                  svd_dense.explained_variance_ratio_)
    # Test that explained_variance is correct: the ratio must equal the
    # per-component variance of the transformed data over the total input
    # variance. (NOTE: total_variance is loop-invariant and could be hoisted.)
    for svd, transformed in svds_trans:
        total_variance = np.var(X.toarray(), axis=0).sum()
        variances = np.var(transformed, axis=0)
        true_explained_variance_ratio = variances / total_variance
        assert_array_almost_equal(
            svd.explained_variance_ratio_,
            true_explained_variance_ratio,
        )
| bsd-3-clause |
daviddesancho/BestMSM | bestmsm/visual_lib.py | 1 | 3173 | import sys
import math
import itertools
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
# Much indebted to Randal Olson (http://www.randalolson.com/)
# These are the "Tableau 20" colors as RGB.
tableau20 = [(31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120),
(44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150),
(148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),
(227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),
(188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)]
# Scale the RGB values to the [0, 1] range, which is the format matplotlib accepts.
for i in range(len(tableau20)):
r, g, b = tableau20[i]
tableau20[i] = (r / 255., g / 255., b / 255.)
figsize = (12,14)
def getridofticks(ax=None):
    """Show ticks only on the bottom and left spines of an axes.

    Parameters
    ----------
    ax : matplotlib axes, optional
        Axes to modify; defaults to the current axes.

    Bug fix: the original body read a module-level ``ax`` that is never
    defined in this file, so every call raised NameError. Accepting the
    axes as an optional argument (falling back to ``plt.gca()``) is
    backward-compatible with no-argument calls and actually works.
    """
    if ax is None:
        ax = plt.gca()
    ax.get_xaxis().tick_bottom()
    ax.get_yaxis().tick_left()
def write_dot(J, nodeweight=None, rank=None, out="out.dot"):
    """ Function for printing a graph in dot format
    Parameters
    ----------
    J : array
        A directed graph in the form of a matrix, with values corresponding to weights
    nodeweight : array
        Weights for the nodes.
    rank : list, array
        The rank order for display of nodes.
    out : string
        Filename for output.
    """
    # NOTE: Python 2 source (print statement, zip() indexed directly below).
    print "\n Writing graph in dot format..."
    # NOTE(review): fout is never explicitly closed/flushed - confirm the
    # interpreter shutdown is relied on to write the file out.
    fout = open(out, "w")
    fout.write("strict digraph G {\n")
    # Transpose so that J[v, u] becomes an edge u -> v in the graph.
    D = nx.DiGraph(J.transpose())
    nd = len(D.nodes())
    # define weights of nodes
    try:
        # Log-scale node weights; falls through to uniform sizes when
        # nodeweight is None (np.log(None) raises AttributeError).
        lw = np.log(nodeweight)
        #wmin = np.min(lw)
        #wmax = np.max(lw)
        #norm = wmax - wmin
        #weight = [(x - wmin)/norm + 0.5 for x in lw]
        weight = 0.2*(lw - np.min(lw) -np.max(lw))
    except AttributeError:
        weight = np.ones(nd)
    # All node ids that participate in at least one edge (py2: zip returns a
    # list, so [0]/[1] are the tuples of sources and targets).
    elems = zip(*D.edges())[0] + zip(*D.edges())[1]
    for n in D.nodes():
        if n in elems:
            fout.write("%i [shape=circle,width=%f];\n"%(n, weight[n]))
    # define rank of nodes
    try:
        # Pin node pairs with equal rank to the same graphviz rank row;
        # rank=None makes rank[u] raise TypeError and skips this section.
        for u,v in itertools.product(D.nodes(),D.nodes()):
            if u < v:
                if u in elems and v in elems:
                    if rank[u] == rank[v]:
                        fout.write ("{rank=same; %i; %i;}\n"%(u,v))
    except TypeError:
        pass
    # define weights of edges
    # NOTE: "weight" is reused here for an (unused-as-array) edge matrix,
    # shadowing the node weights computed above.
    weight = np.zeros((nd, nd), float)
    try:
        # Normalise edge weights by the smallest positive entry of J;
        # an all-zero J makes np.min raise and falls back to unit weights.
        wmin = np.min(J[J>0])
        for u, v in D.edges():
            D[u][v]['weight'] = J[v,u]/wmin
    except TypeError:
        for u, v in D.edges():
            D[u][v]['weight'] = 1.
    # Emit non-self edges with pen width log(weight)+1.
    for u,v,d in D.edges_iter(data=True):
        if u != v and not math.isnan(d['weight']):
            fout.write("%i -> %i [penwidth=%f,color=black];\n"%(u, v, np.log(d['weight'])+1))
    fout.write("}")
def plot_evals(vals, log=False):
    """Plot an eigenvalue spectrum.

    Parameters
    ----------
    vals : array-like
        Eigenvalues (or timescales) to plot against their index.
    log : bool, optional
        If True, use a logarithmic y axis.

    Bug fix: the original called ``plt.add_subplots(1,1,1)``, which does not
    exist on pyplot (axes are created with ``fig.add_subplot``), so this
    function always raised AttributeError. The bottom/left-only tick styling
    is inlined here because the module-level ``getridofticks`` helper reads
    an undefined global axes.
    """
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    # Ticks only at the bottom and left of the plot.
    ax.get_xaxis().tick_bottom()
    ax.get_yaxis().tick_left()
    if not log:
        ax.plot(vals)
    else:
        ax.semilogy(vals)
    ax.set_xlabel(r'$Eigenvalue$')
    ax.set_ylabel(r'$\tau$')
    plt.show()
| gpl-2.0 |
eqcorrscan/ci.testing | eqcorrscan/utils/despike.py | 1 | 7758 | """
Functions for despiking seismic data.
.. warning:: Despike functions are in beta, they do not work that well.
.. todo:: Deconvolve spike to remove it, find peaks in the f-domain.
:copyright:
EQcorrscan developers.
:license:
GNU Lesser General Public License, Version 3
(https://www.gnu.org/copyleft/lesser.html)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import matplotlib.pyplot as plt
from multiprocessing import Pool, cpu_count
from eqcorrscan.utils.timer import Timer
def median_filter(tr, multiplier=10, windowlength=0.5,
                  interp_len=0.05, debug=0):
    """
    Filter out spikes in data above a multiple of MAD of the data.
    Currently only has the ability to replaces spikes with linear
    interpolation. In the future we would aim to fill the gap with something
    more appropriate. Works in-place on data.
    :type tr: obspy.core.trace.Trace
    :param tr: trace to despike
    :type multiplier: float
    :param multiplier:
        median absolute deviation multiplier to find spikes above.
    :type windowlength: float
    :param windowlength: Length of window to look for spikes in in seconds.
    :type interp_len: float
    :param interp_len: Length in seconds to interpolate around spikes.
    :type debug: int
    :param debug: Debug output level between 0 and 5, higher is more output.
    :returns: :class:`obspy.core.trace.Trace`
    .. warning::
        Not particularly effective, and may remove earthquake signals, use with
        caution.
    """
    num_cores = cpu_count()
    if debug >= 1:
        # Keep an untouched copy only for the debug comparison plot below.
        data_in = tr.copy()
    # Note - might be worth finding spikes in filtered data
    filt = tr.copy()
    filt.detrend('linear')
    # Detection runs on a 10 Hz -> (Nyquist - 1) band-passed, detrended copy
    # so slow trends do not inflate the threshold; the raw trace is repaired.
    filt.filter('bandpass', freqmin=10.0,
                freqmax=(tr.stats.sampling_rate / 2) - 1)
    data = filt.data
    del(filt)
    # Loop through windows
    _windowlength = int(windowlength * tr.stats.sampling_rate)  # in samples
    _interp_len = int(interp_len * tr.stats.sampling_rate)  # in samples
    peaks = []
    with Timer() as t:
        # One async task per non-overlapping window; a trailing partial
        # window (len(data) % _windowlength samples) is never searched.
        pool = Pool(processes=num_cores)
        results = [pool.apply_async(_median_window,
                                    args=(data[chunk * _windowlength:
                                               (chunk + 1) * _windowlength],
                                          chunk * _windowlength, multiplier,
                                          tr.stats.starttime + windowlength,
                                          tr.stats.sampling_rate,
                                          debug))
                   for chunk in range(int(len(data) / _windowlength))]
        pool.close()
        for p in results:
            peaks += p.get()
        pool.join()
    # NOTE(review): every chunk receives the same start time
    # (starttime + windowlength); presumably this should advance per chunk.
    # It only affects debug plot labelling - confirm before relying on it.
    for peak in peaks:
        # Linearly interpolate across each detected spike in the raw trace.
        tr.data = _interp_gap(tr.data, peak[1], _interp_len)
    print("Despiking took: %s s" % t.secs)
    if debug >= 1:
        plt.plot(data_in.data, 'r', label='raw')
        plt.plot(tr.data, 'k', label='despiked')
        plt.legend()
        plt.show()
    return tr
def _median_window(window, window_start, multiplier, starttime, sampling_rate,
                   debug=0):
    """
    Internal function to aid parallel processing
    :type window: numpy.ndarry
    :param window: Data to look for peaks in.
    :type window_start: int
    :param window_start: Index of window start point in larger array, used \
        for peak indexing.
    :type multiplier: float
    :param multiplier: Multiple of MAD to use as threshold
    :type starttime: obspy.core.utcdatetime.UTCDateTime
    :param starttime: Starttime of window, used in debug plotting.
    :type sampling_rate: float
    :param sampling_rate in Hz, used for debug plotting
    :type debug: int
    :param debug: debug level, if want plots, >= 4.
    :returns: peaks
    :rtype: list
    """
    # Local imports: this function is dispatched to multiprocessing workers
    # by median_filter.
    from eqcorrscan.utils.findpeaks import find_peaks2_short
    from eqcorrscan.utils.plotting import peaks_plot
    # NOTE(review): this is the median of |window|, not the classical median
    # absolute deviation (median of |x - median(x)|) - confirm intended.
    MAD = np.median(np.abs(window))
    thresh = multiplier * MAD
    if debug >= 2:
        print('Threshold for window is: ' + str(thresh) +
              '\nMedian is: ' + str(MAD) +
              '\nMax is: ' + str(np.max(window)))
    # Peaks above threshold, de-duplicated within 5 samples of each other.
    peaks = find_peaks2_short(arr=window,
                              thresh=thresh, trig_int=5, debug=0)
    if debug >= 4 and peaks:
        peaks_plot(window, starttime, sampling_rate,
                   save=False, peaks=peaks)
    if peaks:
        # Shift peak indices from window-local to whole-trace coordinates.
        peaks = [(peak[0], peak[1] + window_start) for peak in peaks]
    else:
        peaks = []
    return peaks
def _interp_gap(data, peak_loc, interp_len):
"""
Internal function for filling gap with linear interpolation
:type data: numpy.ndarray
:param data: data to remove peak in
:type peak_loc: int
:param peak_loc: peak location position
:type interp_len: int
:param interp_len: window to interpolate
:returns: Trace works in-place
:rtype: :class:`obspy.core.trace.Trace`
"""
start_loc = peak_loc - int(0.5 * interp_len)
end_loc = peak_loc + int(0.5 * interp_len)
if start_loc < 0:
start_loc = 0
if end_loc > len(data) - 1:
end_loc = len(data) - 1
fill = np.linspace(data[start_loc], data[end_loc], end_loc - start_loc)
data[start_loc:end_loc] = fill
return data
def template_remove(tr, template, cc_thresh, windowlength,
                    interp_len, debug=0):
    """
    Looks for instances of template in the trace and removes the matches.
    :type tr: obspy.core.trace.Trace
    :param tr: Trace to remove spikes from.
    :type template: obspy.core.trace.Trace
    :param template: Spike template to look for in data.
    :type cc_thresh: float
    :param cc_thresh: Cross-correlation threshold (-1 - 1).
    :type windowlength: float
    :param windowlength: Length of window to look for spikes in in seconds.
    :type interp_len: float
    :param interp_len: Window length to remove and fill in seconds.
    :type debug: int
    :param debug: Debug level.
    :returns: tr, works in place.
    :rtype: :class:`obspy.core.trace.Trace`
    """
    # NOTE(review): imports are function-local; possibly to avoid a circular
    # import with eqcorrscan.core.match_filter - confirm.
    from eqcorrscan.core.match_filter import normxcorr2
    from eqcorrscan.utils.findpeaks import find_peaks2_short
    from obspy import Trace
    from eqcorrscan.utils.timer import Timer
    import matplotlib.pyplot as plt
    import warnings
    data_in = tr.copy()  # untouched copy for the debug comparison plot
    _interp_len = int(tr.stats.sampling_rate * interp_len)  # in samples
    # The replacement window must cover at least the whole template.
    if _interp_len < len(template.data):
        warnings.warn('Interp_len is less than the length of the template,'
                      'will used the length of the template!')
        _interp_len = len(template.data)
    if isinstance(template, Trace):
        template = template.data  # work on the raw numpy array from here on
    with Timer() as t:
        cc = normxcorr2(tr.data.astype(np.float32),
                        template.astype(np.float32))
        if debug > 3:
            plt.plot(cc.flatten(), 'k', label='cross-correlation')
            plt.legend()
            plt.show()
        # Detections: correlation peaks above cc_thresh, separated by at
        # least one windowlength worth of samples.
        peaks = find_peaks2_short(arr=cc.flatten(), thresh=cc_thresh,
                                  trig_int=windowlength * tr.stats.
                                  sampling_rate)
        for peak in peaks:
            # peak[1] is the correlation-peak index; presumably the start of
            # the matched template, so the fill window is centred on the
            # match by shifting half of _interp_len - confirm.
            tr.data = _interp_gap(data=tr.data,
                                  peak_loc=peak[1] + int(0.5 * _interp_len),
                                  interp_len=_interp_len)
    print("Despiking took: %s s" % t.secs)
    if debug > 2:
        plt.plot(data_in.data, 'r', label='raw')
        plt.plot(tr.data, 'k', label='despiked')
        plt.legend()
        plt.show()
    return tr
if __name__ == '__main__':
    # Run this module's docstring examples as a quick self-test.
    import doctest
    doctest.testmod()
| lgpl-3.0 |
0todd0000/spm1d | spm1d/rft1d/examples/paper/fig11_val_cluster.py | 1 | 2030 |
# Validation script: compares simulated maximum-cluster-extent survival
# probabilities of smooth Gaussian random fields against the RFT
# (random field theory) predictions from rft1d, and plots both.
import numpy as np
from scipy import stats
from matplotlib import pyplot
from spm1d import rft1d
eps        = np.finfo(float).eps #smallest float
### EPS production preliminaries:
fig_width_mm  = 100
fig_height_mm = 80
mm2in = 1/25.4
fig_width  = fig_width_mm*mm2in  	# width in inches
fig_height = fig_height_mm*mm2in	# height in inches
params = {	'backend':'ps', 'axes.labelsize':14,
			'font.size':12, 'text.usetex': False, 'legend.fontsize':12,
			'xtick.labelsize':8, 'ytick.labelsize':8,
			'font.family':'Times New Roman',  #Times
			'lines.linewidth':0.5,
			'patch.linewidth':0.25,
			'figure.figsize': [fig_width,fig_height]}
pyplot.rcParams.update(params)
#(0) Set parameters:
np.random.seed(0)
nResponses = 1000   #raise this to 10000 to reproduce the results from the paper
nNodes     = 101
FWHM       = 10.0
interp     = True
wrap       = True
heights    = [2.2, 2.4, 2.6, 2.8]  # thresholds u at which clusters are defined
### generate data:
y          = rft1d.randn1d(nResponses, nNodes, FWHM)
calc       = rft1d.geom.ClusterMetricCalculator()
rftcalc    = rft1d.prob.RFTCalculator(STAT='Z', nodes=nNodes, FWHM=FWHM)
#(1) Maximum region size:
# K0: grid of extent thresholds x; K: max cluster extent per response/height;
# P: simulated P(k_max >= x); P0: theoretical RFT probability (extents are
# expressed in FWHM units for the theoretical calculator, hence K0/FWHM).
K0      = np.linspace(eps, 15, 21)
K       = np.array([[calc.max_cluster_extent(yy, h, interp, wrap)   for yy in y]  for h in heights])
P       = np.array([(K>=k0).mean(axis=1)  for k0 in K0]).T
P0      = np.array([[rftcalc.p.cluster(k0, h)  for k0 in K0/FWHM]  for h in heights])
#(2) Plot results:
pyplot.close('all')
colors  = ['b', 'g', 'r', 'orange']
labels  = ['$u$ = %.1f'%h for h in heights]
ax      = pyplot.axes([0.17,0.14,0.80,0.84])
for color,p,p0,label in zip(colors,P,P0,labels):
	ax.plot(K0, p,  'o', color=color, markersize=5)
	ax.plot(K0, p0, '-', color=color, label=label)
# Dummy lines drawn at y=10, outside ylim (0, 0.25), purely to create the
# "Theoretical"/"Simulated" legend entries.
ax.plot([0,1],[10,10], 'k-', label='Theoretical')
ax.plot([0,1],[10,10], 'ko-', label='Simulated', markersize=5)
ax.legend()
ax.set_xlabel('$x$', size=16)
ax.set_ylabel('$P(k_{max}) > x$', size=16)
ax.set_ylim(0, 0.25)
pyplot.show()
# pyplot.savefig('fig_valid_gauss1d_clusters.pdf') | gpl-3.0 |
poryfly/scikit-learn | benchmarks/bench_plot_svd.py | 325 | 2899 | """Benchmarks of Singular Value Decomposition (Exact and Approximate)
The data is mostly low rank but is a fat infinite tail.
"""
import gc
from time import time
import numpy as np
from collections import defaultdict
from scipy.linalg import svd
from sklearn.utils.extmath import randomized_svd
from sklearn.datasets.samples_generator import make_low_rank_matrix
def compute_bench(samples_range, features_range, n_iter=3, rank=50):
    """Time exact (scipy) vs. randomized (scikit-learn) SVD on synthetic data.

    Parameters
    ----------
    samples_range, features_range : sequence of int
        Grid of (n_samples, n_features) shapes to benchmark.
    n_iter : int, default 3
        Number of power iterations for the second randomized_svd run.
    rank : int, default 50
        Effective rank of the synthetic low-rank matrix and the number of
        singular vectors requested from randomized_svd.

    Returns
    -------
    dict
        Maps a method label to the list of elapsed times (seconds), one per
        (n_samples, n_features) pair, in iteration order.
    """
    it = 0
    # Idiom fix: defaultdict(list) instead of defaultdict(lambda: []).
    results = defaultdict(list)
    max_it = len(samples_range) * len(features_range)
    for n_samples in samples_range:
        for n_features in features_range:
            it += 1
            print('====================')
            print('Iteration %03d of %03d' % (it, max_it))
            print('====================')
            # Mostly low-rank matrix with a fat tail of small singular values.
            X = make_low_rank_matrix(n_samples, n_features,
                                     effective_rank=rank,
                                     tail_strength=0.2)

            # gc.collect() before each timed region to keep collection
            # pauses out of the measurements.
            gc.collect()
            print("benchmarking scipy svd: ")
            tstart = time()
            svd(X, full_matrices=False)
            results['scipy svd'].append(time() - tstart)

            gc.collect()
            print("benchmarking scikit-learn randomized_svd: n_iter=0")
            tstart = time()
            randomized_svd(X, rank, n_iter=0)
            results['scikit-learn randomized_svd (n_iter=0)'].append(
                time() - tstart)

            gc.collect()
            print("benchmarking scikit-learn randomized_svd: n_iter=%d "
                  % n_iter)
            tstart = time()
            randomized_svd(X, rank, n_iter=n_iter)
            results['scikit-learn randomized_svd (n_iter=%d)'
                    % n_iter].append(time() - tstart)

    return results
if __name__ == '__main__':
    from mpl_toolkits.mplot3d import axes3d  # register the 3d projection
    import matplotlib.pyplot as plt

    # NOTE(review): this driver is Python 2 only - results.iteritems() below
    # does not exist on py3 dicts, and np.int was removed in NumPy >= 1.24.
    samples_range = np.linspace(2, 1000, 4).astype(np.int)
    features_range = np.linspace(2, 1000, 4).astype(np.int)
    results = compute_bench(samples_range, features_range)

    label = 'scikit-learn singular value decomposition benchmark results'
    fig = plt.figure(label)
    ax = fig.gca(projection='3d')
    # One surface per timed method; 'rbg' gives each of the three methods
    # its own colour (sorted() fixes the method order).
    for c, (label, timings) in zip('rbg', sorted(results.iteritems())):
        X, Y = np.meshgrid(samples_range, features_range)
        Z = np.asarray(timings).reshape(samples_range.shape[0],
                                        features_range.shape[0])
        # plot the actual surface
        ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3,
                        color=c)
        # dummy point plot to stick the legend to since surface plot do not
        # support legends (yet?)
        ax.plot([1], [1], [1], color=c, label=label)

    ax.set_xlabel('n_samples')
    ax.set_ylabel('n_features')
    ax.set_zlabel('Time (s)')
    ax.legend()
    plt.show()
| bsd-3-clause |
bmcfee/pescador | docs/conf.py | 1 | 10180 | # -*- coding: utf-8 -*-
#
# pescador documentation build configuration file, created by
# sphinx-quickstart on Fri Apr 3 10:03:34 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../pescador'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.intersphinx',
'sphinx.ext.mathjax',
# 'sphinx.ext.coverage',
# 'sphinx.ext.viewcode',
# 'sphinx.ext.doctest',
'numpydoc',
'sphinx_gallery.gen_gallery',
]
# Configuration for sphinx-gallery
sphinx_gallery_conf = {
# Path to examples scripts
'examples_dirs': '../examples',
# Path to where to save gallery generated examples
'gallery_dirs': 'auto_examples',
'reference_url': {
'sphinx_gallery': None,
'numpy': 'http://docs.scipy.org/doc/numpy/',
'np': 'http://docs.scipy.org/doc/numpy/',
'scipy': 'http://docs.scipy.org/doc/scipy/reference/',
'matplotlib': 'http://matplotlib.org/',
'sklearn': 'http://scikit-learn.org/stable/',
'keras': None,
'theano': 'http://deeplearning.net/software/theano/'
},
'default_thumb_file': 'noun_199.png',
'backreferences_dir': False,
}
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from glob import glob
autosummary_generate = glob('*.rst')
# Include the __init__ doc as well as the class
autoclass_content = 'class'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pescador'
authors = u'Pescador development team'
copyright = u'2016, {}'.format(authors)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
import six
if six.PY3:
from unittest.mock import MagicMock as Mock
else:
from mock import Mock
MOCK_MODULES = ['numpy', 'scipy',
'joblib.parallel', 'joblib._parallel_backends', 'joblib',
'zmq',
'json', 'ujson',
'multiprocessing']
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
import imp
pescador_version = imp.load_source('pescador.version', '../pescador/version.py')

# The short X.Y version.
version = pescador_version.short_version
# The full version, including alpha/beta/rc tags.
# Bug fix: this was previously assigned to the misspelled name "relaese",
# which left Sphinx's `release` configuration value undefined.
release = pescador_version.version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Numpydoc --
numpydoc_show_class_members = False
intersphinx_mapping = {'python': ('https://docs.python.org/2', None),
'numpy': ('https://docs.scipy.org/doc/numpy/', None),
'np': ('https://docs.scipy.org/doc/numpy/', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference/', None)}
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pescadordoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'pescador.tex', u'pescador Documentation',
authors),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pescador', u'pescador Documentation',
[authors], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pescador', u'pescador Documentation',
authors, 'pescador', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| isc |
annapowellsmith/openpresc | openprescribing/frontend/tests/commands/test_import_ppu_savings.py | 1 | 5252 | from datetime import date
import os
from gcutils.bigquery import Client
from django.test import TestCase
import pandas as pd
from mock import patch
from frontend import bq_schemas
from frontend.management.commands import import_ppu_savings
from frontend.models import PPUSaving, PCT, Practice, Chemical
class BigqueryFunctionalTests(TestCase):
    """End-to-end test of the import_ppu_savings management command:
    loads CCG/practice/chemical rows into the Django ORM, pushes prescribing,
    practice, tariff and BNF fixtures into BigQuery tables, runs the command
    for 2015-09, then checks the PPUSaving rows it created."""
    @classmethod
    def setUpTestData(cls):
        """Create ORM fixtures, load CSV fixtures into BigQuery, and run the
        import_ppu_savings command once for all tests in this class."""
        bassetlaw = PCT.objects.create(code='02Q', org_type='CCG')
        lincs_west = PCT.objects.create(code='04D', org_type='CCG')
        # A CCG that was closed before the tested month.
        lincs_east = PCT.objects.create(code='03T', org_type='CCG',
                                        open_date='2013-04-01',
                                        close_date='2015-01-01')
        Chemical.objects.create(bnf_code='0703021Q0',
                                chem_name='Desogestrel')
        Chemical.objects.create(bnf_code='0408010A0',
                                chem_name='Levetiracetam')
        Practice.objects.create(code='C84001', ccg=bassetlaw,
                                name='LARWOOD SURGERY', setting=4)
        Practice.objects.create(code='C84024', ccg=bassetlaw,
                                name='NEWGATE MEDICAL GROUP', setting=4)
        Practice.objects.create(code='B82005', ccg=bassetlaw,
                                name='PRIORY MEDICAL GROUP', setting=4,
                                open_date='2015-01-01')
        Practice.objects.create(code='B82010', ccg=bassetlaw,
                                name='RIPON SPA SURGERY', setting=4)
        Practice.objects.create(code='A85017', ccg=bassetlaw,
                                name='BEWICK ROAD SURGERY', setting=4)
        Practice.objects.create(code='A86030', ccg=bassetlaw,
                                name='BETTS AVENUE MEDICAL GROUP', setting=4)
        Practice.objects.create(code='C83051', ccg=lincs_west,
                                name='ABBEY MEDICAL PRACTICE', setting=4)
        Practice.objects.create(code='C83019', ccg=lincs_east,
                                name='BEACON MEDICAL PRACTICE', setting=4)
        # Ensure we only include open practices in our calculations.
        Practice.objects.create(code='B82008', ccg=bassetlaw,
                                name='NORTH SURGERY', setting=4,
                                open_date='2010-04-01',
                                close_date='2012-01-01')
        # Ensure we only include standard practices in our calculations.
        Practice.objects.create(code='Y00581', ccg=bassetlaw,
                                name='BASSETLAW DRUG & ALCOHOL SERVICE',
                                setting=1)
        fixtures_base_path = os.path.join(
            'frontend', 'tests', 'fixtures', 'commands',
        )
        # Load each CSV fixture into its BigQuery table (dataset 'hscic').
        client = Client('hscic')
        prescribing_fixture_path = os.path.join(
            fixtures_base_path,
            'prescribing_bigquery_fixture.csv'
        )
        table = client.get_or_create_table(
            'normalised_prescribing_standard',
            bq_schemas.PRESCRIBING_SCHEMA
        )
        table.insert_rows_from_csv(prescribing_fixture_path)
        practices_fixture_path = os.path.join(
            fixtures_base_path,
            'practices.csv'
        )
        table = client.get_or_create_table(
            'practices',
            bq_schemas.PRACTICE_SCHEMA
        )
        table.insert_rows_from_csv(practices_fixture_path)
        tariff_path = os.path.join(fixtures_base_path, 'tariff_fixture.csv')
        table = client.get_or_create_table('tariff', bq_schemas.TARIFF_SCHEMA)
        table.insert_rows_from_csv(tariff_path)
        bnf_path = os.path.join(
            fixtures_base_path,
            'bnf_codes_for_ppu_savings.csv'
        )
        table = client.get_or_create_table('bnf', bq_schemas.BNF_SCHEMA)
        table.insert_rows_from_csv(bnf_path)
        month = date(2015, 9, 1)
        # Patch pandas.read_csv as seen by the command so it picks up our
        # substitutions fixture instead of reading the real file.
        dummy_substitutions = pd.read_csv(
            os.path.join(fixtures_base_path, 'ppu_substitutions.csv'))
        with patch(
                'frontend.management.commands.import_ppu_savings.pd.read_csv',
                return_value=dummy_substitutions):
            import_ppu_savings.Command().handle(month=month)
    def test_savings_created_correctly(self):
        """The command should create one CCG-level and one practice-level
        saving for presentation 0408010A0AAAAAA in CCG 02Q, identical except
        for practice_id."""
        ccg_saving = PPUSaving.objects.get(
            presentation_id='0408010A0AAAAAA',
            pct_id='02Q',
            practice_id__isnull=True
        )
        practice_saving = PPUSaving.objects.get(
            presentation_id='0408010A0AAAAAA',
            pct_id='02Q',
            practice_id__isnull=False
        )
        for saving in [ccg_saving, practice_saving]:
            # There's only one saving, so they should be identical
            # apart from the practice_id
            self.assertEqual(saving.lowest_decile, 10.0)
            self.assertEqual(saving.pct_id, '02Q')
            self.assertEqual(saving.formulation_swap, 'Tab / Cap')
            self.assertEqual(saving.price_per_unit, 10050.0)
            self.assertEqual(saving.presentation_id, '0408010A0AAAAAA')
            self.assertEqual(saving.possible_savings, 100400.0)
            self.assertEqual(saving.date, date(2015, 9, 1))
            self.assertEqual(saving.quantity, 10)
| mit |
jundongl/scikit-feast | skfeature/function/sparse_learning_based/MCFS.py | 3 | 2089 | import scipy
import numpy as np
from sklearn import linear_model
from skfeature.utility.construct_W import construct_W
def mcfs(X, n_selected_features, **kwargs):
    """
    This function implements unsupervised feature selection for multi-cluster
    data (MCFS): a spectral embedding of the samples is computed from the
    affinity matrix, then each embedding dimension is regressed on the
    features with an L1-constrained (LARS) fit to obtain sparse weights.

    Input
    -----
    X: {numpy array}, shape (n_samples, n_features)
        input data
    n_selected_features: {int}
        number of features to select (cardinality constraint of each LARS fit)
    kwargs: {dictionary}
        W: {sparse matrix}, shape (n_samples, n_samples)
            affinity matrix (built with construct_W(X) if not supplied)
        n_clusters: {int}
            number of clusters (default is 5)

    Output
    ------
    W: {numpy array}, shape(n_features, n_clusters)
        feature weight matrix

    Reference
    ---------
    Cai, Deng et al. "Unsupervised Feature Selection for Multi-Cluster Data." KDD 2010.
    """
    # use the default affinity matrix
    if 'W' not in kwargs:
        W = construct_W(X)
    else:
        W = kwargs['W']
    # default number of clusters is 5
    if 'n_clusters' not in kwargs:
        n_clusters = 5
    else:
        n_clusters = kwargs['n_clusters']

    # solve the generalized eigen-decomposition problem and get the top K
    # eigen-vectors with respect to the smallest eigenvalues
    W = W.toarray()
    # Symmetrise the affinity matrix, then apply D^{-1/2} W D^{-1/2}
    # normalisation (normalised-cut style).
    W = (W + W.T) / 2
    W_norm = np.diag(np.sqrt(1 / W.sum(1)))
    W = np.dot(W_norm, np.dot(W, W_norm))
    # Enforce exact symmetry element-wise (numerical round-off can break it).
    WT = W.T
    W[W < WT] = WT[W < WT]
    # eigh returns eigenvalues in ascending order; eigen_value itself is
    # unused — only the eigenvectors are needed.
    eigen_value, ul = scipy.linalg.eigh(a=W)
    # Take the n_clusters eigenvectors just below the trivial (largest)
    # one and map them back through W_norm to get the spectral embedding Y.
    Y = np.dot(W_norm, ul[:, -1*n_clusters-1:-1])

    # solve K L1-regularized regression problem using LARs algorithm with cardinality constraint being d
    n_sample, n_feature = X.shape
    W = np.zeros((n_feature, n_clusters))
    for i in range(n_clusters):
        clf = linear_model.Lars(n_nonzero_coefs=n_selected_features)
        clf.fit(X, Y[:, i])
        W[:, i] = clf.coef_
    return W
def feature_ranking(W):
    """Rank features according to the MCFS feature weight matrix *W*.

    The MCFS score of a feature is the maximum of its weights across all
    clusters (rows of ``W`` index features, columns index clusters). The
    returned index array orders features from highest score to lowest.
    """
    scores = W.max(axis=1)
    ascending = np.argsort(scores, axis=0)
    return ascending[::-1]
Zhenxingzhang/kaggle-cdiscount-classification | src/inference/inference_inception_v3_ft.py | 1 | 6742 | import os
import numpy as np
import pandas as pd
import tensorflow as tf
import csv
from src.common import consts
from src.data_preparation import dataset
from src.models import denseNN
from src.common import paths
import argparse
import yaml
from src.training.train_model import neural_model
from tensorflow.contrib.slim.nets import inception
from src.training.fine_tune_inception_v3 import decode_img
def infer_test(model_name, x_, output_probs_, batch_size, test_tfrecords_files, test_prediction_csv):
    """Run the trained model over a test TFRecord set and dump predictions.

    Writes one (_id, category_id, prob) row per test image to
    *test_prediction_csv*, where category_id is the argmax class decoded
    through the one-hot encoder and prob its probability.

    Parameters
    ----------
    model_name : str
        Name of the checkpoint sub-directory under paths.CHECKPOINTS_DIR.
    x_ : tf.placeholder
        Input-image placeholder of the graph to feed.
    output_probs_ : tf.Tensor
        Tensor of per-class probabilities to evaluate.
    batch_size : int
        Number of images evaluated per session run.
    test_tfrecords_files : str or list
        TFRecord file(s) fed to the dataset's filename placeholder.
    test_prediction_csv : str
        Output CSV path.

    Notes
    -----
    Relies on the module-level globals ``saver`` and ``LEARNING_RATE``
    being defined by the caller (as the ``__main__`` block below does).
    """
    _, one_hot_decoder = dataset.one_hot_label_encoder(csv_path="data/category_names.csv")
    with tf.Session().as_default() as sess:
        ds, filename = dataset.test_image_dataset()
        ds_iter = ds.batch(batch_size).make_initializable_iterator()
        sess.run(ds_iter.initializer, feed_dict={filename: test_tfrecords_files})
        # BUG FIX: initialise variables *before* restoring; the previous
        # ordering ran the initialiser after restore, clobbering the
        # restored weights. This mirrors the working order used in the
        # __main__ block below.
        tf.global_variables_initializer().run()

        # Resolve the newest checkpoint recorded in the '<model>_latest' file.
        lines = open(os.path.join(paths.CHECKPOINTS_DIR, model_name, str(LEARNING_RATE), model_name + '_latest')).read().split('\n')
        latest_checkpoint = [l.split(':')[1].replace('"', '').strip() for l in lines if 'model_checkpoint_path:' in l][0]
        check_point_path = os.path.join(paths.CHECKPOINTS_DIR, model_name, str(LEARNING_RATE), latest_checkpoint)
        print("Restore from {}".format(check_point_path))
        # BUG FIX: restore from the full checkpoint path, not the bare
        # checkpoint name parsed out of the '_latest' file (the bare name
        # only resolves when the process runs inside the checkpoint dir).
        saver.restore(sess, check_point_path)

        with open(test_prediction_csv, 'w') as csvfile:
            csv_writer = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
            csv_writer.writerow(["_id", "category_id", "prob"])
            # NOTE(review): ds_iter.get_next() inside the loop adds a new op
            # to the graph each iteration; hoisting it out once before the
            # loop is the canonical TF usage — left as-is to match the
            # __main__ block.
            try:
                while True:
                    test_batch = sess.run(ds_iter.get_next())
                    ids = test_batch['_id']
                    batch_images_raw = test_batch[consts.IMAGE_RAW_FIELD]
                    # map() materialises a list under Python 2, which this
                    # TF-1.x-era file targets.
                    batch_images = np.array(map(decode_img, batch_images_raw))
                    print(ids.shape)
                    probabilities_ = sess.run(output_probs_, feed_dict={x_: batch_images})
                    pred_labels = one_hot_decoder(probabilities_)
                    max_probs = np.apply_along_axis(np.amax, 1, probabilities_)
                    for (_id, pred_label, prob) in zip(ids, pred_labels, max_probs):
                        csv_writer.writerow([_id, pred_label, prob])
            except tf.errors.OutOfRangeError:
                print('End of the dataset')

    # agg_test_df.to_csv(paths.TEST_PREDICTIONS, index_label='id', float_format='%.17f')
    print('predictions saved to %s' % test_prediction_csv)
if __name__ == '__main__':
    # Command line: -c config file (required), -i/-o optional overrides for
    # the input TFRecords and the output CSV configured in the YAML file.
    parser = argparse.ArgumentParser(description='Default argument')
    parser.add_argument('-c', dest="config_filename", type=str, required=True, help='the config file name')
    parser.add_argument('-i', dest="tfrecord_filename", type=str, required=False, help='tfrecord file')
    parser.add_argument('-o', dest="csv_filename", type=str, required=False, help='csv')
    args = parser.parse_args()

    # NOTE(review): yaml.load without an explicit Loader can execute
    # arbitrary Python tags; prefer yaml.safe_load for config files.
    with open(args.config_filename, 'r') as yml_file:
        cfg = yaml.load(yml_file)

    BATCH_SIZE = cfg["TEST"]["BATCH_SIZE"]
    # Command-line arguments take precedence over the YAML config.
    if args.tfrecord_filename is not None:
        TEST_TF_RECORDS = args.tfrecord_filename
    else:
        TEST_TF_RECORDS = cfg["TEST"]["TEST_TF_RECORDS"]

    if args.csv_filename is not None:
        TEST_OUTPUT_CSV = args.csv_filename
    else:
        TEST_OUTPUT_CSV = cfg["TEST"]["OUTPUT_CSV_PATH"]

    MODEL_NAME = cfg["MODEL"]["MODEL_NAME"]
    MODEL_LAYERS = cfg["MODEL"]["MODEL_LAYERS"]
    LEARNING_RATE = cfg["TRAIN"]["LEARNING_RATE"]

    print("Testing model name: {}".format(MODEL_NAME))
    print("Testing data: {}".format(TEST_TF_RECORDS))
    print("Testing output: {}".format(TEST_OUTPUT_CSV))

    # Build the Inception-v3 graph with dropout disabled (keep prob 1.0);
    # 5270 classes matches the Cdiscount category count in the fixtures.
    batch_shape = [None, 180, 180, 3]
    slim = tf.contrib.slim
    _, one_hot_decoder = dataset.one_hot_label_encoder(csv_path="data/category_names.csv")

    with tf.Graph().as_default():
        x_input = tf.placeholder(tf.float32, shape=batch_shape)
        with slim.arg_scope(inception.inception_v3_arg_scope()):
            _, end_points = inception.inception_v3(x_input,
                                                   num_classes=5270,
                                                   is_training=True,
                                                   dropout_keep_prob=1.0)

        variables_to_restore = slim.get_variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)
        predicted_labels = end_points['Predictions']
        print(end_points['Predictions'].shape)

        with tf.Session().as_default() as sess:
            ds, filename = dataset.test_image_dataset()
            ds_iter = ds.batch(BATCH_SIZE).make_initializable_iterator()
            sess.run(ds_iter.initializer, feed_dict={filename: TEST_TF_RECORDS})
            # Initialise first, then restore the checkpoint weights over
            # the fresh variables.
            tf.global_variables_initializer().run()

            # Resolve the newest checkpoint path recorded in the
            # '<model>_latest' bookkeeping file.
            lines = open(os.path.join(paths.CHECKPOINTS_DIR, MODEL_NAME, str(LEARNING_RATE),
                                      MODEL_NAME + '_latest')).read().split('\n')
            latest_checkpoint = \
                [l.split(':')[1].replace('"', '').strip() for l in lines if 'model_checkpoint_path:' in l][0]
            check_point_path = os.path.join(paths.CHECKPOINTS_DIR, MODEL_NAME, str(LEARNING_RATE), latest_checkpoint)
            print("Restore from {}".format(check_point_path))
            saver.restore(sess, check_point_path)

            with open(TEST_OUTPUT_CSV, 'w') as csvfile:
                csv_writer = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
                csv_writer.writerow(["_id", "category_id", "prob"])
                try:
                    while True:
                        test_batch = sess.run(ds_iter.get_next())
                        ids = test_batch['_id']
                        batch_images_raw = test_batch[consts.IMAGE_RAW_FIELD]
                        # map() returns a list under Python 2, which this
                        # TF-1.x-era script targets.
                        batch_images = np.array(map(decode_img, batch_images_raw))
                        print(ids.shape)
                        probabilities_ = sess.run(predicted_labels, feed_dict={x_input: batch_images})
                        pred_labels = one_hot_decoder(probabilities_)
                        max_probs = np.apply_along_axis(np.amax, 1, probabilities_)
                        for (_id, pred_label, prob) in zip(ids, pred_labels, max_probs):
                            csv_writer.writerow([_id, pred_label, prob])
                except tf.errors.OutOfRangeError:
                    # Dataset iterator exhausted: normal end of inference.
                    print('End of the dataset')

    # agg_test_df.to_csv(paths.TEST_PREDICTIONS, index_label='id', float_format='%.17f')
    print('predictions saved to %s' % TEST_OUTPUT_CSV)
| apache-2.0 |
vidartf/hyperspy | hyperspy/drawing/signal1d.py | 1 | 14204 | # -*- coding: utf-8 -*-
# Copyright 2007-2016 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from mpl_toolkits.axes_grid1 import make_axes_locatable
from hyperspy.drawing.figure import BlittedFigure
from hyperspy.drawing import utils
from hyperspy.events import Event, Events
class Signal1DFigure(BlittedFigure):

    """Matplotlib figure hosting one or more Signal1D lines.

    Manages a main (left) axis, an optional twin right axis, a list of
    markers, and one colour cycle per plotting style so that added lines
    receive distinct colours automatically.
    """

    def __init__(self, title=""):
        super(Signal1DFigure, self).__init__()
        self.figure = None
        self.ax = None
        self.right_ax = None
        self.ax_lines = list()
        self.right_ax_lines = list()
        self.ax_markers = list()
        self.axes_manager = None
        self.right_axes_manager = None

        # Labels
        self.xlabel = ''
        self.ylabel = ''
        self.title = title
        self.create_figure()
        self.create_axis()

        # One independent colour cycle per line style so that e.g. scatter
        # lines do not consume colours meant for plain lines.
        self._color_cycles = {
            'line': utils.ColorCycle(),
            'step': utils.ColorCycle(),
            'scatter': utils.ColorCycle(), }

    def create_figure(self):
        """Create the matplotlib figure and hook up close/draw callbacks."""
        self.figure = utils.create_figure(
            window_title="Figure " + self.title if self.title
            else None)
        utils.on_figure_window_close(self.figure, self._on_close)
        self.figure.canvas.mpl_connect('draw_event', self._on_draw)

    def create_axis(self):
        """Create the main (left) axis with animated x/y axes for blitting."""
        self.ax = self.figure.add_subplot(111)
        self.ax.yaxis.set_animated(True)
        self.ax.xaxis.set_animated(True)
        self.ax.hspy_fig = self

    def create_right_axis(self):
        """Lazily create a twin right axis sharing the x axis."""
        if self.ax is None:
            self.create_axis()
        if self.right_ax is None:
            self.right_ax = self.ax.twinx()
            self.right_ax.hspy_fig = self
            self.right_ax.yaxis.set_animated(True)

    def add_line(self, line, ax='left'):
        """Attach *line* (a Signal1DLine) to the left or right axis.

        Parameters
        ----------
        line : Signal1DLine
        ax : {'left', 'right'}
        """
        if ax == 'left':
            line.ax = self.ax
            if line.axes_manager is None:
                line.axes_manager = self.axes_manager
            self.ax_lines.append(line)
            line.sf_lines = self.ax_lines
        elif ax == 'right':
            line.ax = self.right_ax
            self.right_ax_lines.append(line)
            line.sf_lines = self.right_ax_lines
            if line.axes_manager is None:
                line.axes_manager = self.right_axes_manager
        # NOTE(review): self.axis is not initialised in __init__; it is
        # presumably set by the plotting code that owns this figure —
        # confirm before calling add_line in isolation.
        line.axis = self.axis
        # Automatically assign the color if not defined
        if line.color is None:
            line.color = self._color_cycles[line.type]()
        # Or remove it from the color cycle if part of the cycle
        # in this round
        else:
            rgba_color = mpl.colors.colorConverter.to_rgba(line.color)
            if rgba_color in self._color_cycles[line.type].color_cycle:
                self._color_cycles[line.type].color_cycle.remove(
                    rgba_color)

    def plot(self):
        """Plot all lines and markers and wire axes_manager update events."""
        self.ax.set_xlabel(self.xlabel)
        self.ax.set_ylabel(self.ylabel)
        self.ax.set_title(self.title)
        x_axis_upper_lims = []
        x_axis_lower_lims = []
        for line in self.ax_lines:
            line.plot()
            x_axis_lower_lims.append(line.axis.axis[0])
            x_axis_upper_lims.append(line.axis.axis[-1])
        for marker in self.ax_markers:
            marker.plot()
        # Span the x axis over the union of all plotted lines' axes.
        plt.xlim(np.min(x_axis_lower_lims), np.max(x_axis_upper_lims))
        # To be discussed
        self.axes_manager.events.indices_changed.connect(self.update, [])
        self.events.closed.connect(
            lambda: self.axes_manager.events.indices_changed.disconnect(
                self.update), [])
        if hasattr(self.figure, 'tight_layout'):
            try:
                self.figure.tight_layout()
            # Narrowed from a bare ``except:`` so KeyboardInterrupt and
            # SystemExit still propagate.
            except Exception:
                # tight_layout is a bit brittle, we do this just in case it
                # complains
                pass

    def _on_close(self):
        if self.figure is None:
            return  # Already closed
        for line in self.ax_lines + self.right_ax_lines:
            line.close()
        super(Signal1DFigure, self)._on_close()

    def update(self):
        """Refresh all markers and all lines on both axes."""
        for marker in self.ax_markers:
            marker.update()
        for line in self.ax_lines + \
                self.right_ax_lines:
            line.update()
class Signal1DLine(object):

    """Line that can be added to Signal1DFigure.

    Attributes
    ----------
    type : {'scatter', 'step', 'line'}
        Select the line drawing style.
    line_properties : dictionary
        Accepts a dictionary of valid (i.e. recognized by mpl.plot)
        containing valid line properties. In addition it understands
        the keyword `type` that can take the following values:
        {'scatter', 'step', 'line'}

    Methods
    -------
    set_line_properties
        Enables setting the line_properties attribute using keyword
        arguments.

    Raises
    ------
    ValueError
        If an invalid keyword value is passed to line_properties.
    """

    def __init__(self):
        self.events = Events()
        self.events.closed = Event("""
            Event that triggers when the line is closed.

            Arguments:
                obj:  Signal1DLine instance
                    The instance that triggered the event.
            """, arguments=["obj"])
        self.sf_lines = None
        self.ax = None
        # Data attributes
        self.data_function = None
        self.axis = None
        self.axes_manager = None
        self.auto_update = True
        self.get_complex = False

        # Properties
        self.line = None
        self.autoscale = False
        self.plot_indices = False
        self.text = None
        self.text_position = (-0.1, 1.05,)
        self._line_properties = {}
        self.type = "line"

    @property
    def line_properties(self):
        return self._line_properties

    @line_properties.setter
    def line_properties(self, kwargs):
        # 'type' and 'color' are handled by dedicated properties; route
        # them there and keep only genuine mpl line kwargs in the dict.
        if 'type' in kwargs:
            self.type = kwargs['type']
            del kwargs['type']

        if 'color' in kwargs:
            color = kwargs['color']
            del kwargs['color']
            self.color = color

        for key, item in kwargs.items():
            if item is None and key in self._line_properties:
                del self._line_properties[key]
            else:
                self._line_properties[key] = item
        if self.line is not None:
            plt.setp(self.line, **self.line_properties)
            self.ax.figure.canvas.draw()

    def set_line_properties(self, **kwargs):
        """Set line properties from keyword arguments (see class docs)."""
        self.line_properties = kwargs

    @property
    def type(self):
        return self._type

    @type.setter
    def type(self, value):
        # Each style maps to a fixed set of mpl kwargs that are merged
        # into line_properties.
        lp = {}
        if value == 'scatter':
            lp['marker'] = 'o'
            lp['linestyle'] = 'None'
            lp['markersize'] = 1

        elif value == 'line':
            lp['linestyle'] = '-'
            lp['marker'] = "None"
            lp['drawstyle'] = "default"
        elif value == 'step':
            lp['drawstyle'] = 'steps-mid'
            lp['marker'] = "None"
        else:
            raise ValueError(
                "`type` must be one of "
                "{\'scatter\', \'line\', \'step\'}"
                "but %s was given" % value)
        self._type = value
        self.line_properties = lp
        if self.color is not None:
            # Re-apply the colour through the new style's colour keyword.
            self.color = self.color

    @property
    def color(self):
        if 'color' in self.line_properties:
            return self.line_properties['color']
        elif 'markeredgecolor' in self.line_properties:
            return self.line_properties['markeredgecolor']
        else:
            return None

    @color.setter
    def color(self, color):
        # Scatter lines are coloured through the marker edge, others
        # through the regular 'color' keyword; keep only one of the two.
        if self._type == 'scatter':
            self.set_line_properties(markeredgecolor=color)
            if 'color' in self._line_properties:
                del self._line_properties['color']
        else:
            if color is None and 'color' in self._line_properties:
                del self._line_properties['color']
            else:
                self._line_properties['color'] = color
            self.set_line_properties(markeredgecolor=None)

        if self.line is not None:
            plt.setp(self.line, **self.line_properties)
            self.ax.figure.canvas.draw()

    def plot(self, data=1):
        """Draw the line from data_function and connect update events."""
        f = self.data_function
        if self.get_complex is False:
            data = f(axes_manager=self.axes_manager).real
        else:
            data = f(axes_manager=self.axes_manager).imag
        if self.line is not None:
            self.line.remove()
        self.line, = self.ax.plot(self.axis.axis, data,
                                  **self.line_properties)
        self.line.set_animated(True)
        self.axes_manager.events.indices_changed.connect(self.update, [])
        self.events.closed.connect(
            lambda: self.axes_manager.events.indices_changed.disconnect(
                self.update), [])
        if not self.axes_manager or self.axes_manager.navigation_size == 0:
            self.plot_indices = False
        if self.plot_indices is True:
            if self.text is not None:
                self.text.remove()
            self.text = self.ax.text(*self.text_position,
                                     s=str(self.axes_manager.indices),
                                     transform=self.ax.transAxes,
                                     fontsize=12,
                                     color=self.line.get_color(),
                                     animated=True)
        self.ax.figure.canvas.draw()

    def update(self, force_replot=False):
        """Update the current spectrum figure"""
        if self.auto_update is False:
            return
        if force_replot is True:
            self.close()
            self.plot()
        if self.get_complex is False:
            ydata = self.data_function(axes_manager=self.axes_manager).real
        else:
            ydata = self.data_function(axes_manager=self.axes_manager).imag

        # Reset x data and limits only when the signal axis changed.
        old_xaxis = self.line.get_xdata()
        if len(old_xaxis) != self.axis.size or \
                np.any(np.not_equal(old_xaxis, self.axis.axis)):
            self.ax.set_xlim(self.axis.axis[0], self.axis.axis[-1])
            self.line.set_data(self.axis.axis, ydata)
        else:
            self.line.set_ydata(ydata)

        if self.autoscale is True:
            self.ax.relim()
            y1, y2 = np.searchsorted(self.axis.axis,
                                     self.ax.get_xbound())
            y2 += 2
            # NOTE(review): ``len(ydata - 1)`` equals ``len(ydata)`` (the
            # subtraction is element-wise); probably ``len(ydata) - 1`` was
            # intended, but since y2 is only used as a slice end the
            # current value is harmless. Left unchanged.
            y1, y2 = np.clip((y1, y2), 0, len(ydata - 1))
            clipped_ydata = ydata[y1:y2]
            y_max, y_min = (np.nanmax(clipped_ydata),
                            np.nanmin(clipped_ydata))
            if self.get_complex:
                yreal = self.data_function(axes_manager=self.axes_manager).real
                clipped_yreal = yreal[y1:y2]
                y_min = min(y_min, clipped_yreal.min())
                y_max = max(y_max, clipped_yreal.max())
            self.ax.set_ylim(y_min, y_max)
        if self.plot_indices is True:
            self.text.set_text(self.axes_manager.indices)
        try:
            self.ax.hspy_fig._draw_animated()
        # Narrowed from a bare ``except:`` so KeyboardInterrupt and
        # SystemExit still propagate.
        except Exception:
            # There may be errors if the figure does no longer exist.
            pass

    def close(self):
        """Remove the line (and its index text) and fire the closed event."""
        if self.line in self.ax.lines:
            self.ax.lines.remove(self.line)
        if self.text and self.text in self.ax.texts:
            self.ax.texts.remove(self.text)
        if self.sf_lines and self in self.sf_lines:
            self.sf_lines.remove(self)
        self.events.closed.trigger(obj=self)
        for f in self.events.closed.connected:
            self.events.closed.disconnect(f)
        try:
            self.ax.figure.canvas.draw()
        # Narrowed from a bare ``except:``; drawing can fail if the figure
        # window is already gone.
        except Exception:
            pass
def _plot_component(factors, idx, ax=None, cal_axis=None,
                    comp_label='PC'):
    """Plot component *idx* (a column of *factors*) as a labelled line.

    When a calibrated axis is supplied its values and units label the x
    axis; otherwise plain channel indices are used. Returns the axis the
    line was drawn on.
    """
    if ax is None:
        ax = plt.gca()
    if cal_axis is None:
        x_values = np.arange(factors.shape[0])
        plt.xlabel('Channel index')
    else:
        x_values = cal_axis.axis
        plt.xlabel(cal_axis.units)
    ax.plot(x_values, factors[:, idx], label='%s %i' % (comp_label, idx))
    return ax
def _plot_loading(loadings, idx, axes_manager, ax=None,
                  comp_label='PC', no_nans=True, calibrate=True,
                  cmap=plt.cm.gray):
    """Plot loading *idx* as an image (2D navigation) or step line (1D).

    Parameters
    ----------
    loadings : array
        Loading matrix; row ``idx`` is reshaped to the navigation shape.
    idx : int
        Index of the loading to plot.
    axes_manager : hyperspy AxesManager
        Provides navigation dimension, shape and axis calibration.
    ax : matplotlib axis, optional
        Target axis; defaults to the current axis.
    comp_label : str
        Currently unused in this function.
    no_nans : bool
        If True, replace NaNs with zeros before plotting.
    calibrate : bool
        If True, use the axes_manager's calibrated axis values/extent.

    Raises
    ------
    ValueError
        If the navigation dimension is neither 1 nor 2.
    """
    if ax is None:
        ax = plt.gca()
    if no_nans:
        loadings = np.nan_to_num(loadings)
    if axes_manager.navigation_dimension == 2:
        extent = None
        # get calibration from a passed axes_manager
        shape = axes_manager._navigation_shape_in_array
        if calibrate:
            # extent is (left, right, bottom, top); the y axis is flipped
            # (high_value first) to match image orientation.
            extent = (axes_manager._axes[0].low_value,
                      axes_manager._axes[0].high_value,
                      axes_manager._axes[1].high_value,
                      axes_manager._axes[1].low_value)
        im = ax.imshow(loadings[idx].reshape(shape), cmap=cmap, extent=extent,
                       interpolation='nearest')
        # Attach a colorbar in a dedicated axis to the right of the image.
        div = make_axes_locatable(ax)
        cax = div.append_axes("right", size="5%", pad=0.05)
        plt.colorbar(im, cax=cax)
    elif axes_manager.navigation_dimension == 1:
        if calibrate:
            x = axes_manager._axes[0].axis
        else:
            x = np.arange(axes_manager._axes[0].size)
        ax.step(x, loadings[idx])
    else:
        raise ValueError('View not supported')
| gpl-3.0 |
BNIA/tidyall | src/modules/tidydate.py | 2 | 2523 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""tidydate
This file declares and implements the TidyDate class for TidyAll.
TidyDate converts and formats valid date columns into ISO 8601 (yyyy-mm-dd).
"""
import sys
from dateutil import parser as date_parser
import numpy as np
import pandas as pd
from .settings import TIDY_DATE_SPLIT
class TidyDate(object):
    """Standardise a free-form date column of a TidyAll dataframe.

    Adds a ``tidy_date`` column in ISO 8601 (yyyy-mm-dd) plus the split
    columns named in ``TIDY_DATE_SPLIT``, falling back to the original
    values for rows that cannot be parsed.
    """

    def __init__(self, df, column):
        """Wraps a TidyDate object around a TidyAll dataframe"""
        self.df = df
        self.date_col = column

    @staticmethod
    def parse_date(date_str):
        """Parse *date_str* into a datetime, or return None on failure.

        First tries a plain dateutil parse; if that fails, reinterprets
        compact numeric strings by rearranging the trailing characters
        into a year-month-day guess before parsing again.
        """
        try:
            return date_parser.parse(date_str)
        except (TypeError, ValueError):
            try:
                def split_date():
                    return (date_str[-4:], date_str[:-6], date_str[-6:-4])

                return date_parser.parse('-'.join(split_date()))
            except (TypeError, ValueError):
                return None

    def __clean_col(self):
        """Parses and standardizes the selected column values

        Args:
            None

        Returns:
            None
        """
        if self.date_col:
            # Bail out if the column is already a datetime dtype.
            # NOTE(review): sys.exit in library code is heavy-handed;
            # raising ValueError would let callers recover.
            if np.dtype(self.df[self.date_col]) == np.dtype("datetime64[ns]"):
                sys.exit("Column is already in date format")

            self.df["tidy_date"] = self.df[self.date_col].apply(
                lambda x: self.parse_date(str(x))
            )

            self.df["tidy_date"] = self.df["tidy_date"].apply(
                lambda x: x.date() if pd.notnull(x) else x
            )

    def __split_date(self):
        """Splits the "tidy_date" column into separate tidy year, month and
        day columns

        Args:
            None

        Returns:
            None
        """
        for index, col in enumerate(TIDY_DATE_SPLIT):
            try:
                self.df[col] = self.df["tidy_date"].apply(
                    lambda x: int(str(x).split("-")[index])
                    if pd.notnull(x) else x
                )
            except IndexError:
                continue

    def __fill_na(self):
        """Fills values that were unable to be parsed with the original values

        Args:
            None

        Returns:
            None
        """
        # BUG FIX: the previous implementation appended "tidy_date" to the
        # module-level TIDY_DATE_SPLIT list in place, so every parse() call
        # grew the shared constant (duplicated entries, and __split_date on
        # later instances iterated over the extra column). Build a local
        # list instead of mutating the import.
        for col in TIDY_DATE_SPLIT + ["tidy_date"]:
            self.df[col].fillna(self.df[self.date_col], inplace=True)

    def parse(self):
        """Run the full pipeline and return the augmented dataframe."""
        self.__clean_col()
        self.__split_date()
        self.__fill_na()
        return self.df
| mit |
atantet/transferLorenz | plot/plotEigValRho.py | 1 | 6587 | import os
import numpy as np
import matplotlib.pyplot as plt
import pylibconfig2
import ergoPlot
# NOTE: this is a Python 2 script (print statements below).
configFile = '../cfg/Lorenz63.cfg'
compName1 = 'x'
compName2 = 'y'
compName3 = 'z'

#readSpec = ergoPlot.readSpectrum
readSpec = ergoPlot.readSpectrumCompressed

cfg = pylibconfig2.Config()
cfg.read_file(configFile)

L = cfg.simulation.LCut + cfg.simulation.spinup
tau = cfg.transfer.tauRng[0]
# + 0.1 before int() guards against float round-down.
printStepNum = int(cfg.simulation.printStep / cfg.simulation.dt + 0.1)
caseName = cfg.model.caseName
dim = cfg.model.dim
dimObs = dim

nProc = ''
if (hasattr(cfg.sprinkle, 'nProc')):
    nProc = '_nProc' + str(cfg.sprinkle.nProc)

# Build the grid postfix string used in all data/plot file names.
N = np.prod(np.array(cfg.grid.nx))
gridPostfix = ""
for d in np.arange(dimObs):
    if (hasattr(cfg.grid, 'nSTDLow') & hasattr(cfg.grid, 'nSTDHigh')):
        gridPostfix = "%s_n%dl%dh%d" % (gridPostfix, cfg.grid.nx[d],
                                        cfg.grid.nSTDLow[d],
                                        cfg.grid.nSTDHigh[d])
    else:
        gridPostfix = "%s_n%dl%dh%d" % (gridPostfix, cfg.grid.nx[d],
                                        cfg.sprinkle.minInitState[d],
                                        cfg.sprinkle.maxInitState[d])
gridPostfix = "_%s%s" % (caseName, gridPostfix)

# Range of Rayleigh numbers rho: coarse 0.5 steps plus a finer 0.1-step
# refinement around the bifurcation region, deduplicated via integer
# rounding, with a few extra endpoints prepended/appended.
rhoRng = np.arange(22., 26.01, 0.5)
rhoRng = np.concatenate((rhoRng, np.arange(23.1, 24.91, 0.1)))
rhoRng = np.unique((rhoRng * 10 + 0.1).astype(int)) * 1. / 10
rhoRng = np.concatenate(([20, 21.], rhoRng, [27., 28.]))
# Trajectory counts per rho: fewer trajectories in the refined band,
# except rho = 23.5 which keeps the full count.
nTrajRng = np.ones((rhoRng.shape[0],), dtype=int) * 500000000
nTrajRng[(rhoRng > 23.09) & (rhoRng < 23.99)] = 100000000
nTrajRng[np.argmin(np.abs(rhoRng - 23.5))] = 500000000

# gapRng = np.empty((rhoRng.shape[0],))
# Leading real parts of the generator spectrum, one row per rho.
eigValRng = np.empty((rhoRng.shape[0], cfg.spectrum.nev))
for irho in np.arange(rhoRng.shape[0]):
    rho = rhoRng[irho]
    simPostfix = "_L%d_dt%d_nTraj%d%s" \
                 % (int(tau * 1000 + 0.1),
                    -np.round(np.log10(cfg.simulation.dt)),
                    nTrajRng[irho], nProc)
    srcPostfixSim = "%s_rho%04d%s" % (gridPostfix, int(rho * 100 + 0.1),
                                      simPostfix)

    # Define file names
    postfix = srcPostfixSim
    eigValForwardFile = '%s/eigval/eigvalForward_nev%d%s.%s' \
                        % (cfg.general.specDir, cfg.spectrum.nev, postfix,
                           cfg.general.fileFormat)

    # Read transfer operator spectrum from file
    # and create a bi-orthonormal basis
    # of eigenvectors and backward eigenvectors:
    print 'Readig spectrum for tau = %.3f...' % tau
    (eigValForward,) = readSpec(eigValForwardFile)

    # Get generator eigenvalues (using the complex logarithm)
    eigValGen = np.log(eigValForward) / tau
    isort = np.argsort(-eigValGen.real)
    eigValGen = eigValGen[isort]

    # Record spectral gap
    eigValRng[irho] = eigValGen[:cfg.spectrum.nev].real
    # Remove spurious positive eigenvalues (where do they come from?)
    eigValRng[irho, eigValRng[irho] > 1.e-4] = np.nan
    isort = np.argsort(-eigValRng[irho])
    eigValRng[irho] = eigValRng[irho, isort]

# Plot the leading eigenvalue real parts versus rho.
dstPostfix = "%s_rhomin%04d_rhomax%04d%s" \
             % (gridPostfix, int(rhoRng[0] * 100 + 0.1),
                int(rhoRng[-1] * 100 + 0.1), simPostfix)
nevPlot = 7
ls = '-'
mk = '+'
lw = 2
markers = ['o', '+', 'x']
msizes = [8, 8, 6]
colors = ['k', 'k', 'k']
fig = plt.figure()
ax = fig.add_subplot(111)
# Alternate solid/dashed line styles between consecutive eigenvalues.
linestyles = ['-', '--']
for ev in np.arange(nevPlot):
    ax.plot(rhoRng, eigValRng[:, ev].real, linestyle=linestyles[ev%2],
            linewidth=lw)
ax.set_xlim(rhoRng[0], rhoRng[-1])
ax.set_ylim(-0.12, 0.001)
ax.set_xlabel(r'$\rho$', fontsize=ergoPlot.fs_latex)
ax.set_ylabel(r'$\Re(\lambda_i)$', fontsize=ergoPlot.fs_latex)
plt.setp(ax.get_xticklabels(), fontsize=ergoPlot.fs_xticklabels)
plt.setp(ax.get_yticklabels(), fontsize=ergoPlot.fs_yticklabels)
plt.savefig('%s/spectrum/eigVal/eigVSRho%s.%s'\
            % (cfg.general.plotDir, dstPostfix, ergoPlot.figFormat),
            dpi=ergoPlot.dpi, bbox_inches=ergoPlot.bbox_inches)
# fig = plt.figure()
# ax = fig.add_subplot(111)
# selRho = (rhoRng >= 21.) & (rhoRng <= 25.)
# for ev in np.arange(nevPlot):
# ax.plot(rhoRng[selRho], eigValRng[selRho, ev].real,
# linestyle=linestyles[ev%d], linewidth=lw)
# ax.set_xlim(rhoRng[selRho][0], rhoRng[selRho][-1])
# ax.set_ylim(-0.06, 0.002)
# ax.set_xticks(np.arange(21, 25.1, 1))
# ax.set_xlabel(r'$\rho$', fontsize=ergoPlot.fs_latex)
# ax.set_ylabel(r'$\Re(\lambda_i)$', fontsize=ergoPlot.fs_latex)
# plt.setp(ax.get_xticklabels(), fontsize=ergoPlot.fs_xticklabels)
# plt.setp(ax.get_yticklabels(), fontsize=ergoPlot.fs_yticklabels)
# plt.savefig('%s/spectrum/eigVal/eigVSRhoZoom%s.%s'\
# % (cfg.general.plotDir, dstPostfix, ergoPlot.figFormat),
# dpi=ergoPlot.dpi, bbox_inches=ergoPlot.bbox_inches)
# fig = plt.figure()
# ax = fig.add_subplot(111)
# ax.plot(rhoRng, np.zeros((rhoRng.shape[0],)), '-k', linewidth=0.5)
# for ev in np.arange(nevPlot):
# ax.plot(rhoRng, eigValRng[:, ev].real, marker=markers[ev],
# color=colors[ev], markersize=msizes[ev],
# linestyle='none', fillstyle='none')
# ax.set_xlim(rhoRng[0], rhoRng[-1])
# ax.set_xlabel(r'$\rho$', fontsize=ergoPlot.fs_latex)
# ax.set_ylabel(r'$\Re(\lambda_i)$', fontsize=ergoPlot.fs_latex)
# plt.setp(ax.get_xticklabels(), fontsize=ergoPlot.fs_xticklabels)
# plt.setp(ax.get_yticklabels(), fontsize=ergoPlot.fs_yticklabels)
# plt.savefig('%s/spectrum/eigVal/eigVSRho%s.%s'\
# % (cfg.general.plotDir, dstPostfix, ergoPlot.figFormat),
# dpi=ergoPlot.dpi, bbox_inches=ergoPlot.bbox_inches)
# fig = plt.figure()
# ax = fig.add_subplot(111)
# selRho = (rhoRng >= 21.) & (rhoRng <= 25.)
# ax.plot(rhoRng[selRho], np.zeros((rhoRng[selRho].shape[0],)), '-k',
# linewidth=0.5)
# for ev in np.arange(nevPlot):
# ax.plot(rhoRng[selRho], eigValRng[selRho, ev].real, marker=markers[ev],
# color=colors[ev], markersize=msizes[ev],
# linestyle='none', fillstyle='none')
# ax.set_xlim(rhoRng[selRho][0], rhoRng[selRho][-1])
# ax.set_ylim(-0.06, 0.002)
# ax.set_xticks(np.arange(21, 25.1, 1))
# ax.set_xlabel(r'$\rho$', fontsize=ergoPlot.fs_latex)
# ax.set_ylabel(r'$\Re(\lambda_i)$', fontsize=ergoPlot.fs_latex)
# plt.setp(ax.get_xticklabels(), fontsize=ergoPlot.fs_xticklabels)
# plt.setp(ax.get_yticklabels(), fontsize=ergoPlot.fs_yticklabels)
# plt.savefig('%s/spectrum/eigVal/eigVSRhoZoom%s.%s'\
# % (cfg.general.plotDir, dstPostfix, ergoPlot.figFormat),
# dpi=ergoPlot.dpi, bbox_inches=ergoPlot.bbox_inches)
| gpl-3.0 |
nomadcube/scikit-learn | sklearn/covariance/tests/test_robust_covariance.py | 213 | 3359 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Virgile Fritsch <virgile.fritsch@inria.fr>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.validation import NotFittedError
from sklearn import datasets
from sklearn.covariance import empirical_covariance, MinCovDet, \
EllipticEnvelope
# Shared fixture for the tests below: the iris data matrix, its first
# column as a 1-d sample, and the matrix dimensions.
X = datasets.load_iris().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_mcd():
    # Tests the FastMCD algorithm implementation on data sets of
    # increasing size and contamination.
    # Tuples: (n_samples, n_features, n_outliers, tol_loc, tol_cov,
    #          tol_support)
    scenarios = [
        # Small data set, no outliers (random independent normal data)
        (100, 5, 0, 0.01, 0.1, 80),
        # Small data set, medium contamination
        (100, 5, 20, 0.01, 0.01, 70),
        # Small data set, strong contamination
        (100, 5, 40, 0.1, 0.1, 50),
        # Medium data set
        (1000, 5, 450, 0.1, 0.1, 540),
        # Large data set
        (1700, 5, 800, 0.1, 0.1, 870),
        # 1D data set
        (500, 1, 100, 0.001, 0.001, 350),
    ]
    for scenario in scenarios:
        launch_mcd_on_dataset(*scenario)
def launch_mcd_on_dataset(n_samples, n_features, n_outliers, tol_loc, tol_cov,
                          tol_support):
    """Fit MinCovDet on a contaminated Gaussian sample and compare the
    robust estimates against the inlier-only ground truth."""
    rng = np.random.RandomState(0)
    data = rng.randn(n_samples, n_features)
    # Shift a random subset of points far away to act as outliers.
    outliers_index = rng.permutation(n_samples)[:n_outliers]
    outliers_offset = 10. * \
        (rng.randint(2, size=(n_outliers, n_features)) - 0.5)
    data[outliers_index] += outliers_offset
    inliers_mask = np.ones(n_samples).astype(bool)
    inliers_mask[outliers_index] = False
    pure_data = data[inliers_mask]

    # Fit the robust estimator on the full (contaminated) data set.
    mcd_fit = MinCovDet(random_state=rng).fit(data)
    location = mcd_fit.location_
    covariance = mcd_fit.covariance_
    support = mcd_fit.support_

    # The robust estimates must stay close to what the inliers alone give,
    # and the support must cover at least tol_support points.
    location_error = np.mean((pure_data.mean(0) - location) ** 2)
    assert location_error < tol_loc
    covariance_error = np.mean(
        (empirical_covariance(pure_data) - covariance) ** 2)
    assert covariance_error < tol_cov
    assert np.sum(support) >= tol_support
    assert_array_almost_equal(mcd_fit.mahalanobis(data), mcd_fit.dist_)
def test_mcd_issue1127():
    # Check that the code does not break with X.shape = (3, 1)
    # (i.e. n_support = n_samples)
    rng = np.random.RandomState(0)
    data = rng.normal(size=(3, 1))
    MinCovDet().fit(data)
def test_outlier_detection():
    # EllipticEnvelope must refuse to predict before fitting, and its
    # decision function must be consistent with the Mahalanobis distances
    # and the predicted labels.
    rng = np.random.RandomState(0)
    data = rng.randn(100, 10)
    clf = EllipticEnvelope(contamination=0.1)

    # Using the estimator before fitting raises NotFittedError.
    assert_raises(NotFittedError, clf.predict, data)
    assert_raises(NotFittedError, clf.decision_function, data)

    clf.fit(data)
    y_pred = clf.predict(data)
    raw_decision = clf.decision_function(data, raw_values=True)
    shifted_decision = clf.decision_function(data, raw_values=False)

    assert_array_almost_equal(
        raw_decision, clf.mahalanobis(data))
    assert_array_almost_equal(clf.mahalanobis(data), clf.dist_)
    n_inliers = 100 - y_pred[y_pred == -1].size
    assert_almost_equal(clf.score(data, np.ones(100)), n_inliers / 100.)
    assert sum(y_pred == -1) == sum(shifted_decision < 0)
| bsd-3-clause |
jougs/nest-simulator | pynest/examples/BrodyHopfield.py | 7 | 4218 | # -*- coding: utf-8 -*-
#
# BrodyHopfield.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""Spike synchronization through subthreshold oscillation
------------------------------------------------------------
This script reproduces the spike synchronization behavior
of integrate-and-fire neurons in response to a subthreshold
oscillation. This phenomenon is shown in Fig. 1 of [1]_
Neurons receive a weak 35 Hz oscillation, a gaussian noise current
and an increasing DC. The time-locking capability is shown to
depend on the input current given. The result is then plotted using
matplotlib. All parameters are taken from the above paper.
References
~~~~~~~~~~~~~
.. [1] Brody CD and Hopfield JJ (2003). Simple networks for
spike-timing-based computation, with application to olfactory
processing. Neuron 37, 843-852.
"""
#################################################################################
# First, we import all necessary modules for simulation, analysis, and plotting.
import nest
import nest.raster_plot
import matplotlib.pyplot as plt
###############################################################################
# Second, the simulation parameters are assigned to variables.
N = 1000           # number of neurons
bias_begin = 140.  # minimal value for the bias current injection [pA]
bias_end = 200.    # maximal value for the bias current injection [pA]
T = 600            # simulation time (ms)
# parameters for the alternating-current generator
driveparams = {'amplitude': 50., 'frequency': 35.}
# parameters for the noise generator
noiseparams = {'mean': 0.0, 'std': 200.}
neuronparams = {'tau_m': 20.,  # membrane time constant
                'V_th': 20.,  # threshold potential
                'E_L': 10.,  # membrane resting potential
                't_ref': 2.,  # refractory period
                'V_reset': 0.,  # reset potential
                'C_m': 200.,  # membrane capacitance
                'V_m': 0.}  # initial membrane potential
###############################################################################
# Third, the nodes are created using ``Create``. We store the returned handles
# in variables for later reference.
neurons = nest.Create('iaf_psc_alpha', N)
sr = nest.Create('spike_recorder')
noise = nest.Create('noise_generator')
drive = nest.Create('ac_generator')
###############################################################################
# Set the parameters specified above for the generators using ``set``.
drive.set(driveparams)
noise.set(noiseparams)
###############################################################################
# Set the parameters specified above for the neurons. Neurons get an internal
# current. The first neuron additionally receives the current with amplitude
# `bias_begin`, the last neuron with amplitude `bias_end`.
neurons.set(neuronparams)
# NOTE(review): with n running from 1 to N the first neuron actually gets
# bias_begin + (bias_end - bias_begin) / N rather than exactly bias_begin,
# while the last neuron gets exactly bias_end -- confirm this off-by-one
# relative to the comment above is intended before changing anything.
neurons.I_e = [(n * (bias_end - bias_begin) / N + bias_begin)
               for n in range(1, len(neurons) + 1)]
###############################################################################
# Connect alternating current and noise generators as well as
# `spike_recorder`s to neurons
nest.Connect(drive, neurons)
nest.Connect(noise, neurons)
nest.Connect(neurons, sr)
###############################################################################
# Simulate the network for time `T`.
nest.Simulate(T)
###############################################################################
# Plot the raster plot of the neuronal spiking activity.
nest.raster_plot.from_device(sr, hist=True)
plt.show()
| gpl-2.0 |
blaisb/cfdemUtilities | graph/contour_demo.py | 14 | 3478 | #!/usr/bin/env python
"""
Illustrate simple contour plotting, contours on an image with
a colorbar for the contours, and labelled contours.
See also contour_image.py.
"""
import matplotlib
import numpy as np
import matplotlib.cm as cm
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
# draw tick marks outside the axes
matplotlib.rcParams['xtick.direction'] = 'out'
matplotlib.rcParams['ytick.direction'] = 'out'
# sample the plane on a regular grid
delta = 0.025
x = np.arange(-3.0, 3.0, delta)
y = np.arange(-2.0, 2.0, delta)
X, Y = np.meshgrid(x, y)
Z1 = mlab.bivariate_normal(X, Y, 1.0, 1.0, 0.0, 0.0)
Z2 = mlab.bivariate_normal(X, Y, 1.5, 0.5, 1, 1)
# difference of Gaussians
Z = 10.0 * (Z2 - Z1)
# Create a simple contour plot with labels using default colors. The
# inline argument to clabel will control whether the labels are draw
# over the line segments of the contour, removing the lines beneath
# the label
plt.figure()
CS = plt.contour(X, Y, Z)
plt.clabel(CS, inline=1, fontsize=10)
plt.title('Simplest default with labels')
# contour labels can be placed manually by providing list of positions
# (in data coordinate). See ginput_manual_clabel.py for interactive
# placement.
plt.figure()
CS = plt.contour(X, Y, Z)
manual_locations = [(-1, -1.4), (-0.62, -0.7), (-2, 0.5), (1.7, 1.2), (2.0, 1.4), (2.4, 1.7)]
plt.clabel(CS, inline=1, fontsize=10, manual=manual_locations)
plt.title('labels at selected locations')
# You can force all the contours to be the same color.
plt.figure()
CS = plt.contour(X, Y, Z, 6,
                 colors='k',  # negative contours will be dashed by default
                 )
plt.clabel(CS, fontsize=9, inline=1)
plt.title('Single color - negative contours dashed')
# You can set negative contours to be solid instead of dashed:
matplotlib.rcParams['contour.negative_linestyle'] = 'solid'
plt.figure()
CS = plt.contour(X, Y, Z, 6,
                 colors='k',  # negative contours will be dashed by default
                 )
plt.clabel(CS, fontsize=9, inline=1)
plt.title('Single color - negative contours solid')
# And you can manually specify the colors of the contour
plt.figure()
CS = plt.contour(X, Y, Z, 6,
                 linewidths=np.arange(.5, 4, .5),
                 colors=('r', 'green', 'blue', (1,1,0), '#afeeee', '0.5')
                 )
plt.clabel(CS, fontsize=9, inline=1)
plt.title('Crazy lines')
# Or you can use a colormap to specify the colors; the default
# colormap will be used for the contour lines
plt.figure()
im = plt.imshow(Z, interpolation='bilinear', origin='lower',
                cmap=cm.gray, extent=(-3,3,-2,2))
levels = np.arange(-1.2, 1.6, 0.2)
CS = plt.contour(Z, levels,
                 origin='lower',
                 linewidths=2,
                 extent=(-3,3,-2,2))
# Thicken the zero contour.
# (levels runs -1.2, -1.0, ..., 1.4, so index 6 is the 0.0 level)
zc = CS.collections[6]
plt.setp(zc, linewidth=4)
plt.clabel(CS, levels[1::2],  # label every second level
           inline=1,
           fmt='%1.1f',
           fontsize=14)
# make a colorbar for the contour lines
CB = plt.colorbar(CS, shrink=0.8, extend='both')
plt.title('Lines with colorbar')
#plt.hot()  # Now change the colormap for the contour lines and colorbar
plt.flag()
# We can still add a colorbar for the image, too.
CBI = plt.colorbar(im, orientation='horizontal', shrink=0.8)
# This makes the original colorbar look a bit out of place,
# so let's improve its position.
l,b,w,h = plt.gca().get_position().bounds
ll,bb,ww,hh = CB.ax.get_position().bounds
CB.ax.set_position([ll, b+0.1*h, ww, h*0.8])
plt.show()
| lgpl-3.0 |
jgurtowski/ectools | lengthhist.py | 1 | 1997 | #!/usr/bin/env python
import sys
from operator import attrgetter, itemgetter
from itertools import imap
# Select the non-interactive Agg backend; this must happen before
# matplotlib.pyplot is imported.
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import numpy as np
from nucio import lineRecordIterator, M4Record, M4RecordTypes, lineItemIterator
from args import arglist, parseArgs, getHelpStr, CLArgument, argflag
# Usage/help text. ("alginments" / "Plot Make" typos are preserved: this
# string is program output and changing it would change behavior.)
description = ("lengthhist.py [options] title alginments.m4 [alignments2.m4...]\n"
               "Plot Make histograms from alignment (nucmer show-coords) "
               "files")
argument_list = [["labels","labels",arglist,["Ectools","PacbioToCA"], "Labels for each dataset"],
                 ["ymax", "ymax", int, -1, "Maximum Y-value"],
                 ["lenfiles", "lenfiles", int, -1, ("Input files are just a column of lengths, "
                                                    "argument specifies the column")]]
# NOTE(review): Python-2-era script -- map() returns a list there.  Under
# Python 3, `args` would be a one-shot iterator consumed by parseArgs,
# leaving getHelpStr(description, args) below with nothing; confirm before
# porting.
args = map(CLArgument._make, argument_list)
(parg_map, args_remaining) = parseArgs(sys.argv[1:],args)
# Requires at least 3 positional args (title plus two files).
# NOTE(review): the usage string suggests a single alignment file should be
# accepted -- confirm whether ">= 2" was intended.
if not len(args_remaining) >= 3:
    sys.exit(getHelpStr(description,args) + "\n")
title = args_remaining[0]
# qseqlength field of each M4 alignment record is the read length
igetter = attrgetter("qseqlength")
flens = []  # one list of lengths per input file
for fname in args_remaining[1:]:
    with open(fname) as fh:
        if parg_map["lenfiles"] < 0:
            # default mode: parse M4 alignment records
            records = lineRecordIterator(fh, M4Record, M4RecordTypes)
            flens.append( map(igetter, records) )
        else:
            # --lenfiles N: inputs are plain columns of lengths, take column N
            getter = itemgetter(parg_map["lenfiles"])
            flens.append( map(int, imap(getter, lineItemIterator(fh) )))
pp = PdfPages("hist.pdf")
fig = plt.figure()
fig.suptitle(args_remaining[0])
len_binsize = 250  # histogram bin width (bases)
plt.xlabel("Read Length")
plt.ylabel("Frequency")
if parg_map["ymax"] > 0:
    plt.ylim(1,parg_map["ymax"])
labels = parg_map["labels"]
colors = ["red","blue","green","black"]
plt.hist(flens, bins=np.arange(0,50000,len_binsize), histtype='step', color=colors[:len(labels)],label=labels)
plt.legend()
plt.savefig(pp, format="pdf")
pp.close()
| bsd-3-clause |
blink1073/oct2py | setup.py | 1 | 2074 | """Setup script for oct2py package.
"""
import glob
import os
# -- Package metadata ---------------------------------------------------------
DISTNAME = 'oct2py'
DESCRIPTION = 'Python to GNU Octave bridge --> run m-files from python.'
# Read the long description eagerly; the context manager guarantees the file
# handle is closed (the original used a bare open() and leaked the handle).
with open('README.rst', 'rb') as readme_file:
    LONG_DESCRIPTION = readme_file.read().decode('utf-8')
MAINTAINER = 'Steven Silvester'
MAINTAINER_EMAIL = 'steven.silvester@ieee.org'
URL = 'http://github.com/blink1073/oct2py'
LICENSE = 'MIT'
REQUIRES = ["numpy (>= 1.12)", "scipy (>= 0.17)", "octave_kernel (>= 0.31.0)"]
INSTALL_REQUIRES = ["octave_kernel >= 0.31.0", "numpy >= 1.12", "scipy >= 0.17"]
EXTRAS_REQUIRE = {
    'test': ['pytest', 'pandas', 'nbconvert'],
    'docs': ['sphinx', 'sphinx-bootstrap-theme', 'numpydoc']
}
PACKAGES = [DISTNAME, '%s.tests' % DISTNAME, '%s/ipython' % DISTNAME,
            '%s/ipython/tests' % DISTNAME]
# ship every .m helper that lives next to the python sources
PACKAGE_DATA = {DISTNAME: ['*.m'] + glob.glob('%s/**/*.m' % DISTNAME)}
CLASSIFIERS = """\
Development Status :: 5 - Production/Stable
Intended Audience :: Developers
Intended Audience :: Science/Research
License :: OSI Approved :: MIT License
Operating System :: OS Independent
Programming Language :: Python
Programming Language :: Python :: 3
Topic :: Scientific/Engineering
Topic :: Software Development
"""
try:
    from setuptools import setup
except ImportError:
    # plain distutils fallback for very old environments
    from distutils.core import setup
# Execute oct2py/_version.py to obtain __version__ without importing the
# package itself (importing would require its runtime dependencies).
_version_py = os.path.join('.', 'oct2py', '_version.py')
version_ns = {}
with open(_version_py, mode='r') as version_file:
    exec(version_file.read(), version_ns)
setup(
    name=DISTNAME,
    version=version_ns['__version__'],
    maintainer=MAINTAINER,
    maintainer_email=MAINTAINER_EMAIL,
    packages=PACKAGES,
    package_data=PACKAGE_DATA,
    include_package_data=True,
    url=URL,
    download_url=URL,
    license=LICENSE,
    platforms=["Any"],
    description=DESCRIPTION,
    long_description=LONG_DESCRIPTION,
    long_description_content_type='text/x-rst',
    classifiers=list(filter(None, CLASSIFIERS.split('\n'))),
    requires=REQUIRES,
    install_requires=INSTALL_REQUIRES,
    extras_require=EXTRAS_REQUIRE
)
| mit |
huzq/scikit-learn | examples/ensemble/plot_adaboost_hastie_10_2.py | 71 | 3578 | """
=============================
Discrete versus Real AdaBoost
=============================
This example is based on Figure 10.2 from Hastie et al 2009 [1]_ and
illustrates the difference in performance between the discrete SAMME [2]_
boosting algorithm and real SAMME.R boosting algorithm. Both algorithms are
evaluated on a binary classification task where the target Y is a non-linear
function of 10 input features.
Discrete SAMME AdaBoost adapts based on errors in predicted class labels
whereas real SAMME.R uses the predicted class probabilities.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
.. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>,
# Noel Dawe <noel.dawe@gmail.com>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import zero_one_loss
from sklearn.ensemble import AdaBoostClassifier
n_estimators = 400
# A learning rate of 1. may not be optimal for both SAMME and SAMME.R
learning_rate = 1.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
# first 2000 samples train, the remaining 10000 are held out for testing
X_test, y_test = X[2000:], y[2000:]
X_train, y_train = X[:2000], y[:2000]
# weak baseline: a single decision stump
dt_stump = DecisionTreeClassifier(max_depth=1, min_samples_leaf=1)
dt_stump.fit(X_train, y_train)
dt_stump_err = 1.0 - dt_stump.score(X_test, y_test)
# stronger baseline: a depth-9 decision tree
dt = DecisionTreeClassifier(max_depth=9, min_samples_leaf=1)
dt.fit(X_train, y_train)
dt_err = 1.0 - dt.score(X_test, y_test)
ada_discrete = AdaBoostClassifier(
    base_estimator=dt_stump,
    learning_rate=learning_rate,
    n_estimators=n_estimators,
    algorithm="SAMME")
ada_discrete.fit(X_train, y_train)
ada_real = AdaBoostClassifier(
    base_estimator=dt_stump,
    learning_rate=learning_rate,
    n_estimators=n_estimators,
    algorithm="SAMME.R")
ada_real.fit(X_train, y_train)
fig = plt.figure()
ax = fig.add_subplot(111)
# horizontal reference lines for the two single-tree baselines
ax.plot([1, n_estimators], [dt_stump_err] * 2, 'k-',
        label='Decision Stump Error')
ax.plot([1, n_estimators], [dt_err] * 2, 'k--',
        label='Decision Tree Error')
# staged_predict yields the ensemble's prediction after each boosting
# iteration, giving error as a function of the number of estimators
ada_discrete_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_test)):
    ada_discrete_err[i] = zero_one_loss(y_pred, y_test)
ada_discrete_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_train)):
    ada_discrete_err_train[i] = zero_one_loss(y_pred, y_train)
ada_real_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_test)):
    ada_real_err[i] = zero_one_loss(y_pred, y_test)
ada_real_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_train)):
    ada_real_err_train[i] = zero_one_loss(y_pred, y_train)
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err,
        label='Discrete AdaBoost Test Error',
        color='red')
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err_train,
        label='Discrete AdaBoost Train Error',
        color='blue')
ax.plot(np.arange(n_estimators) + 1, ada_real_err,
        label='Real AdaBoost Test Error',
        color='orange')
ax.plot(np.arange(n_estimators) + 1, ada_real_err_train,
        label='Real AdaBoost Train Error',
        color='green')
ax.set_ylim((0.0, 0.5))
ax.set_xlabel('n_estimators')
ax.set_ylabel('error rate')
leg = ax.legend(loc='upper right', fancybox=True)
leg.get_frame().set_alpha(0.7)
plt.show()
| bsd-3-clause |
tosolveit/scikit-learn | sklearn/metrics/tests/test_classification.py | 53 | 49781 | from __future__ import division, print_function
import numpy as np
from scipy import linalg
from functools import partial
from itertools import product
import warnings
from sklearn import datasets
from sklearn import svm
from sklearn.datasets import make_multilabel_classification
from sklearn.preprocessing import label_binarize
from sklearn.utils.fixes import np_version
from sklearn.utils.validation import check_random_state
from sklearn.utils.testing import assert_raises, clean_warning_registry
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics import accuracy_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import classification_report
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
from sklearn.metrics import fbeta_score
from sklearn.metrics import hamming_loss
from sklearn.metrics import hinge_loss
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import log_loss
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import zero_one_loss
from sklearn.metrics import brier_score_loss
from sklearn.metrics.classification import _check_targets
from sklearn.metrics.base import UndefinedMetricWarning
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
    """Make some classification predictions on a toy dataset using a SVC
    If binary is True restrict to a binary classification problem instead of a
    multiclass classification problem
    Returns ``(y_true, y_pred, probas_pred)`` computed on the second half of
    the shuffled samples; the first half is used to fit the classifier.
    """
    if dataset is None:
        # import some data to play with
        dataset = datasets.load_iris()
    X = dataset.data
    y = dataset.target
    if binary:
        # restrict to a binary classification task
        X, y = X[y < 2], y[y < 2]
    n_samples, n_features = X.shape
    p = np.arange(n_samples)
    # fixed seed so every test sees the same shuffled split
    rng = check_random_state(37)
    rng.shuffle(p)
    X, y = X[p], y[p]
    half = int(n_samples / 2)
    # add noisy features to make the problem harder and avoid perfect results
    rng = np.random.RandomState(0)
    X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
    # run classifier, get class probabilities and label predictions
    clf = svm.SVC(kernel='linear', probability=True, random_state=0)
    probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
    if binary:
        # only interested in probabilities of the positive case
        # XXX: do we really want a special API for the binary case?
        probas_pred = probas_pred[:, 1]
    y_pred = clf.predict(X[half:])
    y_true = y[half:]
    return y_true, y_pred, probas_pred
###############################################################################
# Tests
def test_multilabel_accuracy_score_subset_accuracy():
    """accuracy_score on dense multilabel indicators is subset accuracy."""
    # Two samples; the labelings disagree on exactly one of the two rows.
    truth = np.array([[0, 1, 1], [1, 0, 1]])
    pred = np.array([[0, 0, 1], [1, 0, 1]])
    # Only the second row matches exactly -> subset accuracy of 1/2.
    assert_equal(accuracy_score(truth, pred), 0.5)
    for indicator in (truth, pred):
        # Perfect agreement scores 1.
        assert_equal(accuracy_score(indicator, indicator), 1)
        # Complementing every label destroys every exact row match.
        assert_equal(accuracy_score(indicator, np.logical_not(indicator)), 0)
        # Neither labeling contains an all-zero row.
        assert_equal(accuracy_score(indicator, np.zeros(indicator.shape)), 0)
def test_precision_recall_f1_score_binary():
    """Precision, recall and F1 on a binary task, per class and averaged."""
    # Test Precision Recall and F1 Score for binary classification task
    y_true, y_pred, _ = make_prediction(binary=True)
    # detailed measures for each class
    p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
    assert_array_almost_equal(p, [0.73, 0.85], 2)
    assert_array_almost_equal(r, [0.88, 0.68], 2)
    assert_array_almost_equal(f, [0.80, 0.76], 2)
    assert_array_equal(s, [25, 25])
    # individual scoring function that can be used for grid search: in the
    # binary class case the score is the value of the measure for the positive
    # class (e.g. label == 1). This is deprecated for average != 'binary'.
    assert_dep_warning = partial(assert_warns, DeprecationWarning)
    for kwargs, my_assert in [({}, assert_no_warnings),
                              ({'average': 'binary'}, assert_no_warnings),
                              ({'average': 'micro'}, assert_dep_warning)]:
        ps = my_assert(precision_score, y_true, y_pred, **kwargs)
        assert_array_almost_equal(ps, 0.85, 2)
        rs = my_assert(recall_score, y_true, y_pred, **kwargs)
        assert_array_almost_equal(rs, 0.68, 2)
        fs = my_assert(f1_score, y_true, y_pred, **kwargs)
        assert_array_almost_equal(fs, 0.76, 2)
        # F-beta (beta=2) must match its closed form in terms of P and R
        assert_almost_equal(my_assert(fbeta_score, y_true, y_pred, beta=2,
                                      **kwargs),
                            (1 + 2 ** 2) * ps * rs / (2 ** 2 * ps + rs), 2)
def test_precision_recall_f_binary_single_class():
    """P/R/F behave sensibly when only a single class is present.
    Such a case may occur with non-stratified cross-validation.
    """
    for scorer in (precision_score, recall_score, f1_score):
        # only the positive class present: every metric is perfect
        assert_equal(1., scorer([1, 1], [1, 1]))
        # only the negative class present: positive-class metrics are zero
        assert_equal(0., scorer([-1, -1], [-1, -1]))
@ignore_warnings
def test_precision_recall_f_extra_labels():
    """PRF accepts labels absent from the input; they score 0 per class."""
    # Test handling of explicit additional (not in input) labels to PRF
    y_true = [1, 3, 3, 2]
    y_pred = [1, 1, 3, 2]
    y_true_bin = label_binarize(y_true, classes=np.arange(5))
    y_pred_bin = label_binarize(y_pred, classes=np.arange(5))
    data = [(y_true, y_pred),
            (y_true_bin, y_pred_bin)]
    for i, (y_true, y_pred) in enumerate(data):
        # No average: zeros in array
        actual = recall_score(y_true, y_pred, labels=[0, 1, 2, 3, 4],
                              average=None)
        assert_array_almost_equal([0., 1., 1., .5, 0.], actual)
        # Macro average is changed
        actual = recall_score(y_true, y_pred, labels=[0, 1, 2, 3, 4],
                              average='macro')
        assert_array_almost_equal(np.mean([0., 1., 1., .5, 0.]), actual)
        # No effect otherwise
        for average in ['micro', 'weighted', 'samples']:
            # 'samples' is undefined for the 1d multiclass representation
            if average == 'samples' and i == 0:
                continue
            assert_almost_equal(recall_score(y_true, y_pred,
                                             labels=[0, 1, 2, 3, 4],
                                             average=average),
                                recall_score(y_true, y_pred, labels=None,
                                             average=average))
    # Error when introducing invalid label in multilabel case
    # (although it would only affect performance if average='macro'/None)
    for average in [None, 'macro', 'micro', 'samples']:
        assert_raises(ValueError, recall_score, y_true_bin, y_pred_bin,
                      labels=np.arange(6), average=average)
        assert_raises(ValueError, recall_score, y_true_bin, y_pred_bin,
                      labels=np.arange(-1, 4), average=average)
@ignore_warnings
def test_precision_recall_f_ignored_labels():
    """PRF can be restricted to a subset of the labels via ``labels=``."""
    # Test a subset of labels may be requested for PRF
    y_true = [1, 1, 2, 3]
    y_pred = [1, 3, 3, 3]
    y_true_bin = label_binarize(y_true, classes=np.arange(5))
    y_pred_bin = label_binarize(y_pred, classes=np.arange(5))
    data = [(y_true, y_pred),
            (y_true_bin, y_pred_bin)]
    for i, (y_true, y_pred) in enumerate(data):
        # recall restricted to labels {1, 3} vs. recall over all labels
        recall_13 = partial(recall_score, y_true, y_pred, labels=[1, 3])
        recall_all = partial(recall_score, y_true, y_pred, labels=None)
        assert_array_almost_equal([.5, 1.], recall_13(average=None))
        assert_almost_equal((.5 + 1.) / 2, recall_13(average='macro'))
        assert_almost_equal((.5 * 2 + 1. * 1) / 3,
                            recall_13(average='weighted'))
        assert_almost_equal(2. / 3, recall_13(average='micro'))
        # ensure the above were meaningful tests:
        for average in ['macro', 'weighted', 'micro']:
            assert_not_equal(recall_13(average=average),
                             recall_all(average=average))
def test_average_precision_score_score_non_binary_class():
    """average_precision_score must reject multiclass targets."""
    rng = check_random_state(404)
    scores = rng.rand(10)
    # three distinct class values make the target multiclass
    targets = rng.randint(0, 3, size=10)
    assert_raise_message(ValueError, "multiclass format is not supported",
                         average_precision_score, targets, scores)
def test_average_precision_score_duplicate_values():
    """Tied scores that still separate the classes give an AP of exactly 1.
    The precision-recall curve needs different handling of duplicate scores
    than ROC-AUC, because precision-recall is a decreasing curve.
    """
    # Every positive is scored at least as high as every negative, so the
    # ranking is a perfect test statistic despite the ties.
    targets = [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1]
    scores = [0, .1, .1, .4, .5, .6, .6, .9, .9, 1, 1]
    assert_equal(average_precision_score(targets, scores), 1)
def test_average_precision_score_tied_values():
    """A tie across the class boundary must pull AP strictly below 1."""
    # Read left to right the 0s precede the 1s, which looks perfectly
    # sorted -- but the first two samples share the score 0.5, so they
    # could equally be swapped, creating an imperfect ordering.  That
    # imperfection has to show up in the final score.
    targets = [0, 1, 1]
    scores = [.5, .5, .6]
    assert_not_equal(average_precision_score(targets, scores), 1.)
@ignore_warnings
def test_precision_recall_fscore_support_errors():
    """Invalid arguments to precision_recall_fscore_support raise ValueError."""
    y_true, y_pred, _ = make_prediction(binary=True)
    # beta must be strictly positive; pos_label 2 does not exist in {0, 1}
    for bad_kwargs in (dict(beta=0.0),
                       dict(pos_label=2, average='macro')):
        assert_raises(ValueError, precision_recall_fscore_support,
                      y_true, y_pred, **bad_kwargs)
    # unknown averaging mode
    assert_raises(ValueError, precision_recall_fscore_support,
                  [0, 1, 2], [1, 2, 0], average='mega')
def test_confusion_matrix_binary():
    """Binary confusion matrix, and MCC consistency, for int and str labels."""
    # Test confusion matrix - binary classification case
    y_true, y_pred, _ = make_prediction(binary=True)
    def test(y_true, y_pred):
        cm = confusion_matrix(y_true, y_pred)
        assert_array_equal(cm, [[22, 3], [8, 17]])
        # NOTE(review): for labels [0, 1] the flattened order is really
        # (tn, fp, fn, tp); the MCC formula below is symmetric under
        # swapping tp<->tn and fp<->fn, so the naming does not change the
        # computed value.
        tp, fp, fn, tn = cm.flatten()
        num = (tp * tn - fp * fn)
        den = np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
        # guard against a 0/0 when any margin is empty
        true_mcc = 0 if den == 0 else num / den
        mcc = matthews_corrcoef(y_true, y_pred)
        assert_array_almost_equal(mcc, true_mcc, decimal=2)
        assert_array_almost_equal(mcc, 0.57, decimal=2)
    test(y_true, y_pred)
    test([str(y) for y in y_true],
         [str(y) for y in y_pred])
def test_cohen_kappa():
    """Cohen's kappa against the worked examples of Artstein & Poesio."""
    # These label vectors reproduce the contingency matrix from Artstein and
    # Poesio (2008), Table 1: np.array([[20, 20], [10, 50]]).
    y1 = np.array([0] * 40 + [1] * 60)
    y2 = np.array([0] * 20 + [1] * 20 + [0] * 10 + [1] * 50)
    kappa = cohen_kappa_score(y1, y2)
    assert_almost_equal(kappa, .348, decimal=3)
    # kappa is symmetric in its two raters
    assert_equal(kappa, cohen_kappa_score(y2, y1))
    # Add spurious labels and ignore them.
    y1 = np.append(y1, [2] * 4)
    y2 = np.append(y2, [2] * 4)
    assert_equal(cohen_kappa_score(y1, y2, labels=[0, 1]), kappa)
    # perfect agreement gives kappa == 1
    assert_almost_equal(cohen_kappa_score(y1, y1), 1.)
    # Multiclass example: Artstein and Poesio, Table 4.
    y1 = np.array([0] * 46 + [1] * 44 + [2] * 10)
    y2 = np.array([0] * 52 + [1] * 32 + [2] * 16)
    assert_almost_equal(cohen_kappa_score(y1, y2), .8013, decimal=4)
@ignore_warnings
def test_matthews_corrcoef_nan():
    """Degenerate inputs must yield 0.0 rather than NaN."""
    # A constant vector makes one of the variance terms zero; the metric
    # is defined to return 0 in that case instead of dividing by zero.
    for truth, pred in ([0], [1]), ([0, 0], [0, 1]):
        assert_equal(matthews_corrcoef(truth, pred), 0.0)
def test_precision_recall_f1_score_multiclass():
    """Per-class and averaged P/R/F on a three-class problem."""
    # Test Precision Recall and F1 Score for multiclass classification task
    y_true, y_pred, _ = make_prediction(binary=False)
    # compute scores with default labels introspection
    p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
    assert_array_almost_equal(p, [0.83, 0.33, 0.42], 2)
    assert_array_almost_equal(r, [0.79, 0.09, 0.90], 2)
    assert_array_almost_equal(f, [0.81, 0.15, 0.57], 2)
    assert_array_equal(s, [24, 31, 20])
    # averaging tests
    ps = precision_score(y_true, y_pred, pos_label=1, average='micro')
    assert_array_almost_equal(ps, 0.53, 2)
    rs = recall_score(y_true, y_pred, average='micro')
    assert_array_almost_equal(rs, 0.53, 2)
    fs = f1_score(y_true, y_pred, average='micro')
    assert_array_almost_equal(fs, 0.53, 2)
    ps = precision_score(y_true, y_pred, average='macro')
    assert_array_almost_equal(ps, 0.53, 2)
    rs = recall_score(y_true, y_pred, average='macro')
    assert_array_almost_equal(rs, 0.60, 2)
    fs = f1_score(y_true, y_pred, average='macro')
    assert_array_almost_equal(fs, 0.51, 2)
    ps = precision_score(y_true, y_pred, average='weighted')
    assert_array_almost_equal(ps, 0.51, 2)
    rs = recall_score(y_true, y_pred, average='weighted')
    assert_array_almost_equal(rs, 0.53, 2)
    fs = f1_score(y_true, y_pred, average='weighted')
    assert_array_almost_equal(fs, 0.47, 2)
    # 'samples' averaging is rejected for (non-multilabel) multiclass input
    assert_raises(ValueError, precision_score, y_true, y_pred,
                  average="samples")
    assert_raises(ValueError, recall_score, y_true, y_pred, average="samples")
    assert_raises(ValueError, f1_score, y_true, y_pred, average="samples")
    assert_raises(ValueError, fbeta_score, y_true, y_pred, average="samples",
                  beta=0.5)
    # same prediction but with and explicit label ordering
    p, r, f, s = precision_recall_fscore_support(
        y_true, y_pred, labels=[0, 2, 1], average=None)
    assert_array_almost_equal(p, [0.83, 0.41, 0.33], 2)
    assert_array_almost_equal(r, [0.79, 0.90, 0.10], 2)
    assert_array_almost_equal(f, [0.81, 0.57, 0.15], 2)
    assert_array_equal(s, [24, 20, 31])
def test_precision_refcall_f1_score_multilabel_unordered_labels():
    """Labels need not be sorted in the multilabel case."""
    # test that labels need not be sorted in the multilabel case
    # NOTE(review): "refcall" in the test name looks like a typo for
    # "recall"; renaming would change a public name, so it is kept.
    y_true = np.array([[1, 1, 0, 0]])
    y_pred = np.array([[0, 0, 1, 1]])
    for average in ['samples', 'micro', 'macro', 'weighted', None]:
        p, r, f, s = precision_recall_fscore_support(
            y_true, y_pred, labels=[3, 0, 1, 2], warn_for=[], average=average)
        # prediction and truth are disjoint, so every score collapses to 0
        assert_array_equal(p, 0)
        assert_array_equal(r, 0)
        assert_array_equal(f, 0)
        if average is None:
            # support follows the requested (unsorted) label order
            assert_array_equal(s, [0, 1, 1, 0])
def test_precision_recall_f1_score_multiclass_pos_label_none():
    """pos_label=None with weighted averaging must not raise (GH issue #1296)."""
    truth = np.array([0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1])
    guess = np.array([1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1])
    # Only the absence of an exception is checked here; the returned
    # values themselves are not asserted.
    p, r, f, s = precision_recall_fscore_support(truth, guess,
                                                 pos_label=None,
                                                 average='weighted')
def test_zero_precision_recall():
    """Pathological inputs must yield 0 scores rather than NaNs."""
    # Promote floating point warnings to errors so any NaN-producing
    # division inside the metrics would abort the test.
    old_error_settings = np.seterr(all='raise')
    try:
        truth = np.array([0, 1, 2, 0, 1, 2])
        guess = np.array([2, 0, 1, 1, 2, 0])
        # every single prediction is wrong, so each score collapses to 0
        for scorer in (precision_score, recall_score, f1_score):
            assert_almost_equal(scorer(truth, guess, average='weighted'),
                                0.0, 2)
    finally:
        # restore the caller's floating point error settings
        np.seterr(**old_error_settings)
def test_confusion_matrix_multiclass():
    """Multiclass confusion matrix, default and explicit label ordering."""
    # Test confusion matrix - multi-class case
    y_true, y_pred, _ = make_prediction(binary=False)
    def test(y_true, y_pred, string_type=False):
        # compute confusion matrix with default labels introspection
        cm = confusion_matrix(y_true, y_pred)
        assert_array_equal(cm, [[19, 4, 1],
                                [4, 3, 24],
                                [0, 2, 18]])
        # compute confusion matrix with explicit label ordering
        labels = ['0', '2', '1'] if string_type else [0, 2, 1]
        cm = confusion_matrix(y_true,
                              y_pred,
                              labels=labels)
        assert_array_equal(cm, [[19, 1, 4],
                                [0, 18, 2],
                                [4, 24, 3]])
    test(y_true, y_pred)
    # identical matrices must come out for string-typed labels
    test(list(str(y) for y in y_true),
         list(str(y) for y in y_pred),
         string_type=True)
def test_confusion_matrix_multiclass_subset_labels():
    """confusion_matrix restricted to a subset of the labels."""
    y_true, y_pred, _ = make_prediction(binary=False)
    # keeping only classes 0 and 1 drops every row/column involving class 2
    assert_array_equal(confusion_matrix(y_true, y_pred, labels=[0, 1]),
                       [[19, 4],
                        [4, 3]])
    # an explicit ordering of a subset reorders rows and columns alike
    assert_array_equal(confusion_matrix(y_true, y_pred, labels=[2, 1]),
                       [[18, 2],
                        [24, 3]])
def test_classification_report_multiclass():
    """classification_report text output with and without target names."""
    # Test performance report
    iris = datasets.load_iris()
    y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)
    # print classification report with class names
    expected_report = """\
             precision    recall  f1-score   support
     setosa       0.83      0.79      0.81        24
 versicolor       0.33      0.10      0.15        31
  virginica       0.42      0.90      0.57        20
avg / total       0.51      0.53      0.47        75
"""
    report = classification_report(
        y_true, y_pred, labels=np.arange(len(iris.target_names)),
        target_names=iris.target_names)
    assert_equal(report, expected_report)
    # print classification report with label detection
    expected_report = """\
             precision    recall  f1-score   support
          0       0.83      0.79      0.81        24
          1       0.33      0.10      0.15        31
          2       0.42      0.90      0.57        20
avg / total       0.51      0.53      0.47        75
"""
    report = classification_report(y_true, y_pred)
    assert_equal(report, expected_report)
def test_classification_report_multiclass_with_digits():
    """classification_report honours the ``digits`` formatting parameter."""
    # Test performance report with added digits in floating point values
    iris = datasets.load_iris()
    y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)
    # print classification report with class names
    expected_report = """\
             precision    recall  f1-score   support
     setosa    0.82609    0.79167    0.80851         24
 versicolor    0.33333    0.09677    0.15000         31
  virginica    0.41860    0.90000    0.57143         20
avg / total    0.51375    0.53333    0.47310         75
"""
    report = classification_report(
        y_true, y_pred, labels=np.arange(len(iris.target_names)),
        target_names=iris.target_names, digits=5)
    assert_equal(report, expected_report)
    # print classification report with label detection
    expected_report = """\
             precision    recall  f1-score   support
          0       0.83      0.79      0.81        24
          1       0.33      0.10      0.15        31
          2       0.42      0.90      0.57        20
avg / total       0.51      0.53      0.47        75
"""
    report = classification_report(y_true, y_pred)
    assert_equal(report, expected_report)
def test_classification_report_multiclass_with_string_label():
    """classification_report with string class labels and target_names."""
    y_true, y_pred, _ = make_prediction(binary=False)
    y_true = np.array(["blue", "green", "red"])[y_true]
    y_pred = np.array(["blue", "green", "red"])[y_pred]
    expected_report = """\
             precision    recall  f1-score   support
       blue       0.83      0.79      0.81        24
      green       0.33      0.10      0.15        31
        red       0.42      0.90      0.57        20
avg / total       0.51      0.53      0.47        75
"""
    report = classification_report(y_true, y_pred)
    assert_equal(report, expected_report)
    # target_names overrides the detected string labels in the output
    expected_report = """\
             precision    recall  f1-score   support
          a       0.83      0.79      0.81        24
          b       0.33      0.10      0.15        31
          c       0.42      0.90      0.57        20
avg / total       0.51      0.53      0.47        75
"""
    report = classification_report(y_true, y_pred,
                                   target_names=["a", "b", "c"])
    assert_equal(report, expected_report)
def test_classification_report_multiclass_with_unicode_label():
    """classification_report copes with non-ASCII class labels."""
    y_true, y_pred, _ = make_prediction(binary=False)
    labels = np.array([u"blue\xa2", u"green\xa2", u"red\xa2"])
    y_true = labels[y_true]
    y_pred = labels[y_pred]
    expected_report = u"""\
             precision    recall  f1-score   support
      blue\xa2       0.83      0.79      0.81        24
     green\xa2       0.33      0.10      0.15        31
       red\xa2       0.42      0.90      0.57        20
avg / total       0.51      0.53      0.47        75
"""
    if np_version[:3] < (1, 7, 0):
        # old NumPy cannot searchsorted unicode arrays; the metric raises
        expected_message = ("NumPy < 1.7.0 does not implement"
                            " searchsorted on unicode data correctly.")
        assert_raise_message(RuntimeError, expected_message,
                             classification_report, y_true, y_pred)
    else:
        report = classification_report(y_true, y_pred)
        assert_equal(report, expected_report)
def test_multilabel_classification_report():
    """classification_report on multilabel indicator targets: one row per
    label column plus the weighted-average row."""
    n_classes = 4
    n_samples = 50

    # Two independent random multilabel target matrices (features unused).
    _, y_true = make_multilabel_classification(n_features=1,
                                               n_samples=n_samples,
                                               n_classes=n_classes,
                                               random_state=0)
    _, y_pred = make_multilabel_classification(n_features=1,
                                               n_samples=n_samples,
                                               n_classes=n_classes,
                                               random_state=1)

    expected_report = """\
             precision    recall  f1-score   support

          0       0.50      0.67      0.57        24
          1       0.51      0.74      0.61        27
          2       0.29      0.08      0.12        26
          3       0.52      0.56      0.54        27

avg / total       0.45      0.51      0.46       104
"""
    report = classification_report(y_true, y_pred)
    assert_equal(report, expected_report)
def test_multilabel_zero_one_loss_subset():
    """Check zero_one_loss (subset accuracy complement) on dense
    multilabel indicator matrices."""
    labels_a = np.array([[0, 1, 1], [1, 0, 1]])
    labels_b = np.array([[0, 0, 1], [1, 0, 1]])

    cases = [
        (labels_a, labels_b, 0.5),                # one of two rows differs
        (labels_a, labels_a, 0),                  # perfect match
        (labels_b, labels_b, 0),
        (labels_b, np.logical_not(labels_b), 1),  # every row disagrees
        (labels_a, np.logical_not(labels_a), 1),
        (labels_a, np.zeros(labels_a.shape), 1),  # all-zero prediction
        (labels_b, np.zeros(labels_a.shape), 1),
    ]
    for truth, pred, expected in cases:
        assert_equal(zero_one_loss(truth, pred), expected)
def test_multilabel_hamming_loss():
    """Check hamming_loss (fraction of wrong label cells) on dense
    multilabel indicator matrices."""
    truth_a = np.array([[0, 1, 1], [1, 0, 1]])
    truth_b = np.array([[0, 0, 1], [1, 0, 1]])

    for truth, pred, expected in [
        (truth_a, truth_b, 1 / 6),                 # one wrong cell of six
        (truth_a, truth_a, 0),
        (truth_b, truth_b, 0),
        (truth_b, 1 - truth_b, 1),                 # complement flips all cells
        (truth_a, 1 - truth_a, 1),
        (truth_a, np.zeros(truth_a.shape), 4 / 6),
        (truth_b, np.zeros(truth_a.shape), 0.5),
    ]:
        assert_equal(hamming_loss(truth, pred), expected)
def test_multilabel_jaccard_similarity_score():
    """Check jaccard_similarity_score on dense multilabel indicator
    matrices (mean of per-sample |intersection| / |union|)."""
    truth_a = np.array([[0, 1, 1], [1, 0, 1]])
    truth_b = np.array([[0, 0, 1], [1, 0, 1]])

    # Per-sample intersections with each other: [1, 2]; unions: [2, 2].
    for truth, pred, expected in [
        (truth_a, truth_b, 0.75),
        (truth_a, truth_a, 1),
        (truth_b, truth_b, 1),
        (truth_b, np.logical_not(truth_b), 0),    # disjoint label sets
        (truth_a, np.logical_not(truth_a), 0),
        (truth_a, np.zeros(truth_a.shape), 0),    # empty prediction
        (truth_b, np.zeros(truth_a.shape), 0),
    ]:
        assert_equal(jaccard_similarity_score(truth, pred), expected)
@ignore_warnings
def test_precision_recall_f1_score_multilabel_1():
    """Check precision/recall/F/fbeta on a hand-computed multilabel example
    for every averaging mode (per-class, macro, micro, weighted, samples)."""
    # First crafted example
    y_true = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 1]])
    y_pred = np.array([[0, 1, 0, 0], [0, 1, 0, 0], [1, 0, 1, 0]])

    p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)

    # Per-label counts for the matrices above:
    # tp = [0, 1, 1, 0]
    # fn = [1, 0, 0, 1]
    # fp = [1, 1, 0, 0]
    # Check per class
    assert_array_almost_equal(p, [0.0, 0.5, 1.0, 0.0], 2)
    assert_array_almost_equal(r, [0.0, 1.0, 1.0, 0.0], 2)
    assert_array_almost_equal(f, [0.0, 1 / 1.5, 1, 0.0], 2)
    assert_array_almost_equal(s, [1, 1, 1, 1], 2)

    f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
    support = s  # keep per-class support before s is rebound below
    assert_array_almost_equal(f2, [0, 0.83, 1, 0], 2)

    # Check macro
    p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                 average="macro")
    assert_almost_equal(p, 1.5 / 4)
    assert_almost_equal(r, 0.5)
    assert_almost_equal(f, 2.5 / 1.5 * 0.25)
    assert_equal(s, None)  # support is undefined for averaged scores
    assert_almost_equal(fbeta_score(y_true, y_pred, beta=2, average="macro"),
                        np.mean(f2))

    # Check micro
    p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                 average="micro")
    assert_almost_equal(p, 0.5)
    assert_almost_equal(r, 0.5)
    assert_almost_equal(f, 0.5)
    assert_equal(s, None)
    # F-beta follows from the just-computed micro p and r.
    assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
                                    average="micro"),
                        (1 + 4) * p * r / (4 * p + r))

    # Check weighted
    p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                 average="weighted")
    assert_almost_equal(p, 1.5 / 4)
    assert_almost_equal(r, 0.5)
    assert_almost_equal(f, 2.5 / 1.5 * 0.25)
    assert_equal(s, None)
    assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
                                    average="weighted"),
                        np.average(f2, weights=support))

    # Check samples
    # |h(x_i) inter y_i | = [0, 1, 1]
    # |y_i| = [1, 1, 2]
    # |h(x_i)| = [1, 1, 2]
    p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                 average="samples")
    assert_almost_equal(p, 0.5)
    assert_almost_equal(r, 0.5)
    assert_almost_equal(f, 0.5)
    assert_equal(s, None)
    assert_almost_equal(fbeta_score(y_true, y_pred, beta=2, average="samples"),
                        0.5)
@ignore_warnings
def test_precision_recall_f1_score_multilabel_2():
    """Second hand-computed multilabel example; notably label 3 has zero
    support while still receiving predictions."""
    # Second crafted example
    y_true = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 1, 1, 0]])
    y_pred = np.array([[0, 0, 0, 1], [0, 0, 0, 1], [1, 1, 0, 0]])

    # Per-label counts:
    # tp = [ 0.  1.  0.  0.]
    # fp = [ 1.  0.  0.  2.]
    # fn = [ 1.  1.  1.  0.]
    p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                 average=None)
    assert_array_almost_equal(p, [0.0, 1.0, 0.0, 0.0], 2)
    assert_array_almost_equal(r, [0.0, 0.5, 0.0, 0.0], 2)
    assert_array_almost_equal(f, [0.0, 0.66, 0.0, 0.0], 2)
    assert_array_almost_equal(s, [1, 2, 1, 0], 2)

    f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
    support = s  # keep per-class support before s is rebound below
    assert_array_almost_equal(f2, [0, 0.55, 0, 0], 2)

    p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                 average="micro")
    assert_almost_equal(p, 0.25)
    assert_almost_equal(r, 0.25)
    assert_almost_equal(f, 2 * 0.25 * 0.25 / 0.5)
    assert_equal(s, None)  # support is undefined for averaged scores
    assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
                                    average="micro"),
                        (1 + 4) * p * r / (4 * p + r))

    p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                 average="macro")
    assert_almost_equal(p, 0.25)
    assert_almost_equal(r, 0.125)
    assert_almost_equal(f, 2 / 12)
    assert_equal(s, None)
    assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
                                    average="macro"),
                        np.mean(f2))

    p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                 average="weighted")
    assert_almost_equal(p, 2 / 4)
    assert_almost_equal(r, 1 / 4)
    assert_almost_equal(f, 2 / 3 * 2 / 4)
    assert_equal(s, None)
    assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
                                    average="weighted"),
                        np.average(f2, weights=support))

    p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                 average="samples")
    # Check samples
    # |h(x_i) inter y_i | = [0, 0, 1]
    # |y_i| = [1, 1, 2]
    # |h(x_i)| = [1, 1, 2]
    assert_almost_equal(p, 1 / 6)
    assert_almost_equal(r, 1 / 6)
    assert_almost_equal(f, 2 / 4 * 1 / 3)
    assert_equal(s, None)
    assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
                                    average="samples"),
                        0.1666, 2)
@ignore_warnings
def test_precision_recall_f1_score_with_an_empty_prediction():
    """Multilabel scores when one sample has an entirely empty prediction
    row (its per-sample precision is ill-defined and set to 0)."""
    y_true = np.array([[0, 1, 0, 0], [1, 0, 0, 0], [0, 1, 1, 0]])
    y_pred = np.array([[0, 0, 0, 0], [0, 0, 0, 1], [0, 1, 1, 0]])

    # Per-label counts:
    # true_pos = [ 0.  1.  1.  0.]
    # false_pos = [ 0.  0.  0.  1.]
    # false_neg = [ 1.  1.  0.  0.]
    p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                 average=None)
    assert_array_almost_equal(p, [0.0, 1.0, 1.0, 0.0], 2)
    assert_array_almost_equal(r, [0.0, 0.5, 1.0, 0.0], 2)
    assert_array_almost_equal(f, [0.0, 1 / 1.5, 1, 0.0], 2)
    assert_array_almost_equal(s, [1, 2, 1, 0], 2)

    f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
    support = s  # keep per-class support before s is rebound below
    assert_array_almost_equal(f2, [0, 0.55, 1, 0], 2)

    p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                 average="macro")
    assert_almost_equal(p, 0.5)
    assert_almost_equal(r, 1.5 / 4)
    assert_almost_equal(f, 2.5 / (4 * 1.5))
    assert_equal(s, None)  # support is undefined for averaged scores
    assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
                                    average="macro"),
                        np.mean(f2))

    p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                 average="micro")
    assert_almost_equal(p, 2 / 3)
    assert_almost_equal(r, 0.5)
    assert_almost_equal(f, 2 / 3 / (2 / 3 + 0.5))
    assert_equal(s, None)
    assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
                                    average="micro"),
                        (1 + 4) * p * r / (4 * p + r))

    p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                 average="weighted")
    assert_almost_equal(p, 3 / 4)
    assert_almost_equal(r, 0.5)
    assert_almost_equal(f, (2 / 1.5 + 1) / 4)
    assert_equal(s, None)
    assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
                                    average="weighted"),
                        np.average(f2, weights=support))

    p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                 average="samples")
    # |h(x_i) inter y_i | = [0, 0, 2]
    # |y_i| = [1, 1, 2]
    # |h(x_i)| = [0, 1, 2]
    assert_almost_equal(p, 1 / 3)
    assert_almost_equal(r, 1 / 3)
    assert_almost_equal(f, 1 / 3)
    assert_equal(s, None)
    assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
                                    average="samples"),
                        0.333, 2)
def test_precision_recall_f1_no_labels():
    """With no positive labels anywhere, every score is ill-defined:
    expect an UndefinedMetricWarning and 0.0 for every averaging mode."""
    y_true = np.zeros((20, 3))
    y_pred = np.zeros_like(y_true)

    # tp = [0, 0, 0]
    # fn = [0, 0, 0]
    # fp = [0, 0, 0]
    # support = [0, 0, 0]
    # |y_hat_i inter y_i | = [0, 0, 0]
    # |y_i| = [0, 0, 0]
    # |y_hat_i| = [0, 0, 0]

    for beta in [1]:
        # Per-class results: all-zero vectors plus zero support.
        p, r, f, s = assert_warns(UndefinedMetricWarning,
                                  precision_recall_fscore_support,
                                  y_true, y_pred, average=None, beta=beta)
        assert_array_almost_equal(p, [0, 0, 0], 2)
        assert_array_almost_equal(r, [0, 0, 0], 2)
        assert_array_almost_equal(f, [0, 0, 0], 2)
        assert_array_almost_equal(s, [0, 0, 0], 2)

        fbeta = assert_warns(UndefinedMetricWarning, fbeta_score,
                             y_true, y_pred, beta=beta, average=None)
        assert_array_almost_equal(fbeta, [0, 0, 0], 2)

        # Every averaging mode collapses to scalar zero.
        for average in ["macro", "micro", "weighted", "samples"]:
            p, r, f, s = assert_warns(UndefinedMetricWarning,
                                      precision_recall_fscore_support,
                                      y_true, y_pred, average=average,
                                      beta=beta)
            assert_almost_equal(p, 0)
            assert_almost_equal(r, 0)
            assert_almost_equal(f, 0)
            assert_equal(s, None)

            fbeta = assert_warns(UndefinedMetricWarning, fbeta_score,
                                 y_true, y_pred,
                                 beta=beta, average=average)
            assert_almost_equal(fbeta, 0)
def test_prf_warnings():
    """precision_recall_fscore_support must emit UndefinedMetricWarning
    with a message specific to the averaging mode whenever a score is
    ill-defined (no predicted or no true samples/labels)."""
    # average of per-label scores
    f, w = precision_recall_fscore_support, UndefinedMetricWarning
    my_assert = assert_warns_message
    for average in [None, 'weighted', 'macro']:
        # class 0 is never predicted -> precision undefined for it
        msg = ('Precision and F-score are ill-defined and '
               'being set to 0.0 in labels with no predicted samples.')
        my_assert(w, msg, f, [0, 1, 2], [1, 1, 2], average=average)

        # class 0 never occurs in y_true -> recall undefined for it
        msg = ('Recall and F-score are ill-defined and '
               'being set to 0.0 in labels with no true samples.')
        my_assert(w, msg, f, [1, 1, 2], [0, 1, 2], average=average)

    # average of per-sample scores
    msg = ('Precision and F-score are ill-defined and '
           'being set to 0.0 in samples with no predicted labels.')
    my_assert(w, msg, f, np.array([[1, 0], [1, 0]]),
              np.array([[1, 0], [0, 0]]), average='samples')

    msg = ('Recall and F-score are ill-defined and '
           'being set to 0.0 in samples with no true labels.')
    my_assert(w, msg, f, np.array([[1, 0], [0, 0]]),
              np.array([[1, 0], [1, 0]]),
              average='samples')

    # single score: micro-average
    msg = ('Precision and F-score are ill-defined and '
           'being set to 0.0 due to no predicted samples.')
    my_assert(w, msg, f, np.array([[1, 1], [1, 1]]),
              np.array([[0, 0], [0, 0]]), average='micro')

    msg = ('Recall and F-score are ill-defined and '
           'being set to 0.0 due to no true samples.')
    my_assert(w, msg, f, np.array([[0, 0], [0, 0]]),
              np.array([[1, 1], [1, 1]]), average='micro')

    # single positive label
    msg = ('Precision and F-score are ill-defined and '
           'being set to 0.0 due to no predicted samples.')
    my_assert(w, msg, f, [1, 1], [-1, -1], average='macro')

    msg = ('Recall and F-score are ill-defined and '
           'being set to 0.0 due to no true samples.')
    my_assert(w, msg, f, [-1, -1], [1, 1], average='macro')
def test_recall_warnings():
    """recall_score warns only when y_true has no positive samples."""
    all_ones = np.array([[1, 1], [1, 1]])
    all_zeros = np.array([[0, 0], [0, 0]])

    # True samples exist, nothing predicted: recall is defined, no warning.
    assert_no_warnings(recall_score, all_ones, all_zeros, average='micro')

    clean_warning_registry()
    with warnings.catch_warnings(record=True) as record:
        warnings.simplefilter('always')
        # No true samples at all: recall is ill-defined and must warn.
        recall_score(all_zeros, all_ones, average='micro')
        expected_msg = ('Recall is ill-defined and '
                        'being set to 0.0 due to no true samples.')
        assert_equal(str(record.pop().message), expected_msg)
def test_precision_warnings():
    """precision_score warns only when nothing is predicted positive."""
    all_ones = np.array([[1, 1], [1, 1]])
    all_zeros = np.array([[0, 0], [0, 0]])

    clean_warning_registry()
    with warnings.catch_warnings(record=True) as record:
        warnings.simplefilter('always')
        # No predicted samples: precision is ill-defined and must warn.
        precision_score(all_ones, all_zeros, average='micro')
        expected_msg = ('Precision is ill-defined and '
                        'being set to 0.0 due to no predicted samples.')
        assert_equal(str(record.pop().message), expected_msg)

    # Predictions exist (even though y_true is empty): no warning.
    assert_no_warnings(precision_score, all_zeros, all_ones,
                       average='micro')
def test_fscore_warnings():
    """f1_score and fbeta_score warn when either no samples are predicted
    or no true samples exist; check both directions for both scorers."""
    clean_warning_registry()
    with warnings.catch_warnings(record=True) as record:
        warnings.simplefilter('always')

        for score in [f1_score, partial(fbeta_score, beta=2)]:
            # Nothing predicted positive -> "no predicted samples" warning.
            score(np.array([[1, 1], [1, 1]]),
                  np.array([[0, 0], [0, 0]]),
                  average='micro')
            # record.pop() returns the most recent captured warning.
            assert_equal(str(record.pop().message),
                         'F-score is ill-defined and '
                         'being set to 0.0 due to no predicted samples.')
            # No positive samples in y_true -> "no true samples" warning.
            score(np.array([[0, 0], [0, 0]]),
                  np.array([[1, 1], [1, 1]]),
                  average='micro')
            assert_equal(str(record.pop().message),
                         'F-score is ill-defined and '
                         'being set to 0.0 due to no true samples.')
def test_prf_average_compat():
    # Ensure warning if f1_score et al.'s average is implicit for multiclass
    y_true = [1, 2, 3, 3]
    y_pred = [1, 2, 3, 1]
    y_true_bin = [0, 1, 1]
    y_pred_bin = [0, 1, 0]

    for metric in [precision_score, recall_score, f1_score,
                   partial(fbeta_score, beta=2)]:
        # Implicit average on multiclass input must warn (deprecation)
        # and behave exactly like average='weighted'.
        score = assert_warns(DeprecationWarning, metric, y_true, y_pred)
        score_weighted = assert_no_warnings(metric, y_true, y_pred,
                                            average='weighted')
        assert_equal(score, score_weighted,
                     'average does not act like "weighted" by default')

        # check binary passes without warning
        assert_no_warnings(metric, y_true_bin, y_pred_bin)

        # but binary with pos_label=None should behave like multiclass
        score = assert_warns(DeprecationWarning, metric,
                             y_true_bin, y_pred_bin, pos_label=None)
        score_weighted = assert_no_warnings(metric, y_true_bin, y_pred_bin,
                                            pos_label=None, average='weighted')
        assert_equal(score, score_weighted,
                     'average does not act like "weighted" by default with '
                     'binary data and pos_label=None')
def test__check_targets():
    # Check that _check_targets correctly merges target types, squeezes
    # output and fails if input lengths differ.
    IND = 'multilabel-indicator'
    MC = 'multiclass'
    BIN = 'binary'
    CNT = 'continuous'
    MMC = 'multiclass-multioutput'
    MCN = 'continuous-multioutput'

    # all of length 3: one representative target per type, so every
    # (type1, type2) pair can be fed to _check_targets.
    EXAMPLES = [
        (IND, np.array([[0, 1, 1], [1, 0, 0], [0, 0, 1]])),
        # must not be considered binary
        (IND, np.array([[0, 1], [1, 0], [1, 1]])),
        (MC, [2, 3, 1]),
        (BIN, [0, 1, 1]),
        (CNT, [0., 1.5, 1.]),
        (MC, np.array([[2], [3], [1]])),
        (BIN, np.array([[0], [1], [1]])),
        (CNT, np.array([[0.], [1.5], [1.]])),
        (MMC, np.array([[0, 2], [1, 3], [2, 3]])),
        (MCN, np.array([[0.5, 2.], [1.1, 3.], [2., 3.]])),
    ]
    # expected type given input types, or None for error
    # (types will be tried in either order)
    EXPECTED = {
        (IND, IND): IND,
        (MC, MC): MC,
        (BIN, BIN): BIN,

        (MC, IND): None,
        (BIN, IND): None,
        (BIN, MC): MC,

        # Disallowed types
        (CNT, CNT): None,
        (MMC, MMC): None,
        (MCN, MCN): None,
        (IND, CNT): None,
        (MC, CNT): None,
        (BIN, CNT): None,
        (MMC, CNT): None,
        (MCN, CNT): None,
        (IND, MMC): None,
        (MC, MMC): None,
        (BIN, MMC): None,
        (MCN, MMC): None,
        (IND, MCN): None,
        (MC, MCN): None,
        (BIN, MCN): None,
    }

    # Exercise every ordered pair of example targets.
    for (type1, y1), (type2, y2) in product(EXAMPLES, repeat=2):
        try:
            expected = EXPECTED[type1, type2]
        except KeyError:
            # Table stores unordered pairs; try the swapped key.
            expected = EXPECTED[type2, type1]
        if expected is None:
            assert_raises(ValueError, _check_targets, y1, y2)

            if type1 != type2:
                assert_raise_message(
                    ValueError,
                    "Can't handle mix of {0} and {1}".format(type1, type2),
                    _check_targets, y1, y2)

            else:
                if type1 not in (BIN, MC, IND):
                    assert_raise_message(ValueError,
                                         "{0} is not supported".format(type1),
                                         _check_targets, y1, y2)

        else:
            merged_type, y1out, y2out = _check_targets(y1, y2)
            assert_equal(merged_type, expected)
            if merged_type.startswith('multilabel'):
                # Multilabel targets are converted to sparse CSR matrices.
                assert_equal(y1out.format, 'csr')
                assert_equal(y2out.format, 'csr')
            else:
                assert_array_equal(y1out, np.squeeze(y1))
                assert_array_equal(y2out, np.squeeze(y2))
            # Mismatched lengths must always raise.
            assert_raises(ValueError, _check_targets, y1[:-1], y2)

    # Make sure seq of seq is not supported
    y1 = [(1, 2,), (0, 2, 3)]
    y2 = [(2,), (0, 2,)]
    msg = ('You appear to be using a legacy multi-label data representation. '
           'Sequence of sequences are no longer supported; use a binary array'
           ' or sparse matrix instead.')
    assert_raise_message(ValueError, msg, _check_targets, y1, y2)
def test_hinge_loss_binary():
    """Binary hinge_loss must be invariant to how the two classes are
    encoded ({-1, 1} vs {0, 2})."""
    decisions = np.array([-8.5, 0.5, 1.5, -0.3])
    expected = 1.2 / 4

    # Labels encoded as {-1, +1}.
    assert_equal(hinge_loss(np.array([-1, 1, 1, -1]), decisions), expected)
    # Same data, labels encoded as {0, 2}.
    assert_equal(hinge_loss(np.array([0, 2, 2, 0]), decisions), expected)
def test_hinge_loss_multiclass():
    """Multiclass hinge_loss must equal the hand-computed Crammer-Singer
    margin losses averaged over the samples."""
    pred_decision = np.array([
        [+0.36, -0.17, -0.58, -0.99],
        [-0.54, -0.37, -0.48, -0.58],
        [-1.45, -0.58, -0.38, -0.17],
        [-0.54, -0.38, -0.48, -0.58],
        [-2.36, -0.79, -0.27, +0.24],
        [-1.45, -0.58, -0.38, -0.17]
    ])
    y_true = np.array([0, 1, 2, 1, 3, 2])
    # Per sample: 1 - (true-class score) + (best competing score).
    dummy_losses = np.array([
        1 - pred_decision[0][0] + pred_decision[0][1],
        1 - pred_decision[1][1] + pred_decision[1][2],
        1 - pred_decision[2][2] + pred_decision[2][3],
        1 - pred_decision[3][1] + pred_decision[3][2],
        1 - pred_decision[4][3] + pred_decision[4][2],
        1 - pred_decision[5][2] + pred_decision[5][3]
    ])
    # Hinge: negative margins contribute zero loss.
    dummy_losses[dummy_losses <= 0] = 0
    dummy_hinge_loss = np.mean(dummy_losses)
    assert_equal(hinge_loss(y_true, pred_decision),
                 dummy_hinge_loss)
def test_hinge_loss_multiclass_missing_labels_with_labels_none():
    """hinge_loss must raise when y_true misses a class (here class 3)
    and no explicit labels argument is supplied."""
    truth = np.array([0, 1, 2, 2])  # four decision columns, class 3 absent
    decisions = np.array([
        [+1.27, 0.034, -0.68, -1.40],
        [-1.45, -0.58, -0.38, -0.17],
        [-2.36, -0.79, -0.27, +0.24],
        [-2.36, -0.79, -0.27, +0.24]
    ])
    expected_message = ("Please include all labels in y_true "
                        "or pass labels as third argument")
    assert_raise_message(ValueError,
                         expected_message,
                         hinge_loss, truth, decisions)
def test_hinge_loss_multiclass_with_missing_labels():
    """When a class is absent from y_true but declared via the labels
    argument, hinge_loss must still match the hand-computed value."""
    pred_decision = np.array([
        [+0.36, -0.17, -0.58, -0.99],
        [-0.55, -0.38, -0.48, -0.58],
        [-1.45, -0.58, -0.38, -0.17],
        [-0.55, -0.38, -0.48, -0.58],
        [-1.45, -0.58, -0.38, -0.17]
    ])
    y_true = np.array([0, 1, 2, 1, 2])
    labels = np.array([0, 1, 2, 3])  # class 3 declared but never observed
    # Per sample: 1 - (true-class score) + (best competing score).
    dummy_losses = np.array([
        1 - pred_decision[0][0] + pred_decision[0][1],
        1 - pred_decision[1][1] + pred_decision[1][2],
        1 - pred_decision[2][2] + pred_decision[2][3],
        1 - pred_decision[3][1] + pred_decision[3][2],
        1 - pred_decision[4][2] + pred_decision[4][3]
    ])
    # Hinge: negative margins contribute zero loss.
    dummy_losses[dummy_losses <= 0] = 0
    dummy_hinge_loss = np.mean(dummy_losses)
    assert_equal(hinge_loss(y_true, pred_decision, labels=labels),
                 dummy_hinge_loss)
def test_hinge_loss_multiclass_invariance_lists():
    # Currently, invariance of string and integer labels cannot be tested
    # in common invariance tests because invariance tests for multiclass
    # decision functions is not implemented yet.
    # String labels and plain (non-ndarray) nested lists must give the
    # same result as the integer/ndarray inputs used elsewhere.
    y_true = ['blue', 'green', 'red',
              'green', 'white', 'red']
    pred_decision = [
        [+0.36, -0.17, -0.58, -0.99],
        [-0.55, -0.38, -0.48, -0.58],
        [-1.45, -0.58, -0.38, -0.17],
        [-0.55, -0.38, -0.48, -0.58],
        [-2.36, -0.79, -0.27, +0.24],
        [-1.45, -0.58, -0.38, -0.17]]
    # Per sample: 1 - (true-class score) + (best competing score).
    dummy_losses = np.array([
        1 - pred_decision[0][0] + pred_decision[0][1],
        1 - pred_decision[1][1] + pred_decision[1][2],
        1 - pred_decision[2][2] + pred_decision[2][3],
        1 - pred_decision[3][1] + pred_decision[3][2],
        1 - pred_decision[4][3] + pred_decision[4][2],
        1 - pred_decision[5][2] + pred_decision[5][3]
    ])
    # Hinge: negative margins contribute zero loss.
    dummy_losses[dummy_losses <= 0] = 0
    dummy_hinge_loss = np.mean(dummy_losses)
    assert_equal(hinge_loss(y_true, pred_decision),
                 dummy_hinge_loss)
def test_log_loss():
    """Exercise log_loss: symbolic binary labels, multiclass probabilities,
    normalization, eps clipping, and shape-mismatch errors."""
    # binary case with symbolic labels ("no" < "yes")
    y_true = ["no", "no", "no", "yes", "yes", "yes"]
    y_pred = np.array([[0.5, 0.5], [0.1, 0.9], [0.01, 0.99],
                       [0.9, 0.1], [0.75, 0.25], [0.001, 0.999]])
    loss = log_loss(y_true, y_pred)
    assert_almost_equal(loss, 1.8817971)

    # multiclass case; adapted from http://bit.ly/RJJHWA
    y_true = [1, 0, 2]
    y_pred = [[0.2, 0.7, 0.1], [0.6, 0.2, 0.2], [0.6, 0.1, 0.3]]
    loss = log_loss(y_true, y_pred, normalize=True)
    assert_almost_equal(loss, 0.6904911)

    # check that we got all the shapes and axes right
    # by doubling the length of y_true and y_pred
    y_true *= 2
    y_pred *= 2
    loss = log_loss(y_true, y_pred, normalize=False)
    assert_almost_equal(loss, 0.6904911 * 6, decimal=6)

    # check eps and handling of absolute zero and one probabilities
    y_pred = np.asarray(y_pred) > .5  # hard 0/1 "probabilities"
    loss = log_loss(y_true, y_pred, normalize=True, eps=.1)
    # eps-clipping must be equivalent to clipping the inputs explicitly.
    assert_almost_equal(loss, log_loss(y_true, np.clip(y_pred, .1, .9)))

    # raise error if number of classes are not equal.
    y_true = [1, 0, 2]
    y_pred = [[0.2, 0.7], [0.6, 0.5], [0.4, 0.1]]
    assert_raises(ValueError, log_loss, y_true, y_pred)

    # case when y_true is a string array object
    y_true = ["ham", "spam", "spam", "ham"]
    y_pred = [[0.2, 0.7], [0.6, 0.5], [0.4, 0.1], [0.7, 0.2]]
    loss = log_loss(y_true, y_pred)
    assert_almost_equal(loss, 1.0383217, decimal=6)
def test_brier_score_loss():
    # Check brier_score_loss function
    y_true = np.array([0, 1, 1, 0, 1, 1])
    y_pred = np.array([0.1, 0.8, 0.9, 0.3, 1., 0.95])
    # Brier score = mean squared difference between labels and probabilities.
    true_score = linalg.norm(y_true - y_pred) ** 2 / len(y_true)

    assert_almost_equal(brier_score_loss(y_true, y_true), 0.0)
    assert_almost_equal(brier_score_loss(y_true, y_pred), true_score)
    # Invariant under relabeling of the two classes ({1,2} and {-1,1}).
    assert_almost_equal(brier_score_loss(1. + y_true, y_pred),
                        true_score)
    assert_almost_equal(brier_score_loss(2 * y_true - 1, y_pred),
                        true_score)
    # Errors: length mismatch and probabilities outside [0, 1].
    assert_raises(ValueError, brier_score_loss, y_true, y_pred[1:])
    assert_raises(ValueError, brier_score_loss, y_true, y_pred + 1.)
    assert_raises(ValueError, brier_score_loss, y_true, y_pred - 1.)
| bsd-3-clause |
hakyimlab/MetaXcan-Postprocess | source/ProcessMetaXcanResultsFolder.py | 1 | 3889 | #! /usr/bin/env python
__author__ = 'heroico'
import os
import re
import logging
import numpy
import Gencode
import Logging
import Utilities
import pandas
import rpy2.robjects as robjects
from rpy2.robjects.packages import importr
dplyr = importr('dplyr', on_conflict="warn")
#import rpy2.robjects.lib.dplyr as dplyr
robjects.r.source("Plots.R")
robjects.r.source("PlotsQQ.R")
from rpy2.robjects import numpy2ri
numpy2ri.activate()
def load_metaxcan_result(path):
    """Load a MetaXcan CSV and return an R data.frame (via rpy2) holding
    only gene_name/zscore/pvalue, with non-finite zscore rows dropped."""
    d = pandas.read_csv(path)
    # Boolean positional mask: keep rows whose zscore is finite.
    d = d.iloc[numpy.isfinite(d.zscore.values)]
    d = d[["gene_name", "zscore", "pvalue"]]
    # Convert column-wise to R vectors so the R plotting code can use it.
    d = robjects.r['data.frame'](
        gene_name = robjects.StrVector(d["gene_name"].values.tolist()),
        zscore = robjects.FloatVector(d["zscore"].values.tolist()),
        pvalue = robjects.FloatVector(d["pvalue"].values.tolist()),
        stringsAsFactors=False
    )
    return d
def annotation(path, only_numeric_chromosomes=True):
    """Load the gencode digest at *path* and return an R data.frame with
    chromosome, start_location and gene_name columns.

    With only_numeric_chromosomes (default), sex/mito contigs whose names
    are not pure digits are dropped.
    """
    CHROMOSOME = Gencode.GFTF.K_CHROMOSOME
    START_LOCATION = Gencode.GFTF.K_START_LOCATION
    GENE_NAME = Gencode.GFTF.K_GENE_NAME
    gencode = Gencode.load_gene_annotation(path)
    # Strip the "chr" prefix from contig names.
    # NOTE(review): attribute-style assignment only updates a column if one
    # named 'chromosome' already exists; if CHROMOSOME names a different
    # column this silently creates a plain attribute instead — confirm
    # against Gencode.GFTF.K_CHROMOSOME.
    gencode.chromosome = gencode[CHROMOSOME].str.replace("chr", "")
    if only_numeric_chromosomes:
        # Keep rows whose chromosome name consists of digits only.
        gencode = gencode[gencode[[CHROMOSOME]].apply(lambda x: x[0].isdigit(), axis=1)]
    gencode = robjects.r['data.frame'](
        chromosome=robjects.StrVector(gencode[CHROMOSOME].values.tolist()),
        start_location=robjects.IntVector(gencode[START_LOCATION].values.tolist()),
        gene_name = robjects.StrVector(gencode[GENE_NAME].values.tolist()),
        stringsAsFactors=False)
    return gencode
def process(gene_annotation, in_path, out_prefix):
    """Render <out_prefix>_qqplot.png and <out_prefix>_manhattan.png for a
    single MetaXcan result file, using the sourced R plotting routines."""
    logging.info("Processing %s", in_path)
    d = load_metaxcan_result(in_path)
    qq_path = out_prefix+"_qqplot.png"
    robjects.r.do_qq_unif_plot(d, qq_path, "QQ Plot")
    # Attach chromosome/position by gene name for the Manhattan plot.
    m = dplyr.inner_join(d, gene_annotation, by="gene_name")
    manhattan_path = out_prefix+"_manhattan.png"
    robjects.r.do_manhattan_plot_from_data(m, manhattan_path)
def run(args):
    """Plot QQ and Manhattan figures for every result file found in
    args.input_folder, writing the images under args.output_folder."""
    logging.info("Loading gencode")
    gene_annotation = annotation(args.gencode_file)
    if not os.path.exists(args.output_folder):
        os.makedirs(args.output_folder)
    # Optional regex filter on result-file names.
    pattern = re.compile(args.pattern) if args.pattern else None
    files = Utilities.filesFromFolder(args.input_folder, pattern)
    for f in files:
        in_path = os.path.join(args.input_folder, f)
        out_path = os.path.join(args.output_folder, f)
        # Drop the extension so per-plot suffixes can be appended.
        out_prefix = os.path.splitext(out_path)[0]
        process(gene_annotation, in_path, out_prefix)
if __name__ == "__main__":
    # Command-line entry point: parse options, configure logging, run.
    import argparse
    parser = argparse.ArgumentParser(description='Batch processing of metaxcan results in a folder')

    parser.add_argument("--input_folder",
                        help="name of folder with zscore files",
                        default=None)

    parser.add_argument("--pattern",
                        help="name pattern to select files",
                        default=None)

    parser.add_argument("--output_folder",
                        help="where to output stuff",
                        default="results/images")

    parser.add_argument("--gencode_file",
                        help="path of gencode gene information digest",
                        default="data/gencode.v18.genes.patched_contigs.summary.protein")

    parser.add_argument("--verbosity",
                        help="Log verbosity level. 1 is everything being logged. 10 is only high level messages, above 10 will hardly log anything",
                        default = "10")

    parser.add_argument("--throw",
                        action="store_true",
                        help="Throw exception on error",
                        default=False)

    args = parser.parse_args()

    Logging.configureLogging(int(args.verbosity))

    run(args)
rneher/FitnessInference | flu/figure_scripts/flu_examples_fig4ab_polarizer.py | 1 | 3665 | #!/ebio/ag-neher/share/programs/bin/python2.7
#
#makes figure 3ab consisting of 2 trees of year 2007 colored with the prediction
#for the external nodes. It also produces a joint tree with the following season
#to illustrates the prediction. In the actual figure, the trees are rotated by 90degrees
#but without changing the scale. This script is based on analyze_HA1.py
#
import matplotlib
matplotlib.use('pdf')
import sys
sys.path.append('/ebio/ag-neher/share/users/rneher/FluPrediction_code/flu/src')
sys.path.append('/ebio/ag-neher/share/users/rneher/FluPrediction_code/prediction_src')
import test_flu_prediction as test_flu
import predict_flu as flu
import tree_utils
from Bio import Phylo,AlignIO,SeqIO
from matplotlib import pyplot as plt
import numpy as np
from scipy import stats
import glob,pickle,gzip,os,argparse
from datetime import date
file_formats = ['.pdf', '.svg']
plt.rcParams.update(test_flu.mpl_params)
figure_folder = '../figures_ms/'
analysis_folder = test_flu.flu_analysis_folder
# parse the commandline arguments
parser = test_flu.make_flu_parser()
parser.add_argument('--tau', default = 1.0, type = float, help= 'memory time scale of the tree polarizer')
params=parser.parse_args()
# get name snippets to link output files to run parameters
base_name, name_mod = test_flu.get_fname(params)
year = params.year
params.gamma=1.0
params.diffusion=1.0
# set up the prediction and pass all parameters to the wrapper function
prediction = test_flu.predict_params(['polarizer'],
params)
prediction.calculate_polarizers(params.tau)
# define the methodes for which the predictions are to be evaluated
methods = [ ('polarizer', '_ext', prediction.terminals),
('polarizer', '_int', prediction.non_terminals)]
distances, distances_epi, test_data = test_flu.evaluate(prediction, methods, params)
# add prediction if we have a useful one
laessig_prediction = test_flu.get_LL(params.flutype)
if params.flutype=='H3N2' and (year in laessig_prediction) and (laessig_prediction[year].name not in [c.name for c in prediction.terminals]):
otherseqsnames = [laessig_prediction[year].name]
else:
otherseqsnames=[]
combined_data = test_flu.make_combined_data(prediction, test_data, otherseqsnames)
seq_labels = {prediction.best_node('polarizer').name:'*'} #, laessig_prediction[year].name:"L&L"}
tree_utils.label_nodes(prediction.T, seq_labels)
tree_utils.label_nodes(combined_data.T, seq_labels)
# plot a combined figure
fig = plt.figure(figsize = (12,6))
#subplot 1: only the prediction data
ax = plt.subplot(121)
#add panel label
plt.text(-0.06,0.95,'A', transform = plt.gca().transAxes, fontsize = 36)
plt.title('until Feb '+str(year))
plt.tight_layout()
tree_utils.plot_prediction_tree(prediction, method='polarizer', axes=ax, cb=True, offset = 0.0005, internal=False)
#subplot 2: prediction data and test data
ax = plt.subplot(122)
#add panel label
plt.text(-0.06,0.95,'B', transform = plt.gca().transAxes, fontsize = 36)
plt.title('until Feb '+str(year)+' + season '+str(year)+"/"+str(year+1)+" (grey)")
pred_names = [c.name for c in prediction.terminals]
tree_utils.erase_color(combined_data.T)
prediction.color_other_tree(combined_data.T.get_terminals(), method='polarizer', offset = 0.0005)
for c in combined_data.T.get_terminals():
if c.name in pred_names:
pass
# c.color = (178, 34, 34 )
else:
c.color = (0,255,255)
c.color = (100,100,100)
prediction.interpolate_color(combined_data.T)
tree_utils.draw_tree(combined_data.T, axes=ax, cb=False)
for ff in file_formats:
plt.savefig('../figures_ms/Fig4AB_'+base_name+'_combined_polarizer'+ff)
| mit |
lail3344/sms-tools | lectures/08-Sound-transformations/plots-code/hps-morph.py | 24 | 2691 | # function for doing a morph between two sounds using the hpsModel
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import get_window
import sys, os
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../models/'))
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../transformations/'))
import hpsModel as HPS
import hpsTransformations as HPST
import harmonicTransformations as HT
import utilFunctions as UF
inputFile1='../../../sounds/violin-B3.wav'
window1='blackman'
M1=1001
N1=1024
t1=-100
minSineDur1=0.05
nH=60
minf01=200
maxf01=300
f0et1=10
harmDevSlope1=0.01
stocf=0.1
inputFile2='../../../sounds/soprano-E4.wav'
window2='blackman'
M2=901
N2=1024
t2=-100
minSineDur2=0.05
minf02=250
maxf02=500
f0et2=10
harmDevSlope2=0.01
Ns = 512
H = 128
(fs1, x1) = UF.wavread(inputFile1)
(fs2, x2) = UF.wavread(inputFile2)
w1 = get_window(window1, M1)
w2 = get_window(window2, M2)
hfreq1, hmag1, hphase1, stocEnv1 = HPS.hpsModelAnal(x1, fs1, w1, N1, H, t1, nH, minf01, maxf01, f0et1, harmDevSlope1, minSineDur1, Ns, stocf)
hfreq2, hmag2, hphase2, stocEnv2 = HPS.hpsModelAnal(x2, fs2, w2, N2, H, t2, nH, minf02, maxf02, f0et2, harmDevSlope2, minSineDur2, Ns, stocf)
hfreqIntp = np.array([0, .5, 1, .5])
hmagIntp = np.array([0, .5, 1, .5])
stocIntp = np.array([0, .5, 1, .5])
yhfreq, yhmag, ystocEnv = HPST.hpsMorph(hfreq1, hmag1, stocEnv1, hfreq2, hmag2, stocEnv2, hfreqIntp, hmagIntp, stocIntp)
y, yh, yst = HPS.hpsModelSynth(yhfreq, yhmag, np.array([]), ystocEnv, Ns, H, fs1)
UF.wavwrite(y,fs1, 'hps-morph.wav')
plt.figure(figsize=(12, 9))
frame = 200
plt.subplot(2,3,1)
plt.vlines(hfreq1[frame,:], -100, hmag1[frame,:], lw=1.5, color='b')
plt.axis([0,5000, -80, -15])
plt.title('x1: harmonics')
plt.subplot(2,3,2)
plt.vlines(hfreq2[frame,:], -100, hmag2[frame,:], lw=1.5, color='r')
plt.axis([0,5000, -80, -15])
plt.title('x2: harmonics')
plt.subplot(2,3,3)
yhfreq[frame,:][yhfreq[frame,:]==0] = np.nan
plt.vlines(yhfreq[frame,:], -100, yhmag[frame,:], lw=1.5, color='c')
plt.axis([0,5000, -80, -15])
plt.title('y: harmonics')
stocaxis = (fs1/2)*np.arange(stocEnv1[0,:].size)/float(stocEnv1[0,:].size)
plt.subplot(2,3,4)
plt.plot(stocaxis, stocEnv1[frame,:], lw=1.5, marker='x', color='b')
plt.axis([0,20000, -73, -27])
plt.title('x1: stochastic')
plt.subplot(2,3,5)
plt.plot(stocaxis, stocEnv2[frame,:], lw=1.5, marker='x', color='r')
plt.axis([0,20000, -73, -27])
plt.title('x2: stochastic')
plt.subplot(2,3,6)
plt.plot(stocaxis, ystocEnv[frame,:], lw=1.5, marker='x', color='c')
plt.axis([0,20000, -73, -27])
plt.title('y: stochastic')
plt.tight_layout()
plt.savefig('hps-morph.png')
plt.show()
| agpl-3.0 |
fmfn/BayesianOptimization | tests/test_util.py | 1 | 4503 | import pytest
import numpy as np
from bayes_opt import BayesianOptimization
from bayes_opt.util import UtilityFunction, Colours
from bayes_opt.util import acq_max, load_logs, ensure_rng
from sklearn.gaussian_process.kernels import Matern
from sklearn.gaussian_process import GaussianProcessRegressor
def get_globals():
    """Build the shared test fixture: sample points X, targets y, a GP
    fitted to them, and a dense evaluation mesh over the unit square."""
    X = np.array([
        [0.00, 0.00],
        [0.99, 0.99],
        [0.00, 0.99],
        [0.99, 0.00],
        [0.50, 0.50],
        [0.25, 0.50],
        [0.50, 0.25],
        [0.75, 0.50],
        [0.50, 0.75],
    ])

    def get_y(X):
        # Smooth concave target with its maximum at (0.3, 0.6).
        return -(X[:, 0] - 0.3) ** 2 - 0.5 * (X[:, 1] - 0.6)**2 + 2

    y = get_y(X)

    # 200x200 grid over [0, 1)^2 flattened to (n_points, 2).
    mesh = np.dstack(
        np.meshgrid(np.arange(0, 1, 0.005), np.arange(0, 1, 0.005))
    ).reshape(-1, 2)

    GP = GaussianProcessRegressor(
        kernel=Matern(),
        n_restarts_optimizer=25,
    )
    GP.fit(X, y)

    return {'x': X, 'y': y, 'gp': GP, 'mesh': mesh}
def brute_force_maximum(MESH, GP, kind='ucb', kappa=1.0, xi=1.0):
    """Evaluate the acquisition function at every mesh point and return
    (best value, best point) as a brute-force reference for acq_max."""
    utility = UtilityFunction(kind=kind, kappa=kappa, xi=xi).utility
    scores = utility(MESH, GP, 2)
    best_index = np.argmax(scores)
    return scores[best_index], MESH[best_index]
# Module-level fixtures shared by all tests below.
GLOB = get_globals()
X, Y, GP, MESH = GLOB['x'], GLOB['y'], GLOB['gp'], GLOB['mesh']
def test_utility_fucntion():
    # NOTE: the "fucntion" typo in the name is kept on purpose -- renaming
    # would change the pytest test id.
    for kind in ("ucb", "ei", "poi"):
        util = UtilityFunction(kind=kind, kappa=1.0, xi=1.0)
        assert util.kind == kind
    with pytest.raises(NotImplementedError):
        util = UtilityFunction(kind="other", kappa=1.0, xi=1.0)
def test_acq_with_ucb():
    """`acq_max` must land within epsilon of the brute-force UCB maximum."""
    util = UtilityFunction(kind="ucb", kappa=1.0, xi=1.0)
    episilon = 1e-2
    y_max = 2.0

    max_arg = acq_max(util.utility, GP, y_max,
                      bounds=np.array([[0, 1], [0, 1]]),
                      random_state=ensure_rng(0), n_iter=20)
    _, brute_max_arg = brute_force_maximum(MESH, GP, kind='ucb', kappa=1.0,
                                           xi=1.0)

    assert all(abs(brute_max_arg - max_arg) < episilon)
def test_acq_with_ei():
    """`acq_max` should find the brute-force EI maximum within epsilon."""
    util = UtilityFunction(kind="ei", kappa=1.0, xi=1e-6)
    episilon = 1e-2
    y_max = 2.0
    max_arg = acq_max(
        util.utility,
        GP,
        y_max,
        bounds=np.array([[0, 1], [0, 1]]),
        random_state=ensure_rng(0),
        n_iter=200,
    )
    _, brute_max_arg = brute_force_maximum(MESH, GP, kind='ei', kappa=1.0, xi=1e-6)
    assert all(abs(brute_max_arg - max_arg) < episilon)
def test_acq_with_poi():
    """`acq_max` should find the brute-force POI maximum within epsilon."""
    util = UtilityFunction(kind="poi", kappa=1.0, xi=1e-4)
    episilon = 1e-2
    y_max = 2.0
    max_arg = acq_max(
        util.utility,
        GP,
        y_max,
        bounds=np.array([[0, 1], [0, 1]]),
        random_state=ensure_rng(0),
        n_iter=200,
    )
    _, brute_max_arg = brute_force_maximum(MESH, GP, kind='poi', kappa=1.0, xi=1e-4)
    assert all(abs(brute_max_arg - max_arg) < episilon)
def test_logs():
    """Loading logged observations populates the optimizer's space; logs
    whose parameter keys do not match the optimizer's pbounds are rejected.

    (`pytest` is already imported at module level, so the old function-local
    re-import was removed.)
    """
    def f(x, y):
        return -x ** 2 - (y - 1) ** 2 + 1

    optimizer = BayesianOptimization(
        f=f,
        pbounds={"x": (-2, 2), "y": (-2, 2)}
    )
    assert len(optimizer.space) == 0
    # A single path and a list of paths are both accepted; reloading the
    # same observations must not duplicate them.
    load_logs(optimizer, "./tests/test_logs.json")
    assert len(optimizer.space) == 5
    load_logs(optimizer, ["./tests/test_logs.json"])
    assert len(optimizer.space) == 5

    other_optimizer = BayesianOptimization(
        f=lambda x: -x ** 2,
        pbounds={"x": (-2, 2)}
    )
    with pytest.raises(ValueError):
        load_logs(other_optimizer, ["./tests/test_logs.json"])
def test_colours():
    """Each wrapper must surround the text with its colour code and a reset."""
    cases = {
        Colours.END: Colours.black,
        Colours.BLUE: Colours.blue,
        Colours.BOLD: Colours.bold,
        Colours.CYAN: Colours.cyan,
        Colours.DARKCYAN: Colours.darkcyan,
        Colours.GREEN: Colours.green,
        Colours.PURPLE: Colours.purple,
        Colours.RED: Colours.red,
        Colours.UNDERLINE: Colours.underline,
        Colours.YELLOW: Colours.yellow,
    }
    for colour, wrapper in cases.items():
        wrapped_directly = Colours._wrap_colour("test", colour)
        wrapped_via_helper = wrapper("test")
        for text in (wrapped_directly, wrapped_via_helper):
            assert text.split("test") == [colour, Colours.END]
if __name__ == '__main__':
    r"""
    CommandLine:
        python tests/test_util.py
    """
    # Fixed the stale file name in the CommandLine note (it referenced
    # test_target_space.py); `pytest` is already imported at module level.
    pytest.main([__file__])
| mit |
ElDeveloper/scikit-learn | examples/svm/plot_weighted_samples.py | 95 | 1943 | """
=====================
SVM: Weighted samples
=====================
Plot decision function of a weighted dataset, where the size of points
is proportional to its weight.
The sample weighting rescales the C parameter, which means that the classifier
puts more emphasis on getting these points right. The effect might often be
subtle.
To emphasize the effect here, we particularly weight outliers, making the
deformation of the decision boundary very visible.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
def plot_decision_function(classifier, sample_weight, axis, title):
    """Draw the classifier's decision surface and the weighted samples on `axis`.

    Marker area is scaled by `sample_weight`, so heavier points render larger.
    NOTE(review): reads the module-level globals ``X`` and ``y`` defined below
    in this script -- confirm before reusing elsewhere.
    """
    # plot the decision function
    xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
    Z = classifier.decision_function(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    # plot the line, the points, and the nearest vectors to the plane
    axis.contourf(xx, yy, Z, alpha=0.75, cmap=plt.cm.bone)
    axis.scatter(X[:, 0], X[:, 1], c=y, s=100 * sample_weight, alpha=0.9,
                 cmap=plt.cm.bone)
    axis.axis('off')
    axis.set_title(title)
# we create 20 points: ten shifted towards (1, 1), ten standard-normal
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
y = [1] * 10 + [-1] * 10
sample_weight_last_ten = abs(np.random.randn(len(X)))
sample_weight_constant = np.ones(len(X))
# and bigger weights to some outliers
sample_weight_last_ten[15:] *= 5
sample_weight_last_ten[9] *= 15
# for reference, first fit without class weights
# fit the model
clf_weights = svm.SVC()
clf_weights.fit(X, y, sample_weight=sample_weight_last_ten)
clf_no_weights = svm.SVC()
clf_no_weights.fit(X, y)
# left panel: uniform weights; right panel: boosted-outlier weights
fig, axes = plt.subplots(1, 2, figsize=(14, 6))
plot_decision_function(clf_no_weights, sample_weight_constant, axes[0],
                       "Constant weights")
plot_decision_function(clf_weights, sample_weight_last_ten, axes[1],
                       "Modified weights")
plt.show()
| bsd-3-clause |
ndingwall/scikit-learn | sklearn/linear_model/tests/test_huber.py | 12 | 7600 | # Authors: Manoj Kumar mks542@nyu.edu
# License: BSD 3 clause
import numpy as np
from scipy import optimize, sparse
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.datasets import make_regression
from sklearn.linear_model import (
HuberRegressor, LinearRegression, SGDRegressor, Ridge)
from sklearn.linear_model._huber import _huber_loss_and_gradient
def make_regression_with_outliers(n_samples=50, n_features=20):
    """Regression data where roughly 10% of the rows are replaced by noise."""
    rng = np.random.RandomState(0)
    X, y = make_regression(
        n_samples=n_samples, n_features=n_features,
        random_state=0, noise=0.05)
    # Corrupt 10% of the samples (with replacement, so possibly fewer rows).
    num_noise = int(0.1 * n_samples)
    corrupted = rng.randint(0, n_samples, num_noise)
    X[corrupted, :] = 2.0 * rng.normal(0, 1, (num_noise, X.shape[1]))
    return X, y
def test_huber_equals_lr_for_high_epsilon():
    # With a huge epsilon no residual is ever treated as an outlier, so
    # HuberRegressor should match plain LinearRegression.
    # (The old comment said "Ridge", which was wrong.)
    X, y = make_regression_with_outliers()
    lr = LinearRegression()
    lr.fit(X, y)
    huber = HuberRegressor(epsilon=1e3, alpha=0.0)
    huber.fit(X, y)
    assert_almost_equal(huber.coef_, lr.coef_, 3)
    assert_almost_equal(huber.intercept_, lr.intercept_, 2)
def test_huber_max_iter():
    """With max_iter=1 the solver must stop after exactly one iteration."""
    X, y = make_regression_with_outliers()
    model = HuberRegressor(max_iter=1)
    model.fit(X, y)
    assert model.n_iter_ == model.max_iter
def test_huber_gradient():
    # Test that the gradient calculated by _huber_loss_and_gradient is correct
    rng = np.random.RandomState(1)
    X, y = make_regression_with_outliers()
    sample_weight = rng.randint(1, 3, (y.shape[0]))
    def loss_func(x, *args):
        return _huber_loss_and_gradient(x, *args)[0]
    def grad_func(x, *args):
        return _huber_loss_and_gradient(x, *args)[1]
    # Check using optimize.check_grad that the gradients are equal.
    for _ in range(5):
        # Check for both fit_intercept and otherwise.
        for n_features in [X.shape[1] + 1, X.shape[1] + 2]:
            w = rng.randn(n_features)
            # The last entry of w is the scale, which must be positive.
            w[-1] = np.abs(w[-1])
            grad_same = optimize.check_grad(
                loss_func, grad_func, w, X, y, 0.01, 0.1, sample_weight)
            assert_almost_equal(grad_same, 1e-6, 4)
def test_huber_sample_weights():
    # Test the sample_weight implementation of HuberRegressor.
    X, y = make_regression_with_outliers()
    huber = HuberRegressor()
    huber.fit(X, y)
    huber_coef = huber.coef_
    huber_intercept = huber.intercept_
    # Rescale coefs before comparing with assert_array_almost_equal to make
    # sure that the number of decimal places used is somewhat insensitive to
    # the amplitude of the coefficients and therefore to the scale of the
    # data and the regularization parameter
    scale = max(np.mean(np.abs(huber.coef_)),
                np.mean(np.abs(huber.intercept_)))
    # Unit weights must reproduce the unweighted fit.
    huber.fit(X, y, sample_weight=np.ones(y.shape[0]))
    assert_array_almost_equal(huber.coef_ / scale, huber_coef / scale)
    assert_array_almost_equal(huber.intercept_ / scale,
                              huber_intercept / scale)
    # Integer weights must be equivalent to physically repeating the rows:
    # row 1 with weight 3 and row 3 with weight 2.
    X, y = make_regression_with_outliers(n_samples=5, n_features=20)
    X_new = np.vstack((X, np.vstack((X[1], X[1], X[3]))))
    y_new = np.concatenate((y, [y[1]], [y[1]], [y[3]]))
    huber.fit(X_new, y_new)
    huber_coef = huber.coef_
    huber_intercept = huber.intercept_
    sample_weight = np.ones(X.shape[0])
    sample_weight[1] = 3
    sample_weight[3] = 2
    huber.fit(X, y, sample_weight=sample_weight)
    assert_array_almost_equal(huber.coef_ / scale, huber_coef / scale)
    assert_array_almost_equal(huber.intercept_ / scale,
                              huber_intercept / scale)
    # Test sparse implementation with sample weights.
    X_csr = sparse.csr_matrix(X)
    huber_sparse = HuberRegressor()
    huber_sparse.fit(X_csr, y, sample_weight=sample_weight)
    assert_array_almost_equal(huber_sparse.coef_ / scale,
                              huber_coef / scale)
def test_huber_sparse():
    """Dense and CSR inputs must yield identical coefficients and outliers."""
    X, y = make_regression_with_outliers()
    dense_model = HuberRegressor(alpha=0.1)
    dense_model.fit(X, y)

    sparse_model = HuberRegressor(alpha=0.1)
    sparse_model.fit(sparse.csr_matrix(X), y)

    assert_array_almost_equal(sparse_model.coef_, dense_model.coef_)
    assert_array_equal(dense_model.outliers_, sparse_model.outliers_)
def test_huber_scaling_invariant():
    """The detected outlier mask must not depend on the scale of X or y."""
    X, y = make_regression_with_outliers()
    huber = HuberRegressor(fit_intercept=False, alpha=0.0, max_iter=100)
    huber.fit(X, y)
    reference_mask = huber.outliers_
    # Sanity check: not every sample is flagged as an outlier.
    assert not np.all(reference_mask)

    # Rescaling y alone, then X and y together, must leave the mask intact.
    for X_scaled, y_scaled in ((X, 2. * y), (2. * X, 2. * y)):
        huber.fit(X_scaled, y_scaled)
        assert_array_equal(huber.outliers_, reference_mask)
def test_huber_and_sgd_same_results():
    # Test they should converge to same coefficients for same parameters
    X, y = make_regression_with_outliers(n_samples=10, n_features=2)
    # Fit once to find out the scale parameter. Scale down X and y by scale
    # so that the scale parameter is optimized to 1.0
    huber = HuberRegressor(fit_intercept=False, alpha=0.0, max_iter=100,
                           epsilon=1.35)
    huber.fit(X, y)
    X_scale = X / huber.scale_
    y_scale = y / huber.scale_
    huber.fit(X_scale, y_scale)
    assert_almost_equal(huber.scale_, 1.0, 3)
    # SGD with the 'huber' loss, no regularization and the same epsilon
    # should agree with HuberRegressor once the scale is normalized to 1.
    sgdreg = SGDRegressor(
        alpha=0.0, loss="huber", shuffle=True, random_state=0, max_iter=10000,
        fit_intercept=False, epsilon=1.35, tol=None)
    sgdreg.fit(X_scale, y_scale)
    assert_array_almost_equal(huber.coef_, sgdreg.coef_, 1)
def test_huber_warm_start():
    """Refitting a warm-started, already-converged model should change
    nothing and take zero iterations."""
    X, y = make_regression_with_outliers()
    huber_warm = HuberRegressor(
        alpha=1.0, max_iter=10000, warm_start=True, tol=1e-1)
    huber_warm.fit(X, y)
    huber_warm_coef = huber_warm.coef_.copy()
    huber_warm.fit(X, y)
    # SciPy performs the tol check after doing the coef updates, so
    # these would be almost same but not equal.
    assert_array_almost_equal(huber_warm.coef_, huber_warm_coef, 1)
    assert huber_warm.n_iter_ == 0
def test_huber_better_r2_score():
    # Test that huber gives a better r2 score than Ridge on the non-outliers.
    X, y = make_regression_with_outliers()
    huber = HuberRegressor(alpha=0.01)
    huber.fit(X, y)
    # Samples whose absolute residual is below epsilon * scale are inliers.
    linear_loss = np.dot(X, huber.coef_) + huber.intercept_ - y
    mask = np.abs(linear_loss) < huber.epsilon * huber.scale_
    huber_score = huber.score(X[mask], y[mask])
    huber_outlier_score = huber.score(X[~mask], y[~mask])
    # The Ridge regressor should be influenced by the outliers and hence
    # give a worse score on the non-outliers as compared to the huber
    # regressor.
    ridge = Ridge(alpha=0.01)
    ridge.fit(X, y)
    ridge_score = ridge.score(X[mask], y[mask])
    ridge_outlier_score = ridge.score(X[~mask], y[~mask])
    assert huber_score > ridge_score
    # The huber model should also fit poorly on the outliers.
    assert ridge_outlier_score > huber_outlier_score
def test_huber_bool():
    """Fitting on a boolean design matrix must not raise."""
    X, y = make_regression(n_samples=200, n_features=2, noise=4.0,
                           random_state=0)
    boolean_design = X > 0
    HuberRegressor().fit(boolean_design, y)
| bsd-3-clause |
rs2/pandas | pandas/tests/frame/conftest.py | 2 | 8855 | from itertools import product
import numpy as np
import pytest
from pandas import DataFrame, NaT, date_range
import pandas._testing as tm
@pytest.fixture(params=product([True, False], [True, False]))
def close_open_fixture(request):
    # Parametrized over all four (close, open) boolean combinations.
    return request.param
@pytest.fixture
def float_frame_with_na():
    """30x4 float DataFrame (columns A-D, unique string index) with NaN in
    rows 5-9 (all columns) and rows 15-19 (last two columns)."""
    frame = DataFrame(tm.getSeriesData())
    # Inject missing values.
    frame.iloc[5:10] = np.nan
    frame.iloc[15:20, -2:] = np.nan
    return frame
@pytest.fixture
def bool_frame_with_na():
    """30x4 boolean DataFrame (object dtype, columns A-D) with NaN in rows
    5-9 (all columns) and rows 15-19 (last two columns)."""
    frame = (DataFrame(tm.getSeriesData()) > 0).astype(object)
    # Inject missing values.
    frame.iloc[5:10] = np.nan
    frame.iloc[15:20, -2:] = np.nan
    return frame
@pytest.fixture
def float_string_frame():
    """30x5 DataFrame: float columns A-D plus a constant string column 'foo'."""
    frame = DataFrame(tm.getSeriesData())
    frame["foo"] = "bar"
    return frame
@pytest.fixture
def mixed_float_frame():
    """30x4 DataFrame with one float dtype per column:
    A/B float32, C float16, D float64."""
    frame = DataFrame(tm.getSeriesData())
    for col, dtype in (("A", "float32"), ("B", "float32"),
                       ("C", "float16"), ("D", "float64")):
        frame[col] = frame[col].astype(dtype)
    return frame
@pytest.fixture
def mixed_int_frame():
    """30x4 integer DataFrame with index of unique strings:
    A int32, B uint64 (all ones), C uint8, D int64 copy of C."""
    df = DataFrame({k: v.astype(int) for k, v in tm.getSeriesData().items()})
    df.A = df.A.astype("int32")
    df.B = np.ones(len(df.B), dtype="uint64")
    df.C = df.C.astype("uint8")
    # NOTE(review): D is derived from C (not from the original D), so C and D
    # hold the same values at different dtypes -- confirm this is intentional.
    df.D = df.C.astype("int64")
    return df
@pytest.fixture
def mixed_type_frame():
    """10-row RangeIndex DataFrame mixing float, int, string, float32 and
    int32 columns ('a', 'b', 'c', 'float32', 'int32')."""
    data = {
        "a": 1.0,
        "b": 2,
        "c": "foo",
        "float32": np.array([1.0] * 10, dtype="float32"),
        "int32": np.array([1] * 10, dtype="int32"),
    }
    return DataFrame(data, index=np.arange(10))
@pytest.fixture
def timezone_frame():
    """3-row DataFrame of date ranges: A naive, B US/Eastern, C CET;
    the middle row of B and C is NaT."""
    columns = {
        "A": date_range("20130101", periods=3),
        "B": date_range("20130101", periods=3, tz="US/Eastern"),
        "C": date_range("20130101", periods=3, tz="CET"),
    }
    frame = DataFrame(columns)
    frame.iloc[1, 1] = NaT
    frame.iloc[1, 2] = NaT
    return frame
@pytest.fixture
def uint64_frame():
    """Two-column uint64 DataFrame: A small ints, B values above 2**63."""
    data = {"A": np.arange(3), "B": [2 ** 63, 2 ** 63 + 5, 2 ** 63 + 10]}
    return DataFrame(data, dtype=np.uint64)
@pytest.fixture
def simple_frame():
    """3x3 float DataFrame: columns one/two/three, index a/b/c, values 1..9."""
    values = np.arange(1.0, 10.0).reshape(3, 3)
    return DataFrame(values, columns=["one", "two", "three"],
                     index=["a", "b", "c"])
@pytest.fixture
def frame_of_index_cols():
    """DataFrame whose columns are usable as index levels: A and B contain
    duplicates but are jointly unique; C, D, E and the tuple-labelled
    column are unique."""
    data = {
        "A": ["foo", "foo", "foo", "bar", "bar"],
        "B": ["one", "two", "three", "one", "two"],
        "C": ["a", "b", "c", "d", "e"],
        "D": np.random.randn(5),
        "E": np.random.randn(5),
        ("tuple", "as", "label"): np.random.randn(5),
    }
    return DataFrame(data)
| bsd-3-clause |
ajribeiro/rtapp | data/ml.py | 1 | 5670 | import cPickle
import mysql.connector
import dbUtils
import os
import hashlib
from sklearn.feature_extraction import FeatureHasher, DictVectorizer
def classify(title):
with open('data/perceptron.pkl', 'rb') as fid:
clf = cPickle.load(fid)
(user,pas) = dbUtils.get_user_pass()
mydb = dbUtils.db_access('rottentomatoes', user,pas)
qry = "SELECT * FROM imdb_info as i inner join "
qry += "rt_info as r on i.imdb_id = r.imdb_id "
qry += "where i.title = \""+title+"\";"
if ';' in title: return None
mydb.cursor.execute(qry)
temp_movie = [m for m in mydb.cursor]
print temp_movie
poster = temp_movie[0][13]
thumb = temp_movie[0][12]
movies = [[m[0],m[1],m[3],m[5],m[9]>=60] for m in temp_movie]
if movies == []: return None
mov = movies[0]
[id,title,director,budget,rat] = mov
qry = "SELECT actor_id,nam from casts where imdb_id = "+"\""+id+"\";"
mydb.cursor.execute(qry)
actors = [m for m in mydb.cursor]
mov.append(actors)
movdict = {}
for a in mov[5]:
movdict[a[0]] = 1
# movdict['budget'] = mov[3]
hash_object = hashlib.sha224(mov[2])
hex_dig = hash_object.hexdigest()
movdict[hex_dig] = 1
with open('data/trainingdata.pkl', 'rb') as fid:
training_data = cPickle.load(fid)
training_data.append(movdict)
vec = DictVectorizer()
X_train = vec.fit_transform(training_data)
pred = clf.predict(X_train[-1])
print pred[0],mov[4]
return (title, pred[0], mov[4], thumb, poster)
def regress_cast(actors,writers,director):
with open('data/knnreg.pkl', 'rb') as fid:
clf = cPickle.load(fid)
(user,pas) = dbUtils.get_user_pass()
mydb = dbUtils.db_access('rottentomatoes', user,pas)
qry = "SELECT ratio,total FROM director_stats WHERE "
qry += "director = %s;" % ("\""+director+"\"")
print qry
director_stats = mydb.query(qry)
directorsum = 0.
total = 1e-6
for w in director_stats:
directorsum += w[0]*w[1]
total += w[1]
directorsum /= float(total)
actor_ids = ["\""+a+"\"" for a in actors]
actorlist = "("
for w in actor_ids:
actorlist += w+","
actorlist = actorlist[:-1]+")"
qry = "SELECT ratio,total FROM actor_stats WHERE "
qry += "actor_name IN %s;" % actorlist
print qry
actor_stats = mydb.query(qry)
actorsum = 0.
total = 1e-6
for w in actor_stats:
actorsum += w[0]*w[1]
total += w[1]
actorsum /= float(total)
writer_ids = ("\""+a+"\"" for a in writers)
writerlist = "("
for w in writer_ids:
writerlist += w+","
writerlist = writerlist[:-1]+")"
qry = "SELECT ratio,total FROM writer_stats WHERE "
qry += "writer_name IN %s;" % writerlist
print qry
writer_stats = mydb.query(qry)
writersum = 0.
total = 1e-6
for w in writer_stats:
writersum += w[0]*w[1]
total += w[1]
writersum /= float(total)
prediction = clf.predict([actorsum,directorsum,writersum])[0]
return prediction
def regress_title(title):
with open('data/knnreg.pkl', 'rb') as fid:
clf = cPickle.load(fid)
(user,pas) = dbUtils.get_user_pass()
mydb = dbUtils.db_access('rottentomatoes', user,pas)
qry = "SELECT * FROM imdb_info as i inner join "
qry += "rt_info as r on i.imdb_id = r.imdb_id "
qry += "where i.title = %s;" % ("\""+title+"\"")
if ';' in title: return None
movies = mydb.query(qry)[0]
# movies = [[m[0],m[1],m[3],m[5],m[9]>=60] for m in temp_movie]
if movies == []: return None
poster = movies[13]
thumb = movies[12]
imdb_id = movies[0]
director = movies[3]
print director
qry = "SELECT ratio,total FROM director_stats WHERE "
qry += "director = %s;" % ("\""+director+"\"")
print qry
director_stats = mydb.query(qry)
directorsum = 0.
total = 1e-6
for w in director_stats:
directorsum += w[0]*w[1]
total += w[1]
directorsum /= float(total)
qry = "SELECT actor_id,nam FROM casts WHERE "
qry += "imdb_id = %s;" % ("\""+imdb_id+"\"")
actors = mydb.query(qry)
actor_ids = ["\""+a[0]+"\"" for a in actors]
actornames = [a[1] for a in actors]
actorlist = "("
for w in actor_ids:
actorlist += w+","
actorlist = actorlist[:-1]+")"
qry = "SELECT ratio,total FROM actor_stats WHERE "
qry += "actor_id IN %s;" % actorlist
print qry
actor_stats = mydb.query(qry)
actorsum = 0.
total = 1e-6
for w in actor_stats:
actorsum += w[0]*w[1]
total += w[1]
actorsum /= float(total)
qry = "SELECT writer_id,writer_name FROM writers WHERE "
qry += "imdb_id = %s;" % ("\""+imdb_id+"\"")
writers = mydb.query(qry)
writer_ids = ("\""+a[0]+"\"" for a in writers)
writernames = [a[1] for a in writers]
writerlist = "("
for w in writer_ids:
writerlist += w+","
writerlist = writerlist[:-1]+")"
qry = "SELECT ratio,total FROM writer_stats WHERE "
qry += "writer_id IN %s;" % writerlist
print qry
writer_stats = mydb.query(qry)
writersum = 0.
total = 1e-6
for w in writer_stats:
writersum += w[0]*w[1]
total += w[1]
writersum /= float(total)
prediction = clf.predict([actorsum,directorsum,writersum])[0]
qry = "SELECT critic FROM rt_info WHERE "
qry += "imdb_id = %s;" % ("\""+imdb_id+"\"")
actual = mydb.query(qry)[0]
return {'prediction':prediction, 'actual':actual, \
'cast':actornames, 'director':director, 'writers':writernames,
'thumb':thumb, 'poster':poster}
| mit |
ilayn/scipy | scipy/optimize/_lsq/least_squares.py | 12 | 39190 | """Generic interface for least-squares minimization."""
from warnings import warn
import numpy as np
from numpy.linalg import norm
from scipy.sparse import issparse, csr_matrix
from scipy.sparse.linalg import LinearOperator
from scipy.optimize import _minpack, OptimizeResult
from scipy.optimize._numdiff import approx_derivative, group_columns
from .trf import trf
from .dogbox import dogbox
from .common import EPS, in_bounds, make_strictly_feasible
TERMINATION_MESSAGES = {
-1: "Improper input parameters status returned from `leastsq`",
0: "The maximum number of function evaluations is exceeded.",
1: "`gtol` termination condition is satisfied.",
2: "`ftol` termination condition is satisfied.",
3: "`xtol` termination condition is satisfied.",
4: "Both `ftol` and `xtol` termination conditions are satisfied."
}
FROM_MINPACK_TO_COMMON = {
0: -1, # Improper input parameters from MINPACK.
1: 2,
2: 3,
3: 4,
4: 1,
5: 0
# There are 6, 7, 8 for too small tolerance parameters,
# but we guard against it by checking ftol, xtol, gtol beforehand.
}
def call_minpack(fun, x0, jac, ftol, xtol, gtol, max_nfev, x_scale, diff_step):
    """Solve an unbounded least-squares problem via MINPACK's Levenberg-Marquardt.

    Dispatches to ``_lmdif`` (forward-difference Jacobian) when `jac` is None
    and to ``_lmder`` (analytic Jacobian) otherwise, then repackages MINPACK's
    raw output as an `OptimizeResult` consistent with the other solvers.
    """
    n = x0.size
    if diff_step is None:
        epsfcn = EPS
    else:
        # MINPACK's epsfcn is the *squared* relative step of the forward
        # difference scheme.
        epsfcn = diff_step**2
    # Compute MINPACK's `diag`, which is inverse of our `x_scale` and
    # ``x_scale='jac'`` corresponds to ``diag=None``.
    if isinstance(x_scale, str) and x_scale == 'jac':
        diag = None
    else:
        diag = 1 / x_scale
    full_output = True
    col_deriv = False
    factor = 100.0
    if jac is None:
        if max_nfev is None:
            # n squared to account for Jacobian evaluations.
            max_nfev = 100 * n * (n + 1)
        x, info, status = _minpack._lmdif(
            fun, x0, (), full_output, ftol, xtol, gtol,
            max_nfev, epsfcn, factor, diag)
    else:
        if max_nfev is None:
            max_nfev = 100 * n
        x, info, status = _minpack._lmder(
            fun, jac, x0, (), full_output, col_deriv,
            ftol, xtol, gtol, max_nfev, factor, diag)
    f = info['fvec']
    # Evaluate the Jacobian at the solution for the reported gradient.
    if callable(jac):
        J = jac(x)
    else:
        J = np.atleast_2d(approx_derivative(fun, x))
    cost = 0.5 * np.dot(f, f)
    g = J.T.dot(f)
    g_norm = norm(g, ord=np.inf)
    nfev = info['nfev']
    # `njev` is absent when the Jacobian was estimated numerically.
    njev = info.get('njev', None)
    status = FROM_MINPACK_TO_COMMON[status]
    # MINPACK never handles bounds, so no variable is ever at a bound.
    active_mask = np.zeros_like(x0, dtype=int)
    return OptimizeResult(
        x=x, cost=cost, fun=f, jac=J, grad=g, optimality=g_norm,
        active_mask=active_mask, nfev=nfev, njev=njev, status=status)
def prepare_bounds(bounds, n):
    """Convert a (lb, ub) pair to float arrays, broadcasting scalars to size n."""
    lb, ub = (np.asarray(b, dtype=float) for b in bounds)
    if lb.ndim == 0:
        lb = np.resize(lb, n)
    if ub.ndim == 0:
        ub = np.resize(ub, n)
    return lb, ub
def check_tolerance(ftol, xtol, gtol, method):
    """Normalize tolerances: None becomes 0, sub-epsilon values trigger a
    warning, and invalid combinations raise ValueError."""
    def sanitize(value, name):
        if value is None:
            return 0
        if value < EPS:
            warn("Setting `{}` below the machine epsilon ({:.2e}) effectively "
                 "disables the corresponding termination condition."
                 .format(name, EPS))
        return value

    ftol = sanitize(ftol, "ftol")
    xtol = sanitize(xtol, "xtol")
    gtol = sanitize(gtol, "gtol")

    # 'lm' requires every tolerance to be meaningful; the other methods
    # only need at least one active termination condition.
    if method == "lm" and (ftol < EPS or xtol < EPS or gtol < EPS):
        raise ValueError("All tolerances must be higher than machine epsilon "
                         "({:.2e}) for method 'lm'.".format(EPS))
    elif ftol < EPS and xtol < EPS and gtol < EPS:
        raise ValueError("At least one of the tolerances must be higher than "
                         "machine epsilon ({:.2e}).".format(EPS))

    return ftol, xtol, gtol
def check_x_scale(x_scale, x0):
    """Validate `x_scale`: the string 'jac' or positive finite numbers
    broadcastable to the shape of `x0`."""
    if isinstance(x_scale, str) and x_scale == 'jac':
        return x_scale

    error_msg = ("`x_scale` must be 'jac' or array_like with "
                 "positive numbers.")
    try:
        scale = np.asarray(x_scale, dtype=float)
    except (ValueError, TypeError):
        raise ValueError(error_msg)
    if not (np.all(np.isfinite(scale)) and np.all(scale > 0)):
        raise ValueError(error_msg)

    # Broadcast a scalar scale to the shape of x0.
    if scale.ndim == 0:
        scale = np.resize(scale, x0.shape)
    if scale.shape != x0.shape:
        raise ValueError("Inconsistent shapes between `x_scale` and `x0`.")
    return scale
def check_jac_sparsity(jac_sparsity, m, n):
    """Validate a Jacobian sparsity pattern of shape (m, n) and precompute
    its column groups; returns None when no pattern was given."""
    if jac_sparsity is None:
        return None

    if issparse(jac_sparsity):
        structure = jac_sparsity
    else:
        structure = np.atleast_2d(jac_sparsity)

    if structure.shape != (m, n):
        raise ValueError("`jac_sparsity` has wrong shape.")

    return structure, group_columns(structure)
# Loss functions.
def huber(z, rho, cost_only):
    """Huber loss written into `rho` in place: identity for z <= 1,
    sqrt growth above; rows 1 and 2 hold the first two derivatives."""
    quadratic = z <= 1
    outlier = ~quadratic
    rho[0, quadratic] = z[quadratic]
    rho[0, outlier] = 2 * np.sqrt(z[outlier]) - 1
    if cost_only:
        return
    rho[1, quadratic] = 1
    rho[1, outlier] = 1 / np.sqrt(z[outlier])
    rho[2, quadratic] = 0
    rho[2, outlier] = -0.5 * z[outlier] ** -1.5
def soft_l1(z, rho, cost_only):
    """Smooth l1 approximation written into `rho` in place:
    rho(z) = 2*(sqrt(1 + z) - 1)."""
    shifted = z + 1
    root = np.sqrt(shifted)
    rho[0] = 2 * (root - 1)
    if cost_only:
        return
    rho[1] = 1 / root
    rho[2] = -0.5 * shifted ** -1.5
def cauchy(z, rho, cost_only):
    """Cauchy (Lorentzian) loss written into `rho` in place:
    rho(z) = ln(1 + z)."""
    rho[0] = np.log1p(z)
    if cost_only:
        return
    denom = 1 + z
    rho[1] = 1 / denom
    rho[2] = -1 / denom ** 2
def arctan(z, rho, cost_only):
    """Arctan loss written into `rho` in place: rho(z) = arctan(z),
    bounded above by pi/2."""
    rho[0] = np.arctan(z)
    if cost_only:
        return
    denom = 1 + z ** 2
    rho[1] = 1 / denom
    rho[2] = -2 * z / denom ** 2
# Map loss names accepted by `least_squares` to their implementations;
# 'linear' maps to None because pure least squares needs no reweighting.
IMPLEMENTED_LOSSES = dict(linear=None, huber=huber, soft_l1=soft_l1,
                          cauchy=cauchy, arctan=arctan)
def construct_loss_function(m, loss, f_scale):
    """Build a callable mapping residuals f to the scaled loss array rho.

    Returns None for 'linear' (pure least squares). Otherwise the returned
    function evaluates rho and its first two derivatives at
    z = (f / f_scale)**2, rescaled so the total cost is 0.5 * sum(rho[0]);
    with ``cost_only=True`` it returns just that scalar cost.
    """
    if loss == 'linear':
        return None

    if callable(loss):
        def loss_function(f, cost_only=False):
            z = (f / f_scale) ** 2
            rho = loss(z)
            if cost_only:
                return 0.5 * f_scale ** 2 * np.sum(rho[0])
            rho[0] *= f_scale ** 2
            rho[2] /= f_scale ** 2
            return rho
    else:
        impl = IMPLEMENTED_LOSSES[loss]
        # Named losses write into a preallocated (3, m) buffer in place.
        rho = np.empty((3, m))

        def loss_function(f, cost_only=False):
            z = (f / f_scale) ** 2
            impl(z, rho, cost_only=cost_only)
            if cost_only:
                return 0.5 * f_scale ** 2 * np.sum(rho[0])
            rho[0] *= f_scale ** 2
            rho[2] /= f_scale ** 2
            return rho

    return loss_function
def least_squares(
        fun, x0, jac='2-point', bounds=(-np.inf, np.inf), method='trf',
        ftol=1e-8, xtol=1e-8, gtol=1e-8, x_scale=1.0, loss='linear',
        f_scale=1.0, diff_step=None, tr_solver=None, tr_options={},
        jac_sparsity=None, max_nfev=None, verbose=0, args=(), kwargs={}):
    """Solve a nonlinear least-squares problem with bounds on the variables.

    Given the residuals f(x) (an m-D real function of n real
    variables) and the loss function rho(s) (a scalar function), `least_squares`
    finds a local minimum of the cost function F(x)::

        minimize F(x) = 0.5 * sum(rho(f_i(x)**2), i = 0, ..., m - 1)
        subject to lb <= x <= ub

    The purpose of the loss function rho(s) is to reduce the influence of
    outliers on the solution.

    Parameters
    ----------
    fun : callable
        Function which computes the vector of residuals, with the signature
        ``fun(x, *args, **kwargs)``, i.e., the minimization proceeds with
        respect to its first argument. The argument ``x`` passed to this
        function is an ndarray of shape (n,) (never a scalar, even for n=1).
        It must allocate and return a 1-D array_like of shape (m,) or a scalar.
        If the argument ``x`` is complex or the function ``fun`` returns
        complex residuals, it must be wrapped in a real function of real
        arguments, as shown at the end of the Examples section.
    x0 : array_like with shape (n,) or float
        Initial guess on independent variables. If float, it will be treated
        as a 1-D array with one element.
    jac : {'2-point', '3-point', 'cs', callable}, optional
        Method of computing the Jacobian matrix (an m-by-n matrix, where
        element (i, j) is the partial derivative of f[i] with respect to
        x[j]). The keywords select a finite difference scheme for numerical
        estimation. The scheme '3-point' is more accurate, but requires
        twice as many operations as '2-point' (default). The scheme 'cs'
        uses complex steps, and while potentially the most accurate, it is
        applicable only when `fun` correctly handles complex inputs and
        can be analytically continued to the complex plane. Method 'lm'
        always uses the '2-point' scheme. If callable, it is used as
        ``jac(x, *args, **kwargs)`` and should return a good approximation
        (or the exact value) for the Jacobian as an array_like (np.atleast_2d
        is applied), a sparse matrix (csr_matrix preferred for performance) or
        a `scipy.sparse.linalg.LinearOperator`.
    bounds : 2-tuple of array_like, optional
        Lower and upper bounds on independent variables. Defaults to no bounds.
        Each array must match the size of `x0` or be a scalar, in the latter
        case a bound will be the same for all variables. Use ``np.inf`` with
        an appropriate sign to disable bounds on all or some variables.
    method : {'trf', 'dogbox', 'lm'}, optional
        Algorithm to perform minimization.

        * 'trf' : Trust Region Reflective algorithm, particularly suitable
          for large sparse problems with bounds. Generally robust method.
        * 'dogbox' : dogleg algorithm with rectangular trust regions,
          typical use case is small problems with bounds. Not recommended
          for problems with rank-deficient Jacobian.
        * 'lm' : Levenberg-Marquardt algorithm as implemented in MINPACK.
          Doesn't handle bounds and sparse Jacobians. Usually the most
          efficient method for small unconstrained problems.

        Default is 'trf'. See Notes for more information.
    ftol : float or None, optional
        Tolerance for termination by the change of the cost function. Default
        is 1e-8. The optimization process is stopped when ``dF < ftol * F``,
        and there was an adequate agreement between a local quadratic model and
        the true model in the last step.
        If None and 'method' is not 'lm', the termination by this condition is
        disabled. If 'method' is 'lm', this tolerance must be higher than
        machine epsilon.
    xtol : float or None, optional
        Tolerance for termination by the change of the independent variables.
        Default is 1e-8. The exact condition depends on the `method` used:

        * For 'trf' and 'dogbox' : ``norm(dx) < xtol * (xtol + norm(x))``.
        * For 'lm' : ``Delta < xtol * norm(xs)``, where ``Delta`` is
          a trust-region radius and ``xs`` is the value of ``x``
          scaled according to `x_scale` parameter (see below).

        If None and 'method' is not 'lm', the termination by this condition is
        disabled. If 'method' is 'lm', this tolerance must be higher than
        machine epsilon.
    gtol : float or None, optional
        Tolerance for termination by the norm of the gradient. Default is 1e-8.
        The exact condition depends on a `method` used:

        * For 'trf' : ``norm(g_scaled, ord=np.inf) < gtol``, where
          ``g_scaled`` is the value of the gradient scaled to account for
          the presence of the bounds [STIR]_.
        * For 'dogbox' : ``norm(g_free, ord=np.inf) < gtol``, where
          ``g_free`` is the gradient with respect to the variables which
          are not in the optimal state on the boundary.
        * For 'lm' : the maximum absolute value of the cosine of angles
          between columns of the Jacobian and the residual vector is less
          than `gtol`, or the residual vector is zero.

        If None and 'method' is not 'lm', the termination by this condition is
        disabled. If 'method' is 'lm', this tolerance must be higher than
        machine epsilon.
    x_scale : array_like or 'jac', optional
        Characteristic scale of each variable. Setting `x_scale` is equivalent
        to reformulating the problem in scaled variables ``xs = x / x_scale``.
        An alternative view is that the size of a trust region along jth
        dimension is proportional to ``x_scale[j]``. Improved convergence may
        be achieved by setting `x_scale` such that a step of a given size
        along any of the scaled variables has a similar effect on the cost
        function. If set to 'jac', the scale is iteratively updated using the
        inverse norms of the columns of the Jacobian matrix (as described in
        [JJMore]_).
    loss : str or callable, optional
        Determines the loss function. The following keyword values are allowed:

        * 'linear' (default) : ``rho(z) = z``. Gives a standard
          least-squares problem.
        * 'soft_l1' : ``rho(z) = 2 * ((1 + z)**0.5 - 1)``. The smooth
          approximation of l1 (absolute value) loss. Usually a good
          choice for robust least squares.
        * 'huber' : ``rho(z) = z if z <= 1 else 2*z**0.5 - 1``. Works
          similarly to 'soft_l1'.
        * 'cauchy' : ``rho(z) = ln(1 + z)``. Severely weakens outliers
          influence, but may cause difficulties in optimization process.
        * 'arctan' : ``rho(z) = arctan(z)``. Limits a maximum loss on
          a single residual, has properties similar to 'cauchy'.

        If callable, it must take a 1-D ndarray ``z=f**2`` and return an
        array_like with shape (3, m) where row 0 contains function values,
        row 1 contains first derivatives and row 2 contains second
        derivatives. Method 'lm' supports only 'linear' loss.
    f_scale : float, optional
        Value of soft margin between inlier and outlier residuals, default
        is 1.0. The loss function is evaluated as follows
        ``rho_(f**2) = C**2 * rho(f**2 / C**2)``, where ``C`` is `f_scale`,
        and ``rho`` is determined by `loss` parameter. This parameter has
        no effect with ``loss='linear'``, but for other `loss` values it is
        of crucial importance.
    max_nfev : None or int, optional
        Maximum number of function evaluations before the termination.
        If None (default), the value is chosen automatically:

        * For 'trf' and 'dogbox' : 100 * n.
        * For 'lm' : 100 * n if `jac` is callable and 100 * n * (n + 1)
          otherwise (because 'lm' counts function calls in Jacobian
          estimation).
    diff_step : None or array_like, optional
        Determines the relative step size for the finite difference
        approximation of the Jacobian. The actual step is computed as
        ``x * diff_step``. If None (default), then `diff_step` is taken to be
        a conventional "optimal" power of machine epsilon for the finite
        difference scheme used [NR]_.
    tr_solver : {None, 'exact', 'lsmr'}, optional
        Method for solving trust-region subproblems, relevant only for 'trf'
        and 'dogbox' methods.

        * 'exact' is suitable for not very large problems with dense
          Jacobian matrices. The computational complexity per iteration is
          comparable to a singular value decomposition of the Jacobian
          matrix.
        * 'lsmr' is suitable for problems with sparse and large Jacobian
          matrices. It uses the iterative procedure
          `scipy.sparse.linalg.lsmr` for finding a solution of a linear
          least-squares problem and only requires matrix-vector product
          evaluations.

        If None (default), the solver is chosen based on the type of Jacobian
        returned on the first iteration.
    tr_options : dict, optional
        Keyword options passed to trust-region solver.

        * ``tr_solver='exact'``: `tr_options` are ignored.
        * ``tr_solver='lsmr'``: options for `scipy.sparse.linalg.lsmr`.
          Additionally, ``method='trf'`` supports 'regularize' option
          (bool, default is True), which adds a regularization term to the
          normal equation, which improves convergence if the Jacobian is
          rank-deficient [Byrd]_ (eq. 3.4).
    jac_sparsity : {None, array_like, sparse matrix}, optional
        Defines the sparsity structure of the Jacobian matrix for finite
        difference estimation, its shape must be (m, n). If the Jacobian has
        only few non-zero elements in *each* row, providing the sparsity
        structure will greatly speed up the computations [Curtis]_. A zero
        entry means that a corresponding element in the Jacobian is identically
        zero. If provided, forces the use of 'lsmr' trust-region solver.
        If None (default), then dense differencing will be used. Has no effect
        for 'lm' method.
    verbose : {0, 1, 2}, optional
        Level of algorithm's verbosity:

        * 0 (default) : work silently.
        * 1 : display a termination report.
        * 2 : display progress during iterations (not supported by 'lm'
          method).
    args, kwargs : tuple and dict, optional
        Additional arguments passed to `fun` and `jac`. Both empty by default.
        The calling signature is ``fun(x, *args, **kwargs)`` and the same for
        `jac`.

    Returns
    -------
    result : OptimizeResult
        `OptimizeResult` with the following fields defined:

        x : ndarray, shape (n,)
            Solution found.
        cost : float
            Value of the cost function at the solution.
        fun : ndarray, shape (m,)
            Vector of residuals at the solution.
        jac : ndarray, sparse matrix or LinearOperator, shape (m, n)
            Modified Jacobian matrix at the solution, in the sense that J^T J
            is a Gauss-Newton approximation of the Hessian of the cost function.
            The type is the same as the one used by the algorithm.
        grad : ndarray, shape (m,)
            Gradient of the cost function at the solution.
        optimality : float
            First-order optimality measure. In unconstrained problems, it is
            always the uniform norm of the gradient. In constrained problems,
            it is the quantity which was compared with `gtol` during iterations.
        active_mask : ndarray of int, shape (n,)
            Each component shows whether a corresponding constraint is active
            (that is, whether a variable is at the bound):

            * 0 : a constraint is not active.
            * -1 : a lower bound is active.
            * 1 : an upper bound is active.

            Might be somewhat arbitrary for 'trf' method as it generates a
            sequence of strictly feasible iterates and `active_mask` is
            determined within a tolerance threshold.
        nfev : int
            Number of function evaluations done. Methods 'trf' and 'dogbox' do
            not count function calls for numerical Jacobian approximation, as
            opposed to 'lm' method.
        njev : int or None
            Number of Jacobian evaluations done. If numerical Jacobian
            approximation is used in 'lm' method, it is set to None.
        status : int
            The reason for algorithm termination:

            * -1 : improper input parameters status returned from MINPACK.
            * 0 : the maximum number of function evaluations is exceeded.
            * 1 : `gtol` termination condition is satisfied.
            * 2 : `ftol` termination condition is satisfied.
            * 3 : `xtol` termination condition is satisfied.
            * 4 : Both `ftol` and `xtol` termination conditions are satisfied.
        message : str
            Verbal description of the termination reason.
        success : bool
            True if one of the convergence criteria is satisfied (`status` > 0).

    See Also
    --------
    leastsq : A legacy wrapper for the MINPACK implementation of the
        Levenberg-Marquadt algorithm.
    curve_fit : Least-squares minimization applied to a curve-fitting problem.

    Notes
    -----
    Method 'lm' (Levenberg-Marquardt) calls a wrapper over least-squares
    algorithms implemented in MINPACK (lmder, lmdif). It runs the
    Levenberg-Marquardt algorithm formulated as a trust-region type algorithm.
    The implementation is based on paper [JJMore]_, it is very robust and
    efficient with a lot of smart tricks. It should be your first choice
    for unconstrained problems. Note that it doesn't support bounds. Also,
    it doesn't work when m < n.

    Method 'trf' (Trust Region Reflective) is motivated by the process of
    solving a system of equations, which constitute the first-order optimality
    condition for a bound-constrained minimization problem as formulated in
    [STIR]_. The algorithm iteratively solves trust-region subproblems
    augmented by a special diagonal quadratic term and with trust-region shape
    determined by the distance from the bounds and the direction of the
    gradient. These enhancements help to avoid making steps directly into bounds
    and efficiently explore the whole space of variables. To further improve
    convergence, the algorithm considers search directions reflected from the
    bounds. To obey theoretical requirements, the algorithm keeps iterates
    strictly feasible. With dense Jacobians trust-region subproblems are
    solved by an exact method very similar to the one described in [JJMore]_
    (and implemented in MINPACK). The difference from the MINPACK
    implementation is that a singular value decomposition of a Jacobian
    matrix is done once per iteration, instead of a QR decomposition and series
    of Givens rotation eliminations. For large sparse Jacobians a 2-D subspace
    approach of solving trust-region subproblems is used [STIR]_, [Byrd]_.
    The subspace is spanned by a scaled gradient and an approximate
    Gauss-Newton solution delivered by `scipy.sparse.linalg.lsmr`. When no
    constraints are imposed the algorithm is very similar to MINPACK and has
    generally comparable performance. The algorithm works quite robustly in
    unbounded and bounded problems, thus it is chosen as a default algorithm.

    Method 'dogbox' operates in a trust-region framework, but considers
    rectangular trust regions as opposed to conventional ellipsoids [Voglis]_.
    The intersection of a current trust region and initial bounds is again
    rectangular, so on each iteration a quadratic minimization problem subject
    to bound constraints is solved approximately by Powell's dogleg method
    [NumOpt]_. The required Gauss-Newton step can be computed exactly for
    dense Jacobians or approximately by `scipy.sparse.linalg.lsmr` for large
    sparse Jacobians. The algorithm is likely to exhibit slow convergence when
    the rank of Jacobian is less than the number of variables. The algorithm
    often outperforms 'trf' in bounded problems with a small number of
    variables.

    Robust loss functions are implemented as described in [BA]_. The idea
    is to modify a residual vector and a Jacobian matrix on each iteration
    such that computed gradient and Gauss-Newton Hessian approximation match
    the true gradient and Hessian approximation of the cost function. Then
    the algorithm proceeds in a normal way, i.e., robust loss functions are
    implemented as a simple wrapper over standard least-squares algorithms.

    .. versionadded:: 0.17.0

    References
    ----------
    .. [STIR] M. A. Branch, T. F. Coleman, and Y. Li, "A Subspace, Interior,
              and Conjugate Gradient Method for Large-Scale Bound-Constrained
              Minimization Problems," SIAM Journal on Scientific Computing,
              Vol. 21, Number 1, pp 1-23, 1999.
    .. [NR] William H. Press et. al., "Numerical Recipes. The Art of Scientific
            Computing. 3rd edition", Sec. 5.7.
    .. [Byrd] R. H. Byrd, R. B. Schnabel and G. A. Shultz, "Approximate
              solution of the trust region problem by minimization over
              two-dimensional subspaces", Math. Programming, 40, pp. 247-263,
              1988.
    .. [Curtis] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of
                sparse Jacobian matrices", Journal of the Institute of
                Mathematics and its Applications, 13, pp. 117-120, 1974.
    .. [JJMore] J. J. More, "The Levenberg-Marquardt Algorithm: Implementation
                and Theory," Numerical Analysis, ed. G. A. Watson, Lecture
                Notes in Mathematics 630, Springer Verlag, pp. 105-116, 1977.
    .. [Voglis] C. Voglis and I. E. Lagaris, "A Rectangular Trust Region
                Dogleg Approach for Unconstrained and Bound Constrained
                Nonlinear Optimization", WSEAS International Conference on
                Applied Mathematics, Corfu, Greece, 2004.
    .. [NumOpt] J. Nocedal and S. J. Wright, "Numerical optimization,
                2nd edition", Chapter 4.
    .. [BA] B. Triggs et. al., "Bundle Adjustment - A Modern Synthesis",
            Proceedings of the International Workshop on Vision Algorithms:
            Theory and Practice, pp. 298-372, 1999.

    Examples
    --------
    In this example we find a minimum of the Rosenbrock function without bounds
    on independent variables.

    >>> def fun_rosenbrock(x):
    ...     return np.array([10 * (x[1] - x[0]**2), (1 - x[0])])

    Notice that we only provide the vector of the residuals. The algorithm
    constructs the cost function as a sum of squares of the residuals, which
    gives the Rosenbrock function. The exact minimum is at ``x = [1.0, 1.0]``.

    >>> from scipy.optimize import least_squares
    >>> x0_rosenbrock = np.array([2, 2])
    >>> res_1 = least_squares(fun_rosenbrock, x0_rosenbrock)
    >>> res_1.x
    array([ 1.,  1.])
    >>> res_1.cost
    9.8669242910846867e-30
    >>> res_1.optimality
    8.8928864934219529e-14

    We now constrain the variables, in such a way that the previous solution
    becomes infeasible. Specifically, we require that ``x[1] >= 1.5``, and
    ``x[0]`` left unconstrained. To this end, we specify the `bounds` parameter
    to `least_squares` in the form ``bounds=([-np.inf, 1.5], np.inf)``.

    We also provide the analytic Jacobian:

    >>> def jac_rosenbrock(x):
    ...     return np.array([
    ...         [-20 * x[0], 10],
    ...         [-1, 0]])

    Putting this all together, we see that the new solution lies on the bound:

    >>> res_2 = least_squares(fun_rosenbrock, x0_rosenbrock, jac_rosenbrock,
    ...                       bounds=([-np.inf, 1.5], np.inf))
    >>> res_2.x
    array([ 1.22437075,  1.5       ])
    >>> res_2.cost
    0.025213093946805685
    >>> res_2.optimality
    1.5885401433157753e-07

    Now we solve a system of equations (i.e., the cost function should be zero
    at a minimum) for a Broyden tridiagonal vector-valued function of 100000
    variables:

    >>> def fun_broyden(x):
    ...     f = (3 - x) * x + 1
    ...     f[1:] -= x[:-1]
    ...     f[:-1] -= 2 * x[1:]
    ...     return f

    The corresponding Jacobian matrix is sparse. We tell the algorithm to
    estimate it by finite differences and provide the sparsity structure of
    Jacobian to significantly speed up this process.

    >>> from scipy.sparse import lil_matrix
    >>> def sparsity_broyden(n):
    ...     sparsity = lil_matrix((n, n), dtype=int)
    ...     i = np.arange(n)
    ...     sparsity[i, i] = 1
    ...     i = np.arange(1, n)
    ...     sparsity[i, i - 1] = 1
    ...     i = np.arange(n - 1)
    ...     sparsity[i, i + 1] = 1
    ...     return sparsity
    ...
    >>> n = 100000
    >>> x0_broyden = -np.ones(n)
    ...
    >>> res_3 = least_squares(fun_broyden, x0_broyden,
    ...                       jac_sparsity=sparsity_broyden(n))
    >>> res_3.cost
    4.5687069299604613e-23
    >>> res_3.optimality
    1.1650454296851518e-11

    Let's also solve a curve fitting problem using robust loss function to
    take care of outliers in the data. Define the model function as
    ``y = a + b * exp(c * t)``, where t is a predictor variable, y is an
    observation and a, b, c are parameters to estimate.

    First, define the function which generates the data with noise and
    outliers, define the model parameters, and generate data:

    >>> from numpy.random import default_rng
    >>> rng = default_rng()
    >>> def gen_data(t, a, b, c, noise=0., n_outliers=0, seed=None):
    ...     rng = default_rng(seed)
    ...
    ...     y = a + b * np.exp(t * c)
    ...
    ...     error = noise * rng.standard_normal(t.size)
    ...     outliers = rng.integers(0, t.size, n_outliers)
    ...     error[outliers] *= 10
    ...
    ...     return y + error
    ...
    >>> a = 0.5
    >>> b = 2.0
    >>> c = -1
    >>> t_min = 0
    >>> t_max = 10
    >>> n_points = 15
    ...
    >>> t_train = np.linspace(t_min, t_max, n_points)
    >>> y_train = gen_data(t_train, a, b, c, noise=0.1, n_outliers=3)

    Define function for computing residuals and initial estimate of
    parameters.

    >>> def fun(x, t, y):
    ...     return x[0] + x[1] * np.exp(x[2] * t) - y
    ...
    >>> x0 = np.array([1.0, 1.0, 0.0])

    Compute a standard least-squares solution:

    >>> res_lsq = least_squares(fun, x0, args=(t_train, y_train))

    Now compute two solutions with two different robust loss functions. The
    parameter `f_scale` is set to 0.1, meaning that inlier residuals should
    not significantly exceed 0.1 (the noise level used).

    >>> res_soft_l1 = least_squares(fun, x0, loss='soft_l1', f_scale=0.1,
    ...                             args=(t_train, y_train))
    >>> res_log = least_squares(fun, x0, loss='cauchy', f_scale=0.1,
    ...                         args=(t_train, y_train))

    And, finally, plot all the curves. We see that by selecting an appropriate
    `loss` we can get estimates close to optimal even in the presence of
    strong outliers. But keep in mind that generally it is recommended to try
    'soft_l1' or 'huber' losses first (if at all necessary) as the other two
    options may cause difficulties in optimization process.

    >>> t_test = np.linspace(t_min, t_max, n_points * 10)
    >>> y_true = gen_data(t_test, a, b, c)
    >>> y_lsq = gen_data(t_test, *res_lsq.x)
    >>> y_soft_l1 = gen_data(t_test, *res_soft_l1.x)
    >>> y_log = gen_data(t_test, *res_log.x)
    ...
    >>> import matplotlib.pyplot as plt
    >>> plt.plot(t_train, y_train, 'o')
    >>> plt.plot(t_test, y_true, 'k', linewidth=2, label='true')
    >>> plt.plot(t_test, y_lsq, label='linear loss')
    >>> plt.plot(t_test, y_soft_l1, label='soft_l1 loss')
    >>> plt.plot(t_test, y_log, label='cauchy loss')
    >>> plt.xlabel("t")
    >>> plt.ylabel("y")
    >>> plt.legend()
    >>> plt.show()

    In the next example, we show how complex-valued residual functions of
    complex variables can be optimized with ``least_squares()``. Consider the
    following function:

    >>> def f(z):
    ...     return z - (0.5 + 0.5j)

    We wrap it into a function of real variables that returns real residuals
    by simply handling the real and imaginary parts as independent variables:

    >>> def f_wrap(x):
    ...     fx = f(x[0] + 1j*x[1])
    ...     return np.array([fx.real, fx.imag])

    Thus, instead of the original m-D complex function of n complex
    variables we optimize a 2m-D real function of 2n real variables:

    >>> from scipy.optimize import least_squares
    >>> res_wrapped = least_squares(f_wrap, (0.1, 0.1), bounds=([0, 0], [1, 1]))
    >>> z = res_wrapped.x[0] + res_wrapped.x[1]*1j
    >>> z
    (0.49999999999925893+0.49999999999925893j)
    """
    # Validate algorithm options up front so bad input fails before any
    # residual or Jacobian evaluation is performed.
    if method not in ['trf', 'dogbox', 'lm']:
        raise ValueError("`method` must be 'trf', 'dogbox' or 'lm'.")
    if jac not in ['2-point', '3-point', 'cs'] and not callable(jac):
        raise ValueError("`jac` must be '2-point', '3-point', 'cs' or "
                         "callable.")
    if tr_solver not in [None, 'exact', 'lsmr']:
        raise ValueError("`tr_solver` must be None, 'exact' or 'lsmr'.")
    if loss not in IMPLEMENTED_LOSSES and not callable(loss):
        raise ValueError("`loss` must be one of {0} or a callable."
                         .format(IMPLEMENTED_LOSSES.keys()))
    if method == 'lm' and loss != 'linear':
        raise ValueError("method='lm' supports only 'linear' loss function.")
    if verbose not in [0, 1, 2]:
        raise ValueError("`verbose` must be in [0, 1, 2].")
    if len(bounds) != 2:
        raise ValueError("`bounds` must contain 2 elements.")
    if max_nfev is not None and max_nfev <= 0:
        raise ValueError("`max_nfev` must be None or positive integer.")
    if np.iscomplexobj(x0):
        raise ValueError("`x0` must be real.")
    # Promote x0 to a 1-D float array (scalars become single-element arrays).
    x0 = np.atleast_1d(x0).astype(float)
    if x0.ndim > 1:
        raise ValueError("`x0` must have at most 1 dimension.")
    # Broadcast scalar bounds to the shape of x0 and sanity-check feasibility.
    lb, ub = prepare_bounds(bounds, x0.shape[0])
    if method == 'lm' and not np.all((lb == -np.inf) & (ub == np.inf)):
        raise ValueError("Method 'lm' doesn't support bounds.")
    if lb.shape != x0.shape or ub.shape != x0.shape:
        raise ValueError("Inconsistent shapes between bounds and `x0`.")
    if np.any(lb >= ub):
        raise ValueError("Each lower bound must be strictly less than each "
                         "upper bound.")
    if not in_bounds(x0, lb, ub):
        raise ValueError("`x0` is infeasible.")
    x_scale = check_x_scale(x_scale, x0)
    ftol, xtol, gtol = check_tolerance(ftol, xtol, gtol, method)
    # Bind the extra arguments and guarantee a 1-D residual array.
    def fun_wrapped(x):
        return np.atleast_1d(fun(x, *args, **kwargs))
    if method == 'trf':
        # 'trf' generates strictly feasible iterates, so the start point must
        # be nudged off the bounds if it lies exactly on them.
        x0 = make_strictly_feasible(x0, lb, ub)
    f0 = fun_wrapped(x0)
    if f0.ndim != 1:
        raise ValueError("`fun` must return at most 1-d array_like. "
                         "f0.shape: {0}".format(f0.shape))
    if not np.all(np.isfinite(f0)):
        raise ValueError("Residuals are not finite in the initial point.")
    n = x0.size
    m = f0.size
    if method == 'lm' and m < n:
        raise ValueError("Method 'lm' doesn't work when the number of "
                         "residuals is less than the number of variables.")
    # Build the robust-loss evaluator; None means plain linear least squares.
    loss_function = construct_loss_function(m, loss, f_scale)
    if callable(loss):
        rho = loss_function(f0)
        if rho.shape != (3, m):
            raise ValueError("The return value of `loss` callable has wrong "
                             "shape.")
        initial_cost = 0.5 * np.sum(rho[0])
    elif loss_function is not None:
        initial_cost = loss_function(f0, cost_only=True)
    else:
        initial_cost = 0.5 * np.dot(f0, f0)
    # Wrap the Jacobian (dense, sparse or LinearOperator) behind the uniform
    # signature jac_wrapped(x, f) expected by the backends.
    if callable(jac):
        J0 = jac(x0, *args, **kwargs)
        if issparse(J0):
            J0 = J0.tocsr()
            def jac_wrapped(x, _=None):
                return jac(x, *args, **kwargs).tocsr()
        elif isinstance(J0, LinearOperator):
            def jac_wrapped(x, _=None):
                return jac(x, *args, **kwargs)
        else:
            J0 = np.atleast_2d(J0)
            def jac_wrapped(x, _=None):
                return np.atleast_2d(jac(x, *args, **kwargs))
    else: # Estimate Jacobian by finite differences.
        if method == 'lm':
            if jac_sparsity is not None:
                raise ValueError("method='lm' does not support "
                                 "`jac_sparsity`.")
            if jac != '2-point':
                warn("jac='{0}' works equivalently to '2-point' "
                     "for method='lm'.".format(jac))
            # MINPACK does its own differencing internally.
            J0 = jac_wrapped = None
        else:
            if jac_sparsity is not None and tr_solver == 'exact':
                raise ValueError("tr_solver='exact' is incompatible "
                                 "with `jac_sparsity`.")
            jac_sparsity = check_jac_sparsity(jac_sparsity, m, n)
            def jac_wrapped(x, f):
                J = approx_derivative(fun, x, rel_step=diff_step, method=jac,
                                      f0=f, bounds=bounds, args=args,
                                      kwargs=kwargs, sparsity=jac_sparsity)
                if J.ndim != 2: # J is guaranteed not sparse.
                    J = np.atleast_2d(J)
                return J
            J0 = jac_wrapped(x0, f0)
    if J0 is not None:
        if J0.shape != (m, n):
            raise ValueError(
                "The return value of `jac` has wrong shape: expected {0}, "
                "actual {1}.".format((m, n), J0.shape))
        if not isinstance(J0, np.ndarray):
            if method == 'lm':
                raise ValueError("method='lm' works only with dense "
                                 "Jacobian matrices.")
            if tr_solver == 'exact':
                raise ValueError(
                    "tr_solver='exact' works only with dense "
                    "Jacobian matrices.")
        jac_scale = isinstance(x_scale, str) and x_scale == 'jac'
        if isinstance(J0, LinearOperator) and jac_scale:
            raise ValueError("x_scale='jac' can't be used when `jac` "
                             "returns LinearOperator.")
        # Pick a trust-region subproblem solver from the Jacobian type:
        # dense arrays get the SVD-based exact solver, everything else lsmr.
        if tr_solver is None:
            if isinstance(J0, np.ndarray):
                tr_solver = 'exact'
            else:
                tr_solver = 'lsmr'
    # Dispatch to the selected backend; each returns an OptimizeResult.
    if method == 'lm':
        result = call_minpack(fun_wrapped, x0, jac_wrapped, ftol, xtol, gtol,
                              max_nfev, x_scale, diff_step)
    elif method == 'trf':
        result = trf(fun_wrapped, jac_wrapped, x0, f0, J0, lb, ub, ftol, xtol,
                     gtol, max_nfev, x_scale, loss_function, tr_solver,
                     tr_options.copy(), verbose)
    elif method == 'dogbox':
        if tr_solver == 'lsmr' and 'regularize' in tr_options:
            warn("The keyword 'regularize' in `tr_options` is not relevant "
                 "for 'dogbox' method.")
            tr_options = tr_options.copy()
            del tr_options['regularize']
        result = dogbox(fun_wrapped, jac_wrapped, x0, f0, J0, lb, ub, ftol,
                        xtol, gtol, max_nfev, x_scale, loss_function,
                        tr_solver, tr_options, verbose)
    result.message = TERMINATION_MESSAGES[result.status]
    result.success = result.status > 0
    if verbose >= 1:
        print(result.message)
        print("Function evaluations {0}, initial cost {1:.4e}, final cost "
              "{2:.4e}, first-order optimality {3:.2e}."
              .format(result.nfev, initial_cost, result.cost,
                      result.optimality))
    return result
| bsd-3-clause |
florian-f/sklearn | examples/feature_stacker.py | 8 | 1941 | """
=================================================
Concatenating multiple feature extraction methods
=================================================
In many real-world examples, there are many ways to extract features from a
dataset. Often it is beneficial to combine several methods to obtain good
performance. This example shows how to use ``FeatureUnion`` to combine
features obtained by PCA and univariate selection.
Combining features using this transformer has the benefit that it allows
cross validation and grid searches over the whole process.
The combination used in this example is not particularly helpful on this
dataset and is only used to illustrate the usage of FeatureUnion.
"""
# Author: Andreas Mueller <amueller@ais.uni-bonn.de>
#
# License: BSD 3-clause
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.grid_search import GridSearchCV
from sklearn.svm import SVC
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA
from sklearn.feature_selection import SelectKBest
# Load the iris dataset: 150 samples, 4 features, 3 classes.
iris = load_iris()
X, y = iris.data, iris.target
# This dataset is way too high-dimensional. Better do PCA:
pca = PCA(n_components=2)
# Maybe some original features were good, too?
selection = SelectKBest(k=1)
# Build estimator from PCA and Univariate selection:
combined_features = FeatureUnion([("pca", pca), ("univ_select", selection)])
# Use combined features to transform dataset:
X_features = combined_features.fit(X, y).transform(X)
# Classify:
svm = SVC(kernel="linear")
svm.fit(X_features, y)
# Do grid search over k, n_components and C (note the double-underscore
# syntax addressing nested estimator parameters inside the pipeline):
pipeline = Pipeline([("features", combined_features), ("svm", svm)])
param_grid = dict(features__pca__n_components=[1, 2, 3],
                  features__univ_select__k=[1, 2],
                  svm__C=[0.1, 1, 10])
grid_search = GridSearchCV(pipeline, param_grid=param_grid, verbose=10)
grid_search.fit(X, y)
print(grid_search.best_estimator_)
| bsd-3-clause |
booya-at/paraBEM | examples/openglider/shape_cl_cd.py | 2 | 4070 | from copy import deepcopy
import numpy as np
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
# from openglider.glider.parametric import ParametricGlider
from openglider.jsonify import dump, load
from openglider.glider.in_out.export_3d import parabem_Panels
from openglider.utils.distribution import Distribution
import parabem
from parabem.pan3d import DirichletDoublet0Source0Case3 as Case
from parabem.vtk_export import CaseToVTK
from parabem.utils import check_path, v_inf_deg_range3
# Counter of objective evaluations; bumped by every call to min_func below
# and used to suffix the per-iteration VTK output files.
count = 0
# Load the 2-D parametric glider description from the reference design file.
with open("glider/referenz_schirm_berg.json") as _file:
    glider2d = load(_file)["data"]
# Reference planform properties of the loaded design; the aspect ratio is
# held constant while the control point is swept below.
area = glider2d.shape.area
aspect_ratio = glider2d.shape.aspect_ratio
def glider_set_controlpoint(glider2d, x):
    """Move the first control point of both shape curves to x (in place).

    Sets the x-coordinate of control point 0 on the front and back curves,
    then re-normalizes the planform to the module-level ``aspect_ratio``
    (keeping the span) and recomputes the cell distribution.

    NOTE(review): mutates ``glider2d`` and reads the global ``aspect_ratio``.
    """
    # Work on a copy of the control-point array, then assign it back.
    c = deepcopy(glider2d.shape.front_curve.controlpoints)
    c[0, 0] = x
    glider2d.shape.front_curve.controlpoints = list(c)
    c = deepcopy(glider2d.shape.back_curve.controlpoints)
    c[0, 0] = x
    glider2d.shape.back_curve.controlpoints = list(c)
    glider2d.shape.set_aspect_ratio(aspect_ratio, "span")
    # glider2d.set_flat_area(area, "aspect_ratio")  # alternative normalization, kept for reference
    glider2d.shape.set_const_cell_dist()
def shape_plot(glider2d):
    """Return (x, y) of the closed planform outline, centered vertically.

    Walks the front curve, then the back curve in reverse, and closes the
    polygon with the first front point. The y values are shifted so their
    mean is zero; x is returned as a tuple, y as a list.
    """
    planform = glider2d.shape.get_shape()
    outline = list(planform.front)
    outline.extend(reversed(planform.back))
    outline.append(planform.front[0])
    x, raw_y = zip(*outline)
    offset = sum(raw_y) / len(raw_y)
    centered_y = [value - offset for value in raw_y]
    return x, centered_y
def min_func(x):
    """Objective: induced drag coefficient cD at cL = 0.6 for control point x.

    Builds the 3-D glider for the given control-point position, runs a
    panel-method polar sweep over a range of angles of attack, writes a VTK
    snapshot of the case, and interpolates cD at the target lift coefficient.

    NOTE(review): increments the module-level ``count`` and reads the
    module-level ``glider2d``; not safe to call concurrently.
    """
    global count
    count += 1
    print("\niteration: " + str(count))
    # Never mutate the reference design; each evaluation works on a copy.
    _glider2d = deepcopy(glider2d)
    glider_set_controlpoint(_glider2d, x)
    glider3d = _glider2d.get_glider_3d()
    # Panelize only one half (symmetric=True) of the glider surface.
    panels = parabem_Panels(glider3d,
                            midribs=0,
                            profile_numpoints=40,
                            symmetric=True,
                            distribution=Distribution.from_nose_cos_distribution(40, 0.2),
                            num_average=0)
    case = Case(panels[1], panels[2])
    case.farfield = 5
    case.drag_calc = "trefftz"
    case.A_ref = 23
    case.create_wake(length=10000, count=5)
    case.v_inf = parabem.Vector3(8, 0, 1)
    case.trefftz_cut_pos = case.v_inf * 50
    # Sweep the onflow direction between 5 and 9 degrees in 20 steps.
    alpha = v_inf_deg_range3(case.v_inf, 5, 9, 20)
    polars = case.polars(alpha)
    vtk_writer = CaseToVTK(case, "results/vtk_opt", suffix=str(count))
    vtk_writer.write_panels(data_type="point")
    cL = []
    cD = []
    for i in polars.values:
        cL.append(i.cL)
        cD.append(i.cD)
    # Interpolate the drag coefficient at the design lift coefficient 0.6.
    return np.interp(0.6, cL, cD)
# Brute-force sweep of the control-point position over 20 samples.
y_positions = np.linspace(-6, 10, 20)
cD = [min_func(i) for i in y_positions]
# plt.show()
# Plot induced drag versus control-point position (axis labels in German).
plt.figure(figsize=(10, 4))
plt.plot(y_positions, cD, marker="x")
plt.xlabel('y-Position der Kontrollpunkte [m]', fontsize=15)
plt.ylabel('induzierter Widerstandsbeiwert $c_{Wi}$', fontsize=15)
plt.grid(True)
plt.savefig(check_path('results/vtk_opt/induced_drag.png'), bbox_inches='tight')
# # plt.show()
plt.close()
# Glide ratio cL/cD at the design lift coefficient 0.6.
plt.figure(figsize=(10, 4))
plt.plot(y_positions, 0.6 / np.array(cD), marker="x")
plt.xlabel('y-Position der Kontrollpunkte [m]', fontsize=15)
plt.ylabel('Gleitzahl $\\epsilon$', fontsize=15)
plt.grid(True)
plt.savefig(check_path('results/vtk_opt/glide.png'), bbox_inches='tight')
plt.close()
# Pick the sampled position with minimal induced drag as the "optimum".
best_ind = list(cD).index(min(cD))
best_y = y_positions[best_ind]
glider_set_controlpoint(glider2d, best_y)
with open(check_path("results/glider/optimized.json"), "w") as _file:
    dump(glider2d, _file)
# Overlay the optimal planform with the two sweep extremes for comparison.
plt.figure(figsize=(10, 5))
plt.axes().set_aspect("equal", "datalim")
plt.grid(True)
plt.plot(*shape_plot(glider2d), color="0", label="optimal", linewidth=2)
glider_set_controlpoint(glider2d, y_positions[0])
with open(check_path("results/glider/min.json"), "w") as _file:
    dump(glider2d, _file)
plt.plot(*shape_plot(glider2d), color="r", label="untere Grenze")
glider_set_controlpoint(glider2d, y_positions[-1])
with open(check_path("results/glider/max.json"), "w") as _file:
    dump(glider2d, _file)
plt.plot(*shape_plot(glider2d), color="b", label="obere Grenze")
plt.legend()
plt.ylim((-2, 3))
plt.savefig(check_path('results/vtk_opt/shapes.png'), bbox_inches='tight')
| gpl-3.0 |
dingocuster/scikit-learn | examples/decomposition/plot_ica_blind_source_separation.py | 349 | 2228 | """
=====================================
Blind source separation using FastICA
=====================================
An example of estimating sources from noisy data.
:ref:`ICA` is used to estimate sources given noisy measurements.
Imagine 3 instruments playing simultaneously and 3 microphones
recording the mixed signals. ICA is used to recover the sources
ie. what is played by each instrument. Importantly, PCA fails
at recovering our `instruments` since the related signals reflect
non-Gaussian processes.
"""
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt
from scipy import signal

from sklearn.decomposition import FastICA, PCA

###############################################################################
# Generate sample data: three known sources mixed into three observations
np.random.seed(0)
n_samples = 2000
time = np.linspace(0, 8, n_samples)

s1 = np.sin(2 * time)  # Signal 1 : sinusoidal signal
s2 = np.sign(np.sin(3 * time))  # Signal 2 : square signal
s3 = signal.sawtooth(2 * np.pi * time)  # Signal 3: saw tooth signal

S = np.c_[s1, s2, s3]
S += 0.2 * np.random.normal(size=S.shape)  # Add noise
S /= S.std(axis=0)  # Standardize data
# Mix data
A = np.array([[1, 1, 1], [0.5, 2, 1.0], [1.5, 1.0, 2.0]])  # Mixing matrix
X = np.dot(S, A.T)  # Generate observations

# Compute ICA
ica = FastICA(n_components=3)
S_ = ica.fit_transform(X)  # Reconstruct signals
A_ = ica.mixing_  # Get estimated mixing matrix

# We can `prove` that the ICA model applies by reverting the unmixing:
# re-mixing the estimated sources must reproduce the observations.
assert np.allclose(X, np.dot(S_, A_.T) + ica.mean_)

# For comparison, compute PCA
pca = PCA(n_components=3)
H = pca.fit_transform(X)  # Reconstruct signals based on orthogonal components

###############################################################################
# Plot results: one subplot per model, one colored trace per signal column
plt.figure()
models = [X, S, S_, H]
names = ['Observations (mixed signal)',
         'True Sources',
         'ICA recovered signals',
         'PCA recovered signals']
colors = ['red', 'steelblue', 'orange']
for ii, (model, name) in enumerate(zip(models, names), 1):
    plt.subplot(4, 1, ii)
    plt.title(name)
    for sig, color in zip(model.T, colors):
        plt.plot(sig, color=color)
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.46)
plt.show()
| bsd-3-clause |
rmcdermo/macfp-db | Extinction/FM_Burner/Computational_Results/2021/UWuppertal/UWuppertal_FM_Burner_plot_results.py | 1 | 2510 | #!/usr/bin/env python3
# McDermott
# Feb 2021
# first, make sure the macfp module directory is in your path
# if not, uncomment the lines below and replace <path to macfp-db>
# with the path (absolute or relative) to your macfp-db repository
#================================================================================
# import glob
import sys
#--------------------------------------------------------------------------------
# Make the macfp plotting module importable from a sibling macfp-db checkout.
sys.path.append('../../../../../../macfp-db/Utilities/')
#--------------------------------------------------------------------------------
import macfp
import importlib
importlib.reload(macfp) # use for development (while making changes to macfp.py)
import matplotlib.pyplot as plt
#--------------------------------------------------------------------------------
# Each entry maps a dataplot configuration file to the directory that should
# receive its plots.  Looping avoids five duplicated macfp.dataplot() calls
# that differed only in these two arguments.
plot_configs = [
    ("01_very_coarse_v0a_v0b_v0f_UoWu_FM_Burner_dataplot_config.csv", "./Plots_01/"),
    ("02_coarse_RadImpact_UoWu_FM_Burner_dataplot_config.csv", "./Plots_02/"),
    ("03_allMesh_v0f_v2f_v3f_UoWu_FM_Burner_dataplot_config.csv", "./Plots_03/"),
    ("04_averaging_15s_vs_40s_v2e_v3i_UoWu_FM_Burner_dataplot_config.csv", "./Plots_04/"),
    ("UoW_FM_Burner_dataplot_config.csv", "./Plots_all/"),
]

for config_filename, plot_dir in plot_configs:
    macfp.dataplot(config_filename=config_filename,
                   institute='UWuppertal',
                   expdir='../../../Experimental_Data/',
                   pltdir=plot_dir,
                   close_figs=True,
                   verbose=True,
                   plot_list=['all'])
| mit |
cybernet14/scikit-learn | sklearn/decomposition/tests/test_pca.py | 199 | 10949 | import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.decomposition import RandomizedPCA
from sklearn.decomposition.pca import _assess_dimension_
from sklearn.decomposition.pca import _infer_dimension_
# Shared fixture: the 150x4 iris measurements used by several tests below.
iris = datasets.load_iris()
def test_pca():
    """PCA on dense arrays: transform/fit_transform consistency, variance
    ratios summing to one, and covariance/precision being mutual inverses."""
    X = iris.data

    estimator = PCA(n_components=2)
    transformed = estimator.fit(X).transform(X)
    np.testing.assert_equal(transformed.shape[1], 2)
    refit_transformed = estimator.fit_transform(X)
    assert_array_almost_equal(transformed, refit_transformed)

    estimator = PCA()
    estimator.fit(X)
    assert_almost_equal(estimator.explained_variance_ratio_.sum(), 1.0, 3)
    transformed = estimator.transform(X)
    refit_transformed = estimator.fit_transform(X)
    assert_array_almost_equal(transformed, refit_transformed)

    # get_covariance() and get_precision() must invert each other for
    # n_components == 0, < n_features, and == n_features.
    for k in [0, 2, X.shape[1]]:
        estimator.n_components = k
        estimator.fit(X)
        product = np.dot(estimator.get_covariance(), estimator.get_precision())
        assert_array_almost_equal(product, np.eye(X.shape[1]), 12)
def test_whitening():
    """Whitened PCA/RandomizedPCA output has unit variance per component;
    unwhitened output keeps the original variance structure."""
    rng = np.random.RandomState(0)
    n_samples = 100
    n_features = 80
    n_components = 30
    rank = 50
    # some low rank data with correlated features
    X = np.dot(rng.randn(n_samples, rank),
               np.dot(np.diag(np.linspace(10.0, 1.0, rank)),
                      rng.randn(rank, n_features)))
    # the component-wise variance of the first 50 features is 3 times the
    # mean component-wise variance of the remaining 30 features
    X[:, :50] *= 3
    assert_equal(X.shape, (n_samples, n_features))
    # the component-wise variance is thus highly varying:
    assert_almost_equal(X.std(axis=0).std(), 43.9, 1)
    # exercise both solvers with both copy settings
    for this_PCA, copy in [(x, y) for x in (PCA, RandomizedPCA)
                           for y in (True, False)]:
        # whiten the data while projecting to the lower dim subspace
        X_ = X.copy()  # make sure we keep an original across iterations.
        pca = this_PCA(n_components=n_components, whiten=True, copy=copy)
        # test fit_transform
        X_whitened = pca.fit_transform(X_.copy())
        assert_equal(X_whitened.shape, (n_samples, n_components))
        X_whitened2 = pca.transform(X_)
        assert_array_almost_equal(X_whitened, X_whitened2)
        assert_almost_equal(X_whitened.std(axis=0), np.ones(n_components))
        assert_almost_equal(X_whitened.mean(axis=0), np.zeros(n_components))
        X_ = X.copy()
        pca = this_PCA(n_components=n_components, whiten=False,
                       copy=copy).fit(X_)
        X_unwhitened = pca.transform(X_)
        assert_equal(X_unwhitened.shape, (n_samples, n_components))
        # in that case the output components still have varying variances
        assert_almost_equal(X_unwhitened.std(axis=0).std(), 74.1, 1)
    # we always center, so no test for non-centering.
def test_explained_variance():
    """PCA and RandomizedPCA report consistent explained variances, and both
    match the empirical variances of the projected data."""
    rng = np.random.RandomState(0)
    n_samples = 100
    n_features = 80
    X = rng.randn(n_samples, n_features)
    pca = PCA(n_components=2).fit(X)
    rpca = RandomizedPCA(n_components=2, random_state=42).fit(X)
    # exact and randomized solvers agree (to loose tolerance) on the spectrum
    assert_array_almost_equal(pca.explained_variance_,
                              rpca.explained_variance_, 1)
    assert_array_almost_equal(pca.explained_variance_ratio_,
                              rpca.explained_variance_ratio_, 3)
    # compare to empirical variances
    X_pca = pca.transform(X)
    assert_array_almost_equal(pca.explained_variance_,
                              np.var(X_pca, axis=0))
    X_rpca = rpca.transform(X)
    assert_array_almost_equal(rpca.explained_variance_,
                              np.var(X_rpca, axis=0))
def test_pca_check_projection():
    """A held-out point lying along the dominant direction [3, 4, 5] must
    project (almost) entirely onto the first principal component."""
    rng = np.random.RandomState(0)
    n, p = 100, 3
    X = rng.randn(n, p) * .1
    X[:10] += np.array([3, 4, 5])  # create the dominant direction
    Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
    Yt = PCA(n_components=2).fit(X).transform(Xt)
    Yt /= np.sqrt((Yt ** 2).sum())  # normalise so the first coordinate is a cosine
    assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_pca_inverse():
    """inverse_transform recovers the data when it is almost of rank
    n_components, with and without whitening."""
    rng = np.random.RandomState(0)
    n, p = 50, 3
    X = rng.randn(n, p)  # spherical data
    X[:, 1] *= .00001  # make middle component relatively small
    X += [5, 4, 3]  # make a large mean
    # same check that we can find the original data from the transformed
    # signal (since the data is almost of rank n_components)
    pca = PCA(n_components=2).fit(X)
    Y = pca.transform(X)
    Y_inverse = pca.inverse_transform(Y)
    assert_almost_equal(X, Y_inverse, decimal=3)
    # same as above with whitening (approximate reconstruction)
    pca = PCA(n_components=2, whiten=True)
    pca.fit(X)
    Y = pca.transform(X)
    Y_inverse = pca.inverse_transform(Y)
    assert_almost_equal(X, Y_inverse, decimal=3)
def test_pca_validation():
    """Out-of-range n_components values must raise ValueError at fit time."""
    data = [[0, 1], [1, 0]]
    for bad_n_components in (-1, 3):
        assert_raises(ValueError, PCA(bad_n_components).fit, data)
def test_randomized_pca_check_projection():
    """Same projection check as test_pca_check_projection, but for the
    randomized solver on dense data."""
    rng = np.random.RandomState(0)
    n, p = 100, 3
    X = rng.randn(n, p) * .1
    X[:10] += np.array([3, 4, 5])  # create the dominant direction
    Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
    Yt = RandomizedPCA(n_components=2, random_state=0).fit(X).transform(Xt)
    Yt /= np.sqrt((Yt ** 2).sum())  # normalise so the first coordinate is a cosine
    assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_randomized_pca_check_list():
    """RandomizedPCA accepts plain Python lists (not just ndarrays)."""
    data = [[1.0, 0.0], [0.0, 1.0]]
    projected = RandomizedPCA(n_components=1,
                              random_state=0).fit(data).transform(data)
    assert_equal(projected.shape, (2, 1))
    # centered output with the expected spread
    assert_almost_equal(projected.mean(), 0.00, 2)
    assert_almost_equal(projected.std(), 0.71, 2)
def test_randomized_pca_inverse():
    """RandomizedPCA.inverse_transform approximately recovers dense data,
    with and without whitening."""
    rng = np.random.RandomState(0)
    n, p = 50, 3
    X = rng.randn(n, p)  # spherical data
    X[:, 1] *= .00001  # make middle component relatively small
    X += [5, 4, 3]  # make a large mean
    # same check that we can find the original data from the transformed signal
    # (since the data is almost of rank n_components)
    pca = RandomizedPCA(n_components=2, random_state=0).fit(X)
    Y = pca.transform(X)
    Y_inverse = pca.inverse_transform(Y)
    assert_almost_equal(X, Y_inverse, decimal=2)
    # same as above with whitening (approximate reconstruction)
    pca = RandomizedPCA(n_components=2, whiten=True,
                        random_state=0).fit(X)
    Y = pca.transform(X)
    Y_inverse = pca.inverse_transform(Y)
    relative_max_delta = (np.abs(X - Y_inverse) / np.abs(X).mean()).max()
    assert_almost_equal(relative_max_delta, 0.11, decimal=2)
def test_pca_dim():
    """n_components='mle' infers dimensionality 1 for rank-one-plus-noise data."""
    rng = np.random.RandomState(0)
    n, p = 100, 5
    X = rng.randn(n, p) * .1
    X[:10] += np.array([3, 4, 5, 1, 2])  # one strong mean-shift direction
    pca = PCA(n_components='mle').fit(X)
    assert_equal(pca.n_components, 'mle')  # constructor argument is preserved
    assert_equal(pca.n_components_, 1)  # inferred value lands in n_components_
def test_infer_dim_1():
    """_assess_dimension_ should peak (almost) at rank 1.

    The data is a rank-one signal (one random direction scaled by
    [3, 4, 5, 1, 2]) plus small isotropic noise and a constant offset, so the
    log-likelihood of dimensionality 1 must be within 1% * n_samples of the
    maximum over all candidate dimensionalities.
    """
    n_samples, n_features = 1000, 5
    rng = np.random.RandomState(0)
    # NOTE: the two rng.randn calls stay in this exact order so the random
    # stream (and therefore the data) matches the original test.
    X = (rng.randn(n_samples, n_features) * .1
         + rng.randn(n_samples, 1) * np.array([3, 4, 5, 1, 2])
         + np.array([1, 0, 7, 4, 6]))
    pca = PCA(n_components=n_features)
    pca.fit(X)
    spectrum = pca.explained_variance_
    log_likelihoods = np.array(
        [_assess_dimension_(spectrum, rank, n_samples, n_features)
         for rank in range(n_features)])
    assert_greater(log_likelihoods[1],
                   log_likelihoods.max() - .01 * n_samples)
def test_infer_dim_2():
    """_infer_dimension_ finds more than one direction when the data
    contains two distinct mean-shifted groups on top of isotropic noise."""
    n_samples, n_features = 1000, 5
    rng = np.random.RandomState(0)
    X = rng.randn(n_samples, n_features) * .1
    X[:10] += np.array([3, 4, 5, 1, 2])   # first shifted group
    X[10:20] += np.array([6, 0, 7, 2, -1])  # second shifted group
    pca = PCA(n_components=n_features)
    pca.fit(X)
    spectrum = pca.explained_variance_
    assert_greater(_infer_dimension_(spectrum, n_samples, n_features), 1)
def test_infer_dim_3():
    """_infer_dimension_ finds more than two directions when the data
    contains three distinct mean-shifted groups."""
    n, p = 100, 5
    rng = np.random.RandomState(0)
    X = rng.randn(n, p) * .1
    X[:10] += np.array([3, 4, 5, 1, 2])  # first shifted group
    X[10:20] += np.array([6, 0, 7, 2, -1])  # second shifted group
    X[30:40] += 2 * np.array([-1, 1, -1, 1, -1])  # third shifted group
    pca = PCA(n_components=p)
    pca.fit(X)
    spect = pca.explained_variance_
    assert_greater(_infer_dimension_(spect, n, p), 2)
def test_infer_dim_by_explained_variance():
    """A float n_components in (0, 1) selects the smallest number of
    components whose cumulative explained variance reaches that fraction."""
    X = iris.data
    pca = PCA(n_components=0.95)
    pca.fit(X)
    assert_equal(pca.n_components, 0.95)
    assert_equal(pca.n_components_, 2)
    pca = PCA(n_components=0.01)
    pca.fit(X)
    assert_equal(pca.n_components, 0.01)
    assert_equal(pca.n_components_, 1)
    rng = np.random.RandomState(0)
    # more features than samples
    X = rng.rand(5, 20)
    pca = PCA(n_components=.5).fit(X)
    assert_equal(pca.n_components, 0.5)
    assert_equal(pca.n_components_, 2)
def test_pca_score():
    """Probabilistic PCA log-likelihood matches the analytic value for
    isotropic Gaussian data with sigma = 0.1."""
    n, p = 1000, 3
    rng = np.random.RandomState(0)
    X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
    pca = PCA(n_components=2)
    pca.fit(X)
    ll1 = pca.score(X)
    # expected average log-likelihood of N(mu, 0.1**2 * I_p)
    h = -0.5 * np.log(2 * np.pi * np.exp(1) * 0.1 ** 2) * p
    np.testing.assert_almost_equal(ll1 / h, 1, 0)
def test_pca_score2():
    """score() is higher on data drawn like the training set than on data
    with a different scale; whitening must not change the score."""
    n, p = 100, 3
    rng = np.random.RandomState(0)
    X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
    pca = PCA(n_components=2)
    pca.fit(X)
    ll1 = pca.score(X)
    # data with doubled noise scale must score lower
    ll2 = pca.score(rng.randn(n, p) * .2 + np.array([3, 4, 5]))
    assert_greater(ll1, ll2)
    # Test that it gives the same scores if whiten=True
    pca = PCA(n_components=2, whiten=True)
    pca.fit(X)
    ll2 = pca.score(X)
    assert_almost_equal(ll1, ll2)
def test_pca_score3():
    """Held-out likelihood model selection picks the true rank (1) for
    rank-one-plus-noise data."""
    n, p = 200, 3
    rng = np.random.RandomState(0)
    Xl = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5])
          + np.array([1, 0, 7]))
    Xt = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5])
          + np.array([1, 0, 7]))
    ll = np.zeros(p)
    for k in range(p):
        pca = PCA(n_components=k)
        pca.fit(Xl)  # fit on the training sample...
        ll[k] = pca.score(Xt)  # ...score on the held-out sample
    assert_true(ll.argmax() == 1)
| bsd-3-clause |
mahajrod/MACE | scripts/windows_boxplots.py | 1 | 12032 | #!/usr/bin/env python
__author__ = 'Sergei F. Kliver'
import os
import argparse
import pandas as pd
import matplotlib.pyplot as plt
from _collections import OrderedDict
from RouToolPa.Collections.General import SynDict, IdList
from MACE.Routines import Visualization, StatsVCF
# Command-line interface: per-file inputs/labels, output naming, optional X
# chromosome ids, and the window scaling used to convert counts to densities.
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input_list", action="store", dest="input_list", required=True, type=lambda s: s.split(","),
                    help="Comma-separated list of files with variant counts in windows.")
parser.add_argument("-l", "--label_list", action="store", dest="label_list", required=True,type=lambda s: s.split(","),
                    help="Comma-separated list of labels for files. Should has the same length as input_list")
parser.add_argument("-o", "--output_prefix", action="store", dest="output_prefix", required=True,
                    help="Prefix of output files")
parser.add_argument("-e", "--output_formats", action="store", dest="output_formats", type=lambda s: s.split(","),
                    default=("png", "svg"),
                    help="Comma-separated list of formats (supported by matlotlib) of "
                         "output figure.Default: svg, png")
parser.add_argument("-x", "--x_list", action="store", dest="x_list", type=lambda s: s.split(","),
                    default=None,
                    help="Comma-separated list of ids of X chromosomes. "
                         "Should include either one element if X chromosome id is thesame for all input files or has the same length as input list."
                         "Could be not set ")
parser.add_argument("-w", "--window_size", action="store", dest="window_size", required=True, type=int,
                    help="Size of the windows. Required.")
parser.add_argument("-m", "--multiplier", action="store", dest="multiplier", default=1000, type=int,
                    help="Multiplier for counts. Default:1000, ie counts will scaled per 1000 bp")
parser.add_argument("-y", "--ylabel", action="store", dest="ylabel", default="variants/kbp",
                    help="Label for Y axis. Default: 'variants/kbp'")
"""
parser.add_argument("-m", "--mean_coverage_list", action="store", dest="mean_coverage_list", required=True,
type=lambda s: list(map(float, s.split(","))),
help="Comma-separated list of mean/median coverage to use")
parser.add_argument("--scaffold_column_name", action="store", dest="scaffold_column_name", default="scaffold",
help="Name of column in coverage file with scaffold ids per window. Default: scaffold")
parser.add_argument("--window_column_name", action="store", dest="window_column_name", default="window",
help="Name of column in coverage file with window id. Default: window")
parser.add_argument("--coverage_column_name_list", action="store", dest="coverage_column_name_list",
default=["median", "mean"],
type=lambda s: s.split(","),
help="Coverage file with mean/median coverage per window. Default: median,mean")
parser.add_argument("-w", "--window_size", action="store", dest="window_size", default=100000, type=int,
help="Size of the windows Default: 100000")
parser.add_argument("-s", "--window_step", action="store", dest="window_step", default=None, type=int,
help="Step of the sliding windows. Default: window size, i.e windows are staking")
parser.add_argument("-a", "--scaffold_white_list", action="store", dest="scaffold_white_list", default=[],
type=lambda s: IdList(filename=s) if os.path.exists(s) else s.split(","),
help="Comma-separated list of the only scaffolds to draw. Default: all")
parser.add_argument("-b", "--scaffold_black_list", action="store", dest="scaffold_black_list", default=[],
type=lambda s: IdList(filename=s) if os.path.exists(s) else s.split(","),
help="Comma-separated list of scaffolds to skip at drawing. Default: not set")
parser.add_argument("-y", "--sort_scaffolds", action="store_true", dest="sort_scaffolds", default=False,
help="Order scaffolds according to their names. Default: False")
parser.add_argument("-z", "--scaffold_ordered_list", action="store", dest="scaffold_ordered_list", default=[],
type=lambda s: IdList(filename=s) if os.path.exists(s) else s.split(","),
help="Comma-separated list of scaffolds to draw first and exactly in same order. "
"Scaffolds absent in this list are drawn last and in order according to vcf file . "
"Default: not set")
parser.add_argument("-n", "--scaffold_length_file", action="store", dest="scaffold_length_file", required=True,
help="File with lengths of scaffolds")
parser.add_argument("--scaffold_syn_file", action="store", dest="scaffold_syn_file",
help="File with scaffold id synonyms")
parser.add_argument("--syn_file_key_column", action="store", dest="syn_file_key_column",
default=0, type=int,
help="Column(0-based) with key(current id) for scaffolds in synonym file. Default: 0")
parser.add_argument("--syn_file_value_column", action="store", dest="syn_file_value_column",
default=1, type=int,
help="Column(0-based) with value(synonym id) for scaffolds in synonym file synonym. Default: 1")
parser.add_argument("--colormap", action="store", dest="colormap", default="jet",
help="Matplotlib colormap to use for SNP densities. Default: jet")
parser.add_argument("--coverage_thresholds", action="store", dest="coverage_thresholds",
default=(0.0, 0.25, 0.75, 1.25, 1.75, 2.25),
type=lambda s: list(map(float, s.split(","))),
help="Comma-separated list of coverage thresholds(relative to mean/median) to use for "
"window coloring."
"Default: (0.0, 0.25, 0.75, 1.25, 1.75, 2.25)")
parser.add_argument("--split_coverage_thresholds", action="store_true", dest="split_coverage_thresholds",
help="Use (0.0, 0.125, 0.25, 0.375, 0.5, 0.625, 0.75,"
"0.875, 1.0, 1.125, 1.25, 1.375, 1.5, 1.625,"
"1.75, 1.875, 2.0, 2.125, 2.25) as thresholds instead of default ones."
"Doesn't work if --coverage_thresholds is set")
parser.add_argument("--test_colormaps", action="store_true", dest="test_colormaps",
help="Test colormaps. If set --colormap option will be ignored")
parser.add_argument("--hide_track_label", action="store_true", dest="hide_track_label", default=False,
help="Hide track label. Default: False")
parser.add_argument("--absolute_coverage_values", action="store_true", dest="absolute_coverage_values",
help="Use absolute coverage values. Default: False")
parser.add_argument("--figure_width", action="store", dest="figure_width", type=float, default=15,
help="Width of figure in inches. Default: 15")
parser.add_argument("--figure_height_per_scaffold", action="store", dest="figure_height_per_scaffold",
type=float, default=0.5,
help="Height of figure per chromosome track. Default: 0.5")
parser.add_argument("-v", "--verbose", action="store_true", dest="verbose",
help="Print additional info to stdout")
"""
# Figure-margin tuning knobs, forwarded to plt.subplots_adjust() below.
parser.add_argument("--subplots_adjust_left", action="store", dest="subplots_adjust_left", type=float, default=0.15,
                    help="Adjust left border of subplots on the figure. Default: 0.15")
parser.add_argument("--subplots_adjust_top", action="store", dest="subplots_adjust_top", type=float, default=0.98,
                    help="Adjust top border of subplots on the figure. Default: 0.98")
parser.add_argument("--subplots_adjust_right", action="store", dest="subplots_adjust_right", type=float, default=0.98,
                    help="Adjust right border of subplots on the figure. Default: 0.98")
parser.add_argument("--subplots_adjust_bottom", action="store", dest="subplots_adjust_bottom", type=float, default=0.17,
                    help="Adjust bottom border of subplots on the figure. Default: 0.17")

args = parser.parse_args()
# Validate that labels (and X chromosome ids, if any) line up with the inputs.
number_of_files = len(args.input_list)
if number_of_files != len(args.label_list):
    raise ValueError("ERROR!!! Number of input files({0}) is not equal to number of labels({1})".format(number_of_files, len(args.label_list)))

# Normalize the X chromosome id list: absent, one id shared by all files,
# or one id per file.
if args.x_list is None:
    x_chr_list = []
elif len(args.x_list) == 1:
    x_chr_list = args.x_list * number_of_files
elif number_of_files != len(args.x_list):
    raise ValueError("ERROR!!! Number of X chromosome ids is required to be one or equal to number of input files({0})".format(number_of_files))
else:
    x_chr_list = args.x_list

file_dict = OrderedDict(zip(args.label_list, args.input_list))
x_chr_dict = OrderedDict(zip(args.label_list, x_chr_list)) if x_chr_list else OrderedDict()

# Per-label frames: full data, autosomes only, and X chromosome only.
df_dict = OrderedDict({})
noX_df_dict = OrderedDict({})
onlyX_df_dict = OrderedDict({})
for entry in file_dict:
    df_dict[entry] = pd.read_csv(file_dict[entry], sep="\t", index_col=["CHROM",])
    if x_chr_dict:
        # .copy() so the "density" assignment below writes to independent
        # frames rather than slice views of df_dict[entry]
        # (avoids pandas SettingWithCopyWarning / lost writes).
        noX_df_dict[entry] = df_dict[entry].loc[df_dict[entry].index != x_chr_dict[entry], :].copy()
        onlyX_df_dict[entry] = df_dict[entry].loc[df_dict[entry].index == x_chr_dict[entry], :].copy()
    # Scale raw per-window counts to variants per <multiplier> bp (default kbp).
    df_dict[entry]["density"] = df_dict[entry]["All"] * args.multiplier / args.window_size
    if x_chr_dict:
        noX_df_dict[entry]["density"] = noX_df_dict[entry]["All"] * args.multiplier / args.window_size
        onlyX_df_dict[entry]["density"] = onlyX_df_dict[entry]["All"] * args.multiplier / args.window_size
# Draw the boxplot panel: three boxes (all / noX / onlyX) per input file when
# X chromosome ids were supplied, otherwise one box per file.
figure_height = 6
figure_width = (number_of_files * 3) if x_chr_dict else number_of_files
dpi = 300
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(figure_width, figure_height), dpi=dpi)
fig.patch.set_color('white')

data_list = []
label_list = []
x_pos_list = []
inner_distance = 1  # gap between boxes belonging to the same file
outer_distance = 2  # wider gap between groups from different files

if x_chr_dict:
    boxes_per_file = 3  # "all", "noX", "onlyX"
    distance = inner_distance
    for entry in file_dict:
        for data, label in zip((df_dict, noX_df_dict, onlyX_df_dict), ("all", "noX", "onlyX")):
            data_list.append(data[entry]["density"])
            label_list.append(label)
            if not x_pos_list:
                x_pos_list.append(0)
            else:
                x_pos_list.append(x_pos_list[-1] + distance)
            distance = inner_distance
        distance = outer_distance  # next box starts a new file's group
    plt.xticks(rotation=0)
else:
    distance = inner_distance
    for entry in file_dict:
        data_list.append(df_dict[entry]["density"])
        label_list.append(entry)
        if not x_pos_list:
            x_pos_list.append(0)
        else:
            x_pos_list.append(x_pos_list[-1] + distance)
    plt.xticks(rotation=45, fontstyle='italic')

plt.boxplot(data_list, labels=label_list, positions=x_pos_list)
plt.ylabel(args.ylabel)
plt.ylim(ymin=-0.1)
plt.grid(linestyle='dotted')

max_data_y = max(list(map(max, data_list)))
min_data_y = min(list(map(min, data_list)))
if x_chr_dict:
    # Write each file label centered under its group of three boxes.
    # Bug fix: group offsets step by boxes_per_file (3), not by the number of
    # input files, so labels are placed correctly for any number of inputs.
    for index in range(0, number_of_files):
        group_start = x_pos_list[index * boxes_per_file]
        group_end = x_pos_list[(index + 1) * boxes_per_file - 1]
        label_x_coord = group_start + (group_end - group_start) / 2
        plt.text(label_x_coord, min_data_y - max_data_y / 6, args.label_list[index],
                 fontstyle="italic", horizontalalignment='center')

plt.subplots_adjust(top=args.subplots_adjust_top, bottom=args.subplots_adjust_bottom,
                    left=args.subplots_adjust_left, right=args.subplots_adjust_right)
# Bug fix: honor the --output_formats option instead of hard-coded png/svg.
for ext in args.output_formats:
    plt.savefig("{0}.{1}".format(args.output_prefix, ext), transparent=False)
| apache-2.0 |
ali-abdullah/nlp | bag-of-words.py | 1 | 3849 | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import sklearn as sk
import re
BOW_df = pd.DataFrame(columns=['1','2','3','4','5'])  # per-word counts, one column per star rating
df = pd.read_csv("Reviews.csv")  # Amazon fine-food reviews dump
words_set = {'.'}  # vocabulary seen so far; seeded with '.' so the set is non-empty
#print(BOW_df.head())
def prepare_data(df):
    """Reduce the reviews frame to its Score and Text columns.

    Drops the id/metadata columns, prints the head for a quick sanity check,
    and returns the trimmed frame.
    """
    unused_columns = ['Id', 'ProductId', 'UserId', 'ProfileName',
                      'HelpfulnessNumerator', 'HelpfulnessDenominator',
                      'Time', 'Summary']
    df = df.drop(unused_columns, axis = 1)
    # Optional coarse relabeling (disabled): map 1-5 stars onto -1/0/+1.
    #df['Score'] = df['Score'].map( { 1 : -1, 2 : -1, 3 : 0 , 4 : 1, 5 : 1} )
    print(df.head())
    return df
df = prepare_data(df).sample(frac = 1, random_state = 21)  # deterministic shuffle of all reviews
train = df.iloc[:10000,:]  # first 10k shuffled rows: build the word counts
test = df.iloc[20000:20020,:]  # 20 disjoint held-out rows: evaluate
def Tokenizer(text):
    """Normalize *text* and return its list of lowercase word tokens.

    HTML tags are replaced by spaces, punctuation is removed, runs of
    whitespace are collapsed to single spaces, and the result is lowercased
    before splitting on spaces.
    """
    # Bug fix: str.rstrip() returns a new string; the original call discarded
    # the result, so trailing whitespace produced a spurious '' token.
    text = text.rstrip()
    text = re.sub("(<[^>]*>)", " ", text)  # replace HTML tags with spaces
    text = re.sub(r"[^\w\s]", "", text)    # drop punctuation
    text = re.sub(r"\s+", " ", text)       # collapse internal whitespace
    text = text.lower()
    words = text.split(" ")
    return words
# Build the bag-of-words table: for every training review, bump the count of
# each token in the column of the review's star rating.
for i, review in train.iterrows():
    text = review['Text']
    score = review['Score']
    score -= 1  # map 1..5 star rating onto 0..4
    tokenized_text = Tokenizer(text)
    for word in tokenized_text:
        if word not in words_set:
            words_set.add(word)
            BOW_df.loc[word] = [0,0,0,0,0]  # new row of zero counts for this word
            # NOTE(review): BOW_df.loc[word][score] is chained indexing with an
            # integer key on string-labelled columns; it relies on positional
            # fallback and may warn or lose the write on newer pandas — verify.
            BOW_df.loc[word][score] += 1
        else:
            BOW_df.loc[word][score] += 1
print(BOW_df)
# Convert raw counts into per-word rating distributions, but only for words
# seen more than 1000 times in some rating; rarer (or degenerate) words are
# zeroed out so they never rank as sentiment markers.
for i, word_freq in BOW_df.iterrows():
    total = 0.0
    total = word_freq['1'] + word_freq['2'] + word_freq['3'] + word_freq['4'] + word_freq['5']
    #print(total)
    #float(word_freq['1']) / float(total)
    if (word_freq['1'] > 1000) or (word_freq['2'] > 1000) or (word_freq['3'] > 1000) or (word_freq['4'] > 1000) or (word_freq['5'] > 1000) :
        word_freq['1'] = float(word_freq['1']) / float(total)
        word_freq['2'] = float(word_freq['2']) / float(total)
        word_freq['3'] = float(word_freq['3']) / float(total)
        word_freq['4'] = float(word_freq['4']) / float(total)
        word_freq['5'] = float(word_freq['5']) / float(total)
    else:
        word_freq['1'] = 0
        word_freq['2'] = 0
        word_freq['3'] = 0
        word_freq['4'] = 0
        word_freq['5'] = 0
    # Words concentrated entirely in one rating are also discarded (ratio 1
    # usually means a tiny, unreliable sample after the threshold above).
    if (word_freq['1'] == 1) or (word_freq['2'] == 1) or (word_freq['3'] == 1) or (word_freq['4'] == 1) or (word_freq['5'] == 1) :
        word_freq['1'] = 0
        word_freq['2'] = 0
        word_freq['3'] = 0
        word_freq['4'] = 0
        word_freq['5'] = 0
# Top 400 five-star-heavy words and top 400 one-star-heavy words.
# NOTE(review): .iloc[:400, 5:5] selects ZERO columns (slice 5:5 is empty);
# only the row index is used below via .index.values, so this still works,
# but .iloc[:400] would express the intent directly — verify before changing.
BOW_df = BOW_df.sort_values(['5'] , ascending=[0])
positive_words = BOW_df.iloc[:400, 5:5]
BOW_df = BOW_df.sort_values(['1'] , ascending=[0])
negative_words = BOW_df.iloc[:400, 5:5]
# Evaluate on the held-out rows: a review is scored by (positive-word count
# minus negative-word count); the sign is compared against its star rating.
correct_guesses = 0
incorrect_guesses = 0
for i, review in test.iterrows():
    text = review['Text']
    score = review['Score']
    print(text)
    print(score)
    tokenized_text = Tokenizer(text)
    postive_count = 0
    negative_count = 0
    for word in tokenized_text:
        if word in positive_words.index.values :
            postive_count += 1
        if word in negative_words.index.values:
            negative_count += 1
    total = postive_count - negative_count
    print("the total is")
    print(total)
    # positive balance must match 4-5 stars, negative 1-2 stars, zero 3 stars
    if (total > 0 and score == 4) or (total > 0 and score == 5) or (total < 0 and score == 2) or (total < 0 and score == 1) or (total == 0 and score == 3):
        correct_guesses += 1
    else :
        incorrect_guesses += 1
print("correct guesses")
print(correct_guesses)
print("incorrect guesses")
print(incorrect_guesses)
score = float(correct_guesses) / float(correct_guesses + incorrect_guesses)
print("Your score is: ")
print(score)
print(BOW_df)
print(positive_words)
print(negative_words)
| mit |
yuanagain/seniorthesis | venv/lib/python2.7/site-packages/matplotlib/testing/noseclasses.py | 8 | 2186 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import os
from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin
from matplotlib.testing.exceptions import (KnownFailureTest,
KnownFailureDidNotFailTest,
ImageComparisonFailure)
class KnownFailure(ErrorClassPlugin):
    '''Plugin that installs a KNOWNFAIL error class for the
    KnownFailureClass exception.  When KnownFailureTest is raised,
    the exception will be logged in the knownfail attribute of the
    result, 'K' or 'KNOWNFAIL' (verbose) will be output, and the
    exception will not be counted as an error or failure.

    This is based on numpy.testing.noseclasses.KnownFailure.
    '''
    enabled = True
    # ErrorClass descriptor: report KnownFailureTest as 'KNOWNFAIL' without
    # counting it as a failure.
    knownfail = ErrorClass(KnownFailureTest,
                           label='KNOWNFAIL',
                           isfailure=False)

    def options(self, parser, env=os.environ):
        """Register the --no-knownfail switch (or NOSE_WITHOUT_KNOWNFAIL)."""
        env_opt = 'NOSE_WITHOUT_KNOWNFAIL'
        parser.add_option('--no-knownfail', action='store_true',
                          dest='noKnownFail', default=env.get(env_opt, False),
                          help='Disable special handling of KnownFailureTest '
                          'exceptions')

    def configure(self, options, conf):
        """Enable or disable the plugin from the parsed nose options."""
        if not self.can_configure:
            return
        self.conf = conf
        disable = getattr(options, 'noKnownFail', False)
        if disable:
            self.enabled = False

    def addError(self, test, err, *zero_nine_capt_args):
        # Fixme (Really weird): if I don't leave empty method here,
        # nose gets confused and KnownFails become testing errors when
        # using the MplNosePlugin and MplTestCase.

        # The *zero_nine_capt_args captures an extra argument. There
        # seems to be a bug in
        # nose.testing.manager.ZeroNinePlugin.addError() in which a
        # 3rd positional argument ("capt") is passed to the plugin's
        # addError() method, even if one is not explicitly using the
        # ZeroNinePlugin.
        pass
| mit |
bjornsturmberg/EMUstack | docs/source/conf.py | 2 | 8976 | # -*- coding: utf-8 -*-
#
# EMUstack documentation build configuration file, created by
# sphinx-quickstart on Sat Jun 14 14:17:22 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
class Mock(object):
    """Import-time stand-in for heavy dependencies during the docs build.

    Instances absorb construction and calls; attribute lookups return
    '/dev/null' for module-path dunders, a fresh empty class for
    capitalized names (so `class Foo(mocked.Base)` still works), and a
    new Mock for everything else.
    """
    __all__ = []

    def __init__(self, *args, **kwargs):
        pass

    def __call__(self, *args, **kwargs):
        return Mock()

    @classmethod
    def __getattr__(cls, name):
        if name in ('__file__', '__path__'):
            return '/dev/null'
        if name[0] == name[0].upper():
            # Looks like a class name: hand back a usable empty type.
            return type(name, (), {})
        return Mock()
# Pre-register mocks for every heavy/compiled dependency so autodoc can
# import the backend package without them being installed on the docs host.
MOCK_MODULES = ['scipy', 'scipy.interpolate', 'numpy',
                'matplotlib', 'matplotlib.pyplot', 'matplotlib.mlab', 'matplotlib.gridspec',
                'fortran']
for mod_name in MOCK_MODULES:
    sys.modules[mod_name] = Mock()
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# Make the EMUstack backend package importable for autodoc.
sys.path.insert(0, os.path.abspath('../../backend/'))
#
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.7'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.doctest',
    'sphinx.ext.todo',
    'sphinx.ext.coverage',
    'sphinx.ext.mathjax',
    'sphinx.ext.viewcode',
    'sphinxcontrib.napoleon'  # Google/NumPy style docstring support
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'EMUstack'
copyright = u'2014, Björn Sturmberg'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.  Keep version and release in sync when bumping.
#
# The short X.Y version.
version = '0.9'
# The full version, including alpha/beta/rc tags.
release = '0.9.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'EMUstackdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'EMUstack.tex', u'EMUstack Documentation',
u'Björn Sturmberg', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'emustack', u'EMUstack Documentation',
[u'Björn Sturmberg'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'EMUstack', u'EMUstack Documentation',
u'Björn Sturmberg', 'EMUstack', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| gpl-3.0 |
makelove/OpenCV-Python-Tutorial | ch46-机器学习-K近邻/2-英文字母的OCR.py | 1 | 1291 | # -*- coding: utf-8 -*-
# @Time    : 2017/7/13 7:35 PM
# @Author  : play4fun
# @File    : 2-英文字母的OCR.py
# @Software: PyCharm
"""
2-英文字母的OCR.py:
OCR of the 26 English letters with OpenCV's k-Nearest-Neighbours classifier,
trained on the UCI letter-recognition data set.
"""
import cv2
import numpy as np
import matplotlib.pyplot as plt

# Load the data; the converter maps the letter label 'A'..'Z' in column 0
# to the integers 0..25 so the whole file parses as float32.
data = np.loadtxt('../data/letter-recognition.data', dtype='float32', delimiter=',',
                  converters={0: lambda ch: ord(ch) - ord('A')})  # 20000 rows
# split the data to two, 10000 each for train and test
train, test = np.vsplit(data, 2)
# split trainData and testData to features and responses
# (hsplit at column 1: first column is the label, the rest are features)
responses, trainData = np.hsplit(train, [1])
labels, testData = np.hsplit(test, [1])
# Initiate the kNN, classify, measure accuracy.
knn = cv2.ml.KNearest_create()
knn.train(trainData, cv2.ml.ROW_SAMPLE, responses)
ret, result, neighbours, dist = knn.findNearest(testData, k=5)
correct = np.count_nonzero(result == labels)
accuracy = correct * 100.0 / 10000
print('准确率', accuracy)  # the printed label means "accuracy"; ~93.06 here
# Accuracy reaches about 93.2%; adding more training samples would raise it further.
# save the data
np.savez('knn_data_alphabet.npz', train_alphabet=train, train_labels_alphabet=responses,test_alphabet=testData,test_labels_alphabet=labels)
# How to predict a letter? Exactly the same procedure as predicting digits.
| mit |
mjudsp/Tsallis | examples/neighbors/plot_classification.py | 58 | 1790 | """
================================
Nearest Neighbors Classification
================================
Sample usage of Nearest Neighbors classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import neighbors, datasets
n_neighbors = 15

# Load the iris data and keep only the first two features, so the decision
# surface can be drawn in the plane.
iris = datasets.load_iris()
X = iris.data[:, :2]
y = iris.target

h = .02  # mesh step size for the decision-surface grid

# Colour maps: pale shades for the decision regions, saturated shades for
# the training points.
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])

for weights in ['uniform', 'distance']:
    # Fit a k-NN classifier with the current weighting scheme.
    knn_clf = neighbors.KNeighborsClassifier(n_neighbors, weights=weights)
    knn_clf.fit(X, y)

    # Predict on a dense grid covering the data (plus a one-unit margin)
    # to obtain the decision regions.
    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))
    grid_points = np.c_[xx.ravel(), yy.ravel()]
    Z = knn_clf.predict(grid_points).reshape(xx.shape)

    # One figure per weighting scheme: regions first, then the samples.
    plt.figure()
    plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
    plt.xlim(xx.min(), xx.max())
    plt.ylim(yy.min(), yy.max())
    plt.title("3-Class classification (k = %i, weights = '%s')"
              % (n_neighbors, weights))

plt.show()
| bsd-3-clause |
kingsBSD/MobileMinerPlugin | ckanext-mobileminer/ckanext/mobileminer/st_cluster.py | 1 | 5541 | # Licensed under the Apache License Version 2.0: http://www.apache.org/licenses/LICENSE-2.0.txt
__author__ = 'Giles Richard Greenway'
import dateutil.parser
import time
#import numpy as np
from itertools import tee, izip
#from sklearn import preprocessing
#from sklearn.cluster import KMeans
import requests
def vectors_from_points(points):
    """Yield a position+velocity vector for each consecutive pair of points.

    Parameters
    ----------
    points : iterable of (lat, lon, datetime) triples, in time order.

    Yields
    ------
    numpy.ndarray of shape (4,) : ``[lat, lon, d_lat/dt, d_lon/dt]`` where
    ``lat``/``lon`` come from the *later* point of the pair and the rates
    are in degrees per hour.  When the timestamps are not strictly
    increasing (``dt <= 0``) the rates are reported as ``0.0`` rather than
    dividing by zero.

    Notes
    -----
    The original implementation paired the stream with
    ``itertools.tee``/``izip``; ``izip`` does not exist on Python 3, so the
    pairing is done with a plain iterator here (same output, both
    Python 2 and 3).
    """
    import numpy as np
    iterator = iter(points)
    try:
        last_point = next(iterator)
    except StopIteration:
        # Empty input: nothing to pair, nothing to yield.
        return
    for next_point in iterator:
        lat, lon = next_point[0], next_point[1]
        last_lat, last_lon = last_point[0], last_point[1]
        # Elapsed time between the two fixes, in hours.
        dt = (next_point[2] - last_point[2]).total_seconds() / 3600.0
        d_lat = lat - last_lat
        d_lon = lon - last_lon
        if dt > 0.0:
            yield np.array([lat, lon, d_lat / dt, d_lon / dt])
        else:
            yield np.array([lat, lon, 0.0, 0.0])
        last_point = next_point
def cluster(points):
    """Cluster a time-ordered sequence of (lat, lon, datetime) GPS points.

    k-means is run on scaled position+velocity vectors, with k grown until
    the inertia stops improving by more than ~10%.  Each cluster is then
    summarised: its typical active hours, its typical days of the week, and
    a reverse-geocoded place name from OpenStreetMap Nominatim.

    Returns
    -------
    (cluster_sets, cluster_sequence) where
    cluster_sets     -- one summary dict per non-empty cluster
    cluster_sequence -- [{'cluster': label, 'time': datetime}, ...] per point
    NOTE(review): the empty-data early exit returns a bare [] rather than a
    2-tuple -- callers that unpack the result will fail on empty input.
    NOTE: Python 2 only ('<>' operator, iterator .next()).
    """
    import numpy as np
    from sklearn import preprocessing
    from sklearn.cluster import KMeans
    # Standardise features to zero mean / unit variance (in place: copy=False).
    scaler = preprocessing.StandardScaler(copy=False)
    data_points, vector_points = tee(points)
    data = scaler.fit_transform(np.array([ i for i in vectors_from_points(vector_points) ]))
    if data.size == 0:
        return []
    # Grow k from 2 upward; stop once the new inertia is >= 90% of the
    # previous one (i.e. less than ~10% improvement).
    mean_centroid_dist = {}
    for n_clusters in range(2,30):
        k = KMeans(n_clusters=n_clusters)
        k.fit(data)
        mean_centroid_dist[n_clusters] = k.inertia_
        last_mcd = mean_centroid_dist.get(n_clusters-1,False)
        if last_mcd:
            improve = int(100 * k.inertia_ / last_mcd)
            if improve >= 90:
                break
    # Collect per-cluster timestamps plus the point-by-point label sequence.
    cluster_times = dict([(i,[]) for i in range(n_clusters) ])
    cluster_sequence = []
    # Skip the first point: each feature vector pairs point i with point i-1,
    # so labels_ is one element shorter than the point stream.
    data_points.next()
    for i, point in enumerate(data_points):
        cluster_times[k.labels_[i]].append(point[2])
        cluster_sequence.append({'cluster':k.labels_[i],'time':point[2]})
    total_days = {}
    early_hours = {}
    late_hours = {}
    common_days = {}
    days = dict([ (i,d) for i,d in enumerate(['Mondays','Tuesdays','Wednesdays','Thursdays','Fridays', 'Saturdays', 'Sundays']) ])
    # Ratios scale counts over 1 day, the other 6 days, the 5 weekdays and
    # the 2 weekend days up to a full week, so they can be compared directly.
    day_ratio = 7.0
    other_ratio = 7.0 / 6.0
    week_ratio = 7.0 / 5.0
    weekend_ratio = 7.0 / 2.0
    for i in range(n_clusters):
        # Number of distinct calendar days on which this cluster was seen.
        total_days[i] = len(set([ t.date().toordinal() for t in cluster_times[i] ]))
        hour_hist = np.histogram([t.hour for t in cluster_times[i]], bins=24, density=False, range=(0,23))[0]
        if 0 not in hour_hist:
            # Activity in every hour bin: the cluster spans the whole day.
            early, late = (0,23)
        else:
            # Find the longest contiguous run of hours whose count exceeds
            # 20% of the busiest hour; its ends are the "early"/"late" hours.
            thresh = max(hour_hist) / 5.0
            # NOTE(review): the comprehension repeats the same boolean array
            # per hour and takes element [0] -- equivalent to just
            # `hour_hist > thresh`; presumably a leftover, confirm.
            above_thresh = [ hour_hist > thresh for h in hour_hist ][0]
            transitions= np.diff(above_thresh).nonzero()[0] + 1
            transitions += 1
            if above_thresh[0]:
                transitions = np.r_[0, transitions]
            if above_thresh[-1]:
                transitions = np.r_[transitions, transitions.size]
            transitions.shape = (-1,2)
            if transitions.size <> 0:
                early, late = transitions[np.argmax([ t[1]-t[0] for t in transitions ])]
            else:
                early, late = (0,23)
        early_hours[i] = early
        late_hours[i] = late
        day_hist = np.histogram([ t.isoweekday() for t in cluster_times[i] ], bins=7, density=False, range=(1,7) )[0]
        # A single weekday dominating 5x the (scaled) rest names the cluster.
        for day in range(7):
            if day_ratio * day_hist[day] > 5 * other_ratio * sum([day_hist[j] for j in range(7) if j <> day ]):
                common_days[i] = days[day]
                break
        if common_days.get(i,False):
            continue
        # Otherwise decide between weekdays, weekends and "all days".
        w_days = week_ratio * sum([ day_hist[j] for j in range(5) ])
        we_days = weekend_ratio * sum([ day_hist[j] for j in range(5,7) ])
        if w_days > 5 * we_days:
            common_days[i] = "weekdays"
            continue
        if we_days > 5 * w_days:
            common_days[i] = "weekends"
            continue
        common_days[i] = "all days"
    cluster_sets = []
    for i,c in enumerate(k.cluster_centers_):
        if len(cluster_times[i]):
            # Undo the feature scaling to recover real-world lat/lon/rates,
            # then reverse-geocode the centre via Nominatim (network call).
            point = scaler.inverse_transform(c)
            pars = {'format':'json', 'zoom':18, 'addressdetails':1, 'lat':"{:12.8f}".format(point[0]), 'lon':"{:12.8f}".format(point[1])}
            try:
                resp = requests.get('http://nominatim.openstreetmap.org/reverse',params=pars)
                place = resp.json().get('display_name','not found')
                osm_id = str(resp.json().get('osm_id','not found'))
            except:
                # Best effort: any network/JSON failure degrades to 'not found'.
                place = 'not found'
                osm_id = 'not found'
            # NOTE(review): 'lon_rate' is populated from point[2] (the same
            # value as 'lat_rate'); it looks like it should be point[3] --
            # confirm before relying on this field.
            cluster_sets.append({'lat':point[0], 'lon':point[1], 'lat_rate':point[2], 'lon_rate':point[2],
                                 'place':place, 'osm_id':osm_id, 'measurements':len(cluster_times[i]),
                                 'early_hour':early_hours[i], 'late_hour':late_hours[i],
                                 'day_range':int((max(cluster_times[i])-min(cluster_times[i])).total_seconds()/86400.0), 'total_days':total_days[i],
                                 'weekdays':common_days.get(i,"None") })
    return cluster_sets,cluster_sequence
| apache-2.0 |
schets/scikit-learn | examples/cluster/plot_kmeans_silhouette_analysis.py | 242 | 5885 | """
===============================================================================
Selecting the number of clusters with silhouette analysis on KMeans clustering
===============================================================================
Silhouette analysis can be used to study the separation distance between the
resulting clusters. The silhouette plot displays a measure of how close each
point in one cluster is to points in the neighboring clusters and thus provides
a way to assess parameters like number of clusters visually. This measure has a
range of [-1, 1].
Silhoette coefficients (as these values are referred to as) near +1 indicate
that the sample is far away from the neighboring clusters. A value of 0
indicates that the sample is on or very close to the decision boundary between
two neighboring clusters and negative values indicate that those samples might
have been assigned to the wrong cluster.
In this example the silhouette analysis is used to choose an optimal value for
``n_clusters``. The silhouette plot shows that the ``n_clusters`` value of 3, 5
and 6 are a bad pick for the given data due to the presence of clusters with
below average silhouette scores and also due to wide fluctuations in the size
of the silhouette plots. Silhouette analysis is more ambivalent in deciding
between 2 and 4.
Also from the thickness of the silhouette plot the cluster size can be
visualized. The silhouette plot for cluster 0 when ``n_clusters`` is equal to
2, is bigger in size owing to the grouping of the 3 sub clusters into one big
cluster. However when the ``n_clusters`` is equal to 4, all the plots are more
or less of similar thickness and hence are of similar sizes as can be also
verified from the labelled scatter plot on the right.
"""
from __future__ import print_function
from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
print(__doc__)

# Generating the sample data from make_blobs
# This particular setting has one distict cluster and 3 clusters placed close
# together.
X, y = make_blobs(n_samples=500,
                  n_features=2,
                  centers=4,
                  cluster_std=1,
                  center_box=(-10.0, 10.0),
                  shuffle=True,
                  random_state=1)  # For reproducibility

range_n_clusters = [2, 3, 4, 5, 6]

for n_clusters in range_n_clusters:
    # Create a subplot with 1 row and 2 columns
    fig, (ax1, ax2) = plt.subplots(1, 2)
    fig.set_size_inches(18, 7)

    # The 1st subplot is the silhouette plot
    # The silhouette coefficient can range from -1, 1 but in this example all
    # lie within [-0.1, 1]
    ax1.set_xlim([-0.1, 1])
    # The (n_clusters+1)*10 is for inserting blank space between silhouette
    # plots of individual clusters, to demarcate them clearly.
    ax1.set_ylim([0, len(X) + (n_clusters + 1) * 10])

    # Initialize the clusterer with n_clusters value and a random generator
    # seed of 10 for reproducibility.
    clusterer = KMeans(n_clusters=n_clusters, random_state=10)
    cluster_labels = clusterer.fit_predict(X)

    # The silhouette_score gives the average value for all the samples.
    # This gives a perspective into the density and separation of the formed
    # clusters
    silhouette_avg = silhouette_score(X, cluster_labels)
    print("For n_clusters =", n_clusters,
          "The average silhouette_score is :", silhouette_avg)

    # Compute the silhouette scores for each sample
    sample_silhouette_values = silhouette_samples(X, cluster_labels)

    y_lower = 10
    for i in range(n_clusters):
        # Aggregate the silhouette scores for samples belonging to
        # cluster i, and sort them
        ith_cluster_silhouette_values = \
            sample_silhouette_values[cluster_labels == i]

        ith_cluster_silhouette_values.sort()

        size_cluster_i = ith_cluster_silhouette_values.shape[0]
        y_upper = y_lower + size_cluster_i

        # NOTE(review): cm.spectral was removed in matplotlib >= 2.2; newer
        # matplotlib versions need cm.nipy_spectral here -- confirm against
        # the pinned matplotlib version before upgrading.
        color = cm.spectral(float(i) / n_clusters)
        ax1.fill_betweenx(np.arange(y_lower, y_upper),
                          0, ith_cluster_silhouette_values,
                          facecolor=color, edgecolor=color, alpha=0.7)

        # Label the silhouette plots with their cluster numbers at the middle
        ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))

        # Compute the new y_lower for next plot
        y_lower = y_upper + 10  # 10 for the 0 samples

    ax1.set_title("The silhouette plot for the various clusters.")
    ax1.set_xlabel("The silhouette coefficient values")
    ax1.set_ylabel("Cluster label")

    # The vertical line for average silhoutte score of all the values
    ax1.axvline(x=silhouette_avg, color="red", linestyle="--")

    ax1.set_yticks([])  # Clear the yaxis labels / ticks
    ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])

    # 2nd Plot showing the actual clusters formed
    colors = cm.spectral(cluster_labels.astype(float) / n_clusters)
    ax2.scatter(X[:, 0], X[:, 1], marker='.', s=30, lw=0, alpha=0.7,
                c=colors)

    # Labeling the clusters
    centers = clusterer.cluster_centers_
    # Draw white circles at cluster centers
    ax2.scatter(centers[:, 0], centers[:, 1],
                marker='o', c="white", alpha=1, s=200)

    for i, c in enumerate(centers):
        ax2.scatter(c[0], c[1], marker='$%d$' % i, alpha=1, s=50)

    ax2.set_title("The visualization of the clustered data.")
    ax2.set_xlabel("Feature space for the 1st feature")
    ax2.set_ylabel("Feature space for the 2nd feature")

    plt.suptitle(("Silhouette analysis for KMeans clustering on sample data "
                  "with n_clusters = %d" % n_clusters),
                 fontsize=14, fontweight='bold')

    plt.show()
| bsd-3-clause |
jkarnows/scikit-learn | benchmarks/bench_multilabel_metrics.py | 86 | 7286 | #!/usr/bin/env python
"""
A comparison of multilabel target formats and metrics over them
"""
from __future__ import division
from __future__ import print_function
from timeit import timeit
from functools import partial
import itertools
import argparse
import sys
import matplotlib.pyplot as plt
import scipy.sparse as sp
import numpy as np
from sklearn.datasets import make_multilabel_classification
from sklearn.metrics import (f1_score, accuracy_score, hamming_loss,
jaccard_similarity_score)
from sklearn.utils.testing import ignore_warnings
# Metric name -> callable(y_true, y_pred).  Multilabel averaging is bound
# up-front with partial, so every entry exposes the same 2-argument signature.
METRICS = {
    'f1': partial(f1_score, average='micro'),
    'f1-by-sample': partial(f1_score, average='samples'),
    'accuracy': accuracy_score,
    'hamming': hamming_loss,
    'jaccard': jaccard_similarity_score,
}

# Format name -> converter from a dense label-indicator matrix to the
# multilabel representation under test.
FORMATS = {
    'sequences': lambda y: [list(np.flatnonzero(s)) for s in y],
    'dense': lambda y: y,
    'csr': lambda y: sp.csr_matrix(y),
    'csc': lambda y: sp.csc_matrix(y),
}
@ignore_warnings
def benchmark(metrics=tuple(v for k, v in sorted(METRICS.items())),
              formats=tuple(v for k, v in sorted(FORMATS.items())),
              samples=1000, classes=4, density=.2,
              n_times=5):
    """Times metric calculations for a number of inputs

    Parameters
    ----------
    metrics : array-like of callables (1d or 0d)
        The metric functions to time.

    formats : array-like of callables (1d or 0d)
        These may transform a dense indicator matrix into multilabel
        representation.

    samples : array-like of ints (1d or 0d)
        The number of samples to generate as input.

    classes : array-like of ints (1d or 0d)
        The number of classes in the input.

    density : array-like of ints (1d or 0d)
        The density of positive labels in the input.

    n_times : int
        Time calling the metric n_times times.

    Returns
    -------
    array of floats shaped like (metrics, formats, samples, classes, density)
        Time in seconds.
    """
    # Promote scalars to 1-d arrays so a single code path handles both.
    metrics = np.atleast_1d(metrics)
    samples = np.atleast_1d(samples)
    classes = np.atleast_1d(classes)
    density = np.atleast_1d(density)
    formats = np.atleast_1d(formats)

    out = np.zeros((len(metrics), len(formats), len(samples), len(classes),
                    len(density)), dtype=float)

    it = itertools.product(samples, classes, density)
    for i, (s, c, d) in enumerate(it):
        # Two independently seeded random labelings play the roles of
        # ground truth and prediction.
        _, y_true = make_multilabel_classification(n_samples=s, n_features=1,
                                                   n_classes=c, n_labels=d * c,
                                                   return_indicator=True,
                                                   random_state=42)
        _, y_pred = make_multilabel_classification(n_samples=s, n_features=1,
                                                   n_classes=c, n_labels=d * c,
                                                   return_indicator=True,
                                                   random_state=84)
        for j, f in enumerate(formats):
            f_true = f(y_true)
            f_pred = f(y_pred)
            for k, metric in enumerate(metrics):
                t = timeit(partial(metric, f_true, f_pred), number=n_times)

                # .flat[i] maps the flattened itertools.product index onto
                # the last three axes (samples, classes, density) of out.
                out[k, j].flat[i] = t
    return out
def _tabulate(results, metrics, formats):
"""Prints results by metric and format
Uses the last ([-1]) value of other fields
"""
column_width = max(max(len(k) for k in formats) + 1, 8)
first_width = max(len(k) for k in metrics)
head_fmt = ('{:<{fw}s}' + '{:>{cw}s}' * len(formats))
row_fmt = ('{:<{fw}s}' + '{:>{cw}.3f}' * len(formats))
print(head_fmt.format('Metric', *formats,
cw=column_width, fw=first_width))
for metric, row in zip(metrics, results[:, :, -1, -1, -1]):
print(row_fmt.format(metric, *row,
cw=column_width, fw=first_width))
def _plot(results, metrics, formats, title, x_ticks, x_label,
          format_markers=('x', '|', 'o', '+'),
          metric_colors=('c', 'm', 'y', 'k', 'g', 'r', 'b')):
    """Draw one timing curve per (metric, format) pair against *x_label*.

    Colour encodes the metric, marker encodes the format.
    """
    fig = plt.figure('scikit-learn multilabel metrics benchmarks')
    plt.title(title)
    ax = fig.add_subplot(111)
    for metric_idx, metric_name in enumerate(metrics):
        # Wrap around if there are more metrics than colours.
        colour = metric_colors[metric_idx % len(metric_colors)]
        for fmt_idx, fmt_name in enumerate(formats):
            ax.plot(x_ticks, results[metric_idx, fmt_idx].flat,
                    label='{}, {}'.format(metric_name, fmt_name),
                    marker=format_markers[fmt_idx],
                    color=colour)
    ax.set_xlabel(x_label)
    ax.set_ylabel('Time (s)')
    ax.legend()
    plt.show()
if __name__ == "__main__":
    # Command-line front end: choose metrics/formats, problem size, and
    # optionally sweep one parameter while plotting the timings.
    ap = argparse.ArgumentParser()
    ap.add_argument('metrics', nargs='*', default=sorted(METRICS),
                    help='Specifies metrics to benchmark, defaults to all. '
                         'Choices are: {}'.format(sorted(METRICS)))
    ap.add_argument('--formats', nargs='+', choices=sorted(FORMATS),
                    help='Specifies multilabel formats to benchmark '
                         '(defaults to all).')
    ap.add_argument('--samples', type=int, default=1000,
                    help='The number of samples to generate')
    ap.add_argument('--classes', type=int, default=10,
                    help='The number of classes')
    ap.add_argument('--density', type=float, default=.2,
                    help='The average density of labels per sample')
    ap.add_argument('--plot', choices=['classes', 'density', 'samples'],
                    default=None,
                    help='Plot time with respect to this parameter varying '
                         'up to the specified value')
    ap.add_argument('--n-steps', default=10, type=int,
                    help='Plot this many points for each metric')
    ap.add_argument('--n-times',
                    default=5, type=int,
                    help="Time performance over n_times trials")
    args = ap.parse_args()

    if args.plot is not None:
        # Replace the swept parameter's scalar with an array of n_steps
        # values running from its minimum up to the requested maximum;
        # integer parameters are rounded and deduplicated.
        max_val = getattr(args, args.plot)
        if args.plot in ('classes', 'samples'):
            min_val = 2
        else:
            min_val = 0
        steps = np.linspace(min_val, max_val, num=args.n_steps + 1)[1:]
        if args.plot in ('classes', 'samples'):
            steps = np.unique(np.round(steps).astype(int))
        setattr(args, args.plot, steps)

    if args.metrics is None:
        args.metrics = sorted(METRICS)
    if args.formats is None:
        args.formats = sorted(FORMATS)

    # Map the chosen names back to the callables before benchmarking.
    results = benchmark([METRICS[k] for k in args.metrics],
                        [FORMATS[k] for k in args.formats],
                        args.samples, args.classes, args.density,
                        args.n_times)

    _tabulate(results, args.metrics, args.formats)

    if args.plot is not None:
        print('Displaying plot', file=sys.stderr)
        # Title lists the fixed parameters; the swept one becomes the x-axis.
        title = ('Multilabel metrics with %s' %
                 ', '.join('{0}={1}'.format(field, getattr(args, field))
                           for field in ['samples', 'classes', 'density']
                           if args.plot != field))
        _plot(results, args.metrics, args.formats, title, steps, args.plot)
| bsd-3-clause |
arkadoel/AprendiendoPython | Udacity_tutorial/p5.py | 1 | 2164 | from pandas import DataFrame, Series
#################
# Syntax Reminder:
#
# The following code would create a two-column pandas DataFrame
# named df with columns labeled 'name' and 'age':
#
# people = ['Sarah', 'Mike', 'Chrisna']
# ages = [28, 32, 25]
# df = DataFrame({'name' : Series(people),
# 'age' : Series(ages)})
def create_dataframe():
    """Build a pandas DataFrame of the 2014 Sochi Winter Olympics medal table.

    The frame has the columns 'Country', 'Gold', 'Silver' and 'Bronze'
    in that order, one row per medal-winning country, with the default
    integer row index.
    """
    countries = ['Russian Fed.', 'Norway', 'Canada', 'United States',
                 'Netherlands', 'Germany', 'Switzerland', 'Belarus',
                 'Austria', 'France', 'Poland', 'China', 'Korea',
                 'Sweden', 'Czech Republic', 'Slovenia', 'Japan',
                 'Finland', 'Great Britain', 'Ukraine', 'Slovakia',
                 'Italy', 'Latvia', 'Australia', 'Croatia', 'Kazakhstan']
    gold = [13, 11, 10, 9, 8, 8, 6, 5, 4, 4, 4, 3, 3, 2, 2, 2, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0]
    silver = [11, 5, 10, 7, 7, 6, 3, 0, 8, 4, 1, 4, 3, 7, 4, 2, 4, 3, 1, 0, 0, 2, 2, 2, 1, 0]
    bronze = [9, 10, 5, 12, 9, 5, 2, 1, 5, 7, 1, 2, 2, 6, 2, 4, 3, 1, 2, 1, 0, 6, 2, 1, 0, 1]

    # Pair each column label with its data, then build the frame; the
    # explicit columns list pins the left-to-right column order.
    column_data = [('Country', countries), ('Gold', gold),
                   ('Silver', silver), ('Bronze', bronze)]
    medal_counts = DataFrame(
        dict((label, Series(values)) for label, values in column_data),
        columns=[label for label, _ in column_data])
    return medal_counts
if __name__ == '__main__':
print(create_dataframe()) | gpl-3.0 |
sgenoud/scikit-learn | sklearn/naive_bayes.py | 1 | 15049 | # -*- coding: utf-8 -*-
"""
The :mod:`sklearn.naive_bayes` module implements Naive Bayes algorithms. These
are supervised learning methods based on applying Bayes' theorem with strong
(naive) feature independence assumptions.
"""
# Author: Vincent Michel <vincent.michel@inria.fr>
# Minor fixes by Fabian Pedregosa
# Amit Aides <amitibo@tx.technion.ac.il>
# Yehuda Finkelstein <yehudaf@tx.technion.ac.il>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# (parts based on earlier work by Mathieu Blondel)
#
# License: BSD Style.
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from .base import BaseEstimator, ClassifierMixin
from .preprocessing import binarize, LabelBinarizer
from .utils import array2d, atleast2d_or_csr
from .utils.extmath import safe_sparse_dot, logsumexp
from .utils import deprecated
class BaseNB(BaseEstimator, ClassifierMixin):
    """Abstract base class for naive Bayes estimators.

    Subclasses only have to supply ``_joint_log_likelihood``; prediction and
    the probability accessors are derived from it here.
    """
    __metaclass__ = ABCMeta

    @abstractmethod
    def _joint_log_likelihood(self, X):
        """Compute the unnormalized posterior log probability of X.

        That is, ``log P(c) + log P(x|c)`` for every row x of X, as an
        array-like of shape [n_classes, n_samples].  predict,
        predict_proba and predict_log_proba all pass their input through
        to this method unchanged.
        """

    def predict(self, X):
        """Classify each row of X.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]

        Returns
        -------
        C : array, shape = [n_samples]
            Predicted target values for X
        """
        joint = self._joint_log_likelihood(X)
        best = np.argmax(joint, axis=1)
        return self.classes_[best]

    def predict_log_proba(self, X):
        """Return per-class log-probabilities for each row of X.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]

        Returns
        -------
        C : array-like, shape = [n_samples, n_classes]
            Log-probability of each sample for each class, classes ordered
            arithmetically.
        """
        joint = self._joint_log_likelihood(X)
        # Normalise by log P(x) = log sum_c exp(log P(c) + log P(x|c)).
        normalizer = logsumexp(joint, axis=1)
        return joint - np.atleast_2d(normalizer).T

    def predict_proba(self, X):
        """Return per-class probabilities for each row of X.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]

        Returns
        -------
        C : array-like, shape = [n_samples, n_classes]
            Probability of each sample for each class, classes ordered
            arithmetically.
        """
        return np.exp(self.predict_log_proba(X))
class GaussianNB(BaseNB):
    """
    Gaussian Naive Bayes (GaussianNB)

    Parameters
    ----------
    X : array-like, shape = [n_samples, n_features]
        Training vector, where n_samples is the number of samples and
        n_features is the number of features.

    y : array, shape = [n_samples]
        Target vector relative to X

    Attributes
    ----------
    `class_prior_` : array, shape = [n_classes]
        probability of each class.

    `theta_` : array, shape = [n_classes, n_features]
        mean of each feature per class

    `sigma_` : array, shape = [n_classes, n_features]
        variance of each feature per class

    Examples
    --------
    >>> import numpy as np
    >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
    >>> Y = np.array([1, 1, 1, 2, 2, 2])
    >>> from sklearn.naive_bayes import GaussianNB
    >>> clf = GaussianNB()
    >>> clf.fit(X, Y)
    GaussianNB()
    >>> print(clf.predict([[-0.8, -1]]))
    [1]
    """

    def fit(self, X, y):
        """Fit Gaussian Naive Bayes according to X, y

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples
            and n_features is the number of features.

        y : array-like, shape = [n_samples]
            Target values.

        Returns
        -------
        self : object
            Returns self.
        """
        X = np.asarray(X)
        y = np.asarray(y)
        n_samples, n_features = X.shape

        if n_samples != y.shape[0]:
            raise ValueError("X and y have incompatible shapes")

        self.classes_ = unique_y = np.unique(y)
        n_classes = unique_y.shape[0]

        self.theta_ = np.zeros((n_classes, n_features))
        self.sigma_ = np.zeros((n_classes, n_features))
        self.class_prior_ = np.zeros(n_classes)
        # Small variance floor so constant features never divide by zero.
        epsilon = 1e-9
        for i, y_i in enumerate(unique_y):
            # Per-class feature means/variances and empirical class prior.
            self.theta_[i, :] = np.mean(X[y == y_i, :], axis=0)
            self.sigma_[i, :] = np.var(X[y == y_i, :], axis=0) + epsilon
            self.class_prior_[i] = np.float(np.sum(y == y_i)) / n_samples
        return self

    def _joint_log_likelihood(self, X):
        # log P(c) + sum_i log N(x_i | theta_ci, sigma_ci) for every class.
        # NOTE(review): the Gaussian normaliser below uses log(pi * sigma)
        # rather than log(2 * pi * sigma).  The missing 0.5*log(2) per
        # feature is the same constant for every class, so argmax
        # predictions and the logsumexp-normalised probabilities are
        # unaffected -- but confirm before using the raw joint values.
        X = array2d(X)
        joint_log_likelihood = []
        for i in xrange(np.size(self.classes_)):  # Python 2 xrange
            jointi = np.log(self.class_prior_[i])
            n_ij = - 0.5 * np.sum(np.log(np.pi * self.sigma_[i, :]))
            n_ij -= 0.5 * np.sum(((X - self.theta_[i, :]) ** 2) / \
                                 (self.sigma_[i, :]), 1)
            joint_log_likelihood.append(jointi + n_ij)

        joint_log_likelihood = np.array(joint_log_likelihood).T
        return joint_log_likelihood

    # Deprecated read-only aliases kept for backward compatibility with
    # scikit-learn <= 0.11; they simply forward to the trailing-underscore
    # attributes set by fit().
    @property
    @deprecated('GaussianNB.class_prior is deprecated'
                ' and will be removed in version 0.12.'
                ' Please use ``GaussianNB.class_prior_`` instead.')
    def class_prior(self):
        return self.class_prior_

    @property
    @deprecated('GaussianNB.theta is deprecated'
                ' and will be removed in version 0.12.'
                ' Please use ``GaussianNB.theta_`` instead.')
    def theta(self):
        return self.theta_

    @property
    @deprecated('GaussianNB.sigma is deprecated'
                ' and will be removed in version 0.12.'
                ' Please use ``GaussianNB.sigma_`` instead.')
    def sigma(self):
        return self.sigma_
class BaseDiscreteNB(BaseNB):
    """Abstract base class for naive Bayes on discrete/categorical data

    Any estimator based on this class should provide:

    __init__
    _joint_log_likelihood(X) as per BaseNB
    """

    def fit(self, X, y, sample_weight=None, class_prior=None):
        """Fit Naive Bayes classifier according to X, y

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples and
            n_features is the number of features.

        y : array-like, shape = [n_samples]
            Target values.

        sample_weight : array-like, shape = [n_samples], optional
            Weights applied to individual samples (1. for unweighted).

        class_prior : array, shape [n_classes]
            Custom prior probability per class.
            Overrides the fit_prior parameter.

        Returns
        -------
        self : object
            Returns self.
        """
        X = atleast2d_or_csr(X)

        # One-hot encode the targets: Y[i, c] == 1 iff sample i has class c.
        labelbin = LabelBinarizer()
        Y = labelbin.fit_transform(y)
        self.classes_ = labelbin.classes_
        n_classes = len(self.classes_)
        if Y.shape[1] == 1:
            # Binary case: LabelBinarizer emits a single column, so add the
            # complementary column to get one indicator column per class.
            Y = np.concatenate((1 - Y, Y), axis=1)

        if X.shape[0] != Y.shape[0]:
            msg = "X and y have incompatible shapes."
            if issparse(X):
                msg += "\nNote: Sparse matrices cannot be indexed w/ boolean \
masks (use `indices=True` in CV)."
            raise ValueError(msg)

        if sample_weight is not None:
            # Scale each sample's indicator row by its weight so the class
            # and feature counts below become weighted counts.
            Y *= array2d(sample_weight).T

        # Bug fix: compare explicitly against None. The previous truthiness
        # test (``if class_prior:``) raised "the truth value of an array is
        # ambiguous" whenever a numpy array with more than one element was
        # passed, although the documented type of class_prior is an array.
        if class_prior is not None:
            if len(class_prior) != n_classes:
                raise ValueError(
                    "Number of priors must match number of classes")
            self.class_log_prior_ = np.log(class_prior)
        elif self.fit_prior:
            # empirical prior, with sample_weight taken into account
            y_freq = Y.sum(axis=0)
            self.class_log_prior_ = np.log(y_freq) - np.log(y_freq.sum())
        else:
            # Uniform prior: log(1 / n_classes) for every class.
            self.class_log_prior_ = np.zeros(n_classes) - np.log(n_classes)

        # Lidstone-smoothed per-class feature log probabilities.
        N_c, N_c_i = self._count(X, Y)
        self.feature_log_prob_ = (np.log(N_c_i + self.alpha)
                                  - np.log(N_c.reshape(-1, 1)
                                           + self.alpha * X.shape[1]))
        return self

    @staticmethod
    def _count(X, Y):
        """Count feature occurrences.

        Returns (N_c, N_c_i), where
            N_c is the count of all features in all samples of class c;
            N_c_i is the count of feature i in all samples of class c.
        """
        if np.any((X.data if issparse(X) else X) < 0):
            raise ValueError("Input X must be non-negative.")
        N_c_i = safe_sparse_dot(Y.T, X)
        N_c = np.sum(N_c_i, axis=1)
        return N_c, N_c_i

    # XXX The following is a stopgap measure; we need to set the dimensions
    # of class_log_prior_ and feature_log_prob_ correctly.
    def _get_coef(self):
        # In the binary case expose a single row, mirroring the shape
        # conventions of linear models.
        return self.feature_log_prob_[1] if len(self.classes_) == 2 \
            else self.feature_log_prob_

    def _get_intercept(self):
        return self.class_log_prior_[1] if len(self.classes_) == 2 \
            else self.class_log_prior_

    coef_ = property(_get_coef)
    intercept_ = property(_get_intercept)
class MultinomialNB(BaseDiscreteNB):
    """
    Naive Bayes classifier for multinomial models

    The multinomial Naive Bayes classifier is suitable for classification with
    discrete features (e.g., word counts for text classification). The
    multinomial distribution normally requires integer feature counts. However,
    in practice, fractional counts such as tf-idf may also work.

    Parameters
    ----------
    alpha: float, optional (default=1.0)
        Additive (Laplace/Lidstone) smoothing parameter
        (0 for no smoothing).

    fit_prior: boolean
        Whether to learn class prior probabilities or not.
        If false, a uniform prior will be used.

    Attributes
    ----------
    `intercept_`, `class_log_prior_` : array, shape = [n_classes]
        Smoothed empirical log probability for each class.

    `feature_log_prob_`, `coef_` : array, shape = [n_classes, n_features]
        Empirical log probability of features
        given a class, P(x_i|y).

        (`intercept_` and `coef_` are properties
        referring to `class_log_prior_` and
        `feature_log_prob_`, respectively.)

    Examples
    --------
    >>> import numpy as np
    >>> X = np.random.randint(5, size=(6, 100))
    >>> Y = np.array([1, 2, 3, 4, 5, 6])
    >>> from sklearn.naive_bayes import MultinomialNB
    >>> clf = MultinomialNB()
    >>> clf.fit(X, Y)
    MultinomialNB(alpha=1.0, fit_prior=True)
    >>> print(clf.predict(X[2]))
    [3]

    Notes
    -----
    For the rationale behind the names `coef_` and `intercept_`, i.e.
    naive Bayes as a linear classifier, see J. Rennie et al. (2003),
    Tackling the poor assumptions of naive Bayes text classifiers, ICML.
    """

    def __init__(self, alpha=1.0, fit_prior=True):
        self.alpha = alpha
        self.fit_prior = fit_prior

    def _joint_log_likelihood(self, X):
        """Calculate the posterior log probability of the samples X"""
        X = atleast2d_or_csr(X)
        # log P(c) + sum_i x_i * log P(feature i | class c), for every class.
        weighted_feature_terms = safe_sparse_dot(X, self.feature_log_prob_.T)
        return weighted_feature_terms + self.class_log_prior_
class BernoulliNB(BaseDiscreteNB):
    """Naive Bayes classifier for multivariate Bernoulli models.

    Like MultinomialNB, this classifier is suitable for discrete data. The
    difference is that while MultinomialNB works with occurrence counts,
    BernoulliNB is designed for binary/boolean features.

    Parameters
    ----------
    alpha: float, optional (default=1.0)
        Additive (Laplace/Lidstone) smoothing parameter
        (0 for no smoothing).

    binarize: float or None, optional
        Threshold for binarizing (mapping to booleans) of sample features.
        If None, input is presumed to already consist of binary vectors.

    fit_prior: boolean
        Whether to learn class prior probabilities or not.
        If false, a uniform prior will be used.

    Attributes
    ----------
    `class_log_prior_` : array, shape = [n_classes]
        Log probability of each class (smoothed).

    `feature_log_prob_` : array, shape = [n_classes, n_features]
        Empirical log probability of features given a class, P(x_i|y).

    Examples
    --------
    >>> import numpy as np
    >>> X = np.random.randint(2, size=(6, 100))
    >>> Y = np.array([1, 2, 3, 4, 4, 5])
    >>> from sklearn.naive_bayes import BernoulliNB
    >>> clf = BernoulliNB()
    >>> clf.fit(X, Y)
    BernoulliNB(alpha=1.0, binarize=0.0, fit_prior=True)
    >>> print(clf.predict(X[2]))
    [3]

    References
    ----------
    C.D. Manning, P. Raghavan and H. Schütze (2008). Introduction to
    Information Retrieval. Cambridge University Press, pp. 234–265.

    A. McCallum and K. Nigam (1998). A comparison of event models for naive
    Bayes text classification. Proc. AAAI/ICML-98 Workshop on Learning for
    Text Categorization, pp. 41–48.

    V. Metsis, I. Androutsopoulos and G. Paliouras (2006). Spam filtering with
    naive Bayes -- Which naive Bayes? 3rd Conf. on Email and Anti-Spam (CEAS).
    """

    def __init__(self, alpha=1.0, binarize=.0, fit_prior=True):
        self.alpha = alpha
        self.binarize = binarize
        self.fit_prior = fit_prior

    def _count(self, X, Y):
        # Binarize before delegating to the shared counting routine so the
        # feature counts become per-class document frequencies.
        if self.binarize is not None:
            X = binarize(X, threshold=self.binarize)
        return super(BernoulliNB, self)._count(X, Y)

    def _joint_log_likelihood(self, X):
        """Calculate the posterior log probability of the samples X"""
        X = atleast2d_or_csr(X)
        if self.binarize is not None:
            X = binarize(X, threshold=self.binarize)

        n_classes, n_features = self.feature_log_prob_.shape
        n_samples, n_features_X = X.shape
        if n_features_X != n_features:
            raise ValueError("Expected input with %d features, got %d instead"
                             % (n_features, n_features_X))

        # log(1 - P(x_i | y)) for every (class, feature) pair.
        log_absent = np.log(1 - np.exp(self.feature_log_prob_))
        # neg_prob . (1 - X).T computed as sum(neg_prob) - X . neg_prob,
        # which avoids materializing the dense complement of X.
        absent_term = (log_absent.sum(axis=1)
                       - safe_sparse_dot(X, log_absent.T))
        present_term = safe_sparse_dot(X, self.feature_log_prob_.T)
        return present_term + absent_term + self.class_log_prior_
| bsd-3-clause |
Obus/scikit-learn | sklearn/metrics/cluster/tests/test_bicluster.py | 394 | 1770 | """Testing for bicluster metrics module"""
import numpy as np
from sklearn.utils.testing import assert_equal, assert_almost_equal
from sklearn.metrics.cluster.bicluster import _jaccard
from sklearn.metrics import consensus_score
def test_jaccard():
    # Hand-checked Jaccard coefficients of bicluster pairs against a1.
    a1 = np.array([True, True, False, False])
    a2 = np.array([True, True, True, True])
    a3 = np.array([False, True, True, False])
    a4 = np.array([False, False, True, True])

    expected = [(a1, 1), (a2, 0.25), (a3, 1.0 / 7), (a4, 0)]
    for other, coefficient in expected:
        assert_equal(_jaccard(a1, a1, other, other), coefficient)
def test_consensus_score():
    a = [[True, True, False, False],
         [False, False, True, True]]
    b = a[::-1]

    # Pairings where each bicluster has a perfect counterpart score 1.
    for left, right in [((a, a), (a, a)), ((a, a), (b, b)),
                        ((a, b), (a, b)), ((a, b), (b, a))]:
        assert_equal(consensus_score(left, right), 1)

    # Pairings with mismatched row/column assignments score 0.
    for left, right in [((a, a), (b, a)), ((a, a), (a, b)),
                        ((b, b), (a, b)), ((b, b), (b, a))]:
        assert_equal(consensus_score(left, right), 0)
def test_consensus_score_issue2445():
    ''' Different number of biclusters in A and B'''
    biclusters = [[True, True, False, False],
                  [False, False, True, True],
                  [False, False, False, True]]
    a_rows = np.array(biclusters)
    a_cols = np.array(biclusters)
    kept = [0, 2]
    score = consensus_score((a_rows, a_cols), (a_rows[kept], a_cols[kept]))
    # B contains 2 of the 3 biclusters in A, so score should be 2/3
    assert_almost_equal(score, 2.0 / 3.0)
| bsd-3-clause |
rbaravalle/imfractal | tests/testcomparison.py | 1 | 8144 | """
Copyright (c) 2013 Rodrigo Baravalle
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from imfractal import *
import Image
import time
import csv
import sys
import os
from subprocess import *
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn import cross_validation
from sklearn import svm
import matplotlib.pyplot as plt
from pylab import *
import csv
def opencsv(filename):
    """Read a CSV file and return its rows as a numpy array.

    Bug fix: the original assigned ``array[i] = row`` into an empty list,
    which raises IndexError on the very first row; rows are now appended.
    """
    rows = []
    # 'rb' is the mode the Python 2 csv module expects (this script uses
    # Python 2 print statements elsewhere).
    with open(filename, 'rb') as csvfile:
        spamreader = csv.reader(csvfile)
        for row in spamreader:
            rows.append(row)
    return np.array(rows)
# CSV WRITE
def writecsv(filename, array):
    """Write *array* (an iterable of rows) to *filename* in CSV format."""
    # Binary mode matches the csv usage conventions of this Python 2 script.
    with open(filename, 'wb') as out:
        csv.writer(out).writerows(array)
def do_test(minn,vals):
    """Compare multifractal spectra of real vs. synthetic bread images.

    Computes (or loads from CSV) sandbox multifractal dimension vectors for a
    train set of "real" bread images and a test set of synthetic ones, then
    compares the per-q medians via boxplots. When the total median error is
    below ``minn``, a figure titled ``vals`` is saved.

    Returns (err, err1, err2): total absolute median difference, and the
    same restricted to negative-q and positive-q halves respectively.
    """
    #cant = 10
    dfs = 10                      # number of q values on each side of 0
    dDFs = 2*dfs+1                # total spectrum length: q in [-dfs, dfs]
    cantClasses = 2
    computeMFS = False            # False: load precomputed train spectra from CSV
    pathbtr = 'images/train2/bread/'
    dirListbtr=os.listdir(pathbtr)
    cantTrainB = 10#len(dirListbtr)
    #pathnbtr = 'images/train2/nonbread/'
    #dirListnbtr=os.listdir(pathnbtr)
    #cantTrainNB = 1#len(dirListnbtr)
    pathbte = 'images/test4/bread/'
    dirListbte=os.listdir(pathbte)
    cantTestB = 2#len(dirListbte)
    #pathnbte = 'images/test2/nonbread/'
    #dirListnbte=os.listdir(pathnbte)
    #cantTestNB = 1#len(dirListnbte)
    # One row of multifractal dimensions per image.
    breadtrain = np.zeros((cantTrainB, dDFs)).astype(np.float32)
    print breadtrain[0].shape
    breadtest = np.zeros((cantTestB, dDFs)).astype(np.float32)
    #nonbreadtrain = np.zeros((cantTrainNB, dDFs)).astype(np.float32)
    #nonbreadtest = np.zeros((cantTestNB, dDFs)).astype(np.float32)
    # Sandbox multifractal-spectrum estimator (imfractal project class).
    ins = Sandbox(dfs)
    if(computeMFS):
        print 'Training: computing sandbox MFS for the bread database...'
        ins.setDef(40,1.02,True)
        #print "Computing " + str(cantTrainB) +" bread train..."
        for i in range(cantTrainB):
            filename = pathbtr+dirListbtr[i]
            breadtrain[i] = ins.getFDs(filename)
    else:
        # Load previously saved spectra; column 0 is the class label, so it
        # is dropped with row[1:].
        print "Loading CSV"
        with open('breadtrainS.csv', 'rb') as csvfile:
            spamreader = csv.reader(csvfile)
            i = 0
            for row in spamreader:
                breadtrain[i] = row[1:]
                i = i+1
    #print "Computing "+ str(cantTestB) +" bread test..."
    # Test spectra are always recomputed (different setDef parameters).
    ins.setDef(40,1.15,False)
    for i in range(cantTestB):
        filename = pathbte+dirListbte[i]
        breadtest[i] = ins.getFDs(filename)
    #ins.setDef(40,1.02,True)
    #print "Computing "+str(cantTrainNB)+" non bread train..."
    #for i in range(cantTrainNB):
    #    filename = pathnbtr+dirListnbtr[i]
    #    nonbreadtrain[i] = ins.getFDs(filename)
    #ins.setDef(40,1.02,True)
    #print "Computing "+ str(cantTestNB) +" non bread test..."
    #for i in range(cantTestNB):
    #    filename = pathnbte+dirListnbte[i]
    #    nonbreadtest[i] = ins.getFDs(filename)
    fsize = 14
    if(computeMFS):
        # Persist freshly computed train spectra (label column prepended).
        #data = np.vstack((breadtrain,breadtest))#,nonbreadtrain))
        labelsbtr = np.zeros((len(breadtrain),1)) + 1
        #labelsnbtr = np.zeros((len(nonbreadtrain),1)) + 2
        #labelsbte = np.zeros((len(breadtest),1)) + 1
        #labelsnbte = np.zeros((len(nonbreadtest),1)) + 2
        #labels = np.hstack((labelsbtr[:,0],labelsbte[:,0],labelsnbtr[:,0],labelsnbte[:,0]))
        print "Saving CSVs for SOM"
        print "Shapes: labelsbtr: ", labelsbtr.shape, "breadtrain: ",breadtrain.shape
        writecsv('breadtrainS.csv',np.hstack((labelsbtr,breadtrain)) )
        #writecsv('breadtestS.csv',np.hstack((labelsbte,breadtest)) )
        #writecsv('nonbreadtrainS.csv',np.hstack((labelsnbtr,nonbreadtrain)) )
        #writecsv('nonbreadtestS.csv',np.hstack((labelsnbte,nonbreadtest)) )
    #exit()
    # NOTE(review): dead classification branch; it references variables
    # (nonbreadtrain, labelsbtr, ...) that are commented out above, so it
    # would raise NameError if re-enabled as-is.
    if(False):
        #print "Testing..."
        #print "1 = Bread"
        #print "2 = Nonbread"
        train = np.vstack((breadtrain,nonbreadtrain))
        labels = np.hstack((labelsbtr[:,0],labelsnbtr[:,0]))
        test = np.vstack((breadtest,nonbreadtest))
        lin_svc = svm.LinearSVC(C=1.0).fit(train, labels)
        predictionsSVM = lin_svc.predict(test)
        cfr = RandomForestClassifier(n_estimators=120)
        cfr.fit(train,labels) # train
        gtruth = np.hstack((labelsbte[:,0],labelsnbte[:,0]))
        predictionsRF = cfr.predict(test) # test
        print dirListbte
        print dirListbtr
        print "Random Forest Prediction:"
        print predictionsRF[:cantTestB]
        print predictionsRF[cantTestB:]
        print "SVM Prediction:"
        print predictionsSVM[:cantTestB]
        print predictionsSVM[cantTestB:]
        print "REAL: "
        print gtruth
    x = np.arange(dDFs)
    #plt.ylabel(r'$f(alpha)$',fontsize=fsize)
    #plt.xlabel('alpha',fontsize=fsize)
    #plt.plot(x, breadtrain.T, 'k+--', label='bread train',linewidth=2.0)
    #plt.plot(x, breadtest.T, 'g+--', label='bread test',linewidth=2.0)
    #plt.plot(x, nonbreadtrain.T, 'r*--', label='non bread train',linewidth=2.0)
    #plt.plot(x, nonbreadtest.T, 'b*--', label='non bread test',linewidth=2.0)
    #plt.legend(loc = 3)
    #plt.show()
    y0 = 1.2
    y1 = 3
    plt.figure()
    #plt.subplot(122)
    plt.ylim((y0,y1))
    #plt.xlabel('Real Breads',fontsize=fsize)
    # Boxplot medians give one representative D_q value per q for each set.
    b = plt.boxplot(np.vstack((breadtrain)),sym="")
    mediansReal = map(lambda i: i.get_data()[1][0],b['medians'])
    x = np.arange(len(mediansReal))
    #plt.subplot(121)
    #plt.ylim((y0, y1))
    plt.xlabel('$q$',fontsize=fsize)
    b = plt.boxplot(np.vstack((breadtest)),sym="")
    mediansSynth = map(lambda i: i.get_data()[1][0],b['medians'])
    #plt.show()
    # L1 distance between median spectra, total and per q-sign half.
    err = sum(abs(np.array(mediansReal)-np.array(mediansSynth)))
    err1 = sum(abs(np.array(mediansReal[:dfs])-np.array(mediansSynth[:dfs])))
    err2 = sum(abs(np.array(mediansReal[dfs:])-np.array(mediansSynth[dfs:])))
    qs = range(-dfs,dfs+1)
    xticks(x+1,qs) # translate
    print "ERROR: ", err
    if(err < minn):
        # Good enough match: save a comparison figure of the two spectra.
        plt.plot(x+1, mediansReal, 'k+--', label='real',linewidth=2.0)
        plt.plot(x+1, mediansSynth, 'b+--', label='synthetic',linewidth=2.0)
        plt.legend(loc=4)
        plt.ylabel(r'$D_{q}$',fontsize=fsize)
        plt.xlabel(r'$q$',fontsize=fsize)
        plt.title(vals)
        savefig('best_boxplot'+str(err)+'_'+'_.png')
    return err, err1, err2
    # NOTE(review): unreachable code after return (scoring of the dead
    # classification branch above); kept as-is.
    if(False):
        scoreRF = (len(gtruth)-sum(abs(gtruth-predictionsRF)))/float(len(gtruth))
        scoreSVM = (len(gtruth)-sum(abs(gtruth-predictionsSVM)))/float(len(gtruth))
        #scores = cross_validation.cross_val_score(cfr, data, labels, cv=4)
        print "Classification performance (Random Forest classifier): " + str( scoreRF*100 ) + "%"
        print "Classification performance (Support Vector Machine classifier): " + str( scoreSVM*100 ) + "%"
| bsd-3-clause |
RachitKansal/scikit-learn | sklearn/metrics/cluster/supervised.py | 207 | 27395 | """Utilities to evaluate the clustering performance of models
Functions named as *_score return a scalar value to maximize: the higher the
better.
"""
# Authors: Olivier Grisel <olivier.grisel@ensta.org>
# Wei LI <kuantkid@gmail.com>
# Diego Molla <dmolla-aliod@gmail.com>
# License: BSD 3 clause
from math import log
from scipy.misc import comb
from scipy.sparse import coo_matrix
import numpy as np
from .expected_mutual_info_fast import expected_mutual_information
from ...utils.fixes import bincount
def comb2(n):
    """Return ``n`` choose 2 as an exact integer.

    The exact integer code path of ``comb`` is faster for k == 2 than the
    float approximation, so it is used by default throughout this module.
    """
    return comb(n, 2, exact=True)
def check_clusterings(labels_true, labels_pred):
    """Check that the two clusterings matching 1D integer arrays"""
    labels_true = np.asarray(labels_true)
    labels_pred = np.asarray(labels_pred)

    # Both inputs must be flat label vectors; check true labels first so the
    # error message order matches callers' expectations.
    for name, labels in (("labels_true", labels_true),
                         ("labels_pred", labels_pred)):
        if labels.ndim != 1:
            raise ValueError(
                "%s must be 1D: shape is %r" % (name, labels.shape))

    if labels_true.shape != labels_pred.shape:
        raise ValueError(
            "labels_true and labels_pred must have same size, got %d and %d"
            % (labels_true.shape[0], labels_pred.shape[0]))
    return labels_true, labels_pred
def contingency_matrix(labels_true, labels_pred, eps=None):
    """Build a contingency matrix describing the relationship between labels.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        Ground truth class labels to be used as a reference

    labels_pred : array, shape = [n_samples]
        Cluster labels to evaluate

    eps: None or float
        If a float, that value is added to all values in the contingency
        matrix. This helps to stop NaN propagation.
        If ``None``, nothing is adjusted.

    Returns
    -------
    contingency: array, shape=[n_classes_true, n_classes_pred]
        Matrix :math:`C` such that :math:`C_{i, j}` is the number of samples in
        true class :math:`i` and in predicted class :math:`j`. If
        ``eps is None``, the dtype of this array will be integer. If ``eps`` is
        given, the dtype will be float.
    """
    classes, class_idx = np.unique(labels_true, return_inverse=True)
    clusters, cluster_idx = np.unique(labels_pred, return_inverse=True)
    n_classes = classes.shape[0]
    n_clusters = clusters.shape[0]
    # Using coo_matrix to accelerate simple histogram calculation,
    # i.e. bins are consecutive integers
    # Currently, coo_matrix is faster than histogram2d for simple cases
    # Bug fix: use the builtin ``int`` instead of the deprecated ``np.int``
    # alias, which was removed in NumPy 1.24 and made this function raise
    # AttributeError on modern NumPy. The resulting dtype is identical.
    contingency = coo_matrix((np.ones(class_idx.shape[0]),
                              (class_idx, cluster_idx)),
                             shape=(n_classes, n_clusters),
                             dtype=int).toarray()
    if eps is not None:
        # don't use += as contingency is integer
        contingency = contingency + eps
    return contingency
# clustering measures
def adjusted_rand_score(labels_true, labels_pred):
    """Rand index adjusted for chance

    The Rand Index computes a similarity measure between two clusterings
    by considering all pairs of samples and counting pairs that are
    assigned in the same or different clusters in the predicted and
    true clusterings.

    The raw RI score is then "adjusted for chance" into the ARI score
    using the following scheme::

        ARI = (RI - Expected_RI) / (max(RI) - Expected_RI)

    The adjusted Rand index is thus ensured to have a value close to
    0.0 for random labeling independently of the number of clusters and
    samples and exactly 1.0 when the clusterings are identical (up to
    a permutation).

    ARI is a symmetric measure::

        adjusted_rand_score(a, b) == adjusted_rand_score(b, a)

    Read more in the :ref:`User Guide <adjusted_rand_score>`.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        Ground truth class labels to be used as a reference

    labels_pred : array, shape = [n_samples]
        Cluster labels to evaluate

    Returns
    -------
    ari : float
        Similarity score between -1.0 and 1.0. Random labelings have an ARI
        close to 0.0. 1.0 stands for perfect match.

    Examples
    --------

    Perfectly maching labelings have a score of 1 even

      >>> from sklearn.metrics.cluster import adjusted_rand_score
      >>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 1])
      1.0
      >>> adjusted_rand_score([0, 0, 1, 1], [1, 1, 0, 0])
      1.0

    Labelings that assign all classes members to the same clusters
    are complete be not always pure, hence penalized::

      >>> adjusted_rand_score([0, 0, 1, 2], [0, 0, 1, 1])  # doctest: +ELLIPSIS
      0.57...

    ARI is symmetric, so labelings that have pure clusters with members
    coming from the same classes but unnecessary splits are penalized::

      >>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 2])  # doctest: +ELLIPSIS
      0.57...

    If classes members are completely split across different clusters, the
    assignment is totally incomplete, hence the ARI is very low::

      >>> adjusted_rand_score([0, 0, 0, 0], [0, 1, 2, 3])
      0.0

    References
    ----------

    .. [Hubert1985] `L. Hubert and P. Arabie, Comparing Partitions,
      Journal of Classification 1985`
      http://www.springerlink.com/content/x64124718341j1j0/

    .. [wk] http://en.wikipedia.org/wiki/Rand_index#Adjusted_Rand_index

    See also
    --------
    adjusted_mutual_info_score: Adjusted Mutual Information
    """
    labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
    n_samples = labels_true.shape[0]
    n_classes = np.unique(labels_true).shape[0]
    n_clusters = np.unique(labels_pred).shape[0]

    # Trivial clusterings -- everything in one cluster, an empty labeling,
    # or one cluster per sample -- are defined to match perfectly.
    if (n_classes == n_clusters == 1
            or n_classes == n_clusters == 0
            or n_classes == n_clusters == len(labels_true)):
        return 1.0

    contingency = contingency_matrix(labels_true, labels_pred)

    # Pairwise agreement counts used by the ARI formula.
    comb_rows = sum(comb2(n_c) for n_c in contingency.sum(axis=1))
    comb_cols = sum(comb2(n_k) for n_k in contingency.sum(axis=0))
    comb_cells = sum(comb2(n_ij) for n_ij in contingency.flatten())

    expected_index = (comb_rows * comb_cols) / float(comb(n_samples, 2))
    max_index = (comb_rows + comb_cols) / 2.
    return (comb_cells - expected_index) / (max_index - expected_index)
def homogeneity_completeness_v_measure(labels_true, labels_pred):
    """Compute the homogeneity and completeness and V-Measure scores at once

    Those metrics are based on normalized conditional entropy measures of
    the clustering labeling to evaluate given the knowledge of a Ground
    Truth class labels of the same samples.

    A clustering result satisfies homogeneity if all of its clusters
    contain only data points which are members of a single class.

    A clustering result satisfies completeness if all the data points
    that are members of a given class are elements of the same cluster.

    Both scores have positive values between 0.0 and 1.0, larger values
    being desirable.

    Those 3 metrics are independent of the absolute values of the labels:
    a permutation of the class or cluster label values won't change the
    score values in any way.

    V-Measure is furthermore symmetric: swapping ``labels_true`` and
    ``label_pred`` will give the same score. This does not hold for
    homogeneity and completeness.

    Read more in the :ref:`User Guide <homogeneity_completeness>`.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        ground truth class labels to be used as a reference

    labels_pred : array, shape = [n_samples]
        cluster labels to evaluate

    Returns
    -------
    homogeneity: float
        score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling

    completeness: float
        score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling

    v_measure: float
        harmonic mean of the first two

    See also
    --------
    homogeneity_score
    completeness_score
    v_measure_score
    """
    labels_true, labels_pred = check_clusterings(labels_true, labels_pred)

    if len(labels_true) == 0:
        # An empty labeling is perfect by convention.
        return 1.0, 1.0, 1.0

    entropy_of_classes = entropy(labels_true)
    entropy_of_clusters = entropy(labels_pred)
    mutual_information = mutual_info_score(labels_true, labels_pred)

    # Normalize MI by each marginal entropy; a zero-entropy labeling is
    # trivially homogeneous/complete.
    homogeneity = (mutual_information / entropy_of_classes
                   if entropy_of_classes else 1.0)
    completeness = (mutual_information / entropy_of_clusters
                    if entropy_of_clusters else 1.0)

    denominator = homogeneity + completeness
    if denominator == 0.0:
        v_measure_score = 0.0
    else:
        # Harmonic mean of homogeneity and completeness.
        v_measure_score = 2.0 * homogeneity * completeness / denominator
    return homogeneity, completeness, v_measure_score
def homogeneity_score(labels_true, labels_pred):
    """Homogeneity metric of a cluster labeling given a ground truth

    A clustering result satisfies homogeneity if all of its clusters
    contain only data points which are members of a single class.

    This metric is independent of the absolute values of the labels:
    a permutation of the class or cluster label values won't change the
    score value in any way.

    This metric is not symmetric: switching ``label_true`` with ``label_pred``
    will return the :func:`completeness_score` which will be different in
    general.

    Read more in the :ref:`User Guide <homogeneity_completeness>`.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        ground truth class labels to be used as a reference

    labels_pred : array, shape = [n_samples]
        cluster labels to evaluate

    Returns
    -------
    homogeneity: float
        score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling

    References
    ----------

    .. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
       conditional entropy-based external cluster evaluation measure
       <http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_

    See also
    --------
    completeness_score
    v_measure_score

    Examples
    --------

    Perfect labelings are homogeneous::

      >>> from sklearn.metrics.cluster import homogeneity_score
      >>> homogeneity_score([0, 0, 1, 1], [1, 1, 0, 0])
      1.0

    Non-perfect labelings that further split classes into more clusters can be
    perfectly homogeneous::

      >>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 1, 2]))
      ...                                                  # doctest: +ELLIPSIS
      1.0...
      >>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 2, 3]))
      ...                                                  # doctest: +ELLIPSIS
      1.0...

    Clusters that include samples from different classes do not make for an
    homogeneous labeling::

      >>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 0, 1]))
      ...                                                  # doctest: +ELLIPSIS
      0.0...
      >>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 0, 0]))
      ...                                                  # doctest: +ELLIPSIS
      0.0...
    """
    # Homogeneity is the first element of the combined metric triple.
    scores = homogeneity_completeness_v_measure(labels_true, labels_pred)
    return scores[0]
def completeness_score(labels_true, labels_pred):
    """Completeness metric of a cluster labeling given a ground truth

    A clustering result satisfies completeness if all the data points
    that are members of a given class are elements of the same cluster.

    This metric is independent of the absolute values of the labels:
    a permutation of the class or cluster label values won't change the
    score value in any way.

    This metric is not symmetric: switching ``label_true`` with ``label_pred``
    will return the :func:`homogeneity_score` which will be different in
    general.

    Read more in the :ref:`User Guide <homogeneity_completeness>`.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        ground truth class labels to be used as a reference

    labels_pred : array, shape = [n_samples]
        cluster labels to evaluate

    Returns
    -------
    completeness: float
        score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling

    References
    ----------

    .. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
       conditional entropy-based external cluster evaluation measure
       <http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_

    See also
    --------
    homogeneity_score
    v_measure_score

    Examples
    --------

    Perfect labelings are complete::

      >>> from sklearn.metrics.cluster import completeness_score
      >>> completeness_score([0, 0, 1, 1], [1, 1, 0, 0])
      1.0

    Non-perfect labelings that assign all classes members to the same clusters
    are still complete::

      >>> print(completeness_score([0, 0, 1, 1], [0, 0, 0, 0]))
      1.0
      >>> print(completeness_score([0, 1, 2, 3], [0, 0, 1, 1]))
      1.0

    If classes members are split across different clusters, the
    assignment cannot be complete::

      >>> print(completeness_score([0, 0, 1, 1], [0, 1, 0, 1]))
      0.0
      >>> print(completeness_score([0, 0, 0, 0], [0, 1, 2, 3]))
      0.0
    """
    # Completeness is the second element of the combined metric triple.
    scores = homogeneity_completeness_v_measure(labels_true, labels_pred)
    return scores[1]
def v_measure_score(labels_true, labels_pred):
    """V-measure cluster labeling given a ground truth.

    This score is identical to :func:`normalized_mutual_info_score`.

    The V-measure is the harmonic mean between homogeneity and completeness::

        v = 2 * (homogeneity * completeness) / (homogeneity + completeness)

    This metric is independent of the absolute values of the labels:
    a permutation of the class or cluster label values won't change the
    score value in any way.

    This metric is furthermore symmetric: switching ``label_true`` with
    ``label_pred`` will return the same score value. This can be useful to
    measure the agreement of two independent label assignments strategies
    on the same dataset when the real ground truth is not known.

    Read more in the :ref:`User Guide <homogeneity_completeness>`.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        ground truth class labels to be used as a reference

    labels_pred : array, shape = [n_samples]
        cluster labels to evaluate

    Returns
    -------
    v_measure: float
        score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling

    References
    ----------

    .. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
       conditional entropy-based external cluster evaluation measure
       <http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_

    See also
    --------
    homogeneity_score
    completeness_score

    Examples
    --------

    Perfect labelings are both homogeneous and complete, hence have score 1.0::

      >>> from sklearn.metrics.cluster import v_measure_score
      >>> v_measure_score([0, 0, 1, 1], [0, 0, 1, 1])
      1.0
      >>> v_measure_score([0, 0, 1, 1], [1, 1, 0, 0])
      1.0

    Labelings that assign all classes members to the same clusters
    are complete be not homogeneous, hence penalized::

      >>> print("%.6f" % v_measure_score([0, 0, 1, 2], [0, 0, 1, 1]))
      ...                                                  # doctest: +ELLIPSIS
      0.8...
      >>> print("%.6f" % v_measure_score([0, 1, 2, 3], [0, 0, 1, 1]))
      ...                                                  # doctest: +ELLIPSIS
      0.66...

    Labelings that have pure clusters with members coming from the same
    classes are homogeneous but un-necessary splits harms completeness
    and thus penalize V-measure as well::

      >>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 1, 2]))
      ...                                                  # doctest: +ELLIPSIS
      0.8...
      >>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 1, 2, 3]))
      ...                                                  # doctest: +ELLIPSIS
      0.66...

    If classes members are completely split across different clusters,
    the assignment is totally incomplete, hence the V-Measure is null::

      >>> print("%.6f" % v_measure_score([0, 0, 0, 0], [0, 1, 2, 3]))
      ...                                                  # doctest: +ELLIPSIS
      0.0...

    Clusters that include samples from totally different classes totally
    destroy the homogeneity of the labeling, hence::

      >>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 0, 0]))
      ...                                                  # doctest: +ELLIPSIS
      0.0...
    """
    # V-measure is the third element of the combined metric triple.
    scores = homogeneity_completeness_v_measure(labels_true, labels_pred)
    return scores[2]
def mutual_info_score(labels_true, labels_pred, contingency=None):
    """Mutual Information between two clusterings.

    The Mutual Information measures the agreement of two label
    assignments of the same data, ignoring permutations. With
    :math:`P(i)` the probability that a random sample falls in cluster
    :math:`U_i` and :math:`P'(j)` the probability that it falls in
    cluster :math:`V_j`, it is defined as

    .. math::

        MI(U,V)=\\sum_{i=1}^R \\sum_{j=1}^C P(i,j)\\log\\frac{P(i,j)}{P(i)P'(j)}

    which equals the Kullback-Leibler divergence of the joint
    distribution with the product distribution of the marginals. The
    score is symmetric and independent of the actual label values.

    Read more in the :ref:`User Guide <mutual_info_score>`.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        A clustering of the data into disjoint subsets.

    labels_pred : array, shape = [n_samples]
        A clustering of the data into disjoint subsets.

    contingency : None or array, shape = [n_classes_true, n_classes_pred]
        A contingency matrix given by the :func:`contingency_matrix`
        function. If ``None`` it is computed from the labels, otherwise
        the given value is used and ``labels_true``/``labels_pred`` are
        ignored.

    Returns
    -------
    mi : float
        Mutual information, a non-negative value.

    See also
    --------
    adjusted_mutual_info_score: Adjusted against chance Mutual Information
    normalized_mutual_info_score: Normalized Mutual Information
    """
    if contingency is None:
        labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
        contingency = contingency_matrix(labels_true, labels_pred)
    contingency = np.array(contingency, dtype='float')
    total = np.sum(contingency)
    # Marginal counts for the true (rows) and predicted (columns) labels.
    row_sums = np.sum(contingency, axis=1)
    col_sums = np.sum(contingency, axis=0)
    nonzero = contingency != 0.0
    # Work only on the non-zero cells of the contingency table.
    joint = contingency[nonzero]
    # Take logs before normalizing: log(a / b) is evaluated as
    # log(a) - log(b) to limit loss of precision.
    log_joint = np.log(joint)
    joint /= total
    log_outer = (-np.log(np.outer(row_sums, col_sums)[nonzero])
                 + log(row_sums.sum()) + log(col_sums.sum()))
    terms = (joint * (log_joint - log(total))
             + joint * log_outer)
    return terms.sum()
def adjusted_mutual_info_score(labels_true, labels_pred):
    """Adjusted Mutual Information between two clusterings.

    Adjusted Mutual Information (AMI) is an adjustment of the Mutual
    Information (MI) score to account for chance. It accounts for the
    fact that the MI is generally higher for two clusterings with a
    larger number of clusters, regardless of whether there is actually
    more information shared. For two clusterings :math:`U` and
    :math:`V`, the AMI is given as::

        AMI(U, V) = [MI(U, V) - E(MI(U, V))] / [max(H(U), H(V)) - E(MI(U, V))]

    This metric is independent of the absolute values of the labels:
    a permutation of the class or cluster label values won't change the
    score value in any way.

    This metric is furthermore symmetric: switching ``label_true`` with
    ``label_pred`` will return the same score value. This can be useful
    to measure the agreement of two independent label assignments
    strategies on the same dataset when the real ground truth is not
    known.

    Be mindful that this function is an order of magnitude slower than
    other metrics, such as the Adjusted Rand Index.

    Read more in the :ref:`User Guide <mutual_info_score>`.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        A clustering of the data into disjoint subsets.

    labels_pred : array, shape = [n_samples]
        A clustering of the data into disjoint subsets.

    Returns
    -------
    ami : float (upper-bounded by 1.0)
        The AMI returns a value of 1 when the two partitions are
        identical (ie perfectly matched). Random partitions (independent
        labellings) have an expected AMI around 0 on average hence can
        be negative.

    See also
    --------
    adjusted_rand_score: Adjusted Rand Index
    mutual_info_score: Mutual Information (not adjusted for chance)

    Examples
    --------
    Perfect labelings are both homogeneous and complete, hence have
    score 1.0::

      >>> from sklearn.metrics.cluster import adjusted_mutual_info_score
      >>> adjusted_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
      1.0
      >>> adjusted_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
      1.0

    If classes members are completely split across different clusters,
    the assignment is totally in-complete, hence the AMI is null::

      >>> adjusted_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
      0.0

    References
    ----------
    .. [1] `Vinh, Epps, and Bailey, (2010). Information Theoretic Measures for
       Clusterings Comparison: Variants, Properties, Normalization and
       Correction for Chance, JMLR
       <http://jmlr.csail.mit.edu/papers/volume11/vinh10a/vinh10a.pdf>`_

    .. [2] `Wikipedia entry for the Adjusted Mutual Information
       <http://en.wikipedia.org/wiki/Adjusted_Mutual_Information>`_
    """
    labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
    n_samples = labels_true.shape[0]
    classes = np.unique(labels_true)
    clusters = np.unique(labels_pred)
    # Special limit cases: no clustering since the data is not split.
    # This is a perfect match hence return 1.0.
    if (classes.shape[0] == clusters.shape[0] == 1
            or classes.shape[0] == clusters.shape[0] == 0):
        return 1.0
    contingency = contingency_matrix(labels_true, labels_pred)
    contingency = np.array(contingency, dtype='float')
    # Calculate the MI for the two clusterings
    mi = mutual_info_score(labels_true, labels_pred,
                           contingency=contingency)
    # Calculate the expected value for the mutual information
    emi = expected_mutual_information(contingency, n_samples)
    # Calculate entropy for each labeling; subtracting the expected MI
    # from both numerator and denominator adjusts the score for chance.
    h_true, h_pred = entropy(labels_true), entropy(labels_pred)
    ami = (mi - emi) / (max(h_true, h_pred) - emi)
    return ami
def normalized_mutual_info_score(labels_true, labels_pred):
    """Normalized Mutual Information between two clusterings.

    Normalized Mutual Information (NMI) is a normalization of the Mutual
    Information (MI) score to scale the results between 0 (no mutual
    information) and 1 (perfect correlation). In this function, mutual
    information is normalized by ``sqrt(H(labels_true) * H(labels_pred))``.

    This measure is not adjusted for chance. Therefore
    :func:`adjusted_mutual_info_score` might be preferred.

    This metric is independent of the absolute values of the labels:
    a permutation of the class or cluster label values won't change the
    score value in any way.

    This metric is furthermore symmetric: switching ``label_true`` with
    ``label_pred`` will return the same score value. This can be useful
    to measure the agreement of two independent label assignments
    strategies on the same dataset when the real ground truth is not
    known.

    Read more in the :ref:`User Guide <mutual_info_score>`.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        A clustering of the data into disjoint subsets.

    labels_pred : array, shape = [n_samples]
        A clustering of the data into disjoint subsets.

    Returns
    -------
    nmi : float
        Score between 0.0 and 1.0. 1.0 stands for perfectly complete
        labeling.

    See also
    --------
    adjusted_rand_score: Adjusted Rand Index
    adjusted_mutual_info_score: Adjusted Mutual Information (adjusted
        against chance)

    Examples
    --------
    Perfect labelings are both homogeneous and complete, hence have
    score 1.0::

      >>> from sklearn.metrics.cluster import normalized_mutual_info_score
      >>> normalized_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
      1.0
      >>> normalized_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
      1.0

    If classes members are completely split across different clusters,
    the assignment is totally in-complete, hence the NMI is null::

      >>> normalized_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
      0.0
    """
    labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
    classes = np.unique(labels_true)
    clusters = np.unique(labels_pred)
    # Special limit cases: no clustering since the data is not split.
    # This is a perfect match hence return 1.0.
    if (classes.shape[0] == clusters.shape[0] == 1
            or classes.shape[0] == clusters.shape[0] == 0):
        return 1.0
    contingency = contingency_matrix(labels_true, labels_pred)
    contingency = np.array(contingency, dtype='float')
    # Calculate the MI for the two clusterings
    mi = mutual_info_score(labels_true, labels_pred,
                           contingency=contingency)
    # Normalize by the geometric mean of the two entropies; the
    # max(..., 1e-10) guards against division by zero for degenerate
    # labelings with (near-)zero entropy.
    h_true, h_pred = entropy(labels_true), entropy(labels_pred)
    nmi = mi / max(np.sqrt(h_true * h_pred), 1e-10)
    return nmi
def entropy(labels):
    """Calculate the entropy (in nats) for a labeling.

    Parameters
    ----------
    labels : array-like
        Flat sequence of cluster/class labels.

    Returns
    -------
    ent : float
        Entropy of the label distribution. By convention an empty
        labeling has entropy 1.0, which keeps the normalizations in the
        callers (AMI/NMI) finite.
    """
    if len(labels) == 0:
        return 1.0
    # Map arbitrary label values to contiguous integer codes.
    label_idx = np.unique(labels, return_inverse=True)[1]
    # ``np.float`` was deprecated and removed in NumPy 1.24; the builtin
    # ``float`` is the exact equivalent. ``np.bincount`` replaces the
    # compat ``bincount`` shim with identical behavior here.
    pi = np.bincount(label_idx).astype(float)
    pi = pi[pi > 0]
    pi_sum = np.sum(pi)
    # log(a / b) should be calculated as log(a) - log(b) for
    # possible loss of precision
    return -np.sum((pi / pi_sum) * (np.log(pi) - log(pi_sum)))
| bsd-3-clause |
jakobworldpeace/scikit-learn | examples/tree/plot_tree_regression.py | 95 | 1516 | """
===================================================================
Decision Tree Regression
===================================================================
A 1D regression with decision tree.
The :ref:`decision trees <tree>` is
used to fit a sine curve with addition noisy observation. As a result, it
learns local linear regressions approximating the sine curve.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision trees learn too fine
details of the training data and learn from the noise, i.e. they overfit.
"""
print(__doc__)

# Import the necessary modules and libraries
import numpy as np
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt

# Create a random dataset: sorted samples of a sine curve on [0, 5]
rng = np.random.RandomState(1)
X = np.sort(5 * rng.rand(80, 1), axis=0)
y = np.sin(X).ravel()
# Add strong noise to every 5th target value (16 points in total)
y[::5] += 3 * (0.5 - rng.rand(16))

# Fit regression model: a shallow tree (depth 2) and a deeper tree
# (depth 5) expected to overfit the noisy observations
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_1.fit(X, y)
regr_2.fit(X, y)

# Predict on a dense grid to visualize the piecewise-constant fits
X_test = np.arange(0.0, 5.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)

# Plot the results
plt.figure()
plt.scatter(X, y, c="darkorange", label="data")
plt.plot(X_test, y_1, color="cornflowerblue", label="max_depth=2", linewidth=2)
plt.plot(X_test, y_2, color="yellowgreen", label="max_depth=5", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
vascotenner/holoviews | holoviews/core/data/pandas.py | 1 | 8030 | from __future__ import absolute_import
from distutils.version import LooseVersion
try:
import itertools.izip as zip
except ImportError:
pass
import numpy as np
import pandas as pd
from .interface import Interface
from ..dimension import Dimension
from ..element import Element, NdElement
from ..dimension import OrderedDict as cyODict
from ..ndmapping import NdMapping, item_check
from .. import util
class PandasInterface(Interface):
    """Data interface backed by a pandas DataFrame.

    Implements the holoviews column-data API (init, range, groupby,
    aggregate, select, sort, ...) on top of ``pd.DataFrame`` objects.
    """

    # Raw data types this interface can wrap (guarded for missing pandas).
    types = (pd.DataFrame if pd else None,)

    datatype = 'dataframe'

    @classmethod
    def dimension_type(cls, columns, dim):
        """Return the numpy scalar type of the column backing ``dim``."""
        name = columns.get_dimension(dim).name
        idx = list(columns.data.columns).index(name)
        return columns.data.dtypes[idx].type

    @classmethod
    def init(cls, eltype, data, kdims, vdims):
        """Coerce ``data`` into a DataFrame and resolve kdims/vdims.

        Accepts DataFrames, dicts, NdElements, ndarrays and tuples of
        columns; returns ``(data, dimension_settings, extra)`` as
        required by the Interface contract.
        """
        element_params = eltype.params()
        kdim_param = element_params['kdims']
        vdim_param = element_params['vdims']
        if util.is_dataframe(data):
            # Infer whichever of kdims/vdims was not supplied from the
            # DataFrame columns, using the element defaults for ndim.
            ndim = len(kdim_param.default) if kdim_param.default else None
            if kdims and vdims is None:
                vdims = [c for c in data.columns if c not in kdims]
            elif vdims and kdims is None:
                kdims = [c for c in data.columns if c not in vdims][:ndim]
            elif kdims is None and vdims is None:
                kdims = list(data.columns[:ndim])
                vdims = [] if ndim is None else list(data.columns[ndim:])
        else:
            # Check if data is of non-numeric type
            # Then use defined data type
            kdims = kdims if kdims else kdim_param.default
            vdims = vdims if vdims else vdim_param.default
            columns = [d.name if isinstance(d, Dimension) else d
                       for d in kdims+vdims]
            if ((isinstance(data, dict) and all(c in data for c in columns)) or
                (isinstance(data, NdElement) and all(c in data.dimensions() for c in columns))):
                data = cyODict(((d, data[d]) for d in columns))
            elif isinstance(data, dict) and not all(d in data for d in columns):
                # Mapping of key-tuples to value-tuples: unzip into columns.
                column_data = zip(*((util.wrap_tuple(k)+util.wrap_tuple(v))
                                    for k, v in data.items()))
                data = cyODict(((c, col) for c, col in zip(columns, column_data)))
            elif isinstance(data, np.ndarray):
                if data.ndim == 1:
                    if eltype._1d:
                        data = np.atleast_2d(data).T
                    else:
                        # Treat a flat array as (index, value) columns.
                        data = (range(len(data)), data)
                else:
                    data = tuple(data[:, i] for i in range(data.shape[1]))
            if isinstance(data, tuple):
                data = [np.array(d) if not isinstance(d, np.ndarray) else d for d in data]
                if not cls.expanded(data):
                    raise ValueError('PandasInterface expects data to be of uniform shape.')
                # NOTE(review): DataFrame.from_items is deprecated in
                # modern pandas; kept for the pandas versions this
                # module targets.
                data = pd.DataFrame.from_items([(c, d) for c, d in
                                                zip(columns, data)])
            else:
                data = pd.DataFrame(data, columns=columns)
        return data, {'kdims':kdims, 'vdims':vdims}, {}

    @classmethod
    def range(cls, columns, dimension):
        """Return the (min, max) range of the column backing ``dimension``.

        Object-dtype columns are sorted and the first/last entries
        returned, since min/max are not generally defined for them.
        """
        column = columns.data[columns.get_dimension(dimension).name]
        if column.dtype.kind == 'O':
            # Series.sort was replaced by sort_values in pandas 0.17.
            if (not isinstance(columns.data, pd.DataFrame) or
                LooseVersion(pd.__version__) < '0.17.0'):
                column = column.sort(inplace=False)
            else:
                column = column.sort_values()
            return column.iloc[0], column.iloc[-1]
        else:
            return (column.min(), column.max())

    @classmethod
    def concat(cls, columns_objs):
        """Concatenate the data of multiple columns objects row-wise."""
        cast_objs = cls.cast(columns_objs)
        return pd.concat([col.data for col in cast_objs])

    @classmethod
    def groupby(cls, columns, dimensions, container_type, group_type, **kwargs):
        """Group the data by ``dimensions`` and wrap each group.

        Each group is wrapped in ``group_type`` and the groups are
        collected in ``container_type`` (keyed by the grouped values).
        """
        index_dims = [columns.get_dimension(d) for d in dimensions]
        element_dims = [kdim for kdim in columns.kdims
                        if kdim not in index_dims]
        group_kwargs = {}
        if group_type != 'raw' and issubclass(group_type, Element):
            group_kwargs = dict(util.get_param_values(columns),
                                kdims=element_dims)
        group_kwargs.update(kwargs)
        data = [(k, group_type(v, **group_kwargs)) for k, v in
                columns.data.groupby(dimensions, sort=False)]
        if issubclass(container_type, NdMapping):
            # Skip validation of the individual items for speed.
            with item_check(False):
                return container_type(data, kdims=index_dims)
        else:
            return container_type(data)

    @classmethod
    def aggregate(cls, columns, dimensions, function, **kwargs):
        """Aggregate the value dimensions over ``dimensions`` with ``function``."""
        data = columns.data
        cols = [d.name for d in columns.kdims if d in dimensions]
        vdims = columns.dimensions('value', True)
        reindexed = data.reindex(columns=cols+vdims)
        if len(dimensions):
            return reindexed.groupby(cols, sort=False).aggregate(function, **kwargs).reset_index()
        else:
            # No grouping dimensions: aggregate each column to a scalar
            # and return a single-row DataFrame.
            agg = reindexed.apply(function, **kwargs)
            return pd.DataFrame.from_items([(col, [v]) for col, v in
                                            zip(agg.index, agg.values)])

    @classmethod
    def unpack_scalar(cls, columns, data):
        """
        Given a columns object and data in the appropriate format for
        the interface, return a simple scalar.
        """
        if len(data) != 1 or len(data.columns) > 1:
            return data
        return data.iat[0,0]

    @classmethod
    def reindex(cls, columns, kdims=None, vdims=None):
        # DataFrame based tables don't need to be reindexed
        return columns.data

    @classmethod
    def redim(cls, dataset, dimensions):
        """Rename columns according to a mapping of new Dimension objects."""
        column_renames = {k: v.name for k, v in dimensions.items()}
        return dataset.data.rename(columns=column_renames)

    @classmethod
    def sort(cls, columns, by=[]):
        """Sort the data by one or more dimensions (all kdims by default)."""
        import pandas as pd
        if not isinstance(by, list): by = [by]
        if not by: by = range(columns.ndims)
        cols = [columns.get_dimension(d).name for d in by]
        # DataFrame.sort was replaced by sort_values in pandas 0.17.
        if (not isinstance(columns.data, pd.DataFrame) or
            LooseVersion(pd.__version__) < '0.17.0'):
            return columns.data.sort(columns=cols)
        return columns.data.sort_values(by=cols)

    @classmethod
    def select(cls, columns, selection_mask=None, **selection):
        """Apply a selection, returning a DataFrame or a scalar when the
        selection indexes down to a single value."""
        df = columns.data
        if selection_mask is None:
            selection_mask = cls.select_mask(columns, selection)
        indexed = cls.indexed(columns, selection)
        # NOTE(review): ``.ix`` was removed in pandas 1.0; this code
        # targets older pandas — verify before upgrading.
        df = df.ix[selection_mask]
        if indexed and len(df) == 1:
            return df[columns.vdims[0].name].iloc[0]
        return df

    @classmethod
    def values(cls, columns, dim, expanded=True, flat=True):
        """Return the values along ``dim`` as a numpy array.

        Dask series are computed first; with ``expanded=False`` only the
        unique values are returned.
        """
        data = columns.data[dim]
        if util.dd and isinstance(data, util.dd.Series):
            data = data.compute()
        if not expanded:
            return util.unique_array(data)
        return np.array(data)

    @classmethod
    def sample(cls, columns, samples=[]):
        """Return the rows matching any of the supplied sample values.

        Each sample is matched positionally against the leading columns.
        """
        data = columns.data
        mask = False
        for sample in samples:
            sample_mask = True
            if np.isscalar(sample): sample = [sample]
            for i, v in enumerate(sample):
                sample_mask = np.logical_and(sample_mask, data.iloc[:, i]==v)
            # Accumulate matches across samples (logical OR).
            mask |= sample_mask
        return data[mask]

    @classmethod
    def add_dimension(cls, columns, dimension, dim_pos, values, vdim):
        """Return a copy of the data with ``dimension`` inserted at ``dim_pos``."""
        data = columns.data.copy()
        if dimension.name not in data:
            data.insert(dim_pos, dimension.name, values)
        return data

    @classmethod
    def dframe(cls, columns, dimensions):
        """Return a DataFrame copy, optionally restricted to ``dimensions``."""
        if dimensions:
            return columns.reindex(dimensions).data.copy()
        else:
            return columns.data.copy()


# Make the interface available under its declared datatype name.
Interface.register(PandasInterface)
| bsd-3-clause |
naturali/tensorflow | tensorflow/contrib/learn/python/learn/tests/dataframe/feeding_functions_test.py | 30 | 4777 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests feeding functions using arrays and `DataFrames`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import tensorflow.contrib.learn.python.learn.dataframe.queues.feeding_functions as ff
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
def vals_to_list(a):
  """Return a copy of dict `a` with ndarray values converted to lists.

  Non-array values are passed through unchanged, which makes feed dicts
  comparable with plain ``==`` in the tests below.
  """
  converted = {}
  for key, val in a.items():
    converted[key] = val.tolist() if isinstance(val, np.ndarray) else val
  return converted
class _FeedingFunctionsTestCase(tf.test.TestCase):
  """Tests for feeding functions."""

  def testArrayFeedFnBatchOne(self):
    # Batch size 1: each call yields one row, cycling through the array.
    array = np.arange(32).reshape([16, 2])
    placeholders = ["index_placeholder", "value_placeholder"]
    aff = ff._ArrayFeedFn(placeholders, array, 1)

    # cycle around a couple times
    for x in range(0, 100):
      i = x % 16
      expected = {"index_placeholder": [i],
                  "value_placeholder": [[2 * i, 2 * i + 1]]}
      actual = aff()
      self.assertEqual(expected, vals_to_list(actual))

  def testArrayFeedFnBatchFive(self):
    # Batch size 5 does not divide the 16 rows evenly, so batches are
    # expected to straddle the wrap-around point.
    array = np.arange(32).reshape([16, 2])
    placeholders = ["index_placeholder", "value_placeholder"]
    aff = ff._ArrayFeedFn(placeholders, array, 5)

    # cycle around a couple times
    for _ in range(0, 101, 2):
      aff()
    expected = {"index_placeholder": [15, 0, 1, 2, 3],
                "value_placeholder": [[30, 31], [0, 1], [2, 3], [4, 5], [6, 7]]}
    actual = aff()
    self.assertEqual(expected, vals_to_list(actual))

  def testArrayFeedFnBatchOneHundred(self):
    # Batch larger than the array: the feed wraps several times per call.
    array = np.arange(32).reshape([16, 2])
    placeholders = ["index_placeholder", "value_placeholder"]
    aff = ff._ArrayFeedFn(placeholders, array, 100)
    expected = {"index_placeholder": list(range(0, 16)) * 6 + list(range(0, 4)),
                "value_placeholder": np.arange(32).reshape([16, 2]).tolist() * 6
                + [[0, 1], [2, 3], [4, 5], [6, 7]]}
    actual = aff()
    self.assertEqual(expected, vals_to_list(actual))

  def testPandasFeedFnBatchOne(self):
    if not HAS_PANDAS:
      return
    array1 = np.arange(32, 64)
    array2 = np.arange(64, 96)
    df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 128))
    placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
    aff = ff._PandasFeedFn(placeholders, df, 1)

    # cycle around a couple times
    for x in range(0, 100):
      i = x % 32
      expected = {"index_placeholder": [i + 96],
                  "a_placeholder": [32 + i],
                  "b_placeholder": [64 + i]}
      actual = aff()
      self.assertEqual(expected, vals_to_list(actual))

  def testPandasFeedFnBatchFive(self):
    if not HAS_PANDAS:
      return
    array1 = np.arange(32, 64)
    array2 = np.arange(64, 96)
    df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 128))
    placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
    aff = ff._PandasFeedFn(placeholders, df, 5)

    # cycle around a couple times
    for _ in range(0, 101, 2):
      aff()
    # The DataFrame index is fed alongside the column values.
    expected = {"index_placeholder": [127, 96, 97, 98, 99],
                "a_placeholder": [63, 32, 33, 34, 35],
                "b_placeholder": [95, 64, 65, 66, 67]}
    actual = aff()
    self.assertEqual(expected, vals_to_list(actual))

  def testPandasFeedFnBatchOneHundred(self):
    if not HAS_PANDAS:
      return
    array1 = np.arange(32, 64)
    array2 = np.arange(64, 96)
    df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 128))
    placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
    aff = ff._PandasFeedFn(placeholders, df, 100)
    expected = {
        "index_placeholder": list(range(96, 128)) * 3 + list(range(96, 100)),
        "a_placeholder": list(range(32, 64)) * 3 + list(range(32, 36)),
        "b_placeholder": list(range(64, 96)) * 3 + list(range(64, 68))
    }
    actual = aff()
    self.assertEqual(expected, vals_to_list(actual))


if __name__ == "__main__":
  tf.test.main()
| apache-2.0 |
MatthieuBizien/scikit-learn | sklearn/decomposition/tests/test_factor_analysis.py | 112 | 3203 | # Author: Christian Osendorfer <osendorf@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD3
import numpy as np
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.exceptions import ConvergenceWarning
from sklearn.decomposition import FactorAnalysis
from sklearn.utils.testing import ignore_warnings
# Ignore warnings from switching to more power iterations in randomized_svd
@ignore_warnings
def test_factor_analysis():
    # Test FactorAnalysis ability to recover the data covariance structure
    rng = np.random.RandomState(0)
    n_samples, n_features, n_components = 20, 5, 3

    # Some random settings for the generative model
    W = rng.randn(n_components, n_features)
    # latent variables: n_samples draws of dimension n_components
    h = rng.randn(n_samples, n_components)
    # using gamma to model different noise variance
    # per component
    noise = rng.gamma(1, size=n_features) * rng.randn(n_samples, n_features)

    # generate observations
    # wlog, mean is 0
    X = np.dot(h, W) + noise

    # An unknown svd_method must be rejected, both at construction time
    # and when set after the fact.
    assert_raises(ValueError, FactorAnalysis, svd_method='foo')
    fa_fail = FactorAnalysis()
    fa_fail.svd_method = 'foo'
    assert_raises(ValueError, fa_fail.fit, X)
    fas = []
    for method in ['randomized', 'lapack']:
        fa = FactorAnalysis(n_components=n_components, svd_method=method)
        fa.fit(X)
        fas.append(fa)

        X_t = fa.transform(X)
        assert_equal(X_t.shape, (n_samples, n_components))

        assert_almost_equal(fa.loglike_[-1], fa.score_samples(X).sum())
        assert_almost_equal(fa.score_samples(X).mean(), fa.score(X))

        # The EM log-likelihood must be monotonically increasing.
        diff = np.all(np.diff(fa.loglike_))
        assert_greater(diff, 0., 'Log likelihood dif not increase')

        # Sample Covariance
        scov = np.cov(X, rowvar=0., bias=1.)

        # Model Covariance
        mcov = fa.get_covariance()
        diff = np.sum(np.abs(scov - mcov)) / W.size
        assert_less(diff, 0.1, "Mean absolute difference is %f" % diff)
    fa = FactorAnalysis(n_components=n_components,
                        noise_variance_init=np.ones(n_features))
    # noise_variance_init length must match the number of features.
    assert_raises(ValueError, fa.fit, X[:, :2])

    f = lambda x, y: np.abs(getattr(x, y))  # sign will not be equal
    fa1, fa2 = fas
    # Both SVD solvers should agree up to the sign of the components.
    for attr in ['loglike_', 'components_', 'noise_variance_']:
        assert_almost_equal(f(fa1, attr), f(fa2, attr))

    fa1.max_iter = 1
    fa1.verbose = True
    assert_warns(ConvergenceWarning, fa1.fit, X)

    # Test get_covariance and get_precision with n_components == n_features
    # with n_components < n_features and with n_components == 0
    for n_components in [0, 2, X.shape[1]]:
        fa.n_components = n_components
        fa.fit(X)
        cov = fa.get_covariance()
        precision = fa.get_precision()
        assert_array_almost_equal(np.dot(cov, precision),
                                  np.eye(X.shape[1]), 12)
| bsd-3-clause |
ElDeveloper/scikit-learn | sklearn/tests/test_pipeline.py | 29 | 15239 | """
Test the pipeline module.
"""
import numpy as np
from scipy import sparse
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_raises, assert_raises_regex, assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.base import clone
from sklearn.pipeline import Pipeline, FeatureUnion, make_pipeline, make_union
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.cluster import KMeans
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.decomposition import PCA, RandomizedPCA, TruncatedSVD
from sklearn.datasets import load_iris
from sklearn.preprocessing import StandardScaler
from sklearn.feature_extraction.text import CountVectorizer
# Tiny toy corpus used by the text-feature pipeline tests below.
JUNK_FOOD_DOCS = (
    "the pizza pizza beer copyright",
    "the pizza burger beer copyright",
    "the the pizza beer beer copyright",
    "the burger beer beer copyright",
    "the coke burger coke copyright",
    "the coke burger burger",
)
class IncorrectT(object):
    """Mock for parameter-dispatch tests.

    Stores two parameters but deliberately defines no ``fit`` method, so
    ``Pipeline`` must reject it as an estimator.
    """

    def __init__(self, a=None, b=None):
        self.a, self.b = a, b
class T(IncorrectT):
    """Mock estimator adding the minimal estimator surface
    (``fit``/``get_params``/``set_params``) on top of ``IncorrectT``."""

    def fit(self, X, y):
        # Fitting is a no-op; return self as real estimators do.
        return self

    def get_params(self, deep=False):
        return dict(a=self.a, b=self.b)

    def set_params(self, **params):
        # Deliberately only honours 'a', mirroring the original mock.
        self.a = params['a']
        return self
class TransfT(T):
    # Mock transformer: transform and inverse_transform are both the
    # identity, so data passes through pipelines unchanged.
    def transform(self, X, y=None):
        return X

    def inverse_transform(self, X):
        return X
class FitParamT(object):
    """Mock classifier recording the fit parameter it received."""

    def __init__(self):
        # Records whether fit() was called with should_succeed=True.
        self.successful = False

    def fit(self, X, y, should_succeed=False):
        self.successful = should_succeed

    def predict(self, X):
        # Echo the flag back so tests can verify that fit parameters
        # were routed to this pipeline step.
        return self.successful
def test_pipeline_init():
    # Test the various init parameters of the pipeline.
    assert_raises(TypeError, Pipeline)
    # Check that we can't instantiate pipelines with objects without fit
    # method
    pipe = assert_raises(TypeError, Pipeline, [('svc', IncorrectT)])
    # Smoke test with only an estimator
    clf = T()
    pipe = Pipeline([('svc', clf)])
    # get_params(deep=True) must expose the step's own parameters under
    # the 'svc__' prefix alongside the pipeline-level parameters.
    assert_equal(pipe.get_params(deep=True),
                 dict(svc__a=None, svc__b=None, svc=clf,
                      **pipe.get_params(deep=False)))

    # Check that params are set
    pipe.set_params(svc__a=0.1)
    assert_equal(clf.a, 0.1)
    assert_equal(clf.b, None)
    # Smoke test the repr:
    repr(pipe)

    # Test with two objects
    clf = SVC()
    filter1 = SelectKBest(f_classif)
    pipe = Pipeline([('anova', filter1), ('svc', clf)])

    # Check that we can't use the same stage name twice
    assert_raises(ValueError, Pipeline, [('svc', SVC()), ('svc', SVC())])

    # Check that params are set
    pipe.set_params(svc__C=0.1)
    assert_equal(clf.C, 0.1)
    # Smoke test the repr:
    repr(pipe)

    # Check that params are not set when naming them wrong
    assert_raises(ValueError, pipe.set_params, anova__C=0.1)

    # Test clone
    pipe2 = clone(pipe)
    assert_false(pipe.named_steps['svc'] is pipe2.named_steps['svc'])

    # Check that apart from estimators, the parameters are the same
    params = pipe.get_params(deep=True)
    params2 = pipe2.get_params(deep=True)

    for x in pipe.get_params(deep=False):
        params.pop(x)

    for x in pipe2.get_params(deep=False):
        params2.pop(x)

    # Remove estimators that were copied
    params.pop('svc')
    params.pop('anova')
    params2.pop('svc')
    params2.pop('anova')
    assert_equal(params, params2)
def test_pipeline_methods_anova():
    # Test the various methods of the pipeline (anova).
    iris = load_iris()
    X = iris.data
    y = iris.target
    # Test with Anova + LogisticRegression
    clf = LogisticRegression()
    filter1 = SelectKBest(f_classif, k=2)
    pipe = Pipeline([('anova', filter1), ('logistic', clf)])
    pipe.fit(X, y)
    # Smoke-test that every prediction method runs end-to-end.
    pipe.predict(X)
    pipe.predict_proba(X)
    pipe.predict_log_proba(X)
    pipe.score(X, y)
def test_pipeline_fit_params():
    # Test that the pipeline can take fit parameters: the
    # 'clf__should_succeed' kwarg must be routed to the 'clf' step only.
    pipe = Pipeline([('transf', TransfT()), ('clf', FitParamT())])
    pipe.fit(X=None, y=None, clf__should_succeed=True)
    # classifier should return True
    assert_true(pipe.predict(None))
    # and transformer params should not be changed
    assert_true(pipe.named_steps['transf'].a is None)
    assert_true(pipe.named_steps['transf'].b is None)
def test_pipeline_raise_set_params_error():
    # Test pipeline raises set params error message for nested models.
    pipe = Pipeline([('cls', LinearRegression())])

    # expected error message
    error_msg = ('Invalid parameter %s for estimator %s. '
                 'Check the list of available parameters '
                 'with `estimator.get_params().keys()`.')

    assert_raise_message(ValueError,
                         error_msg % ('fake', 'Pipeline'),
                         pipe.set_params,
                         fake='nope')

    # nested model check: an unknown 'fake__...' prefix must also fail
    assert_raise_message(ValueError,
                         error_msg % ("fake", pipe),
                         pipe.set_params,
                         fake__estimator='nope')
def test_pipeline_methods_pca_svm():
    # Test the various methods of the pipeline (pca + svm).
    iris = load_iris()
    X = iris.data
    y = iris.target
    # Test with PCA + SVC
    clf = SVC(probability=True, random_state=0)
    pca = PCA(n_components='mle', whiten=True)
    pipe = Pipeline([('pca', pca), ('svc', clf)])
    pipe.fit(X, y)
    # Smoke-test that every prediction method runs end-to-end.
    pipe.predict(X)
    pipe.predict_proba(X)
    pipe.predict_log_proba(X)
    pipe.score(X, y)
def test_pipeline_methods_preprocessing_svm():
    # Test the various methods of the pipeline (preprocessing + svm).
    iris = load_iris()
    X = iris.data
    y = iris.target
    n_samples = X.shape[0]
    n_classes = len(np.unique(y))
    scaler = StandardScaler()
    pca = RandomizedPCA(n_components=2, whiten=True)
    clf = SVC(probability=True, random_state=0, decision_function_shape='ovr')

    # Both a scaler and a PCA must work as the preprocessing step.
    for preprocessing in [scaler, pca]:
        pipe = Pipeline([('preprocess', preprocessing), ('svc', clf)])
        pipe.fit(X, y)

        # check shapes of various prediction functions
        predict = pipe.predict(X)
        assert_equal(predict.shape, (n_samples,))

        proba = pipe.predict_proba(X)
        assert_equal(proba.shape, (n_samples, n_classes))

        log_proba = pipe.predict_log_proba(X)
        assert_equal(log_proba.shape, (n_samples, n_classes))

        decision_function = pipe.decision_function(X)
        assert_equal(decision_function.shape, (n_samples, n_classes))

        pipe.score(X, y)
def test_fit_predict_on_pipeline():
    # test that the fit_predict method is implemented on a pipeline
    # test that the fit_predict on pipeline yields same results as applying
    # transform and clustering steps separately
    iris = load_iris()
    scaler = StandardScaler()
    km = KMeans(random_state=0)

    # first compute the transform and clustering step separately
    scaled = scaler.fit_transform(iris.data)
    separate_pred = km.fit_predict(scaled)

    # use a pipeline to do the transform and clustering in one step
    pipe = Pipeline([('scaler', scaler), ('Kmeans', km)])
    pipeline_pred = pipe.fit_predict(iris.data)

    assert_array_almost_equal(pipeline_pred, separate_pred)
def test_fit_predict_on_pipeline_without_fit_predict():
    # tests that a pipeline does not have fit_predict method when final
    # step of pipeline does not have fit_predict defined
    scaler = StandardScaler()
    pca = PCA()
    pipe = Pipeline([('scaler', scaler), ('pca', pca)])
    # Accessing the attribute must raise, since PCA has no fit_predict.
    assert_raises_regex(AttributeError,
                        "'PCA' object has no attribute 'fit_predict'",
                        getattr, pipe, 'fit_predict')
def test_feature_union():
    # basic sanity check for feature union
    iris = load_iris()
    X = iris.data
    X -= X.mean(axis=0)
    y = iris.target
    svd = TruncatedSVD(n_components=2, random_state=0)
    select = SelectKBest(k=1)
    fs = FeatureUnion([("svd", svd), ("select", select)])
    fs.fit(X, y)
    X_transformed = fs.transform(X)
    # 2 SVD components + 1 selected feature = 3 output columns
    assert_equal(X_transformed.shape, (X.shape[0], 3))

    # check if it does the expected thing
    assert_array_almost_equal(X_transformed[:, :-1], svd.fit_transform(X))
    assert_array_equal(X_transformed[:, -1],
                       select.fit_transform(X, y).ravel())

    # test if it also works for sparse input
    # We use a different svd object to control the random_state stream
    fs = FeatureUnion([("svd", svd), ("select", select)])
    X_sp = sparse.csr_matrix(X)
    X_sp_transformed = fs.fit_transform(X_sp, y)
    assert_array_almost_equal(X_transformed, X_sp_transformed.toarray())

    # test setting parameters
    fs.set_params(select__k=2)
    assert_equal(fs.fit_transform(X, y).shape, (X.shape[0], 4))

    # test it works with transformers missing fit_transform
    fs = FeatureUnion([("mock", TransfT()), ("svd", svd), ("select", select)])
    X_transformed = fs.fit_transform(X, y)
    # 4 passthrough columns + 2 SVD components + 2 selected features
    assert_equal(X_transformed.shape, (X.shape[0], 8))
def test_make_union():
    # make_union should derive lowercase step names from the class names.
    pca = PCA()
    mock = TransfT()
    fu = make_union(pca, mock)
    step_names = [name for name, _ in fu.transformer_list]
    step_transformers = [trans for _, trans in fu.transformer_list]
    assert_equal(tuple(step_names), ("pca", "transft"))
    assert_equal(tuple(step_transformers), (pca, mock))
def test_pipeline_transform():
    # Test whether pipeline works with a transformer at the end.
    # Also test pipeline.transform and pipeline.inverse_transform
    iris = load_iris()
    X = iris.data
    pca = PCA(n_components=2)
    pipeline = Pipeline([('pca', pca)])

    # test transform and fit_transform:
    X_trans = pipeline.fit(X).transform(X)
    X_trans2 = pipeline.fit_transform(X)
    # a single-step pipeline must behave exactly like the bare estimator
    X_trans3 = pca.fit_transform(X)
    assert_array_almost_equal(X_trans, X_trans2)
    assert_array_almost_equal(X_trans, X_trans3)

    X_back = pipeline.inverse_transform(X_trans)
    X_back2 = pca.inverse_transform(X_trans)
    assert_array_almost_equal(X_back, X_back2)
def test_pipeline_fit_transform():
    # A pipeline must fall back to fit(...).transform(...) when the last
    # step has no fit_transform of its own.
    iris = load_iris()
    X, y = iris.data, iris.target
    transft = TransfT()
    pipeline = Pipeline([('mock', transft)])

    result_via_pipeline = pipeline.fit_transform(X, y)
    result_direct = transft.fit(X, y).transform(X)
    assert_array_almost_equal(result_via_pipeline, result_direct)
def test_make_pipeline():
    # make_pipeline should name steps after their classes and number
    # duplicate class names.
    t1, t2 = TransfT(), TransfT()

    pipe = make_pipeline(t1, t2)
    assert_true(isinstance(pipe, Pipeline))
    step_names = [name for name, _ in pipe.steps]
    assert_equal(step_names[0], "transft-1")
    assert_equal(step_names[1], "transft-2")

    pipe = make_pipeline(t1, t2, FitParamT())
    assert_true(isinstance(pipe, Pipeline))
    step_names = [name for name, _ in pipe.steps]
    assert_equal(step_names[0], "transft-1")
    assert_equal(step_names[1], "transft-2")
    assert_equal(step_names[2], "fitparamt")
def test_feature_union_weights():
    # test feature union with transformer weights
    iris = load_iris()
    X = iris.data
    y = iris.target
    pca = RandomizedPCA(n_components=2, random_state=0)
    select = SelectKBest(k=1)
    # test using fit followed by transform
    fs = FeatureUnion([("pca", pca), ("select", select)],
                      transformer_weights={"pca": 10})
    fs.fit(X, y)
    X_transformed = fs.transform(X)
    # test using fit_transform
    fs = FeatureUnion([("pca", pca), ("select", select)],
                      transformer_weights={"pca": 10})
    X_fit_transformed = fs.fit_transform(X, y)
    # test it works with transformers missing fit_transform
    fs = FeatureUnion([("mock", TransfT()), ("pca", pca), ("select", select)],
                      transformer_weights={"mock": 10})
    X_fit_transformed_wo_method = fs.fit_transform(X, y)
    # check against expected result

    # We use a different pca object to control the random_state stream
    # (weighted outputs must equal weight * unweighted transform)
    assert_array_almost_equal(X_transformed[:, :-1], 10 * pca.fit_transform(X))
    assert_array_equal(X_transformed[:, -1],
                       select.fit_transform(X, y).ravel())
    assert_array_almost_equal(X_fit_transformed[:, :-1],
                              10 * pca.fit_transform(X))
    assert_array_equal(X_fit_transformed[:, -1],
                       select.fit_transform(X, y).ravel())
    # 4 passthrough + 2 pca + 1 selected = 7 columns
    assert_equal(X_fit_transformed_wo_method.shape, (X.shape[0], 7))
def test_feature_union_parallel():
    # test that n_jobs work for FeatureUnion
    X = JUNK_FOOD_DOCS

    fs = FeatureUnion([
        ("words", CountVectorizer(analyzer='word')),
        ("chars", CountVectorizer(analyzer='char')),
    ])

    fs_parallel = FeatureUnion([
        ("words", CountVectorizer(analyzer='word')),
        ("chars", CountVectorizer(analyzer='char')),
    ], n_jobs=2)

    fs_parallel2 = FeatureUnion([
        ("words", CountVectorizer(analyzer='word')),
        ("chars", CountVectorizer(analyzer='char')),
    ], n_jobs=2)

    fs.fit(X)
    X_transformed = fs.transform(X)
    assert_equal(X_transformed.shape[0], len(X))

    # parallel transform must match the serial result exactly
    fs_parallel.fit(X)
    X_transformed_parallel = fs_parallel.transform(X)
    assert_equal(X_transformed.shape, X_transformed_parallel.shape)
    assert_array_equal(
        X_transformed.toarray(),
        X_transformed_parallel.toarray()
    )

    # fit_transform should behave the same
    X_transformed_parallel2 = fs_parallel2.fit_transform(X)
    assert_array_equal(
        X_transformed.toarray(),
        X_transformed_parallel2.toarray()
    )

    # transformers should stay fit after fit_transform
    X_transformed_parallel2 = fs_parallel2.transform(X)
    assert_array_equal(
        X_transformed.toarray(),
        X_transformed_parallel2.toarray()
    )
def test_feature_union_feature_names():
    # Output feature names must carry the transformer's step name.
    union = FeatureUnion([
        ("chars", CountVectorizer(analyzer="char_wb", ngram_range=(3, 3))),
        ("words", CountVectorizer(analyzer="word")),
    ])
    union.fit(JUNK_FOOD_DOCS)
    names = union.get_feature_names()
    assert_true(all("chars__" in n or "words__" in n for n in names))
    assert_equal(len(names), 35)
def test_classes_property():
    # classes_ must only exist on fitted classifier pipelines.
    iris = load_iris()
    X, y = iris.data, iris.target

    reg = make_pipeline(SelectKBest(k=1), LinearRegression())
    reg.fit(X, y)
    # Regressors never expose classes_, even after fitting.
    assert_raises(AttributeError, getattr, reg, "classes_")

    clf = make_pipeline(SelectKBest(k=1), LogisticRegression(random_state=0))
    # Not fitted yet: the attribute must not exist.
    assert_raises(AttributeError, getattr, clf, "classes_")
    clf.fit(X, y)
    assert_array_equal(clf.classes_, np.unique(y))
def test_X1d_inverse_transform():
    # inverse_transform on 1d input should emit the deprecation warning.
    pipeline = make_pipeline(TransfT())
    flat_input = np.ones(10)
    expected_msg = "1d X will not be reshaped in pipeline.inverse_transform"
    assert_warns_message(FutureWarning, expected_msg,
                         pipeline.inverse_transform, flat_input)
| bsd-3-clause |
AIML/scikit-learn | sklearn/ensemble/tests/test_forest.py | 57 | 35265 | """
Testing for the forest module (sklearn.ensemble.forest).
"""
# Authors: Gilles Louppe,
# Brian Holt,
# Andreas Mueller,
# Arnaud Joly
# License: BSD 3 clause
import pickle
from collections import defaultdict
from itertools import product
import numpy as np
from scipy.sparse import csr_matrix, csc_matrix, coo_matrix
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_less, assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn import datasets
from sklearn.decomposition import TruncatedSVD
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomTreesEmbedding
from sklearn.grid_search import GridSearchCV
from sklearn.svm import LinearSVC
from sklearn.utils.validation import check_random_state
from sklearn.tree.tree import SPARSE_SPLITTERS
# toy sample: two linearly separable clusters in 2-D
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]

# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = check_random_state(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]

# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]

# Registries mapping estimator names to classes; the check_* helpers below
# are parametrized over these dicts by the test generators.
FOREST_CLASSIFIERS = {
    "ExtraTreesClassifier": ExtraTreesClassifier,
    "RandomForestClassifier": RandomForestClassifier,
}

FOREST_REGRESSORS = {
    "ExtraTreesRegressor": ExtraTreesRegressor,
    "RandomForestRegressor": RandomForestRegressor,
}

FOREST_TRANSFORMERS = {
    "RandomTreesEmbedding": RandomTreesEmbedding,
}

# union of all three registries
FOREST_ESTIMATORS = dict()
FOREST_ESTIMATORS.update(FOREST_CLASSIFIERS)
FOREST_ESTIMATORS.update(FOREST_REGRESSORS)
FOREST_ESTIMATORS.update(FOREST_TRANSFORMERS)
def check_classification_toy(name):
    """Check classification on a toy dataset."""
    ForestClassifier = FOREST_CLASSIFIERS[name]

    clf = ForestClassifier(n_estimators=10, random_state=1)
    clf.fit(X, y)
    assert_array_equal(clf.predict(T), true_result)
    assert_equal(10, len(clf))

    # same check with feature subsampling at each split
    clf = ForestClassifier(n_estimators=10, max_features=1, random_state=1)
    clf.fit(X, y)
    assert_array_equal(clf.predict(T), true_result)
    assert_equal(10, len(clf))

    # also test apply: one leaf index per sample per tree
    leaf_indices = clf.apply(X)
    assert_equal(leaf_indices.shape, (len(X), clf.n_estimators))
def test_classification_toy():
    # Run the toy-data check once per registered forest classifier.
    for classifier_name in FOREST_CLASSIFIERS:
        yield check_classification_toy, classifier_name
def check_iris_criterion(name, criterion):
    # Check consistency on dataset iris.
    ForestClassifier = FOREST_CLASSIFIERS[name]

    clf = ForestClassifier(n_estimators=10, criterion=criterion,
                           random_state=1)
    clf.fit(iris.data, iris.target)
    score = clf.score(iris.data, iris.target)
    # training accuracy on iris should be high with all features available
    assert_greater(score, 0.9, "Failed with criterion %s and score = %f"
                               % (criterion, score))

    # with max_features=2 the bar is lower
    clf = ForestClassifier(n_estimators=10, criterion=criterion,
                           max_features=2, random_state=1)
    clf.fit(iris.data, iris.target)
    score = clf.score(iris.data, iris.target)
    assert_greater(score, 0.5, "Failed with criterion %s and score = %f"
                               % (criterion, score))
def test_iris():
    # Exercise every classifier with both split criteria.
    for classifier_name in FOREST_CLASSIFIERS:
        for criterion in ("gini", "entropy"):
            yield check_iris_criterion, classifier_name, criterion
def check_boston_criterion(name, criterion):
    # Check consistency on dataset boston house prices.
    ForestRegressor = FOREST_REGRESSORS[name]

    clf = ForestRegressor(n_estimators=5, criterion=criterion, random_state=1)
    clf.fit(boston.data, boston.target)
    score = clf.score(boston.data, boston.target)
    # training R^2 score should be very high when all features are used
    assert_greater(score, 0.95, "Failed with max_features=None, criterion %s "
                                "and score = %f" % (criterion, score))

    clf = ForestRegressor(n_estimators=5, criterion=criterion,
                          max_features=6, random_state=1)
    clf.fit(boston.data, boston.target)
    score = clf.score(boston.data, boston.target)
    assert_greater(score, 0.95, "Failed with max_features=6, criterion %s "
                                "and score = %f" % (criterion, score))
def test_boston():
    # Only the "mse" criterion is exercised for regressors here.
    for regressor_name in FOREST_REGRESSORS:
        for criterion in ("mse",):
            yield check_boston_criterion, regressor_name, criterion
def check_regressor_attributes(name):
    # Regression forests must not expose classifier-only attributes,
    # neither before nor after fitting.
    regressor = FOREST_REGRESSORS[name](random_state=0)
    assert_false(hasattr(regressor, "classes_"))
    assert_false(hasattr(regressor, "n_classes_"))

    regressor.fit([[1, 2, 3], [4, 5, 6]], [1, 2])
    assert_false(hasattr(regressor, "classes_"))
    assert_false(hasattr(regressor, "n_classes_"))
def test_regressor_attributes():
    # One attribute check per registered forest regressor.
    for regressor_name in FOREST_REGRESSORS:
        yield check_regressor_attributes, regressor_name
def check_probability(name):
    # Predict probabilities.
    ForestClassifier = FOREST_CLASSIFIERS[name]
    with np.errstate(divide="ignore"):
        clf = ForestClassifier(n_estimators=10, random_state=1, max_features=1,
                               max_depth=1)
        clf.fit(iris.data, iris.target)
        # each row of predict_proba must sum to one
        assert_array_almost_equal(np.sum(clf.predict_proba(iris.data), axis=1),
                                  np.ones(iris.data.shape[0]))
        # predict_log_proba must be consistent with predict_proba
        assert_array_almost_equal(clf.predict_proba(iris.data),
                                  np.exp(clf.predict_log_proba(iris.data)))
def test_probability():
    # Probability consistency check per classifier.
    for classifier_name in FOREST_CLASSIFIERS:
        yield check_probability, classifier_name
def check_importances(name, X, y):
    # Check variable importances.
    ForestClassifier = FOREST_CLASSIFIERS[name]
    for n_jobs in [1, 2]:
        clf = ForestClassifier(n_estimators=10, n_jobs=n_jobs)
        clf.fit(X, y)
        importances = clf.feature_importances_
        # the fixture has 3 informative features out of 10
        n_important = np.sum(importances > 0.1)
        assert_equal(importances.shape[0], 10)
        assert_equal(n_important, 3)

        X_new = clf.transform(X, threshold="mean")
        # BUG FIX: the original `assert_less(0 < X_new.shape[1], X.shape[1])`
        # compared a boolean to the feature count (i.e. asserted True < 10),
        # never checking the selected width. Check both bounds explicitly:
        # at least one feature kept, and strictly fewer than the input width.
        assert_greater(X_new.shape[1], 0)
        assert_less(X_new.shape[1], X.shape[1])

        # Check with sample weights
        sample_weight = np.ones(y.shape)
        sample_weight[y == 1] *= 100

        clf = ForestClassifier(n_estimators=50, n_jobs=n_jobs, random_state=0)
        clf.fit(X, y, sample_weight=sample_weight)
        importances = clf.feature_importances_
        assert_true(np.all(importances >= 0.0))

        # importances are invariant to a global rescaling of the weights
        clf = ForestClassifier(n_estimators=50, n_jobs=n_jobs, random_state=0)
        clf.fit(X, y, sample_weight=3 * sample_weight)
        importances_bis = clf.feature_importances_
        assert_almost_equal(importances, importances_bis)
def test_importances():
    # Shared synthetic dataset: 10 features, only 3 of them informative.
    X, y = datasets.make_classification(
        n_samples=1000, n_features=10, n_informative=3, n_redundant=0,
        n_repeated=0, shuffle=False, random_state=0)

    for classifier_name in FOREST_CLASSIFIERS:
        yield check_importances, classifier_name, X, y
def check_unfitted_feature_importances(name):
    # Reading feature_importances_ before fit must raise ValueError.
    estimator = FOREST_ESTIMATORS[name](random_state=0)
    assert_raises(ValueError, getattr, estimator, "feature_importances_")
def test_unfitted_feature_importances():
    # Applies to every estimator, transformers included.
    for estimator_name in FOREST_ESTIMATORS:
        yield check_unfitted_feature_importances, estimator_name
def check_oob_score(name, X, y, n_estimators=20):
    # Check that oob prediction is a good estimation of the generalization
    # error.

    # Proper behavior: fit on the first half, score on the held-out half.
    est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
                                  n_estimators=n_estimators, bootstrap=True)
    n_samples = X.shape[0]
    est.fit(X[:n_samples // 2, :], y[:n_samples // 2])
    test_score = est.score(X[n_samples // 2:, :], y[n_samples // 2:])

    if name in FOREST_CLASSIFIERS:
        # accuracy: oob estimate should be close to the held-out score
        assert_less(abs(test_score - est.oob_score_), 0.1)
    else:
        # R^2: oob estimate is pessimistic but should still be high
        assert_greater(test_score, est.oob_score_)
        assert_greater(est.oob_score_, .8)

    # Check warning if not enough estimators
    with np.errstate(divide="ignore", invalid="ignore"):
        est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
                                      n_estimators=1, bootstrap=True)
        assert_warns(UserWarning, est.fit, X, y)
def test_oob_score():
    for name in FOREST_CLASSIFIERS:
        yield check_oob_score, name, iris.data, iris.target

    # csc matrix
    yield check_oob_score, name, csc_matrix(iris.data), iris.target

    # non-contiguous targets in classification
    yield check_oob_score, name, iris.data, iris.target * 2 + 1

    for name in FOREST_REGRESSORS:
        yield check_oob_score, name, boston.data, boston.target, 50

    # csc matrix
    yield check_oob_score, name, csc_matrix(boston.data), boston.target, 50

    # NOTE(review): the csc/non-contiguous yields sit OUTSIDE the loops, so
    # they only run for the last `name` of each registry — looks like an
    # indentation slip; confirm intent before re-indenting.
def check_oob_score_raise_error(name):
    ForestEstimator = FOREST_ESTIMATORS[name]

    if name in FOREST_TRANSFORMERS:
        # transformers reject the oob_score argument entirely
        for oob_score in [True, False]:
            assert_raises(TypeError, ForestEstimator, oob_score=oob_score)

        assert_raises(NotImplementedError, ForestEstimator()._set_oob_score,
                      X, y)

    else:
        # Unfitted / no bootstrap / no oob_score
        for oob_score, bootstrap in [(True, False), (False, True),
                                     (False, False)]:
            est = ForestEstimator(oob_score=oob_score, bootstrap=bootstrap,
                                  random_state=0)
            assert_false(hasattr(est, "oob_score_"))

        # No bootstrap: oob_score=True is invalid at fit time
        assert_raises(ValueError, ForestEstimator(oob_score=True,
                                                  bootstrap=False).fit, X, y)
def test_oob_score_raise_error():
    # Covers transformers (TypeError path) and predictors alike.
    for estimator_name in FOREST_ESTIMATORS:
        yield check_oob_score_raise_error, estimator_name
def check_gridsearch(name):
    # Smoke test: a forest must be usable as a GridSearchCV estimator.
    param_grid = {'n_estimators': (1, 2), 'max_depth': (1, 2)}
    search = GridSearchCV(FOREST_CLASSIFIERS[name](), param_grid)
    search.fit(iris.data, iris.target)
def test_gridsearch():
    # Check that base trees can be grid-searched.
    for classifier_name in FOREST_CLASSIFIERS:
        yield check_gridsearch, classifier_name
def check_parallel(name, X, y):
    """Check that predictions do not depend on n_jobs (runs for both
    classifiers and regressors)."""
    ForestEstimator = FOREST_ESTIMATORS[name]
    forest = ForestEstimator(n_estimators=10, n_jobs=3, random_state=0)

    forest.fit(X, y)
    assert_equal(len(forest), 10)

    # predicting with different n_jobs on the same fitted forest must agree
    forest.set_params(n_jobs=1)
    y1 = forest.predict(X)
    forest.set_params(n_jobs=2)
    y2 = forest.predict(X)
    assert_array_almost_equal(y1, y2, 3)
def test_parallel():
    # Classifiers are checked on iris, regressors on boston.
    for classifier_name in FOREST_CLASSIFIERS:
        yield check_parallel, classifier_name, iris.data, iris.target

    for regressor_name in FOREST_REGRESSORS:
        yield check_parallel, regressor_name, boston.data, boston.target
def check_pickle(name, X, y):
    # Check pickability.
    ForestEstimator = FOREST_ESTIMATORS[name]
    obj = ForestEstimator(random_state=0)
    obj.fit(X, y)
    score = obj.score(X, y)
    pickle_object = pickle.dumps(obj)

    obj2 = pickle.loads(pickle_object)
    assert_equal(type(obj2), obj.__class__)
    # the unpickled estimator must score identically to the original
    score2 = obj2.score(X, y)
    assert_equal(score, score2)
def test_pickle():
    # Use every other sample to keep the fixtures small.
    for classifier_name in FOREST_CLASSIFIERS:
        yield check_pickle, classifier_name, iris.data[::2], iris.target[::2]

    for regressor_name in FOREST_REGRESSORS:
        yield (check_pickle, regressor_name, boston.data[::2],
               boston.target[::2])
def check_multioutput(name):
    # Check estimators on multi-output problems.

    # two outputs per sample: first in {-1, 1}, second in {0, 1, 2, 3}
    X_train = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [-2, 1],
               [-1, 1], [-1, 2], [2, -1], [1, -1], [1, -2]]
    y_train = [[-1, 0], [-1, 0], [-1, 0], [1, 1], [1, 1], [1, 1], [-1, 2],
               [-1, 2], [-1, 2], [1, 3], [1, 3], [1, 3]]
    X_test = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
    y_test = [[-1, 0], [1, 1], [-1, 2], [1, 3]]

    est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
    y_pred = est.fit(X_train, y_train).predict(X_test)
    assert_array_almost_equal(y_pred, y_test)

    if name in FOREST_CLASSIFIERS:
        with np.errstate(divide="ignore"):
            # one probability array per output, with per-output class counts
            proba = est.predict_proba(X_test)
            assert_equal(len(proba), 2)
            assert_equal(proba[0].shape, (4, 2))
            assert_equal(proba[1].shape, (4, 4))

            log_proba = est.predict_log_proba(X_test)
            assert_equal(len(log_proba), 2)
            assert_equal(log_proba[0].shape, (4, 2))
            assert_equal(log_proba[1].shape, (4, 4))
def test_multioutput():
    # Both classifiers and regressors must handle multi-output targets.
    for estimator_name in list(FOREST_CLASSIFIERS) + list(FOREST_REGRESSORS):
        yield check_multioutput, estimator_name
def check_classes_shape(name):
    # Test that n_classes_ and classes_ have proper shape.
    ForestClassifier = FOREST_CLASSIFIERS[name]

    # Classification, single output
    clf = ForestClassifier(random_state=0).fit(X, y)

    assert_equal(clf.n_classes_, 2)
    assert_array_equal(clf.classes_, [-1, 1])

    # Classification, multi-output: one entry per output
    _y = np.vstack((y, np.array(y) * 2)).T
    clf = ForestClassifier(random_state=0).fit(X, _y)

    assert_array_equal(clf.n_classes_, [2, 2])
    assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_classes_shape():
    # Shape checks apply to classifiers only.
    for classifier_name in FOREST_CLASSIFIERS:
        yield check_classes_shape, classifier_name
def test_random_trees_dense_type():
    # Test that the `sparse_output` parameter of RandomTreesEmbedding
    # works by returning a dense array.

    # Create the RTE with sparse=False
    hasher = RandomTreesEmbedding(n_estimators=10, sparse_output=False)
    X, y = datasets.make_circles(factor=0.5)
    X_transformed = hasher.fit_transform(X)

    # Assert that type is ndarray, not scipy.sparse.csr.csr_matrix
    assert_equal(type(X_transformed), np.ndarray)
def test_random_trees_dense_equal():
    # Test that the `sparse_output` parameter of RandomTreesEmbedding
    # works by returning the same array for both argument values.

    # Create the RTEs with an identical random_state so their forests match
    hasher_dense = RandomTreesEmbedding(n_estimators=10, sparse_output=False,
                                        random_state=0)
    hasher_sparse = RandomTreesEmbedding(n_estimators=10, sparse_output=True,
                                         random_state=0)
    X, y = datasets.make_circles(factor=0.5)
    X_transformed_dense = hasher_dense.fit_transform(X)
    X_transformed_sparse = hasher_sparse.fit_transform(X)

    # Assert that dense and sparse hashers have same array.
    assert_array_equal(X_transformed_sparse.toarray(), X_transformed_dense)
def test_random_hasher():
    # test random forest hashing on circles dataset
    # make sure that it is linearly separable.
    # even after projected to two SVD dimensions
    # Note: Not all random_states produce perfect results.
    hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
    X, y = datasets.make_circles(factor=0.5)
    X_transformed = hasher.fit_transform(X)

    # test fit and transform: separate calls must match fit_transform
    hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
    assert_array_equal(hasher.fit(X).transform(X).toarray(),
                       X_transformed.toarray())

    # one leaf active per data point per forest
    assert_equal(X_transformed.shape[0], X.shape[0])
    assert_array_equal(X_transformed.sum(axis=1), hasher.n_estimators)

    # the embedded circles should be linearly separable after a 2-D SVD
    svd = TruncatedSVD(n_components=2)
    X_reduced = svd.fit_transform(X_transformed)
    linear_clf = LinearSVC()
    linear_clf.fit(X_reduced, y)
    assert_equal(linear_clf.score(X_reduced, y), 1.)
def test_random_hasher_sparse_data():
    # Hashing must give identical embeddings for dense and sparse input.
    X, y = datasets.make_multilabel_classification(random_state=0)
    hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
    dense_embedding = hasher.fit_transform(X)
    sparse_embedding = hasher.fit_transform(csc_matrix(X))
    assert_array_equal(sparse_embedding.toarray(), dense_embedding.toarray())
def test_parallel_train():
    rng = check_random_state(12321)
    n_samples, n_features = 80, 30
    X_train = rng.randn(n_samples, n_features)
    y_train = rng.randint(0, 2, n_samples)

    # With a fixed random_state, forests trained with different n_jobs must
    # produce identical predictions.
    clfs = [
        RandomForestClassifier(n_estimators=20, n_jobs=n_jobs,
                               random_state=12345).fit(X_train, y_train)
        for n_jobs in [1, 2, 3, 8, 16, 32]
    ]

    X_test = rng.randn(n_samples, n_features)
    probas = [clf.predict_proba(X_test) for clf in clfs]
    # compare each forest's probabilities to the next one's
    for proba1, proba2 in zip(probas, probas[1:]):
        assert_array_almost_equal(proba1, proba2)
def test_distribution():
    # Check the empirical distribution of trees grown on tiny categorical
    # problems, where the set of possible tree structures can be enumerated.
    rng = check_random_state(12321)

    # Single variable with 4 values
    X = rng.randint(0, 4, size=(1000, 1))
    y = rng.rand(1000)
    n_trees = 500

    clf = ExtraTreesRegressor(n_estimators=n_trees, random_state=42).fit(X, y)

    uniques = defaultdict(int)
    for tree in clf.estimators_:
        # serialize the tree structure as "feature,threshold/" tokens,
        # with "-" marking leaves
        tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
                       for f, t in zip(tree.tree_.feature,
                                       tree.tree_.threshold))

        uniques[tree] += 1

    uniques = sorted([(1. * count / n_trees, tree)
                      for tree, count in uniques.items()])

    # On a single variable problem where X_0 has 4 equiprobable values, there
    # are 5 ways to build a random tree. The more compact (0,1/0,0/--0,2/--) of
    # them has probability 1/3 while the 4 others have probability 1/6.
    assert_equal(len(uniques), 5)
    assert_greater(0.20, uniques[0][0])  # Rough approximation of 1/6.
    assert_greater(0.20, uniques[1][0])
    assert_greater(0.20, uniques[2][0])
    assert_greater(0.20, uniques[3][0])
    assert_greater(uniques[4][0], 0.3)
    assert_equal(uniques[4][1], "0,1/0,0/--0,2/--")

    # Two variables, one with 2 values, one with 3 values
    X = np.empty((1000, 2))
    # BUG FIX: the original drew these columns from the global np.random
    # instead of the seeded `rng`, making the test non-deterministic.
    X[:, 0] = rng.randint(0, 2, 1000)
    X[:, 1] = rng.randint(0, 3, 1000)
    y = rng.rand(1000)

    clf = ExtraTreesRegressor(n_estimators=100, max_features=1,
                              random_state=1).fit(X, y)

    uniques = defaultdict(int)
    for tree in clf.estimators_:
        tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
                       for f, t in zip(tree.tree_.feature,
                                       tree.tree_.threshold))

        uniques[tree] += 1

    uniques = [(count, tree) for tree, count in uniques.items()]
    # 2 * (2 + 2) structurally distinct trees are possible on this problem
    assert_equal(len(uniques), 8)
def check_max_leaf_nodes_max_depth(name, X, y):
    # Test precedence of max_leaf_nodes over max_depth.
    ForestEstimator = FOREST_ESTIMATORS[name]

    # with max_leaf_nodes set, max_depth=1 is ignored and trees grow deeper
    est = ForestEstimator(max_depth=1, max_leaf_nodes=4,
                          n_estimators=1).fit(X, y)
    assert_greater(est.estimators_[0].tree_.max_depth, 1)

    # without max_leaf_nodes, max_depth is honored
    est = ForestEstimator(max_depth=1, n_estimators=1).fit(X, y)
    assert_equal(est.estimators_[0].tree_.max_depth, 1)
def test_max_leaf_nodes_max_depth():
    # Shared Hastie-style binary dataset for all estimators.
    data, target = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    for estimator_name in FOREST_ESTIMATORS:
        yield check_max_leaf_nodes_max_depth, estimator_name, data, target
def check_min_samples_leaf(name, X, y):
    # Test if leaves contain more than leaf_count training examples
    ForestEstimator = FOREST_ESTIMATORS[name]

    # test both DepthFirstTreeBuilder and BestFirstTreeBuilder
    # by setting max_leaf_nodes
    for max_leaf_nodes in (None, 1000):
        est = ForestEstimator(min_samples_leaf=5,
                              max_leaf_nodes=max_leaf_nodes,
                              random_state=0)
        est.fit(X, y)
        out = est.estimators_[0].tree_.apply(X)
        node_counts = np.bincount(out)
        # drop inner nodes (apply only returns leaf ids, so zero-count
        # entries correspond to internal nodes)
        leaf_count = node_counts[node_counts != 0]
        assert_greater(np.min(leaf_count), 4,
                       "Failed with {0}".format(name))
def test_min_samples_leaf():
    # float32 input matches the dtype expected by tree_.apply.
    data, target = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    data = data.astype(np.float32)
    for estimator_name in FOREST_ESTIMATORS:
        yield check_min_samples_leaf, estimator_name, data, target
def check_min_weight_fraction_leaf(name, X, y):
    # Test if leaves contain at least min_weight_fraction_leaf of the
    # training set
    ForestEstimator = FOREST_ESTIMATORS[name]
    rng = np.random.RandomState(0)
    weights = rng.rand(X.shape[0])
    total_weight = np.sum(weights)

    # test both DepthFirstTreeBuilder and BestFirstTreeBuilder
    # by setting max_leaf_nodes
    for max_leaf_nodes in (None, 1000):
        for frac in np.linspace(0, 0.5, 6):
            est = ForestEstimator(min_weight_fraction_leaf=frac,
                                  max_leaf_nodes=max_leaf_nodes,
                                  random_state=0)
            if isinstance(est, (RandomForestClassifier,
                                RandomForestRegressor)):
                # disable bootstrap so every sample (and its weight)
                # reaches the tree exactly once
                est.bootstrap = False

            est.fit(X, y, sample_weight=weights)
            out = est.estimators_[0].tree_.apply(X)
            node_weights = np.bincount(out, weights=weights)
            # drop inner nodes
            leaf_weights = node_weights[node_weights != 0]
            assert_greater_equal(
                np.min(leaf_weights),
                total_weight * est.min_weight_fraction_leaf,
                "Failed with {0} "
                "min_weight_fraction_leaf={1}".format(
                    name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
    # float32 input matches the dtype expected by tree_.apply.
    data, target = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    data = data.astype(np.float32)
    for estimator_name in FOREST_ESTIMATORS:
        yield check_min_weight_fraction_leaf, estimator_name, data, target
def check_sparse_input(name, X, X_sparse, y):
    # Fitting on sparse input must give the same model as fitting on the
    # equivalent dense input.
    ForestEstimator = FOREST_ESTIMATORS[name]

    dense = ForestEstimator(random_state=0, max_depth=2).fit(X, y)
    sparse = ForestEstimator(random_state=0, max_depth=2).fit(X_sparse, y)

    assert_array_almost_equal(sparse.apply(X), dense.apply(X))

    if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
        assert_array_almost_equal(sparse.predict(X), dense.predict(X))
        assert_array_almost_equal(sparse.feature_importances_,
                                  dense.feature_importances_)

    if name in FOREST_CLASSIFIERS:
        assert_array_almost_equal(sparse.predict_proba(X),
                                  dense.predict_proba(X))
        assert_array_almost_equal(sparse.predict_log_proba(X),
                                  dense.predict_log_proba(X))

    if name in FOREST_TRANSFORMERS:
        assert_array_almost_equal(sparse.transform(X).toarray(),
                                  dense.transform(X).toarray())
        assert_array_almost_equal(sparse.fit_transform(X).toarray(),
                                  dense.fit_transform(X).toarray())
def test_sparse_input():
    # Every estimator crossed with every supported sparse format.
    X, y = datasets.make_multilabel_classification(random_state=0,
                                                   n_samples=40)

    for estimator_name in FOREST_ESTIMATORS:
        for sparse_format in (csr_matrix, csc_matrix, coo_matrix):
            yield check_sparse_input, estimator_name, X, sparse_format(X), y
def check_memory_layout(name, dtype):
    # Check that it works no matter the memory layout
    est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)

    # Nothing
    X = np.asarray(iris.data, dtype=dtype)
    y = iris.target
    assert_array_equal(est.fit(X, y).predict(X), y)

    # C-order
    X = np.asarray(iris.data, order="C", dtype=dtype)
    y = iris.target
    assert_array_equal(est.fit(X, y).predict(X), y)

    # F-order
    X = np.asarray(iris.data, order="F", dtype=dtype)
    y = iris.target
    assert_array_equal(est.fit(X, y).predict(X), y)

    # Contiguous
    X = np.ascontiguousarray(iris.data, dtype=dtype)
    y = iris.target
    assert_array_equal(est.fit(X, y).predict(X), y)

    # sparse formats are only checked when the underlying splitter
    # supports them
    if est.base_estimator.splitter in SPARSE_SPLITTERS:
        # csr matrix
        X = csr_matrix(iris.data, dtype=dtype)
        y = iris.target
        assert_array_equal(est.fit(X, y).predict(X), y)

        # csc_matrix
        X = csc_matrix(iris.data, dtype=dtype)
        y = iris.target
        assert_array_equal(est.fit(X, y).predict(X), y)

        # coo_matrix
        X = coo_matrix(iris.data, dtype=dtype)
        y = iris.target
        assert_array_equal(est.fit(X, y).predict(X), y)

    # Strided
    X = np.asarray(iris.data[::3], dtype=dtype)
    y = iris.target[::3]
    assert_array_equal(est.fit(X, y).predict(X), y)
def test_memory_layout():
    # Both float dtypes, classifiers first, then regressors.
    for registry in (FOREST_CLASSIFIERS, FOREST_REGRESSORS):
        for estimator_name in registry:
            for dtype in (np.float64, np.float32):
                yield check_memory_layout, estimator_name, dtype
def check_1d_input(name, X, X_2d, y):
    # 1d X must be rejected both at fit and at predict time.
    ForestEstimator = FOREST_ESTIMATORS[name]
    assert_raises(ValueError, ForestEstimator(random_state=0).fit, X, y)

    est = ForestEstimator(random_state=0)
    est.fit(X_2d, y)

    if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
        assert_raises(ValueError, est.predict, X)
def test_1d_input():
    # Single iris feature, both as a flat vector and as a column.
    flat = iris.data[:, 0].ravel()
    column = iris.data[:, 0].reshape((-1, 1))
    target = iris.target

    for estimator_name in FOREST_ESTIMATORS:
        yield check_1d_input, estimator_name, flat, column, target
def check_class_weights(name):
    # Check class_weights resemble sample_weights behavior.
    ForestClassifier = FOREST_CLASSIFIERS[name]

    # Iris is balanced, so no effect expected for using 'balanced' weights
    clf1 = ForestClassifier(random_state=0)
    clf1.fit(iris.data, iris.target)
    clf2 = ForestClassifier(class_weight='balanced', random_state=0)
    clf2.fit(iris.data, iris.target)
    # feature importances are used as a proxy for "same fitted model"
    assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)

    # Make a multi-output problem with three copies of Iris
    iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
    # Create user-defined weights that should balance over the outputs
    clf3 = ForestClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
                                          {0: 2., 1: 1., 2: 2.},
                                          {0: 1., 1: 2., 2: 2.}],
                            random_state=0)
    clf3.fit(iris.data, iris_multi)
    assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
    # Check against multi-output "balanced" which should also have no effect
    clf4 = ForestClassifier(class_weight='balanced', random_state=0)
    clf4.fit(iris.data, iris_multi)
    assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)

    # Inflate importance of class 1, check against user-defined weights
    sample_weight = np.ones(iris.target.shape)
    sample_weight[iris.target == 1] *= 100
    class_weight = {0: 1., 1: 100., 2: 1.}
    clf1 = ForestClassifier(random_state=0)
    clf1.fit(iris.data, iris.target, sample_weight)
    clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
    clf2.fit(iris.data, iris.target)
    assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)

    # Check that sample_weight and class_weight are multiplicative
    clf1 = ForestClassifier(random_state=0)
    clf1.fit(iris.data, iris.target, sample_weight ** 2)
    clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
    clf2.fit(iris.data, iris.target, sample_weight)
    assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
def test_class_weights():
    # One class_weight check per classifier.
    for classifier_name in FOREST_CLASSIFIERS:
        yield check_class_weights, classifier_name
def check_class_weight_balanced_and_bootstrap_multi_output(name):
    # Test class_weight works for multi-output
    # (the trailing stray '"""' from an old docstring was removed)
    ForestClassifier = FOREST_CLASSIFIERS[name]
    _y = np.vstack((y, np.array(y) * 2)).T
    clf = ForestClassifier(class_weight='balanced', random_state=0)
    clf.fit(X, _y)
    # per-output dict weights must be accepted too
    clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}, {-2: 1., 2: 1.}],
                           random_state=0)
    clf.fit(X, _y)
    # smoke test for subsample and balanced subsample
    clf = ForestClassifier(class_weight='balanced_subsample', random_state=0)
    clf.fit(X, _y)
    # 'subsample' is deprecated, hence the ignored warning
    clf = ForestClassifier(class_weight='subsample', random_state=0)
    ignore_warnings(clf.fit)(X, _y)
def test_class_weight_balanced_and_bootstrap_multi_output():
    # Multi-output class_weight smoke tests, one per classifier.
    for classifier_name in FOREST_CLASSIFIERS:
        yield (check_class_weight_balanced_and_bootstrap_multi_output,
               classifier_name)
def check_class_weight_errors(name):
    # Test if class_weight raises errors and warnings when expected.
    ForestClassifier = FOREST_CLASSIFIERS[name]
    _y = np.vstack((y, np.array(y) * 2)).T

    # Invalid preset string
    clf = ForestClassifier(class_weight='the larch', random_state=0)
    assert_raises(ValueError, clf.fit, X, y)
    assert_raises(ValueError, clf.fit, X, _y)

    # Warning warm_start with preset
    clf = ForestClassifier(class_weight='auto', warm_start=True,
                           random_state=0)
    assert_warns(UserWarning, clf.fit, X, y)
    assert_warns(UserWarning, clf.fit, X, _y)

    # Not a list or preset for multi-output
    clf = ForestClassifier(class_weight=1, random_state=0)
    assert_raises(ValueError, clf.fit, X, _y)

    # Incorrect length list for multi-output (one dict for two outputs)
    clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
    assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
    # Error-path checks, one per classifier.
    for classifier_name in FOREST_CLASSIFIERS:
        yield check_class_weight_errors, classifier_name
def check_warm_start(name, random_state=42):
    """Fitting incrementally with warm_start gives a forest of the right
    size and the same results as a single, normal fit."""
    X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
    ForestEstimator = FOREST_ESTIMATORS[name]

    # Grow the forest in two warm-started steps: 5 trees, then 10.
    incremental = ForestEstimator(n_estimators=5, random_state=random_state,
                                  warm_start=True)
    for n_estimators in (5, 10):
        incremental.set_params(n_estimators=n_estimators)
        incremental.fit(X, y)
        assert_equal(len(incremental), n_estimators)

    # Reference forest grown in a single fit.
    reference = ForestEstimator(n_estimators=10, random_state=random_state,
                                warm_start=False)
    reference.fit(X, y)

    # Same per-tree seeds and identical leaf assignments.
    assert_equal(set(tree.random_state for tree in incremental),
                 set(tree.random_state for tree in reference))
    assert_array_equal(incremental.apply(X), reference.apply(X),
                       err_msg="Failed with {0}".format(name))
def test_warm_start():
    """Yield the warm-start check once per forest estimator."""
    for estimator_name in FOREST_ESTIMATORS:
        yield check_warm_start, estimator_name
def check_warm_start_clear(name):
    """fit() must discard previous state and grow a brand new forest when
    warm_start is switched off."""
    X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
    ForestEstimator = FOREST_ESTIMATORS[name]

    reference = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False,
                                random_state=1)
    reference.fit(X, y)

    estimator = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True,
                                random_state=2)
    estimator.fit(X, y)  # initialises some warm-start state
    estimator.set_params(warm_start=False, random_state=1)
    estimator.fit(X, y)  # must clear the old state and equal `reference`
    assert_array_almost_equal(estimator.apply(X), reference.apply(X))
def test_warm_start_clear():
    """Yield the warm-start clearing check once per forest estimator."""
    for estimator_name in FOREST_ESTIMATORS:
        yield check_warm_start_clear, estimator_name
def check_warm_start_smaller_n_estimators(name):
    """A warm-started refit with fewer estimators must raise ValueError."""
    X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
    estimator = FOREST_ESTIMATORS[name](n_estimators=5, max_depth=1,
                                        warm_start=True)
    estimator.fit(X, y)
    # Shrinking the ensemble is not supported with warm_start.
    estimator.set_params(n_estimators=4)
    assert_raises(ValueError, estimator.fit, X, y)
def test_warm_start_smaller_n_estimators():
    """Yield the shrinking-ensemble error check per forest estimator."""
    for estimator_name in FOREST_ESTIMATORS:
        yield check_warm_start_smaller_n_estimators, estimator_name
def check_warm_start_equal_n_estimators(name):
    """A warm-started refit with an unchanged n_estimators is a no-op: the
    forest is kept as-is and a warning is emitted."""
    X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
    ForestEstimator = FOREST_ESTIMATORS[name]
    reference = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
                                random_state=1)
    reference.fit(X, y)

    estimator = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
                                random_state=1)
    estimator.fit(X, y)
    # `estimator` now equals `reference`.  Changing the random state and
    # refitting must NOT grow new trees; had the trees been refitted, the
    # different seed would have produced a different forest.
    estimator.set_params(random_state=2)
    assert_warns(UserWarning, estimator.fit, X, y)
    assert_array_equal(reference.apply(X), estimator.apply(X))
def test_warm_start_equal_n_estimators():
    """Yield the equal-n_estimators warm-start check per forest estimator."""
    for estimator_name in FOREST_ESTIMATORS:
        yield check_warm_start_equal_n_estimators, estimator_name
def check_warm_start_oob(name):
    """Warm-started fits must (re)compute the OOB score when requested."""
    X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
    ForestEstimator = FOREST_ESTIMATORS[name]

    # Reference: 15 estimators fitted in one go with OOB scoring enabled.
    # (15 trees avoid the 'some inputs do not have OOB scores' warning.)
    reference = ForestEstimator(n_estimators=15, max_depth=3, warm_start=False,
                                random_state=1, bootstrap=True, oob_score=True)
    reference.fit(X, y)

    # Grow 5 trees without OOB, then warm-start up to 15 with OOB enabled.
    grown = ForestEstimator(n_estimators=5, max_depth=3, warm_start=False,
                            random_state=1, bootstrap=True, oob_score=False)
    grown.fit(X, y)
    grown.set_params(warm_start=True, oob_score=True, n_estimators=15)
    grown.fit(X, y)
    assert_true(hasattr(grown, 'oob_score_'))
    assert_equal(reference.oob_score_, grown.oob_score_)

    # The OOB score is computed even when no additional trees are needed.
    refit = ForestEstimator(n_estimators=15, max_depth=3, warm_start=True,
                            random_state=1, bootstrap=True, oob_score=False)
    refit.fit(X, y)
    assert_true(not (hasattr(refit, 'oob_score_')))
    refit.set_params(oob_score=True)
    ignore_warnings(refit.fit)(X, y)
    assert_equal(reference.oob_score_, refit.oob_score_)
def test_warm_start_oob():
    """Run the OOB warm-start check for classifiers, then regressors."""
    for estimator_name in list(FOREST_CLASSIFIERS) + list(FOREST_REGRESSORS):
        yield check_warm_start_oob, estimator_name
def test_dtype_convert():
    """String class labels must survive a fit/predict round trip intact."""
    n_classes = 15
    classifier = RandomForestClassifier()
    # One orthogonal sample per class, labelled 'A', 'B', ...
    X = np.eye(n_classes)
    y = list('ABCDEFGHIJKLMNOPQRSTU'[:n_classes])
    result = classifier.fit(X, y).predict(X)
    assert_array_equal(result, y)
| bsd-3-clause |
rrohan/scikit-learn | sklearn/metrics/cluster/tests/test_bicluster.py | 394 | 1770 | """Testing for bicluster metrics module"""
import numpy as np
from sklearn.utils.testing import assert_equal, assert_almost_equal
from sklearn.metrics.cluster.bicluster import _jaccard
from sklearn.metrics import consensus_score
def test_jaccard():
    """Jaccard score of a bicluster with itself is 1; overlap shrinks it."""
    a1 = np.array([True, True, False, False])
    a2 = np.array([True, True, True, True])
    a3 = np.array([False, True, True, False])
    a4 = np.array([False, False, True, True])
    cases = [(a1, 1), (a2, 0.25), (a3, 1.0 / 7), (a4, 0)]
    for other, score in cases:
        assert_equal(_jaccard(a1, a1, other, other), score)
def test_consensus_score():
    """Identical biclusterings score 1; fully mismatched ones score 0."""
    a = [[True, True, False, False],
         [False, False, True, True]]
    b = a[::-1]
    # The score is invariant to the order in which biclusters are listed.
    for left, right in [((a, a), (a, a)), ((a, a), (b, b)),
                        ((a, b), (a, b)), ((a, b), (b, a))]:
        assert_equal(consensus_score(left, right), 1)
    # Disagreeing row/column assignments yield zero consensus.
    for left, right in [((a, a), (b, a)), ((a, a), (a, b)),
                        ((b, b), (a, b)), ((b, b), (b, a))]:
        assert_equal(consensus_score(left, right), 0)
def test_consensus_score_issue2445():
    """Different number of biclusters in A and B (regression test)."""
    rows = np.array([[True, True, False, False],
                     [False, False, True, True],
                     [False, False, False, True]])
    cols = np.array([[True, True, False, False],
                     [False, False, True, True],
                     [False, False, False, True]])
    kept = [0, 2]
    score = consensus_score((rows, cols), (rows[kept], cols[kept]))
    # B contains 2 of the 3 biclusters in A, so the score should be 2/3.
    assert_almost_equal(score, 2.0 / 3.0)
| bsd-3-clause |
linebp/pandas | pandas/tests/io/formats/test_to_latex.py | 1 | 12983 | from datetime import datetime
import pytest
import pandas as pd
from pandas import DataFrame, compat, Series
from pandas.util import testing as tm
from pandas.compat import u
import codecs
@pytest.fixture
def frame():
    """DataFrame fixture built from the standard pandas test series data."""
    data = tm.getSeriesData()
    return DataFrame(data)
class TestToLatex(object):
    """Tests for DataFrame.to_latex / Series.to_latex output formatting.

    Each test compares the rendered LaTeX against a literal expected string,
    so the expected blocks below are whitespace-sensitive.
    """
    def test_to_latex_filename(self, frame):
        """Writing to a file path produces the same text as the return value."""
        with tm.ensure_clean('test.tex') as path:
            frame.to_latex(path)
            with open(path, 'r') as f:
                assert frame.to_latex() == f.read()
        # test with utf-8 and encoding option (GH 7061)
        df = DataFrame([[u'au\xdfgangen']])
        with tm.ensure_clean('test.tex') as path:
            df.to_latex(path, encoding='utf-8')
            with codecs.open(path, 'r', encoding='utf-8') as f:
                assert df.to_latex() == f.read()
        # test with utf-8 without encoding option
        if compat.PY3: # python3: pandas default encoding is utf-8
            with tm.ensure_clean('test.tex') as path:
                df.to_latex(path)
                with codecs.open(path, 'r', encoding='utf-8') as f:
                    assert df.to_latex() == f.read()
        else:
            # python2 default encoding is ascii, so an error should be raised
            with tm.ensure_clean('test.tex') as path:
                with pytest.raises(UnicodeEncodeError):
                    df.to_latex(path)
    def test_to_latex(self, frame):
        """Basic rendering, with and without the index column."""
        # it works!
        frame.to_latex()
        df = DataFrame({'a': [1, 2], 'b': ['b1', 'b2']})
        withindex_result = df.to_latex()
        withindex_expected = r"""\begin{tabular}{lrl}
\toprule
{} & a & b \\
\midrule
0 & 1 & b1 \\
1 & 2 & b2 \\
\bottomrule
\end{tabular}
"""
        assert withindex_result == withindex_expected
        withoutindex_result = df.to_latex(index=False)
        withoutindex_expected = r"""\begin{tabular}{rl}
\toprule
a & b \\
\midrule
1 & b1 \\
2 & b2 \\
\bottomrule
\end{tabular}
"""
        assert withoutindex_result == withoutindex_expected
    def test_to_latex_format(self, frame):
        """A user-supplied column_format overrides the inferred alignment."""
        # GH Bug #9402
        frame.to_latex(column_format='ccc')
        df = DataFrame({'a': [1, 2], 'b': ['b1', 'b2']})
        withindex_result = df.to_latex(column_format='ccc')
        withindex_expected = r"""\begin{tabular}{ccc}
\toprule
{} & a & b \\
\midrule
0 & 1 & b1 \\
1 & 2 & b2 \\
\bottomrule
\end{tabular}
"""
        assert withindex_result == withindex_expected
    def test_to_latex_with_formatters(self):
        """Per-column (and __index__) formatter callables are honoured."""
        df = DataFrame({'int': [1, 2, 3],
                        'float': [1.0, 2.0, 3.0],
                        'object': [(1, 2), True, False],
                        'datetime64': [datetime(2016, 1, 1),
                                       datetime(2016, 2, 5),
                                       datetime(2016, 3, 3)]})
        formatters = {'int': lambda x: '0x%x' % x,
                      'float': lambda x: '[% 4.1f]' % x,
                      'object': lambda x: '-%s-' % str(x),
                      'datetime64': lambda x: x.strftime('%Y-%m'),
                      '__index__': lambda x: 'index: %s' % x}
        result = df.to_latex(formatters=dict(formatters))
        expected = r"""\begin{tabular}{llrrl}
\toprule
{} & datetime64 & float & int & object \\
\midrule
index: 0 & 2016-01 & [ 1.0] & 0x1 & -(1, 2)- \\
index: 1 & 2016-02 & [ 2.0] & 0x2 & -True- \\
index: 2 & 2016-03 & [ 3.0] & 0x3 & -False- \\
\bottomrule
\end{tabular}
"""
        assert result == expected
    def test_to_latex_multiindex(self):
        """Rendering of MultiIndex columns, index and groupby output."""
        df = DataFrame({('x', 'y'): ['a']})
        result = df.to_latex()
        expected = r"""\begin{tabular}{ll}
\toprule
{} & x \\
{} & y \\
\midrule
0 & a \\
\bottomrule
\end{tabular}
"""
        assert result == expected
        result = df.T.to_latex()
        expected = r"""\begin{tabular}{lll}
\toprule
 & & 0 \\
\midrule
x & y & a \\
\bottomrule
\end{tabular}
"""
        assert result == expected
        df = DataFrame.from_dict({
            ('c1', 0): pd.Series(dict((x, x) for x in range(4))),
            ('c1', 1): pd.Series(dict((x, x + 4) for x in range(4))),
            ('c2', 0): pd.Series(dict((x, x) for x in range(4))),
            ('c2', 1): pd.Series(dict((x, x + 4) for x in range(4))),
            ('c3', 0): pd.Series(dict((x, x) for x in range(4))),
        }).T
        result = df.to_latex()
        expected = r"""\begin{tabular}{llrrrr}
\toprule
 & & 0 & 1 & 2 & 3 \\
\midrule
c1 & 0 & 0 & 1 & 2 & 3 \\
 & 1 & 4 & 5 & 6 & 7 \\
c2 & 0 & 0 & 1 & 2 & 3 \\
 & 1 & 4 & 5 & 6 & 7 \\
c3 & 0 & 0 & 1 & 2 & 3 \\
\bottomrule
\end{tabular}
"""
        assert result == expected
        # GH 14184
        df = df.T
        df.columns.names = ['a', 'b']
        result = df.to_latex()
        expected = r"""\begin{tabular}{lrrrrr}
\toprule
a & \multicolumn{2}{l}{c1} & \multicolumn{2}{l}{c2} & c3 \\
b & 0 & 1 & 0 & 1 & 0 \\
\midrule
0 & 0 & 4 & 0 & 4 & 0 \\
1 & 1 & 5 & 1 & 5 & 1 \\
2 & 2 & 6 & 2 & 6 & 2 \\
3 & 3 & 7 & 3 & 7 & 3 \\
\bottomrule
\end{tabular}
"""
        assert result == expected
        # GH 10660
        df = pd.DataFrame({'a': [0, 0, 1, 1],
                           'b': list('abab'),
                           'c': [1, 2, 3, 4]})
        result = df.set_index(['a', 'b']).to_latex()
        expected = r"""\begin{tabular}{llr}
\toprule
 & & c \\
a & b & \\
\midrule
0 & a & 1 \\
 & b & 2 \\
1 & a & 3 \\
 & b & 4 \\
\bottomrule
\end{tabular}
"""
        assert result == expected
        result = df.groupby('a').describe().to_latex()
        expected = r"""\begin{tabular}{lrrrrrrrr}
\toprule
{} & \multicolumn{8}{l}{c} \\
{} & count & mean & std & min & 25\% & 50\% & 75\% & max \\
a & & & & & & & & \\
\midrule
0 & 2.0 & 1.5 & 0.707107 & 1.0 & 1.25 & 1.5 & 1.75 & 2.0 \\
1 & 2.0 & 3.5 & 0.707107 & 3.0 & 3.25 & 3.5 & 3.75 & 4.0 \\
\bottomrule
\end{tabular}
"""
        assert result == expected
    def test_to_latex_multicolumnrow(self):
        """multicolumn/multirow flags and multicolumn_format are honoured."""
        df = pd.DataFrame({
            ('c1', 0): dict((x, x) for x in range(5)),
            ('c1', 1): dict((x, x + 5) for x in range(5)),
            ('c2', 0): dict((x, x) for x in range(5)),
            ('c2', 1): dict((x, x + 5) for x in range(5)),
            ('c3', 0): dict((x, x) for x in range(5))
        })
        result = df.to_latex()
        expected = r"""\begin{tabular}{lrrrrr}
\toprule
{} & \multicolumn{2}{l}{c1} & \multicolumn{2}{l}{c2} & c3 \\
{} & 0 & 1 & 0 & 1 & 0 \\
\midrule
0 & 0 & 5 & 0 & 5 & 0 \\
1 & 1 & 6 & 1 & 6 & 1 \\
2 & 2 & 7 & 2 & 7 & 2 \\
3 & 3 & 8 & 3 & 8 & 3 \\
4 & 4 & 9 & 4 & 9 & 4 \\
\bottomrule
\end{tabular}
"""
        assert result == expected
        result = df.to_latex(multicolumn=False)
        expected = r"""\begin{tabular}{lrrrrr}
\toprule
{} & c1 & & c2 & & c3 \\
{} & 0 & 1 & 0 & 1 & 0 \\
\midrule
0 & 0 & 5 & 0 & 5 & 0 \\
1 & 1 & 6 & 1 & 6 & 1 \\
2 & 2 & 7 & 2 & 7 & 2 \\
3 & 3 & 8 & 3 & 8 & 3 \\
4 & 4 & 9 & 4 & 9 & 4 \\
\bottomrule
\end{tabular}
"""
        assert result == expected
        result = df.T.to_latex(multirow=True)
        expected = r"""\begin{tabular}{llrrrrr}
\toprule
 & & 0 & 1 & 2 & 3 & 4 \\
\midrule
\multirow{2}{*}{c1} & 0 & 0 & 1 & 2 & 3 & 4 \\
 & 1 & 5 & 6 & 7 & 8 & 9 \\
\cline{1-7}
\multirow{2}{*}{c2} & 0 & 0 & 1 & 2 & 3 & 4 \\
 & 1 & 5 & 6 & 7 & 8 & 9 \\
\cline{1-7}
c3 & 0 & 0 & 1 & 2 & 3 & 4 \\
\bottomrule
\end{tabular}
"""
        assert result == expected
        df.index = df.T.index
        result = df.T.to_latex(multirow=True, multicolumn=True,
                               multicolumn_format='c')
        expected = r"""\begin{tabular}{llrrrrr}
\toprule
 & & \multicolumn{2}{c}{c1} & \multicolumn{2}{c}{c2} & c3 \\
 & & 0 & 1 & 0 & 1 & 0 \\
\midrule
\multirow{2}{*}{c1} & 0 & 0 & 1 & 2 & 3 & 4 \\
 & 1 & 5 & 6 & 7 & 8 & 9 \\
\cline{1-7}
\multirow{2}{*}{c2} & 0 & 0 & 1 & 2 & 3 & 4 \\
 & 1 & 5 & 6 & 7 & 8 & 9 \\
\cline{1-7}
c3 & 0 & 0 & 1 & 2 & 3 & 4 \\
\bottomrule
\end{tabular}
"""
        assert result == expected
    def test_to_latex_escape(self):
        """escape=True (the default) escapes LaTeX special characters."""
        a = 'a'
        b = 'b'
        test_dict = {u('co^l1'): {a: "a",
                                  b: "b"},
                     u('co$e^x$'): {a: "a",
                                    b: "b"}}
        unescaped_result = DataFrame(test_dict).to_latex(escape=False)
        escaped_result = DataFrame(test_dict).to_latex(
        ) # default: escape=True
        unescaped_expected = r'''\begin{tabular}{lll}
\toprule
{} & co$e^x$ & co^l1 \\
\midrule
a & a & a \\
b & b & b \\
\bottomrule
\end{tabular}
'''
        escaped_expected = r'''\begin{tabular}{lll}
\toprule
{} & co\$e\textasciicircumx\$ & co\textasciicircuml1 \\
\midrule
a & a & a \\
b & b & b \\
\bottomrule
\end{tabular}
'''
        assert unescaped_result == unescaped_expected
        assert escaped_result == escaped_expected
    def test_to_latex_longtable(self, frame):
        """longtable=True emits the longtable environment with head/foot."""
        frame.to_latex(longtable=True)
        df = DataFrame({'a': [1, 2], 'b': ['b1', 'b2']})
        withindex_result = df.to_latex(longtable=True)
        withindex_expected = r"""\begin{longtable}{lrl}
\toprule
{} & a & b \\
\midrule
\endhead
\midrule
\multicolumn{3}{r}{{Continued on next page}} \\
\midrule
\endfoot
\bottomrule
\endlastfoot
0 & 1 & b1 \\
1 & 2 & b2 \\
\end{longtable}
"""
        assert withindex_result == withindex_expected
        withoutindex_result = df.to_latex(index=False, longtable=True)
        withoutindex_expected = r"""\begin{longtable}{rl}
\toprule
a & b \\
\midrule
\endhead
\midrule
\multicolumn{3}{r}{{Continued on next page}} \\
\midrule
\endfoot
\bottomrule
\endlastfoot
1 & b1 \\
2 & b2 \\
\end{longtable}
"""
        assert withoutindex_result == withoutindex_expected
    def test_to_latex_escape_special_chars(self):
        """Every LaTeX special character gets its escaped representation."""
        special_characters = ['&', '%', '$', '#', '_', '{', '}', '~', '^',
                              '\\']
        df = DataFrame(data=special_characters)
        observed = df.to_latex()
        expected = r"""\begin{tabular}{ll}
\toprule
{} & 0 \\
\midrule
0 & \& \\
1 & \% \\
2 & \$ \\
3 & \# \\
4 & \_ \\
5 & \{ \\
6 & \} \\
7 & \textasciitilde \\
8 & \textasciicircum \\
9 & \textbackslash \\
\bottomrule
\end{tabular}
"""
        assert observed == expected
    def test_to_latex_no_header(self):
        """header=False suppresses the column-name row and \midrule."""
        # GH 7124
        df = DataFrame({'a': [1, 2], 'b': ['b1', 'b2']})
        withindex_result = df.to_latex(header=False)
        withindex_expected = r"""\begin{tabular}{lrl}
\toprule
0 & 1 & b1 \\
1 & 2 & b2 \\
\bottomrule
\end{tabular}
"""
        assert withindex_result == withindex_expected
        withoutindex_result = df.to_latex(index=False, header=False)
        withoutindex_expected = r"""\begin{tabular}{rl}
\toprule
1 & b1 \\
2 & b2 \\
\bottomrule
\end{tabular}
"""
        assert withoutindex_result == withoutindex_expected
    def test_to_latex_specified_header(self):
        """A header list replaces column names; wrong length raises."""
        # GH 7124
        df = DataFrame({'a': [1, 2], 'b': ['b1', 'b2']})
        withindex_result = df.to_latex(header=['AA', 'BB'])
        withindex_expected = r"""\begin{tabular}{lrl}
\toprule
{} & AA & BB \\
\midrule
0 & 1 & b1 \\
1 & 2 & b2 \\
\bottomrule
\end{tabular}
"""
        assert withindex_result == withindex_expected
        withoutindex_result = df.to_latex(header=['AA', 'BB'], index=False)
        withoutindex_expected = r"""\begin{tabular}{rl}
\toprule
AA & BB \\
\midrule
1 & b1 \\
2 & b2 \\
\bottomrule
\end{tabular}
"""
        assert withoutindex_result == withoutindex_expected
        withoutescape_result = df.to_latex(header=['$A$', '$B$'], escape=False)
        withoutescape_expected = r"""\begin{tabular}{lrl}
\toprule
{} & $A$ & $B$ \\
\midrule
0 & 1 & b1 \\
1 & 2 & b2 \\
\bottomrule
\end{tabular}
"""
        assert withoutescape_result == withoutescape_expected
        with pytest.raises(ValueError):
            df.to_latex(header=['A'])
    def test_to_latex_decimal(self, frame):
        """The decimal separator parameter is applied to float columns."""
        # GH 12031
        frame.to_latex()
        df = DataFrame({'a': [1.0, 2.1], 'b': ['b1', 'b2']})
        withindex_result = df.to_latex(decimal=',')
        withindex_expected = r"""\begin{tabular}{lrl}
\toprule
{} & a & b \\
\midrule
0 & 1,0 & b1 \\
1 & 2,1 & b2 \\
\bottomrule
\end{tabular}
"""
        assert withindex_result == withindex_expected
    def test_to_latex_series(self):
        """Series.to_latex renders a single-column table."""
        s = Series(['a', 'b', 'c'])
        withindex_result = s.to_latex()
        withindex_expected = r"""\begin{tabular}{ll}
\toprule
{} & 0 \\
\midrule
0 & a \\
1 & b \\
2 & c \\
\bottomrule
\end{tabular}
"""
        assert withindex_result == withindex_expected
| bsd-3-clause |
EtienneCmb/tensorpac | examples/misc/plot_frequency_vectors.py | 1 | 1758 | """
========================
Define frequency vectors
========================
In Tensorpac, you can define your phase and amplitude vectors in several ways :
* Manually define one band (ex : [2, 4])
* Define multiple bands using a list/tuple/array (ex : [[2, 4], [5, 7]])
* Using a (start, stop, width, step) definition. The latter lets you create regularly spaced bands [[start, start+width], [start+step, start+step+width],
[start+2*step, start+2*step+width], ...]
* Using a range : np.arange(1, 6) => [[1, 2], [2, 3], [3, 4], [4, 5]]
"""
import matplotlib.pyplot as plt
import numpy as np
from tensorpac.utils import pac_vec
def plot(nb, pvec, avec, title):
    """Plot phase (vertical lines) and amplitude (horizontal lines) bands.

    Parameters
    ----------
    nb : int
        1-based subplot index in the 1x4 figure grid.
    pvec, avec : array_like of shape (n_bands, 2)
        Frequency bands for phase / amplitude; each row is (start, stop).
    title : str
        Title of the subplot.
    """
    # Draw each band at its centre frequency.
    pvecm = pvec.mean(1)
    avecm = avec.mean(1)
    plt.subplot(1, 4, nb)
    plt.vlines(pvecm, -10, 500, color='#ab4642')
    plt.hlines(avecm, -10, 500, color='slateblue')
    # Fixed axis labels: the x-label was missing its closing parenthesis
    # and the y-label accidentally contained the '.mean(1)' code fragment.
    plt.xlabel('Frequency for phase (Hz)')
    plt.ylabel('Frequency for amplitude (Hz)')
    plt.title(title)
    plt.xlim([0, 30])
    plt.ylim([60, 200])
plt.figure(figsize=(25, 5))

# One entry per demonstrated definition style:
# (subplot index, phase definition, amplitude definition, title).
examples = [
    (1, [2, 4], [60, 160], '1 - One frequency band'),
    (2, [[2, 4], [5, 7], [8, 13]], ([60, 160], [60, 200]),
     'Manually define several frequency bands'),
    (3, (1, 30, 2, 1), (60, 200, 10, 5),
     'Use the (start, stop, width, step definition'),
    (4, np.arange(1, 20), np.arange(60, 200, 10), 'Using a range definition'),
]
for nb, fpha, famp, title in examples:
    pvec, avec = pac_vec(fpha, famp)
    plot(nb, pvec, avec, title)

plt.show()
| bsd-3-clause |
emotrix/Emotrix | emotrix/emotrix.py | 1 | 10735 | # -*- coding: utf-8 -*-
# **********************************************************************************************************************
# Archivo: emotrix.py
# Proposito: Este archivo es el encargado de gestionar todas las llamadas al API, para trabajar con EMOTRIX
# Autor: Henzer Garcia 12538
# **********************************************************************************************************************
from RawData import RawData
import numpy as np
import matplotlib.pyplot as plt
import pywt
from sklearn import svm
from sklearn.model_selection import cross_val_score
from random import shuffle
from sklearn import preprocessing
# Class definition: public entry point of the Emotrix API.
class Emotrix():
    # Constructor: initialises the (still untrained) SVM classifier.
    def __init__(self):
        self.clf = svm.SVC()
    # Return the block/segment whose tag matches the requested one.
    # Implicitly returns None when no block carries that tag.
    def get_block(self, blocks, tag):
        for block in blocks:
            if block.tag == tag:
                return block
    # Plot the raw EEG channels (F3/F4/AF3/AF4) for one user file, overlaying
    # the SAD (red), HAPPY (blue) and NEUTRAL (black) segments.
    def graph(self, file_data="user1.csv"):
        raw = RawData(file_data)
        blocks = raw.get_blocks_data()
        sad_block = self.get_block(blocks, tag='SAD')
        happy_block = self.get_block(blocks, tag='HAPPY')
        neutral_block = self.get_block(blocks, tag='NEUTRAL')
        # Prepare the figure before drawing.
        plt.figure(1, figsize=(20, 10))
        plt.title(u"Señal EEG")
        plt.subplot(411)
        plt.plot(sad_block.f3, 'r')
        plt.plot(happy_block.f3, 'b')
        plt.plot(neutral_block.f3, 'k')
        plt.ylabel("F3")
        plt.subplot(412)
        plt.plot(sad_block.f4, 'r')
        plt.plot(happy_block.f4, 'b')
        plt.plot(neutral_block.f4, 'k')
        plt.ylabel("F4")
        plt.subplot(413)
        plt.plot(sad_block.af3, 'r')
        plt.plot(happy_block.af3, 'b')
        plt.plot(neutral_block.af3, 'k')
        plt.ylabel("AF3")
        plt.subplot(414)
        plt.plot(sad_block.af4, 'r')
        plt.plot(happy_block.af4, 'b')
        plt.plot(neutral_block.af4, 'k')
        plt.ylabel("AF4")
        # Save the figure as an image before showing it.
        plt.savefig('../../imagenes/sad_EEG.png')
        plt.show()
    # Transformation applied to the signal: squared magnitude (a PSD-like
    # transform).  Uncomment the first return to disable the transform.
    def transf(self, signal):
        #return signal
        return np.abs(signal)**2
    # Visualisation helper: decomposes each sensor channel of the training
    # file into the five classic EEG frequency bands and plots them.
    def training(self, file_data="user"):
        raw = RawData(file_data)
        blocks = raw.get_blocks_data()
        block = self.get_block(blocks, 'SAD')
        block_happy = self.get_block(blocks, 'HAPPY')
        block_neutral = self.get_block(blocks, 'NEUTRAL')
        # Parameters for the wavelet decomposition.
        level = 4
        wavelet = 'db4'
        sensors = [raw.f3, raw.f4, raw.af3, raw.af4]
        for i in range(4):
            n = len(sensors[i])
            # Apply the discrete wavelet transform.
            cA4, cD4, cD3, cD2, cD1 = pywt.wavedec(sensors[i], wavelet=wavelet, level=level)
            # Reconstruct the five frequency bands.
            delta = pywt.upcoef('a', cA4, wavelet=wavelet, level=level, take=n)
            theta = pywt.upcoef('d', cD4, wavelet=wavelet, level=level, take=n)
            alpha = pywt.upcoef('d', cD3, wavelet=wavelet, level=3, take=n)
            beta = pywt.upcoef('d', cD2, wavelet=wavelet, level=2, take=n)
            gamma = pywt.upcoef('d', cD1, wavelet=wavelet, level=1, take=n)
            # Plot the bands after applying the transform.
            plt.figure(i, figsize=(20, 10))
            plt.subplot(511)
            plt.plot(self.transf(delta))
            plt.ylabel("delta")
            plt.subplot(512)
            plt.plot(self.transf(theta))
            plt.ylabel("theta")
            plt.subplot(513)
            plt.plot(self.transf(alpha))
            plt.ylabel("alpha")
            plt.subplot(514)
            plt.plot(self.transf(beta))
            plt.ylabel("beta")
            plt.subplot(515)
            plt.plot(self.transf(gamma))
            plt.ylabel("gamma")
            # NOTE(review): `user` is undefined in this scope, so this line
            # raises NameError at runtime — it probably should use `file_data`.
            plt.savefig('../../imagenes/F'+str(i+1)+'ANEXOS_'+user+'.png')
            plt.show()
    # Train the SVM classifier from the labelled data files.
    def training2(self, file_data="user"):
        # Read all data files sharing the given prefix.
        raw = RawData(file_data, several_files=True)
        # Get the data split into tagged segments, shuffled for training.
        blocks = raw.get_blocks_data()
        shuffle(blocks)
        c = [0, 0, 0]
        for block in blocks:
            if block.tag == 'HAPPY':
                c[0] += 1
            if block.tag == 'SAD':
                c[1] += 1
            if block.tag == 'NEUTRAL':
                c[2] += 1
        # Report the number of samples per class.
        print '################ NUMERO DE MUESTRAS ###############'
        print str(c[0]) + ' HAPPY'
        print str(c[1]) + ' SAD'
        print str(c[2]) + ' NEUTRAL'
        # Extract/sanitise the features before fitting the SVM.
        result = self.feature_extraction(blocks)
        X = result['features']
        y = result['tags']
        self.clf = svm.SVC(kernel='linear', C=0.001)
        scores = cross_val_score(self.clf, X, y, cv=5, verbose=1)
        print ("Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
        self.clf.fit(X, y)
        print "El algoritmo esta listo para usuarse"
    # Testing routine: cross-validates the SVM over a sweep of C values.
    def getAccuracy(self, file_data="user"):
        # Read all data files sharing the given prefix.
        raw = RawData(file_data, several_files=True)
        # Get the data split into tagged segments, shuffled.
        blocks = raw.get_blocks_data()
        shuffle(blocks)
        c = [0, 0, 0]
        for block in blocks:
            if block.tag == 'HAPPY':
                c[0] += 1
            if block.tag == 'SAD':
                c[1] += 1
            if block.tag == 'NEUTRAL':
                c[2] += 1
        # Report the number of samples per class.
        print '################ NUMERO DE MUESTRAS ###############'
        print str(c[0]) + ' HAPPY'
        print str(c[1]) + ' SAD'
        print str(c[2]) + ' NEUTRAL'
        # Extract/sanitise the features before running the SVM.
        result = self.feature_extraction(blocks)
        X = result['features']
        y = result['tags']
        # Cross-validate while sweeping the regularisation parameter C.
        for c in np.arange(0.001, 1, 0.5):
            print "################ C=" + str(c) + " #########################"
            self.clf = svm.SVC(kernel='linear', C=c)
            scores = cross_val_score(self.clf, X, y, cv=5, verbose=1)
            print ("Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
        for c in np.arange(1, 100, 10):
            print "################ C=" + str(c) + " #########################"
            self.clf = svm.SVC(kernel='linear', C=c)
            scores = cross_val_score(self.clf, X, y, cv=5, verbose=1)
            print ("Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
        for c in np.arange(200, 2000, 200):
            print "################ C=" + str(c) + " #########################"
            self.clf = svm.SVC(kernel='linear', C=c)
            scores = cross_val_score(self.clf, X, y, cv=5, verbose=1)
            print ("Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
    # Extract time- and frequency-domain features from the EEG segments:
    # mean/std of each channel plus mean/std of the alpha/beta/gamma bands.
    def feature_extraction(self, blocks):
        print 'Feature extraction'
        result = {
            "features": [],
            "tags": []
        }
        i = 0
        n = len(blocks)
        level = 4
        wavelet = 'db4'
        for block in blocks:
            feature = []
            electrodes = [block.f3, block.f4, block.af3, block.af4]
            for electrode in electrodes:
                # Time-domain features: mean and standard deviation.
                feature.append(np.mean(electrode))
                feature.append(np.std(electrode))
                size = len(electrode)
                # Wavelet transform to separate the frequency bands.
                wavelet_result = pywt.wavedec(electrode, wavelet='db4', level=level)
                alpha = pywt.upcoef('d', wavelet_result[2], wavelet='db4', level=3, take=size)
                beta = pywt.upcoef('d', wavelet_result[3], wavelet='db4', level=2, take=size)
                gamma = pywt.upcoef('d', wavelet_result[4], wavelet='db4', level=1, take=size)
                # Frequency-domain features: mean and std of each band.
                #ALPHA
                feature.append(np.mean(self.transf(alpha)))
                feature.append(np.std(self.transf(alpha)))
                #BETA
                feature.append(np.mean(self.transf(beta)))
                feature.append(np.std(self.transf(beta)))
                #GAMMA
                feature.append(np.mean(self.transf(gamma)))
                feature.append(np.std(self.transf(gamma)))
            result['features'].append(feature)
            result['tags'].append(block.tag)
        return result
    # Return the list of emotions the EMOTRIX SDK can determine.  (Stub.)
    def get_available_emotions(self):
        pass
    # Return the EMOTRIX SDK version.  (Stub.)
    def get_version(self):
        pass
    # Return the raw EEG signal data.  (Stub.)
    def get_raw_data(self):
        pass
    # Determine the subject's dominant emotion.  (Stub.)
    def get_current_emotion(self):
        pass
    # Return a list with the subject's current emotions.  (Stub.)
    def get_current_emotions(self):
        pass
    # Return the information reported by the wristband.  (Stub.)
    def get_hand_position(self):
        pass
    # Return the subject's heart rate.  (Stub.)
    def get_heart_beats(self):
        pass
    # Return the list of electrodes used by EMOTRIX.  (Stub.)
    def get_electrodes(self):
        pass
    # Classify one EEG segment with the trained SVM.
    def detect_emotion(self, block):
        features = self.feature_extraction([block])
        print features
        return self.clf.predict(features['features'])
# Script entry point: run the accuracy sweep, train and plot.
e = Emotrix()
# Fixed: Emotrix defines no `pruebas` method, so the original call raised
# AttributeError; `getAccuracy` is the class's testing ("pruebas") routine.
e.getAccuracy(file_data='user')
e.training(file_data='C:/Users/ANGELFRANCISCOMORALE/Desktop/data/17F2238-data.csv')
e.graph()
linksuccess/linksuccess | parsingframework/HypTrails.py | 1 | 6551 | # further implementations can be found:
# Python: https://github.com/psinger/hyptrails
# Java: https://bitbucket.org/florian_lemmerich/hyptrails4j
# Apache spark: http://dmir.org/sparktrails/
# also see: http://www.philippsinger.info/hyptrails/
from __future__ import division
import itertools
from scipy.sparse import csr_matrix
from scipy.special import gammaln
from collections import defaultdict
from sklearn.preprocessing import normalize
from scipy.sparse.sparsetools import csr_scale_rows
import numpy as np
class HypTrails():
    """
    HypTrails: Bayesian comparison of hypotheses about sequential trails.
    Hypotheses are expressed as Dirichlet priors over a first-order
    Markov chain; higher evidence means better support by the data.
    """
    def __init__(self, vocab=None):
        """
        Constructor for class HypTrails
        Args:
            vocab: optional vocabulary mapping states to indices
        """
        self.vocab = vocab
        # NOTE(review): the default vocab=None makes len(vocab) raise
        # TypeError — in practice a mapping must always be passed.
        self.state_count = len(vocab)
    def fit(self, transitions_matrix):
        """
        Function for fitting the Markov Chain model given data
        Args:
            transitions_matrix: sparse matrix of state-transition counts
                whose row/column indices follow the vocabulary mapping
        """
        self.transitions = transitions_matrix
        #print "fit done"
    def evidence(self, hypothesis, structur, k=1, prior=1., norm=True):
        """
        Determines Bayesian evidence given fitted model and hypothesis
        Args:
            hypothesis: Hypothesis csr matrix,
                        indices need to map those of transition matrix
            structur: csr matrix of structurally allowed transitions,
                      used to spread pseudo counts over all-zero rows
            k: Concentration parameter k
            prior: proto Dirichlet prior
            norm: Flag for normalizing hypothesis matrix
        Returns
            evidence
        """
        # care with copy here
        hypothesis = csr_matrix(hypothesis, copy=True)
        structur = csr_matrix(structur, copy=True)
        # Total pseudo counts distributed per row.
        pseudo_counts = k * self.state_count
        if hypothesis.size != 0:
            # in case of memory issues set copy to False but then care about changed hypothesis matrix
            if norm == True:
                # Row-normalise the hypothesis matrix (rows sum to 1).
                norm_h = hypothesis.sum(axis=1)
                n_nzeros = np.where(norm_h > 0)
                norm_h[n_nzeros] = 1.0 / norm_h[n_nzeros]
                norm_h = np.array(norm_h).T[0]
                # modify sparse_csc_matrix in place
                csr_scale_rows(hypothesis.shape[0],
                               hypothesis.shape[1],
                               hypothesis.indptr,
                               hypothesis.indices,
                               hypothesis.data, norm_h)
                # distribute pseudo counts to matrix, row-based approach
                hypothesis = hypothesis * pseudo_counts
                # also consider those rows which only include zeros:
                # give them a uniform share of the pseudo counts
                norma = hypothesis.sum(axis=1)
                n_zeros,_ = np.where(norma == 0)
                hypothesis[n_zeros,:] = pseudo_counts / self.state_count
            else:
                # Same row normalisation as above ...
                norm_h = hypothesis.sum(axis=1)
                n_nzeros = np.where(norm_h > 0)
                norm_h[n_nzeros] = 1.0 / norm_h[n_nzeros]
                norm_h = np.array(norm_h).T[0]
                # modify sparse_csc_matrix in place
                csr_scale_rows(hypothesis.shape[0],
                               hypothesis.shape[1],
                               hypothesis.indptr,
                               hypothesis.indices,
                               hypothesis.data, norm_h)
                # distribute pseudo counts to matrix, row-based approach
                #TODO check if this line should be placed after the zero_rows_norm() call????
                hypothesis = hypothesis * pseudo_counts
                # ... but all-zero rows follow the structure matrix instead
                # of a uniform distribution.
                #self.zero_rows_norm(hypothesis, structur,k)
                self.zero_rows_norm_eff1(hypothesis, structur, k)
        else:
            # if hypothesis matrix is empty, we can simply increase the proto prior parameter
            prior += k
        # transition matrix with additional Dirichlet prior
        # not memory efficient
        transitions_prior = self.transitions.copy()
        transitions_prior = transitions_prior + hypothesis
        # elegantly calculate evidence (marginal likelihood of a
        # Dirichlet-multinomial, expressed via log-gamma terms)
        evidence = 0
        evidence += gammaln(hypothesis.sum(axis=1)+self.state_count*prior).sum()
        evidence -= gammaln(self.transitions.sum(axis=1)+hypothesis.sum(axis=1)+self.state_count*prior).sum()
        evidence += gammaln(transitions_prior.data+prior).sum()
        evidence -= gammaln(hypothesis.data+prior).sum() + (len(transitions_prior.data)-len(hypothesis.data)) * gammaln(prior)
        return evidence
    def zero_rows_norm(self, hypothesis, structur,k):
        """Spread k pseudo counts over the structurally allowed targets of
        every all-zero hypothesis row (slow, row-by-row variant)."""
        norma = hypothesis.sum(axis=1)
        n_zeros = np.where(norma == 0)
        print 'n_zeros'
        print len(n_zeros[0])
        for x, i in enumerate(n_zeros[0]):
            if x % 1000 == 0:
                print x, len(n_zeros[0])
            links = np.where(structur[i,:]!=0)
            hypothesis[i,links[0]] = k / len(links[0])
        print 'n_zeros done'
    # def zero_rows_norm_eff(self,hypothesis, structur):
    #     #find zero sum rows in hypothesis
    #     print 'sum hyp'
    #     norma = hypothesis.sum(axis=1)
    #     n_zeros = np.where(norma == 0)
    #     # norm the structure matrix
    #     print 'sum structure'
    #     tmp = structur[n_zeros]
    #     norm_s = tmp.sum(axis=1)
    #     norm_s = np.array(norm_s).T[0]
    #     tmp = tmp/norm_s[:,None]
    #     #replece the zero rows in hypothesis with the corresponding rows in the normed strcuture matrix
    #     print 'replace'
    #     hypotheis[n_zeros,:]=tmp[n_zeros,:]
    def zero_rows_norm_eff1(self,hypothesis, structur, k):
        """Efficient variant of zero_rows_norm: builds one COO-style triplet
        list for all zero rows and adds it in a single sparse operation.
        Note: returns a new matrix but the result is discarded by the
        caller — assumes in-place semantics; see usage in evidence()."""
        #find zero sum rows in hypothesis
        norma = hypothesis.sum(axis=1)
        n_zeros = np.where(norma == 0)
        # norm the structure matrix
        i_index = list()
        j_index = list()
        values = list()
        for x, i in enumerate(n_zeros[0]):
            links = np.where(structur[i,:]!=0)
            value = k / len(links[0])
            for j in links[0]:
                i_index.append(i)
                j_index.append(j)
                values.append(value)
        hypothesis= hypothesis+csr_matrix((values, (i_index, j_index)),
                                          shape=hypothesis.shape, dtype=np.float)
| mit |
hitszxp/scikit-learn | sklearn/linear_model/tests/test_base.py | 13 | 10412 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.base import center_data, sparse_center_data
from sklearn.utils import check_random_state
from sklearn.datasets.samples_generator import make_sparse_uncorrelated
from sklearn.datasets.samples_generator import make_regression
def test_linear_regression():
    """
    Test LinearRegression on a simple dataset.
    """
    # Each case: (design, target, expected coef_, expected intercept_,
    # expected predictions). The second case is degenerate input.
    cases = [
        ([[1], [2]], [1, 2], [1], [0], [1, 2]),
        ([[1]], [0], [0], [0], [0]),
    ]
    for design, target, exp_coef, exp_icpt, exp_pred in cases:
        model = LinearRegression()
        model.fit(design, target)

        assert_array_almost_equal(model.coef_, exp_coef)
        assert_array_almost_equal(model.intercept_, exp_icpt)
        assert_array_almost_equal(model.predict(design), exp_pred)
def test_linear_regression_n_jobs():
    """
    Test for the n_jobs parameter on the fit method and the constructor
    """
    design = [[1], [2]]
    target = [1, 2]
    model = LinearRegression()
    # fit() accepts n_jobs positionally and returns the estimator itself.
    fitted = model.fit(design, target, 4)
    assert_equal(fitted.n_jobs, model.n_jobs)
    # The constructor default (n_jobs=1) must survive the fit call.
    assert_equal(model.n_jobs, 1)
def test_fit_intercept():
    """
    Test assertions on betas shape.
    """
    y = np.array([1, 1])
    # Two designs: one with 2 features, one with 3.
    designs = {
        2: np.array([[0.38349978, 0.61650022],
                     [0.58853682, 0.41146318]]),
        3: np.array([[0.27677969, 0.70693172, 0.01628859],
                     [0.08385139, 0.20692515, 0.70922346]]),
    }
    # Fit every (n_features, fit_intercept) combination.
    fitted = {
        (n, icpt): LinearRegression(fit_intercept=icpt).fit(X, y)
        for n, X in designs.items()
        for icpt in (False, True)
    }

    # coef_ shape must not depend on whether an intercept is fitted.
    for n in designs:
        assert_equal(fitted[(n, True)].coef_.shape,
                     fitted[(n, False)].coef_.shape)
    # Nor must its dimensionality depend on the number of features.
    assert_equal(fitted[(2, False)].coef_.ndim,
                 fitted[(3, False)].coef_.ndim)
def test_linear_regression_sparse(random_state=0):
    "Test that linear regression also works with sparse data"
    rng = check_random_state(random_state)
    for _ in range(10):
        n = 100
        # Identity design: the target equals the coefficients directly.
        design = sparse.eye(n, n)
        true_beta = rng.rand(n)
        target = design * true_beta[:, np.newaxis]

        model = LinearRegression()
        model.fit(design, target.ravel())

        assert_array_almost_equal(true_beta, model.coef_ + model.intercept_)
        assert_array_almost_equal(model.residues_, 0)
def test_linear_regression_multiple_outcome(random_state=0):
    "Test multiple-outcome linear regressions"
    X, y = make_regression(random_state=random_state)

    # Stack the same target twice -> a (n_samples, 2) multi-output problem.
    Y = np.vstack((y, y)).T
    n_features = X.shape[1]

    clf = LinearRegression(fit_intercept=True)
    clf.fit((X), Y)
    # One coefficient row per output column.
    assert_equal(clf.coef_.shape, (2, n_features))
    Y_pred = clf.predict(X)
    # A single-output fit must predict the same values, duplicated per column.
    clf.fit(X, y)
    y_pred = clf.predict(X)
    assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_linear_regression_sparse_multiple_outcome(random_state=0):
    "Test multiple-outcome linear regressions with sparse data"
    random_state = check_random_state(random_state)
    X, y = make_sparse_uncorrelated(random_state=random_state)
    X = sparse.coo_matrix(X)
    # Stack the same target twice -> a (n_samples, 2) multi-output problem.
    Y = np.vstack((y, y)).T
    n_features = X.shape[1]

    ols = LinearRegression()
    ols.fit(X, Y)
    # One coefficient row per output column.
    assert_equal(ols.coef_.shape, (2, n_features))
    Y_pred = ols.predict(X)
    # A single-output fit must predict the same values, duplicated per column.
    ols.fit(X, y.ravel())
    y_pred = ols.predict(X)
    assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_center_data():
    """Check center_data in the no-op, centering, and normalizing cases."""
    n_samples = 200
    n_features = 2
    rng = check_random_state(0)
    X = rng.rand(n_samples, n_features)
    y = rng.rand(n_samples)
    expected_X_mean = np.mean(X, axis=0)
    # XXX: currently scaled to variance=n_samples
    expected_X_std = np.std(X, axis=0) * np.sqrt(X.shape[0])
    expected_y_mean = np.mean(y, axis=0)

    # fit_intercept=False: X and y must pass through unchanged.
    Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=False,
                                                normalize=False)
    assert_array_almost_equal(X_mean, np.zeros(n_features))
    assert_array_almost_equal(y_mean, 0)
    assert_array_almost_equal(X_std, np.ones(n_features))
    assert_array_almost_equal(Xt, X)
    assert_array_almost_equal(yt, y)

    # fit_intercept=True: both are centered, X is not scaled.
    Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
                                                normalize=False)
    assert_array_almost_equal(X_mean, expected_X_mean)
    assert_array_almost_equal(y_mean, expected_y_mean)
    assert_array_almost_equal(X_std, np.ones(n_features))
    assert_array_almost_equal(Xt, X - expected_X_mean)
    assert_array_almost_equal(yt, y - expected_y_mean)

    # normalize=True: X is additionally divided by its (scaled) std.
    Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
                                                normalize=True)
    assert_array_almost_equal(X_mean, expected_X_mean)
    assert_array_almost_equal(y_mean, expected_y_mean)
    assert_array_almost_equal(X_std, expected_X_std)
    assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_std)
    assert_array_almost_equal(yt, y - expected_y_mean)
def test_center_data_multioutput():
    """Check y handling of (sparse_)center_data with a 2d target."""
    n_samples = 200
    n_features = 3
    n_outputs = 2
    rng = check_random_state(0)
    X = rng.rand(n_samples, n_features)
    y = rng.rand(n_samples, n_outputs)
    expected_y_mean = np.mean(y, axis=0)

    # Run the same checks against the dense and the sparse implementation.
    args = [(center_data, X), (sparse_center_data, sparse.csc_matrix(X))]
    for center, X in args:
        # fit_intercept=False: y passes through unchanged.
        _, yt, _, y_mean, _ = center(X, y, fit_intercept=False,
                                     normalize=False)
        assert_array_almost_equal(y_mean, np.zeros(n_outputs))
        assert_array_almost_equal(yt, y)

        # fit_intercept=True: y is centered column-wise.
        _, yt, _, y_mean, _ = center(X, y, fit_intercept=True,
                                     normalize=False)
        assert_array_almost_equal(y_mean, expected_y_mean)
        assert_array_almost_equal(yt, y - y_mean)

        # normalize only affects X, so y handling is identical here.
        _, yt, _, y_mean, _ = center(X, y, fit_intercept=True,
                                     normalize=True)
        assert_array_almost_equal(y_mean, expected_y_mean)
        assert_array_almost_equal(yt, y - y_mean)
def test_center_data_weighted():
    """Check center_data with per-sample weights."""
    n_samples = 200
    n_features = 2
    rng = check_random_state(0)
    X = rng.rand(n_samples, n_features)
    y = rng.rand(n_samples)
    sample_weight = rng.rand(n_samples)
    # With weights, the centering means are weighted averages.
    expected_X_mean = np.average(X, axis=0, weights=sample_weight)
    expected_y_mean = np.average(y, axis=0, weights=sample_weight)

    # XXX: if normalize=True, should we expect a weighted standard deviation?
    #      Currently not weighted, but calculated with respect to weighted mean
    # XXX: currently scaled to variance=n_samples
    expected_X_std = (np.sqrt(X.shape[0]) *
                      np.mean((X - expected_X_mean) ** 2, axis=0) ** .5)

    # Centering only: data shifted by the weighted means, no scaling.
    Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
                                                normalize=False,
                                                sample_weight=sample_weight)
    assert_array_almost_equal(X_mean, expected_X_mean)
    assert_array_almost_equal(y_mean, expected_y_mean)
    assert_array_almost_equal(X_std, np.ones(n_features))
    assert_array_almost_equal(Xt, X - expected_X_mean)
    assert_array_almost_equal(yt, y - expected_y_mean)

    # Centering + normalizing: X additionally divided by expected_X_std.
    Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
                                                normalize=True,
                                                sample_weight=sample_weight)
    assert_array_almost_equal(X_mean, expected_X_mean)
    assert_array_almost_equal(y_mean, expected_y_mean)
    assert_array_almost_equal(X_std, expected_X_std)
    assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_std)
    assert_array_almost_equal(yt, y - expected_y_mean)
def test_sparse_center_data():
    """Check sparse_center_data on a random sparse design matrix."""
    n_samples = 200
    n_features = 2
    rng = check_random_state(0)
    # random_state not supported yet in sparse.rand
    X = sparse.rand(n_samples, n_features, density=.5)  # , random_state=rng
    X = X.tolil()
    y = rng.rand(n_samples)
    XA = X.toarray()
    # XXX: currently scaled to variance=n_samples
    expected_X_std = np.std(XA, axis=0) * np.sqrt(X.shape[0])

    # fit_intercept=False: everything passes through unchanged.
    Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
                                                       fit_intercept=False,
                                                       normalize=False)
    assert_array_almost_equal(X_mean, np.zeros(n_features))
    assert_array_almost_equal(y_mean, 0)
    assert_array_almost_equal(X_std, np.ones(n_features))
    assert_array_almost_equal(Xt.A, XA)
    assert_array_almost_equal(yt, y)

    # fit_intercept=True: y is centered, but the sparse X is returned
    # uncentered (Xt.A == XA) -- presumably to preserve sparsity; only the
    # mean is reported.
    Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
                                                       fit_intercept=True,
                                                       normalize=False)
    assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
    assert_array_almost_equal(y_mean, np.mean(y, axis=0))
    assert_array_almost_equal(X_std, np.ones(n_features))
    assert_array_almost_equal(Xt.A, XA)
    assert_array_almost_equal(yt, y - np.mean(y, axis=0))

    # normalize=True: X is scaled by its std but still not mean-centered.
    Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
                                                       fit_intercept=True,
                                                       normalize=True)
    assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
    assert_array_almost_equal(y_mean, np.mean(y, axis=0))
    assert_array_almost_equal(X_std, expected_X_std)
    assert_array_almost_equal(Xt.A, XA / expected_X_std)
    assert_array_almost_equal(yt, y - np.mean(y, axis=0))
def test_csr_sparse_center_data():
    """Test output format of sparse_center_data, when input is csr"""
    design, target = make_regression()
    # Sparsify the design so the CSR conversion is meaningful.
    design[design < 2.5] = 0.0
    sparse_design = sparse.csr_matrix(design)
    centered, target, _, _, _ = sparse_center_data(sparse_design, target, True)
    # The CSR format must be preserved by the centering helper.
    assert_equal(centered.getformat(), 'csr')
| bsd-3-clause |
AtsushiSakai/PythonRobotics | PathPlanning/WavefrontCPP/wavefront_coverage_path_planner.py | 1 | 6924 | """
Distance/Path Transform Wavefront Coverage Path Planner
author: Todd Tang
paper: Planning paths of complete coverage of an unstructured environment
by a mobile robot - Zelinsky et.al.
link: http://pinkwink.kr/attachment/cfile3.uf@1354654A4E8945BD13FE77.pdf
"""
import os
import sys
import matplotlib.pyplot as plt
import numpy as np
from scipy import ndimage
do_animation = True
def transform(
        grid_map, src, distance_type='chessboard',
        transform_type='path', alpha=0.01
):
    """transform

    calculating transform of transform_type from src
    in given distance_type

    :param grid_map: 2d binary map (1.0 marks an obstacle cell)
    :param src: (row, col) distance transform source
    :param distance_type: type of distance used ('chessboard' or 'eculidean')
    :param transform_type: type of transform used ('distance' or 'path')
    :param alpha: weight of Obstacle Transform used when using path_transform
    """
    n_rows, n_cols = grid_map.shape

    if n_rows == 0 or n_cols == 0:
        sys.exit('Empty grid_map.')

    # 8-connected neighborhood and the matching per-step costs.
    inc_order = [[0, 1], [1, 1], [1, 0], [1, -1],
                 [0, -1], [-1, -1], [-1, 0], [-1, 1]]
    if distance_type == 'chessboard':
        cost = [1, 1, 1, 1, 1, 1, 1, 1]
    elif distance_type == 'eculidean':  # (sic) original option name kept
        cost = [1, np.sqrt(2), 1, np.sqrt(2), 1, np.sqrt(2), 1, np.sqrt(2)]
    else:
        sys.exit('Unsupported distance type.')

    transform_matrix = float('inf') * np.ones_like(grid_map, dtype=float)
    transform_matrix[src[0], src[1]] = 0
    if transform_type == 'distance':
        eT = np.zeros_like(grid_map)
    elif transform_type == 'path':
        # obstacle transform: distance of each free cell to the nearest obstacle
        eT = ndimage.distance_transform_cdt(1 - grid_map, distance_type)
    else:
        sys.exit('Unsupported transform type.')

    # set obstacle transform_matrix value to infinity
    for i in range(n_rows):
        for j in range(n_cols):
            if grid_map[i][j] == 1.0:
                transform_matrix[i][j] = float('inf')
    is_visited = np.zeros_like(transform_matrix, dtype=bool)
    is_visited[src[0], src[1]] = True
    traversal_queue = [src]
    # Cells already enqueued, keyed by the original flattened index.
    # A set gives O(1) membership tests; the original list made every
    # enqueue check O(n) over the whole frontier history.
    calculated = {(src[0] - 1) * n_cols + src[1]}

    def is_valid_neighbor(g_i, g_j):
        # Inside the grid and not an obstacle.
        return 0 <= g_i < n_rows and 0 <= g_j < n_cols \
            and not grid_map[g_i][g_j]

    # Wavefront expansion: each popped cell relaxes its own value from the
    # current values of its neighbors, then enqueues unseen neighbors.
    while traversal_queue:
        i, j = traversal_queue.pop(0)
        for k, inc in enumerate(inc_order):
            ni = i + inc[0]
            nj = j + inc[1]
            if is_valid_neighbor(ni, nj):
                is_visited[i][j] = True

                # update transform_matrix
                transform_matrix[i][j] = min(
                    transform_matrix[i][j],
                    transform_matrix[ni][nj] + cost[k] + alpha * eT[ni][nj])

                if not is_visited[ni][nj] \
                        and ((ni - 1) * n_cols + nj) not in calculated:
                    traversal_queue.append((ni, nj))
                    calculated.add((ni - 1) * n_cols + nj)

    return transform_matrix
def get_search_order_increment(start, goal):
    """Return the 8-neighbor visit order used while walking start => goal.

    The order biases expansion toward the goal quadrant: the axis moves
    pointing from start to goal come first, diagonals come last.

    :param start: (row, col) start point
    :param goal: (row, col) goal point
    """
    below = start[0] >= goal[0]   # goal is at or above the start row
    above = start[0] <= goal[0]   # goal is at or below the start row
    right = start[1] >= goal[1]   # goal is at or left of the start col
    left = start[1] <= goal[1]    # goal is at or right of the start col
    if below and right:
        return [[1, 0], [0, 1], [-1, 0], [0, -1],
                [1, 1], [1, -1], [-1, 1], [-1, -1]]
    if above and right:
        return [[-1, 0], [0, 1], [1, 0], [0, -1],
                [-1, 1], [-1, -1], [1, 1], [1, -1]]
    if below and left:
        return [[1, 0], [0, -1], [-1, 0], [0, 1],
                [1, -1], [-1, -1], [1, 1], [-1, 1]]
    if above and left:
        return [[-1, 0], [0, -1], [0, 1], [1, 0],
                [-1, -1], [-1, 1], [1, -1], [1, 1]]
    sys.exit('get_search_order_increment: cannot determine \
start=>goal increment order')
def wavefront(transform_matrix, start, goal):
    """wavefront

    performing wavefront coverage path planning

    Greedily steps to the admissible neighbor with the largest transform
    value; when the current node is boxed in, it backtracks along the
    recorded path until some earlier node still has a free neighbor.

    :param transform_matrix: the transform matrix
    :param start: (row, col) start point of planning
    :param goal: (row, col) goal point of planning
    """
    path = []
    n_rows, n_cols = transform_matrix.shape

    def is_valid_neighbor(g_i, g_j):
        # Admissible = inside the grid, not already on the path, and
        # reachable (a finite transform value; obstacles are inf).
        is_i_valid_bounded = 0 <= g_i < n_rows
        is_j_valid_bounded = 0 <= g_j < n_cols
        if is_i_valid_bounded and is_j_valid_bounded:
            return not is_visited[g_i][g_j] and \
                transform_matrix[g_i][g_j] != float('inf')
        return False

    inc_order = get_search_order_increment(start, goal)

    current_node = start
    is_visited = np.zeros_like(transform_matrix, dtype=bool)

    while current_node != goal:
        i, j = current_node
        path.append((i, j))
        is_visited[i][j] = True

        max_T = float('-inf')
        i_max = (-1, -1)
        i_last = 0
        # Scan the path from its tail backwards until a node with an
        # admissible neighbor is found (i_last counts the backtrack depth).
        for i_last in range(len(path)):
            current_node = path[-1 - i_last]  # get latest node in path

            # Among this node's admissible neighbors, pick the one with the
            # highest transform value.
            for ci, cj in inc_order:
                ni, nj = current_node[0] + ci, current_node[1] + cj
                if is_valid_neighbor(ni, nj) and \
                        transform_matrix[ni][nj] > max_T:
                    i_max = (ni, nj)
                    max_T = transform_matrix[ni][nj]

            if i_max != (-1, -1):
                break

        if i_max == (-1, -1):
            # No admissible neighbor anywhere along the path: give up.
            break
        else:
            current_node = i_max
            if i_last != 0:
                # We had to step back along the path to find a free cell.
                print('backtracing to', current_node)

    path.append(goal)
    return path
def visualize_path(grid_map, start, goal, path):  # pragma: no cover
    """Plot the planned coverage path on top of the grid map.

    When ``do_animation`` is set, the path is replayed point by point;
    otherwise a single static figure is shown.

    :param grid_map: 2d binary map used for planning
    :param start: (row, col) start point
    :param goal: (row, col) goal point
    :param path: list of (row, col) points as returned by wavefront()
    """
    oy, ox = start
    gy, gx = goal
    # Path points are stored as (row, col); flip to plot as (x, y).
    px, py = np.transpose(np.flipud(np.fliplr(path)))

    if not do_animation:
        plt.imshow(grid_map, cmap='Greys')
        plt.plot(ox, oy, "-xy")
        plt.plot(px, py, "-r")
        plt.plot(gx, gy, "-pg")
        plt.show()
    else:
        for ipx, ipy in zip(px, py):
            plt.cla()
            # for stopping simulation with the esc key.
            plt.gcf().canvas.mpl_connect(
                'key_release_event',
                lambda event: [exit(0) if event.key == 'escape' else None])
            plt.imshow(grid_map, cmap='Greys')
            plt.plot(ox, oy, "-xb")
            plt.plot(px, py, "-r")
            plt.plot(gx, gy, "-pg")
            plt.plot(ipx, ipy, "or")
            plt.axis("equal")
            plt.grid(True)
            plt.pause(0.1)
def main():
    """Run both planners on the bundled test map and visualize the paths."""
    dir_path = os.path.dirname(os.path.realpath(__file__))
    img = plt.imread(os.path.join(dir_path, 'map', 'test.png'))
    img = 1 - img  # revert pixel values

    start = (43, 0)
    goal = (0, 0)

    # distance transform wavefront
    DT = transform(img, goal, transform_type='distance')
    DT_path = wavefront(DT, start, goal)
    visualize_path(img, start, goal, DT_path)

    # path transform wavefront
    PT = transform(img, goal, transform_type='path', alpha=0.01)
    PT_path = wavefront(PT, start, goal)
    visualize_path(img, start, goal, PT_path)


# Script entry point.
if __name__ == "__main__":
    main()
| mit |
fabioticconi/scikit-learn | sklearn/datasets/tests/test_20news.py | 280 | 3045 | """Test the 20news downloader, if the data is available."""
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
from sklearn import datasets
def test_20news():
    """Smoke-test fetch_20newsgroups and its category-subsetting logic.

    Skipped unless the 20 newsgroups data has already been downloaded.
    """
    try:
        data = datasets.fetch_20newsgroups(
            subset='all', download_if_missing=False, shuffle=False)
    except IOError:
        raise SkipTest("Download 20 newsgroups to run this test")

    # Extract a reduced dataset (the last two categories, reversed).
    data2cats = datasets.fetch_20newsgroups(
        subset='all', categories=data.target_names[-1:-3:-1], shuffle=False)
    # Check that the ordering of the target_names is the same
    # as the ordering in the full dataset
    assert_equal(data2cats.target_names,
                 data.target_names[-2:])
    # Assert that we have only 0 and 1 as labels
    assert_equal(np.unique(data2cats.target).tolist(), [0, 1])

    # Check that the number of filenames is consistent with data/target
    assert_equal(len(data2cats.filenames), len(data2cats.target))
    assert_equal(len(data2cats.filenames), len(data2cats.data))

    # Check that the first entry of the reduced dataset corresponds to
    # the first entry of the corresponding category in the full dataset
    entry1 = data2cats.data[0]
    category = data2cats.target_names[data2cats.target[0]]
    label = data.target_names.index(category)
    entry2 = data.data[np.where(data.target == label)[0][0]]
    assert_equal(entry1, entry2)
def test_20news_length_consistency():
    """Checks the length consistencies within the bunch

    This is a non-regression test for a bug present in 0.16.1.
    """
    # Probe for the data first so the test skips cleanly when it is missing.
    try:
        datasets.fetch_20newsgroups(
            subset='all', download_if_missing=False, shuffle=False)
    except IOError:
        raise SkipTest("Download 20 newsgroups to run this test")
    # Extract the full dataset and compare item- vs attribute-access lengths.
    bunch = datasets.fetch_20newsgroups(subset='all')
    for key in ('data', 'target', 'filenames'):
        assert_equal(len(bunch[key]), len(getattr(bunch, key)))
def test_20news_vectorized():
    """Shape/dtype checks for fetch_20newsgroups_vectorized (disabled)."""
    # This test is slow.
    raise SkipTest("Test too slow.")

    # NOTE: everything below is intentionally unreachable; it is kept so the
    # test can be re-enabled by removing the SkipTest above.
    bunch = datasets.fetch_20newsgroups_vectorized(subset="train")
    assert_true(sp.isspmatrix_csr(bunch.data))
    assert_equal(bunch.data.shape, (11314, 107428))
    assert_equal(bunch.target.shape[0], 11314)
    assert_equal(bunch.data.dtype, np.float64)

    bunch = datasets.fetch_20newsgroups_vectorized(subset="test")
    assert_true(sp.isspmatrix_csr(bunch.data))
    assert_equal(bunch.data.shape, (7532, 107428))
    assert_equal(bunch.target.shape[0], 7532)
    assert_equal(bunch.data.dtype, np.float64)

    # "all" must be the concatenation of train and test.
    bunch = datasets.fetch_20newsgroups_vectorized(subset="all")
    assert_true(sp.isspmatrix_csr(bunch.data))
    assert_equal(bunch.data.shape, (11314 + 7532, 107428))
    assert_equal(bunch.target.shape[0], 11314 + 7532)
    assert_equal(bunch.data.dtype, np.float64)
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.