| repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
gtrensch/nest-simulator | pynest/nest/raster_plot.py | 15 | 9348 | # -*- coding: utf-8 -*-
#
# raster_plot.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
""" Functions for raster plotting."""
import nest
import numpy
__all__ = [
'extract_events',
'from_data',
'from_device',
'from_file',
'from_file_numpy',
'from_file_pandas'
]
def extract_events(data, time=None, sel=None):
"""Extract all events within a given time interval.
Both time and sel may be used at the same time such that all
events are extracted for which both conditions are true.
Parameters
----------
data : list
Matrix such that
data[:,0] is a vector of all node_ids and
data[:,1] a vector with the corresponding time stamps.
time : list, optional
List with at most two entries such that
        time=[t_max] extracts all events with t < t_max
time=[t_min, t_max] extracts all events with t_min <= t < t_max
sel : list, optional
List of node_ids such that
sel=[node_id1, ... , node_idn] extracts all events from these node_ids.
All others are discarded.
Returns
-------
numpy.array
List of events as (node_id, t) tuples
"""
val = []
if time:
t_max = time[-1]
if len(time) > 1:
t_min = time[0]
else:
t_min = 0
for v in data:
t = v[1]
node_id = v[0]
if time and (t < t_min or t >= t_max):
continue
if not sel or node_id in sel:
val.append(v)
return numpy.array(val)
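# Illustrative usage of extract_events (not part of the original module; the
# sample values are arbitrary): keep events from nodes 1 and 2 that fall in
# the half-open window 10 <= t < 20.
#
#     d = numpy.array([[1, 5.0], [2, 12.0], [3, 15.0], [1, 19.0]])
#     extract_events(d, time=[10, 20], sel=[1, 2])
#     # -> array([[ 2., 12.], [ 1., 19.]])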
def from_data(data, sel=None, **kwargs):
"""Plot raster plot from data array.
Parameters
----------
data : list
Matrix such that
data[:,0] is a vector of all node_ids and
data[:,1] a vector with the corresponding time stamps.
sel : list, optional
List of node_ids such that
sel=[node_id1, ... , node_idn] extracts all events from these node_ids.
All others are discarded.
kwargs:
Parameters passed to _make_plot
"""
if len(data) == 0:
raise nest.kernel.NESTError("No data to plot.")
ts = data[:, 1]
d = extract_events(data, sel=sel)
ts1 = d[:, 1]
node_ids = d[:, 0]
return _make_plot(ts, ts1, node_ids, data[:, 0], **kwargs)
def from_file(fname, **kwargs):
"""Plot raster from file.
Parameters
----------
fname : str or tuple(str) or list(str)
File name or list of file names
If a list of files is given, the data from them is concatenated as if
it had been stored in a single file - useful when MPI is enabled and
data is logged separately for each MPI rank, for example.
kwargs:
Parameters passed to _make_plot
"""
if isinstance(fname, str):
fname = [fname]
if isinstance(fname, (list, tuple)):
try:
global pandas
pandas = __import__('pandas')
from_file_pandas(fname, **kwargs)
except ImportError:
from_file_numpy(fname, **kwargs)
else:
print('fname should be one of str/list(str)/tuple(str).')
def from_file_pandas(fname, **kwargs):
"""Use pandas."""
data = None
for f in fname:
dataFrame = pandas.read_table(f, header=2, skipinitialspace=True)
newdata = dataFrame.values
if data is None:
data = newdata
else:
data = numpy.concatenate((data, newdata))
return from_data(data, **kwargs)
def from_file_numpy(fname, **kwargs):
"""Use numpy."""
data = None
for f in fname:
newdata = numpy.loadtxt(f, skiprows=3)
if data is None:
data = newdata
else:
data = numpy.concatenate((data, newdata))
return from_data(data, **kwargs)
def from_device(detec, **kwargs):
"""
Plot raster from a spike recorder.
Parameters
----------
    detec : nest.NodeCollection
        The spike recorder to read events from
kwargs:
Parameters passed to _make_plot
Raises
------
nest.kernel.NESTError
"""
type_id = nest.GetDefaults(detec.get('model'), 'type_id')
    if type_id != "spike_recorder":
raise nest.kernel.NESTError("Please provide a spike_recorder.")
if detec.get('record_to') == "memory":
ts, node_ids = _from_memory(detec)
if not len(ts):
raise nest.kernel.NESTError("No events recorded!")
if "title" not in kwargs:
kwargs["title"] = "Raster plot from device '%i'" % detec.get('global_id')
if detec.get('time_in_steps'):
xlabel = "Steps"
else:
xlabel = "Time (ms)"
return _make_plot(ts, ts, node_ids, node_ids, xlabel=xlabel, **kwargs)
elif detec.get("record_to") == "ascii":
fname = detec.get("filenames")
return from_file(fname, **kwargs)
else:
raise nest.kernel.NESTError("No data to plot. Make sure that \
record_to is set to either 'ascii' or 'memory'.")
def _from_memory(detec):
ev = detec.get("events")
return ev["times"], ev["senders"]
def _make_plot(ts, ts1, node_ids, neurons, hist=True, hist_binwidth=5.0,
grayscale=False, title=None, xlabel=None):
"""Generic plotting routine.
Constructs a raster plot along with an optional histogram (common part in
all routines above).
Parameters
----------
ts : list
All timestamps
ts1 : list
Timestamps corresponding to node_ids
node_ids : list
Global ids corresponding to ts1
neurons : list
Node IDs of neurons to plot
hist : bool, optional
Display histogram
hist_binwidth : float, optional
Width of histogram bins
grayscale : bool, optional
Plot in grayscale
title : str, optional
Plot title
xlabel : str, optional
Label for x-axis
"""
import matplotlib.pyplot as plt
plt.figure()
if grayscale:
color_marker = ".k"
color_bar = "gray"
else:
color_marker = "."
color_bar = "blue"
color_edge = "black"
if xlabel is None:
xlabel = "Time (ms)"
ylabel = "Neuron ID"
if hist:
ax1 = plt.axes([0.1, 0.3, 0.85, 0.6])
plotid = plt.plot(ts1, node_ids, color_marker)
plt.ylabel(ylabel)
plt.xticks([])
xlim = plt.xlim()
plt.axes([0.1, 0.1, 0.85, 0.17])
t_bins = numpy.arange(
numpy.amin(ts), numpy.amax(ts),
float(hist_binwidth)
)
n, _ = _histogram(ts, bins=t_bins)
num_neurons = len(numpy.unique(neurons))
heights = 1000 * n / (hist_binwidth * num_neurons)
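        # 1000 / hist_binwidth converts spike counts per (ms-wide) bin into
        # events per second, and dividing by the number of distinct neurons
        # gives the population-averaged firing rate in Hz.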
plt.bar(t_bins, heights, width=hist_binwidth, color=color_bar,
edgecolor=color_edge)
plt.yticks([
int(x) for x in
numpy.linspace(0.0, int(max(heights) * 1.1) + 5, 4)
])
plt.ylabel("Rate (Hz)")
plt.xlabel(xlabel)
plt.xlim(xlim)
plt.axes(ax1)
else:
plotid = plt.plot(ts1, node_ids, color_marker)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
if title is None:
plt.title("Raster plot")
else:
plt.title(title)
plt.draw()
return plotid
def _histogram(a, bins=10, bin_range=None, normed=False):
"""Calculate histogram for data.
Parameters
----------
a : list
Data to calculate histogram for
bins : int, optional
Number of bins
    bin_range : tuple, optional
Range of bins
normed : bool, optional
Whether distribution should be normalized
Raises
------
ValueError
"""
from numpy import asarray, iterable, linspace, sort, concatenate
a = asarray(a).ravel()
if bin_range is not None:
mn, mx = bin_range
if mn > mx:
raise ValueError("max must be larger than min in range parameter")
if not iterable(bins):
if bin_range is None:
bin_range = (a.min(), a.max())
mn, mx = [mi + 0.0 for mi in bin_range]
if mn == mx:
mn -= 0.5
mx += 0.5
bins = linspace(mn, mx, bins, endpoint=False)
else:
if (bins[1:] - bins[:-1] < 0).any():
raise ValueError("bins must increase monotonically")
# best block size probably depends on processor cache size
block = 65536
n = sort(a[:block]).searchsorted(bins)
for i in range(block, a.size, block):
n += sort(a[i:i + block]).searchsorted(bins)
n = concatenate([n, [len(a)]])
n = n[1:] - n[:-1]
if normed:
db = bins[1] - bins[0]
return 1.0 / (a.size * db) * n, bins
else:
return n, bins
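# How the block loop above counts: sorting each block and calling
# searchsorted(bins) yields, per block, the cumulative number of samples below
# every bin edge; summing these across blocks and differencing adjacent
# entries gives the per-bin counts without sorting the whole array at once.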
| gpl-2.0 |
gagneurlab/concise | tests/devel_concise_quasi_X.py | 2 | 1030 | # test quasi X
import sys
import os
sys.path.append(os.path.dirname(os.path.realpath(__file__)) + "/../../")
from functions.tests import concise_load_data as ld
from functions import concise
from functions import get_data
import pandas as pd
import numpy as np
from imp import reload
from pprint import pprint
pd.set_option("display.max_columns", 100)
# get the data
param, X_feat, X_seq, y, id_vec = ld.load_example_data()
# no positional bias
# TODO: fix the issue with computing the gradient
# - why does it compute the
def test_correct_spline_fit():
# test the nice print:
dc = concise.Concise(n_epochs=50, n_splines=10, **param)
dc.train(X_feat, X_seq, y, X_feat, X_seq, y, n_cores=3)
# why is the derivative equal to 0?
w = dc.get_weights()
w["spline_quasi_X"][0][0]
w["spline_quasi_X"][0][1]
# why not y, n_splines ?!:
y.shape[0], 10
dc.get_accuracy()
dc.print_weights()
# dc.plot_accuracy()
# dc.plot_pos_bias()
y_pred = dc.predict(X_feat, X_seq)
y_pred
| mit |
sameeptandon/sail-car-log | process/ProjectMapOnVideoDense.py | 1 | 5676 | from Q50_config import *
import sys, os
from GPSReader import *
from GPSTransforms import *
from VideoReader import *
from LidarTransforms import *
from ColorMap import *
from transformations import euler_matrix
import numpy as np
import cv2
from ArgParser import *
from scipy.interpolate import griddata
import matplotlib.pyplot as plt
from matplotlib.cm import jet, rainbow
from LidarIntegrator import start_fn
WINDOW = 2  # 50*2.5
#IMG_WIDTH = 1280
#IMG_HEIGHT = 960
IMG_WIDTH = 2080
IMG_HEIGHT = 1552
def cloudToPixels(cam, pts_wrt_cam):
width = 4
(pix, J) = cv2.projectPoints(pts_wrt_cam.transpose(), np.array([0.0,0.0,0.0]), np.array([0.0,0.0,0.0]), cam['KK'], cam['distort'])
pix = pix.transpose()
pix = np.around(pix[:, 0, :])
pix = pix.astype(np.int32)
mask = np.logical_and(True, pix[0,:] > 0 + width/2)
mask = np.logical_and(mask, pix[1,:] > 0 + width/2)
mask = np.logical_and(mask, pix[0,:] < 2080 - width/2)
mask = np.logical_and(mask, pix[1,:] < 1552 - width/2)
mask = np.logical_and(mask, pts_wrt_cam[2,:] > 0)
dist_sqr = np.sum( pts_wrt_cam[0:3, :] ** 2, axis = 0)
mask = np.logical_and(mask, dist_sqr > 3)
return (pix, mask)
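# The mask built above keeps only points that project at least width/2 pixels
# inside the hard-coded 2080x1552 bounds (the same values as IMG_WIDTH and
# IMG_HEIGHT), lie in front of the camera (z > 0), and have a squared
# distance greater than 3, so downstream drawing code can index the image
# without further bounds checks.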
def localMapToPixels(map_data, imu_transforms_t, T_from_i_to_l, cam):
# load nearby map frames
pts_wrt_imu_0 = array(map_data[:,0:3]).transpose()
pts_wrt_imu_0 = np.vstack((pts_wrt_imu_0,
np.ones((1,pts_wrt_imu_0.shape[1]))))
# transform points from imu_0 to imu_t
pts_wrt_imu_t = np.dot( np.linalg.inv(imu_transforms_t), pts_wrt_imu_0)
# transform points from imu_t to lidar_t
    pts_wrt_lidar_t = np.dot(T_from_i_to_l, pts_wrt_imu_t)
# transform points from lidar_t to camera_t
pts_wrt_camera_t = pts_wrt_lidar_t.transpose()[:, 0:3] + cam['displacement_from_l_to_c_in_lidar_frame']
pts_wrt_camera_t = dot(R_to_c_from_l(cam),
pts_wrt_camera_t.transpose())
pts_wrt_camera_t = np.vstack((pts_wrt_camera_t,
np.ones((1,pts_wrt_camera_t.shape[1]))))
pts_wrt_camera_t = dot(cam['E'], pts_wrt_camera_t)
pts_wrt_camera_t = pts_wrt_camera_t[0:3,:]
# reproject camera_t points in camera frame
(pix, mask) = cloudToPixels(cam, pts_wrt_camera_t)
return (pix, mask, pts_wrt_imu_t)
def trackbarOnchange(t, prev_t):
if abs(t - prev_t) > 1:
video_reader.setFrame(t)
if __name__ == '__main__':
args = parse_args(sys.argv[1], sys.argv[2])
cam_num = int(sys.argv[2][-5])
video_file = args['video']
params = args['params']
cam = params['cam'][cam_num-1]
video_reader = VideoReader(video_file)
gps_reader = GPSReader(args['gps'])
GPSData = gps_reader.getNumericData()
imu_transforms = IMUTransforms(GPSData)
T_from_i_to_l = np.linalg.inv(params['lidar']['T_from_l_to_i'])
all_data = np.load(sys.argv[3])
map_data = all_data['data']
#map_data = map_data[map_data[:,3] > 60, :]
# map points are defined w.r.t the IMU position at time 0
# each entry in map_data is (x,y,z,intensity,framenum).
print "Hit 'q' to quit"
trackbarInit = False
interp_grid = np.mgrid[0:IMG_WIDTH, 0:IMG_HEIGHT]
#pix_list = list()
#depth_list = list()
#img_list = list()
fps = 10 # PARAM
fourcc = cv2.cv.CV_FOURCC(*'MJPG')
video_writer = cv2.VideoWriter()
video_writer.open('tmp.avi', fourcc, fps, (IMG_WIDTH, IMG_HEIGHT)) # PARAM
video_reader.setFrame(start_fn)
#while len(pix_list) < 10:
while True:
for count in range(10):
(success, I) = video_reader.getNextFrame()
#print I.shape
if not success:
print 'Done reading video', video_file
break
t = video_reader.framenum - 1
print t
mask_window = (map_data[:,4] < t + WINDOW) & (map_data[:,4] > t )
map_data_copy = array(map_data[mask_window, :])
if map_data_copy.size == 0:
print 'Map data empty'
break
# reproject
(pix, mask, pts_wrt_imu_t) = localMapToPixels(map_data_copy, imu_transforms[t,:,:], T_from_i_to_l, cam)
# draw
pix = pix[:, mask]
intensity = map_data_copy[mask, 3]
depth = pts_wrt_imu_t[0, mask]
img_interp = griddata(pix.T, depth, interp_grid.T)
img_interp[np.isnan(img_interp)] = max(depth) # PARAM
print max(depth)
img_color = rainbow(img_interp / max(depth)) # PARAM
#plt.imshow(img_color)
#plt.show()
img_color = img_color[:, :, 0:3]
img_color = np.uint8(img_color * 255)
img_out = np.uint8(0.0 * I + 1.0*img_color)
video_writer.write(img_out)
#pix_list.append(pix[:, mask])
#depth_list.append(depth)
#img_list.append(I)
#heat_colors = heatColorMapFast(depth, 0, 100)
#for p in range(4):
#I[pix[1,mask]+p, pix[0,mask], :] = heat_colors[0,:,:]
#I[pix[1,mask], pix[0,mask]+p, :] = heat_colors[0,:,:]
#cv2.imshow(video_file, cv2.pyrDown(I))
#if not trackbarInit:
#cv2.createTrackbar('trackbar', video_file, 0, int(video_reader.total_frame_count), lambda x: trackbarOnchange(x, t))
#trackbarInit = True
#else:
#cv2.setTrackbarPos('trackbar', video_file, t)
#keycode = cv2.waitKey(1)
#if keycode == 113:
#break
video_writer.release()
print 'Played %d frames' % t
'''
# Save pickled data
import pickle
data = {
'pix_list': pix_list,
'depth_list': depth_list,
'img_list': img_list
}
pickle.dump(data, open('pix_depth.pkl', 'wb'))
'''
| bsd-2-clause |
geodynamics/specfem1d | Fortran_version/plot_script_using_python.py | 2 | 1288 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 22 15:52:24 2014
Script to plot the seismograms generated by SPECFEM2D.
The arguments must be valid paths to existing seismogram files or
one of the recognized options (--hold, --grid).
@author: Alexis Bottero (alexis.bottero@gmail.com)
"""
from __future__ import (absolute_import, division, print_function)
import argparse
import numpy as np # NumPy (multidimensional arrays, linear algebra, ...)
import matplotlib.pyplot as plt # Matplotlib's pyplot: MATLAB-like syntax
parser = argparse.ArgumentParser(
description='Plot seismograms generated by SPECFEM2D')
parser.add_argument('--hold', action='store_true',
help='Plot all seismograms on the same figure')
parser.add_argument('--grid', action='store_true',
help='Show a grid on the plot')
parser.add_argument('files', nargs='+', type=argparse.FileType('r'),
help='Files to be plotted')
args = parser.parse_args()
for seismo in args.files:
data = np.loadtxt(seismo)
if not args.hold:
plt.figure()
plt.plot(data[:, 0], data[:, 1])
plt.xlim(data[0, 0], data[-1, 0])
plt.grid(args.grid)
plt.hold(args.hold)
if not args.hold:
plt.title(seismo.name)
plt.show()
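# Example invocation (the file names are hypothetical):
#   ./plot_script_using_python.py --hold --grid OUTPUT_FILES/*.semd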
| gpl-2.0 |
bikong2/scikit-learn | sklearn/tests/test_base.py | 216 | 7045 | # Author: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.base import BaseEstimator, clone, is_classifier
from sklearn.svm import SVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.utils import deprecated
#############################################################################
# A few test classes
class MyEstimator(BaseEstimator):
def __init__(self, l1=0, empty=None):
self.l1 = l1
self.empty = empty
class K(BaseEstimator):
def __init__(self, c=None, d=None):
self.c = c
self.d = d
class T(BaseEstimator):
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class DeprecatedAttributeEstimator(BaseEstimator):
def __init__(self, a=None, b=None):
self.a = a
if b is not None:
DeprecationWarning("b is deprecated and renamed 'a'")
self.a = b
@property
@deprecated("Parameter 'b' is deprecated and renamed to 'a'")
def b(self):
return self._b
class Buggy(BaseEstimator):
" A buggy estimator that does not set its parameters right. "
def __init__(self, a=None):
self.a = 1
class NoEstimator(object):
def __init__(self):
pass
def fit(self, X=None, y=None):
return self
def predict(self, X=None):
return None
class VargEstimator(BaseEstimator):
"""Sklearn estimators shouldn't have vargs."""
def __init__(self, *vargs):
pass
#############################################################################
# The tests
def test_clone():
# Tests that clone creates a correct deep copy.
# We create an estimator, make a copy of its original state
# (which, in this case, is the current state of the estimator),
# and check that the obtained copy is a correct deep copy.
from sklearn.feature_selection import SelectFpr, f_classif
selector = SelectFpr(f_classif, alpha=0.1)
new_selector = clone(selector)
assert_true(selector is not new_selector)
assert_equal(selector.get_params(), new_selector.get_params())
selector = SelectFpr(f_classif, alpha=np.zeros((10, 2)))
new_selector = clone(selector)
assert_true(selector is not new_selector)
def test_clone_2():
# Tests that clone doesn't copy everything.
# We first create an estimator, give it an own attribute, and
# make a copy of its original state. Then we check that the copy doesn't
# have the specific attribute we manually added to the initial estimator.
from sklearn.feature_selection import SelectFpr, f_classif
selector = SelectFpr(f_classif, alpha=0.1)
selector.own_attribute = "test"
new_selector = clone(selector)
assert_false(hasattr(new_selector, "own_attribute"))
def test_clone_buggy():
# Check that clone raises an error on buggy estimators.
buggy = Buggy()
buggy.a = 2
assert_raises(RuntimeError, clone, buggy)
no_estimator = NoEstimator()
assert_raises(TypeError, clone, no_estimator)
varg_est = VargEstimator()
assert_raises(RuntimeError, clone, varg_est)
def test_clone_empty_array():
# Regression test for cloning estimators with empty arrays
clf = MyEstimator(empty=np.array([]))
clf2 = clone(clf)
assert_array_equal(clf.empty, clf2.empty)
clf = MyEstimator(empty=sp.csr_matrix(np.array([[0]])))
clf2 = clone(clf)
assert_array_equal(clf.empty.data, clf2.empty.data)
def test_clone_nan():
# Regression test for cloning estimators with default parameter as np.nan
clf = MyEstimator(empty=np.nan)
clf2 = clone(clf)
assert_true(clf.empty is clf2.empty)
def test_repr():
# Smoke test the repr of the base estimator.
my_estimator = MyEstimator()
repr(my_estimator)
test = T(K(), K())
assert_equal(
repr(test),
"T(a=K(c=None, d=None), b=K(c=None, d=None))"
)
some_est = T(a=["long_params"] * 1000)
assert_equal(len(repr(some_est)), 415)
def test_str():
# Smoke test the str of the base estimator
my_estimator = MyEstimator()
str(my_estimator)
def test_get_params():
test = T(K(), K())
assert_true('a__d' in test.get_params(deep=True))
assert_true('a__d' not in test.get_params(deep=False))
test.set_params(a__d=2)
assert_true(test.a.d == 2)
assert_raises(ValueError, test.set_params, a__a=2)
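# The 'a__d' key above follows scikit-learn's nested-parameter naming
# convention, <component>__<parameter>. Illustrative example:
#   t = T(a=K())
#   t.set_params(a__c=5)  # sets the 'c' attribute of the nested K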
def test_get_params_deprecated():
# deprecated attribute should not show up as params
est = DeprecatedAttributeEstimator(a=1)
assert_true('a' in est.get_params())
assert_true('a' in est.get_params(deep=True))
assert_true('a' in est.get_params(deep=False))
assert_true('b' not in est.get_params())
assert_true('b' not in est.get_params(deep=True))
assert_true('b' not in est.get_params(deep=False))
def test_is_classifier():
svc = SVC()
assert_true(is_classifier(svc))
assert_true(is_classifier(GridSearchCV(svc, {'C': [0.1, 1]})))
assert_true(is_classifier(Pipeline([('svc', svc)])))
assert_true(is_classifier(Pipeline([('svc_cv',
GridSearchCV(svc, {'C': [0.1, 1]}))])))
def test_set_params():
# test nested estimator parameter setting
clf = Pipeline([("svc", SVC())])
# non-existing parameter in svc
assert_raises(ValueError, clf.set_params, svc__stupid_param=True)
# non-existing parameter of pipeline
assert_raises(ValueError, clf.set_params, svm__stupid_param=True)
# we don't currently catch if the things in pipeline are estimators
# bad_pipeline = Pipeline([("bad", NoEstimator())])
# assert_raises(AttributeError, bad_pipeline.set_params,
# bad__stupid_param=True)
def test_score_sample_weight():
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn import datasets
rng = np.random.RandomState(0)
# test both ClassifierMixin and RegressorMixin
estimators = [DecisionTreeClassifier(max_depth=2),
DecisionTreeRegressor(max_depth=2)]
sets = [datasets.load_iris(),
datasets.load_boston()]
for est, ds in zip(estimators, sets):
est.fit(ds.data, ds.target)
# generate random sample weights
sample_weight = rng.randint(1, 10, size=len(ds.target))
# check that the score with and without sample weights are different
assert_not_equal(est.score(ds.data, ds.target),
est.score(ds.data, ds.target,
sample_weight=sample_weight),
msg="Unweighted and weighted scores "
"are unexpectedly equal")
| bsd-3-clause |
nlholdem/icodoom | ICO1/deep_feedback_learning_old/testImgProc.py | 3 | 3766 | #!/usr/bin/python3
import deep_feedback_learning
import numpy as np
import matplotlib.pyplot as plt
import cv2
import sys
import itertools
from PIL import Image
def buildFilters():
ksize = 35
sigma = 5.
gamma = 1.
theta_vals = np.linspace(0., np.pi, 4, endpoint=False)
# lambd_vals = (3,7)
# sigma_vals = (1.5,3)
lambd_vals = (1.5, 3, 7, 17)
sigma_vals = (0.5, 1.5, 3, 7)
"""
theta: orientation
lambda: wavelength
sigma: standard deviation
gamma: aspect ratio
"""
coeffs = ((theta, lambd, sigma) for lambd, sigma in zip(lambd_vals, sigma_vals) for theta in theta_vals)
filters = [(cv2.getGaborKernel((ksize,ksize), coeff[2], coeff[0], coeff[1], gamma)/(0.01*ksize*ksize*sigma), coeff[2])
for coeff in coeffs]
for f in filters:
print("Filter: Min: ", np.amin(f[0]), " Max: ", np.amax(f[0]))
img = Image.fromarray((f[0] * 128 / np.amax(f[0])))
img.show()
return filters
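# Note: zip(lambd_vals, sigma_vals) pairs each wavelength with a single
# standard deviation, so the bank holds 4 orientations x 4 (lambda, sigma)
# pairs = 16 kernels rather than the 64-kernel full cross product.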
def main(argv):
static = Image.open("/home/paul/Pictures/PBR.png")
staticArray = np.array(static)
# gray = 0.3*staticArray[:,:,2] + 0.59*staticArray[:,:,1] + 0.11*staticArray[:,:,0]
gray = cv2.cvtColor(staticArray, cv2.COLOR_RGB2GRAY)
cv2.imwrite('/home/paul/tmp/Images/gray.jpg',gray)
# cv2.imshow('thing', gray)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
# gray = np.sum(staticArray, axis=2)
print("shape: ", staticArray.shape)
gray = gray - np.mean(gray)
width = gray.shape[1]
height = gray.shape[0]
thresh = 0.
print("width: ", width, " height: ", height)
img = Image.fromarray(gray)
# img.show()
filters = buildFilters()
print("there are ", len(filters), " filters")
imgArray = []
i=0
inputImage = np.zeros((height, width))
posImage = np.zeros((height, width))
negImage = np.zeros((height, width))
# inputImage.fill(0.)
# posImage.fill(0.)
# negImage.fill(0.)
for f in filters:
print("sigma: ", f[1])
gray1 = cv2.filter2D(gray, -1, f[0])
print("Filtered: Min: ", np.amin(gray1), " Max: ", np.amax(gray1))
# print("max: ", np.amax(gray1), " min: ", np.amin(gray1))
# print("** max: ", np.amax(gray), " min: ", np.amin(gray))
# gray1 = cv2.resize(gray1, (int(width / f[1]), int(height / f[1])))
print("x: ", int(width / f[1]), " y: ", int(height / f[1]))
negImage[np.where(gray1<thresh)] += gray1[np.where(gray1<thresh)] / float(len(filters))
posImage[np.where(gray1>thresh)] += gray1[np.where(gray1>thresh)] / float(len(filters))
inputImage = np.append(inputImage, gray1)
print("inputImage size: ", inputImage.shape)
# imgArray.append(Image.fromarray(gray1))
# imgArray[i].show()
# i += 1
print("Pos Max: ", np.amax(posImage), " Min: ", np.amin(posImage))
print("Neg Max: ", np.amax(negImage), " Min: ", np.amin(negImage))
# posImage = 255. * (posImage - np.amin(posImage)) / (np.amax(posImage) - np.amin(posImage))
# negImage = 255. * (negImage - np.amin(negImage)) / (np.amax(negImage) - np.amin(negImage))
negImage = -negImage
imgArray.append(Image.fromarray(posImage))
imgArray.append(Image.fromarray(negImage))
# cv2.imshow('Pos', posImage)
# cv2.imshow('Neg', negImage)
cv2.imwrite('/home/paul/tmp/Images/pos.jpg',posImage)
cv2.imwrite('/home/paul/tmp/Images/neg.jpg',negImage)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
# imgArray[0].show()
# imgArray[1].show()
# input('press a key')
print("**inputImage size: ", inputImage.shape)
if __name__ == '__main__':
    # require the arguments named in the usage string
    if len(sys.argv) < 5:
        print("usage: loadWeights epoch n_filters width height")
else:
main(sys.argv[1:])
| gpl-3.0 |
equialgo/scikit-learn | examples/cluster/plot_agglomerative_clustering_metrics.py | 402 | 4492 | """
Agglomerative clustering with different metrics
===============================================
Demonstrates the effect of different metrics on the hierarchical clustering.
The example is engineered to show the effect of the choice of different
metrics. It is applied to waveforms, which can be seen as
high-dimensional vectors. Indeed, the difference between metrics is
usually more pronounced in high dimension (in particular for euclidean
and cityblock).
We generate data from three groups of waveforms. Two of the waveforms
(waveform 1 and waveform 2) are proportional to one another. The cosine
distance is invariant to a scaling of the data; as a result, it cannot
distinguish these two waveforms. Thus even with no noise, clustering
using this distance will not separate out waveform 1 and 2.
We add observation noise to these waveforms. We generate very sparse
noise: only 6% of the time points contain noise. As a result, the
l1 norm of this noise (i.e. the "cityblock" distance) is much smaller than its
l2 norm (the "euclidean" distance). This can be seen in the inter-class
distance matrices: the values on the diagonal, that characterize the
spread of the class, are much bigger for the Euclidean distance than for
the cityblock distance.
When we apply clustering to the data, we find that the clustering
reflects what was in the distance matrices. Indeed, for the Euclidean
distance, the classes are ill-separated because of the noise, and thus
the clustering does not separate the waveforms. For the cityblock
distance, the separation is good and the waveform classes are recovered.
Finally, the cosine distance does not separate waveforms 1 and 2 at all,
thus the clustering puts them in the same cluster.
"""
# Author: Gael Varoquaux
# License: BSD 3-Clause or CC-0
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import pairwise_distances
np.random.seed(0)
# Generate waveform data
n_features = 2000
t = np.pi * np.linspace(0, 1, n_features)
def sqr(x):
return np.sign(np.cos(x))
X = list()
y = list()
for i, (phi, a) in enumerate([(.5, .15), (.5, .6), (.3, .2)]):
for _ in range(30):
phase_noise = .01 * np.random.normal()
amplitude_noise = .04 * np.random.normal()
additional_noise = 1 - 2 * np.random.rand(n_features)
# Make the noise sparse
additional_noise[np.abs(additional_noise) < .997] = 0
X.append(12 * ((a + amplitude_noise)
* (sqr(6 * (t + phi + phase_noise)))
+ additional_noise))
y.append(i)
X = np.array(X)
y = np.array(y)
n_clusters = 3
labels = ('Waveform 1', 'Waveform 2', 'Waveform 3')
# Plot the ground-truth labelling
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c, n in zip(range(n_clusters), 'rgb',
labels):
lines = plt.plot(X[y == l].T, c=c, alpha=.5)
lines[0].set_label(n)
plt.legend(loc='best')
plt.axis('tight')
plt.axis('off')
plt.suptitle("Ground truth", size=20)
# Plot the distances
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
avg_dist = np.zeros((n_clusters, n_clusters))
plt.figure(figsize=(5, 4.5))
for i in range(n_clusters):
for j in range(n_clusters):
avg_dist[i, j] = pairwise_distances(X[y == i], X[y == j],
metric=metric).mean()
avg_dist /= avg_dist.max()
for i in range(n_clusters):
for j in range(n_clusters):
plt.text(i, j, '%5.3f' % avg_dist[i, j],
verticalalignment='center',
horizontalalignment='center')
plt.imshow(avg_dist, interpolation='nearest', cmap=plt.cm.gnuplot2,
vmin=0)
plt.xticks(range(n_clusters), labels, rotation=45)
plt.yticks(range(n_clusters), labels)
plt.colorbar()
plt.suptitle("Interclass %s distances" % metric, size=18)
plt.tight_layout()
# Plot clustering results
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
model = AgglomerativeClustering(n_clusters=n_clusters,
linkage="average", affinity=metric)
model.fit(X)
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c in zip(np.arange(model.n_clusters), 'rgbk'):
plt.plot(X[model.labels_ == l].T, c=c, alpha=.5)
plt.axis('tight')
plt.axis('off')
plt.suptitle("AgglomerativeClustering(affinity=%s)" % metric, size=20)
plt.show()
| bsd-3-clause |
pnedunuri/scikit-learn | sklearn/manifold/locally_linear.py | 206 | 25061 | """Locally Linear Embedding"""
# Author: Fabian Pedregosa -- <fabian.pedregosa@inria.fr>
# Jake Vanderplas -- <vanderplas@astro.washington.edu>
# License: BSD 3 clause (C) INRIA 2011
import numpy as np
from scipy.linalg import eigh, svd, qr, solve
from scipy.sparse import eye, csr_matrix
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.arpack import eigsh
from ..utils.validation import check_is_fitted
from ..utils.validation import FLOAT_DTYPES
from ..neighbors import NearestNeighbors
def barycenter_weights(X, Z, reg=1e-3):
"""Compute barycenter weights of X from Y along the first axis
We estimate the weights to assign to each point in Y[i] to recover
the point X[i]. The barycenter weights sum to 1.
Parameters
----------
X : array-like, shape (n_samples, n_dim)
Z : array-like, shape (n_samples, n_neighbors, n_dim)
reg: float, optional
amount of regularization to add for the problem to be
well-posed in the case of n_neighbors > n_dim
Returns
-------
B : array-like, shape (n_samples, n_neighbors)
Notes
-----
See developers note for more information.
"""
X = check_array(X, dtype=FLOAT_DTYPES)
Z = check_array(Z, dtype=FLOAT_DTYPES, allow_nd=True)
n_samples, n_neighbors = X.shape[0], Z.shape[1]
B = np.empty((n_samples, n_neighbors), dtype=X.dtype)
v = np.ones(n_neighbors, dtype=X.dtype)
# this might raise a LinalgError if G is singular and has trace
# zero
for i, A in enumerate(Z.transpose(0, 2, 1)):
C = A.T - X[i] # broadcasting
G = np.dot(C, C.T)
trace = np.trace(G)
if trace > 0:
R = reg * trace
else:
R = reg
G.flat[::Z.shape[1] + 1] += R
w = solve(G, v, sym_pos=True)
B[i, :] = w / np.sum(w)
return B
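# Illustrative sanity check (synthetic data, not from the library docs): the
# returned weights reconstruct each X[i] from its neighbour set Z[i] and sum
# to one along axis 1.
#
#     X = np.random.rand(5, 3)
#     Z = np.random.rand(5, 4, 3)      # 4 neighbours per sample
#     B = barycenter_weights(X, Z)
#     np.allclose(B.sum(axis=1), 1.0)  # -> True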
def barycenter_kneighbors_graph(X, n_neighbors, reg=1e-3):
"""Computes the barycenter weighted graph of k-Neighbors for points in X
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, sparse array, precomputed tree, or NearestNeighbors
object.
n_neighbors : int
Number of neighbors for each sample.
reg : float, optional
Amount of regularization when solving the least-squares
problem. Only relevant if mode='barycenter'. If None, use the
default.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
See also
--------
sklearn.neighbors.kneighbors_graph
sklearn.neighbors.radius_neighbors_graph
"""
knn = NearestNeighbors(n_neighbors + 1).fit(X)
X = knn._fit_X
n_samples = X.shape[0]
ind = knn.kneighbors(X, return_distance=False)[:, 1:]
data = barycenter_weights(X, X[ind], reg=reg)
indptr = np.arange(0, n_samples * n_neighbors + 1, n_neighbors)
return csr_matrix((data.ravel(), ind.ravel(), indptr),
shape=(n_samples, n_samples))
def null_space(M, k, k_skip=1, eigen_solver='arpack', tol=1E-6, max_iter=100,
random_state=None):
"""
Find the null space of a matrix M.
Parameters
----------
M : {array, matrix, sparse matrix, LinearOperator}
Input covariance matrix: should be symmetric positive semi-definite
k : integer
Number of eigenvalues/vectors to return
k_skip : integer, optional
Number of low eigenvalues to skip.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method.
Not used if eigen_solver=='dense'.
max_iter : maximum number of iterations for 'arpack' method
not used if eigen_solver=='dense'
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
"""
if eigen_solver == 'auto':
if M.shape[0] > 200 and k + k_skip < 10:
eigen_solver = 'arpack'
else:
eigen_solver = 'dense'
if eigen_solver == 'arpack':
random_state = check_random_state(random_state)
v0 = random_state.rand(M.shape[0])
try:
eigen_values, eigen_vectors = eigsh(M, k + k_skip, sigma=0.0,
tol=tol, maxiter=max_iter,
v0=v0)
except RuntimeError as msg:
raise ValueError("Error in determining null-space with ARPACK. "
"Error message: '%s'. "
"Note that method='arpack' can fail when the "
"weight matrix is singular or otherwise "
"ill-behaved. method='dense' is recommended. "
"See online documentation for more information."
% msg)
return eigen_vectors[:, k_skip:], np.sum(eigen_values[k_skip:])
elif eigen_solver == 'dense':
if hasattr(M, 'toarray'):
M = M.toarray()
eigen_values, eigen_vectors = eigh(
M, eigvals=(k_skip, k + k_skip - 1), overwrite_a=True)
index = np.argsort(np.abs(eigen_values))
return eigen_vectors[:, index], np.sum(eigen_values)
else:
raise ValueError("Unrecognized eigen_solver '%s'" % eigen_solver)
def locally_linear_embedding(
X, n_neighbors, n_components, reg=1e-3, eigen_solver='auto', tol=1e-6,
max_iter=100, method='standard', hessian_tol=1E-4, modified_tol=1E-12,
random_state=None):
"""Perform a Locally Linear Embedding analysis on the data.
Read more in the :ref:`User Guide <locally_linear_embedding>`.
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, sparse array, precomputed tree, or NearestNeighbors
object.
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold.
reg : float
regularization constant, multiplies the trace of the local covariance
matrix of the distances.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method
Not used if eigen_solver=='dense'.
max_iter : integer
maximum number of iterations for the arpack solver.
method : {'standard', 'hessian', 'modified', 'ltsa'}
standard : use the standard locally linear embedding algorithm.
see reference [1]_
hessian : use the Hessian eigenmap method. This method requires
            n_neighbors > n_components * (n_components + 3) / 2.
see reference [2]_
modified : use the modified locally linear embedding algorithm.
see reference [3]_
ltsa : use local tangent space alignment algorithm
see reference [4]_
hessian_tol : float, optional
Tolerance for Hessian eigenmapping method.
Only used if method == 'hessian'
modified_tol : float, optional
Tolerance for modified LLE method.
Only used if method == 'modified'
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
Returns
-------
Y : array-like, shape [n_samples, n_components]
Embedding vectors.
squared_error : float
Reconstruction error for the embedding vectors. Equivalent to
``norm(Y - W Y, 'fro')**2``, where W are the reconstruction weights.
References
----------
.. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
by locally linear embedding. Science 290:2323 (2000).`
.. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
linear embedding techniques for high-dimensional data.
Proc Natl Acad Sci U S A. 100:5591 (2003).`
.. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
Embedding Using Multiple Weights.`
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
.. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
dimensionality reduction via tangent space alignment.
Journal of Shanghai Univ. 8:406 (2004)`
"""
if eigen_solver not in ('auto', 'arpack', 'dense'):
raise ValueError("unrecognized eigen_solver '%s'" % eigen_solver)
if method not in ('standard', 'hessian', 'modified', 'ltsa'):
raise ValueError("unrecognized method '%s'" % method)
nbrs = NearestNeighbors(n_neighbors=n_neighbors + 1)
nbrs.fit(X)
X = nbrs._fit_X
N, d_in = X.shape
if n_components > d_in:
raise ValueError("output dimension must be less than or equal "
"to input dimension")
if n_neighbors >= N:
raise ValueError("n_neighbors must be less than number of points")
if n_neighbors <= 0:
raise ValueError("n_neighbors must be positive")
M_sparse = (eigen_solver != 'dense')
if method == 'standard':
W = barycenter_kneighbors_graph(
nbrs, n_neighbors=n_neighbors, reg=reg)
# we'll compute M = (I-W)'(I-W)
# depending on the solver, we'll do this differently
if M_sparse:
M = eye(*W.shape, format=W.format) - W
M = (M.T * M).tocsr()
else:
M = (W.T * W - W.T - W).toarray()
            M.flat[::M.shape[0] + 1] += 1  # add I: M = W'W - W' - W + I = (I - W)'(I - W)
elif method == 'hessian':
dp = n_components * (n_components + 1) // 2
if n_neighbors <= n_components + dp:
raise ValueError("for method='hessian', n_neighbors must be "
"greater than "
"[n_components * (n_components + 3) / 2]")
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
Yi = np.empty((n_neighbors, 1 + n_components + dp), dtype=np.float)
Yi[:, 0] = 1
M = np.zeros((N, N), dtype=np.float)
use_svd = (n_neighbors > d_in)
for i in range(N):
Gi = X[neighbors[i]]
Gi -= Gi.mean(0)
#build Hessian estimator
if use_svd:
U = svd(Gi, full_matrices=0)[0]
else:
Ci = np.dot(Gi, Gi.T)
U = eigh(Ci)[1][:, ::-1]
Yi[:, 1:1 + n_components] = U[:, :n_components]
j = 1 + n_components
for k in range(n_components):
Yi[:, j:j + n_components - k] = (U[:, k:k + 1]
* U[:, k:n_components])
j += n_components - k
Q, R = qr(Yi)
w = Q[:, n_components + 1:]
S = w.sum(0)
S[np.where(abs(S) < hessian_tol)] = 1
w /= S
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] += np.dot(w, w.T)
if M_sparse:
M = csr_matrix(M)
elif method == 'modified':
if n_neighbors < n_components:
raise ValueError("modified LLE requires "
"n_neighbors >= n_components")
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
#find the eigenvectors and eigenvalues of each local covariance
# matrix. We want V[i] to be a [n_neighbors x n_neighbors] matrix,
# where the columns are eigenvectors
V = np.zeros((N, n_neighbors, n_neighbors))
nev = min(d_in, n_neighbors)
evals = np.zeros([N, nev])
#choose the most efficient way to find the eigenvectors
use_svd = (n_neighbors > d_in)
if use_svd:
for i in range(N):
X_nbrs = X[neighbors[i]] - X[i]
V[i], evals[i], _ = svd(X_nbrs,
full_matrices=True)
evals **= 2
else:
for i in range(N):
X_nbrs = X[neighbors[i]] - X[i]
C_nbrs = np.dot(X_nbrs, X_nbrs.T)
evi, vi = eigh(C_nbrs)
evals[i] = evi[::-1]
V[i] = vi[:, ::-1]
#find regularized weights: this is like normal LLE.
# because we've already computed the SVD of each covariance matrix,
# it's faster to use this rather than np.linalg.solve
reg = 1E-3 * evals.sum(1)
tmp = np.dot(V.transpose(0, 2, 1), np.ones(n_neighbors))
tmp[:, :nev] /= evals + reg[:, None]
tmp[:, nev:] /= reg[:, None]
w_reg = np.zeros((N, n_neighbors))
for i in range(N):
w_reg[i] = np.dot(V[i], tmp[i])
w_reg /= w_reg.sum(1)[:, None]
#calculate eta: the median of the ratio of small to large eigenvalues
# across the points. This is used to determine s_i, below
rho = evals[:, n_components:].sum(1) / evals[:, :n_components].sum(1)
eta = np.median(rho)
#find s_i, the size of the "almost null space" for each point:
# this is the size of the largest set of eigenvalues
# such that Sum[v; v in set]/Sum[v; v not in set] < eta
s_range = np.zeros(N, dtype=int)
evals_cumsum = np.cumsum(evals, 1)
eta_range = evals_cumsum[:, -1:] / evals_cumsum[:, :-1] - 1
for i in range(N):
s_range[i] = np.searchsorted(eta_range[i, ::-1], eta)
s_range += n_neighbors - nev # number of zero eigenvalues
#Now calculate M.
# This is the [N x N] matrix whose null space is the desired embedding
M = np.zeros((N, N), dtype=np.float)
for i in range(N):
s_i = s_range[i]
#select bottom s_i eigenvectors and calculate alpha
Vi = V[i, :, n_neighbors - s_i:]
alpha_i = np.linalg.norm(Vi.sum(0)) / np.sqrt(s_i)
#compute Householder matrix which satisfies
# Hi*Vi.T*ones(n_neighbors) = alpha_i*ones(s)
# using prescription from paper
h = alpha_i * np.ones(s_i) - np.dot(Vi.T, np.ones(n_neighbors))
norm_h = np.linalg.norm(h)
if norm_h < modified_tol:
h *= 0
else:
h /= norm_h
#Householder matrix is
# >> Hi = np.identity(s_i) - 2*np.outer(h,h)
#Then the weight matrix is
# >> Wi = np.dot(Vi,Hi) + (1-alpha_i) * w_reg[i,:,None]
#We do this much more efficiently:
Wi = (Vi - 2 * np.outer(np.dot(Vi, h), h)
+ (1 - alpha_i) * w_reg[i, :, None])
#Update M as follows:
# >> W_hat = np.zeros( (N,s_i) )
# >> W_hat[neighbors[i],:] = Wi
# >> W_hat[i] -= 1
# >> M += np.dot(W_hat,W_hat.T)
#We can do this much more efficiently:
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] += np.dot(Wi, Wi.T)
Wi_sum1 = Wi.sum(1)
M[i, neighbors[i]] -= Wi_sum1
M[neighbors[i], i] -= Wi_sum1
M[i, i] += s_i
if M_sparse:
M = csr_matrix(M)
elif method == 'ltsa':
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
M = np.zeros((N, N))
use_svd = (n_neighbors > d_in)
for i in range(N):
Xi = X[neighbors[i]]
Xi -= Xi.mean(0)
# compute n_components largest eigenvalues of Xi * Xi^T
if use_svd:
v = svd(Xi, full_matrices=True)[0]
else:
Ci = np.dot(Xi, Xi.T)
v = eigh(Ci)[1][:, ::-1]
Gi = np.zeros((n_neighbors, n_components + 1))
Gi[:, 1:] = v[:, :n_components]
Gi[:, 0] = 1. / np.sqrt(n_neighbors)
GiGiT = np.dot(Gi, Gi.T)
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] -= GiGiT
M[neighbors[i], neighbors[i]] += 1
return null_space(M, n_components, k_skip=1, eigen_solver=eigen_solver,
tol=tol, max_iter=max_iter, random_state=random_state)
class LocallyLinearEmbedding(BaseEstimator, TransformerMixin):
"""Locally Linear Embedding
Read more in the :ref:`User Guide <locally_linear_embedding>`.
Parameters
----------
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold
reg : float
regularization constant, multiplies the trace of the local covariance
matrix of the distances.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method
Not used if eigen_solver=='dense'.
max_iter : integer
maximum number of iterations for the arpack solver.
Not used if eigen_solver=='dense'.
method : string ('standard', 'hessian', 'modified' or 'ltsa')
standard : use the standard locally linear embedding algorithm. see
reference [1]
hessian : use the Hessian eigenmap method. This method requires
            ``n_neighbors > n_components * (n_components + 3) / 2``
see reference [2]
modified : use the modified locally linear embedding algorithm.
see reference [3]
ltsa : use local tangent space alignment algorithm
see reference [4]
hessian_tol : float, optional
Tolerance for Hessian eigenmapping method.
Only used if ``method == 'hessian'``
modified_tol : float, optional
Tolerance for modified LLE method.
Only used if ``method == 'modified'``
neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
algorithm to use for nearest neighbors search,
passed to neighbors.NearestNeighbors instance
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
Attributes
----------
embedding_vectors_ : array-like, shape [n_components, n_samples]
Stores the embedding vectors
reconstruction_error_ : float
Reconstruction error associated with `embedding_vectors_`
nbrs_ : NearestNeighbors object
Stores nearest neighbors instance, including BallTree or KDtree
if applicable.
References
----------
.. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
by locally linear embedding. Science 290:2323 (2000).`
.. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
linear embedding techniques for high-dimensional data.
Proc Natl Acad Sci U S A. 100:5591 (2003).`
.. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
Embedding Using Multiple Weights.`
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
.. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
dimensionality reduction via tangent space alignment.
Journal of Shanghai Univ. 8:406 (2004)`
"""
def __init__(self, n_neighbors=5, n_components=2, reg=1E-3,
eigen_solver='auto', tol=1E-6, max_iter=100,
method='standard', hessian_tol=1E-4, modified_tol=1E-12,
neighbors_algorithm='auto', random_state=None):
self.n_neighbors = n_neighbors
self.n_components = n_components
self.reg = reg
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.method = method
self.hessian_tol = hessian_tol
self.modified_tol = modified_tol
self.random_state = random_state
self.neighbors_algorithm = neighbors_algorithm
def _fit_transform(self, X):
self.nbrs_ = NearestNeighbors(self.n_neighbors,
algorithm=self.neighbors_algorithm)
random_state = check_random_state(self.random_state)
X = check_array(X)
self.nbrs_.fit(X)
self.embedding_, self.reconstruction_error_ = \
locally_linear_embedding(
self.nbrs_, self.n_neighbors, self.n_components,
eigen_solver=self.eigen_solver, tol=self.tol,
max_iter=self.max_iter, method=self.method,
hessian_tol=self.hessian_tol, modified_tol=self.modified_tol,
random_state=random_state, reg=self.reg)
def fit(self, X, y=None):
"""Compute the embedding vectors for data X
Parameters
----------
X : array-like of shape [n_samples, n_features]
training set.
Returns
-------
self : returns an instance of self.
"""
self._fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Compute the embedding vectors for data X and transform X.
Parameters
----------
X : array-like of shape [n_samples, n_features]
training set.
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
self._fit_transform(X)
return self.embedding_
def transform(self, X):
"""
Transform new points into embedding space.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
X_new : array, shape = [n_samples, n_components]
Notes
-----
Because of scaling performed by this method, it is discouraged to use
it together with methods that are not scale-invariant (like SVMs)
"""
check_is_fitted(self, "nbrs_")
X = check_array(X)
ind = self.nbrs_.kneighbors(X, n_neighbors=self.n_neighbors,
return_distance=False)
weights = barycenter_weights(X, self.nbrs_._fit_X[ind],
reg=self.reg)
X_new = np.empty((X.shape[0], self.n_components))
for i in range(X.shape[0]):
X_new[i] = np.dot(self.embedding_[ind[i]].T, weights[i])
return X_new
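# Minimal usage sketch (illustrative; the dataset and parameter values are
# arbitrary choices, not recommendations):
#
#     from sklearn.datasets import make_swiss_roll
#     X, _ = make_swiss_roll(n_samples=500)
#     lle = LocallyLinearEmbedding(n_neighbors=10, n_components=2)
#     Y = lle.fit_transform(X)  # Y.shape == (500, 2)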
| bsd-3-clause |
mespe/SolRad | collection/compare_cimis_cfsr/compare_cimis_cfsr.py | 1 | 2737 | import pandas as pd
import matplotlib.pyplot as plt
from netCDF4 import Dataset
import netCDF4
def load_CFSR_data():
my_example_nc_file = 'RES.nc' # latitude, longitude = (39.5, -122)
fh = Dataset(my_example_nc_file, mode='r')
print(fh.variables.keys())
    help(fh.variables['time'])
print(fh.variables['time'].name)
####time = fh['time'][:]
####print(time)
times = fh.variables['time']
    time_np = netCDF4.num2date(times[:], times.units) - pd.offsets.Hour(8)
#print(time_np.shape)
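    # The Hour(8) offset above shifts the file's timestamps back by eight
    # hours, presumably converting UTC to local UTC-8 time so they line up
    # with the CIMIS records in compare(); that the CIMIS data use Pacific
    # Standard Time is an assumption, not something stated in the file.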
variables = {"SHTFL_L1_Avg_1" : "Sensible heat flux",
"DSWRF_L1_Avg_1" : "Downward shortwave radiation flux",
"CSDSF_L1_Avg_1" : "Clear sky downward solar flux",
"DSWRF_L1_Avg_1" : "Downward shortwave radiation flux",
"DLWRF_L1_Avg_1" : "Downward longwave radiation flux",
"CSULF_L1_Avg_1" : "Clear sky upward longwave flux",
"GFLUX_L1_Avg_1" : "Ground heat flux"}
#downward_solar_flux_np = fh.variables["DSWRF_L1_Avg_1"][:,0,0] + fh.variables["DLWRF_L1_Avg_1"][:,0,0]- fh.variables["USWRF_L1_Avg_1"][:,0,0] - fh.variables["ULWRF_L1_Avg_1"][:,0,0]
downward_solar_flux_np = fh.variables["CSDLF_L1_Avg_1"][:, 0, 0]
#(fh.variables["SHTFL_L1_Avg_1"][:,0,0] + fh.variables["LHTFL_L1_Avg_1"][:,0,0] +
#fh.variables["DSWRF_L1_Avg_1"][:,0,0] + fh.variables["DLWRF_L1_Avg_1"][:,0,0] -
#fh.variables["USWRF_L1_Avg_1"][:,0,0] - fh.variables["ULWRF_L1_Avg_1"][:,0,0] +
#fh.variables["GFLUX_L1_Avg_1"][:,0,0] )
#print(downward_solar_flux_np.shape)
df = pd.DataFrame({'datetime': time_np, 'solar rad': downward_solar_flux_np})
#plt.plot(df['time'][:100], df['solar'][:100])
# save to a pickle file
df.to_pickle('cfsr_2005_2010.pkl')
#'CSDSF_L1_Avg_1'
for key in fh.variables.keys():
variable = fh.variables[key]
#variable = fh.variables[key][:]
print(variable)
print()
def compare():
cimis = pd.read_pickle('cimis_2005_2010.pkl')
cfsr = pd.read_pickle('cfsr_2005_2010.pkl')
plt.plot(cfsr['datetime'][:250], cfsr['solar rad'][:250], label = "cfsr")
plt.plot(cimis['datetime'][:1500], cimis['solar rad'][:1500], label = "cimis")
plt.legend()
#for i in range(10):
# print(cfsr['datetime'][i], cimis['datetime'][i])
#print(cimis['datetime'][i])
load_CFSR_data()
#compare() | mit |
ProstoMaxim/incubator-airflow | tests/contrib/hooks/test_bigquery_hook.py | 16 | 8098 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from airflow.contrib.hooks import bigquery_hook as hook
from oauth2client.contrib.gce import HttpAccessTokenRefreshError
bq_available = True
try:
hook.BigQueryHook().get_service()
except HttpAccessTokenRefreshError:
bq_available = False
class TestBigQueryDataframeResults(unittest.TestCase):
def setUp(self):
self.instance = hook.BigQueryHook()
@unittest.skipIf(not bq_available, 'BQ is not available to run tests')
def test_output_is_dataframe_with_valid_query(self):
import pandas as pd
df = self.instance.get_pandas_df('select 1')
self.assertIsInstance(df, pd.DataFrame)
@unittest.skipIf(not bq_available, 'BQ is not available to run tests')
def test_throws_exception_with_invalid_query(self):
with self.assertRaises(Exception) as context:
self.instance.get_pandas_df('from `1`')
self.assertIn('pandas_gbq.gbq.GenericGBQException: Reason: invalidQuery',
str(context.exception), "")
@unittest.skipIf(not bq_available, 'BQ is not available to run tests')
    def test_succeeds_with_explicit_legacy_query(self):
df = self.instance.get_pandas_df('select 1', dialect='legacy')
self.assertEqual(df.iloc(0)[0][0], 1)
@unittest.skipIf(not bq_available, 'BQ is not available to run tests')
    def test_succeeds_with_explicit_std_query(self):
df = self.instance.get_pandas_df('select * except(b) from (select 1 a, 2 b)', dialect='standard')
self.assertEqual(df.iloc(0)[0][0], 1)
@unittest.skipIf(not bq_available, 'BQ is not available to run tests')
def test_throws_exception_with_incompatible_syntax(self):
with self.assertRaises(Exception) as context:
self.instance.get_pandas_df('select * except(b) from (select 1 a, 2 b)', dialect='legacy')
self.assertIn('pandas_gbq.gbq.GenericGBQException: Reason: invalidQuery',
str(context.exception), "")
class TestBigQueryTableSplitter(unittest.TestCase):
def test_internal_need_default_project(self):
with self.assertRaises(Exception) as context:
hook._split_tablename('dataset.table', None)
self.assertIn('INTERNAL: No default project is specified',
str(context.exception), "")
def test_split_dataset_table(self):
project, dataset, table = hook._split_tablename('dataset.table',
'project')
self.assertEqual("project", project)
self.assertEqual("dataset", dataset)
self.assertEqual("table", table)
def test_split_project_dataset_table(self):
project, dataset, table = hook._split_tablename('alternative:dataset.table',
'project')
self.assertEqual("alternative", project)
self.assertEqual("dataset", dataset)
self.assertEqual("table", table)
def test_sql_split_project_dataset_table(self):
project, dataset, table = hook._split_tablename('alternative.dataset.table',
'project')
self.assertEqual("alternative", project)
self.assertEqual("dataset", dataset)
self.assertEqual("table", table)
def test_colon_in_project(self):
project, dataset, table = hook._split_tablename('alt1:alt.dataset.table',
'project')
self.assertEqual('alt1:alt', project)
self.assertEqual("dataset", dataset)
self.assertEqual("table", table)
def test_valid_double_column(self):
project, dataset, table = hook._split_tablename('alt1:alt:dataset.table',
'project')
self.assertEqual('alt1:alt', project)
self.assertEqual("dataset", dataset)
self.assertEqual("table", table)
def test_invalid_syntax_triple_colon(self):
with self.assertRaises(Exception) as context:
hook._split_tablename('alt1:alt2:alt3:dataset.table',
'project')
self.assertIn('Use either : or . to specify project',
str(context.exception), "")
self.assertFalse('Format exception for' in str(context.exception))
def test_invalid_syntax_triple_dot(self):
with self.assertRaises(Exception) as context:
hook._split_tablename('alt1.alt.dataset.table',
'project')
self.assertIn('Expect format of (<project.|<project:)<dataset>.<table>',
str(context.exception), "")
self.assertFalse('Format exception for' in str(context.exception))
def test_invalid_syntax_column_double_project_var(self):
with self.assertRaises(Exception) as context:
hook._split_tablename('alt1:alt2:alt.dataset.table',
'project', 'var_x')
self.assertIn('Use either : or . to specify project',
str(context.exception), "")
self.assertIn('Format exception for var_x:',
str(context.exception), "")
def test_invalid_syntax_triple_colon_project_var(self):
with self.assertRaises(Exception) as context:
hook._split_tablename('alt1:alt2:alt:dataset.table',
'project', 'var_x')
self.assertIn('Use either : or . to specify project',
str(context.exception), "")
self.assertIn('Format exception for var_x:',
str(context.exception), "")
def test_invalid_syntax_triple_dot_var(self):
with self.assertRaises(Exception) as context:
hook._split_tablename('alt1.alt.dataset.table',
'project', 'var_x')
self.assertIn('Expect format of (<project.|<project:)<dataset>.<table>',
str(context.exception), "")
self.assertIn('Format exception for var_x:',
str(context.exception), "")
class TestBigQueryHookSourceFormat(unittest.TestCase):
def test_invalid_source_format(self):
with self.assertRaises(Exception) as context:
hook.BigQueryBaseCursor("test", "test").run_load("test.test", "test_schema.json", ["test_data.json"], source_format="json")
# since we passed 'json' in, and it's not valid, make sure it's present in the error string.
self.assertIn("JSON", str(context.exception))
class TestBigQueryBaseCursor(unittest.TestCase):
def test_invalid_schema_update_options(self):
with self.assertRaises(Exception) as context:
hook.BigQueryBaseCursor("test", "test").run_load(
"test.test",
"test_schema.json",
["test_data.json"],
schema_update_options=["THIS IS NOT VALID"]
)
self.assertIn("THIS IS NOT VALID", str(context.exception))
def test_invalid_schema_update_and_write_disposition(self):
with self.assertRaises(Exception) as context:
hook.BigQueryBaseCursor("test", "test").run_load(
"test.test",
"test_schema.json",
["test_data.json"],
schema_update_options=['ALLOW_FIELD_ADDITION'],
write_disposition='WRITE_EMPTY'
)
self.assertIn("schema_update_options is only", str(context.exception))
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
douglasbagnall/py_bh_tsne | test_radial.py | 1 | 1367 | #!/usr/bin/python
import gzip, cPickle
import numpy as np
import matplotlib.pyplot as plt
import sys
from fasttsne import fast_tsne
import random
random.seed(1)
def generate_angular_clusters(n, d, extra_d=10):
data = []
classes = []
for i in range(n):
scale = random.random() * 5 + 0.1
centre = [random.randrange(2) for x in range(d)]
_class = ''.join(str(x) for x in centre)
row = [scale * (random.random() * 0.05 + x - 0.5)
for x in centre]
row += [random.random() * 0.01 for x in range(extra_d)]
data.append(row)
classes.append(_class)
data = np.asarray(data)
return data, classes
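# Illustrative shape of the output: generate_angular_clusters(1000, 3) returns
# a (1000, 3 + extra_d) = (1000, 13) array plus 1000 labels drawn from the
# 2**3 binary strings '000'..'111', i.e. one noisy, randomly scaled cluster
# per corner of the d-dimensional hypercube.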
def main():
data, classes = generate_angular_clusters(5000, 4)
#print data
if len(sys.argv) > 1:
mode = int(sys.argv[1])
else:
mode = 2
Y = fast_tsne(data, perplexity=10, mode=mode)
#print zip(classes, Y)[:50]
digits = set(classes)
fig = plt.figure()
colormap = plt.cm.spectral
plt.gca().set_color_cycle([colormap(i) for i in np.linspace(0, 0.6, 10)])
ax = fig.add_subplot(111)
labels = []
for d in sorted(digits):
idx = np.array([x == d for x in classes], dtype=np.bool)
ax.plot(Y[idx, 0], Y[idx, 1], 'o')
labels.append(d)
ax.legend(labels, numpoints=1, fancybox=True)
plt.show()
main()
| bsd-3-clause |
Edu-Glez/Bank_sentiment_analysis | env/lib/python3.6/site-packages/ipykernel/inprocess/tests/test_kernel.py | 8 | 2417 | # Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import print_function
import sys
import unittest
from ipykernel.inprocess.blocking import BlockingInProcessKernelClient
from ipykernel.inprocess.manager import InProcessKernelManager
from ipykernel.inprocess.ipkernel import InProcessKernel
from ipykernel.tests.utils import assemble_output
from IPython.testing.decorators import skipif_not_matplotlib
from IPython.utils.io import capture_output
from ipython_genutils import py3compat
if py3compat.PY3:
from io import StringIO
else:
from StringIO import StringIO
class InProcessKernelTestCase(unittest.TestCase):
def setUp(self):
self.km = InProcessKernelManager()
self.km.start_kernel()
self.kc = self.km.client()
self.kc.start_channels()
self.kc.wait_for_ready()
@skipif_not_matplotlib
def test_pylab(self):
"""Does %pylab work in the in-process kernel?"""
kc = self.kc
kc.execute('%pylab')
out, err = assemble_output(kc.iopub_channel)
self.assertIn('matplotlib', out)
def test_raw_input(self):
""" Does the in-process kernel handle raw_input correctly?
"""
io = StringIO('foobar\n')
sys_stdin = sys.stdin
sys.stdin = io
try:
if py3compat.PY3:
self.kc.execute('x = input()')
else:
self.kc.execute('x = raw_input()')
finally:
sys.stdin = sys_stdin
self.assertEqual(self.km.kernel.shell.user_ns.get('x'), 'foobar')
def test_stdout(self):
""" Does the in-process kernel correctly capture IO?
"""
kernel = InProcessKernel()
with capture_output() as io:
kernel.shell.run_cell('print("foo")')
self.assertEqual(io.stdout, 'foo\n')
kc = BlockingInProcessKernelClient(kernel=kernel, session=kernel.session)
kernel.frontends.append(kc)
kc.execute('print("bar")')
out, err = assemble_output(kc.iopub_channel)
self.assertEqual(out, 'bar\n')
def test_getpass_stream(self):
"Tests that kernel getpass accept the stream parameter"
kernel = InProcessKernel()
kernel._allow_stdin = True
kernel._input_request = lambda *args, **kwargs : None
kernel.getpass(stream='non empty')
| apache-2.0 |
timmyshen/kaggle-titanic | python_script/myfirstforest.py | 26 | 4081 | """ Writing my first randomforest code.
Author : AstroDave
Date : 23rd September 2012
Revised: 15 April 2014
please see packages.python.org/milk/randomforests.html for more
"""
import pandas as pd
import numpy as np
import csv as csv
from sklearn.ensemble import RandomForestClassifier
# Data cleanup
# TRAIN DATA
train_df = pd.read_csv('train.csv', header=0) # Load the train file into a dataframe
# I need to convert all strings to integer classifiers.
# I need to fill in the missing values of the data and make it complete.
# female = 0, Male = 1
train_df['Gender'] = train_df['Sex'].map( {'female': 0, 'male': 1} ).astype(int)
# Embarked from 'C', 'Q', 'S'
# Note this is not ideal: in translating categories to numbers, Port "2" is not 2 times greater than Port "1", etc.
# All missing Embarked -> just make them embark from most common place
if len(train_df.Embarked[ train_df.Embarked.isnull() ]) > 0:
train_df.Embarked[ train_df.Embarked.isnull() ] = train_df.Embarked.dropna().mode().values
Ports = list(enumerate(np.unique(train_df['Embarked']))) # determine all values of Embarked,
Ports_dict = { name : i for i, name in Ports } # set up a dictionary in the form Ports : index
train_df.Embarked = train_df.Embarked.map( lambda x: Ports_dict[x]).astype(int) # Convert all Embark strings to int
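# Illustrative result (depends on np.unique's sorted order): with the standard
# Titanic data the ports enumerate as Ports_dict == {'C': 0, 'Q': 1, 'S': 2},
# so each Embarked value becomes a small integer code.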
# All the ages with no data -> make the median of all Ages
median_age = train_df['Age'].dropna().median()
if len(train_df.Age[ train_df.Age.isnull() ]) > 0:
train_df.loc[ (train_df.Age.isnull()), 'Age'] = median_age
# Remove the Name column, Cabin, Ticket, and Sex (since I copied and filled it to Gender)
train_df = train_df.drop(['Name', 'Sex', 'Ticket', 'Cabin', 'PassengerId'], axis=1)
# TEST DATA
test_df = pd.read_csv('test.csv', header=0) # Load the test file into a dataframe
# I need to do the same with the test data now, so that the columns are the same as the training data
# I need to convert all strings to integer classifiers:
# female = 0, Male = 1
test_df['Gender'] = test_df['Sex'].map( {'female': 0, 'male': 1} ).astype(int)
# Embarked from 'C', 'Q', 'S'
# All missing Embarked -> just make them embark from most common place
if len(test_df.Embarked[ test_df.Embarked.isnull() ]) > 0:
test_df.Embarked[ test_df.Embarked.isnull() ] = test_df.Embarked.dropna().mode().values
# Again convert all Embarked strings to int
test_df.Embarked = test_df.Embarked.map( lambda x: Ports_dict[x]).astype(int)
# All the ages with no data -> make the median of all Ages
median_age = test_df['Age'].dropna().median()
if len(test_df.Age[ test_df.Age.isnull() ]) > 0:
test_df.loc[ (test_df.Age.isnull()), 'Age'] = median_age
# All the missing Fares -> assume median of their respective class
if len(test_df.Fare[ test_df.Fare.isnull() ]) > 0:
median_fare = np.zeros(3)
for f in range(0,3): # loop 0 to 2
median_fare[f] = test_df[ test_df.Pclass == f+1 ]['Fare'].dropna().median()
for f in range(0,3): # loop 0 to 2
test_df.loc[ (test_df.Fare.isnull()) & (test_df.Pclass == f+1 ), 'Fare'] = median_fare[f]
# Collect the test data's PassengerIds before dropping it
ids = test_df['PassengerId'].values
# Remove the Name column, Cabin, Ticket, and Sex (since I copied and filled it to Gender)
test_df = test_df.drop(['Name', 'Sex', 'Ticket', 'Cabin', 'PassengerId'], axis=1)
# The data is now ready to go. So lets fit to the train, then predict to the test!
# Convert back to a numpy array
train_data = train_df.values
test_data = test_df.values
print 'Training...'
forest = RandomForestClassifier(n_estimators=100)
forest = forest.fit( train_data[0::,1::], train_data[0::,0] )
print 'Predicting...'
output = forest.predict(test_data).astype(int)
predictions_file = open("myfirstforest.csv", "wb")
open_file_object = csv.writer(predictions_file)
open_file_object.writerow(["PassengerId","Survived"])
open_file_object.writerows(zip(ids, output))
predictions_file.close()
print 'Done.'
| mit |
alexandrwang/6882project | strens/experiment.py | 1 | 2946 | import numpy as np
class Experiment(object):
""" An experiment matches up a task with an agent and handles their interactions.
"""
def __init__(self, task, agent):
self.task = task
self.agent = agent
self.stepid = 0
def doInteractions(self, number=1):
""" The default implementation directly maps the methods of the agent and the task.
Returns the number of interactions done.
"""
for _ in range(number):
self._oneInteraction()
return self.stepid
def _oneInteraction(self):
""" Give the observation to the agent, takes its resulting action and returns
it to the task. Then gives the reward to the agent again and returns it.
"""
self.stepid += 1
# observations from tasks are vectors
self.agent.integrateObservation(self.task.getObservation())
self.task.performAction(self.agent.getAction())
reward = self.task.getReward()
newstate = self.task.getObservation()
self.agent.getReward(reward)
return reward
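# Minimal usage sketch (mirrors the __main__ demo below):
#   exp = Experiment(task, agent)
#   exp.doInteractions(100)  # 100 observe -> act -> reward cycles
#   print exp.stepid         # total number of interactions so far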
if __name__=="__main__":
from environments.loop import Loop, LoopTask
from environments.chain import Chain, ChainTask
from environments.maze import FlagMaze, FlagMazeTask
from module import ActionModule
from agent import BayesAgent
# env = Loop()
# task = LoopTask(env)
# env = Chain()
# task = ChainTask(env)
# struct = np.array([[0, 0, 0, 0, 0],
# [0, 1, 1, 0, 0],
# [0, 1, 1, 1, 0],
# [0, 1, 1, 1, 0],
# [0, 1, 0, 1, 0],
# [0, 0, 0, 0, 0]])
# flagPos = [(3, 1)]
# goal = (3, 3)
struct = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 0, 1, 1, 1, 1, 0],
[0, 1, 1, 0, 1, 1, 0, 1, 0],
[0, 1, 1, 0, 1, 1, 0, 1, 0],
[0, 1, 1, 0, 1, 0, 0, 1, 0],
[0, 1, 1, 1, 1, 1, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0]])
flagPos = [(5,1), (7, 3), (3, 5)]
goal = (1, 7)
env = FlagMaze(struct, flagPos, goal)
task = FlagMazeTask(env)
module = ActionModule(env.outdim, env.indim)
agent = BayesAgent(module)
exp = Experiment(task, agent)
reward = 0
xs = []
ys = []
import matplotlib.pyplot as plt
for i in xrange(5000):
exp.doInteractions(1)
reward += agent.lastreward
if i%50 == 0:
xs.append(i)
ys.append(reward)
if agent.lastreward > 0:
print "ACTION:",agent.lastaction, "STATE:",agent.laststate, "REWARD:",agent.lastreward
# print env.curPos
print "TOTAL REWARD:", reward
print ys
plt.plot(xs, ys)
plt.show()
| mit |
arokem/sklearn-forest-ci | examples/plot_mpg_svr.py | 2 | 1964 | """
======================================
Plotting Bagging Regression Error Bars
======================================
This example demonstrates using `forestci` to calculate the error bars of
the predictions of a :class:`sklearn.ensemble.BaggingRegressor` object.
The data used here are a classical machine learning data-set, describing
various features of different cars, and their MPG.
"""
# Regression Forest Example
import numpy as np
from matplotlib import pyplot as plt
from sklearn.ensemble import BaggingRegressor
from sklearn.svm import SVR
import sklearn.model_selection as xval
from sklearn.datasets import fetch_openml
import forestci as fci
# retrieve mpg data from machine learning library
mpg_data = fetch_openml(data_id=196)
# separate mpg data into predictors and outcome variable
mpg_X = mpg_data["data"]
mpg_y = mpg_data["target"]
# remove rows where the data is nan
not_null_sel = np.invert(np.sum(np.isnan(mpg_data["data"]), axis=1).astype(bool))
mpg_X = mpg_X[not_null_sel]
mpg_y = mpg_y[not_null_sel]
# split mpg data into training and test set
mpg_X_train, mpg_X_test, mpg_y_train, mpg_y_test = xval.train_test_split(
mpg_X, mpg_y, test_size=0.25, random_state=42
)
# Create a BaggingRegressor ensemble of SVR base estimators
n_estimators = 1000
mpg_bagger = BaggingRegressor(
base_estimator=SVR(), n_estimators=n_estimators, random_state=42
)
mpg_bagger.fit(mpg_X_train, mpg_y_train)
mpg_y_hat = mpg_bagger.predict(mpg_X_test)
# Plot predicted MPG without error bars
plt.scatter(mpg_y_test, mpg_y_hat)
plt.plot([5, 45], [5, 45], "k--")
plt.xlabel("Reported MPG")
plt.ylabel("Predicted MPG")
plt.show()
# Calculate the variance
mpg_V_IJ_unbiased = fci.random_forest_error(mpg_bagger, mpg_X_train, mpg_X_test)
# Plot error bars for predicted MPG using unbiased variance
plt.errorbar(mpg_y_test, mpg_y_hat, yerr=np.sqrt(mpg_V_IJ_unbiased), fmt="o")
plt.plot([5, 45], [5, 45], "k--")
plt.xlabel("Reported MPG")
plt.ylabel("Predicted MPG")
plt.show()
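# Background on these error bars (editorial sketch, after Wager, Hastie &
# Efron, 2014): the infinitesimal-jackknife estimate computed by forestci is
# roughly
#     V_IJ(x) = sum_i Cov_b[N_{b,i}, t_b(x)]**2
# where N_{b,i} counts how often training sample i appears in bootstrap b and
# t_b(x) is estimator b's prediction at x. random_forest_error returns this
# (bias-corrected) variance per test point, hence yerr=np.sqrt(...) above.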
| mit |
wavelets/ThinkStats2 | code/chap12ex_soln.py | 68 | 4459 | """This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import pandas
import numpy as np
import statsmodels.formula.api as smf
import thinkplot
import thinkstats2
import regression
import timeseries
def RunQuadraticModel(daily):
"""Runs a linear model of prices versus years.
daily: DataFrame of daily prices
returns: model, results
"""
daily['years2'] = daily.years**2
model = smf.ols('ppg ~ years + years2', data=daily)
results = model.fit()
return model, results
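# The fitted curve has the form ppg = b0 + b1*years + b2*years**2; the
# precomputed years2 column simply lets OLS estimate the curvature term b2.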
def PlotQuadraticModel(daily, name):
"""
"""
model, results = RunQuadraticModel(daily)
regression.SummarizeResults(results)
timeseries.PlotFittedValues(model, results, label=name)
thinkplot.Save(root='timeseries11',
title='fitted values',
xlabel='years',
xlim=[-0.1, 3.8],
ylabel='price per gram ($)')
timeseries.PlotResidualPercentiles(model, results)
thinkplot.Save(root='timeseries12',
title='residuals',
xlabel='years',
ylabel='price per gram ($)')
years = np.linspace(0, 5, 101)
thinkplot.Scatter(daily.years, daily.ppg, alpha=0.1, label=name)
timeseries.PlotPredictions(daily, years, func=RunQuadraticModel)
thinkplot.Save(root='timeseries13',
title='predictions',
xlabel='years',
xlim=[years[0]-0.1, years[-1]+0.1],
ylabel='price per gram ($)')
def PlotEwmaPredictions(daily, name):
"""
"""
# use EWMA to estimate slopes
filled = timeseries.FillMissing(daily)
filled['slope'] = pandas.ewma(filled.ppg.diff(), span=180)
# extract the last inter and slope
start = filled.index[-1]
inter = filled.ewma[-1]
slope = filled.slope[-1]
# reindex the DataFrame, adding a year to the end
dates = pandas.date_range(filled.index.min(),
filled.index.max() + np.timedelta64(365, 'D'))
predicted = filled.reindex(dates)
# generate predicted values and add them to the end
predicted['date'] = predicted.index
one_day = np.timedelta64(1, 'D')
predicted['days'] = (predicted.date - start) / one_day
predict = inter + slope * predicted.days
predicted.ewma.fillna(predict, inplace=True)
# plot the actual values and predictions
thinkplot.Scatter(daily.ppg, alpha=0.1, label=name)
thinkplot.Plot(predicted.ewma)
thinkplot.Save()
class SerialCorrelationTest(thinkstats2.HypothesisTest):
"""Tests serial correlations by permutation."""
def TestStatistic(self, data):
"""Computes the test statistic.
data: tuple of xs and ys
"""
series, lag = data
test_stat = abs(thinkstats2.SerialCorr(series, lag))
return test_stat
def RunModel(self):
"""Run the model of the null hypothesis.
returns: simulated data
"""
series, lag = self.data
permutation = series.reindex(np.random.permutation(series.index))
return permutation, lag
def TestSerialCorr(daily):
"""Tests serial correlations in daily prices and their residuals.
daily: DataFrame of daily prices
"""
# test the correlation between consecutive prices
series = daily.ppg
test = SerialCorrelationTest((series, 1))
pvalue = test.PValue()
print(test.actual, pvalue)
# test for serial correlation in residuals of the linear model
_, results = timeseries.RunLinearModel(daily)
series = results.resid
test = SerialCorrelationTest((series, 1))
pvalue = test.PValue()
print(test.actual, pvalue)
# test for serial correlation in residuals of the quadratic model
_, results = RunQuadraticModel(daily)
series = results.resid
test = SerialCorrelationTest((series, 1))
pvalue = test.PValue()
print(test.actual, pvalue)
def main(name):
transactions = timeseries.ReadData()
dailies = timeseries.GroupByQualityAndDay(transactions)
name = 'high'
daily = dailies[name]
PlotQuadraticModel(daily, name)
TestSerialCorr(daily)
PlotEwmaPredictions(daily, name)
if __name__ == '__main__':
import sys
main(*sys.argv)
| gpl-3.0 |
bm2-lab/cage | src/core/seqfs/seqfeature_mtlas_selector.py | 1 | 1888 | from __future__ import division
import numpy as np
import pandas as pd
from lxml import etree
from collections import namedtuple
from seqfeature_extractor import ExtractSeqFeature
from src.core import ml
Fr = namedtuple('Fr', ['fs', 'ups', 'dws'])
def __SaveFeatureReport(fr, str_of_fesrep):
f_fesrep = open(str_of_fesrep, 'w')
enode_root = etree.Element('report')
enode_fes = etree.SubElement(enode_root, 'features')
for fs in fr.fs:
enode_fes.append(etree.Element(fs))
enode_fes.set('method', 'mtlasso')
enode_fes.set('n', str(len(fr.fs)))
enode_fes.set('ups', str(fr.ups))
enode_fes.set('dws', str(fr.dws))
f_fesrep.write(etree.tostring(enode_root, pretty_print=True, xml_declaration=True, encoding='utf-8'))
f_fesrep.close()
def MtLassoFeatureSelection(lst_sg, lst_st, lst_ref, lst_seq,
str_of_fesrep, int_ups=30, int_dws=27, flt_lmd=0.5):
lst_x = []
lst_y = []
ind = [0]
lst_header = []
for i in xrange(len(lst_sg)):
dfm = ExtractSeqFeature(lst_sg[i], lst_ref[i], int_ups, int_dws)
dfm.to_csv(lst_seq[i], sep='\t', index=None)
dfm_y = pd.read_csv(lst_st[i], sep='\t', index_col=None)[[0,-1]]
dfm = pd.merge(dfm, dfm_y, on='sgID')
x = np.array(dfm.ix[:,1:-1], dtype=np.double)
y = np.array(dfm[[-1]], dtype=np.double)
lst_x.append(x)
lst_y.append(y)
ind.append(dfm.shape[0])
if i == 0:
lst_header = dfm.columns[1:-1].tolist()
x = np.double(np.vstack(lst_x))
y = np.double(np.vstack(lst_y))
ind = np.cumsum(ind)
dict_opts = dict(init=2, rFlag=1, rsL2=0, ind=ind, nFlag=2)
idx = ml.MtLassoSelector(x, y, flt_lmd, dict_opts)
lst_fs = [lst_header[i] for i in idx]
fr = Fr(fs=lst_fs, ups=int_ups, dws=int_dws)
__SaveFeatureReport(fr, str_of_fesrep)
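# Editorial note: a multi-task lasso of this kind typically minimises
#     ||X W - Y||_F^2 + lmd * sum_j ||W_j||_2
# (an L2,1 penalty over feature rows W_j), so a sequence feature survives only
# if its coefficients are jointly non-zero across all tasks; `idx` is assumed
# to hold the indices of those surviving rows.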
| mit |
odwyer-lab/microbial-innovations | bin/merge_tax.py | 2 | 1368 | import sys
import os
import re
from Bio import SeqIO
import numpy
import pandas
in_file1 = sys.argv[1]
in_file2 = sys.argv[2]
out_file = sys.argv[3]
# in_file1 = './data/LTPs123_unique.nr_v123.wang.taxonomy'
# in_file2 = './data/LTPs123_unique.taxonomy'
# out_file = './data/LTPs123.unique.full.taxonomy'
tax1 = pandas.DataFrame.from_csv(in_file1, sep = '\t', header = None, index_col = None)
tax2 = pandas.DataFrame.from_csv(in_file2, sep = '\t', header = 0, index_col = None)
if(len(tax1) == len(tax2)):
len_tax = len(tax1)
else:
print('Stop: Taxonomy files are not the same length')
sys.exit(1)
if not(numpy.array_equal(tax1.iloc[0:, 0], tax2.iloc[0:, 0])):
print('Stop: OTUs are not ordered correctly')
sys.exit(1)
print(tax1.iloc[0:3, 0:])
print(tax2.iloc[0:3, 0:])
temp_otu = list()
temp_tax = list()
for i in range(0,len_tax):
otu = tax1.iloc[i, 0]
temp_otu.append(otu)
temp = tax1.iloc[i,1]
temp_split = temp.rsplit(';')
temp_high = '%s;%s;%s;%s' % (temp_split[0], temp_split[1], temp_split[2], temp_split[3])
temp_low = tax2.iloc[i, 1]
temp_join = '%s;%s' % (temp_high, temp_low)
temp_tax.append(temp_join)
OTU_Tax = pandas.DataFrame(
{'OTU': temp_otu,
'Taxonomy': temp_tax
}, columns = ['OTU', 'Taxonomy'])
OTU_Tax.to_csv(out_file, sep='\t', header=False, index=False)
print("Done")
| gpl-3.0 |
matthiasdiener/spack | var/spack/repos/builtin/packages/paraview/package.py | 4 | 8421 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
import os
class Paraview(CMakePackage):
"""ParaView is an open-source, multi-platform data analysis and
visualization application."""
homepage = 'http://www.paraview.org'
url = "http://www.paraview.org/files/v5.3/ParaView-v5.3.0.tar.gz"
_urlfmt = 'http://www.paraview.org/files/v{0}/ParaView-v{1}{2}.tar.gz'
version('5.4.1', '4030c70477ec5a85aa72d6fc86a30753')
version('5.4.0', 'b92847605bac9036414b644f33cb7163')
version('5.3.0', '68fbbbe733aa607ec13d1db1ab5eba71')
version('5.2.0', '4570d1a2a183026adb65b73c7125b8b0')
version('5.1.2', '44fb32fc8988fcdfbc216c9e40c3e925')
version('5.0.1', 'fdf206113369746e2276b95b257d2c9b')
version('4.4.0', 'fa1569857dd680ebb4d7ff89c2227378')
variant('plugins', default=True,
description='Install include files for plugins support')
variant('python', default=False, description='Enable Python support')
variant('mpi', default=True, description='Enable MPI support')
variant('osmesa', default=False, description='Enable OSMesa support')
variant('qt', default=False, description='Enable Qt (gui) support')
variant('opengl2', default=True, description='Enable OpenGL2 backend')
variant('examples', default=False, description="Build examples")
variant('hdf5', default=False, description="Use external HDF5")
depends_on('python@2:2.8', when='+python')
depends_on('py-numpy', when='+python', type='run')
depends_on('py-matplotlib', when='+python', type='run')
depends_on('mpi', when='+mpi')
depends_on('qt+opengl', when='@5.3.0:+qt+opengl2')
depends_on('qt~opengl', when='@5.3.0:+qt~opengl2')
depends_on('qt@:4', when='@:5.2.0+qt')
depends_on('mesa+swrender', when='+osmesa')
depends_on('libxt', when='+qt')
conflicts('+qt', when='+osmesa')
depends_on('bzip2')
depends_on('freetype')
# depends_on('hdf5+mpi', when='+mpi')
# depends_on('hdf5~mpi', when='~mpi')
depends_on('hdf5+hl+mpi', when='+hdf5+mpi')
depends_on('hdf5+hl~mpi', when='+hdf5~mpi')
depends_on('jpeg')
depends_on('libpng')
depends_on('libtiff')
depends_on('libxml2')
# depends_on('netcdf')
# depends_on('netcdf-cxx')
# depends_on('protobuf') # version mismatches?
# depends_on('sqlite') # external version not supported
depends_on('zlib')
depends_on('cmake@3.3:', type='build')
patch('stl-reader-pv440.patch', when='@4.4.0')
# Broken gcc-detection - improved in 5.1.0, redundant later
patch('gcc-compiler-pv501.patch', when='@:5.0.1')
# Broken installation (ui_pqExportStateWizard.h) - fixed in 5.2.0
patch('ui_pqExportStateWizard.patch', when='@:5.1.2')
def url_for_version(self, version):
"""Handle ParaView version-based custom URLs."""
if version < Version('5.1.0'):
return self._urlfmt.format(version.up_to(2), version, '-source')
else:
return self._urlfmt.format(version.up_to(2), version, '')
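# For example, this yields
#   http://www.paraview.org/files/v5.0/ParaView-v5.0.1-source.tar.gz
# for version 5.0.1 (pre-5.1 tarballs carry the '-source' suffix) and
#   http://www.paraview.org/files/v5.4/ParaView-v5.4.1.tar.gz
# for version 5.4.1.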
def setup_dependent_environment(self, spack_env, run_env, dependent_spec):
if os.path.isdir(self.prefix.lib64):
lib_dir = self.prefix.lib64
else:
lib_dir = self.prefix.lib
paraview_version = 'paraview-%s' % self.spec.version.up_to(2)
spack_env.set('PARAVIEW_VTK_DIR',
join_path(lib_dir, 'cmake', paraview_version))
def setup_environment(self, spack_env, run_env):
if os.path.isdir(self.prefix.lib64):
lib_dir = self.prefix.lib64
else:
lib_dir = self.prefix.lib
paraview_version = 'paraview-%s' % self.spec.version.up_to(2)
run_env.prepend_path('LIBRARY_PATH', join_path(lib_dir,
paraview_version))
run_env.prepend_path('LD_LIBRARY_PATH', join_path(lib_dir,
paraview_version))
run_env.set('PARAVIEW_VTK_DIR',
join_path(lib_dir, 'cmake', paraview_version))
if '+python' in self.spec:
run_env.prepend_path('PYTHONPATH', join_path(lib_dir,
paraview_version))
run_env.prepend_path('PYTHONPATH', join_path(lib_dir,
paraview_version, 'site-packages'))
run_env.prepend_path('PYTHONPATH', join_path(lib_dir,
paraview_version, 'site-packages', 'vtk'))
def cmake_args(self):
"""Populate cmake arguments for ParaView."""
spec = self.spec
def variant_bool(feature, on='ON', off='OFF'):
"""Ternary for spec variant to ON/OFF string"""
if feature in spec:
return on
return off
def nvariant_bool(feature):
"""Negated ternary for spec variant to OFF/ON string"""
return variant_bool(feature, on='OFF', off='ON')
rendering = variant_bool('+opengl2', 'OpenGL2', 'OpenGL')
includes = variant_bool('+plugins')
cmake_args = [
'-DPARAVIEW_BUILD_QT_GUI:BOOL=%s' % variant_bool('+qt'),
'-DVTK_OPENGL_HAS_OSMESA:BOOL=%s' % variant_bool('+osmesa'),
'-DVTK_USE_X:BOOL=%s' % nvariant_bool('+osmesa'),
'-DVTK_RENDERING_BACKEND:STRING=%s' % rendering,
'-DPARAVIEW_INSTALL_DEVELOPMENT_FILES:BOOL=%s' % includes,
'-DBUILD_TESTING:BOOL=OFF',
'-DBUILD_EXAMPLES:BOOL=%s' % variant_bool('+examples'),
'-DVTK_USE_SYSTEM_FREETYPE:BOOL=ON',
'-DVTK_USE_SYSTEM_HDF5:BOOL=%s' % variant_bool('+hdf5'),
'-DVTK_USE_SYSTEM_JPEG:BOOL=ON',
'-DVTK_USE_SYSTEM_LIBXML2:BOOL=ON',
'-DVTK_USE_SYSTEM_NETCDF:BOOL=OFF',
'-DVTK_USE_SYSTEM_TIFF:BOOL=ON',
'-DVTK_USE_SYSTEM_ZLIB:BOOL=ON',
]
# The assumed qt version changed to QT5 (as of paraview 5.2.1),
# so explicitly specify which QT major version is actually being used
if '+qt' in spec:
cmake_args.extend([
'-DPARAVIEW_QT_VERSION=%s' % spec['qt'].version[0],
])
if '+python' in spec:
cmake_args.extend([
'-DPARAVIEW_ENABLE_PYTHON:BOOL=ON',
'-DPYTHON_EXECUTABLE:FILEPATH=%s' % spec['python'].command.path
])
if '+mpi' in spec:
cmake_args.extend([
'-DPARAVIEW_USE_MPI:BOOL=ON',
'-DMPIEXEC:FILEPATH=%s/bin/mpiexec' % spec['mpi'].prefix,
'-DMPI_CXX_COMPILER:PATH=%s' % spec['mpi'].mpicxx,
'-DMPI_C_COMPILER:PATH=%s' % spec['mpi'].mpicc,
'-DMPI_Fortran_COMPILER:PATH=%s' % spec['mpi'].mpifc
])
if 'darwin' in spec.architecture:
cmake_args.extend([
'-DVTK_USE_X:BOOL=OFF',
'-DPARAVIEW_DO_UNIX_STYLE_INSTALLS:BOOL=ON',
])
# Hide git from Paraview so it will not use `git describe`
# to find its own version number
if spec.satisfies('@5.4.0:5.4.1'):
cmake_args.extend([
'-DGIT_EXECUTABLE=FALSE'
])
return cmake_args
| lgpl-2.1 |
Akshay0724/scikit-learn | sklearn/grid_search.py | 16 | 40213 | """
The :mod:`sklearn.grid_search` includes utilities to fine-tune the parameters
of an estimator.
"""
from __future__ import print_function
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>,
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
from collections import Mapping, namedtuple, Sized
from functools import partial, reduce
from itertools import product
import operator
import warnings
import numpy as np
from .base import BaseEstimator, is_classifier, clone
from .base import MetaEstimatorMixin
from .cross_validation import check_cv
from .cross_validation import _fit_and_score
from .externals.joblib import Parallel, delayed
from .externals import six
from .utils import check_random_state
from .utils.random import sample_without_replacement
from .utils.validation import _num_samples, indexable
from .utils.metaestimators import if_delegate_has_method
from .metrics.scorer import check_scoring
from .exceptions import ChangedBehaviorWarning
__all__ = ['GridSearchCV', 'ParameterGrid', 'fit_grid_point',
'ParameterSampler', 'RandomizedSearchCV']
warnings.warn("This module was deprecated in version 0.18 in favor of the "
"model_selection module into which all the refactored classes "
"and functions are moved. This module will be removed in 0.20.",
DeprecationWarning)
class ParameterGrid(object):
"""Grid of parameters with a discrete number of values for each.
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :class:`sklearn.model_selection.ParameterGrid` instead.
Can be used to iterate over parameter value combinations with the
Python built-in function iter.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
param_grid : dict of string to sequence, or sequence of such
The parameter grid to explore, as a dictionary mapping estimator
parameters to sequences of allowed values.
An empty dict signifies default parameters.
A sequence of dicts signifies a sequence of grids to search, and is
useful to avoid exploring parameter combinations that make no sense
or have no effect. See the examples below.
Examples
--------
>>> from sklearn.grid_search import ParameterGrid
>>> param_grid = {'a': [1, 2], 'b': [True, False]}
>>> list(ParameterGrid(param_grid)) == (
... [{'a': 1, 'b': True}, {'a': 1, 'b': False},
... {'a': 2, 'b': True}, {'a': 2, 'b': False}])
True
>>> grid = [{'kernel': ['linear']}, {'kernel': ['rbf'], 'gamma': [1, 10]}]
>>> list(ParameterGrid(grid)) == [{'kernel': 'linear'},
... {'kernel': 'rbf', 'gamma': 1},
... {'kernel': 'rbf', 'gamma': 10}]
True
>>> ParameterGrid(grid)[1] == {'kernel': 'rbf', 'gamma': 1}
True
See also
--------
:class:`GridSearchCV`:
uses ``ParameterGrid`` to perform a full parallelized parameter search.
"""
def __init__(self, param_grid):
if isinstance(param_grid, Mapping):
# wrap dictionary in a singleton list to support either dict
# or list of dicts
param_grid = [param_grid]
self.param_grid = param_grid
def __iter__(self):
"""Iterate over the points in the grid.
Returns
-------
params : iterator over dict of string to any
Yields dictionaries mapping each estimator parameter to one of its
allowed values.
"""
for p in self.param_grid:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(p.items())
if not items:
yield {}
else:
keys, values = zip(*items)
for v in product(*values):
params = dict(zip(keys, v))
yield params
def __len__(self):
"""Number of points on the grid."""
# Product function that can handle iterables (np.product can't).
product = partial(reduce, operator.mul)
return sum(product(len(v) for v in p.values()) if p else 1
for p in self.param_grid)
def __getitem__(self, ind):
"""Get the parameters that would be ``ind``th in iteration
Parameters
----------
ind : int
The iteration index
Returns
-------
params : dict of string to any
Equal to list(self)[ind]
"""
# This is used to make discrete sampling without replacement memory
# efficient.
for sub_grid in self.param_grid:
# XXX: could memoize information used here
if not sub_grid:
if ind == 0:
return {}
else:
ind -= 1
continue
# Reverse so most frequent cycling parameter comes first
keys, values_lists = zip(*sorted(sub_grid.items())[::-1])
sizes = [len(v_list) for v_list in values_lists]
total = np.product(sizes)
if ind >= total:
# Try the next grid
ind -= total
else:
out = {}
for key, v_list, n in zip(keys, values_lists, sizes):
ind, offset = divmod(ind, n)
out[key] = v_list[offset]
return out
raise IndexError('ParameterGrid index out of range')
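# Worked example: for {'a': [1, 2], 'b': [True, False]} the reverse-sorted
# keys are ('b', 'a') with sizes (2, 2). For ind=3:
#   divmod(3, 2) -> (1, 1)  =>  b = [True, False][1] = False
#   divmod(1, 2) -> (0, 1)  =>  a = [1, 2][1] = 2
# so grid[3] == {'a': 2, 'b': False}, matching list(grid)[3].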
class ParameterSampler(object):
"""Generator on parameters sampled from given distributions.
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :class:`sklearn.model_selection.ParameterSampler` instead.
Non-deterministic iterable over random candidate combinations for hyper-parameter search. If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Note that as of SciPy 0.12, the ``scipy.stats.distributions`` do not accept
a custom RNG instance and always use the singleton RNG from
``numpy.random``. Hence setting ``random_state`` will not guarantee a
deterministic iteration whenever ``scipy.stats`` distributions are used to
define the parameter search space.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
param_distributions : dict
Dictionary where the keys are parameters and values
are distributions from which a parameter is to be sampled.
Distributions either have to provide a ``rvs`` function
to sample from them, or can be given as a list of values,
where a uniform distribution is assumed.
n_iter : integer
Number of parameter settings that are produced.
random_state : int or RandomState
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
Returns
-------
params : dict of string to any
**Yields** dictionaries mapping each estimator parameter to
a sampled value.
Examples
--------
>>> from sklearn.grid_search import ParameterSampler
>>> from scipy.stats.distributions import expon
>>> import numpy as np
>>> np.random.seed(0)
>>> param_grid = {'a':[1, 2], 'b': expon()}
>>> param_list = list(ParameterSampler(param_grid, n_iter=4))
>>> rounded_list = [dict((k, round(v, 6)) for (k, v) in d.items())
... for d in param_list]
>>> rounded_list == [{'b': 0.89856, 'a': 1},
... {'b': 0.923223, 'a': 1},
... {'b': 1.878964, 'a': 2},
... {'b': 1.038159, 'a': 2}]
True
"""
def __init__(self, param_distributions, n_iter, random_state=None):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
def __iter__(self):
# check if all distributions are given as lists
# in this case we want to sample without replacement
all_lists = np.all([not hasattr(v, "rvs")
for v in self.param_distributions.values()])
rnd = check_random_state(self.random_state)
if all_lists:
# look up sampled parameter settings in parameter grid
param_grid = ParameterGrid(self.param_distributions)
grid_size = len(param_grid)
if grid_size < self.n_iter:
raise ValueError(
"The total space of parameters %d is smaller "
"than n_iter=%d." % (grid_size, self.n_iter)
+ " For exhaustive searches, use GridSearchCV.")
for i in sample_without_replacement(grid_size, self.n_iter,
random_state=rnd):
yield param_grid[i]
else:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(self.param_distributions.items())
for _ in six.moves.range(self.n_iter):
params = dict()
for k, v in items:
if hasattr(v, "rvs"):
params[k] = v.rvs()
else:
params[k] = v[rnd.randint(len(v))]
yield params
def __len__(self):
"""Number of points that will be sampled."""
return self.n_iter
def fit_grid_point(X, y, estimator, parameters, train, test, scorer,
verbose, error_score='raise', **fit_params):
"""Run fit on one set of parameters.
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :func:`sklearn.model_selection.fit_grid_point` instead.
Parameters
----------
X : array-like, sparse matrix or list
Input data.
y : array-like or None
Targets for input data.
estimator : estimator object
An object of that type is instantiated for each grid point.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
parameters : dict
Parameters to be set on estimator for this grid point.
train : ndarray, dtype int or bool
Boolean mask or indices for training set.
test : ndarray, dtype int or bool
Boolean mask or indices for test set.
scorer : callable or None.
If provided must be a scorer callable object / function with signature
``scorer(estimator, X, y)``.
verbose : int
Verbosity level.
**fit_params : kwargs
Additional parameter passed to the fit function of the estimator.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Returns
-------
score : float
Score of this parameter setting on given training / test split.
parameters : dict
The parameters that have been evaluated.
n_samples_test : int
Number of test samples in this split.
"""
score, n_samples_test, _ = _fit_and_score(estimator, X, y, scorer, train,
test, verbose, parameters,
fit_params, error_score)
return score, parameters, n_samples_test
def _check_param_grid(param_grid):
if hasattr(param_grid, 'items'):
param_grid = [param_grid]
for p in param_grid:
for name, v in p.items():
if isinstance(v, np.ndarray) and v.ndim > 1:
raise ValueError("Parameter array should be one-dimensional.")
check = [isinstance(v, k) for k in (list, tuple, np.ndarray)]
if True not in check:
raise ValueError("Parameter values for parameter ({0}) need "
"to be a sequence.".format(name))
if len(v) == 0:
raise ValueError("Parameter values for parameter ({0}) need "
"to be a non-empty sequence.".format(name))
class _CVScoreTuple (namedtuple('_CVScoreTuple',
('parameters',
'mean_validation_score',
'cv_validation_scores'))):
# A raw namedtuple is very memory efficient as it packs the attributes
# in a struct to get rid of the __dict__ of attributes in particular it
# does not copy the string for the keys on each instance.
# By deriving a namedtuple class just to introduce the __repr__ method we
# would also reintroduce the __dict__ on the instance. We avoid that by
# telling the Python interpreter that this subclass uses static __slots__
# instead of dynamic attributes. Furthermore we don't need any additional slot in the
# subclass so we set __slots__ to the empty tuple.
__slots__ = ()
def __repr__(self):
"""Simple custom repr to summarize the main info"""
return "mean: {0:.5f}, std: {1:.5f}, params: {2}".format(
self.mean_validation_score,
np.std(self.cv_validation_scores),
self.parameters)
class BaseSearchCV(six.with_metaclass(ABCMeta, BaseEstimator,
MetaEstimatorMixin)):
"""Base class for hyper parameter search with cross-validation."""
@abstractmethod
def __init__(self, estimator, scoring=None,
fit_params=None, n_jobs=1, iid=True,
refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs',
error_score='raise'):
self.scoring = scoring
self.estimator = estimator
self.n_jobs = n_jobs
self.fit_params = fit_params if fit_params is not None else {}
self.iid = iid
self.refit = refit
self.cv = cv
self.verbose = verbose
self.pre_dispatch = pre_dispatch
self.error_score = error_score
@property
def _estimator_type(self):
return self.estimator._estimator_type
@property
def classes_(self):
return self.best_estimator_.classes_
def score(self, X, y=None):
"""Returns the score on the given data, if the estimator has been refit.
This uses the score defined by ``scoring`` where provided, and the
``best_estimator_.score`` method otherwise.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Input data, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
Returns
-------
score : float
Notes
-----
* The long-standing behavior of this method changed in version 0.16.
* It no longer uses the metric provided by ``estimator.score`` if the
``scoring`` parameter was set when fitting.
"""
if self.scorer_ is None:
raise ValueError("No score function explicitly defined, "
"and the estimator doesn't provide one %s"
% self.best_estimator_)
if self.scoring is not None and hasattr(self.best_estimator_, 'score'):
warnings.warn("The long-standing behavior to use the estimator's "
"score function in {0}.score has changed. The "
"scoring parameter is now used."
"".format(self.__class__.__name__),
ChangedBehaviorWarning)
return self.scorer_(self.best_estimator_, X, y)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def predict(self, X):
"""Call predict on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict(X)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def predict_proba(self, X):
"""Call predict_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_proba``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict_proba(X)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def predict_log_proba(self, X):
"""Call predict_log_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_log_proba``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict_log_proba(X)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def decision_function(self, X):
"""Call decision_function on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``decision_function``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.decision_function(X)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def transform(self, X):
"""Call transform on the estimator with the best found parameters.
Only available if the underlying estimator supports ``transform`` and
``refit=True``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.transform(X)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def inverse_transform(self, Xt):
"""Call inverse_transform on the estimator with the best found parameters.
Only available if the underlying estimator implements ``inverse_transform`` and
``refit=True``.
Parameters
-----------
Xt : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.transform(Xt)
def _fit(self, X, y, parameter_iterable):
"""Actual fitting, performing the search over parameters."""
estimator = self.estimator
cv = self.cv
self.scorer_ = check_scoring(self.estimator, scoring=self.scoring)
n_samples = _num_samples(X)
X, y = indexable(X, y)
if y is not None:
if len(y) != n_samples:
raise ValueError('Target variable (y) has a different number '
'of samples (%i) than data (X: %i samples)'
% (len(y), n_samples))
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
if self.verbose > 0:
if isinstance(parameter_iterable, Sized):
n_candidates = len(parameter_iterable)
print("Fitting {0} folds for each of {1} candidates, totalling"
" {2} fits".format(len(cv), n_candidates,
n_candidates * len(cv)))
base_estimator = clone(self.estimator)
pre_dispatch = self.pre_dispatch
out = Parallel(
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=pre_dispatch
)(
delayed(_fit_and_score)(clone(base_estimator), X, y, self.scorer_,
train, test, self.verbose, parameters,
self.fit_params, return_parameters=True,
error_score=self.error_score)
for parameters in parameter_iterable
for train, test in cv)
# Out is a list of triplet: score, estimator, n_test_samples
n_fits = len(out)
n_folds = len(cv)
scores = list()
grid_scores = list()
for grid_start in range(0, n_fits, n_folds):
n_test_samples = 0
score = 0
all_scores = []
for this_score, this_n_test_samples, _, parameters in \
out[grid_start:grid_start + n_folds]:
all_scores.append(this_score)
if self.iid:
this_score *= this_n_test_samples
n_test_samples += this_n_test_samples
score += this_score
if self.iid:
score /= float(n_test_samples)
else:
score /= float(n_folds)
scores.append((score, parameters))
# TODO: shall we also store the test_fold_sizes?
grid_scores.append(_CVScoreTuple(
parameters,
score,
np.array(all_scores)))
# Store the computed scores
self.grid_scores_ = grid_scores
# Find the best parameters by comparing on the mean validation score:
# note that `sorted` is deterministic in the way it breaks ties
best = sorted(grid_scores, key=lambda x: x.mean_validation_score,
reverse=True)[0]
self.best_params_ = best.parameters
self.best_score_ = best.mean_validation_score
if self.refit:
# fit the best estimator using the entire dataset
# clone first to work around broken estimators
best_estimator = clone(base_estimator).set_params(
**best.parameters)
if y is not None:
best_estimator.fit(X, y, **self.fit_params)
else:
best_estimator.fit(X, **self.fit_params)
self.best_estimator_ = best_estimator
return self
class GridSearchCV(BaseSearchCV):
"""Exhaustive search over specified parameter values for an estimator.
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :class:`sklearn.model_selection.GridSearchCV` instead.
Important members are fit, predict.
GridSearchCV implements a "fit" and a "score" method.
It also implements "predict", "predict_proba", "decision_function",
"transform" and "inverse_transform" if they are implemented in the
estimator used.
The parameters of the estimator used to apply these methods are optimized
by cross-validated grid-search over a parameter grid.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
estimator : estimator object.
An object of that type is instantiated for each grid point.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
param_grid : dict or list of dictionaries
Dictionary with parameters names (string) as keys and lists of
parameter settings to try as values, or a list of such
dictionaries, in which case the grids spanned by each dictionary
in the list are explored. This enables searching over any sequence
of parameter settings.
scoring : string, callable or None, default=None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
If ``None``, the ``score`` method of the estimator is used.
fit_params : dict, optional
Parameters to pass to the fit method.
n_jobs : int, default: 1
The maximum number of estimators fit in parallel.
- If -1 all CPUs are used.
- If 1 is given, no parallel computing code is used at all,
which is useful for debugging.
- For ``n_jobs`` below -1, ``(n_cpus + n_jobs + 1)`` are used.
For example, with ``n_jobs = -2`` all CPUs but one are used.
.. versionchanged:: 0.17
Upgraded to joblib 0.9.3.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass,
:class:`sklearn.model_selection.StratifiedKFold` is used. In all
other cases, :class:`sklearn.model_selection.KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this GridSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Examples
--------
>>> from sklearn import svm, grid_search, datasets
>>> iris = datasets.load_iris()
>>> parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]}
>>> svr = svm.SVC()
>>> clf = grid_search.GridSearchCV(svr, parameters)
>>> clf.fit(iris.data, iris.target)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
GridSearchCV(cv=None, error_score=...,
estimator=SVC(C=1.0, cache_size=..., class_weight=..., coef0=...,
decision_function_shape='ovr', degree=..., gamma=...,
kernel='rbf', max_iter=-1, probability=False,
random_state=None, shrinking=True, tol=...,
verbose=False),
fit_params={}, iid=..., n_jobs=1,
param_grid=..., pre_dispatch=..., refit=...,
scoring=..., verbose=...)
Attributes
----------
grid_scores_ : list of named tuples
Contains scores for all parameter combinations in param_grid.
Each entry corresponds to one parameter setting.
Each named tuple has the attributes:
* ``parameters``, a dict of parameter settings
* ``mean_validation_score``, the mean score over the
cross-validation folds
* ``cv_validation_scores``, the list of scores for each fold
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
scorer_ : function
Scorer function used on the held out data to choose the best
parameters for the model.
Notes
------
The parameters selected are those that maximize the score of the left out
data, unless an explicit score is passed in which case it is used instead.
If `n_jobs` was set to a value higher than one, the data is copied for each
point in the grid (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
---------
:class:`ParameterGrid`:
generates all the combinations of a hyperparameter grid.
:func:`sklearn.cross_validation.train_test_split`:
utility function to split the data into a development set usable
for fitting a GridSearchCV instance and an evaluation set for
its final evaluation.
:func:`sklearn.metrics.make_scorer`:
Make a scorer from a performance metric or loss function.
"""
def __init__(self, estimator, param_grid, scoring=None, fit_params=None,
n_jobs=1, iid=True, refit=True, cv=None, verbose=0,
pre_dispatch='2*n_jobs', error_score='raise'):
super(GridSearchCV, self).__init__(
estimator, scoring, fit_params, n_jobs, iid,
refit, cv, verbose, pre_dispatch, error_score)
self.param_grid = param_grid
_check_param_grid(param_grid)
def fit(self, X, y=None):
"""Run fit with all sets of parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
"""
return self._fit(X, y, ParameterGrid(self.param_grid))
class RandomizedSearchCV(BaseSearchCV):
"""Randomized search on hyper parameters.
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :class:`sklearn.model_selection.RandomizedSearchCV` instead.
RandomizedSearchCV implements a "fit" and a "score" method.
It also implements "predict", "predict_proba", "decision_function",
"transform" and "inverse_transform" if they are implemented in the
estimator used.
The parameters of the estimator used to apply these methods are optimized
by cross-validated search over parameter settings.
In contrast to GridSearchCV, not all parameter values are tried out, but
rather a fixed number of parameter settings is sampled from the specified
distributions. The number of parameter settings that are tried is
given by n_iter.
If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Read more in the :ref:`User Guide <randomized_parameter_search>`.
Parameters
----------
estimator : estimator object.
An object of that type is instantiated for each grid point.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
param_distributions : dict
Dictionary with parameters names (string) as keys and distributions
or lists of parameters to try. Distributions must provide a ``rvs``
method for sampling (such as those from scipy.stats.distributions).
If a list is given, it is sampled uniformly.
n_iter : int, default=10
Number of parameter settings that are sampled. n_iter trades
off runtime vs quality of the solution.
scoring : string, callable or None, default=None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
If ``None``, the ``score`` method of the estimator is used.
fit_params : dict, optional
Parameters to pass to the fit method.
n_jobs : int, default: 1
The maximum number of estimators fit in parallel.
- If -1 all CPUs are used.
- If 1 is given, no parallel computing code is used at all,
which is useful for debugging.
- For ``n_jobs`` below -1, ``(n_cpus + n_jobs + 1)`` are used.
For example, with ``n_jobs = -2`` all CPUs but one are used.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass,
:class:`sklearn.model_selection.StratifiedKFold` is used. In all
other cases, :class:`sklearn.model_selection.KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this RandomizedSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
random_state : int or RandomState
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Attributes
----------
grid_scores_ : list of named tuples
Contains scores for all parameter combinations in param_grid.
Each entry corresponds to one parameter setting.
Each named tuple has the attributes:
* ``parameters``, a dict of parameter settings
* ``mean_validation_score``, the mean score over the
cross-validation folds
* ``cv_validation_scores``, the list of scores for each fold
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. the estimator
which gave the highest score (or smallest loss if specified)
on the held-out data. Not available if refit=False.
best_score_ : float
Score of best_estimator_ on the held-out data.
best_params_ : dict
Parameter setting that gave the best results on the held-out data.
Notes
-----
The parameters selected are those that maximize the score of the held-out
data, according to the scoring parameter.
If `n_jobs` was set to a value higher than one, the data is copied for each
parameter setting (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
--------
:class:`GridSearchCV`:
Does exhaustive search over a grid of parameters.
:class:`ParameterSampler`:
A generator over parameter settings, constructed from
param_distributions.
"""
def __init__(self, estimator, param_distributions, n_iter=10, scoring=None,
fit_params=None, n_jobs=1, iid=True, refit=True, cv=None,
verbose=0, pre_dispatch='2*n_jobs', random_state=None,
error_score='raise'):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
super(RandomizedSearchCV, self).__init__(
estimator=estimator, scoring=scoring, fit_params=fit_params,
n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose,
pre_dispatch=pre_dispatch, error_score=error_score)
def fit(self, X, y=None):
"""Run fit on the estimator with randomly drawn parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
"""
sampled_params = ParameterSampler(self.param_distributions,
self.n_iter,
random_state=self.random_state)
return self._fit(X, y, sampled_params)
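# --- Usage sketch (illustrative only, not part of this module) ---
# A minimal, hedged example of driving RandomizedSearchCV as defined
# above. The dataset, estimator and parameter ranges are arbitrary
# assumptions chosen for illustration.
if __name__ == '__main__':
    from scipy.stats import randint
    from sklearn.datasets import load_iris
    from sklearn.ensemble import RandomForestClassifier
    iris = load_iris()
    # Distributions must expose ``rvs``; plain lists are sampled uniformly.
    param_distributions = {'max_depth': [3, None],
                           'min_samples_split': randint(2, 11)}
    search = RandomizedSearchCV(RandomForestClassifier(random_state=0),
                                param_distributions, n_iter=10,
                                random_state=0)
    search.fit(iris.data, iris.target)
    print(search.best_params_, search.best_score_)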
| bsd-3-clause |
mbayon/TFG-MachineLearning | venv/lib/python3.6/site-packages/pandas/tests/scalar/test_period.py | 6 | 50302 | import pytest
import numpy as np
from datetime import datetime, date, timedelta
import pandas as pd
import pandas.util.testing as tm
import pandas.core.indexes.period as period
from pandas.compat import text_type, iteritems
from pandas.compat.numpy import np_datetime64_compat
from pandas._libs import tslib, period as libperiod
from pandas import Period, Timestamp, offsets
from pandas.tseries.frequencies import DAYS, MONTHS
class TestPeriodProperties(object):
"Test properties such as year, month, weekday, etc...."
def test_is_leap_year(self):
# GH 13727
for freq in ['A', 'M', 'D', 'H']:
p = Period('2000-01-01 00:00:00', freq=freq)
assert p.is_leap_year
assert isinstance(p.is_leap_year, bool)
p = Period('1999-01-01 00:00:00', freq=freq)
assert not p.is_leap_year
p = Period('2004-01-01 00:00:00', freq=freq)
assert p.is_leap_year
p = Period('2100-01-01 00:00:00', freq=freq)
assert not p.is_leap_year
def test_quarterly_negative_ordinals(self):
p = Period(ordinal=-1, freq='Q-DEC')
assert p.year == 1969
assert p.quarter == 4
assert isinstance(p, Period)
p = Period(ordinal=-2, freq='Q-DEC')
assert p.year == 1969
assert p.quarter == 3
assert isinstance(p, Period)
p = Period(ordinal=-2, freq='M')
assert p.year == 1969
assert p.month == 11
assert isinstance(p, Period)
def test_period_cons_quarterly(self):
# bugs in scikits.timeseries
for month in MONTHS:
freq = 'Q-%s' % month
exp = Period('1989Q3', freq=freq)
assert '1989Q3' in str(exp)
stamp = exp.to_timestamp('D', how='end')
p = Period(stamp, freq=freq)
assert p == exp
stamp = exp.to_timestamp('3D', how='end')
p = Period(stamp, freq=freq)
assert p == exp
def test_period_cons_annual(self):
# bugs in scikits.timeseries
for month in MONTHS:
freq = 'A-%s' % month
exp = Period('1989', freq=freq)
stamp = exp.to_timestamp('D', how='end') + timedelta(days=30)
p = Period(stamp, freq=freq)
assert p == exp + 1
assert isinstance(p, Period)
def test_period_cons_weekly(self):
for num in range(10, 17):
daystr = '2011-02-%d' % num
for day in DAYS:
freq = 'W-%s' % day
result = Period(daystr, freq=freq)
expected = Period(daystr, freq='D').asfreq(freq)
assert result == expected
assert isinstance(result, Period)
def test_period_from_ordinal(self):
p = pd.Period('2011-01', freq='M')
res = pd.Period._from_ordinal(p.ordinal, freq='M')
assert p == res
assert isinstance(res, Period)
def test_period_cons_nat(self):
p = Period('NaT', freq='M')
assert p is pd.NaT
p = Period('nat', freq='W-SUN')
assert p is pd.NaT
p = Period(tslib.iNaT, freq='D')
assert p is pd.NaT
p = Period(tslib.iNaT, freq='3D')
assert p is pd.NaT
p = Period(tslib.iNaT, freq='1D1H')
assert p is pd.NaT
p = Period('NaT')
assert p is pd.NaT
p = Period(tslib.iNaT)
assert p is pd.NaT
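# A multiplied frequency such as '3M' anchors at the same ordinal as the
# base frequency 'M', but arithmetic steps in units of the multiplier.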
def test_period_cons_mult(self):
p1 = Period('2011-01', freq='3M')
p2 = Period('2011-01', freq='M')
assert p1.ordinal == p2.ordinal
assert p1.freq == offsets.MonthEnd(3)
assert p1.freqstr == '3M'
assert p2.freq == offsets.MonthEnd()
assert p2.freqstr == 'M'
result = p1 + 1
assert result.ordinal == (p2 + 3).ordinal
assert result.freq == p1.freq
assert result.freqstr == '3M'
result = p1 - 1
assert result.ordinal == (p2 - 3).ordinal
assert result.freq == p1.freq
assert result.freqstr == '3M'
msg = ('Frequency must be positive, because it'
' represents span: -3M')
with tm.assert_raises_regex(ValueError, msg):
Period('2011-01', freq='-3M')
msg = ('Frequency must be positive, because it'
' represents span: 0M')
with tm.assert_raises_regex(ValueError, msg):
Period('2011-01', freq='0M')
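# Combined day/intraday frequencies such as '1D1H' collapse to a single
# tick offset (25 hours); mixing weekly with daily components is invalid.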
def test_period_cons_combined(self):
p = [(Period('2011-01', freq='1D1H'),
Period('2011-01', freq='1H1D'),
Period('2011-01', freq='H')),
(Period(ordinal=1, freq='1D1H'),
Period(ordinal=1, freq='1H1D'),
Period(ordinal=1, freq='H'))]
for p1, p2, p3 in p:
assert p1.ordinal == p3.ordinal
assert p2.ordinal == p3.ordinal
assert p1.freq == offsets.Hour(25)
assert p1.freqstr == '25H'
assert p2.freq == offsets.Hour(25)
assert p2.freqstr == '25H'
assert p3.freq == offsets.Hour()
assert p3.freqstr == 'H'
result = p1 + 1
assert result.ordinal == (p3 + 25).ordinal
assert result.freq == p1.freq
assert result.freqstr == '25H'
result = p2 + 1
assert result.ordinal == (p3 + 25).ordinal
assert result.freq == p2.freq
assert result.freqstr == '25H'
result = p1 - 1
assert result.ordinal == (p3 - 25).ordinal
assert result.freq == p1.freq
assert result.freqstr == '25H'
result = p2 - 1
assert result.ordinal == (p3 - 25).ordinal
assert result.freq == p2.freq
assert result.freqstr == '25H'
msg = ('Frequency must be positive, because it'
' represents span: -25H')
with tm.assert_raises_regex(ValueError, msg):
Period('2011-01', freq='-1D1H')
with tm.assert_raises_regex(ValueError, msg):
Period('2011-01', freq='-1H1D')
with tm.assert_raises_regex(ValueError, msg):
Period(ordinal=1, freq='-1D1H')
with tm.assert_raises_regex(ValueError, msg):
Period(ordinal=1, freq='-1H1D')
msg = ('Frequency must be positive, because it'
' represents span: 0D')
with tm.assert_raises_regex(ValueError, msg):
Period('2011-01', freq='0D0H')
with tm.assert_raises_regex(ValueError, msg):
Period(ordinal=1, freq='0D0H')
# Only day and intraday offsets can be combined with each other
msg = ('Invalid frequency: 1W1D')
with tm.assert_raises_regex(ValueError, msg):
Period('2011-01', freq='1W1D')
msg = ('Invalid frequency: 1D1W')
with tm.assert_raises_regex(ValueError, msg):
Period('2011-01', freq='1D1W')
def test_timestamp_tz_arg(self):
tm._skip_if_no_pytz()
import pytz
for case in ['Europe/Brussels', 'Asia/Tokyo', 'US/Pacific']:
p = Period('1/1/2005', freq='M').to_timestamp(tz=case)
exp = Timestamp('1/1/2005', tz='UTC').tz_convert(case)
exp_zone = pytz.timezone(case).normalize(p)
assert p == exp
assert p.tz == exp_zone.tzinfo
assert p.tz == exp.tz
p = Period('1/1/2005', freq='3H').to_timestamp(tz=case)
exp = Timestamp('1/1/2005', tz='UTC').tz_convert(case)
exp_zone = pytz.timezone(case).normalize(p)
assert p == exp
assert p.tz == exp_zone.tzinfo
assert p.tz == exp.tz
p = Period('1/1/2005', freq='A').to_timestamp(freq='A', tz=case)
exp = Timestamp('31/12/2005', tz='UTC').tz_convert(case)
exp_zone = pytz.timezone(case).normalize(p)
assert p == exp
assert p.tz == exp_zone.tzinfo
assert p.tz == exp.tz
p = Period('1/1/2005', freq='A').to_timestamp(freq='3H', tz=case)
exp = Timestamp('1/1/2005', tz='UTC').tz_convert(case)
exp_zone = pytz.timezone(case).normalize(p)
assert p == exp
assert p.tz == exp_zone.tzinfo
assert p.tz == exp.tz
def test_timestamp_tz_arg_dateutil(self):
from pandas._libs.tslib import _dateutil_gettz as gettz
from pandas._libs.tslib import maybe_get_tz
for case in ['dateutil/Europe/Brussels', 'dateutil/Asia/Tokyo',
'dateutil/US/Pacific']:
p = Period('1/1/2005', freq='M').to_timestamp(
tz=maybe_get_tz(case))
exp = Timestamp('1/1/2005', tz='UTC').tz_convert(case)
assert p == exp
assert p.tz == gettz(case.split('/', 1)[1])
assert p.tz == exp.tz
p = Period('1/1/2005',
freq='M').to_timestamp(freq='3H', tz=maybe_get_tz(case))
exp = Timestamp('1/1/2005', tz='UTC').tz_convert(case)
assert p == exp
assert p.tz == gettz(case.split('/', 1)[1])
assert p.tz == exp.tz
def test_timestamp_tz_arg_dateutil_from_string(self):
from pandas._libs.tslib import _dateutil_gettz as gettz
p = Period('1/1/2005',
freq='M').to_timestamp(tz='dateutil/Europe/Brussels')
assert p.tz == gettz('Europe/Brussels')
def test_timestamp_mult(self):
p = pd.Period('2011-01', freq='M')
assert p.to_timestamp(how='S') == pd.Timestamp('2011-01-01')
assert p.to_timestamp(how='E') == pd.Timestamp('2011-01-31')
p = pd.Period('2011-01', freq='3M')
assert p.to_timestamp(how='S') == pd.Timestamp('2011-01-01')
assert p.to_timestamp(how='E') == pd.Timestamp('2011-03-31')
def test_construction(self):
i1 = Period('1/1/2005', freq='M')
i2 = Period('Jan 2005')
assert i1 == i2
i1 = Period('2005', freq='A')
i2 = Period('2005')
i3 = Period('2005', freq='a')
assert i1 == i2
assert i1 == i3
i4 = Period('2005', freq='M')
i5 = Period('2005', freq='m')
pytest.raises(ValueError, i1.__ne__, i4)
assert i4 == i5
i1 = Period.now('Q')
i2 = Period(datetime.now(), freq='Q')
i3 = Period.now('q')
assert i1 == i2
assert i1 == i3
i1 = Period('1982', freq='min')
i2 = Period('1982', freq='MIN')
assert i1 == i2
i2 = Period('1982', freq=('Min', 1))
assert i1 == i2
i1 = Period(year=2005, month=3, day=1, freq='D')
i2 = Period('3/1/2005', freq='D')
assert i1 == i2
i3 = Period(year=2005, month=3, day=1, freq='d')
assert i1 == i3
i1 = Period('2007-01-01 09:00:00.001')
expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1000), freq='L')
assert i1 == expected
expected = Period(np_datetime64_compat(
'2007-01-01 09:00:00.001Z'), freq='L')
assert i1 == expected
i1 = Period('2007-01-01 09:00:00.00101')
expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1010), freq='U')
assert i1 == expected
expected = Period(np_datetime64_compat('2007-01-01 09:00:00.00101Z'),
freq='U')
assert i1 == expected
pytest.raises(ValueError, Period, ordinal=200701)
pytest.raises(ValueError, Period, '2007-1-1', freq='X')
def test_construction_bday(self):
# Biz day construction, roll forward if non-weekday
i1 = Period('3/10/12', freq='B')
i2 = Period('3/10/12', freq='D')
assert i1 == i2.asfreq('B')
i2 = Period('3/11/12', freq='D')
assert i1 == i2.asfreq('B')
i2 = Period('3/12/12', freq='D')
assert i1 == i2.asfreq('B')
i3 = Period('3/10/12', freq='b')
assert i1 == i3
i1 = Period(year=2012, month=3, day=10, freq='B')
i2 = Period('3/12/12', freq='B')
assert i1 == i2
def test_construction_quarter(self):
i1 = Period(year=2005, quarter=1, freq='Q')
i2 = Period('1/1/2005', freq='Q')
assert i1 == i2
i1 = Period(year=2005, quarter=3, freq='Q')
i2 = Period('9/1/2005', freq='Q')
assert i1 == i2
i1 = Period('2005Q1')
i2 = Period(year=2005, quarter=1, freq='Q')
i3 = Period('2005q1')
assert i1 == i2
assert i1 == i3
i1 = Period('05Q1')
assert i1 == i2
lower = Period('05q1')
assert i1 == lower
i1 = Period('1Q2005')
assert i1 == i2
lower = Period('1q2005')
assert i1 == lower
i1 = Period('1Q05')
assert i1 == i2
lower = Period('1q05')
assert i1 == lower
i1 = Period('4Q1984')
assert i1.year == 1984
lower = Period('4q1984')
assert i1 == lower
def test_construction_month(self):
expected = Period('2007-01', freq='M')
i1 = Period('200701', freq='M')
assert i1 == expected
i1 = Period('200701', freq='M')
assert i1 == expected
i1 = Period(200701, freq='M')
assert i1 == expected
i1 = Period(ordinal=200701, freq='M')
assert i1.year == 18695
i1 = Period(datetime(2007, 1, 1), freq='M')
i2 = Period('200701', freq='M')
assert i1 == i2
i1 = Period(date(2007, 1, 1), freq='M')
i2 = Period(datetime(2007, 1, 1), freq='M')
i3 = Period(np.datetime64('2007-01-01'), freq='M')
i4 = Period(np_datetime64_compat('2007-01-01 00:00:00Z'), freq='M')
i5 = Period(np_datetime64_compat('2007-01-01 00:00:00.000Z'), freq='M')
assert i1 == i2
assert i1 == i3
assert i1 == i4
assert i1 == i5
def test_period_constructor_offsets(self):
assert (Period('1/1/2005', freq=offsets.MonthEnd()) ==
Period('1/1/2005', freq='M'))
assert (Period('2005', freq=offsets.YearEnd()) ==
Period('2005', freq='A'))
assert (Period('2005', freq=offsets.MonthEnd()) ==
Period('2005', freq='M'))
assert (Period('3/10/12', freq=offsets.BusinessDay()) ==
Period('3/10/12', freq='B'))
assert (Period('3/10/12', freq=offsets.Day()) ==
Period('3/10/12', freq='D'))
assert (Period(year=2005, quarter=1,
freq=offsets.QuarterEnd(startingMonth=12)) ==
Period(year=2005, quarter=1, freq='Q'))
assert (Period(year=2005, quarter=2,
freq=offsets.QuarterEnd(startingMonth=12)) ==
Period(year=2005, quarter=2, freq='Q'))
assert (Period(year=2005, month=3, day=1, freq=offsets.Day()) ==
Period(year=2005, month=3, day=1, freq='D'))
assert (Period(year=2012, month=3, day=10, freq=offsets.BDay()) ==
Period(year=2012, month=3, day=10, freq='B'))
expected = Period('2005-03-01', freq='3D')
assert (Period(year=2005, month=3, day=1,
freq=offsets.Day(3)) == expected)
assert Period(year=2005, month=3, day=1, freq='3D') == expected
assert (Period(year=2012, month=3, day=10,
freq=offsets.BDay(3)) ==
Period(year=2012, month=3, day=10, freq='3B'))
assert (Period(200701, freq=offsets.MonthEnd()) ==
Period(200701, freq='M'))
i1 = Period(ordinal=200701, freq=offsets.MonthEnd())
i2 = Period(ordinal=200701, freq='M')
assert i1 == i2
assert i1.year == 18695
assert i2.year == 18695
i1 = Period(datetime(2007, 1, 1), freq='M')
i2 = Period('200701', freq='M')
assert i1 == i2
i1 = Period(date(2007, 1, 1), freq='M')
i2 = Period(datetime(2007, 1, 1), freq='M')
i3 = Period(np.datetime64('2007-01-01'), freq='M')
i4 = Period(np_datetime64_compat('2007-01-01 00:00:00Z'), freq='M')
i5 = Period(np_datetime64_compat('2007-01-01 00:00:00.000Z'), freq='M')
assert i1 == i2
assert i1 == i3
assert i1 == i4
assert i1 == i5
i1 = Period('2007-01-01 09:00:00.001')
expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1000), freq='L')
assert i1 == expected
expected = Period(np_datetime64_compat(
'2007-01-01 09:00:00.001Z'), freq='L')
assert i1 == expected
i1 = Period('2007-01-01 09:00:00.00101')
expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1010), freq='U')
assert i1 == expected
expected = Period(np_datetime64_compat('2007-01-01 09:00:00.00101Z'),
freq='U')
assert i1 == expected
pytest.raises(ValueError, Period, ordinal=200701)
pytest.raises(ValueError, Period, '2007-1-1', freq='X')
def test_freq_str(self):
i1 = Period('1982', freq='Min')
assert i1.freq == offsets.Minute()
assert i1.freqstr == 'T'
def test_period_deprecated_freq(self):
cases = {"M": ["MTH", "MONTH", "MONTHLY", "Mth", "month", "monthly"],
"B": ["BUS", "BUSINESS", "BUSINESSLY", "WEEKDAY", "bus"],
"D": ["DAY", "DLY", "DAILY", "Day", "Dly", "Daily"],
"H": ["HR", "HOUR", "HRLY", "HOURLY", "hr", "Hour", "HRly"],
"T": ["minute", "MINUTE", "MINUTELY", "minutely"],
"S": ["sec", "SEC", "SECOND", "SECONDLY", "second"],
"L": ["MILLISECOND", "MILLISECONDLY", "millisecond"],
"U": ["MICROSECOND", "MICROSECONDLY", "microsecond"],
"N": ["NANOSECOND", "NANOSECONDLY", "nanosecond"]}
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
for exp, freqs in iteritems(cases):
for freq in freqs:
with tm.assert_raises_regex(ValueError, msg):
Period('2016-03-01 09:00', freq=freq)
with tm.assert_raises_regex(ValueError, msg):
Period(ordinal=1, freq=freq)
# check that supported freq aliases still work
p1 = Period('2016-03-01 09:00', freq=exp)
p2 = Period(ordinal=1, freq=exp)
assert isinstance(p1, Period)
assert isinstance(p2, Period)
def test_hash(self):
assert (hash(Period('2011-01', freq='M')) ==
hash(Period('2011-01', freq='M')))
assert (hash(Period('2011-01-01', freq='D')) !=
hash(Period('2011-01', freq='M')))
assert (hash(Period('2011-01', freq='3M')) !=
hash(Period('2011-01', freq='2M')))
assert (hash(Period('2011-01', freq='M')) !=
hash(Period('2011-02', freq='M')))
def test_repr(self):
p = Period('Jan-2000')
assert '2000-01' in repr(p)
p = Period('2000-12-15')
assert '2000-12-15' in repr(p)
def test_repr_nat(self):
p = Period('nat', freq='M')
assert repr(tslib.NaT) in repr(p)
def test_millisecond_repr(self):
p = Period('2000-01-01 12:15:02.123')
assert repr(p) == "Period('2000-01-01 12:15:02.123', 'L')"
def test_microsecond_repr(self):
p = Period('2000-01-01 12:15:02.123567')
assert repr(p) == "Period('2000-01-01 12:15:02.123567', 'U')"
def test_strftime(self):
p = Period('2000-1-1 12:34:12', freq='S')
res = p.strftime('%Y-%m-%d %H:%M:%S')
assert res == '2000-01-01 12:34:12'
assert isinstance(res, text_type) # GH3363
def test_sub_delta(self):
left, right = Period('2011', freq='A'), Period('2007', freq='A')
result = left - right
assert result == 4
with pytest.raises(period.IncompatibleFrequency):
left - Period('2007-01', freq='M')
def test_to_timestamp(self):
p = Period('1982', freq='A')
start_ts = p.to_timestamp(how='S')
aliases = ['s', 'StarT', 'BEGIn']
for a in aliases:
assert start_ts == p.to_timestamp('D', how=a)
# a multiplied freq should not affect the result
assert start_ts == p.to_timestamp('3D', how=a)
end_ts = p.to_timestamp(how='E')
aliases = ['e', 'end', 'FINIsH']
for a in aliases:
assert end_ts == p.to_timestamp('D', how=a)
assert end_ts == p.to_timestamp('3D', how=a)
from_lst = ['A', 'Q', 'M', 'W', 'B', 'D', 'H', 'Min', 'S']
def _ex(p):
return Timestamp((p + 1).start_time.value - 1)
for i, fcode in enumerate(from_lst):
p = Period('1982', freq=fcode)
result = p.to_timestamp().to_period(fcode)
assert result == p
assert p.start_time == p.to_timestamp(how='S')
assert p.end_time == _ex(p)
# Frequency other than daily
p = Period('1985', freq='A')
result = p.to_timestamp('H', how='end')
expected = datetime(1985, 12, 31, 23)
assert result == expected
result = p.to_timestamp('3H', how='end')
assert result == expected
result = p.to_timestamp('T', how='end')
expected = datetime(1985, 12, 31, 23, 59)
assert result == expected
result = p.to_timestamp('2T', how='end')
assert result == expected
result = p.to_timestamp(how='end')
expected = datetime(1985, 12, 31)
assert result == expected
expected = datetime(1985, 1, 1)
result = p.to_timestamp('H', how='start')
assert result == expected
result = p.to_timestamp('T', how='start')
assert result == expected
result = p.to_timestamp('S', how='start')
assert result == expected
result = p.to_timestamp('3H', how='start')
assert result == expected
result = p.to_timestamp('5S', how='start')
assert result == expected
def test_start_time(self):
freq_lst = ['A', 'Q', 'M', 'D', 'H', 'T', 'S']
xp = datetime(2012, 1, 1)
for f in freq_lst:
p = Period('2012', freq=f)
assert p.start_time == xp
assert Period('2012', freq='B').start_time == datetime(2012, 1, 2)
assert Period('2012', freq='W').start_time == datetime(2011, 12, 26)
def test_end_time(self):
p = Period('2012', freq='A')
def _ex(*args):
return Timestamp(Timestamp(datetime(*args)).value - 1)
xp = _ex(2013, 1, 1)
assert xp == p.end_time
p = Period('2012', freq='Q')
xp = _ex(2012, 4, 1)
assert xp == p.end_time
p = Period('2012', freq='M')
xp = _ex(2012, 2, 1)
assert xp == p.end_time
p = Period('2012', freq='D')
xp = _ex(2012, 1, 2)
assert xp == p.end_time
p = Period('2012', freq='H')
xp = _ex(2012, 1, 1, 1)
assert xp == p.end_time
p = Period('2012', freq='B')
xp = _ex(2012, 1, 3)
assert xp == p.end_time
p = Period('2012', freq='W')
xp = _ex(2012, 1, 2)
assert xp == p.end_time
# Test for GH 11738
p = Period('2012', freq='15D')
xp = _ex(2012, 1, 16)
assert xp == p.end_time
p = Period('2012', freq='1D1H')
xp = _ex(2012, 1, 2, 1)
assert xp == p.end_time
p = Period('2012', freq='1H1D')
xp = _ex(2012, 1, 2, 1)
assert xp == p.end_time
def test_anchor_week_end_time(self):
def _ex(*args):
return Timestamp(Timestamp(datetime(*args)).value - 1)
p = Period('2013-1-1', 'W-SAT')
xp = _ex(2013, 1, 6)
assert p.end_time == xp
def test_properties_annually(self):
# Test properties on Periods with annually frequency.
a_date = Period(freq='A', year=2007)
assert a_date.year == 2007
def test_properties_quarterly(self):
# Test properties on Periods with quarterly frequency.
qedec_date = Period(freq="Q-DEC", year=2007, quarter=1)
qejan_date = Period(freq="Q-JAN", year=2007, quarter=1)
qejun_date = Period(freq="Q-JUN", year=2007, quarter=1)
#
for x in range(3):
for qd in (qedec_date, qejan_date, qejun_date):
assert (qd + x).qyear == 2007
assert (qd + x).quarter == x + 1
def test_properties_monthly(self):
# Test properties on Periods with monthly frequency.
m_date = Period(freq='M', year=2007, month=1)
for x in range(11):
m_ival_x = m_date + x
assert m_ival_x.year == 2007
if 1 <= x + 1 <= 3:
assert m_ival_x.quarter == 1
elif 4 <= x + 1 <= 6:
assert m_ival_x.quarter == 2
elif 7 <= x + 1 <= 9:
assert m_ival_x.quarter == 3
elif 10 <= x + 1 <= 12:
assert m_ival_x.quarter == 4
assert m_ival_x.month == x + 1
def test_properties_weekly(self):
# Test properties on Periods with weekly frequency.
w_date = Period(freq='W', year=2007, month=1, day=7)
#
assert w_date.year == 2007
assert w_date.quarter == 1
assert w_date.month == 1
assert w_date.week == 1
assert (w_date - 1).week == 52
assert w_date.days_in_month == 31
assert Period(freq='W', year=2012,
month=2, day=1).days_in_month == 29
def test_properties_weekly_legacy(self):
# Test properties on Periods with weekly frequency (legacy aliases).
w_date = Period(freq='W', year=2007, month=1, day=7)
assert w_date.year == 2007
assert w_date.quarter == 1
assert w_date.month == 1
assert w_date.week == 1
assert (w_date - 1).week == 52
assert w_date.days_in_month == 31
exp = Period(freq='W', year=2012, month=2, day=1)
assert exp.days_in_month == 29
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with tm.assert_raises_regex(ValueError, msg):
Period(freq='WK', year=2007, month=1, day=7)
def test_properties_daily(self):
# Test properties on Periods with daily frequency.
b_date = Period(freq='B', year=2007, month=1, day=1)
#
assert b_date.year == 2007
assert b_date.quarter == 1
assert b_date.month == 1
assert b_date.day == 1
assert b_date.weekday == 0
assert b_date.dayofyear == 1
assert b_date.days_in_month == 31
assert Period(freq='B', year=2012,
month=2, day=1).days_in_month == 29
d_date = Period(freq='D', year=2007, month=1, day=1)
assert d_date.year == 2007
assert d_date.quarter == 1
assert d_date.month == 1
assert d_date.day == 1
assert d_date.weekday == 0
assert d_date.dayofyear == 1
assert d_date.days_in_month == 31
assert Period(freq='D', year=2012, month=2,
day=1).days_in_month == 29
def test_properties_hourly(self):
# Test properties on Periods with hourly frequency.
h_date1 = Period(freq='H', year=2007, month=1, day=1, hour=0)
h_date2 = Period(freq='2H', year=2007, month=1, day=1, hour=0)
for h_date in [h_date1, h_date2]:
assert h_date.year == 2007
assert h_date.quarter == 1
assert h_date.month == 1
assert h_date.day == 1
assert h_date.weekday == 0
assert h_date.dayofyear == 1
assert h_date.hour == 0
assert h_date.days_in_month == 31
assert Period(freq='H', year=2012, month=2, day=1,
hour=0).days_in_month == 29
def test_properties_minutely(self):
# Test properties on Periods with minutely frequency.
t_date = Period(freq='Min', year=2007, month=1, day=1, hour=0,
minute=0)
#
assert t_date.quarter == 1
assert t_date.month == 1
assert t_date.day == 1
assert t_date.weekday == 0
assert t_date.dayofyear == 1
assert t_date.hour == 0
assert t_date.minute == 0
assert t_date.days_in_month == 31
assert Period(freq='D', year=2012, month=2, day=1, hour=0,
minute=0).days_in_month == 29
def test_properties_secondly(self):
# Test properties on Periods with secondly frequency.
s_date = Period(freq='Min', year=2007, month=1, day=1, hour=0,
minute=0, second=0)
#
assert s_date.year == 2007
assert s_date.quarter == 1
assert s_date.month == 1
assert s_date.day == 1
assert s_date.weekday == 0
assert s_date.dayofyear == 1
assert s_date.hour == 0
assert s_date.minute == 0
assert s_date.second == 0
assert s_date.days_in_month == 31
assert Period(freq='Min', year=2012, month=2, day=1, hour=0,
minute=0, second=0).days_in_month == 29
def test_pnow(self):
# deprecation, xref #13790
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
period.pnow('D')
def test_constructor_corner(self):
expected = Period('2007-01', freq='2M')
assert Period(year=2007, month=1, freq='2M') == expected
pytest.raises(ValueError, Period, datetime.now())
pytest.raises(ValueError, Period, datetime.now().date())
pytest.raises(ValueError, Period, 1.6, freq='D')
pytest.raises(ValueError, Period, ordinal=1.6, freq='D')
pytest.raises(ValueError, Period, ordinal=2, value=1, freq='D')
assert Period(None) is pd.NaT
pytest.raises(ValueError, Period, month=1)
p = Period('2007-01-01', freq='D')
result = Period(p, freq='A')
exp = Period('2007', freq='A')
assert result == exp
def test_constructor_infer_freq(self):
p = Period('2007-01-01')
assert p.freq == 'D'
p = Period('2007-01-01 07')
assert p.freq == 'H'
p = Period('2007-01-01 07:10')
assert p.freq == 'T'
p = Period('2007-01-01 07:10:15')
assert p.freq == 'S'
p = Period('2007-01-01 07:10:15.123')
assert p.freq == 'L'
p = Period('2007-01-01 07:10:15.123000')
assert p.freq == 'L'
p = Period('2007-01-01 07:10:15.123400')
assert p.freq == 'U'
def test_badinput(self):
pytest.raises(ValueError, Period, '-2000', 'A')
pytest.raises(tslib.DateParseError, Period, '0', 'A')
pytest.raises(tslib.DateParseError, Period, '1/1/-2000', 'A')
def test_multiples(self):
result1 = Period('1989', freq='2A')
result2 = Period('1989', freq='A')
assert result1.ordinal == result2.ordinal
assert result1.freqstr == '2A-DEC'
assert result2.freqstr == 'A-DEC'
assert result1.freq == offsets.YearEnd(2)
assert result2.freq == offsets.YearEnd()
assert (result1 + 1).ordinal == result1.ordinal + 2
assert (1 + result1).ordinal == result1.ordinal + 2
assert (result1 - 1).ordinal == result2.ordinal - 2
assert (-1 + result1).ordinal == result2.ordinal - 2
def test_round_trip(self):
p = Period('2000Q1')
new_p = tm.round_trip_pickle(p)
assert new_p == p
class TestPeriodField(object):
def test_get_period_field_raises_on_out_of_range(self):
pytest.raises(ValueError, libperiod.get_period_field, -1, 0, 0)
def test_get_period_field_array_raises_on_out_of_range(self):
pytest.raises(ValueError, libperiod.get_period_field_arr, -1,
np.empty(1), 0)
class TestComparisons(object):
def setup_method(self, method):
self.january1 = Period('2000-01', 'M')
self.january2 = Period('2000-01', 'M')
self.february = Period('2000-02', 'M')
self.march = Period('2000-03', 'M')
self.day = Period('2012-01-01', 'D')
def test_equal(self):
assert self.january1 == self.january2
def test_equal_Raises_Value(self):
with pytest.raises(period.IncompatibleFrequency):
self.january1 == self.day
def test_notEqual(self):
assert self.january1 != 1
assert self.january1 != self.february
def test_greater(self):
assert self.february > self.january1
def test_greater_Raises_Value(self):
with pytest.raises(period.IncompatibleFrequency):
self.january1 > self.day
def test_greater_Raises_Type(self):
with pytest.raises(TypeError):
self.january1 > 1
def test_greaterEqual(self):
assert self.january1 >= self.january2
def test_greaterEqual_Raises_Value(self):
with pytest.raises(period.IncompatibleFrequency):
self.january1 >= self.day
with pytest.raises(TypeError):
print(self.january1 >= 1)
def test_smallerEqual(self):
assert self.january1 <= self.january2
def test_smallerEqual_Raises_Value(self):
with pytest.raises(period.IncompatibleFrequency):
self.january1 <= self.day
def test_smallerEqual_Raises_Type(self):
with pytest.raises(TypeError):
self.january1 <= 1
def test_smaller(self):
assert self.january1 < self.february
def test_smaller_Raises_Value(self):
with pytest.raises(period.IncompatibleFrequency):
self.january1 < self.day
def test_smaller_Raises_Type(self):
with pytest.raises(TypeError):
self.january1 < 1
def test_sort(self):
periods = [self.march, self.january1, self.february]
correctPeriods = [self.january1, self.february, self.march]
assert sorted(periods) == correctPeriods
def test_period_nat_comp(self):
p_nat = Period('NaT', freq='D')
p = Period('2011-01-01', freq='D')
nat = pd.Timestamp('NaT')
t = pd.Timestamp('2011-01-01')
# confirm Period('NaT') behaves identically to Timestamp('NaT')
for left, right in [(p_nat, p), (p, p_nat), (p_nat, p_nat), (nat, t),
(t, nat), (nat, nat)]:
assert not left < right
assert not left > right
assert not left == right
assert left != right
assert not left <= right
assert not left >= right
class TestMethods(object):
def test_add(self):
dt1 = Period(freq='D', year=2008, month=1, day=1)
dt2 = Period(freq='D', year=2008, month=1, day=2)
assert dt1 + 1 == dt2
assert 1 + dt1 == dt2
def test_add_pdnat(self):
p = pd.Period('2011-01', freq='M')
assert p + pd.NaT is pd.NaT
assert pd.NaT + p is pd.NaT
p = pd.Period('NaT', freq='M')
assert p + pd.NaT is pd.NaT
assert pd.NaT + p is pd.NaT
def test_add_raises(self):
# GH 4731
dt1 = Period(freq='D', year=2008, month=1, day=1)
dt2 = Period(freq='D', year=2008, month=1, day=2)
msg = r"unsupported operand type\(s\)"
with tm.assert_raises_regex(TypeError, msg):
dt1 + "str"
msg = r"unsupported operand type\(s\)"
with tm.assert_raises_regex(TypeError, msg):
"str" + dt1
with tm.assert_raises_regex(TypeError, msg):
dt1 + dt2
def test_sub(self):
dt1 = Period('2011-01-01', freq='D')
dt2 = Period('2011-01-15', freq='D')
assert dt1 - dt2 == -14
assert dt2 - dt1 == 14
msg = r"Input has different freq=M from Period\(freq=D\)"
with tm.assert_raises_regex(period.IncompatibleFrequency, msg):
dt1 - pd.Period('2011-02', freq='M')
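# Offset arithmetic: offsets compatible with the Period's frequency shift
# it; incompatible offsets raise IncompatibleFrequency, and the reflected
# np.timedelta64 + Period case raises TypeError instead.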
def test_add_offset(self):
# freq is DateOffset
for freq in ['A', '2A', '3A']:
p = Period('2011', freq=freq)
exp = Period('2013', freq=freq)
assert p + offsets.YearEnd(2) == exp
assert offsets.YearEnd(2) + p == exp
for o in [offsets.YearBegin(2), offsets.MonthBegin(1),
offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
with pytest.raises(period.IncompatibleFrequency):
p + o
if isinstance(o, np.timedelta64):
with pytest.raises(TypeError):
o + p
else:
with pytest.raises(period.IncompatibleFrequency):
o + p
for freq in ['M', '2M', '3M']:
p = Period('2011-03', freq=freq)
exp = Period('2011-05', freq=freq)
assert p + offsets.MonthEnd(2) == exp
assert offsets.MonthEnd(2) + p == exp
exp = Period('2012-03', freq=freq)
assert p + offsets.MonthEnd(12) == exp
assert offsets.MonthEnd(12) + p == exp
for o in [offsets.YearBegin(2), offsets.MonthBegin(1),
offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
with pytest.raises(period.IncompatibleFrequency):
p + o
if isinstance(o, np.timedelta64):
with pytest.raises(TypeError):
o + p
else:
with pytest.raises(period.IncompatibleFrequency):
o + p
# freq is Tick
for freq in ['D', '2D', '3D']:
p = Period('2011-04-01', freq=freq)
exp = Period('2011-04-06', freq=freq)
assert p + offsets.Day(5) == exp
assert offsets.Day(5) + p == exp
exp = Period('2011-04-02', freq=freq)
assert p + offsets.Hour(24) == exp
assert offsets.Hour(24) + p == exp
exp = Period('2011-04-03', freq=freq)
assert p + np.timedelta64(2, 'D') == exp
with pytest.raises(TypeError):
np.timedelta64(2, 'D') + p
exp = Period('2011-04-02', freq=freq)
assert p + np.timedelta64(3600 * 24, 's') == exp
with pytest.raises(TypeError):
np.timedelta64(3600 * 24, 's') + p
exp = Period('2011-03-30', freq=freq)
assert p + timedelta(-2) == exp
assert timedelta(-2) + p == exp
exp = Period('2011-04-03', freq=freq)
assert p + timedelta(hours=48) == exp
assert timedelta(hours=48) + p == exp
for o in [offsets.YearBegin(2), offsets.MonthBegin(1),
offsets.Minute(), np.timedelta64(4, 'h'),
timedelta(hours=23)]:
with pytest.raises(period.IncompatibleFrequency):
p + o
if isinstance(o, np.timedelta64):
with pytest.raises(TypeError):
o + p
else:
with pytest.raises(period.IncompatibleFrequency):
o + p
for freq in ['H', '2H', '3H']:
p = Period('2011-04-01 09:00', freq=freq)
exp = Period('2011-04-03 09:00', freq=freq)
assert p + offsets.Day(2) == exp
assert offsets.Day(2) + p == exp
exp = Period('2011-04-01 12:00', freq=freq)
assert p + offsets.Hour(3) == exp
assert offsets.Hour(3) + p == exp
exp = Period('2011-04-01 12:00', freq=freq)
assert p + np.timedelta64(3, 'h') == exp
with pytest.raises(TypeError):
np.timedelta64(3, 'h') + p
exp = Period('2011-04-01 10:00', freq=freq)
assert p + np.timedelta64(3600, 's') == exp
with pytest.raises(TypeError):
np.timedelta64(3600, 's') + p
exp = Period('2011-04-01 11:00', freq=freq)
assert p + timedelta(minutes=120) == exp
assert timedelta(minutes=120) + p == exp
exp = Period('2011-04-05 12:00', freq=freq)
assert p + timedelta(days=4, minutes=180) == exp
assert timedelta(days=4, minutes=180) + p == exp
for o in [offsets.YearBegin(2), offsets.MonthBegin(1),
offsets.Minute(), np.timedelta64(3200, 's'),
timedelta(hours=23, minutes=30)]:
with pytest.raises(period.IncompatibleFrequency):
p + o
if isinstance(o, np.timedelta64):
with pytest.raises(TypeError):
o + p
else:
with pytest.raises(period.IncompatibleFrequency):
o + p
def test_add_offset_nat(self):
# freq is DateOffset
for freq in ['A', '2A', '3A']:
p = Period('NaT', freq=freq)
for o in [offsets.YearEnd(2)]:
assert p + o is tslib.NaT
assert o + p is tslib.NaT
for o in [offsets.YearBegin(2), offsets.MonthBegin(1),
offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
assert p + o is tslib.NaT
if isinstance(o, np.timedelta64):
with pytest.raises(TypeError):
o + p
else:
assert o + p is tslib.NaT
for freq in ['M', '2M', '3M']:
p = Period('NaT', freq=freq)
for o in [offsets.MonthEnd(2), offsets.MonthEnd(12)]:
assert p + o is tslib.NaT
if isinstance(o, np.timedelta64):
with pytest.raises(TypeError):
o + p
else:
assert o + p is tslib.NaT
for o in [offsets.YearBegin(2), offsets.MonthBegin(1),
offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
assert p + o is tslib.NaT
if isinstance(o, np.timedelta64):
with pytest.raises(TypeError):
o + p
else:
assert o + p is tslib.NaT
# freq is Tick
for freq in ['D', '2D', '3D']:
p = Period('NaT', freq=freq)
for o in [offsets.Day(5), offsets.Hour(24), np.timedelta64(2, 'D'),
np.timedelta64(3600 * 24, 's'), timedelta(-2),
timedelta(hours=48)]:
assert p + o is tslib.NaT
if isinstance(o, np.timedelta64):
with pytest.raises(TypeError):
o + p
else:
assert o + p is tslib.NaT
for o in [offsets.YearBegin(2), offsets.MonthBegin(1),
offsets.Minute(), np.timedelta64(4, 'h'),
timedelta(hours=23)]:
assert p + o is tslib.NaT
if isinstance(o, np.timedelta64):
with pytest.raises(TypeError):
o + p
else:
assert o + p is tslib.NaT
for freq in ['H', '2H', '3H']:
p = Period('NaT', freq=freq)
for o in [offsets.Day(2), offsets.Hour(3), np.timedelta64(3, 'h'),
np.timedelta64(3600, 's'), timedelta(minutes=120),
timedelta(days=4, minutes=180)]:
assert p + o is tslib.NaT
if not isinstance(o, np.timedelta64):
assert o + p is tslib.NaT
for o in [offsets.YearBegin(2), offsets.MonthBegin(1),
offsets.Minute(), np.timedelta64(3200, 's'),
timedelta(hours=23, minutes=30)]:
assert p + o is tslib.NaT
if isinstance(o, np.timedelta64):
with pytest.raises(TypeError):
o + p
else:
assert o + p is tslib.NaT
def test_sub_pdnat(self):
# GH 13071
p = pd.Period('2011-01', freq='M')
assert p - pd.NaT is pd.NaT
assert pd.NaT - p is pd.NaT
p = pd.Period('NaT', freq='M')
assert p - pd.NaT is pd.NaT
assert pd.NaT - p is pd.NaT
def test_sub_offset(self):
# freq is DateOffset
for freq in ['A', '2A', '3A']:
p = Period('2011', freq=freq)
assert p - offsets.YearEnd(2) == Period('2009', freq=freq)
for o in [offsets.YearBegin(2), offsets.MonthBegin(1),
offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
with pytest.raises(period.IncompatibleFrequency):
p - o
for freq in ['M', '2M', '3M']:
p = Period('2011-03', freq=freq)
assert p - offsets.MonthEnd(2) == Period('2011-01', freq=freq)
assert p - offsets.MonthEnd(12) == Period('2010-03', freq=freq)
for o in [offsets.YearBegin(2), offsets.MonthBegin(1),
offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
with pytest.raises(period.IncompatibleFrequency):
p - o
# freq is Tick
for freq in ['D', '2D', '3D']:
p = Period('2011-04-01', freq=freq)
assert p - offsets.Day(5) == Period('2011-03-27', freq=freq)
assert p - offsets.Hour(24) == Period('2011-03-31', freq=freq)
assert p - np.timedelta64(2, 'D') == Period(
'2011-03-30', freq=freq)
assert p - np.timedelta64(3600 * 24, 's') == Period(
'2011-03-31', freq=freq)
assert p - timedelta(-2) == Period('2011-04-03', freq=freq)
assert p - timedelta(hours=48) == Period('2011-03-30', freq=freq)
for o in [offsets.YearBegin(2), offsets.MonthBegin(1),
offsets.Minute(), np.timedelta64(4, 'h'),
timedelta(hours=23)]:
with pytest.raises(period.IncompatibleFrequency):
p - o
for freq in ['H', '2H', '3H']:
p = Period('2011-04-01 09:00', freq=freq)
assert p - offsets.Day(2) == Period('2011-03-30 09:00', freq=freq)
assert p - offsets.Hour(3) == Period('2011-04-01 06:00', freq=freq)
assert p - np.timedelta64(3, 'h') == Period(
'2011-04-01 06:00', freq=freq)
assert p - np.timedelta64(3600, 's') == Period(
'2011-04-01 08:00', freq=freq)
assert p - timedelta(minutes=120) == Period(
'2011-04-01 07:00', freq=freq)
assert p - timedelta(days=4, minutes=180) == Period(
'2011-03-28 06:00', freq=freq)
for o in [offsets.YearBegin(2), offsets.MonthBegin(1),
offsets.Minute(), np.timedelta64(3200, 's'),
timedelta(hours=23, minutes=30)]:
with pytest.raises(period.IncompatibleFrequency):
p - o
def test_sub_offset_nat(self):
# freq is DateOffset
for freq in ['A', '2A', '3A']:
p = Period('NaT', freq=freq)
for o in [offsets.YearEnd(2)]:
assert p - o is tslib.NaT
for o in [offsets.YearBegin(2), offsets.MonthBegin(1),
offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
assert p - o is tslib.NaT
for freq in ['M', '2M', '3M']:
p = Period('NaT', freq=freq)
for o in [offsets.MonthEnd(2), offsets.MonthEnd(12)]:
assert p - o is tslib.NaT
for o in [offsets.YearBegin(2), offsets.MonthBegin(1),
offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
assert p - o is tslib.NaT
# freq is Tick
for freq in ['D', '2D', '3D']:
p = Period('NaT', freq=freq)
for o in [offsets.Day(5), offsets.Hour(24), np.timedelta64(2, 'D'),
np.timedelta64(3600 * 24, 's'), timedelta(-2),
timedelta(hours=48)]:
assert p - o is tslib.NaT
for o in [offsets.YearBegin(2), offsets.MonthBegin(1),
offsets.Minute(), np.timedelta64(4, 'h'),
timedelta(hours=23)]:
assert p - o is tslib.NaT
for freq in ['H', '2H', '3H']:
p = Period('NaT', freq=freq)
for o in [offsets.Day(2), offsets.Hour(3), np.timedelta64(3, 'h'),
np.timedelta64(3600, 's'), timedelta(minutes=120),
timedelta(days=4, minutes=180)]:
assert p - o is tslib.NaT
for o in [offsets.YearBegin(2), offsets.MonthBegin(1),
offsets.Minute(), np.timedelta64(3200, 's'),
timedelta(hours=23, minutes=30)]:
assert p - o is tslib.NaT
def test_nat_ops(self):
for freq in ['M', '2M', '3M']:
p = Period('NaT', freq=freq)
assert p + 1 is tslib.NaT
assert 1 + p is tslib.NaT
assert p - 1 is tslib.NaT
assert p - Period('2011-01', freq=freq) is tslib.NaT
assert Period('2011-01', freq=freq) - p is tslib.NaT
def test_period_ops_offset(self):
p = Period('2011-04-01', freq='D')
result = p + offsets.Day()
exp = pd.Period('2011-04-02', freq='D')
assert result == exp
result = p - offsets.Day(2)
exp = pd.Period('2011-03-30', freq='D')
assert result == exp
msg = r"Input cannot be converted to Period\(freq=D\)"
with tm.assert_raises_regex(period.IncompatibleFrequency, msg):
p + offsets.Hour(2)
with tm.assert_raises_regex(period.IncompatibleFrequency, msg):
p - offsets.Hour(2)
| mit |
AlexRobson/scikit-learn | examples/feature_selection/plot_rfe_with_cross_validation.py | 226 | 1384 | """
===================================================
Recursive feature elimination with cross-validation
===================================================
A recursive feature elimination example with automatic tuning of the
number of features selected with cross-validation.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.cross_validation import StratifiedKFold
from sklearn.feature_selection import RFECV
from sklearn.datasets import make_classification
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000, n_features=25, n_informative=3,
n_redundant=2, n_repeated=0, n_classes=8,
n_clusters_per_class=1, random_state=0)
# Create the RFE object and compute a cross-validated score.
svc = SVC(kernel="linear")
# The "accuracy" scoring is proportional to the number of correct
# classifications
rfecv = RFECV(estimator=svc, step=1, cv=StratifiedKFold(y, 2),
scoring='accuracy')
rfecv.fit(X, y)
print("Optimal number of features : %d" % rfecv.n_features_)
# Plot number of features VS. cross-validation scores
plt.figure()
plt.xlabel("Number of features selected")
plt.ylabel("Cross validation score (nb of correct classifications)")
plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_)
plt.show()
| bsd-3-clause |
hsiaoyi0504/scikit-learn | examples/mixture/plot_gmm_pdf.py | 284 | 1528 | """
=============================================
Density Estimation for a mixture of Gaussians
=============================================
Plot the density estimation of a mixture of two Gaussians. Data is
generated from two Gaussians with different centers and covariance
matrices.
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from sklearn import mixture
n_samples = 300
# generate random sample, two components
np.random.seed(0)
# generate spherical data centered on (20, 20)
shifted_gaussian = np.random.randn(n_samples, 2) + np.array([20, 20])
# generate zero centered stretched Gaussian data
C = np.array([[0., -0.7], [3.5, .7]])
stretched_gaussian = np.dot(np.random.randn(n_samples, 2), C)
# concatenate the two datasets into the final training set
X_train = np.vstack([shifted_gaussian, stretched_gaussian])
# fit a Gaussian Mixture Model with two components
clf = mixture.GMM(n_components=2, covariance_type='full')
clf.fit(X_train)
# display predicted scores by the model as a contour plot
x = np.linspace(-20.0, 30.0)
y = np.linspace(-20.0, 40.0)
X, Y = np.meshgrid(x, y)
XX = np.array([X.ravel(), Y.ravel()]).T
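# GMM.score_samples returns a (log-likelihoods, responsibilities) tuple;
# keep the per-sample log-likelihoods and negate them for the contour plot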
Z = -clf.score_samples(XX)[0]
Z = Z.reshape(X.shape)
CS = plt.contour(X, Y, Z, norm=LogNorm(vmin=1.0, vmax=1000.0),
levels=np.logspace(0, 3, 10))
CB = plt.colorbar(CS, shrink=0.8, extend='both')
plt.scatter(X_train[:, 0], X_train[:, 1], .8)
plt.title('Negative log-likelihood predicted by a GMM')
plt.axis('tight')
plt.show()
| bsd-3-clause |
mbalasso/mynumpy | numpy/fft/fftpack.py | 9 | 39261 | """
Discrete Fourier Transforms
Routines in this module:
fft(a, n=None, axis=-1)
ifft(a, n=None, axis=-1)
rfft(a, n=None, axis=-1)
irfft(a, n=None, axis=-1)
hfft(a, n=None, axis=-1)
ihfft(a, n=None, axis=-1)
fftn(a, s=None, axes=None)
ifftn(a, s=None, axes=None)
rfftn(a, s=None, axes=None)
irfftn(a, s=None, axes=None)
fft2(a, s=None, axes=(-2,-1))
ifft2(a, s=None, axes=(-2, -1))
rfft2(a, s=None, axes=(-2,-1))
irfft2(a, s=None, axes=(-2, -1))
i = inverse transform
r = transform of purely real data
h = Hermite transform
n = n-dimensional transform
2 = 2-dimensional transform
(Note: 2D routines are just nD routines with different default
behavior.)
The underlying code for these functions is an f2c-translated and modified
version of the FFTPACK routines.
"""
__all__ = ['fft','ifft', 'rfft', 'irfft', 'hfft', 'ihfft', 'rfftn',
'irfftn', 'rfft2', 'irfft2', 'fft2', 'ifft2', 'fftn', 'ifftn']
from numpy.core import asarray, zeros, swapaxes, shape, conjugate, \
take
import fftpack_lite as fftpack
_fft_cache = {}
_real_fft_cache = {}
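# The wsave work arrays produced by the init functions are cached per
# transform length, so repeated transforms of the same size are cheaper.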
def _raw_fft(a, n=None, axis=-1, init_function=fftpack.cffti,
work_function=fftpack.cfftf, fft_cache = _fft_cache ):
a = asarray(a)
if n is None:
n = a.shape[axis]
if n < 1:
raise ValueError("Invalid number of FFT data points (%d) specified." % n)
try:
wsave = fft_cache[n]
except(KeyError):
wsave = init_function(n)
fft_cache[n] = wsave
if a.shape[axis] != n:
s = list(a.shape)
if s[axis] > n:
index = [slice(None)]*len(s)
index[axis] = slice(0,n)
a = a[index]
else:
index = [slice(None)]*len(s)
index[axis] = slice(0,s[axis])
s[axis] = n
z = zeros(s, a.dtype.char)
z[index] = a
a = z
if axis != -1:
a = swapaxes(a, axis, -1)
r = work_function(a, wsave)
if axis != -1:
r = swapaxes(r, axis, -1)
return r
def fft(a, n=None, axis=-1):
"""
Compute the one-dimensional discrete Fourier Transform.
This function computes the one-dimensional *n*-point discrete Fourier
Transform (DFT) with the efficient Fast Fourier Transform (FFT)
algorithm [CT].
Parameters
----------
a : array_like
Input array, can be complex.
n : int, optional
Length of the transformed axis of the output.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input (along the axis specified by `axis`) is used.
axis : int, optional
Axis over which to compute the FFT. If not given, the last axis is
used.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
Raises
------
IndexError
If `axes` is larger than the last axis of `a`.
See Also
--------
numpy.fft : for definition of the DFT and conventions used.
ifft : The inverse of `fft`.
fft2 : The two-dimensional FFT.
fftn : The *n*-dimensional FFT.
rfftn : The *n*-dimensional FFT of real input.
fftfreq : Frequency bins for given FFT parameters.
Notes
-----
FFT (Fast Fourier Transform) refers to a way the discrete Fourier
Transform (DFT) can be calculated efficiently, by using symmetries in the
calculated terms. The symmetry is highest when `n` is a power of 2, and
the transform is therefore most efficient for these sizes.
The DFT is defined, with the conventions used in this implementation, in
the documentation for the `numpy.fft` module.
References
----------
.. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the
machine calculation of complex Fourier series," *Math. Comput.*
19: 297-301.
Examples
--------
>>> np.fft.fft(np.exp(2j * np.pi * np.arange(8) / 8))
array([ -3.44505240e-16 +1.14383329e-17j,
8.00000000e+00 -5.71092652e-15j,
2.33482938e-16 +1.22460635e-16j,
1.64863782e-15 +1.77635684e-15j,
9.95839695e-17 +2.33482938e-16j,
0.00000000e+00 +1.66837030e-15j,
1.14383329e-17 +1.22460635e-16j,
-1.64863782e-15 +1.77635684e-15j])
>>> import matplotlib.pyplot as plt
>>> t = np.arange(256)
>>> sp = np.fft.fft(np.sin(t))
>>> freq = np.fft.fftfreq(t.shape[-1])
>>> plt.plot(freq, sp.real, freq, sp.imag)
[<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
>>> plt.show()
In this example, real input has an FFT which is Hermitian, i.e., symmetric
in the real part and anti-symmetric in the imaginary part, as described in
the `numpy.fft` documentation.
"""
return _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftf, _fft_cache)
def ifft(a, n=None, axis=-1):
"""
Compute the one-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the one-dimensional *n*-point
discrete Fourier transform computed by `fft`. In other words,
``ifft(fft(a)) == a`` to within numerical accuracy.
For a general description of the algorithm and definitions,
see `numpy.fft`.
The input should be ordered in the same way as is returned by `fft`,
i.e., ``a[0]`` should contain the zero frequency term,
``a[1:n/2+1]`` should contain the positive-frequency terms, and
``a[n/2+1:]`` should contain the negative-frequency terms, in order of
decreasingly negative frequency. See `numpy.fft` for details.
Parameters
----------
a : array_like
Input array, can be complex.
n : int, optional
Length of the transformed axis of the output.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input (along the axis specified by `axis`) is used.
See notes about padding issues.
axis : int, optional
Axis over which to compute the inverse DFT. If not given, the last
axis is used.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
Raises
------
IndexError
If `axes` is larger than the last axis of `a`.
See Also
--------
numpy.fft : An introduction, with definitions and general explanations.
fft : The one-dimensional (forward) FFT, of which `ifft` is the inverse
ifft2 : The two-dimensional inverse FFT.
ifftn : The n-dimensional inverse FFT.
Notes
-----
If the input parameter `n` is larger than the size of the input, the input
is padded by appending zeros at the end. Even though this is the common
approach, it might lead to surprising results. If a different padding is
desired, it must be performed before calling `ifft`.
Examples
--------
>>> np.fft.ifft([0, 4, 0, 0])
array([ 1.+0.j, 0.+1.j, -1.+0.j, 0.-1.j])
Create and plot a band-limited signal with random phases:
>>> import matplotlib.pyplot as plt
>>> t = np.arange(400)
>>> n = np.zeros((400,), dtype=complex)
>>> n[40:60] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20,)))
>>> s = np.fft.ifft(n)
>>> plt.plot(t, s.real, 'b-', t, s.imag, 'r--')
[<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
>>> plt.legend(('real', 'imaginary'))
<matplotlib.legend.Legend object at 0x...>
>>> plt.show()
"""
a = asarray(a).astype(complex)
if n is None:
n = shape(a)[axis]
return _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftb, _fft_cache) / n
def rfft(a, n=None, axis=-1):
"""
Compute the one-dimensional discrete Fourier Transform for real input.
This function computes the one-dimensional *n*-point discrete Fourier
Transform (DFT) of a real-valued array by means of an efficient algorithm
called the Fast Fourier Transform (FFT).
Parameters
----------
a : array_like
Input array
n : int, optional
Number of points along transformation axis in the input to use.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input (along the axis specified by `axis`) is used.
axis : int, optional
Axis over which to compute the FFT. If not given, the last axis is
used.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
The length of the transformed axis is ``n/2+1``.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See Also
--------
numpy.fft : For definition of the DFT and conventions used.
irfft : The inverse of `rfft`.
fft : The one-dimensional FFT of general (complex) input.
fftn : The *n*-dimensional FFT.
rfftn : The *n*-dimensional FFT of real input.
Notes
-----
When the DFT is computed for purely real input, the output is
Hermite-symmetric, i.e. the negative frequency terms are just the complex
conjugates of the corresponding positive-frequency terms, and the
negative-frequency terms are therefore redundant. This function does not
compute the negative frequency terms, and the length of the transformed
axis of the output is therefore ``n/2+1``.
When ``A = rfft(a)``, ``A[0]`` contains the zero-frequency term, which
must be purely real due to the Hermite symmetry.
If `n` is even, ``A[-1]`` contains the term for frequencies ``n/2`` and
``-n/2``, and must also be purely real. If `n` is odd, ``A[-1]``
contains the term for frequency ``A[(n-1)/2]``, and is complex in the
general case.
If the input `a` contains an imaginary part, it is silently discarded.
Examples
--------
>>> np.fft.fft([0, 1, 0, 0])
array([ 1.+0.j, 0.-1.j, -1.+0.j, 0.+1.j])
>>> np.fft.rfft([0, 1, 0, 0])
array([ 1.+0.j, 0.-1.j, -1.+0.j])
Notice how the final element of the `fft` output is the complex conjugate
of the second element, for real input. For `rfft`, this symmetry is
exploited to compute only the non-negative frequency terms.
"""
a = asarray(a).astype(float)
return _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftf, _real_fft_cache)
def irfft(a, n=None, axis=-1):
"""
Compute the inverse of the n-point DFT for real input.
This function computes the inverse of the one-dimensional *n*-point
discrete Fourier Transform of real input computed by `rfft`.
In other words, ``irfft(rfft(a), len(a)) == a`` to within numerical
accuracy. (See Notes below for why ``len(a)`` is necessary here.)
The input is expected to be in the form returned by `rfft`, i.e. the
real zero-frequency term followed by the complex positive frequency terms
in order of increasing frequency. Since the discrete Fourier Transform of
real input is Hermite-symmetric, the negative frequency terms are taken
to be the complex conjugates of the corresponding positive frequency terms.
Parameters
----------
a : array_like
The input array.
n : int, optional
Length of the transformed axis of the output.
For `n` output points, ``n/2+1`` input points are necessary. If the
input is longer than this, it is cropped. If it is shorter than this,
it is padded with zeros. If `n` is not given, it is determined from
the length of the input (along the axis specified by `axis`).
axis : int, optional
Axis over which to compute the inverse FFT.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
The length of the transformed axis is `n`, or, if `n` is not given,
``2*(m-1)`` where `m` is the length of the transformed axis of the
input. To get an odd number of output points, `n` must be specified.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See Also
--------
numpy.fft : For definition of the DFT and conventions used.
rfft : The one-dimensional FFT of real input, of which `irfft` is inverse.
fft : The one-dimensional FFT.
irfft2 : The inverse of the two-dimensional FFT of real input.
irfftn : The inverse of the *n*-dimensional FFT of real input.
Notes
-----
Returns the real valued `n`-point inverse discrete Fourier transform
of `a`, where `a` contains the non-negative frequency terms of a
Hermite-symmetric sequence. `n` is the length of the result, not the
input.
If you specify an `n` such that `a` must be zero-padded or truncated, the
extra/removed values will be added/removed at high frequencies. One can
thus resample a series to `m` points via Fourier interpolation by:
``a_resamp = irfft(rfft(a), m)``.
Examples
--------
>>> np.fft.ifft([1, -1j, -1, 1j])
array([ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j])
>>> np.fft.irfft([1, -1j, -1])
array([ 0., 1., 0., 0.])
Notice how the last term in the input to the ordinary `ifft` is the
complex conjugate of the second term, and the output has zero imaginary
part everywhere. When calling `irfft`, the negative frequencies are not
specified, and the output array is purely real.
"""
a = asarray(a).astype(complex)
if n is None:
n = (shape(a)[axis] - 1) * 2
return _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftb,
_real_fft_cache) / n
def hfft(a, n=None, axis=-1):
"""
Compute the FFT of a signal whose spectrum has Hermitian symmetry.
Parameters
----------
a : array_like
The input array.
n : int, optional
The length of the FFT.
axis : int, optional
The axis over which to compute the FFT, assuming Hermitian symmetry
of the spectrum. Default is the last axis.
Returns
-------
out : ndarray
The transformed input.
See also
--------
rfft : Compute the one-dimensional FFT for real input.
ihfft : The inverse of `hfft`.
Notes
-----
`hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
opposite case: here the signal is real in the frequency domain and has
Hermitian symmetry in the time domain. So here it's `hfft` for which
you must supply the length of the result if it is to be odd:
``ihfft(hfft(a, 2*len(a) - 1)) == a``, within numerical accuracy.
Examples
--------
>>> signal = np.array([[1, 1.j], [-1.j, 2]])
>>> np.conj(signal.T) - signal # check Hermitian symmetry
array([[ 0.-0.j, 0.+0.j],
[ 0.+0.j, 0.-0.j]])
>>> freq_spectrum = np.fft.hfft(signal)
>>> freq_spectrum
array([[ 1., 1.],
[ 2., -2.]])
"""
a = asarray(a).astype(complex)
if n is None:
n = (shape(a)[axis] - 1) * 2
return irfft(conjugate(a), n, axis) * n
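# Illustrative sketch (added): the odd-length round trip discussed in the
# Notes above, assuming NumPy as `np`. The input is half of a Hermitian
# spectrum (zero-frequency term real).
#
# >>> import numpy as np
# >>> a = np.array([1.0, 2 + 1j, 3 - 2j])
# >>> np.allclose(np.fft.ihfft(np.fft.hfft(a, 2 * len(a) - 1)), a)
# True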
def ihfft(a, n=None, axis=-1):
"""
Compute the inverse FFT of a signal whose spectrum has Hermitian symmetry.
Parameters
----------
a : array_like
Input array.
n : int, optional
Length of the inverse FFT.
axis : int, optional
Axis over which to compute the inverse FFT, assuming Hermitian
symmetry of the spectrum. Default is the last axis.
Returns
-------
out : ndarray
The transformed input.
See also
--------
hfft, irfft
Notes
-----
`hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
opposite case: here the signal is real in the frequency domain and has
Hermitian symmetry in the time domain. So here it's `hfft` for which
you must supply the length of the result if it is to be odd:
``ihfft(hfft(a, 2*len(a) - 1)) == a``, within numerical accuracy.
"""
a = asarray(a).astype(float)
if n is None:
n = shape(a)[axis]
return conjugate(rfft(a, n, axis))/n
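# Comment added for clarity: `_cook_nd_args` normalizes the `s`/`axes`
# arguments shared by the n-dimensional transforms below. It defaults `s`
# to the input shape along `axes`, and for inverse real transforms
# (`invreal` set and `s` omitted) reconstructs the last axis length as
# ``2*(m-1)``, mirroring `irfft`.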
def _cook_nd_args(a, s=None, axes=None, invreal=0):
if s is None:
shapeless = 1
if axes is None:
s = list(a.shape)
else:
s = take(a.shape, axes)
else:
shapeless = 0
s = list(s)
if axes is None:
axes = range(-len(s), 0)
if len(s) != len(axes):
raise ValueError("Shape and axes have different lengths.")
if invreal and shapeless:
s[-1] = (a.shape[axes[-1]] - 1) * 2
return s, axes
def _raw_fftnd(a, s=None, axes=None, function=fft):
a = asarray(a)
s, axes = _cook_nd_args(a, s, axes)
for ii in reversed(range(len(axes))):
a = function(a, n=s[ii], axis=axes[ii])
return a
def fftn(a, s=None, axes=None):
"""
Compute the N-dimensional discrete Fourier Transform.
This function computes the *N*-dimensional discrete Fourier Transform over
any number of axes in an *M*-dimensional array by means of the Fast Fourier
Transform (FFT).
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(`s[0]` refers to axis 0, `s[1]` to axis 1, etc.).
This corresponds to `n` for `fft(x, n)`.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
If `s` is not given, the shape of the input (along the axes specified
by `axes`) is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the transform over that axis is
performed multiple times.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` and `a`,
as explained in the parameters section above.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
ifftn : The inverse of `fftn`, the inverse *n*-dimensional FFT.
fft : The one-dimensional FFT, with definitions and conventions used.
rfftn : The *n*-dimensional FFT of real input.
fft2 : The two-dimensional FFT.
fftshift : Shifts zero-frequency terms to the center of the array.
Notes
-----
The output, analogously to `fft`, contains the term for zero frequency in
the low-order corner of all axes, the positive frequency terms in the
first half of all axes, the term for the Nyquist frequency in the middle
of all axes and the negative frequency terms in the second half of all
axes, in order of decreasingly negative frequency.
See `numpy.fft` for details, definitions and conventions used.
Examples
--------
>>> a = np.mgrid[:3, :3, :3][0]
>>> np.fft.fftn(a, axes=(1, 2))
array([[[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[ 9.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[ 18.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]]])
>>> np.fft.fftn(a, (2, 2), axes=(0, 1))
array([[[ 2.+0.j, 2.+0.j, 2.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[-2.+0.j, -2.+0.j, -2.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]]])
>>> import matplotlib.pyplot as plt
>>> [X, Y] = np.meshgrid(2 * np.pi * np.arange(200) / 12,
... 2 * np.pi * np.arange(200) / 34)
>>> S = np.sin(X) + np.cos(Y) + np.random.uniform(0, 1, X.shape)
>>> FS = np.fft.fftn(S)
>>> plt.imshow(np.log(np.abs(np.fft.fftshift(FS))**2))
<matplotlib.image.AxesImage object at 0x...>
>>> plt.show()
"""
return _raw_fftnd(a, s, axes, fft)
def ifftn(a, s=None, axes=None):
"""
Compute the N-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the N-dimensional discrete
Fourier Transform over any number of axes in an M-dimensional array by
means of the Fast Fourier Transform (FFT). In other words,
``ifftn(fftn(a)) == a`` to within numerical accuracy.
For a description of the definitions and conventions used, see `numpy.fft`.
The input, analogously to `ifft`, should be ordered in the same way as is
returned by `fftn`, i.e. it should have the term for zero frequency
in all axes in the low-order corner, the positive frequency terms in the
first half of all axes, the term for the Nyquist frequency in the middle
of all axes and the negative frequency terms in the second half of all
axes, in order of decreasingly negative frequency.
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
This corresponds to ``n`` for ``ifft(x, n)``.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
If `s` is not given, the shape of the input (along the axes specified
by `axes`) is used. See notes for issue on `ifft` zero padding.
axes : sequence of ints, optional
Axes over which to compute the IFFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the inverse transform over that
axis is performed multiple times.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` and `a`,
as explained in the parameters section above.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
fftn : The forward *n*-dimensional FFT, of which `ifftn` is the inverse.
ifft : The one-dimensional inverse FFT.
ifft2 : The two-dimensional inverse FFT.
ifftshift : Undoes `fftshift`, shifts zero-frequency terms to beginning
of array.
Notes
-----
See `numpy.fft` for definitions and conventions used.
Zero-padding, analogously with `ifft`, is performed by appending zeros to
the input along the specified dimension. Although this is the common
approach, it might lead to surprising results. If another form of zero
padding is desired, it must be performed before `ifftn` is called.
Examples
--------
>>> a = np.eye(4)
>>> np.fft.ifftn(np.fft.fftn(a, axes=(0,)), axes=(1,))
array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j]])
Create and plot an image with band-limited frequency content:
>>> import matplotlib.pyplot as plt
>>> n = np.zeros((200,200), dtype=complex)
>>> n[60:80, 20:40] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20, 20)))
>>> im = np.fft.ifftn(n).real
>>> plt.imshow(im)
<matplotlib.image.AxesImage object at 0x...>
>>> plt.show()
"""
return _raw_fftnd(a, s, axes, ifft)
def fft2(a, s=None, axes=(-2, -1)):
"""
Compute the 2-dimensional discrete Fourier Transform.
This function computes the *n*-dimensional discrete Fourier Transform
over any axes in an *M*-dimensional array by means of the
Fast Fourier Transform (FFT). By default, the transform is computed over
the last two axes of the input array, i.e., a 2-dimensional FFT.
Parameters
----------
a : array_like
Input array, can be complex
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(`s[0]` refers to axis 0, `s[1]` to axis 1, etc.).
This corresponds to `n` for `fft(x, n)`.
Along each axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
If `s` is not given, the shape of the input (along the axes specified
by `axes`) is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last two
axes are used. A repeated index in `axes` means the transform over
that axis is performed multiple times. A one-element sequence means
that a one-dimensional FFT is performed.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or the last two axes if `axes` is not given.
Raises
------
ValueError
If `s` and `axes` have different length, or `axes` not given and
``len(s) != 2``.
IndexError
If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
ifft2 : The inverse two-dimensional FFT.
fft : The one-dimensional FFT.
fftn : The *n*-dimensional FFT.
fftshift : Shifts zero-frequency terms to the center of the array.
For two-dimensional input, swaps first and third quadrants, and second
and fourth quadrants.
Notes
-----
`fft2` is just `fftn` with a different default for `axes`.
The output, analogously to `fft`, contains the term for zero frequency in
the low-order corner of the transformed axes, the positive frequency terms
in the first half of these axes, the term for the Nyquist frequency in the
middle of the axes and the negative frequency terms in the second half of
the axes, in order of decreasingly negative frequency.
See `fftn` for details and a plotting example, and `numpy.fft` for
definitions and conventions used.
Examples
--------
>>> a = np.mgrid[:5, :5][0]
>>> np.fft.fft2(a)
array([[ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 5.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 10.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 15.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 20.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j]])
"""
return _raw_fftnd(a, s, axes, fft)
def ifft2(a, s=None, axes=(-2, -1)):
"""
Compute the 2-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the 2-dimensional discrete Fourier
Transform over any number of axes in an M-dimensional array by means of
the Fast Fourier Transform (FFT). In other words, ``ifft2(fft2(a)) == a``
to within numerical accuracy. By default, the inverse transform is
computed over the last two axes of the input array.
The input, analogously to `ifft`, should be ordered in the same way as is
returned by `fft2`, i.e. it should have the term for zero frequency
in the low-order corner of the two axes, the positive frequency terms in
the first half of these axes, the term for the Nyquist frequency in the
middle of the axes and the negative frequency terms in the second half of
both axes, in order of decreasingly negative frequency.
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each axis) of the output (``s[0]`` refers to axis 0,
``s[1]`` to axis 1, etc.). This corresponds to `n` for ``ifft(x, n)``.
Along each axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
If `s` is not given, the shape of the input (along the axes specified
by `axes`) is used. See notes for issue on `ifft` zero padding.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last two
axes are used. A repeated index in `axes` means the transform over
that axis is performed multiple times. A one-element sequence means
that a one-dimensional FFT is performed.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or the last two axes if `axes` is not given.
Raises
------
ValueError
If `s` and `axes` have different length, or `axes` not given and
``len(s) != 2``.
IndexError
If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
fft2 : The forward 2-dimensional FFT, of which `ifft2` is the inverse.
ifftn : The inverse of the *n*-dimensional FFT.
fft : The one-dimensional FFT.
ifft : The one-dimensional inverse FFT.
Notes
-----
`ifft2` is just `ifftn` with a different default for `axes`.
See `ifftn` for details and a plotting example, and `numpy.fft` for
definition and conventions used.
Zero-padding, analogously with `ifft`, is performed by appending zeros to
the input along the specified dimension. Although this is the common
approach, it might lead to surprising results. If another form of zero
padding is desired, it must be performed before `ifft2` is called.
Examples
--------
>>> a = 4 * np.eye(4)
>>> np.fft.ifft2(a)
array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j],
[ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
[ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]])
"""
return _raw_fftnd(a, s, axes, ifft)
def rfftn(a, s=None, axes=None):
"""
Compute the N-dimensional discrete Fourier Transform for real input.
This function computes the N-dimensional discrete Fourier Transform over
any number of axes in an M-dimensional real array by means of the Fast
Fourier Transform (FFT). By default, all axes are transformed, with the
real transform performed over the last axis, while the remaining
transforms are complex.
Parameters
----------
a : array_like
Input array, taken to be real.
s : sequence of ints, optional
Shape (length along each transformed axis) to use from the input.
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
The final element of `s` corresponds to `n` for ``rfft(x, n)``, while
for the remaining axes, it corresponds to `n` for ``fft(x, n)``.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
If `s` is not given, the shape of the input (along the axes specified
by `axes`) is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` and `a`,
as explained in the parameters section above.
The length of the last axis transformed will be ``s[-1]//2+1``,
while the remaining transformed axes will have lengths according to
`s`, or unchanged from the input.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
irfftn : The inverse of `rfftn`, i.e. the inverse of the n-dimensional FFT
of real input.
fft : The one-dimensional FFT, with definitions and conventions used.
rfft : The one-dimensional FFT of real input.
fftn : The n-dimensional FFT.
rfft2 : The two-dimensional FFT of real input.
Notes
-----
The transform for real input is performed over the last transformation
axis, as by `rfft`, then the transform over the remaining axes is
performed as by `fftn`. The order of the output is as for `rfft` for the
final transformation axis, and as for `fftn` for the remaining
transformation axes.
See `fft` for details, definitions and conventions used.
Examples
--------
>>> a = np.ones((2, 2, 2))
>>> np.fft.rfftn(a)
array([[[ 8.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]],
[[ 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]]])
>>> np.fft.rfftn(a, axes=(2, 0))
array([[[ 4.+0.j, 0.+0.j],
[ 4.+0.j, 0.+0.j]],
[[ 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]]])
"""
a = asarray(a).astype(float)
s, axes = _cook_nd_args(a, s, axes)
a = rfft(a, s[-1], axes[-1])
for ii in range(len(axes)-1):
a = fft(a, s[ii], axes[ii])
return a
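# Illustrative sketch (added; assumes NumPy as `np`): the shape rule stated
# above -- the last transformed axis is shortened to ``s[-1]//2 + 1``.
#
# >>> import numpy as np
# >>> np.fft.rfftn(np.ones((4, 6))).shape   # last axis: 6//2 + 1 == 4
# (4, 4)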
def rfft2(a, s=None, axes=(-2, -1)):
"""
Compute the 2-dimensional FFT of a real array.
Parameters
----------
a : array
Input array, taken to be real.
s : sequence of ints, optional
Shape of the FFT.
axes : sequence of ints, optional
Axes over which to compute the FFT.
Returns
-------
out : ndarray
The result of the real 2-D FFT.
See Also
--------
rfftn : Compute the N-dimensional discrete Fourier Transform for real
input.
Notes
-----
This is really just `rfftn` with different default behavior.
For more details see `rfftn`.
"""
return rfftn(a, s, axes)
def irfftn(a, s=None, axes=None):
"""
Compute the inverse of the N-dimensional FFT of real input.
This function computes the inverse of the N-dimensional discrete
Fourier Transform for real input over any number of axes in an
M-dimensional array by means of the Fast Fourier Transform (FFT). In
other words, ``irfftn(rfftn(a), a.shape) == a`` to within numerical
accuracy. (The ``a.shape`` is necessary like ``len(a)`` is for `irfft`,
and for the same reason.)
The input should be ordered in the same way as is returned by `rfftn`,
i.e. as for `irfft` for the final transformation axis, and as for `ifftn`
along all the other axes.
Parameters
----------
a : array_like
Input array.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). `s` is also the
number of input points used along this axis, except for the last axis,
where ``s[-1]//2+1`` points of the input are used.
Along any axis, if the shape indicated by `s` is smaller than that of
the input, the input is cropped. If it is larger, the input is padded
with zeros. If `s` is not given, the shape of the input (along the
axes specified by `axes`) is used.
axes : sequence of ints, optional
Axes over which to compute the inverse FFT. If not given, the last
`len(s)` axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the inverse transform over that
axis is performed multiple times.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` and `a`,
as explained in the parameters section above.
The length of each transformed axis is as given by the corresponding
element of `s`, or the length of the input in every axis except for the
last one if `s` is not given. In the final transformed axis the length
of the output when `s` is not given is ``2*(m-1)`` where `m` is the
length of the final transformed axis of the input. To get an odd
number of output points in the final axis, `s` must be specified.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
rfftn : The forward n-dimensional FFT of real input,
of which `irfftn` is the inverse.
fft : The one-dimensional FFT, with definitions and conventions used.
irfft : The inverse of the one-dimensional FFT of real input.
irfft2 : The inverse of the two-dimensional FFT of real input.
Notes
-----
See `fft` for definitions and conventions used.
See `rfft` for definitions and conventions used for real input.
Examples
--------
>>> a = np.zeros((3, 2, 2))
>>> a[0, 0, 0] = 3 * 2 * 2
>>> np.fft.irfftn(a)
array([[[ 1., 1.],
[ 1., 1.]],
[[ 1., 1.],
[ 1., 1.]],
[[ 1., 1.],
[ 1., 1.]]])
"""
a = asarray(a).astype(complex)
s, axes = _cook_nd_args(a, s, axes, invreal=1)
for ii in range(len(axes)-1):
a = ifft(a, s[ii], axes[ii])
a = irfft(a, s[-1], axes[-1])
return a
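# Illustrative sketch (added; assumes NumPy as `np`): the round trip from
# the docstring. Passing ``a.shape`` is what recovers the odd last axis.
#
# >>> import numpy as np
# >>> a = np.random.rand(3, 5)
# >>> np.allclose(np.fft.irfftn(np.fft.rfftn(a), a.shape), a)
# True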
def irfft2(a, s=None, axes=(-2, -1)):
"""
Compute the 2-dimensional inverse FFT of a real array.
Parameters
----------
a : array_like
The input array
s : sequence of ints, optional
Shape of the inverse FFT.
axes : sequence of ints, optional
The axes over which to compute the inverse fft.
Default is the last two axes.
Returns
-------
out : ndarray
The result of the inverse real 2-D FFT.
See Also
--------
irfftn : Compute the inverse of the N-dimensional FFT of real input.
Notes
-----
This is really `irfftn` with different defaults.
For more details see `irfftn`.
"""
return irfftn(a, s, axes)
| bsd-3-clause |
michaelneuder/image_quality_analysis | bin/nets/wip/ssim_nets/ssim_net2.py | 2 | 9518 | #!/usr/bin/env python3
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import tensorflow as tf
import matplotlib as mpl
import matplotlib.gridspec as gridspec
import pandas as pd
import numpy as np
mpl.use('Agg')
import time
import matplotlib.pyplot as plt
def convolve_inner_layers(x, W, b):
'''
inner layers of network --- relu activation
'''
y = tf.nn.conv2d(x, W, strides = [1,1,1,1], padding='VALID')
y = tf.nn.bias_add(y, b)
return tf.nn.relu(y)
def convolve_output_layer(x, W, b):
'''
output layer of network --- linear activation
'''
y = tf.nn.conv2d(x, W, strides = [1,1,1,1], padding='VALID')
y = tf.nn.bias_add(y, b)
return y
def conv_net(x, W, b):
'''
entire conv net. each layer feed to following layer as well as output layer
'''
conv1 = convolve_inner_layers(x, W['weights1'], b['bias1'])
conv2 = convolve_inner_layers(conv1, W['weights2'], b['bias2'])
conv3 = convolve_inner_layers(conv2, W['weights3'], b['bias3'])
conv4 = convolve_inner_layers(conv3, W['weights4'], b['bias4'])
output_feed = tf.concat([conv1, conv2, conv3, conv4],3)
output = convolve_output_layer(output_feed, W['weights_out'], b['bias_out'])
return output
def get_variance(training_target):
'''
returns variance of the target data. used in normalizing the error.
'''
all_pixels = training_target.flatten()
return all_pixels.var()
def normalize_input(train_data, test_data):
'''
normalizing input across each pixel and each channel (i.e. normalize each input to the network).
'''
mean, std_dev = np.mean(train_data, axis=0), np.std(train_data, axis=0)
return (train_data - mean) / std_dev, (test_data - mean) / std_dev
def get_epoch(x, y, n):
'''
splits the entire data set into minibatches of size n. returns a dict with the key being the
minibatch number and the value being a length 2 list with the features in the first index and
the targets in the second.
'''
input_size = x.shape[0]
number_batches = input_size // n
extra_examples = input_size % n
batches = {}
batch_indices = np.arange(input_size)
np.random.shuffle(batch_indices)
for i in range(number_batches):
temp_indices = batch_indices[n*i:n*(i+1)]
temp_x = []
temp_y = []
for j in temp_indices:
temp_x.append(x[j])
temp_y.append(y[j])
batches[i] = [np.asarray(temp_x), np.asarray(temp_y)]
if extra_examples != 0:
extra_indices = batch_indices[input_size-extra_examples:input_size]
temp_x = []
temp_y = []
for k in extra_indices:
temp_x.append(x[k])
temp_y.append(y[k])
batches[i+1] = [np.asarray(temp_x), np.asarray(temp_y)]
return batches
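# Example usage of get_epoch (added for illustration; toy shapes only).
# Shuffling changes which samples land in each minibatch, but the keys and
# shapes are deterministic:
# >>> feats, targets = np.arange(10).reshape(10, 1), np.arange(10)
# >>> batches = get_epoch(feats, targets, 4)
# >>> sorted(batches.keys())   # two full minibatches plus one with the 2 extras
# [0, 1, 2]
# >>> batches[0][0].shape, batches[0][1].shape
# ((4, 1), (4,))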
def main():
print('welcome to ssim net.')
# parameters
filter_dim, filter_dim2 = 11, 1
batch_size = 4
image_dim, result_dim = 96, 86
input_layer, first_layer, second_layer, third_layer, fourth_layer, output_layer = 4, 100, 50, 25, 10, 1
learning_rate = .001
epochs = 5000
# data input
# data_path = 'https://raw.githubusercontent.com/michaelneuder/image_quality_analysis/master/data/sample_data/'
data_path = '/home/neuder/Dropbox/github/image_quality_analysis/data/sample_data/'
# train data --- 500 images, 96x96 pixels
orig_500 = pd.read_csv('{}orig_500.txt'.format(data_path), header=None, delim_whitespace = True)
recon_500 = pd.read_csv('{}recon_500.txt'.format(data_path), header=None, delim_whitespace = True)
# test data --- 140 images, 96x96 pixels
orig_140 = pd.read_csv('{}orig_140.txt'.format(data_path), header=None, delim_whitespace = True)
recon_140 = pd.read_csv('{}recon_140.txt'.format(data_path), header=None, delim_whitespace = True)
# train target --- 500 images, 86x86 pixels (dimension reduction due to no zero padding being used)
ssim_500 = pd.read_csv('{}ssim_500_nogauss.csv'.format(data_path), header=None)
ssim_140 = pd.read_csv('{}ssim_140_nogauss.csv'.format(data_path), header=None)
print('images loaded...')
# getting 4 input channels for train and test --- (orig, recon, orig squared, recon squared)
original_images_train = orig_500.values
original_images_train_sq = orig_500.values**2
reconstructed_images_train = recon_500.values
reconstructed_images_train_sq = recon_500.values**2
original_images_test = orig_140.values
original_images_test_sq = orig_140.values**2
reconstructed_images_test = recon_140.values
reconstructed_images_test_sq = recon_140.values**2
# stack inputs
training_input = np.dstack((original_images_train, reconstructed_images_train, original_images_train_sq, reconstructed_images_train_sq))
testing_input = np.dstack((original_images_test, reconstructed_images_test, original_images_test_sq, reconstructed_images_test_sq))
# normalize inputs
training_input_normalized, testing_input_normalized = normalize_input(training_input, testing_input)
# target values
training_target = ssim_500.values
testing_target = ssim_140.values
# get size of training and testing set
train_size = original_images_train.shape[0]
test_size = original_images_test.shape[0]
# reshaping features to (num images, 96x96, 4 channels)
train_features = np.reshape(training_input_normalized, [train_size,image_dim,image_dim,input_layer])
test_features = np.reshape(testing_input_normalized, [test_size,image_dim,image_dim,input_layer])
# reshaping target to --- (num images, 86x86, 1)
train_target = np.reshape(training_target, [train_size, result_dim, result_dim, output_layer])
test_target = np.reshape(testing_target, [test_size, result_dim, result_dim, output_layer])
# initializing filters, this is what we are trying to learn --- fan in
scaling_factor = 0.1
initializer = tf.contrib.layers.xavier_initializer()
weights = {
'weights1': tf.get_variable('weights1', [filter_dim,filter_dim,input_layer,first_layer], initializer=initializer),
'weights2': tf.get_variable('weights2', [filter_dim2,filter_dim2,first_layer,second_layer], initializer=initializer),
'weights3': tf.get_variable('weights3', [filter_dim2,filter_dim2,second_layer,third_layer], initializer=initializer),
'weights4': tf.get_variable('weights4', [filter_dim2,filter_dim2,third_layer,fourth_layer], initializer=initializer),
'weights_out': tf.get_variable('weights_out', [filter_dim2,filter_dim2,fourth_layer+third_layer+second_layer+first_layer,output_layer], initializer=initializer)
}
biases = {
'bias1': tf.get_variable('bias1', [first_layer], initializer=initializer),
'bias2': tf.get_variable('bias2', [second_layer], initializer=initializer),
'bias3': tf.get_variable('bias3', [third_layer], initializer=initializer),
'bias4': tf.get_variable('bias4', [fourth_layer], initializer=initializer),
'bias_out': tf.get_variable('bias_out', [output_layer], initializer=initializer)
}
# tensorflow setup
x = tf.placeholder(tf.float32, [None, image_dim, image_dim, input_layer])
y = tf.placeholder(tf.float32, [None, result_dim, result_dim, output_layer])
# model
prediction = conv_net(x, weights, biases)
print(prediction)
print(x)
# get variance to normalize error terms during training
variance = get_variance(train_target)
# loss and optimization
cost = tf.reduce_mean(tf.square(tf.subtract(prediction, y)))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
init = tf.global_variables_initializer()
# error arrays
training_error, testing_error = [], []
epoch_time = np.asarray([])
saver = tf.train.Saver()
# tensorflow session & training
with tf.Session() as sess:
sess.run(init)
global_start_time = time.time()
print('starting training...')
for epoch_count in range(epochs):
start_time = time.time()
epoch = get_epoch(train_features, train_target, batch_size)
for i in epoch:
x_data_train, y_data_train = np.asarray(epoch[i][0]), np.asarray(epoch[i][1])
sess.run(optimizer, feed_dict={x : x_data_train, y : y_data_train})
train_loss = sess.run(cost, feed_dict={x : x_data_train, y : y_data_train})
training_error.append(100*train_loss/variance)
test_loss = sess.run(cost, feed_dict={x : test_features, y : test_target})
testing_error.append(100*test_loss/variance)
end_time = time.time()
epoch_time = np.append(epoch_time, end_time-start_time)
print('current epoch: {} -- '.format(epoch_count)
+'current train error: {:.4f} -- '.format(100*train_loss/variance)
+'average epoch time: {:.4}s '.format(epoch_time.mean()))
# plot the current error
f, axarr = plt.subplots(nrows=1, ncols=1, figsize=(9,6))
axarr.plot(np.arange(epoch_count+1), training_error, label='train')
axarr.plot(np.arange(epoch_count+1), testing_error, label='test')
axarr.legend()
axarr.set_ylim(0,100)
plt.savefig('ssim_net1.png')
if epoch_count % 100 == 0:
saver.save(sess, 'ssimNET{}'.format(epoch_count))
print('training finished.')
if __name__ == '__main__':
main()
| mit |
jskDr/jamespy | tflearn/linear_regression.py | 7 | 2600 | '''
A linear regression learning algorithm example using TensorFlow library.
Author: Aymeric Damien
Project: https://github.com/aymericdamien/TensorFlow-Examples/
'''
import tensorflow as tf
import numpy
import matplotlib.pyplot as plt
rng = numpy.random
# Parameters
learning_rate = 0.01
training_epochs = 2000
display_step = 50
# Training Data
train_X = numpy.asarray([3.3,4.4,5.5,6.71,6.93,4.168,9.779,6.182,7.59,2.167,7.042,10.791,5.313,7.997,5.654,9.27,3.1])
train_Y = numpy.asarray([1.7,2.76,2.09,3.19,1.694,1.573,3.366,2.596,2.53,1.221,2.827,3.465,1.65,2.904,2.42,2.94,1.3])
n_samples = train_X.shape[0]
# tf Graph Input
X = tf.placeholder("float")
Y = tf.placeholder("float")
# Create Model
# Set model weights
W = tf.Variable(rng.randn(), name="weight")
b = tf.Variable(rng.randn(), name="bias")
# Construct a linear model
activation = tf.add(tf.multiply(X, W), b)
# Minimize the squared errors
cost = tf.reduce_sum(tf.pow(activation-Y, 2))/(2*n_samples) #L2 loss
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost) #Gradient descent
# Initializing the variables
init = tf.global_variables_initializer()
# Launch the graph
with tf.Session() as sess:
sess.run(init)
# Fit all training data
for epoch in range(training_epochs):
for (x, y) in zip(train_X, train_Y):
sess.run(optimizer, feed_dict={X: x, Y: y})
#Display logs per epoch step
if epoch % display_step == 0:
print "Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(sess.run(cost, feed_dict={X: train_X, Y:train_Y})), \
"W=", sess.run(W), "b=", sess.run(b)
print "Optimization Finished!"
training_cost = sess.run(cost, feed_dict={X: train_X, Y: train_Y})
print "Training cost=", training_cost, "W=", sess.run(W), "b=", sess.run(b), '\n'
# Testing example, as requested (Issue #2)
test_X = numpy.asarray([6.83,4.668,8.9,7.91,5.7,8.7,3.1,2.1])
test_Y = numpy.asarray([1.84,2.273,3.2,2.831,2.92,3.24,1.35,1.03])
print "Testing... (L2 loss Comparison)"
testing_cost = sess.run(tf.reduce_sum(tf.pow(activation-Y, 2))/(2*test_X.shape[0]),
feed_dict={X: test_X, Y: test_Y}) #same function as cost above
print "Testing cost=", testing_cost
print "Absolute l2 loss difference:", abs(training_cost - testing_cost)
#Graphic display
plt.plot(train_X, train_Y, 'ro', label='Original data')
plt.plot(test_X, test_Y, 'bo', label='Testing data')
plt.plot(train_X, sess.run(W) * train_X + sess.run(b), label='Fitted line')
plt.legend()
plt.show() | mit |
lukeiwanski/tensorflow-opencl | tensorflow/contrib/learn/python/learn/learn_io/data_feeder.py | 88 | 31139 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementations of different data feeders to provide data for TF trainer."""
# TODO(ipolosukhin): Replace this module with feed-dict queue runners & queues.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import math
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import tf_logging as logging
# pylint: disable=g-multiple-import,g-bad-import-order
from .pandas_io import HAS_PANDAS, extract_pandas_data, extract_pandas_matrix, extract_pandas_labels
from .dask_io import HAS_DASK, extract_dask_data, extract_dask_labels
# pylint: enable=g-multiple-import,g-bad-import-order
def _get_in_out_shape(x_shape, y_shape, n_classes, batch_size=None):
"""Returns shape for input and output of the data feeder."""
x_is_dict, y_is_dict = isinstance(
x_shape, dict), y_shape is not None and isinstance(y_shape, dict)
if y_is_dict and n_classes is not None:
assert (isinstance(n_classes, dict))
if batch_size is None:
batch_size = list(x_shape.values())[0][0] if x_is_dict else x_shape[0]
elif batch_size <= 0:
raise ValueError('Invalid batch_size %d.' % batch_size)
if x_is_dict:
input_shape = {}
for k, v in list(x_shape.items()):
input_shape[k] = [batch_size] + (list(v[1:]) if len(v) > 1 else [1])
else:
x_shape = list(x_shape[1:]) if len(x_shape) > 1 else [1]
input_shape = [batch_size] + x_shape
if y_shape is None:
return input_shape, None, batch_size
def out_el_shape(out_shape, num_classes):
out_shape = list(out_shape[1:]) if len(out_shape) > 1 else []
# Skip first dimension if it is 1.
if out_shape and out_shape[0] == 1:
out_shape = out_shape[1:]
if num_classes is not None and num_classes > 1:
return [batch_size] + out_shape + [num_classes]
else:
return [batch_size] + out_shape
if not y_is_dict:
output_shape = out_el_shape(y_shape, n_classes)
else:
output_shape = dict([
(k, out_el_shape(v, n_classes[k]
if n_classes is not None and k in n_classes else None))
for k, v in list(y_shape.items())
])
return input_shape, output_shape, batch_size
def _data_type_filter(x, y):
"""Filter data types into acceptable format."""
if HAS_DASK:
x = extract_dask_data(x)
if y is not None:
y = extract_dask_labels(y)
if HAS_PANDAS:
x = extract_pandas_data(x)
if y is not None:
y = extract_pandas_labels(y)
return x, y
def _is_iterable(x):
return hasattr(x, 'next') or hasattr(x, '__next__')
def setup_train_data_feeder(x,
y,
n_classes,
batch_size=None,
shuffle=True,
epochs=None):
"""Create data feeder, to sample inputs from dataset.
If `x` and `y` are iterators, use `StreamingDataFeeder`.
Args:
x: numpy, pandas or Dask matrix or dictionary of aforementioned. Also
supports iterables.
y: numpy, pandas or Dask array or dictionary of aforementioned. Also
supports iterables.
n_classes: number of classes. Must be None or same type as y. In case `y`
is `dict` (or an iterable which returns dict), `n_classes[key]` gives the
number of classes for `y[key]`.
batch_size: size to split data into parts. Must be >= 1.
shuffle: Whether to shuffle the inputs.
epochs: Number of epochs to run.
Returns:
DataFeeder object that returns training data.
Raises:
ValueError: if one of `x` and `y` is iterable and the other is not.
"""
x, y = _data_type_filter(x, y)
if HAS_DASK:
# pylint: disable=g-import-not-at-top
import dask.dataframe as dd
if (isinstance(x, (dd.Series, dd.DataFrame)) and
(y is None or isinstance(y, (dd.Series, dd.DataFrame)))):
data_feeder_cls = DaskDataFeeder
else:
data_feeder_cls = DataFeeder
else:
data_feeder_cls = DataFeeder
if _is_iterable(x):
if y is not None and not _is_iterable(y):
raise ValueError('Both x and y should be iterators for '
'streaming learning to work.')
return StreamingDataFeeder(x, y, n_classes, batch_size)
return data_feeder_cls(
x, y, n_classes, batch_size, shuffle=shuffle, epochs=epochs)
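# Illustrative usage (added; array shapes are hypothetical):
# feeder = setup_train_data_feeder(
#     x=np.random.rand(100, 5),          # 100 samples, 5 features
#     y=np.random.randint(0, 3, 100),    # labels for 3 classes
#     n_classes=3, batch_size=32, shuffle=True, epochs=1)
# Since these inputs are ndarrays rather than iterators, the call returns a
# DataFeeder instance.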
def _batch_data(x, batch_size=None):
if (batch_size is not None) and (batch_size <= 0):
raise ValueError('Invalid batch_size %d.' % batch_size)
x_first_el = six.next(x)
x = itertools.chain([x_first_el], x)
chunk = dict([(k, []) for k in list(x_first_el.keys())]) if isinstance(
x_first_el, dict) else []
chunk_filled = False
for data in x:
if isinstance(data, dict):
for k, v in list(data.items()):
chunk[k].append(v)
if (batch_size is not None) and (len(chunk[k]) >= batch_size):
chunk[k] = np.matrix(chunk[k])
chunk_filled = True
if chunk_filled:
yield chunk
chunk = dict([(k, []) for k in list(x_first_el.keys())]) if isinstance(
x_first_el, dict) else []
chunk_filled = False
else:
chunk.append(data)
if (batch_size is not None) and (len(chunk) >= batch_size):
yield np.matrix(chunk)
chunk = []
if isinstance(x_first_el, dict):
for k, v in list(data.items()):
chunk[k] = np.matrix(chunk[k])
yield chunk
else:
yield np.matrix(chunk)
def setup_predict_data_feeder(x, batch_size=None):
"""Returns an iterable for feeding into predict step.
Args:
x: numpy, pandas, Dask array or dictionary of aforementioned. Also supports
iterable.
batch_size: Size of batches to split data into. If `None`, returns one
batch of full size.
Returns:
List or iterator (or dictionary thereof) of parts of data to predict on.
Raises:
ValueError: if `batch_size` <= 0.
"""
if HAS_DASK:
x = extract_dask_data(x)
if HAS_PANDAS:
x = extract_pandas_data(x)
if _is_iterable(x):
return _batch_data(x, batch_size)
if len(x.shape) == 1:
x = np.reshape(x, (-1, 1))
if batch_size is not None:
if batch_size <= 0:
raise ValueError('Invalid batch_size %d.' % batch_size)
n_batches = int(math.ceil(float(len(x)) / batch_size))
return [x[i * batch_size:(i + 1) * batch_size] for i in xrange(n_batches)]
return [x]
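# Illustration (added): 1-D input is first reshaped to a column, then split
# into ceil(len(x) / batch_size) parts.
# >>> parts = setup_predict_data_feeder(np.arange(10), batch_size=4)
# >>> [p.shape for p in parts]
# [(4, 1), (4, 1), (2, 1)]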
def setup_processor_data_feeder(x):
"""Sets up processor iterable.
Args:
x: numpy, pandas or iterable.
Returns:
Iterable of data to process.
"""
if HAS_PANDAS:
x = extract_pandas_matrix(x)
return x
def check_array(array, dtype):
"""Checks array on dtype and converts it if different.
Args:
array: Input array.
dtype: Expected dtype.
Returns:
Original array or converted.
"""
# skip check if array is instance of other classes, e.g. h5py.Dataset
# to avoid copying array and loading whole data into memory
if isinstance(array, (np.ndarray, list)):
array = np.array(array, dtype=dtype, order=None, copy=False)
return array
def _access(data, iloc):
"""Accesses an element from collection, using integer location based indexing.
Args:
data: array-like. The collection to access
iloc: `int` or `list` of `int`s. Location(s) to access in `collection`
Returns:
The element of `a` found at location(s) `iloc`.
"""
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
if isinstance(data, pd.Series) or isinstance(data, pd.DataFrame):
return data.iloc[iloc]
return data[iloc]
def _check_dtype(dtype):
if dtypes.as_dtype(dtype) == dtypes.float64:
logging.warn(
'float64 is not supported by many models, consider casting to float32.')
return dtype
class DataFeeder(object):
"""Data feeder is an example class to sample data for TF trainer."""
def __init__(self,
x,
y,
n_classes,
batch_size=None,
shuffle=True,
random_state=None,
epochs=None):
"""Initializes a DataFeeder instance.
Args:
x: One feature sample which can either Nd numpy matrix of shape
`[n_samples, n_features, ...]` or dictionary of Nd numpy matrix.
y: label vector, either floats for regression or class id for
classification. If matrix, will consider as a sequence of labels.
Can be `None` for unsupervised setting. Also supports dictionary of
labels.
n_classes: Number of classes, 0 and 1 are considered regression, `None`
will pass through the input labels without one-hot conversion. Also, if
`y` is `dict`, then `n_classes` must be `dict` such that
`n_classes[key] = n_classes for label y[key]`, `None` otherwise.
batch_size: Mini-batch size to accumulate samples in one mini batch.
shuffle: Whether to shuffle `x`.
random_state: Numpy `RandomState` object to reproduce sampling.
epochs: Number of times to iterate over input data before raising
`StopIteration` exception.
Attributes:
x: Input features (ndarray or dictionary of ndarrays).
y: Input label (ndarray or dictionary of ndarrays).
n_classes: Number of classes (if `None`, pass through indices without
one-hot conversion).
batch_size: Mini-batch size to accumulate.
input_shape: Shape of the input (or dictionary of shapes).
output_shape: Shape of the output (or dictionary of shapes).
input_dtype: DType of input (or dictionary of dtypes).
output_dtype: DType of output (or dictionary of dtypes).
"""
x_is_dict, y_is_dict = isinstance(x, dict), y is not None and isinstance(
y, dict)
if isinstance(y, list):
y = np.array(y)
self._x = dict([(k, check_array(v, v.dtype)) for k, v in list(x.items())
]) if x_is_dict else check_array(x, x.dtype)
self._y = None if y is None else \
dict([(k, check_array(v, v.dtype)) for k, v in list(y.items())]) if x_is_dict else check_array(y, y.dtype)
# self.n_classes is not None means we're converting raw target indices to one-hot.
if n_classes is not None:
if not y_is_dict:
y_dtype = (np.int64
if n_classes is not None and n_classes > 1 else np.float32)
self._y = (None if y is None else check_array(y, dtype=y_dtype))
self.n_classes = n_classes
self.max_epochs = epochs
x_shape = dict([(k, v.shape) for k, v in list(self._x.items())
]) if x_is_dict else self._x.shape
y_shape = dict([(k, v.shape) for k, v in list(self._y.items())
]) if y_is_dict else None if y is None else self._y.shape
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
x_shape, y_shape, n_classes, batch_size)
# Input dtype matches dtype of x.
self._input_dtype = dict([(k, _check_dtype(v.dtype)) for k, v in list(self._x.items())]) if x_is_dict \
else _check_dtype(self._x.dtype)
# note: self._output_dtype = np.float32 when y is None
self._output_dtype = dict([(k, _check_dtype(v.dtype)) for k, v in list(self._y.items())]) if y_is_dict \
else _check_dtype(self._y.dtype) if y is not None else np.float32
# self.n_classes is None means we're passing in raw target indices
if n_classes is not None and y_is_dict:
for key in list(n_classes.keys()):
if key in self._output_dtype:
self._output_dtype[key] = np.float32
self._shuffle = shuffle
self.random_state = np.random.RandomState(
42) if random_state is None else random_state
num_samples = list(self._x.values())[0].shape[
0] if x_is_dict else self._x.shape[0]
if self._shuffle:
self.indices = self.random_state.permutation(num_samples)
else:
self.indices = np.array(range(num_samples))
self.offset = 0
self.epoch = 0
self._epoch_placeholder = None
@property
def x(self):
return self._x
@property
def y(self):
return self._y
@property
def shuffle(self):
return self._shuffle
@property
def input_dtype(self):
return self._input_dtype
@property
def output_dtype(self):
return self._output_dtype
@property
def batch_size(self):
return self._batch_size
def make_epoch_variable(self):
"""Adds a placeholder variable for the epoch to the graph.
Returns:
The epoch placeholder.
"""
self._epoch_placeholder = array_ops.placeholder(
dtypes.int32, [1], name='epoch')
return self._epoch_placeholder
def input_builder(self):
"""Builds inputs in the graph.
Returns:
Two placeholders for inputs and outputs.
"""
def get_placeholder(shape, dtype, name_prepend):
if shape is None:
return None
if isinstance(shape, dict):
placeholder = {}
for key in list(shape.keys()):
placeholder[key] = array_ops.placeholder(
dtypes.as_dtype(dtype[key]), [None] + shape[key][1:],
name=name_prepend + '_' + key)
else:
placeholder = array_ops.placeholder(
dtypes.as_dtype(dtype), [None] + shape[1:], name=name_prepend)
return placeholder
self._input_placeholder = get_placeholder(self.input_shape,
self._input_dtype, 'input')
self._output_placeholder = get_placeholder(self.output_shape,
self._output_dtype, 'output')
return self._input_placeholder, self._output_placeholder
def set_placeholders(self, input_placeholder, output_placeholder):
"""Sets placeholders for this data feeder.
Args:
input_placeholder: Placeholder for `x` variable. Should match shape
of the examples in the x dataset.
output_placeholder: Placeholder for `y` variable. Should match
shape of the examples in the y dataset. Can be `None`.
"""
self._input_placeholder = input_placeholder
self._output_placeholder = output_placeholder
def get_feed_params(self):
"""Function returns a `dict` with data feed params while training.
Returns:
A `dict` with data feed params while training.
"""
return {
'epoch': self.epoch,
'offset': self.offset,
'batch_size': self._batch_size
}
def get_feed_dict_fn(self):
"""Returns a function that samples data into given placeholders.
Returns:
A function that when called samples a random subset of batch size
from `x` and `y`.
"""
x_is_dict, y_is_dict = isinstance(
self._x, dict), self._y is not None and isinstance(self._y, dict)
# Assign input features from random indices.
def extract(data, indices):
return (np.array(_access(data, indices)).reshape((indices.shape[0], 1)) if
len(data.shape) == 1 else _access(data, indices))
# assign labels from random indices
def assign_label(data, shape, dtype, n_classes, indices):
shape[0] = indices.shape[0]
out = np.zeros(shape, dtype=dtype)
for i in xrange(out.shape[0]):
sample = indices[i]
# self.n_classes is None means we're passing in raw target indices
if n_classes is None:
out[i] = _access(data, sample)
else:
if n_classes > 1:
if len(shape) == 2:
out.itemset((i, int(_access(data, sample))), 1.0)
else:
for idx, value in enumerate(_access(data, sample)):
out.itemset(tuple([i, idx, value]), 1.0)
else:
out[i] = _access(data, sample)
return out
def _feed_dict_fn():
"""Function that samples data into given placeholders."""
if self.max_epochs is not None and self.epoch + 1 > self.max_epochs:
raise StopIteration
assert self._input_placeholder is not None
feed_dict = {}
if self._epoch_placeholder is not None:
feed_dict[self._epoch_placeholder.name] = [self.epoch]
# Take next batch of indices.
x_len = list(self._x.values())[0].shape[
0] if x_is_dict else self._x.shape[0]
end = min(x_len, self.offset + self._batch_size)
batch_indices = self.indices[self.offset:end]
# adding input placeholder
feed_dict.update(
dict([(self._input_placeholder[k].name, extract(v, batch_indices))
for k, v in list(self._x.items())]) if x_is_dict else
{self._input_placeholder.name: extract(self._x, batch_indices)})
# move offset and reset it if necessary
self.offset += self._batch_size
if self.offset >= x_len:
self.indices = self.random_state.permutation(
x_len) if self._shuffle else np.array(range(x_len))
self.offset = 0
self.epoch += 1
# return early if there are no labels
if self._output_placeholder is None:
return feed_dict
# adding output placeholders
if y_is_dict:
for k, v in list(self._y.items()):
n_classes = (self.n_classes[k] if k in self.n_classes else
None) if self.n_classes is not None else None
shape, dtype = self.output_shape[k], self._output_dtype[k]
feed_dict.update({
self._output_placeholder[k].name:
assign_label(v, shape, dtype, n_classes, batch_indices)
})
else:
shape, dtype, n_classes = self.output_shape, self._output_dtype, self.n_classes
feed_dict.update({
self._output_placeholder.name:
assign_label(self._y, shape, dtype, n_classes, batch_indices)
})
return feed_dict
return _feed_dict_fn
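# Sketch of the intended DataFeeder flow (added; the session/train_op names
# are hypothetical):
# feeder = DataFeeder(x, y, n_classes=None, batch_size=32)
# inp, out = feeder.input_builder()         # creates the placeholders
# feed_fn = feeder.get_feed_dict_fn()
# sess.run(train_op, feed_dict=feed_fn())   # one mini-batch per call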
class StreamingDataFeeder(DataFeeder):
"""Data feeder for TF trainer that reads data from iterator.
Streaming data feeder allows to read data as it comes it from disk or
somewhere else. It's custom to have this iterators rotate infinetly over
the dataset, to allow control of how much to learn on the trainer side.
"""
def __init__(self, x, y, n_classes, batch_size):
"""Initializes a StreamingDataFeeder instance.
Args:
x: iterator each element of which returns one feature sample. Sample can
be a Nd numpy matrix or dictionary of Nd numpy matrices.
y: iterator each element of which returns one label sample. Sample can be
a Nd numpy matrix or dictionary of Nd numpy matrices with 1 or many
classes regression values.
n_classes: indicator of how many classes the corresponding label sample
has for the purposes of one-hot conversion of label. In case where `y`
is a dictionary, `n_classes` must be dictionary (with same keys as `y`)
of how many classes there are in each label in `y`. If key is
present in `y` and missing in `n_classes`, the value is assumed `None`
and no one-hot conversion will be applied to the label with that key.
batch_size: Mini batch size to accumulate samples in one batch. If set to
`None`, assumes the iterator returns already-batched elements.
Attributes:
x: input features (or dictionary of input features).
y: input label (or dictionary of output features).
n_classes: number of classes.
batch_size: mini batch size to accumulate.
input_shape: shape of the input (can be dictionary depending on `x`).
output_shape: shape of the output (can be dictionary depending on `y`).
input_dtype: dtype of input (can be dictionary depending on `x`).
output_dtype: dtype of output (can be dictionary depending on `y`).
"""
# pylint: disable=invalid-name,super-init-not-called
x_first_el = six.next(x)
self._x = itertools.chain([x_first_el], x)
if y is not None:
y_first_el = six.next(y)
self._y = itertools.chain([y_first_el], y)
else:
y_first_el = None
self._y = None
self.n_classes = n_classes
x_is_dict = isinstance(x_first_el, dict)
y_is_dict = y is not None and isinstance(y_first_el, dict)
if y_is_dict and n_classes is not None:
assert isinstance(n_classes, dict)
# extract shapes for first_elements
if x_is_dict:
x_first_el_shape = dict(
[(k, [1] + list(v.shape)) for k, v in list(x_first_el.items())])
else:
x_first_el_shape = [1] + list(x_first_el.shape)
if y_is_dict:
y_first_el_shape = dict(
[(k, [1] + list(v.shape)) for k, v in list(y_first_el.items())])
elif y is None:
y_first_el_shape = None
else:
y_first_el_shape = ([1] + list(y_first_el[0].shape if isinstance(
y_first_el, list) else y_first_el.shape))
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
x_first_el_shape, y_first_el_shape, n_classes, batch_size)
# Input dtype of x_first_el.
if x_is_dict:
self._input_dtype = dict(
[(k, _check_dtype(v.dtype)) for k, v in list(x_first_el.items())])
else:
self._input_dtype = _check_dtype(x_first_el.dtype)
# Output dtype of y_first_el.
def check_y_dtype(el):
if isinstance(el, np.ndarray):
return el.dtype
elif isinstance(el, list):
return check_y_dtype(el[0])
else:
return _check_dtype(np.dtype(type(el)))
# Output types are floats, due to both softmaxes and regression req.
if n_classes is not None and (y is None or not y_is_dict) and n_classes > 0:
self._output_dtype = np.float32
elif y_is_dict:
self._output_dtype = dict(
[(k, check_y_dtype(v)) for k, v in list(y_first_el.items())])
elif y is None:
self._output_dtype = None
else:
self._output_dtype = check_y_dtype(y_first_el)
def get_feed_params(self):
"""Function returns a `dict` with data feed params while training.
Returns:
A `dict` with data feed params while training.
"""
return {'batch_size': self._batch_size}
def get_feed_dict_fn(self):
"""Returns a function, that will sample data and provide it to placeholders.
Returns:
A function that when called samples a random subset of batch size
from x and y.
"""
self.stopped = False
def _feed_dict_fn():
"""Samples data and provides it to placeholders.
Returns:
`dict` of input and output tensors.
"""
def init_array(shape, dtype):
"""Initialize array of given shape or dict of shapes and dtype."""
if shape is None:
return None
elif isinstance(shape, dict):
return dict([(k, np.zeros(shape[k], dtype[k]))
for k in list(shape.keys())])
else:
return np.zeros(shape, dtype=dtype)
def put_data_array(dest, index, source=None, n_classes=None):
"""Puts data array into container."""
if source is None:
dest = dest[:index]
elif n_classes is not None and n_classes > 1:
if len(self.output_shape) == 2:
dest.itemset((index, source), 1.0)
else:
for idx, value in enumerate(source):
dest.itemset(tuple([index, idx, value]), 1.0)
else:
if len(dest.shape) > 1:
dest[index, :] = source
else:
dest[index] = source[0] if isinstance(source, list) else source
return dest
def put_data_array_or_dict(holder, index, data=None, n_classes=None):
"""Puts data array or data dictionary into container."""
if holder is None:
return None
if isinstance(holder, dict):
if data is None:
data = {k: None for k in holder.keys()}
assert isinstance(data, dict)
for k in holder.keys():
num_classes = n_classes[k] if (n_classes is not None and
k in n_classes) else None
holder[k] = put_data_array(holder[k], index, data[k], num_classes)
else:
holder = put_data_array(holder, index, data, n_classes)
return holder
if self.stopped:
raise StopIteration
inp = init_array(self.input_shape, self._input_dtype)
out = init_array(self.output_shape, self._output_dtype)
for i in xrange(self._batch_size):
# Add handling when queue ends.
try:
next_inp = six.next(self._x)
inp = put_data_array_or_dict(inp, i, next_inp, None)
except StopIteration:
self.stopped = True
if i == 0:
raise
inp = put_data_array_or_dict(inp, i, None, None)
out = put_data_array_or_dict(out, i, None, None)
break
if self._y is not None:
next_out = six.next(self._y)
out = put_data_array_or_dict(out, i, next_out, self.n_classes)
# creating feed_dict
if isinstance(inp, dict):
feed_dict = dict([(self._input_placeholder[k].name, inp[k])
for k in list(self._input_placeholder.keys())])
else:
feed_dict = {self._input_placeholder.name: inp}
if self._y is not None:
if isinstance(out, dict):
feed_dict.update(
dict([(self._output_placeholder[k].name, out[k])
for k in list(self._output_placeholder.keys())]))
else:
feed_dict.update({self._output_placeholder.name: out})
return feed_dict
return _feed_dict_fn
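# Illustration (added): StreamingDataFeeder consumes iterators, so samples
# can be produced lazily, e.g. one at a time from disk:
# def x_iter():
#   while True:
#     yield np.random.rand(8)          # one feature sample per next()
# def y_iter():
#   while True:
#     yield np.random.randint(0, 2)    # one label per next()
# feeder = StreamingDataFeeder(x_iter(), y_iter(), n_classes=2, batch_size=16)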
class DaskDataFeeder(object):
"""Data feeder for that reads data from dask.Series and dask.DataFrame.
Numpy arrays can be serialized to disk and it's possible to do random seeks
into them. DaskDataFeeder will remove requirement to have full dataset in the
memory and still do random seeks for sampling of batches.
"""
def __init__(self,
x,
y,
n_classes,
batch_size,
shuffle=True,
random_state=None,
epochs=None):
"""Initializes a DaskDataFeeder instance.
Args:
x: iterator that, for each element, returns features.
y: iterator that, for each element, returns 1 or many classes /
regression values.
n_classes: indicator of how many classes the label has.
batch_size: Mini batch size to accumulate.
shuffle: Whether to shuffle the inputs.
random_state: random state for RNG. Note that it will mutate so use a
int value for this if you want consistent sized batches.
epochs: Number of epochs to run.
Attributes:
x: input features.
y: input label.
n_classes: number of classes.
batch_size: mini batch size to accumulate.
input_shape: shape of the input.
output_shape: shape of the output.
input_dtype: dtype of input.
output_dtype: dtype of output.
Raises:
ValueError: if `x` or `y` are `dict`, as they are not supported currently.
"""
if isinstance(x, dict) or isinstance(y, dict):
raise ValueError(
'DaskDataFeeder does not support dictionaries at the moment.')
# pylint: disable=invalid-name,super-init-not-called
import dask.dataframe as dd # pylint: disable=g-import-not-at-top
# TODO(terrytangyuan): check x and y dtypes in dask_io like pandas
self._x = x
self._y = y
# save column names
self._x_columns = list(x.columns)
if isinstance(y.columns[0], str):
self._y_columns = list(y.columns)
else:
# deal with cases where two DFs have overlapped default numeric colnames
self._y_columns = len(self._x_columns) + 1
self._y = self._y.rename(columns={y.columns[0]: self._y_columns})
# TODO(terrytangyuan): deal with unsupervised cases
# combine into a data frame
self.df = dd.multi.concat([self._x, self._y], axis=1)
self.n_classes = n_classes
x_count = x.count().compute()[0]
x_shape = (x_count, len(self._x.columns))
y_shape = (x_count, len(self._y.columns))
# TODO(terrytangyuan): Add support for shuffle and epochs.
self._shuffle = shuffle
self.epochs = epochs
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
x_shape, y_shape, n_classes, batch_size)
self.sample_fraction = self._batch_size / float(x_count)
self._input_dtype = _check_dtype(self._x.dtypes[0])
self._output_dtype = _check_dtype(self._y.dtypes[self._y_columns])
if random_state is None:
self.random_state = 66
else:
self.random_state = random_state
def get_feed_params(self):
"""Function returns a `dict` with data feed params while training.
Returns:
A `dict` with data feed params while training.
"""
return {'batch_size': self._batch_size}
def get_feed_dict_fn(self, input_placeholder, output_placeholder):
"""Returns a function, that will sample data and provide it to placeholders.
Args:
input_placeholder: tf.Placeholder for input features mini batch.
output_placeholder: tf.Placeholder for output labels.
Returns:
A function that when called samples a random subset of batch size
from x and y.
"""
def _feed_dict_fn():
"""Samples data and provides it to placeholders."""
# TODO(ipolosukhin): option for with/without replacement (dev version of
# dask)
sample = self.df.random_split(
[self.sample_fraction, 1 - self.sample_fraction],
random_state=self.random_state)
inp = extract_pandas_matrix(sample[0][self._x_columns].compute()).tolist()
out = extract_pandas_matrix(sample[0][self._y_columns].compute())
# convert to correct dtype
inp = np.array(inp, dtype=self._input_dtype)
# one-hot encode out for each class for cross entropy loss
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
if not isinstance(out, pd.Series):
out = out.flatten()
out_max = self._y.max().compute().values[0]
encoded_out = np.zeros((out.size, out_max + 1), dtype=self._output_dtype)
encoded_out[np.arange(out.size), out] = 1
return {input_placeholder.name: inp, output_placeholder.name: encoded_out}
return _feed_dict_fn
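# A minimal usage sketch for DaskDataFeeder (our addition, not part of the
# original module); `x_df`, `y_df` and the two placeholders are hypothetical
# stand-ins for objects created elsewhere:
#   feeder = DaskDataFeeder(x_df, y_df, n_classes=3, batch_size=32)
#   feed_fn = feeder.get_feed_dict_fn(input_placeholder, output_placeholder)
#   feed_dict = feed_fn()  # samples ~batch_size rows, one-hot encodes labels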
| apache-2.0 |
rajathkumarmp/BinPy | BinPy/analog/sig_gen.py | 4 | 22882 | from BinPy import *
import threading
import math
import time
import sys
class SignalGenerator(threading.Thread):
"""
Signal Generator Block
======================
Create a SignalGenerator object ( runs on a child thread ) used to generate
an analog voltage signal of the desired type. The frequency and amplitude
parameters can be customized.
All inputs / outputs / enable are Bus types
METHODS
=======
    set_enable(enable_Bus_1_bit) : link the enable_Bus_1_bit with the SignalGenerator's enable.
set_type(integer_type) : Set the output waveform type.
==============================================================
| TYPE_NO | TYPE | Built-in Constant Name |
==============================================================
| 0 | Sine | SignalGenerator.SIN |
| 1 | Square | SignalGenerator.SQUARE |
| 2 | Ramp | SignalGenerator.RAMP |
| 3 | Triangular | SignalGenerator.TRIANGULAR |
| 4 | TTL | SignalGenerator.TTL |
==============================================================
    set_attenuation(float) : The given float value sets the attenuation level.
                             Though this can be configured to any custom floating
                             point value, it is advised to choose among the
                             standard attenuation levels using the SignalGenerator.ATTEN_LEVEL dict.
    set_amplitude(float_percent_max) : The amplitude is set as a linearly varying percent of the total
maximum ( out of 100 ) for the current Attenuation level.
set_outputs(output_Bus_2_bit) : The output is linked to the 2 Bit Bus passed to the method.
output_Bus_2_bit[0] will link to the signal generator output
output_Bus_2_bit[1] will link to the SignalGenerator's GND
set_frequency_range(tuple) : set the frequency range of the SignalGenerator object.
NOTE: While this can be configured to any value, it is very strongly
emphasized to use standard frequency ranges as defined in the
SignalGenerator.FREQ_RANGE dict which contains tuples of predefined ranges.
This will ensure stable behaviour.
set_frequency(float_percent_max) : The final output frequency is the float_percent_max percent of the
currently set frequency range.
set_frequency_exact(float) : This method is used to set the exact output frequency of the
SignalGenerator object. If the given value is above SignalGenerator.FREQ_MAX
                                       or below SignalGenerator.FREQ_MIN, the output frequency saturates at the
                                       respective extreme.
NOTE: This method is preferred over setting custom frequency ranges.
set_amplitude_exact(float) : This method is used to set the exact output amplitude of the SignalGenerator
object. If the given value is above SignalGenerator.AMPL_MAX or below
                                       SignalGenerator.AMPL_MIN, the output amplitude saturates at the respective extreme.
NOTE: This method is preferred over setting custom amplitude attenuation values.
set_modulation_input(Bus_2_bit) : Link the external modulation input to modulate the output accordingly.
set_modulation_type(integer_type): Set the modulation type to the desired value.
=======================================================
| TYPE_NO | TYPE | Built-in Constant Name |
=======================================================
| 0 | NONE | SignalGenerator.NO_MOD |
| 1 | AM | SignalGenerator.AM_MOD |
| 2 | FM | SignalGenerator.FM_MOD |
=======================================================
trigger() : Calculate and update the time varying output based on the current setting.
PROPERTIES
==========
type : Returns the type no of the output waveform
frequency : Current Frequency of the output ( in Hz )
frequency_range : Returns a tuple of the current frequency range
time_period : Current Time period of the output in s
attenuation : Current Attenuation level in db
amplitude : Current amplitude level
enabled : Returns true if the enable input is HIGH
disabled : Returns true if the enable input is LOW
sampling_time_interval : Returns the updation time interval of the output
last_updated_time : Returns a floating point value corresponding
to the last updated time
updating : Returns true if the updation is not yet over
CONSTANTS
=========
AMPL_MIN : 0.0 v
AMPL_MAX : 10.0 v
FREQ_MIN : 0.1 Hz ( 10s Total time period )
FREQ_MAX : 1,000,000,000 Hz ( 1 GHz )
NO_MOD : 0
AM_MOD : 1
FM_MOD : 2
SIN : 0
SQUARE : 1
RAMP : 2
TRIANGULAR : 3
TTL : 4
FREQ_RANGE : {
0 : ( 0.1, 10 ),
1 : ( 10, 100 ),
2 : ( 100, 1k ),
3 : ( 1k, 10k ),
4 : ( 10k, 100k ),
5 : ( 100k, 1M ),
6 : ( 1M, 10M ),
7 : ( 10M, 1G )
}
ATTEN_LEVEL : { 0 : 0db, 1 : 10db, 2 : 20db, 3 : 30db, 4 : 40db }
USAGE
=====
>>> sig1 = SignalGenerator(typ = SignalGenerator.SIN, freq = 1000, ampl = 5.0)
>>> a = Bus(2); a.set_type(analog=True);
>>> sig1.set_outputs(a)
AM SYNTHESIS USING 2 SIGNAL GENERATOR INSTANCES
===============================================
>>> from BinPy import SignalGenerator
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> import math, time
>>> m_t = SignalGenerator(typ = 0, freq = 10, ampl = 1)
>>> m_t.set_offset(-0.5)
>>> c_t = SignalGenerator(typ = 0, freq = 100, ampl = 10)
>>> c_t.set_offset(-5) # To make the range as [-5, 5]
>>> c_t.set_modulation_input(m_t.outputs)
>>> c_t.set_modulation_type(1)
>>> time.sleep(0.5) # To allow setup time
>>> data = np.zeros(shape = (2, math.ceil(m_t.time_period / c_t.sampling_time_interval)))
>>> for i in range(data.shape[1]):
... data[0][i] = m_t.last_updated_time + m_t.time_period * i
... data[1][i] = c_t.outputs[0].voltage
... time.sleep(c_t.sampling_time_interval)
>>> fig, ax = plt.subplots()
>>> ax.plot(data[0], data[1])
>>> plt.show()
>>> m_t.kill()
>>> c_t.kill()
"""
# Defining the constants
AMPL_MIN = 0.0
AMPL_MAX = 10.0
FREQ_MIN = 0.1
FREQ_MAX = 1000000000
NO_MOD = 0
AM_MOD = 1
FM_MOD = 2
SIN = 0
SQUARE = 1
RAMP = 2
TRIANGULAR = 3
TTL = 4
FREQ_RANGE = {
0: (0.1, 10),
1: (10, 100),
2: (100, 1000),
3: (1000, 10000),
4: (10000, 100000),
5: (100000, 1000000),
6: (1000000, 10000000),
7: (10000000, 1000000000)
}
ATTEN_LEVEL = {0: 0, 1: 10, 2: 20, 3: 30, 4: 40}
def __init__(self, typ=0, freq=1000, ampl=5):
threading.Thread.__init__(self)
self.daemon = True
        self._enable = Bus(1)  # set_enable() and the `enabled` property read this name
self.outputs = Bus(2)
self.outputs.set_type(analog=True)
self.set_offset(0.0)
self.mod_ip = Bus(2)
self.mod_ip.set_type(analog=True)
self.set_modulation_type(0) # Default value is no modulation
self.set_type(typ)
self._frequency = 1000.0
self.set_frequency_exact(freq)
# This will calculate the range / freq percent ( equiv. to freq. knob )
# and update them accordingly
self._amplitude = 1.0
self.set_amplitude_exact(ampl)
# This will calculate the ampl range / amplitude percent ( equiv. to
# amplitude knob ) and update them accordingly
self._last_updated_time = 0
self._exit = False
# Auto start the thread
self.start()
@property
def frequency(self):
return float(self._frequency)
@property
def time_period(self):
return float(self._time_period)
@property
def attenuation(self):
return float(self._attenuation)
@property
def amplitude(self):
return float(self._amplitude)
@property
def offset(self):
return float(self._offset)
@property
def type(self):
return float(self._type)
@property
def enabled(self):
return bool(self._enable)
@property
def disabled(self):
return not self.enabled
@property
def frequency_range(self):
return self._frequency_range
@property
def sampling_time_interval(self):
return self._sampling_time_interval
@property
def last_updated_time(self):
return self._last_updated_time
@property
def updating(self):
return self._updating
def set_amplitude_exact(self, ampl):
""" Set the amplitude of the output signal """
if (type(ampl) not in [int, float]):
raise Exception(
"ERROR: Amplitude can only be a float ( or int ) value.")
        if ampl <= self.AMPL_MIN:
            self._amplitude = 0
            self._attenuation = 0
            return
        if ampl >= self.AMPL_MAX:
            self._amplitude = 10  # 10v is the maximum amplitude
            self._attenuation = 0
            return
        self._attenuation = 20 * (math.log10(float(ampl) / 10))
        self._amplitude = float(ampl)
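    # Worked check (our note, not in the original source): for ampl = 5.0 the
    # stored attenuation is 20*log10(5/10) = 20*log10(0.5) ~= -6.02 dB, i.e.
    # the level is expressed relative to the 10 V full-scale amplitude.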
def set_attenuation(self, attenuation):
"""
Set the attenuation value. Use float values for input or use predefined
dictionary SignalGenerator.ATTEN_LEVEL
"""
if (type(attenuation) not in [int, float]):
raise Exception(
"ERROR: Attenuation can only be a float ( or int ) value.")
if (attenuation < 0):
raise Exception("ERROR: Only positive values can be given")
self._attenuation = attenuation
def set_amplitude(self, percent_range):
"""
Set the amplitude as the percentage value of the current range. - Knob like usage.
The current range is decided by the current attenuation value.
Set appropriate attenuation value before setting the amplitude.
"""
if (type(percent_range) not in [int, float]):
raise Exception("ERROR: Only int or float values can be passed.")
if (percent_range < 0 or percent_range > 100):
raise Exception(
"ERROR: Only values between 1 to 100 can be passed")
self._amplitude = float(self.AMPL_MIN +
(((10 ** (-
self._attenuation /
20)) *
self.AMPL_MAX) *
percent_range *
0.01))
def set_frequency_range(self, range_tuple):
"""
Set the frequency range of the output. Use ranges within
SignalGenerator.FREQ_MAX and SignalGenerator.FREQ_MIN.
Specify input as a tuple containg the LOW followed by the
HIGH frequency limits. Preferably use the predefined constant
dictionary SignalGenerator.FREQ_RANGE
FREQ_RANGE {
0 : ( 0.1, 10 ),
1 : ( 10, 100 ),
2 : ( 100, 1k ),
3 : ( 1k, 10k ),
4 : ( 10k, 100k ),
5 : ( 100k, 1M ),
6 : ( 1M, 10M ),
7 : ( 10M, 1G )
}
"""
if (type(range_tuple) not in [list, tuple]):
raise TypeError(
"ERROR: Invalid input. Input can be specified only as a list or a tuple")
if (len(range_tuple)) != 2:
raise TypeError(
"ERROR: The length of the frequency range tuple is not 2")
if (range_tuple[0] > range_tuple[1]):
raise TypeError(
"ERROR: Low frequency limit is higher than the High frequency limit")
if (range_tuple[0] < self.FREQ_MIN):
range_tuple[0] = self.FREQ_MIN
if (range_tuple[1] > self.FREQ_MAX):
range_tuple[1] = self.FREQ_MAX
self._frequency_range = tuple(range_tuple)
def set_frequency(self, percent_range):
"""
Set the frequency as the percentage value of the current range. - Knob like usage.
Set appropriate range before setting the frequency.
"""
if (type(percent_range) not in [int, float]):
raise TypeError("ERROR: Only int or float values can be passed.")
if (percent_range < 0 or percent_range > 100):
raise ValueError(
"ERROR: Only values between 1 to 100 can be passed")
        self._frequency = float(self._frequency_range[0] + (
            self._frequency_range[1] - self._frequency_range[0]) *
            percent_range * 0.01)
if self._frequency != 0:
self._time_period = float(1) / self._frequency
else:
self._time_period = float("inf")
self._sampling_time_interval = self._time_period / 500.0
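    # Worked example (our note): with the range set to FREQ_RANGE[2], i.e.
    # (100, 1000), set_frequency(50) yields
    # 100 + (1000 - 100) * 50 * 0.01 = 550.0 Hz.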
def set_frequency_exact(self, frequency):
"""
Set the given frequency as the freqency of the output. The current range will be varied based on the value.
"""
if (type(frequency) not in [int, float]):
raise TypeError(
"ERROR: Invalid Input type. Frequency can only be a float value")
        if (frequency < self.FREQ_MIN):
            frequency = self.FREQ_MIN
        if (frequency > self.FREQ_MAX):
            frequency = self.FREQ_MAX
        self._frequency = float(frequency)
        # Choose the appropriate frequency range based on the passed value.
        for i in range(len(self.FREQ_RANGE)):
            if (frequency >= self.FREQ_RANGE[i][0] and
                    frequency <= self.FREQ_RANGE[i][1]):
                self._frequency_range = self.FREQ_RANGE[i]
                break
if self._frequency != 0:
self._time_period = float(1) / self._frequency
else:
self._time_period = float("inf")
self._sampling_time_interval = self._time_period / 500.0
# Much greater than the nyquist rate for an accurate output with the
# least deviation.
def set_type(self, typ):
"""
Set the type number of the output waveform.
================================================================
| TYPE_NO | TYPE | Built-in Constant Name |
================================================================
| 0 | Sine | SignalGenerator.SIN |
| 1 | Square | SignalGenerator.SQUARE |
| 2 | Ramp | SignalGenerator.RAMP |
| 3 | Triangular | SignalGenerator.TRIANGULAR |
| 4 | TTL | SignalGenerator.TTL |
================================================================
        It is recommended to use the Built-in Constants as inputs.
"""
if typ in range(5):
self._type = typ
else:
raise ValueError(
"ERROR: Invalid input. Please give a numerical value "
"between 0 and 4 ( both inclusive ) ")
def set_offset(self, offset):
"""
Set a DC offset voltage for the output waveform
Range of the acceptable offset value is
        [ -SignalGenerator.AMPL_MAX, SignalGenerator.AMPL_MAX ]
"""
if type(offset) not in [float, int]:
raise TypeError(
"ERROR: Offset can be a float (or int value) within "
"the specified range only.")
if (offset > self.AMPL_MAX) or (offset < -self.AMPL_MAX):
raise TypeError(
"ERROR: Invalid offset value. Specify offset within the range")
self._offset = float(offset)
def set_enable(self, enable):
"""
Link the enable connector (or Bus_1_bit) with the SignalGenerator
object's enable input.
"""
with AutoUpdater._lock:
if isinstance(enable, Bus):
AutoUpdater.remove_link(self._enable)
AutoUpdater.add_link(
enable,
self._enable)
else:
raise ValueError(
"ERROR: Invalid Enable input. Enable must be a "
"1-bit Bus or a Connector.")
def set_outputs(self, outputs):
"""
Set the outputs to be linked with the outputs of the SignalGenerator
"""
if not isinstance(outputs, Bus):
raise TypeError("ERROR: Invalid output Bus")
if (outputs.width != self.outputs.width):
raise TypeError("ERROR: Output width mismatch.")
with AutoUpdater._lock:
AutoUpdater.remove_link(self.outputs)
AutoUpdater.add_link(
self.outputs,
outputs)
def set_modulation_type(self, mod_type):
"""
Set the modulation type that must be applied to the generated output signal,
which will act as a carrier signal
"""
if mod_type not in [0, 1, 2]:
raise ValueError(
"ERROR: Invalid input for modulation type. Allowed values are 0, 1 or 2")
self._mod_type = mod_type
def set_modulation_input(self, mod_input):
"""
Link the mod_input Analog Bus of width 2, with the modulating input.
"""
if (not isinstance(mod_input, Bus)) or (not mod_input.analog):
raise TypeError(
"ERROR: Invalid modulation input. The modulation input must be 2 connector analog Bus.")
if (mod_input.width != self.mod_ip.width):
raise Exception("ERROR: The bus must be a 2 connector Bus.")
with AutoUpdater._lock:
AutoUpdater.remove_link(self.mod_ip)
AutoUpdater.add_link(
mod_input,
self.mod_ip)
def run(self):
"""
The main run module which periodically updates the output.
"""
try:
while not self._exit:
# Update the time varying value of the output.
# The current time offset
cur_time_offset = time.time() % self._time_period
self._updating = True
# If modulation is selected as FM
if (self._mod_type == 2):
# Getting the modulating input
m_t = self.mod_ip[0].voltage - self.mod_ip[1].voltage
freq = self._frequency + m_t
if freq != 0:
time_p = 1 / freq
else:
time_p = float("inf")
else:
freq = self._frequency
time_p = self._time_period
# If sine wave
if (self.type == 0):
self._last_updated_time = cur_time_offset
                    voltage = 0.5 * math.sin(
                        2 * math.pi * freq * cur_time_offset) + 0.5
# If square wave
elif (self.type == 1 or self.type == 4):
self._last_updated_time = cur_time_offset
voltage = 1 if (
(cur_time_offset) < time_p /
float(2)) else 0
# If Ramp
elif (self.type == 2):
self._last_updated_time = cur_time_offset
voltage = cur_time_offset / time_p
# If triangular
else:
self._last_updated_time = cur_time_offset
voltage = 2 * cur_time_offset / time_p if (
(cur_time_offset) < time_p /
float(2)) else (2 * (time_p - cur_time_offset) / time_p)
if (self._mod_type == 1):
m_t = self.mod_ip[0].voltage - self.mod_ip[1].voltage
c_t = voltage * self._amplitude + self.offset
voltage = (1 + m_t) * c_t
voltage /= self._amplitude
if (self.type != 4):
voltage *= self._amplitude
else:
voltage *= 5.0 # TTL amplitude is constant at 5v
self.outputs[0].voltage = voltage
self.outputs[1].voltage = -self.offset
self._updating = False
time.sleep(self._sampling_time_interval)
except Exception as e:
return
def kill(self):
""" To terminate this thread """
self._exit = True
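# Minimal single-generator readout sketch (our addition; assumes the BinPy
# Bus/Connector semantics used throughout this module):
#   sig = SignalGenerator(typ=SignalGenerator.SQUARE, freq=50, ampl=2.0)
#   out = Bus(2); out.set_type(analog=True)
#   sig.set_outputs(out)
#   time.sleep(0.1)                       # allow the worker thread to update
#   v = out[0].voltage - out[1].voltage   # differential output reading
#   sig.kill()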
| bsd-3-clause |
stefantkeller/VECSELsetup | eval/gen_functions.py | 1 | 16307 | #! /usr/bin/python2.7
# -*- coding: utf-8 -*-
from os.path import exists
import csv
import re
from itertools import izip as zip, count # izip for maximum efficiency: http://stackoverflow.com/questions/176918/finding-the-index-of-an-item-given-a-list-containing-it-in-python
import numpy as np
import matplotlib.pyplot as plt
import errorvalues as ev # github.com/stefantkeller/errorvalues
from varycolor import varycolor
def load(path,ignore_wrong_entries=False,return_header_dict=False):
with open(path,'rb') as fn:
header = fn.readline() # read and thus skip header
hdict = split2dict(header,el_sep=';',en_sep='=',ignore_wrong_entries=ignore_wrong_entries)
columns = hdict['columns'].split(',')
N = len(columns)
r = csv.reader(fn,delimiter=',')
storage = {}
for e in range(N):
storage[e] = []
for row in r:
if row[0].startswith('#') or len(row)!=N: continue # a commented or invalid entry
for e in range(N):
storage[e].append(row[e])
retlist = [storage[e] for e in range(N)]
if return_header_dict: retlist.append(hdict)
return retlist
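# Usage sketch (our addition; 'meas.csv' is a hypothetical file whose header
# follows the module convention, e.g. "columns=set,got;..."):
#   c_set, c_got = load('meas.csv')
#   c_set, c_got, hdr = load('meas.csv', return_header_dict=True)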
def sanitize(strng):
strng=strng.replace('\n','')
strng=strng.replace('\r','')
return strng
def unorder_to_stderrval(x,LUT,index_list):
x = map(float,x)
x = LUT(x,index_list)
x = ev.stderrvallist(x)
return x
def extract(logfile,identifiers=['Current','Pump','Refl','Laser']):
# beyond default identifiers:
# 'Spectra' (in order to extract spectra run LL_spectrum.py first - this writes the necessary eval files)
# 'Temperature' (mean +- std (!) heat sink temperature seen after each measurement)
rootpath = '/'.join(logfile.split('/')[:-1])
# each temperature delivers a list of values attached with errorbars
current_set = {} # this we need irrespective of whether we request it as a return value
# store some boolean values:
bcurr = 'Current' in identifiers
bpump = 'Pump' in identifiers
brefl = 'Refl' in identifiers
blaser = 'Laser' in identifiers
bspectra = 'Spectra' in identifiers
btemperature = 'Temperature' in identifiers
if bcurr: current = {}
if bpump: pump = {}
if brefl: reflection = {}
if blaser: emission = {}
if bspectra: spectra = {}
if btemperature: temperature = {}
#print bcurr,bpump,brefl,blaser
with open(logfile,'rb') as lf:
header = lf.readline() # read and thus skip header
hdict = split2dict(header, ';','=')
columns = hdict['columns'].split(',')
cid = dict( zip(columns,range(len(columns))) ) # easily accessible dict with instructions of what the columns refer to
pwr_folder = sanitize(hdict['pwr_folder'])
pwr_root = rootpath+'/'+pwr_folder+'/'
n_repetitions = int(hdict['n_repetitions'])
logfile_version = hdict['logfile_version']
#n_powermeters = hdict['n_powermeters']
r = csv.reader(lf,delimiter=',')
for row in r:
if row[0].startswith('#'): continue # a commented entry => skip
if 'Temp' in cid:
T = float(row[cid['Temp']])
elif 'Temp_set' in cid:
T = float(row[cid['Temp_set']])
else:
T = np.nan
timestamp = float(row[cid['Time_stamp']])
cf = pwr_root+row[cid['Current']] # if 'Current' not in cid this raises a KeyError!
c_set, c_got = load(cf)
c_set, c_got = map(float,c_set), map(float,c_got)
# create look up table in order to correlate the duplicate entries with one another
ordered = find_random_duplicates(c_set)
current_set[T] = np.array(sorted(ordered.keys()))
index_list = np.array([ordered[c] for c in current_set[T]]).transpose() # look up table with indices corresponding to same set current, in ascending order!
LUT = lambda x, indices: np.array([[x[i] for i in duplicate] for duplicate in indices])
#
# with the created LUT we reorder power readings:
# every column corresponds to a set current,
# we have as many duplicates as rows.
#
# assign errorbars to the measurements:
# the errorbars correspond to the standard error
# ste = std/sqrt(N)
# the standard error tells us how well we know the mean of a distribution
# this knowledge increases with the number of repetitions N
# or in other words: averaging increases signal-to-noise.
#
# eventually:
# subtract background and
# store them in dict assigned to their temperature
if bcurr:
curr = unorder_to_stderrval(c_got,LUT,index_list)
#curr = LUT(c_got,index_list)
#curr = ev.stderrvallist(curr)
#curr -= ev.min_(curr)[0]
current[T] = curr# - ev.min(curr)[0]
if bpump:
if sanitize(logfile_version)=='p1':
pf = pwr_root+row[cid['Power']]
else:
pf = pwr_root+row[cid['Pump']]
pmp, ptime = load(pf)
pmp = unorder_to_stderrval(pmp,LUT,index_list)
#pmp = map(float,pmp)
#pmp = LUT(pmp,index_list)
#pmp = ev.stderrvallist(pmp)
#pmp -= ev.min_(pmp)[0]
pump[T] = pmp# - ev.min(pmp)[0]
if brefl:
rf = pwr_root+row[cid['Refl']]
refl, rtime = load(rf)
refl = unorder_to_stderrval(refl,LUT,index_list)
#refl = map(float, refl)
#refl = LUT(refl,index_list)
#refl = ev.stderrvallist(refl)
#refl -= ev.min_(refl)[0]
reflection[T] = refl# - ev.min(refl)[0]
if blaser:
lf = pwr_root+row[cid['Laser']]
las, ltime = load(lf)
las = unorder_to_stderrval(las,LUT,index_list)
#las = map(float, las)
#las = LUT(las, index_list)
#las = ev.stderrvallist(las)
#las -= ev.min_(las)[0]
emission[T] = las# - ev.min(las)[0]
if btemperature:
tf = pwr_root+row[cid['Temp_live']]
tcurr, ths = load(tf)
ths = map(float,ths)
temperature[T] = ev.errval(np.mean(ths),np.std(ths,ddof=1))
if bspectra:
sf = row[cid['Spectra']]
sf = pwr_root+'.'.join(sf.split('.')[:-1])+r'_eval.csv' # in order for this to exist: run LL_spectrum.py first!
scurrent, lambda_short, lambda_long = load(sf) # scurrent == c_set
lambda_short = unorder_to_stderrval(lambda_short,LUT,index_list)
lambda_long = unorder_to_stderrval(lambda_long,LUT,index_list)
spectra[T] = [lambda_short,lambda_long] # this is a list of lists!
return_list = [current_set]
if bcurr: return_list.append(current)
if bpump: return_list.append(pump)
if brefl: return_list.append(reflection)
if blaser: return_list.append(emission)
if bspectra: return_list.append(spectra)
if btemperature: return_list.append(temperature)
return return_list
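# Usage sketch (our addition; the logfile path is hypothetical, the default
# identifiers are used, and each returned dict maps heat sink temperature to
# an errorvalues list):
#   current_set, current, pump, refl, laser = extract('run01/meas.csv')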
def plotinstructions_write(instrfile,Temperatures,calib_folder):
# Temperatures ought to be sorted!
create_new = not exists(instrfile)
if create_new:
with open(instrfile,'wb') as f:
f.write('Instruction file for the script to plot and fit (using {} for calibration):\n'.format(calib_folder))
f.write('textx=1\n')
f.write('fontsize=12\n')
f.write('title=[-> insert meaningful title here <-]\n')
f.write('xlim=[0,14]\n')
f.write('ylim1=[-0.01,3]\n')
f.write('ylim2=[20,50]\n')
for T in Temperatures:
f.write('texty1[{0}]=2\n'.format(T))
for T in Temperatures:
f.write('llow0[{0}]=2\n'.format(T))
f.write('lhigh0[{0}]=10\n'.format(T))
f.write('xlim3=[2,10]\n')
f.write('ylim3=[1260,1270]\n')
f.write('plot_temps_for_3=[')
for T in Temperatures:
f.write('{0}'.format(T))
if not T==Temperatures[-1]: f.write(',')
f.write(']\n')
f.write('textx3=3\n')
f.write('texty3=[1265,1263]\n')
f.close() # close to actually write -- until now it's only in the buffer!
print 'Open file {0} and modify instructions.'.format(instrfile)
def plotinstructions_read(instrfile):
instr = {}
with open(instrfile,'rb') as f:
f.readline() # skip header
r = csv.reader(f,delimiter='=')
for row in r: instr[row[0]]=row[1]
return instr
def lut_from_calibfile(address, ignore_error=False):
lin_lut = lambda m,q: lambda x: x*m+q
with open(address, 'rb') as f:
header = f.readline()
hdict = split2dict(header, ';','=')
linreg = hdict['linreg']
try:
mq = ev.str2errvallist(linreg)
if not ignore_error:
m0, q0 = mq[0], mq[1]
else:
m0, q0 = mq[0].v(), mq[1].v()
except IndexError: # legacy, in old calibrations mq is an empty list (i.e. no index)
try:
p = re.compile('(\d*\.\d*)\*\w+\+(-?\d*\.\d*)')
m = p.match(linreg)
m0 = float(m.group(1))
q0 = float(m.group(2))
except AttributeError, e:
print address,linreg
raise AttributeError, e
return lin_lut(m0,q0)
def lut_from_calibfolder(calib_folder,identifiers=['Pump','Refl','Laser'],ignore_error=False):
bcurr = 'Current' in identifiers
bpump = 'Pump' in identifiers
brefl = 'Refl' in identifiers
blaser = 'Laser' in identifiers
lut_1x1 = lambda x: x # 'do nothing', hence you can use the same 'lut'-expression, but use the raw data; for uncalibrated elements
retlutlist = []
if bcurr:
calib_curr = calib_folder+'LUTs/calib_pump_current'
if exists(calib_curr):
curr_lut = lut_from_calibfile(calib_curr,ignore_error)
else:
curr_lut = lut_1x1
retlutlist.append(curr_lut)
if bpump:
calib_pump = calib_folder+'/LUTs/calib_pump.csv'
if exists(calib_pump):
pump_lut = lut_from_calibfile(calib_pump,ignore_error)
else:
pump_lut = lut_1x1
retlutlist.append(pump_lut)
if brefl:
calib_refl = calib_folder+'/LUTs/calib_reflection.csv'
if exists(calib_refl):
refl_lut = lut_from_calibfile(calib_refl,ignore_error)
else:
refl_lut = lut_1x1
retlutlist.append(refl_lut)
if blaser:
calib_emission = calib_folder+'/LUTs/calib_emission.csv'
if exists(calib_emission):
emis_lut = lut_from_calibfile(calib_emission,ignore_error)
else:
emis_lut = lut_1x1
retlutlist.append(emis_lut)
return retlutlist if len(retlutlist)>1 else retlutlist[0] # as one would expect if only one is requested
def lut_interp_from_calibfile(address):
with open(address, 'rb') as f:
header = f.readline() # skip header
r = csv.reader(f,delimiter=',')
x,y = ev.errvallist([]),ev.errvallist([])
for row in r:
x.append(ev.errval(float(row[0]),float(row[1])))
y.append(ev.errval(float(row[2]),float(row[3])))
xsort = ev.sorting_instr(x) # make sure order is ascending
x = ev.reorder(x,xsort)
y = ev.reorder(y,xsort)
lut = lambda v: ev.interplist(v,x,y)
def value_scale(v):
if not isinstance(v,ev.errval): return 1
elif v.v()==0: return 0
else: return v/v.v()
return lambda evlist: ev.errvallist([lut(v)*value_scale(v) for v in evlist])
def lut_interp_from_calibfolder(calib_folder,identifiers=['Pump','Refl','Laser']):
bcurr = 'Current' in identifiers
bpump = 'Pump' in identifiers
brefl = 'Refl' in identifiers
blaser = 'Laser' in identifiers
lut_1x1 = lambda x: x # 'do nothing', hence you can use the same 'lut'-expression, but use the raw data; for uncalibrated elements
retlutlist = []
if bcurr:
calib_curr = calib_folder+'LUTs/calib_pump_current'
if exists(calib_curr):
curr_lut = lut_interp_from_calibfile(calib_curr)
else:
curr_lut = lut_1x1
retlutlist.append(curr_lut)
if bpump:
calib_pump = calib_folder+'/LUTs/calib_pump.csv'
if exists(calib_pump):
pump_lut = lut_interp_from_calibfile(calib_pump)
else:
pump_lut = lut_1x1
retlutlist.append(pump_lut)
if brefl:
calib_refl = calib_folder+'/LUTs/calib_reflection.csv'
if exists(calib_refl):
refl_lut = lut_interp_from_calibfile(calib_refl)
else:
refl_lut = lut_1x1
retlutlist.append(refl_lut)
if blaser:
calib_emission = calib_folder+'/LUTs/calib_emission.csv'
if exists(calib_emission):
emis_lut = lut_interp_from_calibfile(calib_emission)
else:
emis_lut = lut_1x1
retlutlist.append(emis_lut)
return retlutlist
def thermal_resistance(T,wavelength,dlambdadPd):
# based on Heinen2012's equation (2)
# lambda = dlambda/dT*T + dlambda/dPd*Pd + lambda_0
# = dlambdadPd*Pd + wavelength
# with
# lambda_0_ = dlambdadT*T + lambda_00
# (lambda_00 is virtual wavelength at no dissipated power
# and heat sink temperature of 0 degC
# (resp. at T=0 of whatever the unit of input temperatures is...))
#
# R_th = dlambda/dPd / dlambda/dT = dT/dPd
l0, dldT = ev.linreg(T.v(), wavelength.v(),wavelength.e())
#dldD = ev.stderrval(dlambdadPd)
dldD = ev.wmean(dlambdadPd) # supposed to be the same value measured several times => weighted mean
#R_th = dldD/dldT
return dldD, dldT, l0#, R_th
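# Hedged usage sketch (our addition; T, wavelength and dlambdadPd are
# errorvalues lists as produced by the routines above):
#   dldD, dldT, l0 = thermal_resistance(T, wavelength, dlambdadPd)
#   R_th = dldD / dldT  # thermal resistance dT/dPd as in Heinen2012 eq. (2)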
def split2dict(strng,el_sep=';',en_sep='=',ignore_wrong_entries=True):
# 'a=1;b=2;c=3' -> {'a': '1', 'c': '3', 'b': '2'}
# strng = 'a=1;b=2;c=3'
# el_sep: element ('a=1',...) separator, e.g. ';'
# en_sep: entry ('a'='1',...) separator, e.g. '='
# if not ignore_wrong_entries:
# string whose parts doesn't fit above described pattern raises an error
# e.g. 'a=1;without equal sign;valid=True' raises a ValueError
# if ignore_wrong_entries:
# these ValueErrors are ignored (pass)
# see github.com/stefantkeller/STK_py_generals/general_functions.py
el = strng.split(el_sep)
splt = lambda x: x.split(en_sep)
retdict = {} # return dict( (e0,e1) for e0,e1 in map( splt, el ) )
for e in el:
try:
e0,e1 = splt(e)
retdict[e0]=e1
except ValueError, e:
if ignore_wrong_entries: pass
else:
print strng
raise ValueError, e
return retdict
def find_random_duplicates(input):
# input = [10,11,12,13,14,13,12,10,11,11,12,13,14,10,14]
# (indices[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14])
# return: {10: [0, 7, 13], 11: [1, 8, 9], 12: [2, 6, 10], 13: [3, 5, 11], 14: [4, 12, 14]}
#http://stackoverflow.com/questions/176918/finding-the-index-of-an-item-given-a-list-containing-it-in-python
#http://stackoverflow.com/questions/479897/how-do-you-remove-duplicates-from-a-list-in-python-if-the-item-order-is-not-impo
# see github.com/stefantkeller/STK_py_generals/general_functions.py
deduplicated = sorted(set(input))
indexlist = [[i for i, j in zip(count(), input) if j == k] for k in deduplicated]
return dict((e0,e1) for e0,e1 in zip(deduplicated,indexlist))
def main():
pass
if __name__ == "__main__":
main()
| mit |
akionakamura/scikit-learn | sklearn/ensemble/tests/test_partial_dependence.py | 365 | 6996 | """
Testing for the partial dependence module.
"""
import numpy as np
from numpy.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import if_matplotlib
from sklearn.ensemble.partial_dependence import partial_dependence
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn import datasets
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the boston dataset
boston = datasets.load_boston()
# also load the iris dataset
iris = datasets.load_iris()
def test_partial_dependence_classifier():
# Test partial dependence for classifier
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
pdp, axes = partial_dependence(clf, [0], X=X, grid_resolution=5)
# only 4 grid points instead of 5 because only 4 unique X[:,0] vals
assert pdp.shape == (1, 4)
assert axes[0].shape[0] == 4
# now with our own grid
X_ = np.asarray(X)
grid = np.unique(X_[:, 0])
pdp_2, axes = partial_dependence(clf, [0], grid=grid)
assert axes is None
assert_array_equal(pdp, pdp_2)
def test_partial_dependence_multiclass():
# Test partial dependence for multi-class classifier
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
n_classes = clf.n_classes_
pdp, axes = partial_dependence(
clf, [0], X=iris.data, grid_resolution=grid_resolution)
assert pdp.shape == (n_classes, grid_resolution)
assert len(axes) == 1
assert axes[0].shape[0] == grid_resolution
def test_partial_dependence_regressor():
# Test partial dependence for regressor
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
pdp, axes = partial_dependence(
clf, [0], X=boston.data, grid_resolution=grid_resolution)
assert pdp.shape == (1, grid_resolution)
assert axes[0].shape[0] == grid_resolution
def test_partial_dependence_input():
# Test input validation of partial dependence.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=None, X=None)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=[0, 1], X=X)
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, partial_dependence,
{}, [0], X=X)
# Gradient boosting estimator must be fit
assert_raises(ValueError, partial_dependence,
GradientBoostingClassifier(), [0], X=X)
assert_raises(ValueError, partial_dependence, clf, [-1], X=X)
assert_raises(ValueError, partial_dependence, clf, [100], X=X)
# wrong ndim for grid
grid = np.random.rand(10, 2, 1)
assert_raises(ValueError, partial_dependence, clf, [0], grid=grid)
@if_matplotlib
def test_plot_partial_dependence():
# Test partial dependence plot function.
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, boston.data, [0, 1, (0, 1)],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
# check with str features and array feature names
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
# check with list feature_names
feature_names = boston.feature_names.tolist()
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
@if_matplotlib
def test_plot_partial_dependence_input():
# Test partial dependence plot function input checks.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
# not fitted yet
assert_raises(ValueError, plot_partial_dependence,
clf, X, [0])
clf.fit(X, y)
assert_raises(ValueError, plot_partial_dependence,
clf, np.array(X)[:, :0], [0])
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, plot_partial_dependence,
{}, X, [0])
# must be larger than -1
assert_raises(ValueError, plot_partial_dependence,
clf, X, [-1])
# too large feature value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [100])
# str feature but no feature_names
assert_raises(ValueError, plot_partial_dependence,
clf, X, ['foobar'])
# not valid features value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [{'foo': 'bar'}])
@if_matplotlib
def test_plot_partial_dependence_multiclass():
# Test partial dependence plot function on multi-class input.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label=0,
grid_resolution=grid_resolution)
assert len(axs) == 2
assert all(ax.has_data for ax in axs)
# now with symbol labels
target = iris.target_names[iris.target]
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label='setosa',
grid_resolution=grid_resolution)
assert len(axs) == 2
assert all(ax.has_data for ax in axs)
# label not in gbrt.classes_
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1], label='foobar',
grid_resolution=grid_resolution)
# label not provided
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1],
grid_resolution=grid_resolution)
| bsd-3-clause |
jyheo/pywsn | chart.py | 1 | 1761 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
color_tuples = [(255,128,128), (255,160,128), (255,255,152), (152,255,152), (128,255,208), (128,255,255),
(128,160,255), (128,128,255), (192,128,255), (255,128,255)]
def colors_str():
return ['#%02x%02x%02x' % (t[0], t[1], t[2]) for t in color_tuples]
def chart_3d(data, labels = None):
'''
draw 3d chart using data
:param data: list of tuples (x, y, z)
:return: void
'''
x_val = sorted(set([t[0] for t in data]))
y_val = sorted(set([t[1] for t in data]))
d = max(len(x_val), len(y_val))
dx = float(max(x_val) - min(x_val)) / d * 0.7
dy = float(max(y_val) - min(y_val)) / d * 0.7
x_pos = [t[0] - dx / 2 for t in data]
y_pos = [t[1] - dy / 2 for t in data]
z_height = [t[2] for t in data]
color_tmp = colors_str()
num = len(x_pos)
    # integer division so the color list tiles correctly on Python 2 and 3
    colors = color_tmp * (num // len(color_tmp)) + color_tmp[:num % len(color_tmp)]
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
if labels is not None:
ax.set_xlabel(labels[0])
ax.set_ylabel(labels[1])
ax.set_zlabel(labels[2])
ax.bar3d(x_pos, y_pos, [0]*len(x_pos), dx, dy, z_height, color=colors)
plt.show()
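# Example call (our addition, illustrative data only):
#   chart_3d([(0, 0, 1.0), (0, 1, 2.5), (1, 0, 0.5), (1, 1, 3.0)],
#            labels=('x', 'y', 'height'))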
def chart_lines(xdata, data, labels=None, markers=None, legend_labels=None):
fig = plt.figure()
ax = fig.add_subplot(111)
for d in range(len(data)):
if markers and legend_labels:
ax.plot(xdata, data[d], marker=markers[d], label=legend_labels[d])
else:
ax.plot(xdata, data[d])
if labels is not None:
ax.set_xlabel(labels[0])
ax.set_ylabel(labels[1])
ax.legend(loc=2)
plt.show()
| gpl-3.0 |
zorojean/scikit-learn | sklearn/datasets/species_distributions.py | 198 | 7923 | """
=============================
Species distribution dataset
=============================
This dataset represents the geographic distribution of species.
The dataset is provided by Phillips et. al. (2006).
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
Colombia, Ecuador, Peru, and Venezuela.
References:
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
Notes:
* See examples/applications/plot_species_distribution_modeling.py
for an example of using this dataset
"""
# Authors: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Jake Vanderplas <vanderplas@astro.washington.edu>
#
# License: BSD 3 clause
from io import BytesIO
from os import makedirs
from os.path import join
from os.path import exists
try:
# Python 2
from urllib2 import urlopen
PY2 = True
except ImportError:
# Python 3
from urllib.request import urlopen
PY2 = False
import numpy as np
from sklearn.datasets.base import get_data_home, Bunch
from sklearn.externals import joblib
DIRECTORY_URL = "http://www.cs.princeton.edu/~schapire/maxent/datasets/"
SAMPLES_URL = join(DIRECTORY_URL, "samples.zip")
COVERAGES_URL = join(DIRECTORY_URL, "coverages.zip")
DATA_ARCHIVE_NAME = "species_coverage.pkz"
def _load_coverage(F, header_length=6, dtype=np.int16):
"""Load a coverage file from an open file object.
This will return a numpy array of the given dtype
"""
header = [F.readline() for i in range(header_length)]
make_tuple = lambda t: (t.split()[0], float(t.split()[1]))
header = dict([make_tuple(line) for line in header])
M = np.loadtxt(F, dtype=dtype)
    nodata = header[b'NODATA_value']
    if nodata != -9999:
        # replace the file's NODATA marker with the -9999 sentinel used below
        M[M == nodata] = -9999
return M
def _load_csv(F):
"""Load csv file.
Parameters
----------
F : file object
CSV file open in byte mode.
Returns
-------
rec : np.ndarray
record array representing the data
"""
if PY2:
# Numpy recarray wants Python 2 str but not unicode
names = F.readline().strip().split(',')
else:
# Numpy recarray wants Python 3 str but not bytes...
names = F.readline().decode('ascii').strip().split(',')
rec = np.loadtxt(F, skiprows=0, delimiter=',', dtype='a22,f4,f4')
rec.dtype.names = names
return rec
def construct_grids(batch):
"""Construct the map grid from the batch object
Parameters
----------
batch : Batch object
The object returned by :func:`fetch_species_distributions`
Returns
-------
(xgrid, ygrid) : 1-D arrays
The grid corresponding to the values in batch.coverages
"""
# x,y coordinates for corner cells
xmin = batch.x_left_lower_corner + batch.grid_size
xmax = xmin + (batch.Nx * batch.grid_size)
ymin = batch.y_left_lower_corner + batch.grid_size
ymax = ymin + (batch.Ny * batch.grid_size)
# x coordinates of the grid cells
xgrid = np.arange(xmin, xmax, batch.grid_size)
# y coordinates of the grid cells
ygrid = np.arange(ymin, ymax, batch.grid_size)
return (xgrid, ygrid)
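# Usage sketch (mirrors the plot_species_distribution_modeling example noted
# in the module docstring; variable names are ours):
#   data = fetch_species_distributions()
#   xgrid, ygrid = construct_grids(data)
#   X, Y = np.meshgrid(xgrid, ygrid[::-1])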
def fetch_species_distributions(data_home=None,
download_if_missing=True):
"""Loader for species distribution dataset from Phillips et. al. (2006)
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
download_if_missing: optional, True by default
If False, raise a IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
--------
The data is returned as a Bunch object with the following attributes:
coverages : array, shape = [14, 1592, 1212]
These represent the 14 features measured at each point of the map grid.
The latitude/longitude values for the grid are discussed below.
Missing data is represented by the value -9999.
train : record array, shape = (1623,)
The training points for the data. Each point has three fields:
- train['species'] is the species name
- train['dd long'] is the longitude, in degrees
- train['dd lat'] is the latitude, in degrees
test : record array, shape = (619,)
The test points for the data. Same format as the training data.
Nx, Ny : integers
The number of longitudes (x) and latitudes (y) in the grid
x_left_lower_corner, y_left_lower_corner : floats
The (x,y) position of the lower-left corner, in degrees
grid_size : float
The spacing between points of the grid, in degrees
Notes
------
This dataset represents the geographic distribution of species.
The dataset is provided by Phillips et. al. (2006).
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
Colombia, Ecuador, Peru, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
Notes
-----
* See examples/applications/plot_species_distribution_modeling.py
for an example of using this dataset with scikit-learn
"""
data_home = get_data_home(data_home)
if not exists(data_home):
makedirs(data_home)
# Define parameters for the data files. These should not be changed
# unless the data model changes. They will be saved in the npz file
# with the downloaded data.
extra_params = dict(x_left_lower_corner=-94.8,
Nx=1212,
y_left_lower_corner=-56.05,
Ny=1592,
grid_size=0.05)
dtype = np.int16
if not exists(join(data_home, DATA_ARCHIVE_NAME)):
print('Downloading species data from %s to %s' % (SAMPLES_URL,
data_home))
X = np.load(BytesIO(urlopen(SAMPLES_URL).read()))
for f in X.files:
fhandle = BytesIO(X[f])
if 'train' in f:
train = _load_csv(fhandle)
if 'test' in f:
test = _load_csv(fhandle)
print('Downloading coverage data from %s to %s' % (COVERAGES_URL,
data_home))
X = np.load(BytesIO(urlopen(COVERAGES_URL).read()))
coverages = []
for f in X.files:
fhandle = BytesIO(X[f])
print(' - converting', f)
coverages.append(_load_coverage(fhandle))
coverages = np.asarray(coverages, dtype=dtype)
bunch = Bunch(coverages=coverages,
test=test,
train=train,
**extra_params)
joblib.dump(bunch, join(data_home, DATA_ARCHIVE_NAME), compress=9)
else:
bunch = joblib.load(join(data_home, DATA_ARCHIVE_NAME))
return bunch
| bsd-3-clause |
actuaryzhang/spark | python/pyspark/sql/context.py | 12 | 21873 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import sys
import warnings
if sys.version >= '3':
basestring = unicode = str
from pyspark import since, _NoValue
from pyspark.rdd import ignore_unicode_prefix
from pyspark.sql.session import _monkey_patch_RDD, SparkSession
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.readwriter import DataFrameReader
from pyspark.sql.streaming import DataStreamReader
from pyspark.sql.types import IntegerType, Row, StringType
from pyspark.sql.udf import UDFRegistration
from pyspark.sql.utils import install_exception_handler
__all__ = ["SQLContext", "HiveContext"]
class SQLContext(object):
"""The entry point for working with structured data (rows and columns) in Spark, in Spark 1.x.
As of Spark 2.0, this is replaced by :class:`SparkSession`. However, we are keeping the class
here for backward compatibility.
    A SQLContext can be used to create :class:`DataFrame`, register :class:`DataFrame` as
tables, execute SQL over tables, cache tables, and read parquet files.
:param sparkContext: The :class:`SparkContext` backing this SQLContext.
:param sparkSession: The :class:`SparkSession` around which this SQLContext wraps.
:param jsqlContext: An optional JVM Scala SQLContext. If set, we do not instantiate a new
SQLContext in the JVM, instead we make all calls to this object.
"""
_instantiatedContext = None
@ignore_unicode_prefix
def __init__(self, sparkContext, sparkSession=None, jsqlContext=None):
"""Creates a new SQLContext.
>>> from datetime import datetime
>>> sqlContext = SQLContext(sc)
>>> allTypes = sc.parallelize([Row(i=1, s="string", d=1.0, l=1,
... b=True, list=[1, 2, 3], dict={"s": 0}, row=Row(a=1),
... time=datetime(2014, 8, 1, 14, 1, 5))])
>>> df = allTypes.toDF()
>>> df.createOrReplaceTempView("allTypes")
>>> sqlContext.sql('select i+1, d+1, not b, list[1], dict["s"], time, row.a '
... 'from allTypes where b and i > 0').collect()
[Row((i + CAST(1 AS BIGINT))=2, (d + CAST(1 AS DOUBLE))=2.0, (NOT b)=False, list[1]=2, \
dict[s]=0, time=datetime.datetime(2014, 8, 1, 14, 1, 5), a=1)]
>>> df.rdd.map(lambda x: (x.i, x.s, x.d, x.l, x.b, x.time, x.row.a, x.list)).collect()
[(1, u'string', 1.0, 1, True, datetime.datetime(2014, 8, 1, 14, 1, 5), 1, [1, 2, 3])]
"""
self._sc = sparkContext
self._jsc = self._sc._jsc
self._jvm = self._sc._jvm
if sparkSession is None:
sparkSession = SparkSession.builder.getOrCreate()
if jsqlContext is None:
jsqlContext = sparkSession._jwrapped
self.sparkSession = sparkSession
self._jsqlContext = jsqlContext
_monkey_patch_RDD(self.sparkSession)
install_exception_handler()
if SQLContext._instantiatedContext is None:
SQLContext._instantiatedContext = self
@property
def _ssql_ctx(self):
"""Accessor for the JVM Spark SQL context.
Subclasses can override this property to provide their own
JVM Contexts.
"""
return self._jsqlContext
@property
def _conf(self):
"""Accessor for the JVM SQL-specific configurations"""
return self.sparkSession._jsparkSession.sessionState().conf()
@classmethod
@since(1.6)
def getOrCreate(cls, sc):
"""
Get the existing SQLContext or create a new one with given SparkContext.
:param sc: SparkContext
"""
if cls._instantiatedContext is None:
jsqlContext = sc._jvm.SQLContext.getOrCreate(sc._jsc.sc())
sparkSession = SparkSession(sc, jsqlContext.sparkSession())
cls(sc, sparkSession, jsqlContext)
return cls._instantiatedContext
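    # Typical use (sketch; assumes an existing SparkContext ``sc``):
    #   sqlContext = SQLContext.getOrCreate(sc)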
@since(1.6)
def newSession(self):
"""
Returns a new SQLContext as new session, that has separate SQLConf,
registered temporary views and UDFs, but shared SparkContext and
table cache.
"""
return self.__class__(self._sc, self.sparkSession.newSession())
@since(1.3)
def setConf(self, key, value):
"""Sets the given Spark SQL configuration property.
"""
self.sparkSession.conf.set(key, value)
@ignore_unicode_prefix
@since(1.3)
def getConf(self, key, defaultValue=_NoValue):
"""Returns the value of Spark SQL configuration property for the given key.
If the key is not set and defaultValue is set, return
defaultValue. If the key is not set and defaultValue is not set, return
the system default value.
>>> sqlContext.getConf("spark.sql.shuffle.partitions")
u'200'
>>> sqlContext.getConf("spark.sql.shuffle.partitions", u"10")
u'10'
>>> sqlContext.setConf("spark.sql.shuffle.partitions", u"50")
>>> sqlContext.getConf("spark.sql.shuffle.partitions", u"10")
u'50'
"""
return self.sparkSession.conf.get(key, defaultValue)
@property
@since("1.3.1")
def udf(self):
"""Returns a :class:`UDFRegistration` for UDF registration.
:return: :class:`UDFRegistration`
"""
return self.sparkSession.udf
@since(1.4)
def range(self, start, end=None, step=1, numPartitions=None):
"""
Create a :class:`DataFrame` with single :class:`pyspark.sql.types.LongType` column named
``id``, containing elements in a range from ``start`` to ``end`` (exclusive) with
step value ``step``.
:param start: the start value
:param end: the end value (exclusive)
:param step: the incremental step (default: 1)
:param numPartitions: the number of partitions of the DataFrame
:return: :class:`DataFrame`
>>> sqlContext.range(1, 7, 2).collect()
[Row(id=1), Row(id=3), Row(id=5)]
If only one argument is specified, it will be used as the end value.
>>> sqlContext.range(3).collect()
[Row(id=0), Row(id=1), Row(id=2)]
"""
return self.sparkSession.range(start, end, step, numPartitions)
@since(1.2)
def registerFunction(self, name, f, returnType=None):
"""An alias for :func:`spark.udf.register`.
See :meth:`pyspark.sql.UDFRegistration.register`.
.. note:: Deprecated in 2.3.0. Use :func:`spark.udf.register` instead.
"""
warnings.warn(
"Deprecated in 2.3.0. Use spark.udf.register instead.",
DeprecationWarning)
return self.sparkSession.udf.register(name, f, returnType)
@since(2.1)
def registerJavaFunction(self, name, javaClassName, returnType=None):
"""An alias for :func:`spark.udf.registerJavaFunction`.
See :meth:`pyspark.sql.UDFRegistration.registerJavaFunction`.
.. note:: Deprecated in 2.3.0. Use :func:`spark.udf.registerJavaFunction` instead.
"""
warnings.warn(
"Deprecated in 2.3.0. Use spark.udf.registerJavaFunction instead.",
DeprecationWarning)
return self.sparkSession.udf.registerJavaFunction(name, javaClassName, returnType)
# TODO(andrew): delete this once we refactor things to take in SparkSession
def _inferSchema(self, rdd, samplingRatio=None):
"""
Infer schema from an RDD of Row or tuple.
:param rdd: an RDD of Row or tuple
:param samplingRatio: sampling ratio, or no sampling (default)
:return: :class:`pyspark.sql.types.StructType`
"""
return self.sparkSession._inferSchema(rdd, samplingRatio)
@since(1.3)
@ignore_unicode_prefix
def createDataFrame(self, data, schema=None, samplingRatio=None, verifySchema=True):
"""
Creates a :class:`DataFrame` from an :class:`RDD`, a list or a :class:`pandas.DataFrame`.
When ``schema`` is a list of column names, the type of each column
will be inferred from ``data``.
When ``schema`` is ``None``, it will try to infer the schema (column names and types)
from ``data``, which should be an RDD of :class:`Row`,
or :class:`namedtuple`, or :class:`dict`.
When ``schema`` is :class:`pyspark.sql.types.DataType` or a datatype string it must match
the real data, or an exception will be thrown at runtime. If the given schema is not
:class:`pyspark.sql.types.StructType`, it will be wrapped into a
:class:`pyspark.sql.types.StructType` as its only field, and the field name will be "value",
each record will also be wrapped into a tuple, which can be converted to row later.
        If schema inference is needed, ``samplingRatio`` is used to determine the ratio of
rows used for schema inference. The first row will be used if ``samplingRatio`` is ``None``.
        :param data: an RDD of any kind of SQL data representation (e.g. :class:`Row`,
:class:`tuple`, ``int``, ``boolean``, etc.), or :class:`list`, or
:class:`pandas.DataFrame`.
:param schema: a :class:`pyspark.sql.types.DataType` or a datatype string or a list of
column names, default is None. The data type string format equals to
:class:`pyspark.sql.types.DataType.simpleString`, except that top level struct type can
omit the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use
``byte`` instead of ``tinyint`` for :class:`pyspark.sql.types.ByteType`.
We can also use ``int`` as a short name for :class:`pyspark.sql.types.IntegerType`.
:param samplingRatio: the sample ratio of rows used for inferring
:param verifySchema: verify data types of every row against schema.
:return: :class:`DataFrame`
.. versionchanged:: 2.0
The ``schema`` parameter can be a :class:`pyspark.sql.types.DataType` or a
datatype string after 2.0.
If it's not a :class:`pyspark.sql.types.StructType`, it will be wrapped into a
:class:`pyspark.sql.types.StructType` and each record will also be wrapped into a tuple.
.. versionchanged:: 2.1
Added verifySchema.
>>> l = [('Alice', 1)]
>>> sqlContext.createDataFrame(l).collect()
[Row(_1=u'Alice', _2=1)]
>>> sqlContext.createDataFrame(l, ['name', 'age']).collect()
[Row(name=u'Alice', age=1)]
>>> d = [{'name': 'Alice', 'age': 1}]
>>> sqlContext.createDataFrame(d).collect()
[Row(age=1, name=u'Alice')]
>>> rdd = sc.parallelize(l)
>>> sqlContext.createDataFrame(rdd).collect()
[Row(_1=u'Alice', _2=1)]
>>> df = sqlContext.createDataFrame(rdd, ['name', 'age'])
>>> df.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql import Row
>>> Person = Row('name', 'age')
>>> person = rdd.map(lambda r: Person(*r))
>>> df2 = sqlContext.createDataFrame(person)
>>> df2.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql.types import *
>>> schema = StructType([
... StructField("name", StringType(), True),
... StructField("age", IntegerType(), True)])
>>> df3 = sqlContext.createDataFrame(rdd, schema)
>>> df3.collect()
[Row(name=u'Alice', age=1)]
>>> sqlContext.createDataFrame(df.toPandas()).collect() # doctest: +SKIP
[Row(name=u'Alice', age=1)]
>>> sqlContext.createDataFrame(pandas.DataFrame([[1, 2]])).collect() # doctest: +SKIP
[Row(0=1, 1=2)]
>>> sqlContext.createDataFrame(rdd, "a: string, b: int").collect()
[Row(a=u'Alice', b=1)]
>>> rdd = rdd.map(lambda row: row[1])
>>> sqlContext.createDataFrame(rdd, "int").collect()
[Row(value=1)]
>>> sqlContext.createDataFrame(rdd, "boolean").collect() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
Py4JJavaError: ...
"""
return self.sparkSession.createDataFrame(data, schema, samplingRatio, verifySchema)
@since(1.3)
def registerDataFrameAsTable(self, df, tableName):
"""Registers the given :class:`DataFrame` as a temporary table in the catalog.
Temporary tables exist only during the lifetime of this instance of :class:`SQLContext`.
>>> sqlContext.registerDataFrameAsTable(df, "table1")
"""
df.createOrReplaceTempView(tableName)
@since(1.6)
def dropTempTable(self, tableName):
""" Remove the temp table from catalog.
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> sqlContext.dropTempTable("table1")
"""
self.sparkSession.catalog.dropTempView(tableName)
@since(1.3)
def createExternalTable(self, tableName, path=None, source=None, schema=None, **options):
"""Creates an external table based on the dataset in a data source.
It returns the DataFrame associated with the external table.
The data source is specified by the ``source`` and a set of ``options``.
If ``source`` is not specified, the default data source configured by
``spark.sql.sources.default`` will be used.
Optionally, a schema can be provided as the schema of the returned :class:`DataFrame` and
created external table.
:return: :class:`DataFrame`
"""
return self.sparkSession.catalog.createExternalTable(
tableName, path, source, schema, **options)
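# A hedged usage sketch (the table name, path, and format below are purely
# illustrative, not taken from this codebase):
#
# df = sqlContext.createExternalTable("people", path="/data/people.parquet",
#                                     source="parquet")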
@ignore_unicode_prefix
@since(1.0)
def sql(self, sqlQuery):
"""Returns a :class:`DataFrame` representing the result of the given query.
:return: :class:`DataFrame`
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> df2 = sqlContext.sql("SELECT field1 AS f1, field2 as f2 from table1")
>>> df2.collect()
[Row(f1=1, f2=u'row1'), Row(f1=2, f2=u'row2'), Row(f1=3, f2=u'row3')]
"""
return self.sparkSession.sql(sqlQuery)
@since(1.0)
def table(self, tableName):
"""Returns the specified table or view as a :class:`DataFrame`.
:return: :class:`DataFrame`
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> df2 = sqlContext.table("table1")
>>> sorted(df.collect()) == sorted(df2.collect())
True
"""
return self.sparkSession.table(tableName)
@ignore_unicode_prefix
@since(1.3)
def tables(self, dbName=None):
"""Returns a :class:`DataFrame` containing names of tables in the given database.
If ``dbName`` is not specified, the current database will be used.
The returned DataFrame has two columns: ``tableName`` and ``isTemporary``
(a column with :class:`BooleanType` indicating if a table is a temporary one or not).
:param dbName: string, name of the database to use.
:return: :class:`DataFrame`
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> df2 = sqlContext.tables()
>>> df2.filter("tableName = 'table1'").first()
Row(database=u'', tableName=u'table1', isTemporary=True)
"""
if dbName is None:
return DataFrame(self._ssql_ctx.tables(), self)
else:
return DataFrame(self._ssql_ctx.tables(dbName), self)
@since(1.3)
def tableNames(self, dbName=None):
"""Returns a list of names of tables in the database ``dbName``.
:param dbName: string, name of the database to use. Default to the current database.
:return: list of table names, in string
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> "table1" in sqlContext.tableNames()
True
>>> "table1" in sqlContext.tableNames("default")
True
"""
if dbName is None:
return [name for name in self._ssql_ctx.tableNames()]
else:
return [name for name in self._ssql_ctx.tableNames(dbName)]
@since(1.0)
def cacheTable(self, tableName):
"""Caches the specified table in-memory."""
self._ssql_ctx.cacheTable(tableName)
@since(1.0)
def uncacheTable(self, tableName):
"""Removes the specified table from the in-memory cache."""
self._ssql_ctx.uncacheTable(tableName)
@since(1.3)
def clearCache(self):
"""Removes all cached tables from the in-memory cache. """
self._ssql_ctx.clearCache()
@property
@since(1.4)
def read(self):
"""
Returns a :class:`DataFrameReader` that can be used to read data
in as a :class:`DataFrame`.
:return: :class:`DataFrameReader`
"""
return DataFrameReader(self)
@property
@since(2.0)
def readStream(self):
"""
Returns a :class:`DataStreamReader` that can be used to read data streams
as a streaming :class:`DataFrame`.
.. note:: Evolving.
:return: :class:`DataStreamReader`
>>> text_sdf = sqlContext.readStream.text(tempfile.mkdtemp())
>>> text_sdf.isStreaming
True
"""
return DataStreamReader(self)
@property
@since(2.0)
def streams(self):
"""Returns a :class:`StreamingQueryManager` that allows managing all the
:class:`StreamingQuery` StreamingQueries active on `this` context.
.. note:: Evolving.
"""
from pyspark.sql.streaming import StreamingQueryManager
return StreamingQueryManager(self._ssql_ctx.streams())
class HiveContext(SQLContext):
"""A variant of Spark SQL that integrates with data stored in Hive.
Configuration for Hive is read from ``hive-site.xml`` on the classpath.
It supports running both SQL and HiveQL commands.
:param sparkContext: The SparkContext to wrap.
:param jhiveContext: An optional JVM Scala HiveContext. If set, we do not instantiate a new
:class:`HiveContext` in the JVM, instead we make all calls to this object.
.. note:: Deprecated in 2.0.0. Use SparkSession.builder.enableHiveSupport().getOrCreate().
"""
def __init__(self, sparkContext, jhiveContext=None):
warnings.warn(
"HiveContext is deprecated in Spark 2.0.0. Please use " +
"SparkSession.builder.enableHiveSupport().getOrCreate() instead.",
DeprecationWarning)
if jhiveContext is None:
sparkContext._conf.set("spark.sql.catalogImplementation", "hive")
sparkSession = SparkSession.builder._sparkContext(sparkContext).getOrCreate()
else:
sparkSession = SparkSession(sparkContext, jhiveContext.sparkSession())
SQLContext.__init__(self, sparkContext, sparkSession, jhiveContext)
@classmethod
def _createForTesting(cls, sparkContext):
"""(Internal use only) Create a new HiveContext for testing.
All test code that touches HiveContext *must* go through this method. Otherwise,
you may end up launching multiple derby instances and encounter with incredibly
confusing error messages.
"""
jsc = sparkContext._jsc.sc()
jtestHive = sparkContext._jvm.org.apache.spark.sql.hive.test.TestHiveContext(jsc, False)
return cls(sparkContext, jtestHive)
def refreshTable(self, tableName):
"""Invalidate and refresh all the cached the metadata of the given
table. For performance reasons, Spark SQL or the external data source
library it uses might cache certain metadata about a table, such as the
location of blocks. When those change outside of Spark SQL, users should
call this function to invalidate the cache.
"""
self._ssql_ctx.refreshTable(tableName)
def _test():
import os
import doctest
import tempfile
from pyspark.context import SparkContext
from pyspark.sql import Row, SQLContext
import pyspark.sql.context
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.sql.context.__dict__.copy()
sc = SparkContext('local[4]', 'PythonTest')
globs['tempfile'] = tempfile
globs['os'] = os
globs['sc'] = sc
globs['sqlContext'] = SQLContext(sc)
globs['rdd'] = rdd = sc.parallelize(
[Row(field1=1, field2="row1"),
Row(field1=2, field2="row2"),
Row(field1=3, field2="row3")]
)
globs['df'] = rdd.toDF()
jsonStrings = [
'{"field1": 1, "field2": "row1", "field3":{"field4":11}}',
'{"field1" : 2, "field3":{"field4":22, "field5": [10, 11]},"field6":[{"field7": "row2"}]}',
'{"field1" : null, "field2": "row3", "field3":{"field4":33, "field5": []}}'
]
globs['jsonStrings'] = jsonStrings
globs['json'] = sc.parallelize(jsonStrings)
(failure_count, test_count) = doctest.testmod(
pyspark.sql.context, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
globs['sc'].stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
justincassidy/scikit-learn | doc/tutorial/text_analytics/solutions/exercise_01_language_train_model.py | 254 | 2253 | """Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.5)
# TASK: Build a vectorizer that splits strings into sequences of 1 to 3
# characters instead of word tokens
vectorizer = TfidfVectorizer(ngram_range=(1, 3), analyzer='char',
use_idf=False)
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer
# the pipeline instance should be stored in a variable named clf
clf = Pipeline([
('vec', vectorizer),
('clf', Perceptron()),
])
# TASK: Fit the pipeline on the training set
clf.fit(docs_train, y_train)
# TASK: Predict the outcome on the testing set in a variable named y_predicted
y_predicted = clf.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
#import pylab as pl
#pl.matshow(cm, cmap=pl.cm.jet)
#pl.show()
# Predict the result on some short new sentences:
sentences = [
u'This is a language detection test.',
u'Ceci est un test de d\xe9tection de la langue.',
u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
| bsd-3-clause |
laurensstoop/HiSPARC-BONZ | egg/legacy/egg_saskia_v3.3.py | 1 | 7620 | # -*- coding: utf-8 -*-
"""
===================================
Created on Thu Mar 24 13:17:57 2016
@author: Laurens Stoop
===================================
"""
################################## HEADER ##################################
"""
Import of Packages
"""
import sapphire # The HiSparc Python Framework
import tables # A HDF5 python module that allows to store data
import datetime # A package to decode the timeformat of HiSparc data
import matplotlib.pyplot as plt # Plotting functionality of MatPlotLib
import numpy as np # This is NumPy
import os.path # To check if files exist (so you don't do stuff again)
import rootpy.plotting # Get the pythonesque version of ROOT
import rootpy.interactive # needed for rootpy.interactive.wait() below
from rootpy.plotting import root2matplotlib
"""
Getting the data file and setting the variables
"""
# Time between which the data is downloaded (yyyy,mm,dd,[hh])
START = datetime.datetime(2016,03,01)
END = datetime.datetime(2016,03,04)
# Give the list of stations
STATIONS = [501] #,1006,13002,14001,20003]
# Do not show the figures
plt.ioff()
################################## BODY ##################################
"""
Data acquisition
"""
# Open a data file (automatic close)
with tables.open_file('egg_saskia.h5','a') as data_file:
# Retrieve for every station the data and plot a pulsehisto
for station in STATIONS:
# Set the station name (this is the group name in the file)
station_name = '/s%d' %station
# Data is downloaded
if station_name not in data_file:
# Let them know what we do
print "\nGetting data from station %d " % station
# Now retrieve the event data
sapphire.esd.download_data(
data_file, # File (as opened above)
station_name, # Group name (/s..station..)
station, # Station number
START, # Start data date
END, # End data date
'events', # Download events (or 'weather')
True) # Show progress
# Now retrieve the weather data
sapphire.esd.download_data(
data_file, # File (as opened above)
station_name, # Group name (/s..station..)
station, # Station number
START, # Start data date
END, # End data date
'weather', # Download weather data
True) # Show progress
# If the datafile has the group we do not download them data
else:
print "Data present for station %d" % station
####### Pulseheight histograms #######
# If the plot exist we skip the plotting
if os.path.isfile('pulseheigt_histogram_%d.pdf' % station):
# Say if the plot is present
print "Plot already present for station %d" % station
# If there is no plot we make it
else:
# Get event data
event_data = data_file.get_node(
station_name, # From the group (/s..station..)
'events') # Get the node with events
# Set the figure
figure_pulse = plt.figure(station)
# Get the pulseheight from all events
data_pulseheight = event_data.col('pulseheights') # col takes all data from events
# Creates bins so that the ugly shit is taken away
bins = np.linspace(0, 4000, 201)
# Plotting the pulseheight for all events
plt.hist(
data_pulseheight, # Plot the Pulseheight
bins, # Number of bins
histtype='step', # Make the histogram a step function
log=True) # With a logarithmic scale
# Setting the plot labels and title
plt.xlabel("Pulseheight [ADC]")
plt.ylabel("Counts")
plt.title("Pulseheight histogram (log scale) for station (%d)" %station)
# Saving them Pica
plt.savefig(
'pulseheigt_histogram_%d.pdf' % station, # Name of the file
bbox_inches='tight') # Use less whitespace
# Necessary to avoid multiplotting in one figure and to close memory leak
plt.close(figure_pulse)
####### Pulseheight vs pulse integral histograms #######
# If the plot exist we skip the plotting
if os.path.isfile('pulseheigt_vs_integral_histogram_%d.pdf' % station):
# Say if the plot is present
print "Histo 2D already present for station %d" % station
# If there is no plot we make it
else:
# Get event data
event_data = data_file.get_node(
station_name, # From the group (/s..station..)
'events') # Get the node with events
# Get the pulseheight from all events
data_pulseheights = event_data.col('pulseheights') # col takes all data from events (this improves the speed)
data_pulseheight_1 = data_pulseheights[:,0] # the first column is selected
data_pulseheight_2 = data_pulseheights[:,1] # the second column is selected
data_pulseheight_3 = data_pulseheights[:,2] # the third column is selected
data_pulseheight_4 = data_pulseheights[:,3] # the fourth column is selected
# Get the integral from all events
data_integrals = event_data.col('integrals') # col takes all data from events
data_integral_1 = data_integrals[:,0] # You get it by now!
data_integral_2 = data_integrals[:,1]
data_integral_3 = data_integrals[:,2]
data_integral_4 = data_integrals[:,3]
# Make a figure so it can be closed
figure_pulse_int = plt.figure(station)
# for detector in range(0,3):
data_combo = np.stack(
(data_integral_1, # The pulse integral on y axis
data_pulseheight_1), # The pulseheight on x axis
axis=-1) # To get the direction correct
a = rootpy.plotting.Hist2D(100, 0, 100000, 100, 0, 4500)
a.fill_array(data_combo)
canvas = rootpy.plotting.Canvas(width = 800, height = 600)
a.Draw("colz")
rootpy.interactive.wait()
fig = plt.figure(station)
im = root2matplotlib.hist2d(a) # keep the mappable so fig.colorbar(im, ...) below has something to draw
fig.subplots_adjust(right=0.8)
cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7])
fig.colorbar(im, cax=cbar_ax)
fig.savefig(
'hist2d.pdf', # Name of the file
bbox_inches='tight')
plt.close(fig)
print "####### I'm Done Bitches! #######"
################################## FOOTER ##################################
"""
Clean up shit
"""
| gpl-3.0 |
anilmuthineni/tensorflow | tensorflow/contrib/learn/python/learn/estimators/_sklearn.py | 153 | 6723 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""sklearn cross-support."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import numpy as np
import six
def _pprint(d):
return ', '.join(['%s=%s' % (key, str(value)) for key, value in d.items()])
class _BaseEstimator(object):
"""This is a cross-import when sklearn is not available.
Adopted from sklearn.BaseEstimator implementation.
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py
"""
def get_params(self, deep=True):
"""Get parameters for this estimator.
Args:
deep: boolean, optional
If `True`, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns:
params : mapping of string to any
Parameter names mapped to their values.
"""
out = dict()
param_names = [name for name in self.__dict__ if not name.startswith('_')]
for key in param_names:
value = getattr(self, key, None)
if isinstance(value, collections.Callable):
continue
# XXX: should we rather test if instance of estimator?
if deep and hasattr(value, 'get_params'):
deep_items = value.get_params().items()
out.update((key + '__' + k, val) for k, val in deep_items)
out[key] = value
return out
def set_params(self, **params):
"""Set the parameters of this estimator.
The method works on simple estimators as well as on nested objects
(such as pipelines). The former have parameters of the form
``<component>__<parameter>`` so that it's possible to update each
component of a nested object.
Args:
**params: Parameters.
Returns:
self
Raises:
ValueError: If params contain invalid names.
"""
if not params:
# Simple optimisation to gain speed (inspect is slow)
return self
valid_params = self.get_params(deep=True)
for key, value in six.iteritems(params):
split = key.split('__', 1)
if len(split) > 1:
# nested objects case
name, sub_name = split
if name not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(name, self))
sub_object = valid_params[name]
sub_object.set_params(**{sub_name: value})
else:
# simple objects case
if key not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(key, self.__class__.__name__))
setattr(self, key, value)
return self
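# Illustration of the nested-parameter convention documented above; the
# estimator and parameter names are hypothetical:
#
# pipeline.set_params(clf__alpha=0.1)
#
# splits the key on '__' and forwards alpha=0.1 to the sub-estimator
# stored under the 'clf' attribute.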
def __repr__(self):
class_name = self.__class__.__name__
return '%s(%s)' % (class_name,
_pprint(self.get_params(deep=False)),)
# pylint: disable=old-style-class
class _ClassifierMixin():
"""Mixin class for all classifiers."""
pass
class _RegressorMixin():
"""Mixin class for all regression estimators."""
pass
class _TransformerMixin():
"""Mixin class for all transformer estimators."""
class NotFittedError(ValueError, AttributeError):
"""Exception class to raise if estimator is used before fitting.
This class inherits from both ValueError and AttributeError to help with
exception handling and backward compatibility.
Examples:
>>> from sklearn.svm import LinearSVC
>>> from sklearn.exceptions import NotFittedError
>>> try:
... LinearSVC().predict([[1, 2], [2, 3], [3, 4]])
... except NotFittedError as e:
... print(repr(e))
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
NotFittedError('This LinearSVC instance is not fitted yet',)
Copied from
https://github.com/scikit-learn/scikit-learn/master/sklearn/exceptions.py
"""
# pylint: enable=old-style-class
def _accuracy_score(y_true, y_pred):
score = y_true == y_pred
return np.average(score)
def _mean_squared_error(y_true, y_pred):
if len(y_true.shape) > 1:
y_true = np.squeeze(y_true)
if len(y_pred.shape) > 1:
y_pred = np.squeeze(y_pred)
return np.average((y_true - y_pred)**2)
def _train_test_split(*args, **options):
# pylint: disable=missing-docstring
test_size = options.pop('test_size', None)
train_size = options.pop('train_size', None)
random_state = options.pop('random_state', None)
if test_size is None and train_size is None:
train_size = 0.75
elif train_size is None:
train_size = 1 - test_size
train_size = int(train_size * args[0].shape[0])
np.random.seed(random_state)
indices = np.random.permutation(args[0].shape[0])
train_idx, test_idx = indices[:train_size], indices[train_size:]
result = []
for x in args:
result += [x.take(train_idx, axis=0), x.take(test_idx, axis=0)]
return tuple(result)
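# Minimal usage sketch (the arrays are illustrative): with 10 samples and
# the default train_size of 0.75,
#
# x_train, x_test, y_train, y_test = _train_test_split(x, y, random_state=0)
#
# yields a 7/3 split, with both arrays indexed by one shared permutation.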
# If "TENSORFLOW_SKLEARN" flag is defined then try to import from sklearn.
TRY_IMPORT_SKLEARN = os.environ.get('TENSORFLOW_SKLEARN', False)
if TRY_IMPORT_SKLEARN:
# pylint: disable=g-import-not-at-top,g-multiple-import,unused-import
from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin, TransformerMixin
from sklearn.metrics import accuracy_score, log_loss, mean_squared_error
from sklearn.cross_validation import train_test_split
try:
from sklearn.exceptions import NotFittedError
except ImportError:
try:
from sklearn.utils.validation import NotFittedError
except ImportError:
pass
else:
# Naive implementations of sklearn classes and functions.
BaseEstimator = _BaseEstimator
ClassifierMixin = _ClassifierMixin
RegressorMixin = _RegressorMixin
TransformerMixin = _TransformerMixin
accuracy_score = _accuracy_score
log_loss = None
mean_squared_error = _mean_squared_error
train_test_split = _train_test_split
| apache-2.0 |
jniediek/combinato | tools/plot_thr_and_artifacts.py | 1 | 4805 | # -*- encoding: utf-8 -*-
# JN 2015-05-11
# what do I want in this script?
# * extraction thresholds in each region
# (and regression line, or other measure of variability)
# * rationale it's interesting to see whether highly variable channels are
# in one macro
# * firing rate stability for non-artifacts (correlated with threshold?)
# * percentage of artifacts of each category (correlated within region?)
# structure of the script
# region -> find channels -> plot extraction thr and firing rates
# -> write down artifact criteria
"""
plot extraction thresholds and spike counts over time
"""
from __future__ import print_function, division
import os
import numpy as np # pylint: disable=E1101
import tables
import matplotlib.pyplot as mpl
import matplotlib.cm as cm
from combinato import SortingManagerGrouped, get_regions, artifact_id_to_name
print(artifact_id_to_name)
FIGSIZE = (7, 6)
REGIONS = ('A', 'AH', 'MH', 'PH', 'EC', 'PHC', 'I')
LEFT_COLORS = cm.spectral(np.linspace(0, 1, len(REGIONS)))
RIGHT_COLORS = cm.summer(np.linspace(0, 1, len(REGIONS)))
NUM_COLORS = cm.winter(np.linspace(0, 1, 8))
COLORDICT = {}
for i, region in enumerate(REGIONS):
COLORDICT['L' + region] = LEFT_COLORS[i]
COLORDICT['R' + region] = RIGHT_COLORS[i]
for i in range(1, 9):
COLORDICT[str(i)] = NUM_COLORS[i-1]
JOBS = ('thr', 'arti')
def plotthr(thrplot, fireplot, thr, times, color='r'):
"""
plot the threshold data
"""
xdata = thr[:, :2].ravel()
xdata -= xdata[0]
xdata /= 6e4
xlim = xdata[[0, -1]]
thrs = np.vstack((thr[:, 2], thr[:, 2])).T.ravel()
thrplot.plot(xdata, thrs, color=color)
thrplot.set_xlim(xlim)
xtimes = (times - times[0])/6e4
# countdata = np.linspace(0, 1, len(xtimes))
# fireplot.plot(xtimes, countdata, color=color)
# fireplot.set_xlim(xlim)
bins = np.append(thr[:, 0], thr[-1, 1])
# bins /= 1e3
spcount, _ = np.histogram(times, bins=bins)
spcount = spcount.astype(float)
spcount /= times.shape[0]
print(spcount)
spplotdata = np.vstack((spcount, spcount)).T.ravel()
fireplot.plot(xdata, spplotdata, color=color)
def create_plots():
fig = mpl.figure(figsize=FIGSIZE)
plot = fig.add_subplot(1, 2, 1)
plot.set_ylabel(u'µV')
plot.set_xlabel(u'min')
plot.set_title(u'Extraction threshold over time')
plot2 = fig.add_subplot(1, 2, 2)
# plot2 = plot.twinx()
plot2.set_xlabel(u'min')
plot2.set_ylabel('% fired')
plot2.set_title('Spike count over time')
return plot, plot2
def plotarti(artifacts):
"""
plots artifact statistics
"""
tot = len(artifacts)
for artid, name in artifact_id_to_name.items():
perc = (artifacts == artid).sum()/tot
print('{}: {:.1%}'.format(name, perc))
def main(fnames, sign='pos', title=''):
"""
opens the file, calls plot
"""
thrplot, fireplot = create_plots()
thrplot.set_title(title)
thr_legend_handles = {}
for fname in fnames:
if os.path.isdir(fname):
fname = os.path.join(fname, 'data_' + fname + '.h5')
man = SortingManagerGrouped(fname)
thr = man.h5datafile.root.thr[:]
times = man.h5datafile.get_node('/' + sign, 'times')[:]
if not len(times):
continue
try:
artifacts = man.h5datafile.get_node('/' + sign, 'artifacts')[:]
except tables.NoSuchNodeError:
print('No artifacts defined')
artifacts = None
if man.header is not None:
entname = man.header['AcqEntName']
print(entname[-1])
color = COLORDICT[entname[-1]]
entname = entname[-1]
else:
color = 'k'
entname = 'unknown region'
del man
if 'thr' in JOBS:
plotthr(thrplot, fireplot, thr, times, color)
if entname not in thr_legend_handles:
thr_legend_handles[entname] = mpl.Line2D([0], [0], color=color,
label=entname)
if 'arti' in JOBS:
if artifacts is not None:
plotarti(artifacts)
# thrplot.legend(handles=thr_legend_handles.values())
def loop_over_regions(path):
"""
do the plots by region
"""
from collections import defaultdict
regions = get_regions(path)
regions_to_fnames = defaultdict(list)
for reg in regions:
ncsfiles = regions[reg]
for fname in ncsfiles:
if os.path.isdir(fname[:-4]):
regions_to_fnames[reg].append(os.path.basename(fname[:-4]))
for reg in regions_to_fnames:
main(regions_to_fnames[reg], 'pos', reg)
if __name__ == "__main__":
loop_over_regions(os.getcwd())
mpl.show()
| mit |
yavalvas/yav_com | build/matplotlib/examples/api/custom_projection_example.py | 9 | 18246 | from __future__ import unicode_literals
import matplotlib
from matplotlib.axes import Axes
from matplotlib.patches import Circle
from matplotlib.path import Path
from matplotlib.ticker import NullLocator, Formatter, FixedLocator
from matplotlib.transforms import Affine2D, BboxTransformTo, Transform
from matplotlib.projections import register_projection
import matplotlib.spines as mspines
import matplotlib.axis as maxis
import numpy as np
# This example projection class is rather long, but it is designed to
# illustrate many features, not all of which will be used every time.
# It is also common to factor out a lot of these methods into common
# code used by a number of projections with similar characteristics
# (see geo.py).
class HammerAxes(Axes):
"""
A custom class for the Aitoff-Hammer projection, an equal-area map
projection.
http://en.wikipedia.org/wiki/Hammer_projection
"""
# The projection must specify a name. This will be used by the
# user to select the projection, i.e. ``subplot(111,
# projection='custom_hammer')``.
name = 'custom_hammer'
def __init__(self, *args, **kwargs):
Axes.__init__(self, *args, **kwargs)
self.set_aspect(0.5, adjustable='box', anchor='C')
self.cla()
def _init_axis(self):
self.xaxis = maxis.XAxis(self)
self.yaxis = maxis.YAxis(self)
# Do not register xaxis or yaxis with spines -- as done in
# Axes._init_axis() -- until HammerAxes.xaxis.cla() works.
# self.spines['hammer'].register_axis(self.yaxis)
self._update_transScale()
def cla(self):
"""
Override to set up some reasonable defaults.
"""
# Don't forget to call the base class
Axes.cla(self)
# Set up a default grid spacing
self.set_longitude_grid(30)
self.set_latitude_grid(15)
self.set_longitude_grid_ends(75)
# Turn off minor ticking altogether
self.xaxis.set_minor_locator(NullLocator())
self.yaxis.set_minor_locator(NullLocator())
# Do not display ticks -- we only want gridlines and text
self.xaxis.set_ticks_position('none')
self.yaxis.set_ticks_position('none')
# The limits on this projection are fixed -- they are not to
# be changed by the user. This makes the math in the
# transformation itself easier, and since this is a toy
# example, the easier, the better.
Axes.set_xlim(self, -np.pi, np.pi)
Axes.set_ylim(self, -np.pi / 2.0, np.pi / 2.0)
def _set_lim_and_transforms(self):
"""
This is called once when the plot is created to set up all the
transforms for the data, text and grids.
"""
# There are three important coordinate spaces going on here:
#
# 1. Data space: The space of the data itself
#
# 2. Axes space: The unit rectangle (0, 0) to (1, 1)
# covering the entire plot area.
#
# 3. Display space: The coordinates of the resulting image,
# often in pixels or dpi/inch.
# This function makes heavy use of the Transform classes in
# ``lib/matplotlib/transforms.py.`` For more information, see
# the inline documentation there.
# The goal of the first two transformations is to get from the
# data space (in this case longitude and latitude) to axes
# space. It is separated into a non-affine and affine part so
# that the non-affine part does not have to be recomputed when
# a simple affine change to the figure has been made (such as
# resizing the window or changing the dpi).
# 1) The core transformation from data space into
# rectilinear space defined in the HammerTransform class.
self.transProjection = self.HammerTransform()
# 2) The above has an output range that is not in the unit
# rectangle, so scale and translate it so it fits correctly
# within the axes. The peculiar calculations of xscale and
# yscale are specific to a Aitoff-Hammer projection, so don't
# worry about them too much.
xscale = 2.0 * np.sqrt(2.0) * np.sin(0.5 * np.pi)
yscale = np.sqrt(2.0) * np.sin(0.5 * np.pi)
self.transAffine = Affine2D() \
.scale(0.5 / xscale, 0.5 / yscale) \
.translate(0.5, 0.5)
# 3) This is the transformation from axes space to display
# space.
self.transAxes = BboxTransformTo(self.bbox)
# Now put these 3 transforms together -- from data all the way
# to display coordinates. Using the '+' operator, these
# transforms will be applied "in order". The transforms are
# automatically simplified, if possible, by the underlying
# transformation framework.
self.transData = \
self.transProjection + \
self.transAffine + \
self.transAxes
# The main data transformation is set up. Now deal with
# gridlines and tick labels.
# Longitude gridlines and ticklabels. The input to these
# transforms are in display space in x and axes space in y.
# Therefore, the input values will be in range (-xmin, 0),
# (xmax, 1). The goal of these transforms is to go from that
# space to display space. The tick labels will be offset 4
# pixels from the equator.
self._xaxis_pretransform = \
Affine2D() \
.scale(1.0, np.pi) \
.translate(0.0, -np.pi)
self._xaxis_transform = \
self._xaxis_pretransform + \
self.transData
self._xaxis_text1_transform = \
Affine2D().scale(1.0, 0.0) + \
self.transData + \
Affine2D().translate(0.0, 4.0)
self._xaxis_text2_transform = \
Affine2D().scale(1.0, 0.0) + \
self.transData + \
Affine2D().translate(0.0, -4.0)
# Now set up the transforms for the latitude ticks. The input to
# these transforms are in axes space in x and display space in
# y. Therefore, the input values will be in range (0, -ymin),
# (1, ymax). The goal of these transforms is to go from that
# space to display space. The tick labels will be offset 4
# pixels from the edge of the axes ellipse.
yaxis_stretch = Affine2D().scale(np.pi * 2.0, 1.0).translate(-np.pi, 0.0)
yaxis_space = Affine2D().scale(1.0, 1.1)
self._yaxis_transform = \
yaxis_stretch + \
self.transData
yaxis_text_base = \
yaxis_stretch + \
self.transProjection + \
(yaxis_space + \
self.transAffine + \
self.transAxes)
self._yaxis_text1_transform = \
yaxis_text_base + \
Affine2D().translate(-8.0, 0.0)
self._yaxis_text2_transform = \
yaxis_text_base + \
Affine2D().translate(8.0, 0.0)
def get_xaxis_transform(self,which='grid'):
"""
Override this method to provide a transformation for the
x-axis grid and ticks.
"""
assert which in ['tick1','tick2','grid']
return self._xaxis_transform
def get_xaxis_text1_transform(self, pixelPad):
"""
Override this method to provide a transformation for the
x-axis tick labels.
Returns a tuple of the form (transform, valign, halign)
"""
return self._xaxis_text1_transform, 'bottom', 'center'
def get_xaxis_text2_transform(self, pixelPad):
"""
Override this method to provide a transformation for the
secondary x-axis tick labels.
Returns a tuple of the form (transform, valign, halign)
"""
return self._xaxis_text2_transform, 'top', 'center'
def get_yaxis_transform(self,which='grid'):
"""
Override this method to provide a transformation for the
y-axis grid and ticks.
"""
assert which in ['tick1','tick2','grid']
return self._yaxis_transform
def get_yaxis_text1_transform(self, pixelPad):
"""
Override this method to provide a transformation for the
y-axis tick labels.
Returns a tuple of the form (transform, valign, halign)
"""
return self._yaxis_text1_transform, 'center', 'right'
def get_yaxis_text2_transform(self, pixelPad):
"""
Override this method to provide a transformation for the
secondary y-axis tick labels.
Returns a tuple of the form (transform, valign, halign)
"""
return self._yaxis_text2_transform, 'center', 'left'
def _gen_axes_patch(self):
"""
Override this method to define the shape that is used for the
background of the plot. It should be a subclass of Patch.
In this case, it is a Circle (that may be warped by the axes
transform into an ellipse). Any data and gridlines will be
clipped to this shape.
"""
return Circle((0.5, 0.5), 0.5)
def _gen_axes_spines(self):
return {'custom_hammer':mspines.Spine.circular_spine(self,
(0.5, 0.5), 0.5)}
# Prevent the user from applying scales to one or both of the
# axes. In this particular case, scaling the axes wouldn't make
# sense, so we don't allow it.
def set_xscale(self, *args, **kwargs):
if args[0] != 'linear':
raise NotImplementedError
Axes.set_xscale(self, *args, **kwargs)
def set_yscale(self, *args, **kwargs):
if args[0] != 'linear':
raise NotImplementedError
Axes.set_yscale(self, *args, **kwargs)
# Prevent the user from changing the axes limits. In our case, we
# want to display the whole sphere all the time, so we override
# set_xlim and set_ylim to ignore any input. This also applies to
# interactive panning and zooming in the GUI interfaces.
def set_xlim(self, *args, **kwargs):
Axes.set_xlim(self, -np.pi, np.pi)
Axes.set_ylim(self, -np.pi / 2.0, np.pi / 2.0)
set_ylim = set_xlim
def format_coord(self, lon, lat):
"""
Override this method to change how the values are displayed in
the status bar.
In this case, we want them to be displayed in degrees N/S/E/W.
"""
lon = lon * (180.0 / np.pi)
lat = lat * (180.0 / np.pi)
if lat >= 0.0:
ns = 'N'
else:
ns = 'S'
if lon >= 0.0:
ew = 'E'
else:
ew = 'W'
# \u00b0 : degree symbol
return '%f\u00b0%s, %f\u00b0%s' % (abs(lat), ns, abs(lon), ew)
class DegreeFormatter(Formatter):
"""
This is a custom formatter that converts the native unit of
radians into (truncated) degrees and adds a degree symbol.
"""
def __init__(self, round_to=1.0):
self._round_to = round_to
def __call__(self, x, pos=None):
degrees = (x / np.pi) * 180.0
degrees = round(degrees / self._round_to) * self._round_to
# \u00b0 : degree symbol
return "%d\u00b0" % degrees
def set_longitude_grid(self, degrees):
"""
Set the number of degrees between each longitude grid.
This is an example method that is specific to this projection
class -- it provides a more convenient interface to set the
ticking than set_xticks would.
"""
# Set up a FixedLocator at each of the points, evenly spaced
# by degrees.
number = (360.0 / degrees) + 1
self.xaxis.set_major_locator(
FixedLocator(
np.linspace(-np.pi, np.pi, number, True)[1:-1]))
# Set the formatter to display the tick labels in degrees,
# rather than radians.
self.xaxis.set_major_formatter(self.DegreeFormatter(degrees))
def set_latitude_grid(self, degrees):
"""
Set the number of degrees between each longitude grid.
This is an example method that is specific to this projection
class -- it provides a more convenient interface than
set_yticks would.
"""
# Set up a FixedLocator at each of the points, evenly spaced
# by degrees.
number = (180.0 / degrees) + 1
self.yaxis.set_major_locator(
FixedLocator(
np.linspace(-np.pi / 2.0, np.pi / 2.0, number, True)[1:-1]))
# Set the formatter to display the tick labels in degrees,
# rather than radians.
self.yaxis.set_major_formatter(self.DegreeFormatter(degrees))
def set_longitude_grid_ends(self, degrees):
"""
Set the latitude(s) at which to stop drawing the longitude grids.
Often, in geographic projections, you wouldn't want to draw
longitude gridlines near the poles. This allows the user to
specify the degree at which to stop drawing longitude grids.
This is an example method that is specific to this projection
class -- it provides an interface to something that has no
analogy in the base Axes class.
"""
longitude_cap = degrees * (np.pi / 180.0)
# Change the xaxis gridlines transform so that it draws from
# -degrees to degrees, rather than -pi to pi.
self._xaxis_pretransform \
.clear() \
.scale(1.0, longitude_cap * 2.0) \
.translate(0.0, -longitude_cap)
def get_data_ratio(self):
"""
Return the aspect ratio of the data itself.
This method should be overridden by any Axes that have a
fixed data ratio.
"""
return 1.0
# Interactive panning and zooming is not supported with this projection,
# so we override all of the following methods to disable it.
def can_zoom(self):
"""
Return True if this axes support the zoom box
"""
return False
def start_pan(self, x, y, button):
pass
def end_pan(self):
pass
def drag_pan(self, button, key, x, y):
pass
# Now, the transforms themselves.
class HammerTransform(Transform):
"""
The base Hammer transform.
"""
input_dims = 2
output_dims = 2
is_separable = False
def transform_non_affine(self, ll):
"""
Override the transform_non_affine method to implement the custom
transform.
The input and output are Nx2 numpy arrays.
"""
longitude = ll[:, 0:1]
latitude = ll[:, 1:2]
# Pre-compute some values
half_long = longitude / 2.0
cos_latitude = np.cos(latitude)
sqrt2 = np.sqrt(2.0)
alpha = 1.0 + cos_latitude * np.cos(half_long)
x = (2.0 * sqrt2) * (cos_latitude * np.sin(half_long)) / alpha
y = (sqrt2 * np.sin(latitude)) / alpha
return np.concatenate((x, y), 1)
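# For reference, the closed form computed above (Hammer projection):
#
# x = 2*sqrt(2) * cos(lat) * sin(lon/2) / (1 + cos(lat) * cos(lon/2))
# y = sqrt(2) * sin(lat) / (1 + cos(lat) * cos(lon/2))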
# This is where things get interesting. With this projection,
# straight lines in data space become curves in display space.
# This is done by interpolating new values between the input
# values of the data. Since ``transform`` must not return a
# differently-sized array, any transform that requires
# changing the length of the data array must happen within
# ``transform_path``.
def transform_path_non_affine(self, path):
ipath = path.interpolated(path._interpolation_steps)
return Path(self.transform(ipath.vertices), ipath.codes)
transform_path_non_affine.__doc__ = \
Transform.transform_path_non_affine.__doc__
if matplotlib.__version__ < '1.2':
# Note: For compatibility with matplotlib v1.1 and older, you'll
# need to explicitly implement a ``transform`` method as well.
# Otherwise a ``NotImplementedError`` will be raised. This isn't
# necessary for v1.2 and newer, however.
transform = transform_non_affine
# Similarly, we need to explicitly override ``transform_path`` if
# compatibility with older matplotlib versions is needed. With v1.2
# and newer, only overriding the ``transform_path_non_affine``
# method is sufficient.
transform_path = transform_path_non_affine
transform_path.__doc__ = Transform.transform_path.__doc__
def inverted(self):
return HammerAxes.InvertedHammerTransform()
inverted.__doc__ = Transform.inverted.__doc__
class InvertedHammerTransform(Transform):
input_dims = 2
output_dims = 2
is_separable = False
def transform_non_affine(self, xy):
x = xy[:, 0:1]
y = xy[:, 1:2]
quarter_x = 0.25 * x
half_y = 0.5 * y
z = np.sqrt(1.0 - quarter_x*quarter_x - half_y*half_y)
longitude = 2 * np.arctan((z*x) / (2.0 * (2.0*z*z - 1.0)))
latitude = np.arcsin(y*z)
return np.concatenate((longitude, latitude), 1)
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
# As before, we need to implement the "transform" method for
# compatibility with matplotlib v1.1 and older.
if matplotlib.__version__ < '1.2':
transform = transform_non_affine
def inverted(self):
# The inverse of the inverse is the original transform... ;)
return HammerAxes.HammerTransform()
inverted.__doc__ = Transform.inverted.__doc__
# Now register the projection with matplotlib so the user can select
# it.
register_projection(HammerAxes)
if __name__ == '__main__':
import matplotlib.pyplot as plt
# Now make a simple example using the custom projection.
plt.subplot(111, projection="custom_hammer")
p = plt.plot([-1, 1, 1], [-1, -1, 1], "o-")
plt.grid(True)
plt.show()
| mit |
alantian/polyglot | docs/conf.py | 4 | 10864 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# complexity documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# Solution to RTD problem suggested by
# http://blog.rtwilson.com/how-to-make-your-sphinx-documentation-compile-with-readthedocs-when-youre-using-numpy-and-scipy/
import mock
MOCK_MODULES = ['numpy', 'PyICU', 'icu']
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = mock.Mock()
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.insert(0, os.path.abspath('sphinxext'))
from github_link import make_linkcode_resolve
# Try to override the matplotlib configuration as early as possible
try:
import gen_rst
except:
pass
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import polyglot
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.intersphinx',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.pngmath',
'sphinx.ext.linkcode',
# 'sphinx.ext.viewcode',
'sphinxcontrib.napoleon',
# 'IPython.sphinxext.ipython_console_highlighting',
# 'IPython.sphinxext.ipython_directive'
]
autosummary_generate = True
autodoc_default_flags = ['members', 'undoc-members', 'show-inheritance']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'polyglot'
copyright = u'2014-2015, Rami Al-Rfou'
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = polyglot.__version__
# The full version, including alpha/beta/rc tags.
release = polyglot.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
else:
html_theme = 'default'
#import sphinx_bootstrap_theme
#html_theme = 'bootstrap'
#html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
#import klink
#html_theme = 'klink'
#html_theme_path = [klink.get_html_theme_path()]
#import alabaster
#html_theme = 'alabaster'
#html_theme_path = [alabaster.get_path()]
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'polyglotdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
'inputenc': '',
'utf8extra': '',
'preamble': '''
\usepackage{fontspec}
\setmainfont{Linux Libertine O}
%\setmonofont{DejaVu Sans Mono}
\setmonofont{Courier New}
%\setmonofont{FreeMono}
''',
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index_latex', 'polyglot.tex',
u'polyglot Documentation',
u'Rami Al-Rfou', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'polyglot',
u'polyglot Documentation',
[u'Rami Al-Rfou'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'polyglot',
u'polyglot Documentation',
u'Rami Al-Rfou',
'polyglot',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# The following is used by sphinx.ext.linkcode to provide links to github
linkcode_resolve = make_linkcode_resolve('polyglot',
u'https://github.com/aboSamoor/polyglot'
'/blob/{revision}/'
'{package}/{path}#L{lineno}')
| gpl-3.0 |
jcouvy/convnet-nolearn | src/blog/net1.py | 1 | 1958 | # -------------------- Loading Data-set --------------------
import cPickle as pickle # aliased: pickle.dump/pickle.load are used below
import pandas as pd
import numpy as np
# The competition datafiles are in the directory ../input
# Read training and test data files
train = pd.read_csv("../../input/train.csv")
test = pd.read_csv("../../input/test.csv")
train_images = train.iloc[:,1:].values
train_labels = train[[0]].values.ravel()
# Reshape and normalize training data
trainX = train_images.reshape(train.shape[0], 1, 28, 28).astype(np.float32)
trainX /= 255.0
# Reshape and normalize test data
testX = test.values.reshape(test.shape[0], 1, 28, 28).astype(np.float32)
testX /= 255.0
trainY = train_labels.astype(np.int32)
# -------------------- Building the network -------------------
import theano
import lasagne
from lasagne import layers
from lasagne.updates import nesterov_momentum
from nolearn.lasagne import NeuralNet, PrintLayerInfo #used for verbose=2
net1 = NeuralNet(
layers = [('input', layers.InputLayer),
('conv1', layers.Conv2DLayer),
('pool1', layers.MaxPool2DLayer),
('conv2', layers.Conv2DLayer),
('pool2', layers.MaxPool2DLayer),
('hidden', layers.DenseLayer),
('output', layers.DenseLayer)
],
# Each digit is a 28x28 1-Dimensionnal image,
input_shape = (None, 1, 28, 28),
conv1_num_filters = 8, conv1_filter_size = (3, 3),
pool1_pool_size = (2, 2),
conv2_num_filters = 16, conv2_filter_size = (2, 2),
pool2_pool_size = (2, 2),
hidden_num_units = 64,
output_num_units = 10,
# Learning method.
update = nesterov_momentum,
update_learning_rate = 0.01,
update_momentum = 0.9,
# Length of the training
max_epochs = 100,
verbose = 2,
)
# Start training the network
net1.fit(trainX, trainY)
# Saving the net with cPickle to load it back later.
with open('net1.pickle', 'wb') as f:
pickle.dump(net1, f, -1)
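# Restoring the trained network later (a sketch mirroring the dump above):
# with open('net1.pickle', 'rb') as f:
#     net1 = pickle.load(f)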
| mit |
jjx02230808/project0223 | sklearn/metrics/cluster/supervised.py | 22 | 30444 | """Utilities to evaluate the clustering performance of models
Functions named as *_score return a scalar value to maximize: the higher the
better.
"""
# Authors: Olivier Grisel <olivier.grisel@ensta.org>
# Wei LI <kuantkid@gmail.com>
# Diego Molla <dmolla-aliod@gmail.com>
# License: BSD 3 clause
from math import log
from scipy.misc import comb
from scipy.sparse import coo_matrix
import numpy as np
from .expected_mutual_info_fast import expected_mutual_information
from ...utils.fixes import bincount
def comb2(n):
# the exact version is faster for k == 2: use it by default globally in
# this module instead of the float approximate variant
return comb(n, 2, exact=1)
def check_clusterings(labels_true, labels_pred):
"""Check that the two clusterings matching 1D integer arrays"""
labels_true = np.asarray(labels_true)
labels_pred = np.asarray(labels_pred)
# input checks
if labels_true.ndim != 1:
raise ValueError(
"labels_true must be 1D: shape is %r" % (labels_true.shape,))
if labels_pred.ndim != 1:
raise ValueError(
"labels_pred must be 1D: shape is %r" % (labels_pred.shape,))
if labels_true.shape != labels_pred.shape:
raise ValueError(
"labels_true and labels_pred must have same size, got %d and %d"
% (labels_true.shape[0], labels_pred.shape[0]))
return labels_true, labels_pred
def contingency_matrix(labels_true, labels_pred, eps=None, max_n_classes=5000):
"""Build a contengency matrix describing the relationship between labels.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
eps: None or float
If a float, that value is added to all values in the contingency
matrix. This helps to stop NaN propagation.
If ``None``, nothing is adjusted.
max_n_classes : int, optional (default=5000)
Maximal number of classes handled for contingency_matrix.
This helps to avoid MemoryError with regression targets
for mutual_information.
Returns
-------
contingency: array, shape=[n_classes_true, n_classes_pred]
Matrix :math:`C` such that :math:`C_{i, j}` is the number of samples in
true class :math:`i` and in predicted class :math:`j`. If
``eps is None``, the dtype of this array will be integer. If ``eps`` is
given, the dtype will be float.
"""
classes, class_idx = np.unique(labels_true, return_inverse=True)
clusters, cluster_idx = np.unique(labels_pred, return_inverse=True)
n_classes = classes.shape[0]
n_clusters = clusters.shape[0]
if n_classes > max_n_classes:
raise ValueError("Too many classes for a clustering metric. If you "
"want to increase the limit, pass parameter "
"max_n_classes to the scoring function")
if n_clusters > max_n_classes:
raise ValueError("Too many clusters for a clustering metric. If you "
"want to increase the limit, pass parameter "
"max_n_classes to the scoring function")
# Using coo_matrix to accelerate simple histogram calculation,
# i.e. bins are consecutive integers
# Currently, coo_matrix is faster than histogram2d for simple cases
contingency = coo_matrix((np.ones(class_idx.shape[0]),
(class_idx, cluster_idx)),
shape=(n_classes, n_clusters),
dtype=np.int).toarray()
if eps is not None:
# don't use += as contingency is integer
contingency = contingency + eps
return contingency
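# Quick illustration of the layout (a comment, not a doctest):
# contingency_matrix([0, 0, 1, 1], [0, 1, 1, 1]) returns
# array([[1, 1],
#        [0, 2]])
# where row i counts true class i and column j predicted cluster j.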
# clustering measures
def adjusted_rand_score(labels_true, labels_pred, max_n_classes=5000):
"""Rand index adjusted for chance
The Rand Index computes a similarity measure between two clusterings
by considering all pairs of samples and counting pairs that are
assigned in the same or different clusters in the predicted and
true clusterings.
The raw RI score is then "adjusted for chance" into the ARI score
using the following scheme::
ARI = (RI - Expected_RI) / (max(RI) - Expected_RI)
The adjusted Rand index is thus ensured to have a value close to
0.0 for random labeling independently of the number of clusters and
samples and exactly 1.0 when the clusterings are identical (up to
a permutation).
ARI is a symmetric measure::
adjusted_rand_score(a, b) == adjusted_rand_score(b, a)
Read more in the :ref:`User Guide <adjusted_rand_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
max_n_classes: int, optional (default=5000)
Maximal number of classes handled by the adjusted_rand_score
metric. Setting it too high can lead to MemoryError or OS
freeze
Returns
-------
ari : float
Similarity score between -1.0 and 1.0. Random labelings have an ARI
close to 0.0. 1.0 stands for perfect match.
Examples
--------
Perfectly matching labelings have a score of 1, even after permuting labels
>>> from sklearn.metrics.cluster import adjusted_rand_score
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_rand_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Labelings that assign all class members to the same clusters
are complete but not always pure, hence penalized::
>>> adjusted_rand_score([0, 0, 1, 2], [0, 0, 1, 1]) # doctest: +ELLIPSIS
0.57...
ARI is symmetric, so labelings that have pure clusters with members
coming from the same classes but with unnecessary splits are penalized::
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 2]) # doctest: +ELLIPSIS
0.57...
If class members are completely split across different clusters, the
assignment is totally incomplete, hence the ARI is very low::
>>> adjusted_rand_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [Hubert1985] `L. Hubert and P. Arabie, Comparing Partitions,
Journal of Classification 1985`
http://www.springerlink.com/content/x64124718341j1j0/
.. [wk] http://en.wikipedia.org/wiki/Rand_index#Adjusted_Rand_index
See also
--------
adjusted_mutual_info_score: Adjusted Mutual Information
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split;
# or trivial clustering where each sample is assigned a unique cluster.
# These are perfect matches hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0
or classes.shape[0] == clusters.shape[0] == len(labels_true)):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred,
max_n_classes=max_n_classes)
# Compute the ARI using the contingency data
sum_comb_c = sum(comb2(n_c) for n_c in contingency.sum(axis=1))
sum_comb_k = sum(comb2(n_k) for n_k in contingency.sum(axis=0))
sum_comb = sum(comb2(n_ij) for n_ij in contingency.flatten())
prod_comb = (sum_comb_c * sum_comb_k) / float(comb(n_samples, 2))
mean_comb = (sum_comb_k + sum_comb_c) / 2.
return ((sum_comb - prod_comb) / (mean_comb - prod_comb))
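# Worked check of the 0.57... doctest above: for labels_true=[0, 0, 1, 2] and
# labels_pred=[0, 0, 1, 1] the contingency matrix is [[2, 0], [0, 1], [0, 1]],
# giving sum_comb_c = comb2(2) = 1, sum_comb_k = comb2(2) + comb2(2) = 2 and
# sum_comb = 1; then prod_comb = 1 * 2 / comb(4, 2) = 1/3, mean_comb = 3/2 and
# ARI = (1 - 1/3) / (3/2 - 1/3) = 4/7 ~= 0.571.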
def homogeneity_completeness_v_measure(labels_true, labels_pred,
max_n_classes=5000):
"""Compute the homogeneity and completeness and V-Measure scores at once
Those metrics are based on normalized conditional entropy measures of
the clustering labeling to evaluate, given knowledge of the ground
truth class labels of the same samples.
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
Both scores have positive values between 0.0 and 1.0, larger values
being desirable.
Those 3 metrics are independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score values in any way.
V-Measure is furthermore symmetric: swapping ``labels_true`` and
``labels_pred`` will give the same score. This does not hold for
homogeneity and completeness.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
max_n_classes: int, optional (default=5000)
Maximal number of classes handled by this metric. Setting it too
high can lead to a MemoryError or an OS freeze.
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
v_measure: float
harmonic mean of the first two
See also
--------
homogeneity_score
completeness_score
v_measure_score
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
if len(labels_true) == 0:
return 1.0, 1.0, 1.0
entropy_C = entropy(labels_true)
entropy_K = entropy(labels_pred)
MI = mutual_info_score(labels_true, labels_pred,
max_n_classes=max_n_classes)
homogeneity = MI / entropy_C if entropy_C else 1.0
completeness = MI / entropy_K if entropy_K else 1.0
if homogeneity + completeness == 0.0:
v_measure = 0.0
else:
# harmonic mean of homogeneity and completeness; renamed locally so it
# does not shadow the module-level v_measure_score function
v_measure = (2.0 * homogeneity * completeness
/ (homogeneity + completeness))
return homogeneity, completeness, v_measure
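# Worked corner case: for labels_true=[0, 0, 1, 1] and labels_pred=[0, 0, 0, 0]
# we get entropy_K = 0 (a single cluster) and MI = 0, so completeness = 1.0,
# homogeneity = 0 / log(2) = 0.0, and the harmonic mean (V-measure) is 0.0.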
def homogeneity_score(labels_true, labels_pred, max_n_classes=5000):
"""Homogeneity metric of a cluster labeling given a ground truth
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``labels_true`` with ``labels_pred``
will return the :func:`completeness_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
max_n_classes: int, optional (default=5000)
Maximal number of classes handled by this metric. Setting it too
high can lead to a MemoryError or an OS freeze.
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
completeness_score
v_measure_score
Examples
--------
Perfect labelings are homogeneous::
>>> from sklearn.metrics.cluster import homogeneity_score
>>> homogeneity_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that further split classes into more clusters can be
perfectly homogeneous::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
1.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
1.0...
Clusters that include samples from different classes do not make for a
homogeneous labeling::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 0, 1]))
... # doctest: +ELLIPSIS
0.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred,
max_n_classes)[0]
def completeness_score(labels_true, labels_pred, max_n_classes=5000):
"""Completeness metric of a cluster labeling given a ground truth
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``labels_true`` with ``labels_pred``
will return the :func:`homogeneity_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
max_n_classes: int, optional (default=5000)
Maximal number of classes handled by this metric. Setting it too
high can lead to a MemoryError or an OS freeze.
Returns
-------
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
v_measure_score
Examples
--------
Perfect labelings are complete::
>>> from sklearn.metrics.cluster import completeness_score
>>> completeness_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that assign all class members to the same clusters
are still complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 0, 0, 0]))
1.0
>>> print(completeness_score([0, 1, 2, 3], [0, 0, 1, 1]))
1.0
If class members are split across different clusters, the
assignment cannot be complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 1, 0, 1]))
0.0
>>> print(completeness_score([0, 0, 0, 0], [0, 1, 2, 3]))
0.0
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred,
max_n_classes)[1]
def v_measure_score(labels_true, labels_pred, max_n_classes=5000):
"""V-measure cluster labeling given a ground truth.
This score is identical to :func:`normalized_mutual_info_score`.
The V-measure is the harmonic mean between homogeneity and completeness::
v = 2 * (homogeneity * completeness) / (homogeneity + completeness)
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``labels_true`` with
``labels_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
max_n_classes: int, optional (default=5000)
Maximal number of classes handled by this metric. Setting it too
high can lead to a MemoryError or an OS freeze.
Returns
-------
v_measure: float
score between 0.0 and 1.0. 1.0 stands for a perfectly homogeneous and complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
completeness_score
Examples
--------
Perfect labelings are both homogeneous and complete, hence have score 1.0::
>>> from sklearn.metrics.cluster import v_measure_score
>>> v_measure_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> v_measure_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Labelings that assign all class members to the same clusters
are complete but not homogeneous, hence penalized::
>>> print("%.6f" % v_measure_score([0, 0, 1, 2], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 1, 2, 3], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.66...
Labelings that have pure clusters with members coming from the same
classes are homogeneous, but unnecessary splits harm completeness
and thus penalize the V-measure as well::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.66...
If class members are completely split across different clusters,
the assignment is totally incomplete, hence the V-Measure is null::
>>> print("%.6f" % v_measure_score([0, 0, 0, 0], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.0...
Clusters that include samples from entirely different classes
destroy the homogeneity of the labeling, hence::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred,
max_n_classes)[2]
def mutual_info_score(labels_true, labels_pred, contingency=None,
max_n_classes=5000):
"""Mutual Information between two clusterings
The Mutual Information is a measure of the similarity between two labelings of
the same data. Where :math:`P(i)` is the probability of a random sample
occurring in cluster :math:`U_i` and :math:`P'(j)` is the probability of a
random sample occurring in cluster :math:`V_j`, the Mutual Information
between clusterings :math:`U` and :math:`V` is given as:
.. math::
MI(U,V)=\sum_{i=1}^R \sum_{j=1}^C P(i,j)\log\\frac{P(i,j)}{P(i)P'(j)}
This is equal to the Kullback-Leibler divergence of the joint distribution
with the product distribution of the marginals.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``labels_true`` with
``labels_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
contingency: None or array, shape = [n_classes_true, n_classes_pred]
A contingency matrix given by the :func:`contingency_matrix` function.
If value is ``None``, it will be computed, otherwise the given value is
used, with ``labels_true`` and ``labels_pred`` ignored.
max_n_classes: int, optional (default=5000)
Maximal number of classes handled by the mutual_info_score
metric. Setting it too high can lead to a MemoryError or an OS freeze.
Returns
-------
mi: float
Mutual information, a non-negative value
See also
--------
adjusted_mutual_info_score: Adjusted against chance Mutual Information
normalized_mutual_info_score: Normalized Mutual Information
"""
if contingency is None:
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
contingency = contingency_matrix(labels_true, labels_pred,
max_n_classes=max_n_classes)
contingency = np.array(contingency, dtype='float')
contingency_sum = np.sum(contingency)
pi = np.sum(contingency, axis=1)
pj = np.sum(contingency, axis=0)
outer = np.outer(pi, pj)
nnz = contingency != 0.0
# normalized contingency
contingency_nm = contingency[nnz]
log_contingency_nm = np.log(contingency_nm)
contingency_nm /= contingency_sum
# log(a / b) should be calculated as log(a) - log(b) to avoid
# possible loss of precision
log_outer = -np.log(outer[nnz]) + log(pi.sum()) + log(pj.sum())
mi = (contingency_nm * (log_contingency_nm - log(contingency_sum))
+ contingency_nm * log_outer)
return mi.sum()
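# Worked check (hypothetical labels): for two identical labelings such as
# [0, 0, 1, 1] the contingency matrix is [[2, 0], [0, 2]] and the sum above
# reduces to 2 * (1/2) * log(2) = log(2) ~= 0.6931 (natural logarithm).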
def adjusted_mutual_info_score(labels_true, labels_pred, max_n_classes=5000):
"""Adjusted Mutual Information between two clusterings
Adjusted Mutual Information (AMI) is an adjustment of the Mutual
Information (MI) score to account for chance. It accounts for the fact that
the MI is generally higher for two clusterings with a larger number of
clusters, regardless of whether there is actually more information shared.
For two clusterings :math:`U` and :math:`V`, the AMI is given as::
AMI(U, V) = [MI(U, V) - E(MI(U, V))] / [max(H(U), H(V)) - E(MI(U, V))]
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``labels_true`` with
``labels_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Be mindful that this function is an order of magnitude slower than other
metrics, such as the Adjusted Rand Index.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
max_n_classes: int, optional (default=5000)
Maximal number of classes handled by the adjusted_mutual_info_score
metric. Setting it too high can lead to a MemoryError or an OS freeze.
Returns
-------
ami: float (upper-bounded by 1.0)
The AMI returns a value of 1 when the two partitions are identical
(i.e. perfectly matched). Random partitions (independent labellings)
have an expected AMI around 0 on average and hence can be negative.
See also
--------
adjusted_rand_score: Adjusted Rand Index
mutual_info_score: Mutual Information (not adjusted for chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import adjusted_mutual_info_score
>>> adjusted_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If class members are completely split across different clusters,
the assignment is totally incomplete, hence the AMI is null::
>>> adjusted_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [1] `Vinh, Epps, and Bailey, (2010). Information Theoretic Measures for
Clusterings Comparison: Variants, Properties, Normalization and
Correction for Chance, JMLR
<http://jmlr.csail.mit.edu/papers/volume11/vinh10a/vinh10a.pdf>`_
.. [2] `Wikipedia entry for the Adjusted Mutual Information
<http://en.wikipedia.org/wiki/Adjusted_Mutual_Information>`_
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred,
max_n_classes=max_n_classes)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
emi = expected_mutual_information(contingency, n_samples)
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
ami = (mi - emi) / (max(h_true, h_pred) - emi)
return ami
def normalized_mutual_info_score(labels_true, labels_pred, max_n_classes=5000):
"""Normalized Mutual Information between two clusterings
Normalized Mutual Information (NMI) is a normalization of the Mutual
Information (MI) score to scale the results between 0 (no mutual
information) and 1 (perfect correlation). In this function, mutual
information is normalized by ``sqrt(H(labels_true) * H(labels_pred))``
This measure is not adjusted for chance. Therefore
:func:`adjusted_mutual_info_score` might be preferred.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``labels_true`` with
``labels_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
max_n_classes: int, optional (default=5000)
Maximal number of classes handled by the normalized_mutual_info_score
metric. Setting it too high can lead to a MemoryError or an OS freeze.
Returns
-------
nmi: float
score between 0.0 and 1.0. 1.0 stands for a perfect correlation between the two labelings
See also
--------
adjusted_rand_score: Adjusted Rand Index
adjusted_mutual_info_score: Adjusted Mutual Information (adjusted
against chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import normalized_mutual_info_score
>>> normalized_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> normalized_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If class members are completely split across different clusters,
the assignment is totally incomplete, hence the NMI is null::
>>> normalized_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred,
max_n_classes=max_n_classes)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
nmi = mi / max(np.sqrt(h_true * h_pred), 1e-10)
return nmi
def entropy(labels):
"""Calculates the entropy for a labeling."""
if len(labels) == 0:
return 1.0
label_idx = np.unique(labels, return_inverse=True)[1]
pi = bincount(label_idx).astype(float)  # plain float; np.float is deprecated in modern NumPy
pi = pi[pi > 0]
pi_sum = np.sum(pi)
# log(a / b) should be calculated as log(a) - log(b) to avoid
# possible loss of precision
return -np.sum((pi / pi_sum) * (np.log(pi) - log(pi_sum)))
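# Sanity check (hypothetical labels): entropy([0, 0, 1, 1]) gives
# -(0.5 * log(0.5) + 0.5 * log(0.5)) = log(2) ~= 0.6931 (natural logarithm).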
| bsd-3-clause |
kshedstrom/pyroms | examples/NWGOA3/Fetch_Pacific/get_pacific_v.py | 1 | 5086 | import matplotlib
matplotlib.use('Agg')
import numpy as np
import netCDF4
from datetime import datetime
import pyroms
import pyroms_toolbox
import sys
year = int(sys.argv[1])
def create_HYCOM_file(name):
global nc
print 'Creating file %s' %name
#create netCDF file
nc = netCDF4.Dataset(name, 'w', format='NETCDF3_64BIT')
nc.Author = sys._getframe().f_code.co_name
nc.Created = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
nc.title = 'ROMS + CoSiNE Pacific (U Maine)'
#create dimensions
Mp, Lp = zeta0.shape
Mp -= 1
N = 20
theta_s = 5
theta_b = 0
Tcline = 50
nc.createDimension('xi_v', Lp)
nc.createDimension('eta_v', Mp)
nc.createDimension('s_rho', N)
nc.createDimension('ocean_time', None)
#create variables
# nc.createVariable('lon_v', 'f', ('eta_v', 'xi_v'))
# nc.variables['lon_v'].long_name = 'longitude'
# nc.variables['lon_v'].units = 'degrees_east'
# nc.variables['lon_v'][:] = lon
#
# nc.createVariable('lat_v', 'f', ('eta_v', 'xi_v'))
# nc.variables['lat_v'].long_name = 'latitude'
# nc.variables['lat_v'].units = 'degrees_north'
# nc.variables['lat_v'][:] = lat
#
# nc.createVariable('s_rho', 'f', ('s_rho'))
# nc.variables['s_rho'].standard_name = 'ocean_s_coordinate'
# nc.variables['s_rho'].formula_terms = 's: s_rho eta: zeta depth: h a: theta_s b: theta_b depth_c: Tcline'
# nc.variables['s_rho'][:] = s_rho
nc.createVariable('theta_s', 'f')
nc.variables['theta_s'][:] = theta_s
nc.createVariable('theta_b', 'f')
nc.variables['theta_b'][:] = theta_b
nc.createVariable('Tcline', 'f')
nc.variables['Tcline'][:] = Tcline
nc.createVariable('ocean_time', 'f', ('ocean_time'))
nc.variables['ocean_time'].units = 'days since 1900-01-01 00:00:00'
nc.variables['ocean_time'].calendar = 'LEAP'
nc.variables['ocean_time'].long_name = 'ocean time'
# nc.variables['ocean_time'][:] = time
nc.createVariable(outvarname, 'f', ('ocean_time', 's_rho', 'eta_v', 'xi_v'), fill_value=spval)
nc.variables[outvarname].long_name = long_name
nc.variables[outvarname].units = units
nc.variables[outvarname].field = field
nc.variables[outvarname].time = 'ocean_time'
nc.variables[outvarname].coordinates = 'ocean_time s_rho lon_v lat_v'
print 'Done with header for file %s' %name
# get Pacific data from 1991 to 2008
#year = 1991
retry = True
spval = 1.e30
rec_start = (year-1948)*120
rec_end = rec_start + 120
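# (120 records per year at a 73-hour spacing: 120 * 73 h = 8760 h = 365 days)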
invarname = 'v'
outvarname = 'v'
#read grid and variable attributes from the first file
#url='http://viz.clusters.umaine.edu:8080/thredds/dodsC/pacific/1991-2008'
url='http://viz.clusters.umaine.edu:8080/thredds/dodsC/pacific/roms50-avg'
dataset = netCDF4.Dataset(url)
zeta0 = dataset.variables['zeta'][0,170:215,195:265]
#lon = dataset.variables['lon_v'][170:215-1,195:265]
#lat = dataset.variables['lat_v'][170:215-1,195:265]
#theta_s = dataset.variables['theta_s'][:]
#theta_b = dataset.variables['theta_b'][:]
#Tcline = dataset.variables['Tcline'][:]
#s_rho = dataset.variables['s_rho'][:]
time = dataset.variables['scrum_time'][:]
#spval = dataset.variables[invarname]._FillValue
units = dataset.variables[invarname].units
field = dataset.variables[invarname].field
long_name = dataset.variables[invarname].long_name
#dataset.close()
retry_day = []
# loop over records, 73 hours apart
outfile = 'data/Pacific_%s_%04d.nc' %(outvarname,year)
create_HYCOM_file(outfile)
day_out = 0
for day in range(rec_start,rec_end):
print 'Processing file for %s, day %d, year %04d' %(invarname, day_out*3, year)
#get data from server
try:
var = dataset.variables[invarname][day,:,170:215-1,195:265]
# spval = var.get_fill_value()
# dataset.close()
print 'Got %s from server...' %invarname
except:
print 'No file on the server... We skip this day.'
retry_day.append((day,day_out))
continue
#create netCDF file
nc.variables[outvarname][day_out] = var
jday = pyroms_toolbox.date2jday(datetime(year, 1, 1)) + day_out*73/24.
nc.variables['ocean_time'][day_out] = jday
day_out += 1
if retry:
if len(retry_day) != 0:
print "Some file have not been downloded... Let's try again"
while len(retry_day) != 0:
for (day,day_out) in retry_day:
print 'Retry file for %s, day %03d, year %04d' %(invarname, day_out, year)
#get data from server
try:
var = dataset.variables[invarname][day,:,170:215-1,195:265]
# spval = var.get_fill_value()
print 'Got %s from server...' %invarname
except:
print 'No file on the server... We skip this day.'
continue
#create netCDF file
jday = pyroms_toolbox.date2jday(datetime(year, 1, 1)) + day_out*73/24.
nc.variables[outvarname][day_out] = var
nc.variables['ocean_time'][day_out] = jday
retry_day.remove((day,day_out))
dataset.close()
nc.close()
| bsd-3-clause |
planetarymike/IDL-Colorbars | IDL_py_test/042_CB-Dark2.py | 1 | 8651 | from matplotlib.colors import LinearSegmentedColormap
from numpy import nan, inf
cm_data = [[0.105882, 0.619608, 0.466667],
[0.12549, 0.611765, 0.454902],
[0.14902, 0.607843, 0.443137],
[0.168627, 0.6, 0.427451],
[0.188235, 0.592157, 0.415686],
[0.207843, 0.584314, 0.403922],
[0.231373, 0.580392, 0.392157],
[0.25098, 0.572549, 0.376471],
[0.270588, 0.564706, 0.364706],
[0.294118, 0.556863, 0.352941],
[0.313725, 0.552941, 0.341176],
[0.333333, 0.545098, 0.32549],
[0.352941, 0.537255, 0.313725],
[0.376471, 0.529412, 0.301961],
[0.396078, 0.52549, 0.290196],
[0.415686, 0.517647, 0.27451],
[0.435294, 0.509804, 0.262745],
[0.458824, 0.501961, 0.25098],
[0.478431, 0.498039, 0.239216],
[0.498039, 0.490196, 0.223529],
[0.521569, 0.482353, 0.211765],
[0.541176, 0.47451, 0.2],
[0.560784, 0.470588, 0.188235],
[0.580392, 0.462745, 0.172549],
[0.603922, 0.454902, 0.160784],
[0.623529, 0.447059, 0.14902],
[0.643137, 0.443137, 0.137255],
[0.666667, 0.435294, 0.121569],
[0.686275, 0.427451, 0.109804],
[0.705882, 0.419608, 0.0980392],
[0.72549, 0.415686, 0.0862745],
[0.74902, 0.407843, 0.0705882],
[0.768627, 0.4, 0.0588235],
[0.788235, 0.392157, 0.0470588],
[0.807843, 0.388235, 0.0313725],
[0.831373, 0.380392, 0.0196078],
[0.85098, 0.372549, 0.00784314],
[0.839216, 0.372549, 0.027451],
[0.827451, 0.376471, 0.0470588],
[0.819608, 0.376471, 0.0666667],
[0.807843, 0.380392, 0.0862745],
[0.796078, 0.380392, 0.105882],
[0.784314, 0.384314, 0.12549],
[0.776471, 0.384314, 0.141176],
[0.764706, 0.388235, 0.160784],
[0.752941, 0.388235, 0.180392],
[0.741176, 0.392157, 0.2],
[0.729412, 0.392157, 0.219608],
[0.721569, 0.396078, 0.239216],
[0.709804, 0.396078, 0.258824],
[0.698039, 0.4, 0.278431],
[0.686275, 0.4, 0.298039],
[0.678431, 0.403922, 0.317647],
[0.666667, 0.403922, 0.337255],
[0.654902, 0.407843, 0.356863],
[0.643137, 0.407843, 0.372549],
[0.631373, 0.407843, 0.392157],
[0.623529, 0.411765, 0.411765],
[0.611765, 0.411765, 0.431373],
[0.6, 0.415686, 0.45098],
[0.588235, 0.415686, 0.470588],
[0.580392, 0.419608, 0.490196],
[0.568627, 0.419608, 0.509804],
[0.556863, 0.423529, 0.529412],
[0.545098, 0.423529, 0.54902],
[0.533333, 0.427451, 0.568627],
[0.52549, 0.427451, 0.588235],
[0.513725, 0.431373, 0.603922],
[0.501961, 0.431373, 0.623529],
[0.490196, 0.435294, 0.643137],
[0.482353, 0.435294, 0.662745],
[0.470588, 0.439216, 0.682353],
[0.458824, 0.439216, 0.701961],
[0.470588, 0.431373, 0.698039],
[0.482353, 0.423529, 0.694118],
[0.494118, 0.415686, 0.690196],
[0.505882, 0.407843, 0.686275],
[0.517647, 0.4, 0.678431],
[0.529412, 0.392157, 0.67451],
[0.545098, 0.388235, 0.670588],
[0.556863, 0.380392, 0.666667],
[0.568627, 0.372549, 0.662745],
[0.580392, 0.364706, 0.658824],
[0.592157, 0.356863, 0.654902],
[0.603922, 0.34902, 0.65098],
[0.615686, 0.341176, 0.647059],
[0.627451, 0.333333, 0.639216],
[0.639216, 0.32549, 0.635294],
[0.65098, 0.317647, 0.631373],
[0.662745, 0.309804, 0.627451],
[0.67451, 0.301961, 0.623529],
[0.690196, 0.298039, 0.619608],
[0.701961, 0.290196, 0.615686],
[0.713725, 0.282353, 0.611765],
[0.72549, 0.27451, 0.607843],
[0.737255, 0.266667, 0.603922],
[0.74902, 0.258824, 0.596078],
[0.760784, 0.25098, 0.592157],
[0.772549, 0.243137, 0.588235],
[0.784314, 0.235294, 0.584314],
[0.796078, 0.227451, 0.580392],
[0.807843, 0.219608, 0.576471],
[0.819608, 0.211765, 0.572549],
[0.835294, 0.207843, 0.568627],
[0.847059, 0.2, 0.564706],
[0.858824, 0.192157, 0.556863],
[0.870588, 0.184314, 0.552941],
[0.882353, 0.176471, 0.54902],
[0.894118, 0.168627, 0.545098],
[0.905882, 0.160784, 0.541176],
[0.890196, 0.172549, 0.529412],
[0.878431, 0.188235, 0.517647],
[0.862745, 0.2, 0.505882],
[0.85098, 0.215686, 0.494118],
[0.835294, 0.227451, 0.482353],
[0.823529, 0.243137, 0.470588],
[0.807843, 0.254902, 0.458824],
[0.792157, 0.270588, 0.447059],
[0.780392, 0.282353, 0.435294],
[0.764706, 0.298039, 0.423529],
[0.752941, 0.309804, 0.411765],
[0.737255, 0.32549, 0.4],
[0.721569, 0.337255, 0.388235],
[0.709804, 0.352941, 0.376471],
[0.694118, 0.364706, 0.364706],
[0.682353, 0.380392, 0.352941],
[0.666667, 0.392157, 0.341176],
[0.654902, 0.407843, 0.329412],
[0.639216, 0.419608, 0.317647],
[0.623529, 0.431373, 0.305882],
[0.611765, 0.447059, 0.294118],
[0.596078, 0.458824, 0.282353],
[0.584314, 0.47451, 0.270588],
[0.568627, 0.486275, 0.258824],
[0.552941, 0.501961, 0.247059],
[0.541176, 0.513725, 0.235294],
[0.52549, 0.529412, 0.223529],
[0.513725, 0.541176, 0.211765],
[0.498039, 0.556863, 0.2],
[0.486275, 0.568627, 0.188235],
[0.470588, 0.584314, 0.176471],
[0.454902, 0.596078, 0.164706],
[0.443137, 0.611765, 0.152941],
[0.427451, 0.623529, 0.141176],
[0.415686, 0.639216, 0.129412],
[0.4, 0.65098, 0.117647],
[0.411765, 0.65098, 0.113725],
[0.427451, 0.65098, 0.109804],
[0.439216, 0.65098, 0.109804],
[0.454902, 0.654902, 0.105882],
[0.466667, 0.654902, 0.101961],
[0.482353, 0.654902, 0.0980392],
[0.494118, 0.654902, 0.0980392],
[0.509804, 0.654902, 0.0941176],
[0.521569, 0.654902, 0.0901961],
[0.537255, 0.654902, 0.0862745],
[0.54902, 0.654902, 0.0862745],
[0.564706, 0.658824, 0.0823529],
[0.576471, 0.658824, 0.0784314],
[0.588235, 0.658824, 0.0745098],
[0.603922, 0.658824, 0.0745098],
[0.615686, 0.658824, 0.0705882],
[0.631373, 0.658824, 0.0666667],
[0.643137, 0.658824, 0.0627451],
[0.658824, 0.662745, 0.0627451],
[0.670588, 0.662745, 0.0588235],
[0.686275, 0.662745, 0.054902],
[0.698039, 0.662745, 0.0509804],
[0.713725, 0.662745, 0.0509804],
[0.72549, 0.662745, 0.0470588],
[0.737255, 0.662745, 0.0431373],
[0.752941, 0.666667, 0.0392157],
[0.764706, 0.666667, 0.0392157],
[0.780392, 0.666667, 0.0352941],
[0.792157, 0.666667, 0.0313725],
[0.807843, 0.666667, 0.027451],
[0.819608, 0.666667, 0.027451],
[0.835294, 0.666667, 0.0235294],
[0.847059, 0.666667, 0.0196078],
[0.862745, 0.670588, 0.0156863],
[0.87451, 0.670588, 0.0156863],
[0.890196, 0.670588, 0.0117647],
[0.901961, 0.670588, 0.00784314],
[0.894118, 0.666667, 0.0117647],
[0.886275, 0.658824, 0.0156863],
[0.882353, 0.654902, 0.0156863],
[0.87451, 0.647059, 0.0196078],
[0.866667, 0.643137, 0.0235294],
[0.858824, 0.635294, 0.027451],
[0.854902, 0.631373, 0.027451],
[0.847059, 0.623529, 0.0313725],
[0.839216, 0.619608, 0.0352941],
[0.831373, 0.611765, 0.0392157],
[0.823529, 0.607843, 0.0392157],
[0.819608, 0.6, 0.0431373],
[0.811765, 0.596078, 0.0470588],
[0.803922, 0.588235, 0.0509804],
[0.796078, 0.584314, 0.0509804],
[0.792157, 0.576471, 0.054902],
[0.784314, 0.572549, 0.0588235],
[0.776471, 0.568627, 0.0627451],
[0.768627, 0.560784, 0.0627451],
[0.760784, 0.556863, 0.0666667],
[0.756863, 0.54902, 0.0705882],
[0.74902, 0.545098, 0.0745098],
[0.741176, 0.537255, 0.0745098],
[0.733333, 0.533333, 0.0784314],
[0.729412, 0.52549, 0.0823529],
[0.721569, 0.521569, 0.0862745],
[0.713725, 0.513725, 0.0862745],
[0.705882, 0.509804, 0.0901961],
[0.698039, 0.501961, 0.0941176],
[0.694118, 0.498039, 0.0980392],
[0.686275, 0.490196, 0.0980392],
[0.678431, 0.486275, 0.101961],
[0.670588, 0.478431, 0.105882],
[0.666667, 0.47451, 0.109804],
[0.658824, 0.466667, 0.109804],
[0.65098, 0.462745, 0.113725],
[0.643137, 0.462745, 0.121569],
[0.639216, 0.458824, 0.129412],
[0.631373, 0.458824, 0.137255],
[0.623529, 0.454902, 0.145098],
[0.615686, 0.454902, 0.152941],
[0.611765, 0.45098, 0.160784],
[0.603922, 0.45098, 0.168627],
[0.596078, 0.45098, 0.176471],
[0.588235, 0.447059, 0.184314],
[0.584314, 0.447059, 0.192157],
[0.576471, 0.443137, 0.2],
[0.568627, 0.443137, 0.207843],
[0.564706, 0.439216, 0.215686],
[0.556863, 0.439216, 0.223529],
[0.54902, 0.439216, 0.231373],
[0.541176, 0.435294, 0.239216],
[0.537255, 0.435294, 0.247059],
[0.529412, 0.431373, 0.254902],
[0.521569, 0.431373, 0.258824],
[0.513725, 0.427451, 0.266667],
[0.509804, 0.427451, 0.27451],
[0.501961, 0.423529, 0.282353],
[0.494118, 0.423529, 0.290196],
[0.486275, 0.423529, 0.298039],
[0.482353, 0.419608, 0.305882],
[0.47451, 0.419608, 0.313725],
[0.466667, 0.415686, 0.321569],
[0.462745, 0.415686, 0.329412],
[0.454902, 0.411765, 0.337255],
[0.447059, 0.411765, 0.345098],
[0.439216, 0.411765, 0.352941],
[0.435294, 0.407843, 0.360784],
[0.427451, 0.407843, 0.368627],
[0.419608, 0.403922, 0.376471],
[0.411765, 0.403922, 0.384314],
[0.407843, 0.4, 0.392157],
[0.4, 0.4, 0.4]]
test_cm = LinearSegmentedColormap.from_list(__file__, cm_data)
if __name__ == "__main__":
import matplotlib.pyplot as plt
import numpy as np
try:
from pycam02ucs.cm.viscm import viscm
viscm(test_cm)
except ImportError:
print("pycam02ucs not found, falling back on simple display")
plt.imshow(np.linspace(0, 100, 256)[None, :], aspect='auto',
cmap=test_cm)
plt.show()
| gpl-2.0 |
rseubert/scikit-learn | examples/ensemble/plot_bias_variance.py | 357 | 7324 | """
============================================================
Single estimator versus bagging: bias-variance decomposition
============================================================
This example illustrates and compares the bias-variance decomposition of the
expected mean squared error of a single estimator against a bagging ensemble.
In regression, the expected mean squared error of an estimator can be
decomposed in terms of bias, variance and noise. On average over datasets of
the regression problem, the bias term measures the average amount by which the
predictions of the estimator differ from the predictions of the best possible
estimator for the problem (i.e., the Bayes model). The variance term measures
the variability of the predictions of the estimator when fit over different
instances LS of the problem. Finally, the noise measures the irreducible part
of the error, which is due to the variability in the data.
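In symbols, writing y for the noisy target and y_hat(x) for the prediction
of an estimator trained on a random training set LS, the pointwise
decomposition computed below reads
E_LS[(y - y_hat(x))^2] = noise(x) + bias^2(x) + variance(x).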
The upper left figure illustrates the predictions (in dark red) of a single
decision tree trained over a random dataset LS (the blue dots) of a toy 1d
regression problem. It also illustrates the predictions (in light red) of other
single decision trees trained over other (and different) randomly drawn
instances LS of the problem. Intuitively, the variance term here corresponds to
the width of the beam of predictions (in light red) of the individual
estimators. The larger the variance, the more sensitive are the predictions for
`x` to small changes in the training set. The bias term corresponds to the
difference between the average prediction of the estimator (in cyan) and the
best possible model (in dark blue). On this problem, we can thus observe that
the bias is quite low (both the cyan and the blue curves are close to each
other) while the variance is large (the red beam is rather wide).
The lower left figure plots the pointwise decomposition of the expected mean
squared error of a single decision tree. It confirms that the bias term (in
blue) is low while the variance is large (in green). It also illustrates the
noise part of the error which, as expected, appears to be constant and around
`0.01`.
The right figures correspond to the same plots but using instead a bagging
ensemble of decision trees. In both figures, we can observe that the bias term
is larger than in the previous case. In the upper right figure, the difference
between the average prediction (in cyan) and the best possible model is larger
(e.g., notice the offset around `x=2`). In the lower right figure, the bias
curve is also slightly higher than in the lower left figure. In terms of
variance however, the beam of predictions is narrower, which suggests that the
variance is lower. Indeed, as the lower right figure confirms, the variance
term (in green) is lower than for single decision trees. Overall, the bias-
variance decomposition is therefore no longer the same. The tradeoff is better
for bagging: averaging several decision trees fit on bootstrap copies of the
dataset slightly increases the bias term but allows for a larger reduction of
the variance, which results in a lower overall mean squared error (compare the
red curves int the lower figures). The script output also confirms this
intuition. The total error of the bagging ensemble is lower than the total
error of a single decision tree, and this difference indeed mainly stems from a
reduced variance.
For further details on bias-variance decomposition, see section 7.3 of [1]_.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman,
"Elements of Statistical Learning", Springer, 2009.
"""
print(__doc__)
# Author: Gilles Louppe <g.louppe@gmail.com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import BaggingRegressor
from sklearn.tree import DecisionTreeRegressor
# Settings
n_repeat = 50 # Number of iterations for computing expectations
n_train = 50 # Size of the training set
n_test = 1000 # Size of the test set
noise = 0.1 # Standard deviation of the noise
np.random.seed(0)
# Change this for exploring the bias-variance decomposition of other
# estimators. This should work well for estimators with high variance (e.g.,
# decision trees or KNN), but poorly for estimators with low variance (e.g.,
# linear models).
estimators = [("Tree", DecisionTreeRegressor()),
("Bagging(Tree)", BaggingRegressor(DecisionTreeRegressor()))]
n_estimators = len(estimators)
# Generate data
def f(x):
x = x.ravel()
return np.exp(-x ** 2) + 1.5 * np.exp(-(x - 2) ** 2)
def generate(n_samples, noise, n_repeat=1):
X = np.random.rand(n_samples) * 10 - 5
X = np.sort(X)
if n_repeat == 1:
y = f(X) + np.random.normal(0.0, noise, n_samples)
else:
y = np.zeros((n_samples, n_repeat))
for i in range(n_repeat):
y[:, i] = f(X) + np.random.normal(0.0, noise, n_samples)
X = X.reshape((n_samples, 1))
return X, y
X_train = []
y_train = []
for i in range(n_repeat):
X, y = generate(n_samples=n_train, noise=noise)
X_train.append(X)
y_train.append(y)
X_test, y_test = generate(n_samples=n_test, noise=noise, n_repeat=n_repeat)
# Loop over estimators to compare
for n, (name, estimator) in enumerate(estimators):
# Compute predictions
y_predict = np.zeros((n_test, n_repeat))
for i in range(n_repeat):
estimator.fit(X_train[i], y_train[i])
y_predict[:, i] = estimator.predict(X_test)
# Bias^2 + Variance + Noise decomposition of the mean squared error
y_error = np.zeros(n_test)
for i in range(n_repeat):
for j in range(n_repeat):
y_error += (y_test[:, j] - y_predict[:, i]) ** 2
y_error /= (n_repeat * n_repeat)
y_noise = np.var(y_test, axis=1)
y_bias = (f(X_test) - np.mean(y_predict, axis=1)) ** 2
y_var = np.var(y_predict, axis=1)
print("{0}: {1:.4f} (error) = {2:.4f} (bias^2) "
" + {3:.4f} (var) + {4:.4f} (noise)".format(name,
np.mean(y_error),
np.mean(y_bias),
np.mean(y_var),
np.mean(y_noise)))
# Plot figures
plt.subplot(2, n_estimators, n + 1)
plt.plot(X_test, f(X_test), "b", label="$f(x)$")
plt.plot(X_train[0], y_train[0], ".b", label="LS ~ $y = f(x)+noise$")
for i in range(n_repeat):
if i == 0:
plt.plot(X_test, y_predict[:, i], "r", label="$\^y(x)$")
else:
plt.plot(X_test, y_predict[:, i], "r", alpha=0.05)
plt.plot(X_test, np.mean(y_predict, axis=1), "c",
label="$\mathbb{E}_{LS} \^y(x)$")
plt.xlim([-5, 5])
plt.title(name)
if n == 0:
plt.legend(loc="upper left", prop={"size": 11})
plt.subplot(2, n_estimators, n_estimators + n + 1)
plt.plot(X_test, y_error, "r", label="$error(x)$")
plt.plot(X_test, y_bias, "b", label="$bias^2(x)$"),
plt.plot(X_test, y_var, "g", label="$variance(x)$"),
plt.plot(X_test, y_noise, "c", label="$noise(x)$")
plt.xlim([-5, 5])
plt.ylim([0, 0.1])
if n == 0:
plt.legend(loc="upper left", prop={"size": 11})
plt.show()
| bsd-3-clause |
rsivapr/scikit-learn | examples/neighbors/plot_classification.py | 8 | 1769 | """
================================
Nearest Neighbors Classification
================================
Sample usage of Nearest Neighbors classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import pylab as pl
from matplotlib.colors import ListedColormap
from sklearn import neighbors, datasets
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
for weights in ['uniform', 'distance']:
# we create an instance of Neighbours Classifier and fit the data.
clf = neighbors.KNeighborsClassifier(n_neighbors, weights=weights)
clf.fit(X, y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
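# np.c_[xx.ravel(), yy.ravel()] flattens the grid into an (n_points, 2) array
# of (x, y) pairs, matching the two input features the classifier was fit on.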
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
pl.figure()
pl.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
pl.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
pl.xlim(xx.min(), xx.max())
pl.ylim(yy.min(), yy.max())
pl.title("3-Class classification (k = %i, weights = '%s')"
% (n_neighbors, weights))
pl.show()
| bsd-3-clause |
myinxd/agn-ae | utils/sample-class.py | 1 | 2956 | # Copyright (C) 2017 Zhixian MA <zxma_sjtu@qq.com>
"""
Rename samples of Best into the JHHMMSS.ss+/-DDMMSS.s style
Reference
=========
[1] math.modf
http://www.runoob.com/python/func-number-modf.html
"""
import os
import math
import numpy as np
import time
import argparse
import pickle
def batch_class_csv(listpath,batch):
"""Batchly rename the samples
Inputs
======
listpath: str
The path of the data list
"""
from pandas import read_csv
from astropy import units as u
from astropy.coordinates import SkyCoord
sample_cls = {"1": [],
"2": [],
"3": [],
"4": []}
names = []
# load csv
f = read_csv(listpath, sep=' ')
ra = f['RAJ2000'] # RA
dec = f['DEJ2000'] # DEC
rcl = f['RCl']
# regularize the batch
if batch[1] > len(f):
batch[1] = len(f) - 1  # last valid (inclusive) row index
# log file optional
fl = open('log.txt', 'a')
# Iteration body
for i in range(batch[0], batch[1]+1):
# timestamp
t = time.strftime('%Y-%m-%d',time.localtime(time.time()))
# get params
temp_c = SkyCoord(ra=ra[i]*u.degree, dec=dec[i]*u.degree, frame='icrs')
# Coordinate transform
ra_rms = tuple(temp_c.ra.hms)
dec_dms = tuple(temp_c.dec.dms)
# save name
ra_h = "%02d" % (int(ra_rms[0]))
ra_m = "%02d" % (int(ra_rms[1]))
ra_s_i = np.fix(np.round(ra_rms[2]*100)/100)
ra_s_f = np.round(ra_rms[2]*100)/100 - ra_s_i
ra_s = "%02d.%02d" % (int(ra_s_i),int(ra_s_f*100))
if dec_dms[0] > 0:
de_d = "+%02d" % (int(dec_dms[0]))
else:
de_d = "-%02d" % (abs(int(dec_dms[0])))
de_m = "%02d" % (abs(int(dec_dms[1])))
de_s_i = np.fix(np.abs(np.round(dec_dms[2]*10)/10))
de_s_f = np.abs(np.round(dec_dms[2]*10)/10) - de_s_i
de_s = "%02d.%01d" % (int(de_s_i),np.round(de_s_f*10))
fname = 'J' + ''.join([ra_h,ra_m,ra_s,de_d,de_m,de_s]) + '.fits'
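# Example of the resulting name (hypothetical coordinates, roughly M87):
# RA = 187.70593 deg -> 12h30m49.42s and Dec = +12.39112 deg -> +12d23m28.0s,
# which yields the file name 'J123049.42+122328.0.fits'.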
names.append(fname)
try:
print("[%s] s: %s type: %s" % (t, fname, rcl[i]))
sample_cls[str(rcl[i])].append(i)
except:
fl.write("%d: %s" % (i, fname))
continue
# print log
# print('[%s]: Fetching %s' % (t, fname))
fl.close()
return sample_cls,names
def main():
# Init
parser = argparse.ArgumentParser(description="Rename FIRST observations.")
# Parameters
# parser.add_argument("url", help="URL of the archive'")
parser.add_argument("listpath", help="Path of the sample list.")
args = parser.parse_args()
listpath = args.listpath
batch = [0,18284]
sample_cls,names = batch_class_csv(listpath=listpath,
batch=batch)
with open("../sample_cls_names.pkl",'wb') as fp:
pickle.dump({"names":names, "labels":sample_cls},fp)
if __name__ == "__main__":
main()
| mit |
amueller/advanced_training | mglearn/plot_improper_preprocessing.py | 2 | 3016 | import matplotlib.pyplot as plt
def make_bracket(s, xy, textxy, width, ax):
annotation = ax.annotate(
s, xy, textxy, ha="center", va="center", size=20,
arrowprops=dict(arrowstyle="-[", fc="w", ec="k",
lw=2,), bbox=dict(boxstyle="square", fc="w"))
annotation.arrow_patch.get_arrowstyle().widthB = width
def plot_improper_processing():
fig, axes = plt.subplots(2, 1, figsize=(15, 10))
for axis in axes:
bars = axis.barh([0, 0, 0], [11.9, 2.9, 4.9], left=[0, 12, 15],
color=['white', 'grey', 'grey'], hatch="//")
bars[2].set_hatch(r"")
axis.set_yticks(())
axis.set_frame_on(False)
axis.set_ylim(-.1, 6)
axis.set_xlim(-0.1, 20.1)
axis.set_xticks(())
axis.tick_params(length=0, labeltop=True, labelbottom=False)
axis.text(6, -.3, "training folds",
fontdict={'fontsize': 14}, horizontalalignment="center")
axis.text(13.5, -.3, "validation fold",
fontdict={'fontsize': 14}, horizontalalignment="center")
axis.text(17.5, -.3, "test set",
fontdict={'fontsize': 14}, horizontalalignment="center")
make_bracket("scaler fit", (7.5, 1.3), (7.5, 2.), 15, axes[0])
make_bracket("SVC fit", (6, 3), (6, 4), 12, axes[0])
make_bracket("SVC predict", (13.4, 3), (13.4, 4), 2.5, axes[0])
axes[0].set_title("Cross validation")
axes[1].set_title("Test set prediction")
make_bracket("scaler fit", (7.5, 1.3), (7.5, 2.), 15, axes[1])
make_bracket("SVC fit", (7.5, 3), (7.5, 4), 15, axes[1])
make_bracket("SVC predict", (17.5, 3), (17.5, 4), 4.8, axes[1])
def plot_proper_processing():
fig, axes = plt.subplots(2, 1, figsize=(15, 8))
for axis in axes:
bars = axis.barh([0, 0, 0], [11.9, 2.9, 4.9],
left=[0, 12, 15], color=['white', 'grey', 'grey'], hatch="//")
bars[2].set_hatch(r"")
axis.set_yticks(())
axis.set_frame_on(False)
axis.set_ylim(-.1, 4.5)
axis.set_xlim(-0.1, 20.1)
axis.set_xticks(())
axis.tick_params(length=0, labeltop=True, labelbottom=False)
axis.text(6, -.3, "training folds", fontdict={'fontsize': 14}, horizontalalignment="center")
axis.text(13.5, -.3, "validation fold", fontdict={'fontsize': 14}, horizontalalignment="center")
axis.text(17.5, -.3, "test set", fontdict={'fontsize': 14}, horizontalalignment="center")
make_bracket("scaler fit", (6, 1.3), (6, 2.), 12, axes[0])
make_bracket("SVC fit", (6, 3), (6, 4), 12, axes[0])
make_bracket("SVC predict", (13.4, 3), (13.4, 4), 2.5, axes[0])
axes[0].set_title("Cross validation")
axes[1].set_title("Test set prediction")
make_bracket("scaler fit", (7.5, 1.3), (7.5, 2.), 15, axes[1])
make_bracket("SVC fit", (7.5, 3), (7.5, 4), 15, axes[1])
make_bracket("SVC predict", (17.5, 3), (17.5, 4), 4.8, axes[1])
fig.subplots_adjust(hspace=.3)
| bsd-2-clause |
BorisJeremic/Real-ESSI-Examples | analytic_solution/test_cases/Contact/Static_Normal_Contact/Normal_Behviour/SoftContact_ElPPlShear/plot.py | 6 | 1583 | #!/usr/bin/python
import h5py
import matplotlib.pylab as plt
import sys
import numpy as np;
################ Node # 2 Displacement #############################
#######################################
## Analytical Solution
#######################################
finput = h5py.File('Analytical_Solution.feioutput')
plt.figure()
# Read the time and displacement
times = finput["time"][:]
normal_strain = finput["/Model/Elements/Element_Outputs"][6,:]
normal_stress = -finput["/Model/Elements/Element_Outputs"][9,:];
plt.plot(normal_strain,normal_stress,'-r',linewidth=4,label='Analytical Solution')
plt.hold(True)
#######################################
## Current Solution
#######################################
# Go over each feioutput and plot each one.
thefile = "Monotonic_Contact_Behaviour_Adding_Normal_Load.h5.feioutput";
finput = h5py.File(thefile)
# Read the time and displacement
times = finput["time"][:]
normal_strain = finput["/Model/Elements/Element_Outputs"][6,:]
normal_stress = -finput["/Model/Elements/Element_Outputs"][9,:];
# Configure the figure filename, according to the input filename.
outfig=thefile.replace("_","-")
outfigname=outfig.replace("h5.feioutput","pdf")
# Plot the figure. Add labels and titles.
plt.plot(normal_strain,normal_stress,'-k',linewidth=4,label='Numerical Solution')
plt.xlabel(r"Normal Strain $\epsilon$")
plt.ylabel(r"Normal Stress $\sigma$")
plt.legend()
plt.savefig("Contact_Normal_Interface_Behavour.pdf", bbox_inches='tight')
# plt.show()
# #####################################################################
| cc0-1.0 |
sam81/pychoacoustics | tests/test_transformed_up_down_interleaved.py | 1 | 3880 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import numpy, os, sys, unittest
import pandas as pd
from test_utility_functions import*
rootPath = "../../pychoacoustics_data/test_data/"
class TestTransformedUpDown(unittest.TestCase):
def testGeometric(self):
resFileRoot = "res_geometric"
removePreviousResFiles(rootPath + "transformed_up-down_interleaved/"+resFileRoot)
cmdStr = "python3 ../pychoacoustics.pyw -f" + rootPath + "transformed_up-down_interleaved/transformed_up-down_interleaved_geometric.prm -a -q -o --seed 2033 -r" + rootPath + "transformed_up-down_interleaved/"+resFileRoot + ".txt"
x = os.system(cmdStr)
assert(x==0)
storedRes = pd.read_csv(rootPath + "transformed_up-down_interleaved/results/res_geometric_table.csv", sep=";")
currRes = pd.read_csv(rootPath + "transformed_up-down_interleaved/res_geometric_table.csv", sep=";")
numpy.testing.assert_array_equal(currRes["threshold_geometric_track1"], storedRes["threshold_geometric_track1"])
numpy.testing.assert_array_equal(currRes["SD_track1"], storedRes["SD_track1"])
numpy.testing.assert_array_equal(currRes["threshold_geometric_track2"], storedRes["threshold_geometric_track2"])
numpy.testing.assert_array_equal(currRes["SD_track2"], storedRes["SD_track2"])
storedResProc = pd.read_csv(rootPath + "transformed_up-down_interleaved/results/res_geometric_table_sess.csv", sep=";")
currResProc = pd.read_csv(rootPath + "transformed_up-down_interleaved/res_geometric_table_sess.csv", sep=";")
numpy.testing.assert_array_equal(currResProc["threshold_geometric_track1"], storedResProc["threshold_geometric_track1"])
numpy.testing.assert_array_equal(currResProc["SE_track1"], storedResProc["SE_track1"])
numpy.testing.assert_array_equal(currResProc["threshold_geometric_track2"], storedResProc["threshold_geometric_track2"])
numpy.testing.assert_array_equal(currResProc["SE_track2"], storedResProc["SE_track2"])
def testArithmetic(self):
resFileRoot = "res_arithmetic"
removePreviousResFiles(rootPath + "transformed_up-down_interleaved/"+resFileRoot)
cmdStr = "python3 ../pychoacoustics.pyw -f" + rootPath + "transformed_up-down_interleaved/transformed_up-down_interleaved_arithmetic.prm -a -q -o --seed 2033 -r" + rootPath + "transformed_up-down_interleaved/"+resFileRoot + ".txt"
x = os.system(cmdStr)
assert(x==0)
storedRes = pd.read_csv(rootPath + "transformed_up-down_interleaved/results/res_arithmetic_table.csv", sep=";")
currRes = pd.read_csv(rootPath + "transformed_up-down_interleaved/res_arithmetic_table.csv", sep=";")
numpy.testing.assert_array_equal(currRes["threshold_arithmetic_track1"], storedRes["threshold_arithmetic_track1"])
numpy.testing.assert_array_equal(currRes["SD_track1"], storedRes["SD_track1"])
numpy.testing.assert_array_equal(currRes["threshold_arithmetic_track2"], storedRes["threshold_arithmetic_track2"])
numpy.testing.assert_array_equal(currRes["SD_track2"], storedRes["SD_track2"])
storedResProc = pd.read_csv(rootPath + "transformed_up-down_interleaved/results/res_arithmetic_table_sess.csv", sep=";")
currResProc = pd.read_csv(rootPath + "transformed_up-down_interleaved/res_arithmetic_table_sess.csv", sep=";")
numpy.testing.assert_array_equal(currResProc["threshold_arithmetic_track1"], storedResProc["threshold_arithmetic_track1"])
numpy.testing.assert_array_equal(currResProc["SE_track1"], storedResProc["SE_track1"])
numpy.testing.assert_array_equal(currResProc["threshold_arithmetic_track2"], storedResProc["threshold_arithmetic_track2"])
numpy.testing.assert_array_equal(currResProc["SE_track2"], storedResProc["SE_track2"])
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
maxlit/powerindex | powerindex/powerindex.py | 1 | 12803 | import math
import itertools as it
class Party:
def __init__(self,weight,name):
self.weight=weight
self.name=name
def __eq__(self,other):
return self.name==other.name
def __ne__(self,other):
return not (self==other)
def __str__(self):
return "Party %s with weight %s"%(self.name,self.weight)
def __repr__(self):
return self.__str__()
class Game:
def __init__(self,quota,weights=None,parties=None):
if parties is None:
if weights is None:
raise TypeError("You need to put weights or parties as parameter while initializing the game")
else:
self.weights=weights
self.parties=[Party(weights[i],i) for i in range(len(weights))]
else:
self.parties=parties
self.weights=[party.weight for party in parties]
self.quota=quota
self.N=len(self.weights)
self.banzhaf=None
self.shapley_shubik=None
self.s_weights=float(sum(self.weights))
self.nominal=[weight/self.s_weights for weight in self.weights]
def __len__(self):
return len(self.parties)
def __str__(self):
return "The game consists of %s parties, the threshold is %s"%(len(self.parties),self.quota)
"""
This function launches calculation of all implemented indeces
"""
def calc(self):
# find if there's a party with seats greater or equal to quota
ge_quota=map(lambda x: 1 if x>=self.quota else 0,self.weights)
num_ge_quota=sum(ge_quota)
if num_ge_quota==0: # if not calculate according to algos
self.calc_banzhaf()
self.calc_shapley_shubik()
else: # if yes manually assign all power to the party (parties)
self.banzhaf=[x/float(num_ge_quota) for x in ge_quota]
self.shapley_shubik=[x/float(num_ge_quota) for x in ge_quota]
"""
Computes Banzhaf power index using generating function approach.
"""
def calc_banzhaf(self):
coeffs=self._coeffs_of_general_GF("Banzhaf")
pows=[sum(self._coeffs_of_player_GF(coeffs,weight,"Banzhaf")[(self.quota-weight):self.quota]) for weight in self.weights]
s_pows=float(sum(pows))
self.banzhaf=list(map(lambda x: x/s_pows,pows))
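# Worked check (hypothetical [51; 49, 48, 3] game): the winning coalitions are
# {49,48}, {49,3}, {48,3} and {49,48,3}; each player is a swing member in
# exactly two of them, so the normalized Banzhaf index is 1/3 for every player.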
def calc_shapley_shubik(self):
coeffs=self._coeffs_of_general_GF("ShapleyShubik")
pows=[0 for n in range(self.N)]
i=0
n_fac=float(math.factorial(self.N))
for weight in self.weights:
c=self._coeffs_of_player_GF(coeffs,weight,"ShapleyShubik")
for z in range(self.N):
relevant=c[z][(self.quota-weight):self.quota]
binomial_coefficient=math.factorial(z)*math.factorial(self.N-z-1)/n_fac
if len(relevant)>0:
pows[i]+=sum(relevant)*binomial_coefficient
i+=1
#pl_coeffs=[self._coeffs_of_player_GF(coeffs,weight,"ShapleyShubik")[(self.quota-weight):self.quota][0:(self.N)] for weight in self.weights]
#pows=map(lambda x: sum(*x),pl_coeffs)
#pows=[sum(lambda x: sum(x),self._coeffs_of_player_GF(coeffs,weight,"ShapleyShubik")[(self.quota-weight):self.quota][0:(self.N)])) for weight in self.weights]
s_pows=float(sum(pows))
self.shapley_shubik=list(map(lambda x: x/s_pows,pows))
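# A minimal usage sketch (hypothetical weights; in the classic [51; 49, 48, 3]
# game every player is a swing/pivot equally often, so, assuming the
# implementation matches the theory, both indices come out equal):
#   >>> g = Game(51, [49, 48, 3])
#   >>> g.calc()
#   >>> [round(b, 3) for b in g.banzhaf]
#   [0.333, 0.333, 0.333]
#   >>> [round(s, 3) for s in g.shapley_shubik]
#   [0.333, 0.333, 0.333]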
"""
Computes coefficients of the generating function of the game.
"""
def _coeffs_of_general_GF(self,index):
if index=="Banzhaf":
return self._coeffs_of_general_GF_bf()
elif index=="ShapleyShubik":
return self._coeffs_of_general_GF_sh()
def _coeffs_of_general_GF_bf(self):
N=len(self.weights)
W=sum(self.weights)
coeffs=self.GF_coeffs(N,W,"Banzhaf")
for j in range(1,N+1):
for k in range(1,W+1):
if k<self.weights[j-1]:
coeffs[j].append(coeffs[j-1][k])
else:
coeffs[j].append(coeffs[j-1][k]+coeffs[j-1][k-self.weights[j-1]])
if j>2:
coeffs[j-2]=None # free memory
return coeffs[-1]
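# Note (added for clarity): the loop above implements the standard recurrence
# for the coefficients of the generating function prod_j (1 + x**w_j):
# a_j[k] = a_{j-1}[k] + a_{j-1}[k - w_j], where a_j[k] counts the coalitions
# among the first j parties whose total weight is exactly k.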
def _coeffs_of_general_GF_sh(self):
N=len(self.weights)
W=sum(self.weights)
q=self.quota
coeffs=self.GF_coeffs(N,q,"ShapleyShubik")
for i in range(1,N+1):
for k in range(1,i+1):
coeffs[i].append([])
for y in range(0,q):
if y<self.weights[i-1]:
if k==i:
coeffs[i][k].append(0)
else:
coeffs[i][k].append(coeffs[i-1][k][y])
else:
if k==i:
coeffs[i][k].append(coeffs[i-1][k-1][y-self.weights[i-1]])
else:
coeffs[i][k].append(coeffs[i-1][k][y]+coeffs[i-1][k-1][y-self.weights[i-1]])
if k>2:
pass#coeffs[i][k-2]=None # free memory
if i>2:
pass#coeffs[i-2]=None # free memory
return coeffs[-1]
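# Note (added for clarity): here coeffs[i][k][y] counts the coalitions of
# exactly k of the first i parties whose total weight is y (for y < quota);
# calc_shapley_shubik later weights these counts by k!(N-k-1)!/N!.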
def GF_coeffs(self,N,q,index):
if index=="ShapleyShubik":
coeffs=[[[1]+[0 for y in range(1,q+1)]] for j in range(N+1)]# a[j][0][0]=1
#coeffs[0]=[[0 for k in range(N+1)] for y in range(q+1)]# a[0][k][y]=0
coeffs[0]=[[0 for y in range(q+1)] for k in range(N+1)]# a[0][k][y]=0
coeffs[0][0][0]=1
return coeffs
elif index=="Banzhaf":
coeffs=[[1] for i in range(N+1)] # makes a[j][0]=1
coeffs[0]=[0 for i in range(q+1)]# makes a[0][k]=0
coeffs[0][0]=1
return coeffs
def _coeffs_of_player_GF(self,coeffs,w,index):
if index=="Banzhaf":
return self._coeffs_of_player_GF_bf(coeffs,w)
elif index=="ShapleyShubik":
return self._coeffs_of_player_GF_sh(coeffs,w)
def _coeffs_of_player_GF_bf(self,coeffs,w):
W=len(coeffs)-1
c=[0 for i in range(W)]
for k in range(0,W):
if k<w:
c[k]=coeffs[k]
else:
c[k]=coeffs[k]-c[k-w]
return c
def _coeffs_of_player_GF_sh(self,coeffs,w):
c=[[] for i in range(self.N)]
c[0]=[0 for i in range(self.quota)]
c[0][0]=1
for n in range(1,self.N):
for k in range(0,self.quota):
if k<w:
c[n].append(coeffs[n][k])
else:
c[n].append(coeffs[n][k]-c[n-1][k-w])
return c
def pie_chart(self,indices=["banzhaf","shapley"],fname=None,show=True):
hashed_parties=zip(range(self.N),self.parties) # assign a number to each party in order to match their power indices later on
ordered_hashed_parties=sorted(hashed_parties,key=lambda x: x[1].weight)
#median=ordered_hashed_parties[self.N/2][1].weight
#ordered_hashed_parties=sorted(ordered_hashed_parties,key=lambda x: abs(x[1].weight-median)) # mix big and small parties
if self.N>3:# recipe from http://nxn.se/post/46440196846/making-nicer-looking-pie-charts-with-matplotlib
large = ordered_hashed_parties[:int(self.N / 2)]
small = ordered_hashed_parties[int(self.N / 2):]
reordered_hashed_parties = large[::2] + small[::2] + large[1::2] + small[1::2]
angle = 310
else:
reordered_hashed_parties=ordered_hashed_parties
angle=90
reordered_parties=[el[1] for el in reordered_hashed_parties]
pow_indices={}
if "shapley" in indices and self.shapley_shubik is not None:
pow_indices["Shapley-Shubik Power Index"]=[self.shapley_shubik[el[0]] for el in reordered_hashed_parties]
if "banzhaf" in indices and self.banzhaf is not None:
pow_indices["Banzhaf Power Index"]=[self.banzhaf[el[0]] for el in reordered_hashed_parties]
if "nominal" in indices and self.nominal is not None:
pow_indices["% of seats"]=[self.nominal[el[0]] for el in reordered_hashed_parties]
try:
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize']=12,9
plt.rcParams['font.size']=16
plt.rcParams['axes.titlesize']=24
plt.rcParams['font.family']='fantasy'
plt.rcParams['font.weight']='semibold'
#plt.rcParams['savefig.dpi']=200
from matplotlib.gridspec import GridSpec
from matplotlib.colors import ColorConverter
CC=ColorConverter()
except ImportError as ex:
print("pie_chart() requires the matplotlib library, which is not installed on your computer")
raise ex
# make the pie chart look nicer as described here: http://nxn.se/post/46440196846/making-nicer-looking-pie-charts-with-matplotlib
#colors_cycle=it.cycle('bgrmcyk')# blue, green, red, ...
#colors=[colors_cycle.next() for weight in self.weights]
gray_scales=[i/100.0 for i in sorted(range(50,100,10),reverse=True)]
rgb_colors={
"Maroon":"#800000", #(128,0,0),
"Olive":"#808000", #(128,128,0),
"Pale Green":"#98FB98", #(152,251,152),
"Teal":"#008080", #(0,128,128),
"Peru": "#CD853F",#(205,133,63),
"Steel Blue":"#4682B4", #(70,130,180),
"Slate Blue":"#6A5ACD", #(106,90,205),
"Pale Violet Red":"#DB7093", #(219,112,147),
"Khaki":"#F0E68C", #(240,230,140),
"Thistle":"#D8BFD8" #(216,191,216)
}
design="color"
if design=="gray":
colors_cycle=it.cycle(gray_scales)# cycle through the gray-scale values
colors_raw=[next(colors_cycle) for weight in self.weights]
if colors_raw[0]==colors_raw[-1]:
colors_raw[-1]-=0.1
colors=[CC.to_rgb(str(color)) for color in colors_raw]
else:
colors_cycle=it.cycle(rgb_colors.values())# cycle through the named colors defined above
colors_raw=[next(colors_cycle) for weight in self.weights]
colors=colors_raw
I=len(pow_indices)
the_grid = GridSpec(1, I)
i=0
labels_raw=[str(player.name) for player in reordered_parties]
labels=[]
for label in labels_raw:# if the name of a party is too long, split it over two lines
if len(label)>24:
spl=label.split(' ')
L=len(spl)
if L>1: # two or more words, so a split point exists
label="%s\n %s"%(' '.join(spl[:3*L//4]),' '.join(spl[3*L//4:]))
labels.append(label)
for name in pow_indices:
#labels=["%s (%s)"%(reordered_parties[i].name,round(pow_indices[name][i]*100,1)) for i in range(self.N)]
ax=plt.subplot(the_grid[0, i], aspect=1)
try:
plt.pie(pow_indices[name], labels=labels, labeldistance=1.1
, colors=colors ,autopct='%1.1f%%',startangle=angle,
pctdistance=0.8)
except TypeError:
pass
ax.set_title(name,bbox={'facecolor':'0.8', 'pad':5},fontweight='bold')
i+=1
if fname is not None:
plt.savefig(fname)
plt.close()
if show:
plt.show()
def save_as_csv(self,full_filename,indices=["banzhaf","shapley","nominal"]):
pow_indices={}
if "shapley" in indices and self.shapley_shubik is not None:
pow_indices["Shapley-Shubik Power Index"]=self.shapley_shubik
if "banzhaf" in indices and self.banzhaf is not None:
pow_indices["Banzhaf Power Index"]=self.banzhaf
if "nominal" in indices and self.nominal is not None:
pow_indices["% of seats"]=self.nominal
output="Party;Seats;"+";".join(pow_indices.keys())+"\n"
c=0
for party in self.parties:
output+="%s;%s"%(party.name,party.weight)
for name in pow_indices:
output+=";%s"%str(pow_indices[name][c])
output+="\n"
c+=1
f=open(full_filename,'w')
f.write(output)
f.close()
def hist():
# to do: plot a histogram of the power indices, e.g. something like
# n, bins, patches = plt.hist(x, num_bins, normed=1, facecolor='green', alpha=0.5)
print("not implemented yet")
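# --- Illustrative usage sketch (added; not part of the original module). ---
# A minimal session for a hypothetical 3-party parliament; the quota and
# weights below are made up for demonstration.
#
#   game = Game(quota=51, weights=[50, 30, 20])
#   game.calc()                 # fills game.banzhaf and game.shapley_shubik
#   print(game.banzhaf)         # expected [0.6, 0.2, 0.2] for these weights
#   print(game.shapley_shubik)  # expected approx. [0.667, 0.167, 0.167]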
| mit |
vighneshbirodkar/scikit-image | doc/examples/features_detection/plot_windowed_histogram.py | 4 | 5150 | """
========================
Sliding window histogram
========================
Histogram matching can be used for object detection in images [1]_. This
example extracts a single coin from the ``skimage.data.coins`` image and uses
histogram matching to attempt to locate it within the original image.
First, a box-shaped region of the image containing the target coin is
extracted and a histogram of its greyscale values is computed.
Next, for each pixel in the test image, a histogram of the greyscale values in
a region of the image surrounding the pixel is computed.
``skimage.filters.rank.windowed_histogram`` is used for this task, as it employs
an efficient sliding window based algorithm that is able to compute these
histograms quickly [2]_. The local histogram for the region surrounding each
pixel in the image is compared to that of the single coin, with a similarity
measure being computed and displayed.
The histogram of the single coin is computed using ``numpy.histogram`` on a box
shaped region surrounding the coin, while the sliding window histograms are
computed using a disc shaped structural element of a slightly different size.
This is done in aid of demonstrating that the technique still finds similarity
in spite of these differences.
To demonstrate the rotational invariance of the technique, the same test is
performed on a version of the coins image rotated by 45 degrees.
References
----------
.. [1] Porikli, F. "Integral Histogram: A Fast Way to Extract Histograms
in Cartesian Spaces" CVPR, 2005. Vol. 1. IEEE, 2005
.. [2] S.Perreault and P.Hebert. Median filtering in constant time.
Trans. Image Processing, 16(9):2389-2394, 2007.
"""
from __future__ import division
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from skimage import data, transform
from skimage.util import img_as_ubyte
from skimage.morphology import disk
from skimage.filters import rank
matplotlib.rcParams['font.size'] = 9
def windowed_histogram_similarity(image, selem, reference_hist, n_bins):
# Compute normalized windowed histogram feature vector for each pixel
px_histograms = rank.windowed_histogram(image, selem, n_bins=n_bins)
# Reshape coin histogram to (1,1,N) for broadcast when we want to use it in
# arithmetic operations with the windowed histograms from the image
reference_hist = reference_hist.reshape((1, 1) + reference_hist.shape)
# Compute Chi squared distance metric: sum((X-Y)^2 / (X+Y));
# a measure of distance between histograms
X = px_histograms
Y = reference_hist
num = (X - Y) ** 2
denom = X + Y
denom[denom == 0] = np.infty
frac = num / denom
chi_sqr = 0.5 * np.sum(frac, axis=2)
# Generate a similarity measure. It needs to be low when distance is high
# and high when distance is low; taking the reciprocal will do this.
# Chi squared will always be >= 0, add small value to prevent divide by 0.
similarity = 1 / (chi_sqr + 1.0e-4)
return similarity
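# Quick sanity note (added): for two identical histograms the chi-squared
# distance is zero, so the similarity peaks at 1 / 1.0e-4 = 10000; the hot
# regions in the overlays below therefore mark the best matches.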
# Load the `skimage.data.coins` image
img = img_as_ubyte(data.coins())
# Quantize to 16 levels of greyscale; this way the output image will have a
# 16-dimensional feature vector per pixel
quantized_img = img // 16
# Select the coin from the 4th column, second row.
# Co-ordinate ordering: [x1,y1,x2,y2]
coin_coords = [184, 100, 228, 148] # 44 x 44 region
coin = quantized_img[coin_coords[1]:coin_coords[3],
coin_coords[0]:coin_coords[2]]
# Compute coin histogram and normalize
coin_hist, _ = np.histogram(coin.flatten(), bins=16, range=(0, 16))
coin_hist = coin_hist.astype(float) / np.sum(coin_hist)
# Compute a disk shaped mask that will define the shape of our sliding window
# Example coin is ~44px across, so make a disk 61px wide (2 * rad + 1) to be
# big enough for other coins too.
selem = disk(30)
# Compute the similarity across the complete image
similarity = windowed_histogram_similarity(quantized_img, selem, coin_hist,
coin_hist.shape[0])
# Now try a rotated image
rotated_img = img_as_ubyte(transform.rotate(img, 45.0, resize=True))
# Quantize to 16 levels as before
quantized_rotated_image = rotated_img // 16
# Similarity on rotated image
rotated_similarity = windowed_histogram_similarity(quantized_rotated_image,
selem, coin_hist,
coin_hist.shape[0])
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(10, 10))
axes[0, 0].imshow(quantized_img, cmap='gray')
axes[0, 0].set_title('Quantized image')
axes[0, 0].axis('off')
axes[0, 1].imshow(coin, cmap='gray')
axes[0, 1].set_title('Coin from 2nd row, 4th column')
axes[0, 1].axis('off')
axes[1, 0].imshow(img, cmap='gray')
axes[1, 0].imshow(similarity, cmap='hot', alpha=0.5)
axes[1, 0].set_title('Original image with overlaid similarity')
axes[1, 0].axis('off')
axes[1, 1].imshow(rotated_img, cmap='gray')
axes[1, 1].imshow(rotated_similarity, cmap='hot', alpha=0.5)
axes[1, 1].set_title('Rotated image with overlaid similarity')
axes[1, 1].axis('off')
plt.tight_layout()
plt.show()
| bsd-3-clause |
gfyoung/pandas | pandas/tests/scalar/timedelta/test_timedelta.py | 2 | 19427 | """ test the scalar Timedelta """
from datetime import timedelta
import numpy as np
import pytest
from pandas._libs.tslibs import NaT, iNaT
import pandas as pd
from pandas import Timedelta, TimedeltaIndex, offsets, to_timedelta
import pandas._testing as tm
class TestTimedeltaUnaryOps:
def test_unary_ops(self):
td = Timedelta(10, unit="d")
# __neg__, __pos__
assert -td == Timedelta(-10, unit="d")
assert -td == Timedelta("-10d")
assert +td == Timedelta(10, unit="d")
# __abs__, __abs__(__neg__)
assert abs(td) == td
assert abs(-td) == td
assert abs(-td) == Timedelta("10d")
class TestTimedeltas:
@pytest.mark.parametrize(
"unit, value, expected",
[
("us", 9.999, 9999),
("ms", 9.999999, 9999999),
("s", 9.999999999, 9999999999),
],
)
def test_rounding_on_int_unit_construction(self, unit, value, expected):
# GH 12690
result = Timedelta(value, unit=unit)
assert result.value == expected
result = Timedelta(str(value) + unit)
assert result.value == expected
def test_total_seconds_scalar(self):
# see gh-10939
rng = Timedelta("1 days, 10:11:12.100123456")
expt = 1 * 86400 + 10 * 3600 + 11 * 60 + 12 + 100123456.0 / 1e9
tm.assert_almost_equal(rng.total_seconds(), expt)
rng = Timedelta(np.nan)
assert np.isnan(rng.total_seconds())
def test_conversion(self):
for td in [Timedelta(10, unit="d"), Timedelta("1 days, 10:11:12.012345")]:
pydt = td.to_pytimedelta()
assert td == Timedelta(pydt)
assert td == pydt
assert isinstance(pydt, timedelta) and not isinstance(pydt, Timedelta)
assert td == np.timedelta64(td.value, "ns")
td64 = td.to_timedelta64()
assert td64 == np.timedelta64(td.value, "ns")
assert td == td64
assert isinstance(td64, np.timedelta64)
# this is NOT equal and cannot be roundtripped (because of the nanos)
td = Timedelta("1 days, 10:11:12.012345678")
assert td != td.to_pytimedelta()
def test_fields(self):
def check(value):
# that we are int
assert isinstance(value, int)
# compat to datetime.timedelta
rng = to_timedelta("1 days, 10:11:12")
assert rng.days == 1
assert rng.seconds == 10 * 3600 + 11 * 60 + 12
assert rng.microseconds == 0
assert rng.nanoseconds == 0
msg = "'Timedelta' object has no attribute '{}'"
with pytest.raises(AttributeError, match=msg.format("hours")):
rng.hours
with pytest.raises(AttributeError, match=msg.format("minutes")):
rng.minutes
with pytest.raises(AttributeError, match=msg.format("milliseconds")):
rng.milliseconds
# GH 10050
check(rng.days)
check(rng.seconds)
check(rng.microseconds)
check(rng.nanoseconds)
td = Timedelta("-1 days, 10:11:12")
assert abs(td) == Timedelta("13:48:48")
assert str(td) == "-1 days +10:11:12"
assert -td == Timedelta("0 days 13:48:48")
assert -Timedelta("-1 days, 10:11:12").value == 49728000000000
assert Timedelta("-1 days, 10:11:12").value == -49728000000000
rng = to_timedelta("-1 days, 10:11:12.100123456")
assert rng.days == -1
assert rng.seconds == 10 * 3600 + 11 * 60 + 12
assert rng.microseconds == 100 * 1000 + 123
assert rng.nanoseconds == 456
msg = "'Timedelta' object has no attribute '{}'"
with pytest.raises(AttributeError, match=msg.format("hours")):
rng.hours
with pytest.raises(AttributeError, match=msg.format("minutes")):
rng.minutes
with pytest.raises(AttributeError, match=msg.format("milliseconds")):
rng.milliseconds
# components
tup = to_timedelta(-1, "us").components
assert tup.days == -1
assert tup.hours == 23
assert tup.minutes == 59
assert tup.seconds == 59
assert tup.milliseconds == 999
assert tup.microseconds == 999
assert tup.nanoseconds == 0
# GH 10050
check(tup.days)
check(tup.hours)
check(tup.minutes)
check(tup.seconds)
check(tup.milliseconds)
check(tup.microseconds)
check(tup.nanoseconds)
tup = Timedelta("-1 days 1 us").components
assert tup.days == -2
assert tup.hours == 23
assert tup.minutes == 59
assert tup.seconds == 59
assert tup.milliseconds == 999
assert tup.microseconds == 999
assert tup.nanoseconds == 0
def test_iso_conversion(self):
# GH #21877
expected = Timedelta(1, unit="s")
assert to_timedelta("P0DT0H0M1S") == expected
def test_nat_converters(self):
result = to_timedelta("nat").to_numpy()
assert result.dtype.kind == "M"
assert result.astype("int64") == iNaT
result = to_timedelta("nan").to_numpy()
assert result.dtype.kind == "M"
assert result.astype("int64") == iNaT
@pytest.mark.parametrize(
"unit, np_unit",
[(value, "W") for value in ["W", "w"]]
+ [(value, "D") for value in ["D", "d", "days", "day", "Days", "Day"]]
+ [
(value, "m")
for value in [
"m",
"minute",
"min",
"minutes",
"t",
"Minute",
"Min",
"Minutes",
"T",
]
]
+ [
(value, "s")
for value in [
"s",
"seconds",
"sec",
"second",
"S",
"Seconds",
"Sec",
"Second",
]
]
+ [
(value, "ms")
for value in [
"ms",
"milliseconds",
"millisecond",
"milli",
"millis",
"l",
"MS",
"Milliseconds",
"Millisecond",
"Milli",
"Millis",
"L",
]
]
+ [
(value, "us")
for value in [
"us",
"microseconds",
"microsecond",
"micro",
"micros",
"u",
"US",
"Microseconds",
"Microsecond",
"Micro",
"Micros",
"U",
]
]
+ [
(value, "ns")
for value in [
"ns",
"nanoseconds",
"nanosecond",
"nano",
"nanos",
"n",
"NS",
"Nanoseconds",
"Nanosecond",
"Nano",
"Nanos",
"N",
]
],
)
@pytest.mark.parametrize("wrapper", [np.array, list, pd.Index])
def test_unit_parser(self, unit, np_unit, wrapper):
# validate all units, GH 6855, GH 21762
# array-likes
expected = TimedeltaIndex(
[np.timedelta64(i, np_unit) for i in np.arange(5).tolist()]
)
result = to_timedelta(wrapper(range(5)), unit=unit)
tm.assert_index_equal(result, expected)
result = TimedeltaIndex(wrapper(range(5)), unit=unit)
tm.assert_index_equal(result, expected)
str_repr = [f"{x}{unit}" for x in np.arange(5)]
result = to_timedelta(wrapper(str_repr))
tm.assert_index_equal(result, expected)
result = to_timedelta(wrapper(str_repr))
tm.assert_index_equal(result, expected)
# scalar
expected = Timedelta(np.timedelta64(2, np_unit).astype("timedelta64[ns]"))
result = to_timedelta(2, unit=unit)
assert result == expected
result = Timedelta(2, unit=unit)
assert result == expected
result = to_timedelta(f"2{unit}")
assert result == expected
result = Timedelta(f"2{unit}")
assert result == expected
@pytest.mark.parametrize("unit", ["Y", "y", "M"])
def test_unit_m_y_raises(self, unit):
msg = "Units 'M', 'Y', and 'y' are no longer supported"
with pytest.raises(ValueError, match=msg):
Timedelta(10, unit)
with pytest.raises(ValueError, match=msg):
to_timedelta(10, unit)
with pytest.raises(ValueError, match=msg):
to_timedelta([1, 2], unit)
def test_numeric_conversions(self):
assert Timedelta(0) == np.timedelta64(0, "ns")
assert Timedelta(10) == np.timedelta64(10, "ns")
assert Timedelta(10, unit="ns") == np.timedelta64(10, "ns")
assert Timedelta(10, unit="us") == np.timedelta64(10, "us")
assert Timedelta(10, unit="ms") == np.timedelta64(10, "ms")
assert Timedelta(10, unit="s") == np.timedelta64(10, "s")
assert Timedelta(10, unit="d") == np.timedelta64(10, "D")
def test_timedelta_conversions(self):
assert Timedelta(timedelta(seconds=1)) == np.timedelta64(1, "s").astype(
"m8[ns]"
)
assert Timedelta(timedelta(microseconds=1)) == np.timedelta64(1, "us").astype(
"m8[ns]"
)
assert Timedelta(timedelta(days=1)) == np.timedelta64(1, "D").astype("m8[ns]")
def test_to_numpy_alias(self):
# GH 24653: alias .to_numpy() for scalars
td = Timedelta("10m7s")
assert td.to_timedelta64() == td.to_numpy()
@pytest.mark.parametrize(
"freq,s1,s2",
[
# This first case has s1, s2 being the same as t1,t2 below
(
"N",
Timedelta("1 days 02:34:56.789123456"),
Timedelta("-1 days 02:34:56.789123456"),
),
(
"U",
Timedelta("1 days 02:34:56.789123000"),
Timedelta("-1 days 02:34:56.789123000"),
),
(
"L",
Timedelta("1 days 02:34:56.789000000"),
Timedelta("-1 days 02:34:56.789000000"),
),
("S", Timedelta("1 days 02:34:57"), Timedelta("-1 days 02:34:57")),
("2S", Timedelta("1 days 02:34:56"), Timedelta("-1 days 02:34:56")),
("5S", Timedelta("1 days 02:34:55"), Timedelta("-1 days 02:34:55")),
("T", Timedelta("1 days 02:35:00"), Timedelta("-1 days 02:35:00")),
("12T", Timedelta("1 days 02:36:00"), Timedelta("-1 days 02:36:00")),
("H", Timedelta("1 days 03:00:00"), Timedelta("-1 days 03:00:00")),
("d", Timedelta("1 days"), Timedelta("-1 days")),
],
)
def test_round(self, freq, s1, s2):
t1 = Timedelta("1 days 02:34:56.789123456")
t2 = Timedelta("-1 days 02:34:56.789123456")
r1 = t1.round(freq)
assert r1 == s1
r2 = t2.round(freq)
assert r2 == s2
def test_round_invalid(self):
t1 = Timedelta("1 days 02:34:56.789123456")
for freq, msg in [
("Y", "<YearEnd: month=12> is a non-fixed frequency"),
("M", "<MonthEnd> is a non-fixed frequency"),
("foobar", "Invalid frequency: foobar"),
]:
with pytest.raises(ValueError, match=msg):
t1.round(freq)
def test_contains(self):
# Checking for any NaT-like objects
# GH 13603
td = to_timedelta(range(5), unit="d") + offsets.Hour(1)
for v in [NaT, None, float("nan"), np.nan]:
assert not (v in td)
td = to_timedelta([NaT])
for v in [NaT, None, float("nan"), np.nan]:
assert v in td
def test_identity(self):
td = Timedelta(10, unit="d")
assert isinstance(td, Timedelta)
assert isinstance(td, timedelta)
def test_short_format_converters(self):
def conv(v):
return v.astype("m8[ns]")
assert Timedelta("10") == np.timedelta64(10, "ns")
assert Timedelta("10ns") == np.timedelta64(10, "ns")
assert Timedelta("100") == np.timedelta64(100, "ns")
assert Timedelta("100ns") == np.timedelta64(100, "ns")
assert Timedelta("1000") == np.timedelta64(1000, "ns")
assert Timedelta("1000ns") == np.timedelta64(1000, "ns")
assert Timedelta("1000NS") == np.timedelta64(1000, "ns")
assert Timedelta("10us") == np.timedelta64(10000, "ns")
assert Timedelta("100us") == np.timedelta64(100000, "ns")
assert Timedelta("1000us") == np.timedelta64(1000000, "ns")
assert Timedelta("1000Us") == np.timedelta64(1000000, "ns")
assert Timedelta("1000uS") == np.timedelta64(1000000, "ns")
assert Timedelta("1ms") == np.timedelta64(1000000, "ns")
assert Timedelta("10ms") == np.timedelta64(10000000, "ns")
assert Timedelta("100ms") == np.timedelta64(100000000, "ns")
assert Timedelta("1000ms") == np.timedelta64(1000000000, "ns")
assert Timedelta("-1s") == -np.timedelta64(1000000000, "ns")
assert Timedelta("1s") == np.timedelta64(1000000000, "ns")
assert Timedelta("10s") == np.timedelta64(10000000000, "ns")
assert Timedelta("100s") == np.timedelta64(100000000000, "ns")
assert Timedelta("1000s") == np.timedelta64(1000000000000, "ns")
assert Timedelta("1d") == conv(np.timedelta64(1, "D"))
assert Timedelta("-1d") == -conv(np.timedelta64(1, "D"))
assert Timedelta("1D") == conv(np.timedelta64(1, "D"))
assert Timedelta("10D") == conv(np.timedelta64(10, "D"))
assert Timedelta("100D") == conv(np.timedelta64(100, "D"))
assert Timedelta("1000D") == conv(np.timedelta64(1000, "D"))
assert Timedelta("10000D") == conv(np.timedelta64(10000, "D"))
# space
assert Timedelta(" 10000D ") == conv(np.timedelta64(10000, "D"))
assert Timedelta(" - 10000D ") == -conv(np.timedelta64(10000, "D"))
# invalid
msg = "invalid unit abbreviation"
with pytest.raises(ValueError, match=msg):
Timedelta("1foo")
msg = "unit abbreviation w/o a number"
with pytest.raises(ValueError, match=msg):
Timedelta("foo")
def test_full_format_converters(self):
def conv(v):
return v.astype("m8[ns]")
d1 = np.timedelta64(1, "D")
assert Timedelta("1days") == conv(d1)
assert Timedelta("1days,") == conv(d1)
assert Timedelta("- 1days,") == -conv(d1)
assert Timedelta("00:00:01") == conv(np.timedelta64(1, "s"))
assert Timedelta("06:00:01") == conv(np.timedelta64(6 * 3600 + 1, "s"))
assert Timedelta("06:00:01.0") == conv(np.timedelta64(6 * 3600 + 1, "s"))
assert Timedelta("06:00:01.01") == conv(
np.timedelta64(1000 * (6 * 3600 + 1) + 10, "ms")
)
assert Timedelta("- 1days, 00:00:01") == conv(-d1 + np.timedelta64(1, "s"))
assert Timedelta("1days, 06:00:01") == conv(
d1 + np.timedelta64(6 * 3600 + 1, "s")
)
assert Timedelta("1days, 06:00:01.01") == conv(
d1 + np.timedelta64(1000 * (6 * 3600 + 1) + 10, "ms")
)
# invalid
msg = "have leftover units"
with pytest.raises(ValueError, match=msg):
Timedelta("- 1days, 00")
def test_pickle(self):
v = Timedelta("1 days 10:11:12.0123456")
v_p = tm.round_trip_pickle(v)
assert v == v_p
def test_timedelta_hash_equality(self):
# GH 11129
v = Timedelta(1, "D")
td = timedelta(days=1)
assert hash(v) == hash(td)
d = {td: 2}
assert d[v] == 2
tds = [Timedelta(seconds=1) + Timedelta(days=n) for n in range(20)]
assert all(hash(td) == hash(td.to_pytimedelta()) for td in tds)
# python timedeltas drop ns resolution
ns_td = Timedelta(1, "ns")
assert hash(ns_td) != hash(ns_td.to_pytimedelta())
def test_implementation_limits(self):
min_td = Timedelta(Timedelta.min)
max_td = Timedelta(Timedelta.max)
# GH 12727
# timedelta limits correspond to int64 boundaries
assert min_td.value == np.iinfo(np.int64).min + 1
assert max_td.value == np.iinfo(np.int64).max
# Beyond lower limit, a NAT before the Overflow
assert (min_td - Timedelta(1, "ns")) is NaT
msg = "int too (large|big) to convert"
with pytest.raises(OverflowError, match=msg):
min_td - Timedelta(2, "ns")
with pytest.raises(OverflowError, match=msg):
max_td + Timedelta(1, "ns")
# Same tests using the internal nanosecond values
td = Timedelta(min_td.value - 1, "ns")
assert td is NaT
with pytest.raises(OverflowError, match=msg):
Timedelta(min_td.value - 2, "ns")
with pytest.raises(OverflowError, match=msg):
Timedelta(max_td.value + 1, "ns")
def test_total_seconds_precision(self):
# GH 19458
assert Timedelta("30S").total_seconds() == 30.0
assert Timedelta("0").total_seconds() == 0.0
assert Timedelta("-2S").total_seconds() == -2.0
assert Timedelta("5.324S").total_seconds() == 5.324
assert (Timedelta("30S").total_seconds() - 30.0) < 1e-20
assert (30.0 - Timedelta("30S").total_seconds()) < 1e-20
def test_resolution_string(self):
assert Timedelta(days=1).resolution_string == "D"
assert Timedelta(days=1, hours=6).resolution_string == "H"
assert Timedelta(days=1, minutes=6).resolution_string == "T"
assert Timedelta(days=1, seconds=6).resolution_string == "S"
assert Timedelta(days=1, milliseconds=6).resolution_string == "L"
assert Timedelta(days=1, microseconds=6).resolution_string == "U"
assert Timedelta(days=1, nanoseconds=6).resolution_string == "N"
def test_resolution_deprecated(self):
# GH#21344
td = Timedelta(days=4, hours=3)
result = td.resolution
assert result == Timedelta(nanoseconds=1)
# Check that the attribute is available on the class, mirroring
# the stdlib timedelta behavior
result = Timedelta.resolution
assert result == Timedelta(nanoseconds=1)
@pytest.mark.parametrize(
"value, expected",
[
(Timedelta("10S"), True),
(Timedelta("-10S"), True),
(Timedelta(10, unit="ns"), True),
(Timedelta(0, unit="ns"), False),
(Timedelta(-10, unit="ns"), True),
(Timedelta(None), True),
(NaT, True),
],
)
def test_truthiness(value, expected):
# https://github.com/pandas-dev/pandas/issues/21484
assert bool(value) is expected
def test_timedelta_attribute_precision():
# GH 31354
td = Timedelta(1552211999999999872, unit="ns")
result = td.days * 86400
result += td.seconds
result *= 1000000
result += td.microseconds
result *= 1000
result += td.nanoseconds
expected = td.value
assert result == expected
| bsd-3-clause |
kaiserroll14/301finalproject | main/pandas/io/packers.py | 9 | 23542 | """
Msgpack serializer support for reading and writing pandas data structures
to disk
"""
# portions of msgpack_numpy package, by Lev Givon were incorporated
# into this module (and tests_packers.py)
"""
License
=======
Copyright (c) 2013, Lev Givon.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of Lev Givon nor the names of any
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import os
from datetime import datetime, date, timedelta
from dateutil.parser import parse
import numpy as np
from pandas import compat
from pandas.compat import u, PY3
from pandas import (
Timestamp, Period, Series, DataFrame, Panel, Panel4D,
Index, MultiIndex, Int64Index, PeriodIndex, DatetimeIndex, Float64Index,
NaT
)
from pandas.sparse.api import SparseSeries, SparseDataFrame, SparsePanel
from pandas.sparse.array import BlockIndex, IntIndex
from pandas.core.generic import NDFrame
from pandas.core.common import needs_i8_conversion
from pandas.io.common import get_filepath_or_buffer
from pandas.core.internals import BlockManager, make_block
import pandas.core.internals as internals
from pandas.msgpack import Unpacker as _Unpacker, Packer as _Packer, ExtType
# until we can pass this into our conversion functions,
# this is pretty hacky
compressor = None
def to_msgpack(path_or_buf, *args, **kwargs):
"""
msgpack (serialize) object to input file path
THIS IS AN EXPERIMENTAL LIBRARY and the storage format
may not be stable until a future release.
Parameters
----------
path_or_buf : string File path, buffer-like, or None
if None, return generated string
args : an object or objects to serialize
append : boolean whether to append to an existing msgpack
(default is False)
compress : type of compressor (zlib or blosc), default to None (no
compression)
"""
global compressor
compressor = kwargs.pop('compress', None)
append = kwargs.pop('append', None)
if append:
mode = 'a+b'
else:
mode = 'wb'
def writer(fh):
for a in args:
fh.write(pack(a, **kwargs))
if isinstance(path_or_buf, compat.string_types):
with open(path_or_buf, mode) as fh:
writer(fh)
elif path_or_buf is None:
buf = compat.BytesIO()
writer(buf)
return buf.getvalue()
else:
writer(path_or_buf)
def read_msgpack(path_or_buf, iterator=False, **kwargs):
"""
Load msgpack pandas object from the specified
file path
THIS IS AN EXPERIMENTAL LIBRARY and the storage format
may not be stable until a future release.
Parameters
----------
path_or_buf : string File path, BytesIO like or string
iterator : boolean, if True, return an iterator to the unpacker
(default is False)
Returns
-------
obj : type of object stored in file
"""
path_or_buf, _, _ = get_filepath_or_buffer(path_or_buf)
if iterator:
return Iterator(path_or_buf)
def read(fh):
items = list(unpack(fh, **kwargs))
if len(items) == 1:
return items[0]
return items
# see if we have an actual file
if isinstance(path_or_buf, compat.string_types):
try:
exists = os.path.exists(path_or_buf)
except (TypeError, ValueError):
exists = False
if exists:
with open(path_or_buf, 'rb') as fh:
return read(fh)
# treat as a binary-like
if isinstance(path_or_buf, compat.binary_type):
fh = None
try:
fh = compat.BytesIO(path_or_buf)
return read(fh)
finally:
if fh is not None:
fh.close()
# a buffer like
if hasattr(path_or_buf, 'read') and compat.callable(path_or_buf.read):
return read(path_or_buf)
raise ValueError('path_or_buf needs to be a string file path or file-like')
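# --- Illustrative usage sketch (added; not part of the original module). ---
# Assuming a pandas build where this experimental msgpack support is exposed
# as pd.to_msgpack / pd.read_msgpack, a round trip would look like this
# ('data.msg' is a hypothetical file name):
#
#   import pandas as pd
#   df = pd.DataFrame({'a': [1, 2, 3]})
#   pd.to_msgpack('data.msg', df, compress='zlib')
#   df2 = pd.read_msgpack('data.msg')
#   assert df.equals(df2)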
dtype_dict = {21: np.dtype('M8[ns]'),
u('datetime64[ns]'): np.dtype('M8[ns]'),
u('datetime64[us]'): np.dtype('M8[us]'),
22: np.dtype('m8[ns]'),
u('timedelta64[ns]'): np.dtype('m8[ns]'),
u('timedelta64[us]'): np.dtype('m8[us]'),
# this is platform int, which we need to remap to np.int64
# for compat on windows platforms
7: np.dtype('int64'),
}
def dtype_for(t):
""" return my dtype mapping, whether number or name """
if t in dtype_dict:
return dtype_dict[t]
return np.typeDict[t]
c2f_dict = {'complex': np.float64,
'complex128': np.float64,
'complex64': np.float32}
# numpy 1.6.1 compat
if hasattr(np, 'float128'):
c2f_dict['complex256'] = np.float128
def c2f(r, i, ctype_name):
"""
Convert strings to complex number instance with specified numpy type.
"""
ftype = c2f_dict[ctype_name]
return np.typeDict[ctype_name](ftype(r) + 1j * ftype(i))
def convert(values):
""" convert the numpy values to a list """
dtype = values.dtype
if needs_i8_conversion(dtype):
values = values.view('i8')
v = values.ravel()
# convert object
if dtype == np.object_:
return v.tolist()
if compressor == 'zlib':
# return string arrays like they are
if dtype == np.object_:
return v.tolist()
# convert to a bytes array
v = v.tostring()
import zlib
return ExtType(0, zlib.compress(v))
elif compressor == 'blosc':
# return string arrays like they are
if dtype == np.object_:
return v.tolist()
# convert to a bytes array
v = v.tostring()
import blosc
return ExtType(0, blosc.compress(v, typesize=dtype.itemsize))
# ndarray (on original dtype)
return ExtType(0, v.tostring())
def unconvert(values, dtype, compress=None):
as_is_ext = isinstance(values, ExtType) and values.code == 0
if as_is_ext:
values = values.data
if dtype == np.object_:
return np.array(values, dtype=object)
if not as_is_ext:
values = values.encode('latin1')
if compress == 'zlib':
import zlib
values = zlib.decompress(values)
return np.frombuffer(values, dtype=dtype)
elif compress == 'blosc':
import blosc
values = blosc.decompress(values)
return np.frombuffer(values, dtype=dtype)
# from a string
return np.fromstring(values, dtype=dtype)
def encode(obj):
"""
Data encoder
"""
tobj = type(obj)
if isinstance(obj, Index):
if isinstance(obj, PeriodIndex):
return {'typ': 'period_index',
'klass': obj.__class__.__name__,
'name': getattr(obj, 'name', None),
'freq': getattr(obj, 'freqstr', None),
'dtype': obj.dtype.name,
'data': convert(obj.asi8),
'compress': compressor}
elif isinstance(obj, DatetimeIndex):
tz = getattr(obj, 'tz', None)
# store tz info and data as UTC
if tz is not None:
tz = tz.zone
obj = obj.tz_convert('UTC')
return {'typ': 'datetime_index',
'klass': obj.__class__.__name__,
'name': getattr(obj, 'name', None),
'dtype': obj.dtype.name,
'data': convert(obj.asi8),
'freq': getattr(obj, 'freqstr', None),
'tz': tz,
'compress': compressor}
elif isinstance(obj, MultiIndex):
return {'typ': 'multi_index',
'klass': obj.__class__.__name__,
'names': getattr(obj, 'names', None),
'dtype': obj.dtype.name,
'data': convert(obj.values),
'compress': compressor}
else:
return {'typ': 'index',
'klass': obj.__class__.__name__,
'name': getattr(obj, 'name', None),
'dtype': obj.dtype.name,
'data': convert(obj.values),
'compress': compressor}
elif isinstance(obj, Series):
if isinstance(obj, SparseSeries):
raise NotImplementedError(
'msgpack sparse series is not implemented'
)
#d = {'typ': 'sparse_series',
# 'klass': obj.__class__.__name__,
# 'dtype': obj.dtype.name,
# 'index': obj.index,
# 'sp_index': obj.sp_index,
# 'sp_values': convert(obj.sp_values),
# 'compress': compressor}
#for f in ['name', 'fill_value', 'kind']:
# d[f] = getattr(obj, f, None)
#return d
else:
return {'typ': 'series',
'klass': obj.__class__.__name__,
'name': getattr(obj, 'name', None),
'index': obj.index,
'dtype': obj.dtype.name,
'data': convert(obj.values),
'compress': compressor}
elif issubclass(tobj, NDFrame):
if isinstance(obj, SparseDataFrame):
raise NotImplementedError(
'msgpack sparse frame is not implemented'
)
#d = {'typ': 'sparse_dataframe',
# 'klass': obj.__class__.__name__,
# 'columns': obj.columns}
#for f in ['default_fill_value', 'default_kind']:
# d[f] = getattr(obj, f, None)
#d['data'] = dict([(name, ss)
# for name, ss in compat.iteritems(obj)])
#return d
elif isinstance(obj, SparsePanel):
raise NotImplementedError(
'msgpack sparse frame is not implemented'
)
#d = {'typ': 'sparse_panel',
# 'klass': obj.__class__.__name__,
# 'items': obj.items}
#for f in ['default_fill_value', 'default_kind']:
# d[f] = getattr(obj, f, None)
#d['data'] = dict([(name, df)
# for name, df in compat.iteritems(obj)])
#return d
else:
data = obj._data
if not data.is_consolidated():
data = data.consolidate()
# the block manager
return {'typ': 'block_manager',
'klass': obj.__class__.__name__,
'axes': data.axes,
'blocks': [{'items': data.items.take(b.mgr_locs),
'locs': b.mgr_locs.as_array,
'values': convert(b.values),
'shape': b.values.shape,
'dtype': b.dtype.name,
'klass': b.__class__.__name__,
'compress': compressor
} for b in data.blocks]}
elif isinstance(obj, (datetime, date, np.datetime64, timedelta,
np.timedelta64)):
if isinstance(obj, Timestamp):
tz = obj.tzinfo
if tz is not None:
tz = tz.zone
offset = obj.offset
if offset is not None:
offset = offset.freqstr
return {'typ': 'timestamp',
'value': obj.value,
'offset': offset,
'tz': tz}
elif isinstance(obj, np.timedelta64):
return {'typ': 'timedelta64',
'data': obj.view('i8')}
elif isinstance(obj, timedelta):
return {'typ': 'timedelta',
'data': (obj.days, obj.seconds, obj.microseconds)}
elif isinstance(obj, np.datetime64):
return {'typ': 'datetime64',
'data': str(obj)}
elif isinstance(obj, datetime):
return {'typ': 'datetime',
'data': obj.isoformat()}
elif isinstance(obj, date):
return {'typ': 'date',
'data': obj.isoformat()}
raise Exception("cannot encode this datetimelike object: %s" % obj)
elif isinstance(obj, Period):
return {'typ': 'period',
'ordinal': obj.ordinal,
'freq': obj.freq}
elif isinstance(obj, BlockIndex):
return {'typ': 'block_index',
'klass': obj.__class__.__name__,
'blocs': obj.blocs,
'blengths': obj.blengths,
'length': obj.length}
elif isinstance(obj, IntIndex):
return {'typ': 'int_index',
'klass': obj.__class__.__name__,
'indices': obj.indices,
'length': obj.length}
elif isinstance(obj, np.ndarray):
return {'typ': 'ndarray',
'shape': obj.shape,
'ndim': obj.ndim,
'dtype': obj.dtype.name,
'data': convert(obj),
'compress': compressor}
elif isinstance(obj, np.number):
if np.iscomplexobj(obj):
return {'typ': 'np_scalar',
'sub_typ': 'np_complex',
'dtype': obj.dtype.name,
'real': obj.real.__repr__(),
'imag': obj.imag.__repr__()}
else:
return {'typ': 'np_scalar',
'dtype': obj.dtype.name,
'data': obj.__repr__()}
elif isinstance(obj, complex):
return {'typ': 'np_complex',
'real': obj.real.__repr__(),
'imag': obj.imag.__repr__()}
return obj
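# Note (added for clarity): `encode` returns plain dicts tagged with a 'typ'
# key; `decode` below is installed as the msgpack object_hook (see `unpack`),
# so each tagged dict is rebuilt into the corresponding pandas/numpy object.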
def decode(obj):
"""
Decoder for deserializing numpy data types.
"""
typ = obj.get('typ')
if typ is None:
return obj
elif typ == 'timestamp':
return Timestamp(obj['value'], tz=obj['tz'], offset=obj['offset'])
elif typ == 'period':
return Period(ordinal=obj['ordinal'], freq=obj['freq'])
elif typ == 'index':
dtype = dtype_for(obj['dtype'])
data = unconvert(obj['data'], dtype,
obj.get('compress'))
return globals()[obj['klass']](data, dtype=dtype, name=obj['name'])
elif typ == 'multi_index':
dtype = dtype_for(obj['dtype'])
data = unconvert(obj['data'], dtype,
obj.get('compress'))
data = [tuple(x) for x in data]
return globals()[obj['klass']].from_tuples(data, names=obj['names'])
elif typ == 'period_index':
data = unconvert(obj['data'], np.int64, obj.get('compress'))
d = dict(name=obj['name'], freq=obj['freq'])
return globals()[obj['klass']](data, **d)
elif typ == 'datetime_index':
data = unconvert(obj['data'], np.int64, obj.get('compress'))
d = dict(name=obj['name'], freq=obj['freq'], verify_integrity=False)
result = globals()[obj['klass']](data, **d)
tz = obj['tz']
# reverse tz conversion
if tz is not None:
result = result.tz_localize('UTC').tz_convert(tz)
return result
elif typ == 'series':
dtype = dtype_for(obj['dtype'])
index = obj['index']
return globals()[obj['klass']](unconvert(obj['data'], dtype,
obj['compress']),
index=index,
dtype=dtype,
name=obj['name'])
elif typ == 'block_manager':
axes = obj['axes']
def create_block(b):
values = unconvert(b['values'], dtype_for(b['dtype']),
b['compress']).reshape(b['shape'])
# locs handles duplicate column names, and should be used instead of items; see GH 9618
if 'locs' in b:
placement = b['locs']
else:
placement = axes[0].get_indexer(b['items'])
return make_block(values=values,
klass=getattr(internals, b['klass']),
placement=placement)
blocks = [create_block(b) for b in obj['blocks']]
return globals()[obj['klass']](BlockManager(blocks, axes))
elif typ == 'datetime':
return parse(obj['data'])
elif typ == 'datetime64':
return np.datetime64(parse(obj['data']))
elif typ == 'date':
return parse(obj['data']).date()
elif typ == 'timedelta':
return timedelta(*obj['data'])
elif typ == 'timedelta64':
return np.timedelta64(int(obj['data']))
#elif typ == 'sparse_series':
# dtype = dtype_for(obj['dtype'])
# return globals()[obj['klass']](
# unconvert(obj['sp_values'], dtype, obj['compress']),
# sparse_index=obj['sp_index'], index=obj['index'],
# fill_value=obj['fill_value'], kind=obj['kind'], name=obj['name'])
#elif typ == 'sparse_dataframe':
# return globals()[obj['klass']](
# obj['data'], columns=obj['columns'],
# default_fill_value=obj['default_fill_value'],
# default_kind=obj['default_kind']
# )
#elif typ == 'sparse_panel':
# return globals()[obj['klass']](
# obj['data'], items=obj['items'],
# default_fill_value=obj['default_fill_value'],
# default_kind=obj['default_kind'])
elif typ == 'block_index':
return globals()[obj['klass']](obj['length'], obj['blocs'],
obj['blengths'])
elif typ == 'int_index':
return globals()[obj['klass']](obj['length'], obj['indices'])
elif typ == 'ndarray':
return unconvert(obj['data'], np.typeDict[obj['dtype']],
obj.get('compress')).reshape(obj['shape'])
elif typ == 'np_scalar':
if obj.get('sub_typ') == 'np_complex':
return c2f(obj['real'], obj['imag'], obj['dtype'])
else:
dtype = dtype_for(obj['dtype'])
try:
return dtype(obj['data'])
except:
return dtype.type(obj['data'])
elif typ == 'np_complex':
return complex(obj['real'] + '+' + obj['imag'] + 'j')
elif isinstance(obj, (dict, list, set)):
return obj
else:
return obj
def pack(o, default=encode,
encoding='latin1', unicode_errors='strict', use_single_float=False,
autoreset=1, use_bin_type=1):
"""
Pack an object and return the packed bytes.
"""
return Packer(default=default, encoding=encoding,
unicode_errors=unicode_errors,
use_single_float=use_single_float,
autoreset=autoreset,
use_bin_type=use_bin_type).pack(o)
def unpack(packed, object_hook=decode,
list_hook=None, use_list=False, encoding='latin1',
unicode_errors='strict', object_pairs_hook=None,
max_buffer_size=0, ext_hook=ExtType):
"""
Unpack a packed object, return an iterator
Note: packed lists will be returned as tuples
"""
return Unpacker(packed, object_hook=object_hook,
list_hook=list_hook,
use_list=use_list, encoding=encoding,
unicode_errors=unicode_errors,
object_pairs_hook=object_pairs_hook,
max_buffer_size=max_buffer_size,
ext_hook=ext_hook)
class Packer(_Packer):
def __init__(self, default=encode,
encoding='latin1',
unicode_errors='strict',
use_single_float=False,
autoreset=1,
use_bin_type=1):
super(Packer, self).__init__(default=default,
encoding=encoding,
unicode_errors=unicode_errors,
use_single_float=use_single_float,
autoreset=autoreset,
use_bin_type=use_bin_type)
class Unpacker(_Unpacker):
def __init__(self, file_like=None, read_size=0, use_list=False,
object_hook=decode,
object_pairs_hook=None, list_hook=None, encoding='latin1',
unicode_errors='strict', max_buffer_size=0, ext_hook=ExtType):
super(Unpacker, self).__init__(file_like=file_like,
read_size=read_size,
use_list=use_list,
object_hook=object_hook,
object_pairs_hook=object_pairs_hook,
list_hook=list_hook,
encoding=encoding,
unicode_errors=unicode_errors,
max_buffer_size=max_buffer_size,
ext_hook=ext_hook)
class Iterator(object):
""" manage the unpacking iteration,
close the file on completion """
def __init__(self, path, **kwargs):
self.path = path
self.kwargs = kwargs
def __iter__(self):
needs_closing = True
try:
# see if we have an actual file
if isinstance(self.path, compat.string_types):
try:
path_exists = os.path.exists(self.path)
except TypeError:
path_exists = False
if path_exists:
fh = open(self.path, 'rb')
else:
fh = compat.BytesIO(self.path)
else:
if not hasattr(self.path, 'read'):
fh = compat.BytesIO(self.path)
else:
# a file-like
needs_closing = False
fh = self.path
unpacker = unpack(fh)
for o in unpacker:
yield o
finally:
if needs_closing:
fh.close()
| gpl-3.0 |
CompPhysics/MachineLearning | doc/src/SupportVMachines/Programs/cancer.py | 2 | 1731 | import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_breast_cancer
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
# Load the data
cancer = load_breast_cancer()
X_train, X_test, y_train, y_test = train_test_split(cancer.data,cancer.target,random_state=0)
print(X_train.shape)
print(X_test.shape)
# Logistic Regression
logreg = LogisticRegression(solver='lbfgs')
logreg.fit(X_train, y_train)
print("Test set accuracy with Logistic Regression: {:.2f}".format(logreg.score(X_test,y_test)))
# Support vector machine
svm = SVC(gamma='auto', C=100)
svm.fit(X_train, y_train)
print("Test set accuracy with SVM: {:.2f}".format(svm.score(X_test,y_test)))
# Decision Trees
deep_tree_clf = DecisionTreeClassifier(max_depth=None)
deep_tree_clf.fit(X_train, y_train)
print("Test set accuracy with Decision Trees: {:.2f}".format(deep_tree_clf.score(X_test,y_test)))
#now scale the data
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(X_train)
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)
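# Note (added): the scaler is fit on the training data only and then applied
# to both sets; fitting it on the full data would leak test-set statistics
# into the preprocessing step.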
# Logistic Regression
logreg.fit(X_train_scaled, y_train)
print("Test set accuracy Logistic Regression with scaled data: {:.2f}".format(logreg.score(X_test_scaled,y_test)))
# Support Vector Machine
svm.fit(X_train_scaled, y_train)
print("Test set accuracy SVM with scaled data: {:.2f}".format(svm.score(X_test_scaled,y_test)))
# Decision Trees
deep_tree_clf.fit(X_train_scaled, y_train)
print("Test set accuracy with Decision Trees and scaled data: {:.2f}".format(deep_tree_clf.score(X_test_scaled,y_test)))
| cc0-1.0 |
sonnyhu/scikit-learn | sklearn/metrics/cluster/supervised.py | 11 | 33436 | """Utilities to evaluate the clustering performance of models.
Functions named as *_score return a scalar value to maximize: the higher the
better.
"""
# Authors: Olivier Grisel <olivier.grisel@ensta.org>
# Wei LI <kuantkid@gmail.com>
# Diego Molla <dmolla-aliod@gmail.com>
# Arnaud Fouchet <foucheta@gmail.com>
# Thierry Guillemot <thierry.guillemot.work@gmail.com>
# License: BSD 3 clause
from math import log
from scipy.misc import comb
from scipy.sparse import coo_matrix
import numpy as np
from .expected_mutual_info_fast import expected_mutual_information
from ...utils.fixes import bincount
def comb2(n):
# the exact version is faster for k == 2: use it by default globally in
# this module instead of the float approximate variant
return comb(n, 2, exact=1)
def check_clusterings(labels_true, labels_pred):
"""Check that the two clusterings are matching 1D integer arrays."""
labels_true = np.asarray(labels_true)
labels_pred = np.asarray(labels_pred)
# input checks
if labels_true.ndim != 1:
raise ValueError(
"labels_true must be 1D: shape is %r" % (labels_true.shape,))
if labels_pred.ndim != 1:
raise ValueError(
"labels_pred must be 1D: shape is %r" % (labels_pred.shape,))
if labels_true.shape != labels_pred.shape:
raise ValueError(
"labels_true and labels_pred must have same size, got %d and %d"
% (labels_true.shape[0], labels_pred.shape[0]))
return labels_true, labels_pred
def contingency_matrix(labels_true, labels_pred, eps=None, max_n_classes=5000):
"""Build a contingency matrix describing the relationship between labels.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
eps: None or float
If a float, that value is added to all values in the contingency
matrix. This helps to stop NaN propagation.
If ``None``, nothing is adjusted.
max_n_classes : int, optional (default=5000)
Maximal number of classes handled for contingency_matrix.
This helps to avoid a MemoryError with regression targets
for mutual_information.
Returns
-------
contingency: array, shape=[n_classes_true, n_classes_pred]
Matrix :math:`C` such that :math:`C_{i, j}` is the number of samples in
true class :math:`i` and in predicted class :math:`j`. If
``eps is None``, the dtype of this array will be integer. If ``eps`` is
given, the dtype will be float.
"""
classes, class_idx = np.unique(labels_true, return_inverse=True)
clusters, cluster_idx = np.unique(labels_pred, return_inverse=True)
n_classes = classes.shape[0]
n_clusters = clusters.shape[0]
if n_classes > max_n_classes:
raise ValueError("Too many classes for a clustering metric. If you "
"want to increase the limit, pass parameter "
"max_n_classes to the scoring function")
if n_clusters > max_n_classes:
raise ValueError("Too many clusters for a clustering metric. If you "
"want to increase the limit, pass parameter "
"max_n_classes to the scoring function")
# Using coo_matrix to accelerate simple histogram calculation,
# i.e. bins are consecutive integers
# Currently, coo_matrix is faster than histogram2d for simple cases
contingency = coo_matrix((np.ones(class_idx.shape[0]),
(class_idx, cluster_idx)),
shape=(n_classes, n_clusters),
dtype=np.int).toarray()
if eps is not None:
# don't use += as contingency is integer
contingency = contingency + eps
return contingency
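# Illustrative example (added; not part of the original module): for
# labels_true = [0, 0, 1, 1] and labels_pred = [0, 1, 1, 1] this returns
# [[1, 1],
#  [0, 2]]
# i.e. the two samples of true class 0 split across both clusters, while
# both samples of true class 1 land in cluster 1.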
# clustering measures
def adjusted_rand_score(labels_true, labels_pred, max_n_classes=5000):
"""Rand index adjusted for chance.
The Rand Index computes a similarity measure between two clusterings
by considering all pairs of samples and counting pairs that are
assigned in the same or different clusters in the predicted and
true clusterings.
The raw RI score is then "adjusted for chance" into the ARI score
using the following scheme::
ARI = (RI - Expected_RI) / (max(RI) - Expected_RI)
The adjusted Rand index is thus ensured to have a value close to
0.0 for random labeling independently of the number of clusters and
samples and exactly 1.0 when the clusterings are identical (up to
a permutation).
ARI is a symmetric measure::
adjusted_rand_score(a, b) == adjusted_rand_score(b, a)
Read more in the :ref:`User Guide <adjusted_rand_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
max_n_classes: int, optional (default=5000)
Maximal number of classes handled by the adjusted_rand_score
metric. Setting it too high can lead to MemoryError or OS
freeze
Returns
-------
ari : float
Similarity score between -1.0 and 1.0. Random labelings have an ARI
close to 0.0. 1.0 stands for perfect match.
Examples
--------
Perfectly matching labelings have a score of 1 even
>>> from sklearn.metrics.cluster import adjusted_rand_score
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_rand_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Labelings that assign all classes members to the same clusters
are complete but not always pure, hence penalized::
>>> adjusted_rand_score([0, 0, 1, 2], [0, 0, 1, 1]) # doctest: +ELLIPSIS
0.57...
ARI is symmetric, so labelings that have pure clusters with members
coming from the same classes but unnecessary splits are penalized::
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 2]) # doctest: +ELLIPSIS
0.57...
If classes members are completely split across different clusters, the
assignment is totally incomplete, hence the ARI is very low::
>>> adjusted_rand_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [Hubert1985] `L. Hubert and P. Arabie, Comparing Partitions,
Journal of Classification 1985`
http://link.springer.com/article/10.1007%2FBF01908075
.. [wk] https://en.wikipedia.org/wiki/Rand_index#Adjusted_Rand_index
See also
--------
adjusted_mutual_info_score: Adjusted Mutual Information
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split;
# or trivial clustering where each document is assigned a unique cluster.
# These are perfect matches hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1 or
classes.shape[0] == clusters.shape[0] == 0 or
classes.shape[0] == clusters.shape[0] == len(labels_true)):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred,
max_n_classes=max_n_classes)
# Compute the ARI using the contingency data
sum_comb_c = sum(comb2(n_c) for n_c in contingency.sum(axis=1))
sum_comb_k = sum(comb2(n_k) for n_k in contingency.sum(axis=0))
sum_comb = sum(comb2(n_ij) for n_ij in contingency.flatten())
prod_comb = (sum_comb_c * sum_comb_k) / float(comb(n_samples, 2))
mean_comb = (sum_comb_k + sum_comb_c) / 2.
return ((sum_comb - prod_comb) / (mean_comb - prod_comb))
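# Worked example (added for illustration): for labels_true = [0, 0, 1, 1] and
# labels_pred = [0, 0, 1, 2] the contingency matrix is [[2, 0, 0], [0, 1, 1]],
# so sum_comb = 1, sum_comb_c = 2, sum_comb_k = 1,
# prod_comb = 2 * 1 / comb(4, 2) = 1/3 and mean_comb = 3/2, giving
# ARI = (1 - 1/3) / (3/2 - 1/3) = 4/7 ~ 0.571, matching the doctest above.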
def homogeneity_completeness_v_measure(labels_true, labels_pred,
max_n_classes=5000):
"""Compute the homogeneity and completeness and V-Measure scores at once.
Those metrics are based on normalized conditional entropy measures of
the clustering labeling to evaluate given the knowledge of a Ground
Truth class labels of the same samples.
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
Both scores have positive values between 0.0 and 1.0, larger values
being desirable.
Those 3 metrics are independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score values in any way.
V-Measure is furthermore symmetric: swapping ``labels_true`` and
``label_pred`` will give the same score. This does not hold for
homogeneity and completeness.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
max_n_classes: int, optional (default=5000)
Maximal number of classes handled by this metric. Setting it
too high can lead to MemoryError or OS freeze
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
v_measure: float
harmonic mean of the first two
See also
--------
homogeneity_score
completeness_score
v_measure_score
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
if len(labels_true) == 0:
return 1.0, 1.0, 1.0
entropy_C = entropy(labels_true)
entropy_K = entropy(labels_pred)
MI = mutual_info_score(labels_true, labels_pred,
max_n_classes=max_n_classes)
homogeneity = MI / (entropy_C) if entropy_C else 1.0
completeness = MI / (entropy_K) if entropy_K else 1.0
if homogeneity + completeness == 0.0:
v_measure_score = 0.0
else:
v_measure_score = (2.0 * homogeneity * completeness /
(homogeneity + completeness))
return homogeneity, completeness, v_measure_score
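# Worked example (added for illustration): with homogeneity = 1.0 and
# completeness = 0.5 the harmonic mean gives
# v = 2 * 1.0 * 0.5 / (1.0 + 0.5) = 2/3 ~ 0.667.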
def homogeneity_score(labels_true, labels_pred, max_n_classes=5000):
"""Homogeneity metric of a cluster labeling given a ground truth.
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`completeness_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
max_n_classes: int, optional (default=5000)
Maximal number of classes handled by this metric. Setting it
too high can lead to MemoryError or OS freeze
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
completeness_score
v_measure_score
Examples
--------
Perfect labelings are homogeneous::
>>> from sklearn.metrics.cluster import homogeneity_score
>>> homogeneity_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that further split classes into more clusters can be
perfectly homogeneous::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
1.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
1.0...
Clusters that include samples from different classes do not make for a
homogeneous labeling::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 0, 1]))
... # doctest: +ELLIPSIS
0.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred,
max_n_classes)[0]
def completeness_score(labels_true, labels_pred, max_n_classes=5000):
"""Completeness metric of a cluster labeling given a ground truth.
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``labels_true`` with ``labels_pred``
will return the :func:`homogeneity_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
max_n_classes : int, optional (default=5000)
Maximal number of classes handled by this metric. Setting it
too high can lead to a MemoryError or an OS freeze.
Returns
-------
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
v_measure_score
Examples
--------
Perfect labelings are complete::
>>> from sklearn.metrics.cluster import completeness_score
>>> completeness_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that assign all class members to the same clusters
are still complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 0, 0, 0]))
1.0
>>> print(completeness_score([0, 1, 2, 3], [0, 0, 1, 1]))
1.0
If class members are split across different clusters, the
assignment cannot be complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 1, 0, 1]))
0.0
>>> print(completeness_score([0, 0, 0, 0], [0, 1, 2, 3]))
0.0
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred,
max_n_classes)[1]
def v_measure_score(labels_true, labels_pred, max_n_classes=5000):
"""V-measure cluster labeling given a ground truth.
This score is closely related to :func:`normalized_mutual_info_score`;
the V-measure normalizes the mutual information by the arithmetic mean
of the label entropies, whereas the latter uses their geometric mean.
The V-measure is the harmonic mean between homogeneity and completeness::
v = 2 * (homogeneity * completeness) / (homogeneity + completeness)
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``labels_true`` with
``labels_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignment strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
max_n_classes : int, optional (default=5000)
Maximal number of classes handled by this metric. Setting it
too high can lead to a MemoryError or an OS freeze.
Returns
-------
v_measure: float
score between 0.0 and 1.0. 1.0 stands for a perfectly homogeneous and complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
completeness_score
Examples
--------
Perfect labelings are both homogeneous and complete, hence have score 1.0::
>>> from sklearn.metrics.cluster import v_measure_score
>>> v_measure_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> v_measure_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Labelings that assign all class members to the same clusters
are complete but not homogeneous, hence penalized::
>>> print("%.6f" % v_measure_score([0, 0, 1, 2], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 1, 2, 3], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.66...
Labelings that have pure clusters with members coming from the same
classes are homogeneous, but unnecessary splits harm completeness
and thus penalize the V-measure as well::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.66...
If class members are completely split across different clusters,
the assignment is totally incomplete, hence the V-Measure is null::
>>> print("%.6f" % v_measure_score([0, 0, 0, 0], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.0...
Clusters that include samples from entirely different classes totally
destroy the homogeneity of the labeling, hence::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred,
max_n_classes)[2]
def mutual_info_score(labels_true, labels_pred, contingency=None,
max_n_classes=5000):
"""Mutual Information between two clusterings.
The Mutual Information is a measure of the similarity between two labelings of
the same data. Where :math:`P(i)` is the probability of a random sample
occurring in cluster :math:`U_i` and :math:`P'(j)` is the probability of a
random sample occurring in cluster :math:`V_j`, the Mutual Information
between clusterings :math:`U` and :math:`V` is given as:
.. math::
MI(U,V)=\sum_{i=1}^R \sum_{j=1}^C P(i,j)\log\frac{P(i,j)}{P(i)P'(j)}
This is equal to the Kullback-Leibler divergence of the joint distribution
with the product distribution of the marginals.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``labels_true`` with
``labels_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignment strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
contingency: None or array, shape = [n_classes_true, n_classes_pred]
A contingency matrix given by the :func:`contingency_matrix` function.
If value is ``None``, it will be computed, otherwise the given value is
used, with ``labels_true`` and ``labels_pred`` ignored.
max_n_classes: int, optional (default=5000)
Maximal number of classes handled by the mutual_info_score
metric. Setting it too high can lead to MemoryError or OS
freeze
Returns
-------
mi: float
Mutual information, a non-negative value
See also
--------
adjusted_mutual_info_score: Adjusted against chance Mutual Information
normalized_mutual_info_score: Normalized Mutual Information
"""
if contingency is None:
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
contingency = contingency_matrix(labels_true, labels_pred,
max_n_classes=max_n_classes)
contingency = np.array(contingency, dtype='float')
contingency_sum = np.sum(contingency)
pi = np.sum(contingency, axis=1)
pj = np.sum(contingency, axis=0)
outer = np.outer(pi, pj)
nnz = contingency != 0.0
# normalized contingency
contingency_nm = contingency[nnz]
log_contingency_nm = np.log(contingency_nm)
contingency_nm /= contingency_sum
# log(a / b) should be calculated as log(a) - log(b) to
# avoid possible loss of precision
log_outer = -np.log(outer[nnz]) + log(pi.sum()) + log(pj.sum())
mi = (contingency_nm * (log_contingency_nm - log(contingency_sum)) +
contingency_nm * log_outer)
return mi.sum()
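# Hedged sanity-check sketch (contingency values are illustrative): the
# vectorized computation above agrees with the naive double sum
# MI = sum_ij p_ij * (log(p_ij) - log(p_i) - log(p_j)), using the
# module-level ``log``.
#
#     c = np.array([[2., 0.], [1., 1.]])
#     n = c.sum()
#     naive = sum(c[i, j] / n * (log(c[i, j] / n) - log(c[i].sum() / n)
#                                - log(c[:, j].sum() / n))
#                 for i in range(2) for j in range(2) if c[i, j])
#     assert abs(naive - mutual_info_score(None, None, contingency=c)) < 1e-12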
def adjusted_mutual_info_score(labels_true, labels_pred, max_n_classes=5000):
"""Adjusted Mutual Information between two clusterings.
Adjusted Mutual Information (AMI) is an adjustment of the Mutual
Information (MI) score to account for chance. It accounts for the fact that
the MI is generally higher for two clusterings with a larger number of
clusters, regardless of whether there is actually more information shared.
For two clusterings :math:`U` and :math:`V`, the AMI is given as::
AMI(U, V) = [MI(U, V) - E(MI(U, V))] / [max(H(U), H(V)) - E(MI(U, V))]
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``labels_true`` with
``labels_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignment strategies
on the same dataset when the real ground truth is not known.
Be mindful that this function is an order of magnitude slower than other
metrics, such as the Adjusted Rand Index.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
max_n_classes : int, optional (default=5000)
Maximal number of classes handled by this metric. Setting it
too high can lead to a MemoryError or an OS freeze.
Returns
-------
ami : float (upper-bounded by 1.0)
The AMI returns a value of 1 when the two partitions are identical
(i.e. perfectly matched). Random partitions (independent labellings) have
an expected AMI of around 0 on average and hence can be negative.
See also
--------
adjusted_rand_score: Adjusted Rand Index
mutual_info_score: Mutual Information (not adjusted for chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import adjusted_mutual_info_score
>>> adjusted_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If class members are completely split across different clusters,
the assignment is totally incomplete, hence the AMI is null::
>>> adjusted_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [1] `Vinh, Epps, and Bailey, (2010). Information Theoretic Measures for
Clusterings Comparison: Variants, Properties, Normalization and
Correction for Chance, JMLR
<http://jmlr.csail.mit.edu/papers/volume11/vinh10a/vinh10a.pdf>`_
.. [2] `Wikipedia entry for the Adjusted Mutual Information
<https://en.wikipedia.org/wiki/Adjusted_Mutual_Information>`_
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1 or
classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred,
max_n_classes=max_n_classes)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
emi = expected_mutual_information(contingency, n_samples)
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
ami = (mi - emi) / (max(h_true, h_pred) - emi)
return ami
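# Hedged illustration (values are approximate and seed-dependent): for two
# independent random labelings the raw MI is biased upwards (roughly
# (R - 1) * (C - 1) / (2 * n)), while the AMI stays close to zero.
#
#     rng = np.random.RandomState(0)
#     a, b = rng.randint(0, 10, 500), rng.randint(0, 10, 500)
#     mutual_info_score(a, b)            # noticeably positive
#     adjusted_mutual_info_score(a, b)   # close to 0.0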
def normalized_mutual_info_score(labels_true, labels_pred, max_n_classes=5000):
"""Normalized Mutual Information between two clusterings.
Normalized Mutual Information (NMI) is a normalization of the Mutual
Information (MI) score to scale the results between 0 (no mutual
information) and 1 (perfect correlation). In this function, mutual
information is normalized by ``sqrt(H(labels_true) * H(labels_pred))``.
This measure is not adjusted for chance. Therefore
:func:`adjusted_mutual_info_score` might be preferred.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``labels_true`` with
``labels_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignment strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
max_n_classes : int, optional (default=5000)
Maximal number of classes handled by this metric. Setting it
too high can lead to a MemoryError or an OS freeze.
Returns
-------
nmi: float
score between 0.0 and 1.0. 1.0 stands for a perfect match
See also
--------
adjusted_rand_score: Adjusted Rand Index
adjusted_mutual_info_score: Adjusted Mutual Information (adjusted
against chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import normalized_mutual_info_score
>>> normalized_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> normalized_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If class members are completely split across different clusters,
the assignment is totally incomplete, hence the NMI is null::
>>> normalized_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1 or
classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred,
max_n_classes=max_n_classes)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
nmi = mi / max(np.sqrt(h_true * h_pred), 1e-10)
return nmi
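# Hedged consistency sketch (illustrative labels): NMI is simply MI rescaled
# by the geometric mean of the two label entropies.
#
#     lt, lp = [0, 0, 1, 1], [0, 0, 1, 2]
#     expected = mutual_info_score(lt, lp) / np.sqrt(entropy(lt) * entropy(lp))
#     assert abs(normalized_mutual_info_score(lt, lp) - expected) < 1e-12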
def fowlkes_mallows_score(labels_true, labels_pred, max_n_classes=5000):
"""Measure the similarity of two clusterings of a set of points.
The Fowlkes-Mallows index (FMI) is defined as the geometric mean of
the precision and recall::
FMI = TP / sqrt((TP + FP) * (TP + FN))
Where ``TP`` is the number of **True Positives** (i.e. the number of pairs
of points that belong to the same clusters in both ``labels_true`` and
``labels_pred``), ``FP`` is the number of **False Positives** (i.e. the
number of pairs of points that belong to the same clusters in
``labels_true`` but not in ``labels_pred``) and ``FN`` is the number of
**False Negatives** (i.e. the number of pairs of points that belong to the
same clusters in ``labels_pred`` but not in ``labels_true``).
The score ranges from 0 to 1. A high value indicates a good similarity
between the two clusterings.
Read more in the :ref:`User Guide <fowlkes_mallows_scores>`.
Parameters
----------
labels_true : int array, shape = (``n_samples``,)
A clustering of the data into disjoint subsets.
labels_pred : array, shape = (``n_samples``, )
A clustering of the data into disjoint subsets.
max_n_classes : int, optional (default=5000)
Maximal number of classes handled by the Fowlkes-Mallows
metric. Setting it too high can lead to MemoryError or OS
freeze
Returns
-------
score : float
The resulting Fowlkes-Mallows score.
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import fowlkes_mallows_score
>>> fowlkes_mallows_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> fowlkes_mallows_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If class members are completely split across different clusters,
the assignment is totally random, hence the FMI is null::
>>> fowlkes_mallows_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [1] `E. B. Fowlkes and C. L. Mallows, 1983. "A method for comparing two
hierarchical clusterings". Journal of the American Statistical
Association
<http://wildfire.stat.ucla.edu/pdflibrary/fowlkes.pdf>`_
.. [2] `Wikipedia entry for the Fowlkes-Mallows Index
<https://en.wikipedia.org/wiki/Fowlkes-Mallows_index>`_
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred,)
n_samples, = labels_true.shape
c = contingency_matrix(labels_true, labels_pred,
max_n_classes=max_n_classes)
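# Pair-counting identities: sum(c_ij ** 2) - n counts the (ordered) pairs of
# samples placed in the same cluster by both labelings, while the squared
# column/row marginal sums count pairs grouped together by each labeling
# separately.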
tk = np.dot(c.ravel(), c.ravel()) - n_samples
pk = np.sum(np.sum(c, axis=0) ** 2) - n_samples
qk = np.sum(np.sum(c, axis=1) ** 2) - n_samples
return tk / np.sqrt(pk * qk) if tk != 0. else 0.
def entropy(labels):
"""Calculates the entropy for a labeling."""
if len(labels) == 0:
return 1.0
label_idx = np.unique(labels, return_inverse=True)[1]
pi = bincount(label_idx).astype(np.float)
pi = pi[pi > 0]
pi_sum = np.sum(pi)
# log(a / b) should be calculated as log(a) - log(b) to
# avoid possible loss of precision
return -np.sum((pi / pi_sum) * (np.log(pi) - log(pi_sum)))
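# Hedged numeric check (natural logarithm, as used above): a balanced binary
# labeling has entropy log(2).
#
#     assert abs(entropy([0, 0, 1, 1]) - np.log(2)) < 1e-12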
| bsd-3-clause |
shenzebang/scikit-learn | examples/model_selection/plot_validation_curve.py | 229 | 1823 | """
==========================
Plotting Validation Curves
==========================
In this plot you can see the training scores and validation scores of an SVM
for different values of the kernel parameter gamma. For very low values of
gamma, you can see that both the training score and the validation score are
low. This is called underfitting. Medium values of gamma will result in high
values for both scores, i.e. the classifier is performing fairly well. If gamma
is too high, the classifier will overfit, which means that the training score
is good but the validation score is poor.
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import load_digits
from sklearn.svm import SVC
from sklearn.learning_curve import validation_curve
digits = load_digits()
X, y = digits.data, digits.target
param_range = np.logspace(-6, -1, 5)
train_scores, test_scores = validation_curve(
SVC(), X, y, param_name="gamma", param_range=param_range,
cv=10, scoring="accuracy", n_jobs=1)
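# train_scores and test_scores have shape (len(param_range), n_cv_folds);
# averaging over axis=1 below collapses the folds for each gamma value.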
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.title("Validation Curve with SVM")
plt.xlabel(r"$\gamma$")
plt.ylabel("Score")
plt.ylim(0.0, 1.1)
plt.semilogx(param_range, train_scores_mean, label="Training score", color="r")
plt.fill_between(param_range, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.2, color="r")
plt.semilogx(param_range, test_scores_mean, label="Cross-validation score",
color="g")
plt.fill_between(param_range, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.2, color="g")
plt.legend(loc="best")
plt.show()
| bsd-3-clause |
glouppe/scikit-learn | sklearn/feature_selection/tests/test_from_model.py | 62 | 6762 | import numpy as np
import scipy.sparse as sp
from nose.tools import assert_raises, assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import skip_if_32bit
from sklearn import datasets
from sklearn.linear_model import LogisticRegression, SGDClassifier, Lasso
from sklearn.svm import LinearSVC
from sklearn.feature_selection import SelectFromModel
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import PassiveAggressiveClassifier
iris = datasets.load_iris()
data, y = iris.data, iris.target
rng = np.random.RandomState(0)
def test_transform_linear_model():
for clf in (LogisticRegression(C=0.1),
LinearSVC(C=0.01, dual=False),
SGDClassifier(alpha=0.001, n_iter=50, shuffle=True,
random_state=0)):
for thresh in (None, ".09*mean", "1e-5 * median"):
for func in (np.array, sp.csr_matrix):
X = func(data)
clf.set_params(penalty="l1")
clf.fit(X, y)
X_new = assert_warns(
DeprecationWarning, clf.transform, X, thresh)
if isinstance(clf, SGDClassifier):
assert_true(X_new.shape[1] <= X.shape[1])
else:
assert_less(X_new.shape[1], X.shape[1])
clf.set_params(penalty="l2")
clf.fit(X_new, y)
pred = clf.predict(X_new)
assert_greater(np.mean(pred == y), 0.7)
def test_invalid_input():
clf = SGDClassifier(alpha=0.1, n_iter=10, shuffle=True, random_state=None)
for threshold in ["gobbledigook", ".5 * gobbledigook"]:
model = SelectFromModel(clf, threshold=threshold)
model.fit(data, y)
assert_raises(ValueError, model.transform, data)
def test_input_estimator_unchanged():
"""
Test that SelectFromModel fits on a clone of the estimator.
"""
est = RandomForestClassifier()
transformer = SelectFromModel(estimator=est)
transformer.fit(data, y)
assert_true(transformer.estimator is est)
@skip_if_32bit
def test_feature_importances():
X, y = datasets.make_classification(
n_samples=1000, n_features=10, n_informative=3, n_redundant=0,
n_repeated=0, shuffle=False, random_state=0)
est = RandomForestClassifier(n_estimators=50, random_state=0)
for threshold, func in zip(["mean", "median"], [np.mean, np.median]):
transformer = SelectFromModel(estimator=est, threshold=threshold)
transformer.fit(X, y)
assert_true(hasattr(transformer.estimator_, 'feature_importances_'))
X_new = transformer.transform(X)
assert_less(X_new.shape[1], X.shape[1])
importances = transformer.estimator_.feature_importances_
feature_mask = np.abs(importances) > func(importances)
assert_array_almost_equal(X_new, X[:, feature_mask])
# Check with sample weights
sample_weight = np.ones(y.shape)
sample_weight[y == 1] *= 100
est = RandomForestClassifier(n_estimators=50, random_state=0)
transformer = SelectFromModel(estimator=est)
transformer.fit(X, y, sample_weight=sample_weight)
importances = transformer.estimator_.feature_importances_
transformer.fit(X, y, sample_weight=3 * sample_weight)
importances_bis = transformer.estimator_.feature_importances_
assert_almost_equal(importances, importances_bis)
# For the Lasso and related models, the threshold defaults to 1e-5
transformer = SelectFromModel(estimator=Lasso(alpha=0.1))
transformer.fit(X, y)
X_new = transformer.transform(X)
mask = np.abs(transformer.estimator_.coef_) > 1e-5
assert_array_equal(X_new, X[:, mask])
def test_partial_fit():
est = PassiveAggressiveClassifier(random_state=0, shuffle=False)
transformer = SelectFromModel(estimator=est)
transformer.partial_fit(data, y,
classes=np.unique(y))
old_model = transformer.estimator_
transformer.partial_fit(data, y,
classes=np.unique(y))
new_model = transformer.estimator_
assert_true(old_model is new_model)
X_transform = transformer.transform(data)
transformer.fit(np.vstack((data, data)), np.concatenate((y, y)))
assert_array_equal(X_transform, transformer.transform(data))
def test_warm_start():
est = PassiveAggressiveClassifier(warm_start=True, random_state=0)
transformer = SelectFromModel(estimator=est)
transformer.fit(data, y)
old_model = transformer.estimator_
transformer.fit(data, y)
new_model = transformer.estimator_
assert_true(old_model is new_model)
def test_prefit():
"""
Test all possible combinations of the prefit parameter.
"""
# Passing a prefit parameter with the selected model
# and fitting an unfitted model with prefit=False should give the same results.
clf = SGDClassifier(alpha=0.1, n_iter=10, shuffle=True, random_state=0)
model = SelectFromModel(clf)
model.fit(data, y)
X_transform = model.transform(data)
clf.fit(data, y)
model = SelectFromModel(clf, prefit=True)
assert_array_equal(model.transform(data), X_transform)
# Check that the model is rewritten if prefit=False and a fitted model is
# passed
model = SelectFromModel(clf, prefit=False)
model.fit(data, y)
assert_array_equal(model.transform(data), X_transform)
# Check that prefit=True and calling fit raises a ValueError
model = SelectFromModel(clf, prefit=True)
assert_raises(ValueError, model.fit, data, y)
def test_threshold_string():
est = RandomForestClassifier(n_estimators=50, random_state=0)
model = SelectFromModel(est, threshold="0.5*mean")
model.fit(data, y)
X_transform = model.transform(data)
# Calculate the threshold from the estimator directly.
est.fit(data, y)
threshold = 0.5 * np.mean(est.feature_importances_)
mask = est.feature_importances_ > threshold
assert_array_equal(X_transform, data[:, mask])
def test_threshold_without_refitting():
"""Test that the threshold can be set without refitting the model."""
clf = SGDClassifier(alpha=0.1, n_iter=10, shuffle=True, random_state=0)
model = SelectFromModel(clf, threshold=0.1)
model.fit(data, y)
X_transform = model.transform(data)
# Set a higher threshold to filter out more features.
model.threshold = 1.0
assert_greater(X_transform.shape[1], model.transform(data).shape[1])
| bsd-3-clause |
anetasie/sherpa | sherpa/conftest.py | 1 | 17067 | #
# Copyright (C) 2016, 2017, 2018, 2019, 2020
# Smithsonian Astrophysical Observatory
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
import os
import sys
import re
import logging
import pytest
import numpy as np
from numpy import VisibleDeprecationWarning
from sherpa.utils.testing import SherpaTestCase
try:
from astropy.io.fits.verify import VerifyWarning
have_astropy = True
except ImportError:
have_astropy = False
try:
from sherpa.astro import xspec
has_xspec = True
except ImportError:
has_xspec = False
import sherpa.plot
import sherpa.astro.plot
from sherpa.plot import dummy_backend
try:
from sherpa.plot import pylab_backend
except ImportError:
pylab_backend = None
# In some instances, if some xvfb processes did not stop cleanly,
# pytest-xvfb starts complaining that a virtual screen is already on;
# the following code works around that. The issue is hard to reproduce
# but I (OL) tested it locally as it reappeared on my workstation.
try:
import random
from pyvirtualdisplay import abstractdisplay
abstractdisplay.RANDOMIZE_DISPLAY_NR = True
abstractdisplay.random = random
random.seed()
except ImportError:
pass
TEST_DATA_OPTION = "--test-data"
# Follow https://docs.pytest.org/en/latest/example/simple.html#control-skipping-of-tests-according-to-command-line-option
# for adding a command-line option to let slow-running tests be run
# (not by default), which combines with existing code and options.
#
def pytest_addoption(parser):
parser.addoption("-D", TEST_DATA_OPTION, action="store",
help="Alternative location of test data files")
parser.addoption("--runslow", action="store_true", default=False,
help="run slow tests")
def pytest_collection_modifyitems(config, items):
if config.getoption("--runslow"):
# --runslow given in cli: do not skip slow tests
return
skip_slow = pytest.mark.skip(reason="need --runslow option to run")
for item in items:
if "slow" in item.keywords:
item.add_marker(skip_slow)
# Whitelist of known warnings. One can associate different warning messages
# with the same warning class.
known_warnings = {
DeprecationWarning:
[
r"unorderable dtypes.*",
r"Non-string object detected for the array ordering.*",
r"using a non-integer number instead of an integer will result in an error in the future",
r"Use load_xstable_model to load XSPEC table models",
# This does not have to do with Sherpa and is coming from some versions of
# jupyter_client
r"metadata .* was set from the constructor.*",
# Matplotlib version 2 warnings (from HTML notebook represention)
#
r'np.asscalar\(a\) is deprecated since NumPy v1.16, use a.item\(\) instead',
r"Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated since Python 3.3,and in 3.9 it will stop working"
],
UserWarning:
[
r"File '/data/regression_test/master/in/sherpa/aref_sample.fits' does not have write permission. Changing to read-only mode.",
r"File '/data/regression_test/master/in/sherpa/aref_Cedge.fits' does not have write permission. Changing to read-only mode.",
r"Converting array .* to numpy array",
# Matplotlib version 2 warnings (from HTML notebook represention)
#
r'Attempting to set identical bottom==top results\nin singular transformations; automatically expanding.\nbottom=1.0, top=1.0',
# See issue #571 and tests
# test_ui_plot.py
# test_plot_fit_xxx_pylab
# test_plot_fit_xxx_overplot_pylab
r"Attempted to set non-positive xlimits for log-scale axis; invalid limits will be ignored.",
r"Attempted to set non-positive left xlim on a log-scaled axis.*",
],
RuntimeWarning:
[r"invalid value encountered in sqrt",
# See https://github.com/ContinuumIO/anaconda-issues/issues/6678
r"numpy.dtype size changed, may indicate binary " +
r"incompatibility. Expected 96, got 88",
# I am getting the following from astropy with at least python 2.7 during the conda tests
r"numpy.ufunc size changed, may indicate binary ",
],
VisibleDeprecationWarning:
[],
}
# Since Sherpa now requires Python 3.5 at a minimum, the following
# are always added, but kept as a separate dict and then merged
# to make it clearer where they came from.
#
python3_warnings = {
ResourceWarning:
[
r"unclosed file .*king_kernel.txt.* closefd=True>",
r"unclosed file .*phas.dat.* closefd=True>",
r"unclosed file .*data.txt.* closefd=True>",
r"unclosed file .*cstat.dat.* closefd=True>",
r"unclosed file .*data1.dat.* closefd=True>",
r"unclosed file .*aref_Cedge.fits.* closefd=True>",
r"unclosed file .*aref_sample.fits.* closefd=True>",
r"unclosed file .*/tmp.* closefd=True>",
# added for sherpa/astro/ui/tests/test_astro_ui_utils_unit.py
r"unclosed file .*/dev/null.* closefd=True>",
r"unclosed file .*table.txt.* closefd=True>",
],
RuntimeWarning:
[r"invalid value encountered in sqrt",
# See https://github.com/ContinuumIO/anaconda-issues/issues/6678
r"numpy.dtype size changed, may indicate binary " +
r"incompatibility. Expected 96, got 88",
# See https://github.com/numpy/numpy/pull/432
r"numpy.ufunc size changed"
],
}
known_warnings.update(python3_warnings)
if have_astropy:
astropy_warnings = {
# See bug #372 on GitHub
ImportWarning:
[
r"can't resolve.*__spec__.*__package__.*",
],
VerifyWarning:
[
r"Invalid keyword for column.*",
],
}
known_warnings.update(astropy_warnings)
"""
# Currently no matplotlib warnings to ignore, but leave code so it is
# easy to add one back in
try:
from matplotlib import MatplotlibDeprecationWarning
matplotlib_warnings = {
MatplotlibDeprecationWarning:
[]
}
known_warnings.update(matplotlib_warnings)
except ImportError:
pass
"""
# Can this be replaced by the warning support added in pytest 3.1?
# See https://docs.pytest.org/en/latest/warnings.html#warnings
#
@pytest.fixture(scope="function", autouse=True)
def capture_all_warnings(request, recwarn, pytestconfig):
"""
This fixture will run automatically before and after every test function is executed.
It uses pytest's infrastructure to get all recorded warnings and match them against the whitelist. If an
unknown warning is found, then the fixture finalizer will fail the specific test function.
In the verbose pytest report the test function will show up twice if an unknown warning is captured: once with the
actual result of the test and once with an ERROR. The warning will be shown as part of the stderr stream.
Parameters
----------
request standard injected service for pytest fixtures
recwarn injected pytest service for accessing recorded warnings
pytestconfig injected service for accessing the configuration data
"""
def known(warning):
message = warning.message
for known_warning in known_warnings[type(message)]:
pattern = re.compile(known_warning)
if pattern.match(str(message)):
return True
return False
def fin():
warnings = [w for w in recwarn.list
if type(w.message) not in known_warnings or not known(w)]
nwarnings = len(warnings)
if nwarnings > 0:
# Only print out the first time a warning is seen; we could
# report this to the user explicitly, but rely on the
# counter information (i.e. i+1/nwarnings) to show that
# there were repeats.
#
swarnings = set([])
print("*** Warnings created: {}".format(nwarnings))
for i, w in enumerate(warnings):
sw = str(w)
if sw in swarnings:
continue
print("{}/{} {}".format(i + 1, nwarnings, w))
swarnings.add(sw)
assert 0 == nwarnings
request.addfinalizer(fin)
def pytest_configure(config):
"""
This configuration hook overrides the default mechanism for test data self-discovery, if the --test-data command line
option is provided.
It also adds support for the "slow" test marker.
Parameters
----------
config standard service injected by pytest
"""
try:
path = config.getoption(TEST_DATA_OPTION)
if path:
SherpaTestCase.datadir = path
except ValueError: # option not defined from command line, no-op
pass
config.addinivalue_line("markers", "slow: mark test as slow to run")
@pytest.fixture(scope="session")
def test_data_path():
"""
Fixture for getting the path to the test data
Returns
-------
path : basestring
The string with the path to the main test data folder.
"""
path = SherpaTestCase.datadir
if path is None:
raise RuntimeError("Test needs the requires_data decorator")
return path
@pytest.fixture(scope="session")
def make_data_path(test_data_path):
"""
Fixture for tests requiring the test data dir. It returns a function that can be used to make paths by using
path elements relative to the test data folder (which is flat, so in principle only the first element is required)
Returns
-------
make_data_path : func
A function that accepts a list of path elements to be joined with
the base data dir path. This function exits with a RuntimeError
if the data directory is None, pointing out the requires_data
decorator is needed.
"""
def wrapped(arg):
return os.path.join(test_data_path, arg)
return wrapped
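# Hedged usage sketch (the test name and data file are hypothetical):
#
#     def test_can_read_pha(make_data_path):
#         infile = make_data_path('source.pha')
#         ...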
def run_thread_function(name, scriptname, test_data_path):
"""Run a regression test from the sherpa-test-data submodule.
Parameters
----------
name : string
The name of the science thread to run (e.g., pha_read,
radpro). The name should match the corresponding thread
name in the sherpa-test-data submodule. See examples below.
scriptname : string
The suffix of the test script file name, usually "fit.py."
test_data_path : string
The path to the test data folder
Returns
-------
localsyms : dict
Any model parameters created by the script.
Examples
--------
Regression test script file names have the structure
"name-scriptname.py." By default, scriptname is set to "fit.py."
For example, if one wants to run the regression test
"pha_read-fit.py," they would write
>>> run_thread("pha_read")
If the regression test name is "lev3fft-bar.py," they would do
>>> run_thread("lev3fft", scriptname="bar.py")
"""
from sherpa.astro import ui
scriptname = name + "-" + scriptname
cwd = os.getcwd()
os.chdir(test_data_path)
# Need to add to localsyms so that the scripts can work, but we
# do not need (for now) to return all the local symbols, so
# also have a version just for model parameters.
#
localsyms = {}
modelsyms = {}
def assign_model(name, val):
localsyms[name] = val
modelsyms[name] = val
old_assign_model = ui.get_model_autoassign_func()
try:
with open(scriptname, "rb") as fh:
cts = fh.read()
ui.set_model_autoassign_func(assign_model)
exec(compile(cts, scriptname, 'exec'), {}, localsyms)
finally:
ui.set_model_autoassign_func(old_assign_model)
os.chdir(cwd)
return modelsyms
@pytest.fixture
def run_thread(test_data_path):
"""
Fixture that returns a function that can be used to run a thread.
Returns
-------
run_thread : function
The returned function is basically the run_thread_function function, using the test data path found at runtime
"""
def run_thread_wrapper(name, scriptname='fit.py'):
return run_thread_function(name, scriptname, test_data_path)
return run_thread_wrapper
@pytest.fixture
def clean_astro_ui():
"""Ensure sherpa.astro.ui.clean is called before AND after the test.
This also resets the XSPEC settings (if XSPEC support is provided).
See Also
--------
clean_ui
Notes
-----
It does NOT change the logging level; perhaps it should, but the
screen output is useful for debugging at this time.
"""
from sherpa.astro import ui
if has_xspec:
old_xspec = xspec.get_xsstate()
else:
old_xspec = None
ui.clean()
yield
ui.clean()
if old_xspec is not None:
xspec.set_xsstate(old_xspec)
@pytest.fixture
def clean_ui():
"""Ensure sherpa.ui.clean is called before AND after the test.
See Also
--------
clean_astro_ui
Notes
-----
It does NOT change the logging level; perhaps it should, but the
screen output is useful for debugging at this time.
"""
from sherpa import ui
ui.clean()
yield
ui.clean()
@pytest.fixture
def restore_xspec_settings():
"""Fixture to ensure that XSPEC settings are restored after the test.
The aim is to allow the test to change XSPEC settings but to
ensure they are restored to their default values after the test.
The logic for what should and should not be reset is left to
xspec.get_state/set_state. There are known issues with trying to
reset "XSET" values (e.g. you can only set them to "", not
"delete" them, and it is up to the model implementation to note
the value has changed).
"""
try:
from sherpa.astro import xspec
except ImportError:
# can I return from here safely?
return
# grab initial values
#
state = xspec.get_xsstate()
# return control to test
yield
# clean up after test
xspec.set_xsstate(state)
@pytest.fixture
def reset_seed(request):
"""Force a random seed after the test.
The random seed is reset with np.random.seed() after the
test is done. It is expected that the test sets the
seed to an explicit value. Ideally we would use the
new NumPy RNG but we still need to support older NumPy
versions.
"""
yield
np.random.seed()
@pytest.fixture
def hide_logging():
"""Set Sherpa's logging to ERROR for the test.
"""
logger = logging.getLogger('sherpa')
olvl = logger.getEffectiveLevel()
logger.setLevel(logging.ERROR)
yield
logger.setLevel(olvl)
@pytest.fixture
def old_numpy_printing():
"""Force NumPy to use old-style printing for the test.
This is only needed whilst we still support NumPy 1.13
(I think). Calling this fixture will ensure we have
consistent printing of NumPy arrays (to some degree
anyway).
The original printoptions is reset after the test is
run.
"""
oldopts = np.get_printoptions()
if 'legacy' in oldopts:
np.set_printoptions(legacy='1.13')
yield
if 'legacy' in oldopts:
np.set_printoptions(legacy=oldopts['legacy'])
PLOT_BACKENDS = [dummy_backend]
if pylab_backend is not None:
PLOT_BACKENDS.append(pylab_backend)
@pytest.fixture(params=PLOT_BACKENDS)
def override_plot_backend(request):
"""Override the plot backend for this test
Runs the test with the given plot backend and then restores the
original value.
Note that this does not reload the ui layers, which will retain a
reference to the original backend throughout.
"""
old = sherpa.plot.backend
# safety check
#
assert old.name == sherpa.astro.plot.backend.name
changed = old.name != request.param.name
if changed:
sherpa.plot.backend = request.param
sherpa.astro.plot.backend = request.param
yield
if changed:
sherpa.plot.backend = old
sherpa.astro.plot.backend = old
| gpl-3.0 |
alexmojaki/blaze | blaze/compute/tests/test_hdfstore.py | 14 | 1791 | import pytest
tables = pytest.importorskip('tables')
from blaze.compute.hdfstore import *
from blaze.utils import tmpfile
from blaze import symbol, discover, compute
import pandas as pd
from datetime import datetime
from odo import Chunks, resource, into
import os
try:
f = pd.HDFStore('foo')
except (RuntimeError, ImportError) as e:
pytest.skip('skipping test_hdfstore.py %s' % e)
else:
f.close()
os.remove('foo')
df = pd.DataFrame([['a', 1, 10., datetime(2000, 1, 1)],
['ab', 2, 20., datetime(2000, 2, 2)],
['abc', 3, 30., datetime(2000, 3, 3)],
['abcd', 4, 40., datetime(2000, 4, 4)]],
columns=['name', 'a', 'b', 'time'])
def test_hdfstore():
with tmpfile('.hdf5') as fn:
df.to_hdf(fn, '/appendable', format='table')
df.to_hdf(fn, '/fixed')
hdf = resource('hdfstore://%s' % fn)
s = symbol('s', discover(hdf))
assert isinstance(compute(s.fixed, hdf),
(pd.DataFrame, pd.io.pytables.Fixed))
assert isinstance(compute(s.appendable, hdf),
(pd.io.pytables.AppendableFrameTable, Chunks))
s = symbol('s', discover(df))
f = resource('hdfstore://%s::/fixed' % fn)
a = resource('hdfstore://%s::/appendable' % fn)
assert isinstance(pre_compute(s, a), Chunks)
hdf.close()
f.parent.close()
a.parent.close()
def test_groups():
with tmpfile('.hdf5') as fn:
df.to_hdf(fn, '/data/fixed')
hdf = resource('hdfstore://%s' % fn)
assert discover(hdf) == discover({'data': {'fixed': df}})
s = symbol('s', discover(hdf))
assert list(compute(s.data.fixed, hdf).a) == [1, 2, 3, 4]
hdf.close()
| bsd-3-clause |
eubr-bigsea/tahiti | migrations/versions/54147db30380_fixing_some_sklearn_operations.py | 1 | 8064 | """fixing some sklearn operations.
Revision ID: 54147db30380
Revises: 29ecca388884
Create Date: 2020-01-23 12:51:44.638796
"""
from alembic import context
from alembic import op
from sqlalchemy import String, Integer, Text
from sqlalchemy.orm import sessionmaker
from sqlalchemy.sql import table, column
# revision identifiers, used by Alembic.
revision = '54147db30380'
down_revision = '29ecca388884'
branch_labels = None
depends_on = None
def _insert_operation_form_field():
tb = table(
'operation_form_field',
column('id', Integer),
column('name', String),
column('type', String),
column('required', Integer),
column('order', Integer),
column('default', Text),
column('suggested_widget', String),
column('values_url', String),
column('values', String),
column('scope', String),
column('form_id', Integer),
column('enable_conditions', String))
columns = ('id', 'name', 'type', 'required', 'order', 'default',
'suggested_widget', 'values_url', 'values', 'scope', 'form_id',
'enable_conditions')
data = [
(4372, 'output_distribution', 'TEXT', 0, 4, 'uniform', 'dropdown', None,
'[{"key": \"quantile\", \"value\": \"quantile\"}, '
'{\"key\": \"uniform\", \"value\": \"uniform\"}, '
'{\"key\": \"kmeans\", \"value\": \"kmeans\"}]', 'EXECUTION', 4014,
None),
]
rows = [dict(list(zip(columns, row))) for row in data]
op.bulk_insert(tb, rows)
def _undo_operation_form_field():
tb = table(
'operation_form_field',
column('id', Integer),
column('name', String),
column('type', String),
column('required', Integer),
column('order', Integer),
column('default', Text),
column('suggested_widget', String),
column('values_url', String),
column('values', String),
column('scope', String),
column('form_id', Integer),
column('enable_conditions', String)
)
columns = ('id', 'name', 'type', 'required', 'order', 'default',
'suggested_widget', 'values_url', 'values', 'scope', 'form_id',
'enable_conditions')
data = [
(4088, 'feature', 'TEXT', 0, 3, None, 'attribute-selector', None,
'{\"multiple\": false}', 'EXECUTION', 4017,
None),
(4073, 'output_distribution', 'TEXT', 0, 4, 'uniform', 'dropdown', None,
'[{"key": \"normal\", \"value\": \"normal\"}, '
'{\"key\": \"uniform\", \"value\": \"uniform\"}]', 'EXECUTION',
4014, None),
(4074, 'seed', 'INTEGER', 0, 5, None, 'integer', None, None,
'EXECUTION', 4014, None),
]
rows = [dict(list(zip(columns, row))) for row in data]
op.bulk_insert(tb, rows)
def _insert_operation_form_field_translation():
tb = table(
'operation_form_field_translation',
column('id', Integer),
column('locale', String),
column('label', String),
column('help', String), )
columns = ('id', 'locale', 'label', 'help')
data = [
(4372, 'en', 'Strategy',
'Strategy used to define the widths of the bins.'),
(4372, 'pt', 'Estratégia',
'Estratégia utilizada para definir o range dos bins.'),
]
rows = [dict(list(zip(columns, row))) for row in data]
op.bulk_insert(tb, rows)
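# Each entry below pairs an upgrade step with its downgrade counterpart; a
# step is a SQL string (split on ';' before execution), a list of
# statements, or a callable. upgrade() applies the list in order and
# downgrade() replays it in reverse.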
all_commands = [
# updating the quantile-discretizer
("DELETE FROM operation_form_field_translation WHERE id = 4073 or id = 4074",
"""
INSERT INTO operation_form_field_translation VALUES (4073, 'en', 'Distribuition',
'Marginal distribution for the transformed data.'), (4074, 'en', 'Seed',
'The seed of the pseudo random number generator to use when shuffling the data.'),
(4073, 'pt', 'Distribuição', 'Distribuição marginal para os dados transformados.'),
(4074, 'pt', 'Seed', 'The seed of the pseudo random number generator to use when shuffling the data.');
"""),
("DELETE FROM operation_form_field WHERE id = 4073 or id = 4074", ""),
(_insert_operation_form_field, "DELETE FROM operation_form_field WHERE id = 4372"),
(_insert_operation_form_field_translation, 'DELETE FROM operation_form_field_translation WHERE id = 4372'),
("UPDATE operation_form_field SET `default`= 5 WHERE id = 4072", ""),
# disable the feature attribute field
("DELETE FROM operation_form_field_translation WHERE id = 4088",
"""
INSERT INTO operation_form_field_translation (id, locale, label, help) VALUES
(4088, 'en', 'Feature attribute (if clustering)', 'Feature attribute (only if is clustering model).'),
(4088, 'pt', 'Atributo de features', 'Atributo usado como features (apenas se é modelo de agrupamento).');
"""),
("DELETE FROM operation_form_field WHERE id = 4088", _undo_operation_form_field),
# disable the evaluate with cross-validation operation
("DELETE FROM operation_platform WHERE operation_id = 43 AND platform_id = 4;",
"INSERT INTO operation_platform VALUES (43, 4);"),
# enabling the scatter plot
('UPDATE operation SET enabled=0 where id = 80', 'UPDATE operation SET enabled=1 where id = 80'),
('INSERT INTO operation_platform VALUES (87, 4);',
'DELETE FROM operation_platform WHERE operation_id = 87 and platform_id = 4;'),
# remove bars from the Map
("UPDATE operation_form_field SET `values` = "
"'[{\"en\": \"Points\", \"key\": \"points\", \"value\": \"Points\", \"pt\": \"Pontos\"}, "
" {\"en\": \"Heatmap\", \"key\": \"heatmap\", \"value\": \"Heatmap\", \"pt\": \"Mapa de calor (heatmap)\"},"
" {\"en\": \"Polygon (Geo JSON)\", \"key\": \"polygon\", \"value\": \"Polygon (Geo JSON)\", "
"\"pt\": \"Polígono (Geo JSON)\"}]' "
"WHERE id = 332;",
"UPDATE operation_form_field SET `values` = "
"'[{\"en\": \"Bar\", \"key\": \"bar\", \"value\": \"Bar\", \"pt\": \"Barras\"},"
" {\"en\": \"Points\", \"key\": \"points\", \"value\": \"Points\", \"pt\": \"Pontos\"}, "
" {\"en\": \"Heatmap\", \"key\": \"heatmap\", \"value\": \"Heatmap\", \"pt\": \"Mapa de calor (heatmap)\"},"
" {\"en\": \"Polygon (Geo JSON)\", \"key\": \"polygon\", \"value\": \"Polygon (Geo JSON)\", \"pt\": "
"\"Polígono (Geo JSON)\"}]' "
"WHERE id = 332;"),
# execute-sql
('UPDATE operation_port SET slug="input data 1" WHERE id = 4025;', ''),
# read-shapefile
("UPDATE operation_form_field SET type='INTEGER', suggested_widget='lookup', "
"values_url='`${LIMONERO_URL}/datasources?format=SHAPEFILE&simple=true&list=true&enabled=1`' WHERE id=3104",
"UPDATE operation_form_field SET type='TEXT', suggested_widget='text', values_url='' WHERE id=3104"),
]
def upgrade():
ctx = context.get_context()
session = sessionmaker(bind=ctx.bind)()
connection = session.connection()
try:
for cmd in all_commands:
if isinstance(cmd[0], str):
cmds = cmd[0].split(';')
for new_cmd in cmds:
if new_cmd.strip():
connection.execute(new_cmd)
elif isinstance(cmd[0], list):
for row in cmd[0]:
connection.execute(row)
else:
cmd[0]()
except:
session.rollback()
raise
session.commit()
def downgrade():
ctx = context.get_context()
session = sessionmaker(bind=ctx.bind)()
connection = session.connection()
try:
for cmd in reversed(all_commands):
if isinstance(cmd[1], str):
cmds = cmd[1].split(';')
for new_cmd in cmds:
if new_cmd.strip():
connection.execute(new_cmd)
elif isinstance(cmd[1], list):
for row in cmd[1]:
connection.execute(row)
else:
cmd[1]()
except:
session.rollback()
raise
session.commit()
| apache-2.0 |
vermouthmjl/scikit-learn | sklearn/cross_validation.py | 4 | 67659 |
"""
The :mod:`sklearn.cross_validation` module includes utilities for cross-
validation and performance evaluation.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>,
# Gael Varoquaux <gael.varoquaux@normalesup.org>,
# Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
import warnings
from itertools import chain, combinations
from math import ceil, floor, factorial
import numbers
import time
from abc import ABCMeta, abstractmethod
import numpy as np
import scipy.sparse as sp
from .base import is_classifier, clone
from .utils import indexable, check_random_state, safe_indexing
from .utils.validation import (_is_arraylike, _num_samples,
column_or_1d)
from .utils.multiclass import type_of_target
from .externals.joblib import Parallel, delayed, logger
from .externals.six import with_metaclass
from .externals.six.moves import zip
from .metrics.scorer import check_scoring
from .utils.fixes import bincount
from .gaussian_process.kernels import Kernel as GPKernel
from .exceptions import FitFailedWarning
warnings.warn("This module has been deprecated in favor of the "
"model_selection module into which all the refactored classes "
"and functions are moved. Also note that the interface of the "
"new CV iterators are different from that of this module. "
"This module will be removed in 0.20.", DeprecationWarning)
__all__ = ['KFold',
'LabelKFold',
'LeaveOneLabelOut',
'LeaveOneOut',
'LeavePLabelOut',
'LeavePOut',
'ShuffleSplit',
'StratifiedKFold',
'StratifiedShuffleSplit',
'PredefinedSplit',
'LabelShuffleSplit',
'check_cv',
'cross_val_score',
'cross_val_predict',
'permutation_test_score',
'train_test_split']
class _PartitionIterator(with_metaclass(ABCMeta)):
"""Base class for CV iterators where train_mask = ~test_mask
Implementations must define `_iter_test_masks` or `_iter_test_indices`.
Parameters
----------
n : int
Total number of elements in dataset.
"""
def __init__(self, n):
if abs(n - int(n)) >= np.finfo('f').eps:
raise ValueError("n must be an integer")
self.n = int(n)
def __iter__(self):
ind = np.arange(self.n)
for test_index in self._iter_test_masks():
train_index = np.logical_not(test_index)
train_index = ind[train_index]
test_index = ind[test_index]
yield train_index, test_index
# Since subclasses must implement either _iter_test_masks or
# _iter_test_indices, neither can be abstract.
def _iter_test_masks(self):
"""Generates boolean masks corresponding to test sets.
By default, delegates to _iter_test_indices()
"""
for test_index in self._iter_test_indices():
test_mask = self._empty_mask()
test_mask[test_index] = True
yield test_mask
def _iter_test_indices(self):
"""Generates integer indices corresponding to test sets."""
raise NotImplementedError
def _empty_mask(self):
return np.zeros(self.n, dtype=np.bool)
class LeaveOneOut(_PartitionIterator):
"""Leave-One-Out cross validation iterator.
Provides train/test indices to split data in train test sets. Each
sample is used once as a test set (singleton) while the remaining
samples form the training set.
Note: ``LeaveOneOut(n)`` is equivalent to ``KFold(n, n_folds=n)`` and
``LeavePOut(n, p=1)``.
Due to the high number of test sets (which is the same as the
number of samples) this cross validation method can be very costly.
For large datasets one should favor KFold, StratifiedKFold or
ShuffleSplit.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in dataset.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4]])
>>> y = np.array([1, 2])
>>> loo = cross_validation.LeaveOneOut(2)
>>> len(loo)
2
>>> print(loo)
sklearn.cross_validation.LeaveOneOut(n=2)
>>> for train_index, test_index in loo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [1] TEST: [0]
[[3 4]] [[1 2]] [2] [1]
TRAIN: [0] TEST: [1]
[[1 2]] [[3 4]] [1] [2]
See also
--------
LeaveOneLabelOut for splitting the data according to explicit,
domain-specific stratification of the dataset.
"""
def _iter_test_indices(self):
return range(self.n)
def __repr__(self):
return '%s.%s(n=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
)
def __len__(self):
return self.n
class LeavePOut(_PartitionIterator):
"""Leave-P-Out cross validation iterator
Provides train/test indices to split data in train test sets. This results
in testing on all distinct samples of size p, while the remaining n - p
samples form the training set in each iteration.
Note: ``LeavePOut(n, p)`` is NOT equivalent to ``KFold(n, n_folds=n // p)``
which creates non-overlapping test sets.
Due to the high number of iterations, which grows combinatorially with the
number of samples, this cross validation method can be very costly. For
large datasets one should favor KFold, StratifiedKFold or ShuffleSplit.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in dataset.
p : int
Size of the test sets.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> lpo = cross_validation.LeavePOut(4, 2)
>>> len(lpo)
6
>>> print(lpo)
sklearn.cross_validation.LeavePOut(n=4, p=2)
>>> for train_index, test_index in lpo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [1 2] TEST: [0 3]
TRAIN: [0 3] TEST: [1 2]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 1] TEST: [2 3]
"""
def __init__(self, n, p):
super(LeavePOut, self).__init__(n)
self.p = p
def _iter_test_indices(self):
for comb in combinations(range(self.n), self.p):
yield np.array(comb)
def __repr__(self):
return '%s.%s(n=%i, p=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.p,
)
def __len__(self):
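# The number of splits is the binomial coefficient C(n, p), computed as
# n! / ((n - p)! * p!); e.g. LeavePOut(4, 2) yields C(4, 2) == 6 splits.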
return int(factorial(self.n) / factorial(self.n - self.p)
/ factorial(self.p))
class _BaseKFold(with_metaclass(ABCMeta, _PartitionIterator)):
"""Base class to validate KFold approaches"""
@abstractmethod
def __init__(self, n, n_folds, shuffle, random_state):
super(_BaseKFold, self).__init__(n)
if abs(n_folds - int(n_folds)) >= np.finfo('f').eps:
raise ValueError("n_folds must be an integer")
self.n_folds = n_folds = int(n_folds)
if n_folds <= 1:
raise ValueError(
"k-fold cross validation requires at least one"
" train / test split by setting n_folds=2 or more,"
" got n_folds={0}.".format(n_folds))
if n_folds > self.n:
raise ValueError(
("Cannot have number of folds n_folds={0} greater"
" than the number of samples: {1}.").format(n_folds, n))
if not isinstance(shuffle, bool):
raise TypeError("shuffle must be True or False;"
" got {0}".format(shuffle))
self.shuffle = shuffle
self.random_state = random_state
class KFold(_BaseKFold):
"""K-Folds cross validation iterator.
Provides train/test indices to split data in train test sets. Split
dataset into k consecutive folds (without shuffling by default).
Each fold is then used as a validation set once while the k - 1 remaining
fold(s) form the training set.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle the data before splitting into batches.
random_state : None, int or RandomState
When shuffle=True, pseudo-random number generator state used for
shuffling. If None, use default numpy RNG for shuffling.
Examples
--------
>>> from sklearn.cross_validation import KFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([1, 2, 3, 4])
>>> kf = KFold(4, n_folds=2)
>>> len(kf)
2
>>> print(kf) # doctest: +NORMALIZE_WHITESPACE
sklearn.cross_validation.KFold(n=4, n_folds=2, shuffle=False,
random_state=None)
>>> for train_index, test_index in kf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [0 1] TEST: [2 3]
Notes
-----
The first n % n_folds folds have size n // n_folds + 1; the remaining folds
have size n // n_folds.
See also
--------
StratifiedKFold: takes label information into account to avoid building
folds with imbalanced class distributions (for binary or multiclass
classification tasks).
LabelKFold: K-fold iterator variant with non-overlapping labels.
"""
def __init__(self, n, n_folds=3, shuffle=False,
random_state=None):
super(KFold, self).__init__(n, n_folds, shuffle, random_state)
self.idxs = np.arange(n)
if shuffle:
rng = check_random_state(self.random_state)
rng.shuffle(self.idxs)
def _iter_test_indices(self):
n = self.n
n_folds = self.n_folds
fold_sizes = (n // n_folds) * np.ones(n_folds, dtype=int)
fold_sizes[:n % n_folds] += 1
current = 0
for fold_size in fold_sizes:
start, stop = current, current + fold_size
yield self.idxs[start:stop]
current = stop
def __repr__(self):
return '%s.%s(n=%i, n_folds=%i, shuffle=%s, random_state=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.n_folds,
self.shuffle,
self.random_state,
)
def __len__(self):
return self.n_folds
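# Minimal sketch (a hypothetical helper with assumed inputs) of the fold-size
# rule stated in the KFold Notes above: the first n % n_folds folds receive
# one extra sample each.
def _demo_kfold_sizes(n=10, n_folds=3):
    sizes = [n // n_folds + (1 if i < n % n_folds else 0)
             for i in range(n_folds)]
    assert sum(sizes) == n
    return sizes  # [4, 3, 3] for n=10, n_folds=3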
class LabelKFold(_BaseKFold):
"""K-fold iterator variant with non-overlapping labels.
The same label will not appear in two different folds (the number of
distinct labels has to be at least equal to the number of folds).
The folds are approximately balanced in the sense that the number of
distinct labels is approximately the same in each fold.
.. versionadded:: 0.17
Parameters
----------
labels : array-like with shape (n_samples, )
Contains a label for each sample.
The folds are built so that the same label does not appear in two
different folds.
n_folds : int, default=3
Number of folds. Must be at least 2.
Examples
--------
>>> from sklearn.cross_validation import LabelKFold
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> labels = np.array([0, 0, 2, 2])
>>> label_kfold = LabelKFold(labels, n_folds=2)
>>> len(label_kfold)
2
>>> print(label_kfold)
sklearn.cross_validation.LabelKFold(n_labels=4, n_folds=2)
>>> for train_index, test_index in label_kfold:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
...
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [3 4]
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [3 4] [1 2]
See also
--------
LeaveOneLabelOut for splitting the data according to explicit,
domain-specific stratification of the dataset.
"""
def __init__(self, labels, n_folds=3):
super(LabelKFold, self).__init__(len(labels), n_folds,
shuffle=False, random_state=None)
unique_labels, labels = np.unique(labels, return_inverse=True)
n_labels = len(unique_labels)
if n_folds > n_labels:
raise ValueError(
("Cannot have number of folds n_folds={0} greater"
" than the number of labels: {1}.").format(n_folds,
n_labels))
# Weight labels by their number of occurrences
n_samples_per_label = np.bincount(labels)
# Distribute the most frequent labels first
indices = np.argsort(n_samples_per_label)[::-1]
n_samples_per_label = n_samples_per_label[indices]
# Total weight of each fold
n_samples_per_fold = np.zeros(n_folds)
# Mapping from label index to fold index
label_to_fold = np.zeros(len(unique_labels))
# Distribute samples by adding the largest weight to the lightest fold
for label_index, weight in enumerate(n_samples_per_label):
lightest_fold = np.argmin(n_samples_per_fold)
n_samples_per_fold[lightest_fold] += weight
label_to_fold[indices[label_index]] = lightest_fold
self.idxs = label_to_fold[labels]
def _iter_test_indices(self):
for f in range(self.n_folds):
yield np.where(self.idxs == f)[0]
def __repr__(self):
return '{0}.{1}(n_labels={2}, n_folds={3})'.format(
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.n_folds,
)
def __len__(self):
return self.n_folds
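# Hedged sketch (hypothetical helper, assumed label counts) of the greedy
# assignment performed in LabelKFold.__init__ above: labels are taken from
# heaviest to lightest and each one goes to the currently lightest fold.
def _demo_label_to_fold(label_counts=(4, 3, 3, 2), n_folds=2):
    import numpy as np
    counts = np.asarray(label_counts)
    order = np.argsort(counts)[::-1]           # heaviest labels first
    fold_weight = np.zeros(n_folds)
    label_to_fold = np.zeros(len(counts), dtype=int)
    for label_index in order:
        lightest = int(np.argmin(fold_weight))
        fold_weight[lightest] += counts[label_index]
        label_to_fold[label_index] = lightest
    return label_to_fold, fold_weight  # (4, 3, 3, 2) balances to [6., 6.]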
class StratifiedKFold(_BaseKFold):
"""Stratified K-Folds cross validation iterator
Provides train/test indices to split data in train test sets.
This cross-validation object is a variation of KFold that
returns stratified folds. The folds are made by preserving
the percentage of samples for each class.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
y : array-like, [n_samples]
Samples to split in K folds.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle each stratification of the data before splitting
into batches.
random_state : None, int or RandomState
When shuffle=True, pseudo-random number generator state used for
shuffling. If None, use default numpy RNG for shuffling.
Examples
--------
>>> from sklearn.cross_validation import StratifiedKFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> skf = StratifiedKFold(y, n_folds=2)
>>> len(skf)
2
>>> print(skf) # doctest: +NORMALIZE_WHITESPACE
sklearn.cross_validation.StratifiedKFold(labels=[0 0 1 1], n_folds=2,
shuffle=False, random_state=None)
>>> for train_index, test_index in skf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [0 2] TEST: [1 3]
Notes
-----
All folds have size trunc(n_samples / n_folds); the last one contains the
remaining samples.
See also
--------
LabelKFold: K-fold iterator variant with non-overlapping labels.
"""
def __init__(self, y, n_folds=3, shuffle=False,
random_state=None):
super(StratifiedKFold, self).__init__(
len(y), n_folds, shuffle, random_state)
y = np.asarray(y)
n_samples = y.shape[0]
unique_labels, y_inversed = np.unique(y, return_inverse=True)
label_counts = bincount(y_inversed)
min_labels = np.min(label_counts)
if np.all(self.n_folds > label_counts):
raise ValueError("All the n_labels for individual classes"
" are less than %d folds."
% (self.n_folds))
if self.n_folds > min_labels:
warnings.warn(("The least populated class in y has only %d"
" members, which is too few. The minimum"
" number of labels for any class cannot"
" be less than n_folds=%d."
% (min_labels, self.n_folds)), Warning)
# don't want to use the same seed in each label's shuffle
if self.shuffle:
rng = check_random_state(self.random_state)
else:
rng = self.random_state
# pre-assign each sample to a test fold index using individual KFold
# splitting strategies for each label so as to respect the
# balance of labels
per_label_cvs = [
KFold(max(c, self.n_folds), self.n_folds, shuffle=self.shuffle,
random_state=rng) for c in label_counts]
test_folds = np.zeros(n_samples, dtype=int)
for test_fold_idx, per_label_splits in enumerate(zip(*per_label_cvs)):
for label, (_, test_split) in zip(unique_labels, per_label_splits):
label_test_folds = test_folds[y == label]
# the test split can be too big because we used
# KFold(max(c, self.n_folds), self.n_folds) instead of
# KFold(c, self.n_folds) to make it possible to not crash even
# if the data is not 100% stratifiable for all the labels
# (we use a warning instead of raising an exception)
# If this is the case, let's trim it:
test_split = test_split[test_split < len(label_test_folds)]
label_test_folds[test_split] = test_fold_idx
test_folds[y == label] = label_test_folds
self.test_folds = test_folds
self.y = y
def _iter_test_masks(self):
for i in range(self.n_folds):
yield self.test_folds == i
def __repr__(self):
return '%s.%s(labels=%s, n_folds=%i, shuffle=%s, random_state=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.y,
self.n_folds,
self.shuffle,
self.random_state,
)
def __len__(self):
return self.n_folds
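# Illustrative check (hypothetical helper on assumed toy data): every
# StratifiedKFold test fold keeps roughly the class proportions of y, unlike
# a plain contiguous KFold.
def _demo_stratified_proportions():
    import numpy as np
    y = np.array([0] * 6 + [1] * 3)            # 2:1 class ratio
    skf = StratifiedKFold(y, n_folds=3)
    return [np.bincount(y[test]) for _, test in skf]
    # each entry is array([2, 1]): the 2:1 ratio survives in every fold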
class LeaveOneLabelOut(_PartitionIterator):
"""Leave-One-Label_Out cross-validation iterator
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 1, 2])
>>> labels = np.array([1, 1, 2, 2])
>>> lol = cross_validation.LeaveOneLabelOut(labels)
>>> len(lol)
2
>>> print(lol)
sklearn.cross_validation.LeaveOneLabelOut(labels=[1 1 2 2])
>>> for train_index, test_index in lol:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [1 2] [1 2]
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [1 2]
See also
--------
LabelKFold: K-fold iterator variant with non-overlapping labels.
"""
def __init__(self, labels):
super(LeaveOneLabelOut, self).__init__(len(labels))
# We make a copy of labels to avoid side-effects during iteration
self.labels = np.array(labels, copy=True)
self.unique_labels = np.unique(labels)
self.n_unique_labels = len(self.unique_labels)
def _iter_test_masks(self):
for i in self.unique_labels:
yield self.labels == i
def __repr__(self):
return '%s.%s(labels=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
)
def __len__(self):
return self.n_unique_labels
class LeavePLabelOut(_PartitionIterator):
"""Leave-P-Label_Out cross-validation iterator
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePLabelOut and LeaveOneLabelOut is that
the former builds the test sets from all the samples assigned to
``p`` different values of the labels, while the latter uses all the
samples assigned to a single label.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
p : int
Number of samples to leave out in the test split.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6]])
>>> y = np.array([1, 2, 1])
>>> labels = np.array([1, 2, 3])
>>> lpl = cross_validation.LeavePLabelOut(labels, p=2)
>>> len(lpl)
3
>>> print(lpl)
sklearn.cross_validation.LeavePLabelOut(labels=[1 2 3], p=2)
>>> for train_index, test_index in lpl:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2] TEST: [0 1]
[[5 6]] [[1 2]
[3 4]] [1] [1 2]
TRAIN: [1] TEST: [0 2]
[[3 4]] [[1 2]
[5 6]] [2] [1 1]
TRAIN: [0] TEST: [1 2]
[[1 2]] [[3 4]
[5 6]] [1] [2 1]
See also
--------
LabelKFold: K-fold iterator variant with non-overlapping labels.
"""
def __init__(self, labels, p):
# We make a copy of labels to avoid side-effects during iteration
super(LeavePLabelOut, self).__init__(len(labels))
self.labels = np.array(labels, copy=True)
self.unique_labels = np.unique(labels)
self.n_unique_labels = len(self.unique_labels)
self.p = p
def _iter_test_masks(self):
comb = combinations(range(self.n_unique_labels), self.p)
for idx in comb:
test_index = self._empty_mask()
idx = np.array(idx)
for l in self.unique_labels[idx]:
test_index[self.labels == l] = True
yield test_index
def __repr__(self):
return '%s.%s(labels=%s, p=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
self.p,
)
def __len__(self):
return int(factorial(self.n_unique_labels) /
factorial(self.n_unique_labels - self.p) /
factorial(self.p))
class BaseShuffleSplit(with_metaclass(ABCMeta)):
"""Base class for ShuffleSplit and StratifiedShuffleSplit"""
def __init__(self, n, n_iter=10, test_size=0.1, train_size=None,
random_state=None):
self.n = n
self.n_iter = n_iter
self.test_size = test_size
self.train_size = train_size
self.random_state = random_state
self.n_train, self.n_test = _validate_shuffle_split(n, test_size,
train_size)
def __iter__(self):
for train, test in self._iter_indices():
yield train, test
return
@abstractmethod
def _iter_indices(self):
"""Generate (train, test) indices"""
class ShuffleSplit(BaseShuffleSplit):
"""Random permutation cross-validation iterator.
Yields indices to split data into training and test sets.
Note: contrary to other cross-validation strategies, random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in the dataset.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn import cross_validation
>>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
... test_size=.25, random_state=0)
>>> len(rs)
3
>>> print(rs)
... # doctest: +ELLIPSIS
ShuffleSplit(4, n_iter=3, test_size=0.25, ...)
>>> for train_index, test_index in rs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [3 1 0] TEST: [2]
TRAIN: [2 1 3] TEST: [0]
TRAIN: [0 2 1] TEST: [3]
>>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
... train_size=0.5, test_size=.25, random_state=0)
>>> for train_index, test_index in rs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [3 1] TEST: [2]
TRAIN: [2 1] TEST: [0]
TRAIN: [0 2] TEST: [3]
"""
def _iter_indices(self):
rng = check_random_state(self.random_state)
for i in range(self.n_iter):
# random partition
permutation = rng.permutation(self.n)
ind_test = permutation[:self.n_test]
ind_train = permutation[self.n_test:self.n_test + self.n_train]
yield ind_train, ind_test
def __repr__(self):
return ('%s(%d, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.n,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
def _validate_shuffle_split(n, test_size, train_size):
if test_size is None and train_size is None:
raise ValueError(
'test_size and train_size can not both be None')
if test_size is not None:
if np.asarray(test_size).dtype.kind == 'f':
if test_size >= 1.:
raise ValueError(
'test_size=%f should be smaller '
'than 1.0 or be an integer' % test_size)
elif np.asarray(test_size).dtype.kind == 'i':
if test_size >= n:
raise ValueError(
'test_size=%d should be smaller '
'than the number of samples %d' % (test_size, n))
else:
raise ValueError("Invalid value for test_size: %r" % test_size)
if train_size is not None:
if np.asarray(train_size).dtype.kind == 'f':
if train_size >= 1.:
raise ValueError("train_size=%f should be smaller "
"than 1.0 or be an integer" % train_size)
elif np.asarray(test_size).dtype.kind == 'f' and \
train_size + test_size > 1.:
raise ValueError('The sum of test_size and train_size = %f, '
'should be smaller than 1.0. Reduce '
'test_size and/or train_size.' %
(train_size + test_size))
elif np.asarray(train_size).dtype.kind == 'i':
if train_size >= n:
raise ValueError("train_size=%d should be smaller "
"than the number of samples %d" %
(train_size, n))
else:
raise ValueError("Invalid value for train_size: %r" % train_size)
if np.asarray(test_size).dtype.kind == 'f':
n_test = ceil(test_size * n)
elif np.asarray(test_size).dtype.kind == 'i':
n_test = float(test_size)
if train_size is None:
n_train = n - n_test
else:
if np.asarray(train_size).dtype.kind == 'f':
n_train = floor(train_size * n)
else:
n_train = float(train_size)
if test_size is None:
n_test = n - n_train
if n_train + n_test > n:
raise ValueError('The sum of train_size and test_size = %d, '
'should be smaller than the number of '
'samples %d. Reduce test_size and/or '
'train_size.' % (n_train + n_test, n))
return int(n_train), int(n_test)
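# Hedged sketch (hypothetical helper) of the resolution rules implemented
# above: floats are proportions, ints are absolute counts, and None is
# filled in as the complement of the other size.
def _demo_validate_shuffle_split():
    assert _validate_shuffle_split(10, test_size=0.1, train_size=None) == (9, 1)
    assert _validate_shuffle_split(10, test_size=2, train_size=None) == (8, 2)
    assert _validate_shuffle_split(10, test_size=None, train_size=0.5) == (5, 5)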
class StratifiedShuffleSplit(BaseShuffleSplit):
"""Stratified ShuffleSplit cross validation iterator
Provides train/test indices to split data in train test sets.
This cross-validation object is a merge of StratifiedKFold and
ShuffleSplit, which returns stratified randomized folds. The folds
are made by preserving the percentage of samples for each class.
Note: like the ShuffleSplit strategy, stratified random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
y : array, [n_samples]
Labels of samples.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn.cross_validation import StratifiedShuffleSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> sss = StratifiedShuffleSplit(y, 3, test_size=0.5, random_state=0)
>>> len(sss)
3
>>> print(sss) # doctest: +ELLIPSIS
StratifiedShuffleSplit(labels=[0 0 1 1], n_iter=3, ...)
>>> for train_index, test_index in sss:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2] TEST: [3 0]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 2] TEST: [3 1]
"""
def __init__(self, y, n_iter=10, test_size=0.1, train_size=None,
random_state=None):
super(StratifiedShuffleSplit, self).__init__(
len(y), n_iter, test_size, train_size, random_state)
self.y = np.array(y)
self.classes, self.y_indices = np.unique(y, return_inverse=True)
n_cls = self.classes.shape[0]
if np.min(bincount(self.y_indices)) < 2:
raise ValueError("The least populated class in y has only 1"
" member, which is too few. The minimum"
" number of labels for any class cannot"
" be less than 2.")
if self.n_train < n_cls:
raise ValueError('The train_size = %d should be greater or '
'equal to the number of classes = %d' %
(self.n_train, n_cls))
if self.n_test < n_cls:
raise ValueError('The test_size = %d should be greater or '
'equal to the number of classes = %d' %
(self.n_test, n_cls))
def _iter_indices(self):
rng = check_random_state(self.random_state)
cls_count = bincount(self.y_indices)
p_i = cls_count / float(self.n)
n_i = np.round(self.n_train * p_i).astype(int)
t_i = np.minimum(cls_count - n_i,
np.round(self.n_test * p_i).astype(int))
for n in range(self.n_iter):
train = []
test = []
for i, cls in enumerate(self.classes):
permutation = rng.permutation(cls_count[i])
cls_i = np.where((self.y == cls))[0][permutation]
train.extend(cls_i[:n_i[i]])
test.extend(cls_i[n_i[i]:n_i[i] + t_i[i]])
# Because of rounding issues (n_train and n_test are not
# divisors of the number of elements per class), we may end
# up here with fewer samples in train and test than asked for.
if len(train) + len(test) < self.n_train + self.n_test:
# Complete the folds by randomly assigning the missing indices
missing_idx = np.where(bincount(train + test,
minlength=len(self.y)) == 0,
)[0]
missing_idx = rng.permutation(missing_idx)
n_missing_train = self.n_train - len(train)
n_missing_test = self.n_test - len(test)
if n_missing_train > 0:
train.extend(missing_idx[:n_missing_train])
if n_missing_test > 0:
test.extend(missing_idx[-n_missing_test:])
train = rng.permutation(train)
test = rng.permutation(test)
yield train, test
def __repr__(self):
return ('%s(labels=%s, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.y,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
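# Sketch (hypothetical helper with assumed class counts) of the per-class
# allocation in _iter_indices above: each class contributes to the train and
# test sets in proportion to its frequency, capped by its own size.
def _demo_stratified_allocation(cls_count=(6, 4), n_train=5, n_test=5):
    import numpy as np
    cls_count = np.asarray(cls_count, dtype=float)
    p_i = cls_count / cls_count.sum()
    n_i = np.round(n_train * p_i).astype(int)          # train draws per class
    t_i = np.minimum(cls_count - n_i,
                     np.round(n_test * p_i).astype(int))  # test draws per class
    return n_i, t_i  # roughly ([3, 2], [3., 2.]) for the defaults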
class PredefinedSplit(_PartitionIterator):
"""Predefined split cross validation iterator
Splits the data into training/test set folds according to a predefined
scheme. Each sample can be assigned to at most one test set fold, as
specified by the user through the ``test_fold`` parameter.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
test_fold : "array-like, shape (n_samples,)
test_fold[i] gives the test set fold of sample i. A value of -1
indicates that the corresponding sample is not part of any test set
folds, but will instead always be put into the training fold.
Examples
--------
>>> from sklearn.cross_validation import PredefinedSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> ps = PredefinedSplit(test_fold=[0, 1, -1, 1])
>>> len(ps)
2
>>> print(ps) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
sklearn.cross_validation.PredefinedSplit(test_fold=[ 0 1 -1 1])
>>> for train_index, test_index in ps:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2 3] TEST: [0]
TRAIN: [0 2] TEST: [1 3]
"""
def __init__(self, test_fold):
super(PredefinedSplit, self).__init__(len(test_fold))
self.test_fold = np.array(test_fold, dtype=int)
self.test_fold = column_or_1d(self.test_fold)
self.unique_folds = np.unique(self.test_fold)
self.unique_folds = self.unique_folds[self.unique_folds != -1]
def _iter_test_indices(self):
for f in self.unique_folds:
yield np.where(self.test_fold == f)[0]
def __repr__(self):
return '%s.%s(test_fold=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.test_fold)
def __len__(self):
return len(self.unique_folds)
class LabelShuffleSplit(ShuffleSplit):
"""Shuffle-Labels-Out cross-validation iterator
Provides randomized train/test indices to split data according to a
third-party provided label. This label information can be used to encode
arbitrary domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePLabelOut and LabelShuffleSplit is that
the former generates splits using all subsets of size ``p`` unique labels,
whereas LabelShuffleSplit generates a user-determined number of random
test splits, each with a user-determined fraction of unique labels.
For example, a less computationally intensive alternative to
``LeavePLabelOut(labels, p=10)`` would be
``LabelShuffleSplit(labels, test_size=10, n_iter=100)``.
Note: The parameters ``test_size`` and ``train_size`` refer to labels, and
not to samples, as in ShuffleSplit.
.. versionadded:: 0.17
Parameters
----------
labels : array, [n_samples]
Labels of samples
n_iter : int (default 5)
Number of re-shuffling and splitting iterations.
test_size : float (default 0.2), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the labels to include in the test split. If
int, represents the absolute number of test labels. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the labels to include in the train split. If
int, represents the absolute number of train labels. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
"""
def __init__(self, labels, n_iter=5, test_size=0.2, train_size=None,
random_state=None):
classes, label_indices = np.unique(labels, return_inverse=True)
super(LabelShuffleSplit, self).__init__(
len(classes),
n_iter=n_iter,
test_size=test_size,
train_size=train_size,
random_state=random_state)
self.labels = labels
self.classes = classes
self.label_indices = label_indices
def __repr__(self):
return ('%s(labels=%s, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.labels,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
def _iter_indices(self):
for label_train, label_test in super(LabelShuffleSplit,
self)._iter_indices():
# these are the indices of classes in the partition
# invert them into data indices
train = np.flatnonzero(np.in1d(self.label_indices, label_train))
test = np.flatnonzero(np.in1d(self.label_indices, label_test))
yield train, test
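# Minimal sketch (hypothetical helper on toy labels) of the inversion done in
# _iter_indices above: a split computed over *unique labels* is expanded back
# to the matching sample indices.
def _demo_label_indices_to_samples():
    import numpy as np
    labels = np.array(['a', 'a', 'b', 'c', 'c'])
    classes, label_indices = np.unique(labels, return_inverse=True)
    label_test = np.array([0, 2])              # classes 'a' and 'c' in test
    test = np.flatnonzero(np.in1d(label_indices, label_test))
    return test  # array([0, 1, 3, 4]): every sample of 'a' and 'c'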
##############################################################################
def _index_param_value(X, v, indices):
"""Private helper function for parameter value indexing."""
if not _is_arraylike(v) or _num_samples(v) != _num_samples(X):
# pass through: skip indexing
return v
if sp.issparse(v):
v = v.tocsr()
return safe_indexing(v, indices)
def cross_val_predict(estimator, X, y=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
"""Generate cross-validated estimates for each input data point
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer to the :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
preds : ndarray
This is the result of calling 'predict'
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
preds_blocks = parallel(delayed(_fit_and_predict)(clone(estimator), X, y,
train, test, verbose,
fit_params)
for train, test in cv)
preds = [p for p, _ in preds_blocks]
locs = np.concatenate([loc for _, loc in preds_blocks])
if not _check_is_partition(locs, _num_samples(X)):
raise ValueError('cross_val_predict only works for partitions')
inv_locs = np.empty(len(locs), dtype=int)
inv_locs[locs] = np.arange(len(locs))
# Check for sparse predictions
if sp.issparse(preds[0]):
preds = sp.vstack(preds, format=preds[0].format)
else:
preds = np.concatenate(preds)
return preds[inv_locs]
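# Hedged sketch (hypothetical helper, assumed values) of the reordering trick
# used above: predictions come back in fold order, and inv_locs is the inverse
# permutation that restores the original sample order.
def _demo_inverse_permutation():
    import numpy as np
    locs = np.array([2, 0, 3, 1])           # sample index of each prediction
    preds = np.array([20., 0., 30., 10.])   # predictions in fold order
    inv_locs = np.empty(len(locs), dtype=int)
    inv_locs[locs] = np.arange(len(locs))
    return preds[inv_locs]  # array([ 0., 10., 20., 30.]) -- original order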
def _fit_and_predict(estimator, X, y, train, test, verbose, fit_params):
"""Fit estimator and predict values for a given dataset split.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
Returns
-------
preds : sequence
Result of calling 'estimator.predict'
test : array-like
This is the value of the test parameter
"""
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, _ = _safe_split(estimator, X, y, test, train)
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
preds = estimator.predict(X_test)
return preds, test
def _check_is_partition(locs, n):
"""Check whether locs is a reordering of the array np.arange(n)
Parameters
----------
locs : ndarray
integer array to test
n : int
number of expected elements
Returns
-------
is_partition : bool
True iff sorted(locs) is range(n)
"""
if len(locs) != n:
return False
hit = np.zeros(n, bool)
hit[locs] = True
if not np.all(hit):
return False
return True
def cross_val_score(estimator, X, y=None, scoring=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
"""Evaluate a score by cross-validation
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer to the :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
scores : array of float, shape=(len(list(cv)),)
Array of scores of the estimator for each run of the cross validation.
See Also
---------
:func:`sklearn.metrics.make_scorer`:
Make a scorer from a performance metric or loss function.
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
scores = parallel(delayed(_fit_and_score)(clone(estimator), X, y, scorer,
train, test, verbose, None,
fit_params)
for train, test in cv)
return np.array(scores)[:, 0]
def _fit_and_score(estimator, X, y, scorer, train, test, verbose,
parameters, fit_params, return_train_score=False,
return_parameters=False, error_score='raise'):
"""Fit estimator and compute scores for a given dataset split.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scorer : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
parameters : dict or None
Parameters to be set on the estimator.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
return_train_score : boolean, optional, default: False
Compute and return score on training set.
return_parameters : boolean, optional, default: False
Return the parameters that have been used for the estimator.
Returns
-------
train_score : float, optional
Score on training set, returned only if `return_train_score` is `True`.
test_score : float
Score on test set.
n_test_samples : int
Number of test samples.
scoring_time : float
Time spent for fitting and scoring in seconds.
parameters : dict or None, optional
The parameters that have been evaluated.
"""
if verbose > 1:
if parameters is None:
msg = "no parameters to be set"
else:
msg = '%s' % (', '.join('%s=%s' % (k, v)
for k, v in parameters.items()))
print("[CV] %s %s" % (msg, (64 - len(msg)) * '.'))
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
if parameters is not None:
estimator.set_params(**parameters)
start_time = time.time()
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
try:
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
except Exception as e:
if error_score == 'raise':
raise
elif isinstance(error_score, numbers.Number):
test_score = error_score
if return_train_score:
train_score = error_score
warnings.warn("Classifier fit failed. The score on this train-test"
" partition for these parameters will be set to %f. "
"Details: \n%r" % (error_score, e), FitFailedWarning)
else:
raise ValueError("error_score must be the string 'raise' or a"
" numeric value. (Hint: if using 'raise', please"
" make sure that it has been spelled correctly.)"
)
else:
test_score = _score(estimator, X_test, y_test, scorer)
if return_train_score:
train_score = _score(estimator, X_train, y_train, scorer)
scoring_time = time.time() - start_time
if verbose > 2:
msg += ", score=%f" % test_score
if verbose > 1:
end_msg = "%s -%s" % (msg, logger.short_format_time(scoring_time))
print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg))
ret = [train_score] if return_train_score else []
ret.extend([test_score, _num_samples(X_test), scoring_time])
if return_parameters:
ret.append(parameters)
return ret
def _safe_split(estimator, X, y, indices, train_indices=None):
"""Create subset of dataset and properly handle kernels."""
if hasattr(estimator, 'kernel') and callable(estimator.kernel) \
and not isinstance(estimator.kernel, GPKernel):
# cannot compute the kernel values with custom function
raise ValueError("Cannot use a custom kernel function. "
"Precompute the kernel matrix instead.")
if not hasattr(X, "shape"):
if getattr(estimator, "_pairwise", False):
raise ValueError("Precomputed kernels or affinity matrices have "
"to be passed as arrays or sparse matrices.")
X_subset = [X[idx] for idx in indices]
else:
if getattr(estimator, "_pairwise", False):
# X is a precomputed square kernel matrix
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square kernel matrix")
if train_indices is None:
X_subset = X[np.ix_(indices, indices)]
else:
X_subset = X[np.ix_(indices, train_indices)]
else:
X_subset = safe_indexing(X, indices)
if y is not None:
y_subset = safe_indexing(y, indices)
else:
y_subset = None
return X_subset, y_subset
def _score(estimator, X_test, y_test, scorer):
"""Compute the score of an estimator on a given test set."""
if y_test is None:
score = scorer(estimator, X_test)
else:
score = scorer(estimator, X_test, y_test)
if not isinstance(score, numbers.Number):
raise ValueError("scoring must return a number, got %s (%s) instead."
% (str(score), type(score)))
return score
def _permutation_test_score(estimator, X, y, cv, scorer):
"""Auxiliary function for permutation_test_score"""
avg_score = []
for train, test in cv:
estimator.fit(X[train], y[train])
avg_score.append(scorer(estimator, X[test], y[test]))
return np.mean(avg_score)
def _shuffle(y, labels, random_state):
"""Return a shuffled copy of y eventually shuffle among same labels."""
if labels is None:
ind = random_state.permutation(len(y))
else:
ind = np.arange(len(labels))
for label in np.unique(labels):
this_mask = (labels == label)
ind[this_mask] = random_state.permutation(ind[this_mask])
return y[ind]
def check_cv(cv, X=None, y=None, classifier=False):
"""Input checker utility for building a CV in a user friendly way.
Parameters
----------
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if classifier is True and ``y`` is binary or
multiclass, :class:`StratifiedKFold` is used. In all other cases,
:class:`KFold` is used.
Refer to the :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
X : array-like
The data the cross-val object will be applied on.
y : array-like
The target variable for a supervised learning problem.
classifier : boolean optional
Whether the task is a classification task, in which case
stratified KFold will be used.
Returns
-------
checked_cv: a cross-validation generator instance.
The return value is guaranteed to be a cv generator instance, whatever
the input type.
"""
is_sparse = sp.issparse(X)
if cv is None:
cv = 3
if isinstance(cv, numbers.Integral):
if classifier:
if type_of_target(y) in ['binary', 'multiclass']:
cv = StratifiedKFold(y, cv)
else:
cv = KFold(_num_samples(y), cv)
else:
if not is_sparse:
n_samples = len(X)
else:
n_samples = X.shape[0]
cv = KFold(n_samples, cv)
return cv
def permutation_test_score(estimator, X, y, cv=None,
n_permutations=100, n_jobs=1, labels=None,
random_state=0, verbose=0, scoring=None):
"""Evaluate the significance of a cross-validated score with permutations
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer to the :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_permutations : integer, optional
Number of times to permute ``y``.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
labels : array-like of shape [n_samples] (optional)
Labels constrain the permutation among groups of samples with
the same label.
random_state : RandomState or an int seed (0 by default)
A random number generator instance to define the state of the
random permutations generator.
verbose : integer, optional
The verbosity level.
Returns
-------
score : float
The true score without permuting targets.
permutation_scores : array, shape (n_permutations,)
The scores obtained for each permutation.
pvalue : float
The returned value equals p-value if `scoring` returns bigger
numbers for better scores (e.g., accuracy_score). If `scoring` is
rather a loss function (i.e. when lower is better such as with
`mean_squared_error`) then this is actually the complement of the
p-value: 1 - p-value.
Notes
-----
This function implements Test 1 in:
Ojala and Garriga. Permutation Tests for Studying Classifier
Performance. The Journal of Machine Learning Research (2010)
vol. 11
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
random_state = check_random_state(random_state)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
score = _permutation_test_score(clone(estimator), X, y, cv, scorer)
permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_permutation_test_score)(
clone(estimator), X, _shuffle(y, labels, random_state), cv,
scorer)
for _ in range(n_permutations))
permutation_scores = np.array(permutation_scores)
pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1)
return score, permutation_scores, pvalue
permutation_test_score.__test__ = False  # to avoid a problem with nosetests
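# Worked sketch (hypothetical helper, assumed scores) of the p-value formula
# above: the "+ 1" terms count the unpermuted score itself, which keeps the
# estimate strictly positive (Test 1 in Ojala & Garriga, 2010).
def _demo_permutation_pvalue():
    import numpy as np
    score = 0.9
    permutation_scores = np.array([0.5, 0.6, 0.95, 0.4])
    pvalue = ((np.sum(permutation_scores >= score) + 1.0)
              / (len(permutation_scores) + 1))
    return pvalue  # (1 + 1) / 5 = 0.4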
def train_test_split(*arrays, **options):
"""Split arrays or matrices into random train and test subsets
Quick utility that wraps input validation and
``next(iter(ShuffleSplit(n_samples)))`` and application to input
data into a single call for splitting (and optionally subsampling)
data in a one-liner.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
*arrays : sequence of indexables with same length / shape[0]
allowed inputs are lists, numpy arrays, scipy-sparse
matrices or pandas dataframes.
.. versionadded:: 0.16
preserves input type instead of always casting to numpy array.
test_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
If train size is also None, test size is set to 0.25.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
stratify : array-like or None (default is None)
If not None, data is split in a stratified fashion, using this as
the labels array.
.. versionadded:: 0.17
*stratify* splitting
Returns
-------
splitting : list, length = 2 * len(arrays),
List containing train-test split of inputs.
.. versionadded:: 0.16
Output type is the same as the input type.
Examples
--------
>>> import numpy as np
>>> from sklearn.cross_validation import train_test_split
>>> X, y = np.arange(10).reshape((5, 2)), range(5)
>>> X
array([[0, 1],
[2, 3],
[4, 5],
[6, 7],
[8, 9]])
>>> list(y)
[0, 1, 2, 3, 4]
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, test_size=0.33, random_state=42)
...
>>> X_train
array([[4, 5],
[0, 1],
[6, 7]])
>>> y_train
[2, 0, 3]
>>> X_test
array([[2, 3],
[8, 9]])
>>> y_test
[1, 4]
"""
n_arrays = len(arrays)
if n_arrays == 0:
raise ValueError("At least one array required as input")
test_size = options.pop('test_size', None)
train_size = options.pop('train_size', None)
random_state = options.pop('random_state', None)
stratify = options.pop('stratify', None)
if options:
raise TypeError("Invalid parameters passed: %s" % str(options))
if test_size is None and train_size is None:
test_size = 0.25
arrays = indexable(*arrays)
if stratify is not None:
cv = StratifiedShuffleSplit(stratify, test_size=test_size,
train_size=train_size,
random_state=random_state)
else:
n_samples = _num_samples(arrays[0])
cv = ShuffleSplit(n_samples, test_size=test_size,
train_size=train_size,
random_state=random_state)
train, test = next(iter(cv))
return list(chain.from_iterable((safe_indexing(a, train),
safe_indexing(a, test)) for a in arrays))
train_test_split.__test__ = False  # to avoid a problem with nosetests
| bsd-3-clause |
blond-admin/BLonD | blond/toolbox/filters_and_fitting.py | 2 | 7393 | # coding: utf-8
# Copyright 2017 CERN. This software is distributed under the
# terms of the GNU General Public Licence version 3 (GPL Version 3),
# copied verbatim in the file LICENCE.md.
# In applying this licence, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization or
# submit itself to any jurisdiction.
# Project website: http://blond.web.cern.ch/
'''
**Fitting and filters routines to be used alone or with the Profile class in
the beam package. **
:Authors: **Danilo Quartullo**, **Alexandre Lasheen**,
**Juan F. Esteban Mueller**
'''
import numpy as np
from scipy.signal import cheb2ord, cheby2, filtfilt, freqz
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
def beam_profile_filter_chebyshev(Y_array, X_array, filter_option):
"""
This routine filters the beam profile with a type II Chebyshev
filter. The input is a dictionary with the following structure and
information:
filter_option = {'type':'chebyshev', 'pass_frequency':pass_frequency,
'stop_frequency':stop_frequency, 'gain_pass':gain_pass,
'gain_stop':gain_stop}
The function returns the filtered profile. You can also add the following
option to plot the filter transfer function:
filter_option = {..., 'transfer_function_plot':True}
"""
noisyProfile = np.array(Y_array)
freqSampling = 1 / (X_array[1] - X_array[0])
nyqFreq = freqSampling / 2.
frequencyPass = filter_option['pass_frequency'] / nyqFreq
frequencyStop = filter_option['stop_frequency'] / nyqFreq
gainPass = filter_option['gain_pass']
gainStop = filter_option['gain_stop']
# Compute the lowest order for a Chebyshev Type II digital filter
nCoefficients, wn = cheb2ord(frequencyPass, frequencyStop, gainPass,
gainStop)
# Compute the coefficients a Chebyshev Type II digital filter
b, a = cheby2(nCoefficients, gainStop, wn, btype='low')
# Apply the filter forward and backwards to cancel the group delay
Y_array = filtfilt(b, a, noisyProfile)
Y_array = np.ascontiguousarray(Y_array)
if (('transfer_function_plot' in filter_option)
and filter_option['transfer_function_plot']):
# Plot the filter transfer function
w, transferGain = freqz(b, a=a, worN=len(Y_array))
transferFreq = w / np.pi * nyqFreq
group_delay = -np.diff(-np.unwrap(-np.angle(transferGain))) / \
-np.diff(w*freqSampling)
plt.figure()
ax1 = plt.subplot(311)
plt.plot(transferFreq, 20 * np.log10(abs(transferGain)))
plt.ylabel('Magnitude [dB]')
plt.subplot(312, sharex=ax1)
plt.plot(transferFreq, np.unwrap(-np.angle(transferGain)))
plt.ylabel('Phase [rad]')
plt.subplot(313, sharex=ax1)
plt.plot(transferFreq[:-1], group_delay)
plt.ylabel('Group delay [s]')
plt.xlabel('Frequency [Hz]')
# Plot the bunch spectrum and the filter transfer function
plt.figure()
plt.plot(
np.fft.fftfreq(len(Y_array), X_array[1]-X_array[0]),
20.*np.log10(np.abs(np.fft.fft(noisyProfile))))
plt.xlabel('Frequency [Hz]')
plt.twinx()
plt.plot(transferFreq, 20 * np.log10(abs(transferGain)), 'r')
plt.xlim(0, plt.xlim()[1])
plt.show()
return Y_array
else:
return Y_array
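# Illustrative sketch (hypothetical helper on a synthetic profile, with
# assumed cut-off frequencies) of the cheb2ord/cheby2/filtfilt chain used
# above; filtfilt runs the filter forwards and backwards, so the filtered
# profile has zero group delay with respect to the input.
def _demo_chebyshev_zero_phase():
    rng = np.random.RandomState(0)
    time = np.linspace(0, 1e-6, 1000)
    clean = np.exp(-(time - 5e-7)**2 / (2 * (5e-8)**2))   # Gaussian bunch
    noisy = clean + 0.05 * rng.randn(len(time))
    n, wn = cheb2ord(wp=0.1, ws=0.3, gpass=1, gstop=40)   # normalized freqs
    b, a = cheby2(n, 40, wn, btype='low')
    return filtfilt(b, a, noisy)  # smoothed profile, aligned with `clean`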
def gaussian_fit(Y_array, X_array, p0):
"""
Gaussian fit of the profile, in order to get the bunch length and
position. Returns fit values in units of s.
"""
return curve_fit(gauss, X_array, Y_array, p0)[0]
def gauss(x, *p):
"""
Defined as:
.. math:: A \, e^{-\\frac{\\left(x-x_0\\right)^2}{2\\sigma_x^2}}
"""
A, x0, sx = p
return A*np.exp(-(x-x0)**2/2./sx**2)
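# Hedged end-to-end sketch (hypothetical helper) of gaussian_fit/gauss above
# on synthetic data; the amplitude, centre and sigma below are assumed values
# chosen for illustration only.
def _demo_gaussian_fit():
    rng = np.random.RandomState(0)
    x = np.linspace(-1e-9, 1e-9, 200)
    y = gauss(x, 2.0, 1e-10, 2e-10) + 0.01 * rng.randn(len(x))
    A, x0, sx = gaussian_fit(y, x, p0=[1.0, 0.0, 1e-10])
    return A, x0, sx  # should recover roughly (2.0, 1e-10, 2e-10)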
def rms(Y_array, X_array):
"""
Computation of the RMS bunch length and position from the line
density (bunch length = 4sigma).
"""
timeResolution = X_array[1]-X_array[0]
lineDenNormalized = Y_array / np.trapz(Y_array, dx=timeResolution)
bp_rms = np.trapz(X_array * lineDenNormalized, dx=timeResolution)
bl_rms = 4 * np.sqrt(
np.trapz((X_array-bp_rms)**2 * lineDenNormalized, dx=timeResolution))
return bp_rms, bl_rms
def fwhm(Y_array, X_array, shift=0):
"""
Computation of the bunch length and position from the FWHM
assuming Gaussian line density.
"""
half_max = shift + 0.5 * (Y_array.max() - shift)
# First approximation for the half maximum values
taux = np.where(Y_array >= half_max)
t1 = taux[0][0]
t2 = taux[0][-1]
# Interpolation of the time where the line density is half the maximum
bin_size = X_array[1]-X_array[0]
try:
t_left = X_array[t1] - bin_size * \
(Y_array[t1] - half_max) / \
(Y_array[t1] - Y_array[t1-1])
t_right = X_array[t2] + bin_size * \
(Y_array[t2] - half_max) / \
(Y_array[t2]-Y_array[t2+1])
bl_fwhm = 4 * (t_right-t_left) / (2 * np.sqrt(2 * np.log(2)))
bp_fwhm = (t_left+t_right)/2
except Exception:
bl_fwhm = np.nan
bp_fwhm = np.nan
return bp_fwhm, bl_fwhm
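# Worked relation behind the conversion above, assuming a Gaussian profile:
# FWHM = 2*sqrt(2*ln 2)*sigma, so the 4-sigma bunch length is
# 4*sigma = 4 * FWHM / (2*sqrt(2*ln 2)), exactly the bl_fwhm expression.
# (Hypothetical helper for illustration only.)
def _demo_fwhm_to_four_sigma(sigma=1.0):
    fwhm_value = 2 * np.sqrt(2 * np.log(2)) * sigma
    return 4 * fwhm_value / (2 * np.sqrt(2 * np.log(2)))  # == 4 * sigma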
def fwhm_multibunch(Y_array, X_array, n_bunches,
bunch_spacing_buckets, bucket_size_tau,
bucket_tolerance=0.40, shift=0):
"""
Computation of the bunch length and position from the FWHM
assuming Gaussian line density for multibunch case.
"""
bl_fwhm = np.zeros(n_bunches)
bp_fwhm = np.zeros(n_bunches)
for indexBunch in range(0, n_bunches):
left_edge = indexBunch * bunch_spacing_buckets * bucket_size_tau -\
bucket_tolerance * bucket_size_tau
right_edge = indexBunch * bunch_spacing_buckets * bucket_size_tau +\
bucket_size_tau + bucket_tolerance * bucket_size_tau
indexes_bucket = np.where((X_array > left_edge) *
(X_array < right_edge))[0]
bp_fwhm[indexBunch], bl_fwhm[indexBunch] = fwhm(
Y_array[indexes_bucket], X_array[indexes_bucket], shift)
return bp_fwhm, bl_fwhm
def rms_multibunch(Y_array, X_array, n_bunches,
bunch_spacing_buckets, bucket_size_tau,
bucket_tolerance=0.40):
"""
Computation of the rms bunch length (4sigma) and position.
"""
bl_rms = np.zeros(n_bunches)
bp_rms = np.zeros(n_bunches)
for indexBunch in range(0, n_bunches):
left_edge = indexBunch * bunch_spacing_buckets * bucket_size_tau -\
bucket_tolerance * bucket_size_tau
right_edge = indexBunch * bunch_spacing_buckets * bucket_size_tau +\
bucket_size_tau + bucket_tolerance * bucket_size_tau
indexes_bucket = np.where((X_array > left_edge) *
(X_array < right_edge))[0]
bp_rms[indexBunch], bl_rms[indexBunch] = rms(
Y_array[indexes_bucket],
X_array[indexes_bucket])
return bp_rms, bl_rms
| gpl-3.0 |
klim-/pyplane | gui/App_PyPlane.py | 1 | 21182 | # -*- coding: utf-8 -*-
# Copyright (C) 2013
# by Klemens Fritzsche, pyplane@leckstrom.de
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
__author__ = 'Klemens Fritzsche'
# this file contains the central class that inherits from the base gui class (VIEW) that
# was created using qt4-designer and pyuic4
# the class PyplaneMainWindow represents the CONTROLLER element of the MVC structure
from PyQt4 import QtGui
from PyQt4 import QtCore
import traceback
import sys
import os
import sympy as sp
from IPython import embed
import numpy as np
import ast
from Ui_PyPlane import Ui_pyplane
from core.Logging import myLogger
from core.ConfigHandler import myConfig
from core.System import System
import core.PyPlaneHelpers as myHelpers
from gui.Widgets import SettingsWidget
def handle_exception(error):
myLogger.error_message("Error: An Python Exception occured.")
myLogger.debug_message(str(type(error)))
myLogger.debug_message(str(error))
myLogger.message("See the log file config/logmessages.txt for full traceback ")
exc_type, exc_value, exc_tb = sys.exc_info()
lines = traceback.format_exception(exc_type, exc_value, exc_tb)
tb_msg = "".join(lines)
myLogger.append_to_file(tb_msg)
class PyplaneMainWindow(QtGui.QMainWindow, Ui_pyplane):
def __init__(self, parent=None):
super(PyplaneMainWindow, self).__init__()
QtGui.QWidget.__init__(self, parent)
self.setupUi(self)
self.setWindowTitle('PyPlane')
myLogger.register_output(self.logField)
# Check if LaTeX and dvipng is installed on the system. This
# is required in order to ensure that advanced formatting in
# matplotlib works correctly (\left, \begin{array} etc.)
self.latex_installed = myHelpers.check_if_latex()
# Embed SettingsWidget:
self.mySettings = SettingsWidget()
self.SettingsLayout.addWidget(self.mySettings)
self.fct_stack = []
self.linearization_stack = []
self.systems = []
self.xDotLabel.setText(u"\u1E8B(x,y) = ")
self.yDotLabel.setText(u"\u1E8F(x,y) = ")
try:
test = myConfig.read("Test", "test_var")
except:
test = "Could not load config file. Please check existence"
myLogger.debug_message("Loading config file: " + test)
def init(self):
""" This function gets called only after program start.
"""
# load tmp file
try:
self.load_tmp_system()
self.disable_menu_items()
except Exception:
pass
def disable_menu_items(self):
# uncheck items
self.toggle_vectorfield_action.setChecked(False)
self.toggle_streamlines_action.setChecked(False)
self.toggle_equilibrium_action.setChecked(False)
self.toggle_nullclines_action.setChecked(False)
# shade out items:
self.toggle_vectorfield_action.setEnabled(False)
self.toggle_streamlines_action.setEnabled(False)
self.toggle_equilibrium_action.setEnabled(False)
if hasattr(self, "linearize_action"):
self.linearize_action.setEnabled(False)
self.toggle_nullclines_action.setEnabled(False)
def update_ui(self):
# unshade items
self.toggle_vectorfield_action.setEnabled(True)
self.toggle_streamlines_action.setEnabled(True)
self.toggle_equilibrium_action.setEnabled(True)
self.toggle_nullclines_action.setEnabled(True)
# check items
system = self.get_current_system()
if hasattr(system, "Phaseplane"):
if hasattr(system.Phaseplane, "VF"):
self.toggle_vectorfield_action.setChecked(system.Phaseplane.VF.tgl)
if hasattr(system.Phaseplane, "SL"):
self.toggle_streamlines_action.setChecked(system.Phaseplane.SL.tgl)
if hasattr(system.Phaseplane, "Equilibria"):
self.toggle_equilibrium_action.setChecked(system.Phaseplane.Equilibria.tgl)
if hasattr(system.Phaseplane, "Nullclines"):
self.toggle_nullclines_action.setChecked(system.Phaseplane.Nullclines.tgl)
# equation:
self.xDotLineEdit.setText(system.equation.x_dot_string)
self.yDotLineEdit.setText(system.equation.y_dot_string)
def initialize_ui(self):
# Gets called after submitting a system (update_ui() cannot be
# used since the new system tab is not visible yet).
# values are taken from config file
self.toggle_vectorfield_action.setChecked(myConfig.get_boolean("Vectorfield", "vf_onByDefault"))
self.toggle_streamlines_action.setChecked(myConfig.get_boolean("Streamlines", "stream_onByDefault"))
self.toggle_equilibrium_action.setChecked(False)
self.toggle_nullclines_action.setChecked(myConfig.get_boolean("Nullclines", "nc_onByDefault"))
def new_linearized_system(self, equation, name, equilibrium):
x_string, y_string = equation
xe, ye = equilibrium
system = System(self, equation, name=name, linear=True)
self.systems.insert(0, system)
system.plot_eigenvectors(equilibrium)
myLogger.message("------ new linearized system created ------")
myLogger.message(" x' = " + str(system.equation.what_is_my_system()[0]))
myLogger.message(" y' = " + str(system.equation.what_is_my_system()[1]) + "\n", )
def close_current_tab(self):
index = self.tabWidget.currentIndex()
if index != self.tabWidget.count()-1:
self.tabWidget.removeTab(index)
self.systems.pop(index)
self.update_ui()
def close_all_tabs(self):
# Remove the first tab repeatedly: tab indices shift after each
# removal, so removing tab i in an increasing loop would skip tabs.
for _ in xrange(self.tabWidget.count()-1):
self.tabWidget.removeTab(0)
# TODO: Delete Data
self.update_ui()
def initialize_new_system_tab(self):
# Create new system tab
self.mySystemTab = SystemTabWidget()
contents = QtGui.QWidget(self.tabWidget)
self.mySystemTab.setupUi(contents)
number = self.tabWidget.count()
self.tabWidget.insertTab(0, contents, "System %s" % (str(number)))
self.tabWidget.setCurrentIndex(0)
# TODO: check why this can't be done within the SystemTabWidget
# class!
self.ppWidget = PhaseplaneWidget(self)
self.mySystemTab.ppLayout.addWidget(self.ppWidget)
self.xWidget = ZoomWidgetSimple()
self.mySystemTab.xLayout.addWidget(self.xWidget)
self.yWidget = ZoomWidgetSimple()
self.mySystemTab.yLayout.addWidget(self.yWidget)
def new_system(self, equation):
system = System(self, equation)
self.systems.insert(0, system)
def submit(self):
""" This function gets called after clicking on the submit button
"""
try:
xtxt = str(self.xDotLineEdit.text())
ytxt = str(self.yDotLineEdit.text())
except UnicodeEncodeError as exc:
myLogger.warn_message("UnicodeEncodeError! Please check input.")
myLogger.debug_message(str(exc))
else:
cond1 = str(self.xDotLineEdit.text()) != ""
cond2 = str(self.yDotLineEdit.text()) != ""
if cond1 and cond2:
x_string = str(self.xDotLineEdit.text())
y_string = str(self.yDotLineEdit.text())
equation = (x_string, y_string)
system = System(self, equation)
self.systems.insert(0, system)
self.save_tmp_system()
myLogger.message("------ new system created ------")
myLogger.message(" x' = " + str(system.equation.what_is_my_system()[0]))
myLogger.message(" y' = " + str(system.equation.what_is_my_system()[1]) + "\n", )
else:
myLogger.error_message("Please check system!")
def load_system(self, file_name):
""" load previous system (from tmp file) """
with open(file_name, 'r') as sysfile:
# TODO: This does not work, but how would I find a way to store a
# system?
#~ pps_file = pcl.loads(sysfile.read())
#~ system = System(self)
#~ system.unpickle(pps_file)
#~ self.systems.insert(0, system)
#~ self.xDotLineEdit.setText(sysfile.readline().strip())
#~ self.yDotLineEdit.setText(sysfile.readline().strip())
xdot_string = str(sysfile.readline())
ydot_string = str(sysfile.readline())
self.xDotLineEdit.setText(xdot_string.strip())
self.yDotLineEdit.setText(ydot_string.strip())
myLogger.message(file_name + " loaded")
def load_tmp_system(self):
self.load_system('library/tmp.ppf')
def load_system_from_file(self):
file_name = QtGui.QFileDialog.getOpenFileName(self,
'Open pyplane file', '',
'pyplane file (*.ppf)')
if len(file_name) > 0:
self.load_system(file_name)
self.submit()
self.update_ui()
def save_tmp_system(self):
if len(self.systems) > 0:
index = self.tabWidget.currentIndex()
system = self.systems[index]
file_name = 'library/tmp.ppf'
self.save_system(file_name, system.equation.what_is_my_system())
else:
myLogger.error_message("There is no system to save!")
def save_file(self):
if len(self.systems) > 0:
index = self.tabWidget.currentIndex()
system = self.systems[index]
file_name, filter = QtGui.QFileDialog.getSaveFileNameAndFilter(self,
'Save pyplane file', '',
'pyplane file (*.ppf)')
#~ sys_pickleds = system.pickle(file_name)
#~ system.equation.what_is_my_system()
self.save_system(file_name, system.equation.what_is_my_system())
else:
myLogger.error_message("There is no system to save!")
def save_system(self, file_name, system):
x_dot_string = str(system[0])
y_dot_string = str(system[1])
f_ending = '.ppf'
f_len = len(file_name)
if file_name[f_len - 4:f_len] == f_ending:
with open(file_name, 'w') as sysfile:
sysfile.write(x_dot_string + "\n" + y_dot_string)
else:
with open(file_name + f_ending, 'w') as sysfile:
sysfile.write(x_dot_string + "\n" + y_dot_string)
myLogger.message("System saved as " + file_name)
def export_as(self):
""" export dialog for pyplane plot
"""
q_files_types = QtCore.QString(".png;;.svg;;.pdf;;.eps")
q_file_name, q_file_type = QtGui.QFileDialog.getSaveFileNameAndFilter(self,
"Export PyPlane Plot as .png, .svg, .pdf, or .eps-file", "",
q_files_types)
# Ensure we are out of the QString world in the following
file_name = str(q_file_name)
file_type = str(q_file_type)
if file_name:
# Fix: Under some KDEs the file_type is returned empty because of a
# DBUS error. Hence, in such cases, we try to take the file_type from
# the extension specified by the user. If no valid extension is set by
# the user, file_type is set to ".png". This bugfix is addressed in
# the first part of the "if not" structure.
#
# The else part of the "if not" structure handles the case where the
# user wants to have dots in the basename of the file
# (affects all operating systems).
#
file_name2, file_type2 = os.path.splitext(file_name)
if not file_type:
if file_type2 not in [".png", ".svg", ".pdf", ".eps"]:
file_type = ".png"
else:
# Allow things like figure.case21.pdf
file_name = file_name2
file_type = file_type2
else:
# This part runs on non KDE-systems or KDE-systems without
# the DBUS error:
# drop accidentally added duplicate file extensions
# (avoid figure.png.png but allow figure.case1.png)
if file_type2 == file_type:
file_name = file_name2
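# Example: "figure.case21.pdf" returned with an empty file_type keeps
# ".pdf" as the type while the basename keeps its inner dot; a name
# "figure.png" chosen with the ".png" filter drops the duplicate, so
# the export does not become "figure.png.png".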
# ------
if file_type == ".png":
self.export_as_png(file_name)
elif file_type == ".svg":
self.export_as_svg(file_name)
elif file_type == ".pdf":
self.export_as_pdf(file_name)
elif file_type == ".eps":
self.export_as_eps(file_name)
else:
myLogger.error_message("Filetype-Error")
def update_window_range_lineedits(self):
""" this function will update_all every window size line edit
"""
# phase plane
xmin1, xmax1, ymin1, ymax1 = self.myGraph.get_limits(self.myGraph.plot_pp)
self.PP_xminLineEdit.setText(str(round(xmin1, 2)))
self.PP_xmaxLineEdit.setText(str(round(xmax1, 2)))
self.PP_yminLineEdit.setText(str(round(ymin1, 2)))
self.PP_ymaxLineEdit.setText(str(round(ymax1, 2)))
# x(t)
xmin2, xmax2, ymin2, ymax2 = self.myGraph.get_limits(self.myGraph.plot_x)
self.X_tminLineEdit.setText(str(round(xmin2, 2)))
self.X_tmaxLineEdit.setText(str(round(xmax2, 2)))
self.X_xminLineEdit.setText(str(round(ymin2, 2)))
self.X_xmaxLineEdit.setText(str(round(ymax2, 2)))
# y(t)
xmin3, xmax3, ymin3, ymax3 = self.myGraph.get_limits(self.myGraph.plot_y)
self.Y_tminLineEdit.setText(str(round(xmin3, 2)))
self.Y_tmaxLineEdit.setText(str(round(xmax3, 2)))
self.Y_yminLineEdit.setText(str(round(ymin3, 2)))
self.Y_ymaxLineEdit.setText(str(round(ymax3, 2)))
def export_as_png(self, filename):
system = self.get_current_system()
if system is not None:
filename_pp = str(filename) + "_pp.png"
system.Phaseplane.Plot.canvas.fig.savefig(filename_pp,
bbox_inches='tight')
filename_x = str(filename) + "_x.png"
system.Xt.Plot.canvas.fig.savefig(filename_x, bbox_inches='tight')
filename_y = str(filename) + "_y.png"
system.Yt.Plot.canvas.fig.savefig(filename_y, bbox_inches='tight')
myLogger.message(
"plot exported as\n\t" + filename_pp + ",\n\t" + filename_x + ",\n\t" + filename_y)
def export_as_svg(self, filename):
system = self.get_current_system()
if system is not None:
filename_pp = str(filename) + "_pp.svg"
system.Phaseplane.Plot.canvas.fig.savefig(filename_pp, bbox_inches='tight')
filename_x = str(filename) + "_x.svg"
system.Xt.Plot.canvas.fig.savefig(filename_x, bbox_inches='tight')
filename_y = str(filename) + "_y.svg"
system.Yt.Plot.canvas.fig.savefig(filename_y, bbox_inches='tight')
myLogger.message("plot exported as\n\t" + filename_pp + ",\n\t" + filename_x + ",\n\t" + filename_y)
def export_as_eps(self, filename):
system = self.get_current_system()
if system is not None:
filename_pp = str(filename) + "_pp.eps"
system.Phaseplane.Plot.canvas.fig.savefig(filename_pp, bbox_inches='tight')
filename_x = str(filename) + "_x.eps"
system.Xt.Plot.canvas.fig.savefig(filename_x, bbox_inches='tight')
filename_y = str(filename) + "_y.eps"
system.Yt.Plot.canvas.fig.savefig(filename_y, bbox_inches='tight')
myLogger.message("plot exported as\n\t" + filename_pp + ",\n\t" + filename_x + ",\n\t" + filename_y)
def export_as_pdf(self, filename):
system = self.get_current_system()
if system is not None:
filename_pp = str(filename) + "_pp.pdf"
system.Phaseplane.Plot.canvas.fig.savefig(filename_pp, bbox_inches='tight')
filename_x = str(filename) + "_x.pdf"
system.Xt.Plot.canvas.fig.savefig(filename_x, bbox_inches='tight')
filename_y = str(filename) + "_y.pdf"
system.Yt.Plot.canvas.fig.savefig(filename_y, bbox_inches='tight')
myLogger.message("plot exported as\n\t" + filename_pp + ",\n\t" + filename_x + ",\n\t" + filename_y)
def add_function_to_plot(self):
""" will plot additional functions and put it on a stack
"""
self.x = sp.symbols('x')
self.y = sp.symbols('y')
self.fct = None
try:
fct_txt = str(self.yLineEdit.text())
except UnicodeEncodeError as exc:
myLogger.error_message("input error!")
myLogger.debug_message(str(exc))
if fct_txt != "":
try:
self.fct_string = str(self.yLineEdit.text())
self.fct_expr = sp.sympify(self.fct_string)
# self.fct = sp.lambdify(self.x,self.fct_expr,'numpy')
self.fct = sp.lambdify((self.x, self.y), self.fct_expr, 'numpy')
xmin, xmax, ymin, ymax = self.myGraph.get_limits(self.myGraph.plot_pp)
# plot the function for an x-interval twice as big as the current window
deltax = (xmax - xmin) / 2
deltay = (ymax - ymin) / 2
plot_xmin = xmin - deltax
plot_xmax = xmax + deltax
plot_ymin = ymin - deltay
plot_ymax = ymax + deltay
pts_in_x = int(myConfig.read("Functions", "fct_gridPointsInX"))
pts_in_y = int(myConfig.read("Functions", "fct_gridPointsInY"))
fct_color = myConfig.read("Functions", "fct_color")
fct_linewidth = float(myConfig.read("Functions", "fct_linewidth"))
x = np.arange(plot_xmin, plot_xmax, (xmax - xmin) / pts_in_x)
y = np.arange(plot_ymin, plot_ymax, (ymax - ymin) / pts_in_y)
X, Y = np.meshgrid(x, y)
#yvalue = self.fct(xvalue)
myfunc = self.fct(X, Y)
# TODO: plots like y=1/x have a connection between -inf and +inf that is not actually there!
# plot function and put on function-stack
new_fct = self.myGraph.plot_pp.axes.contour(X, Y, myfunc, [0],
zorder=100,
linewidths=fct_linewidth,
colors=fct_color)
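# The contour call above draws the zero level set of the entered
# expression, i.e. the implicit curve f(x, y) = 0; for example,
# entering "x**2 + y**2 - 1" draws the unit circle.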
# new_fct = self.myGraph.plot_pp.axes.plot(xvalue, yvalue, label="fct", color="green")
self.fct_stack.append(new_fct)
self.myGraph.update_graph(self.myGraph.plot_pp)
myLogger.message("function plot: 0 = " + self.fct_string)
except Exception as error:
handle_exception(error)
else:
myLogger.error_message("Please enter function.")
def remove_function_from_plot(self):
if len(self.fct_stack) != 0:
for i in xrange(0, len(self.fct_stack)):
fct = self.fct_stack.pop().collections
for j in xrange(0, len(fct)):
try:
fct[j].remove()
except Exception as error:
myLogger.debug_message("couldn't delete function")
myLogger.debug_message(str(type(error)))
myLogger.debug_message(str(error))
myLogger.message("functions removed")
else:
myLogger.debug_message("no function to delete")
self.myGraph.update_graph(self.myGraph.plot_pp)
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
window_object = PyplaneMainWindow()
window_object.showFullScreen()
window_object.show()
app.exec_()
| gpl-3.0 |
SaganBolliger/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/dates.py | 54 | 33991 | #!/usr/bin/env python
"""
Matplotlib provides sophisticated date plotting capabilities, standing
on the shoulders of python :mod:`datetime`, the add-on modules
:mod:`pytz` and :mod:`dateutil`. :class:`datetime` objects are
converted to floating point numbers which represent the number of days
since 0001-01-01 UTC. The helper functions :func:`date2num`,
:func:`num2date` and :func:`drange` are used to facilitate easy
conversion to and from :mod:`datetime` and numeric ranges.
A wide range of specific and general purpose date tick locators and
formatters are provided in this module. See
:mod:`matplotlib.ticker` for general information on tick locators
and formatters. These are described below.
All the matplotlib date converters, tickers and formatters are
timezone aware, and the default timezone is given by the timezone
parameter in your :file:`matplotlibrc` file. If you leave out a
:class:`tz` timezone instance, the default from your rc file will be
assumed. If you want to use a custom time zone, pass a
:class:`pytz.timezone` instance with the tz keyword argument to
:func:`num2date`, :func:`plot_date`, and any custom date tickers or
locators you create. See `pytz <http://pytz.sourceforge.net>`_ for
information on :mod:`pytz` and timezone handling.
The `dateutil module <http://labix.org/python-dateutil>`_ provides
additional code to handle date ticking, making it easy to place ticks
on any kinds of dates. See examples below.
Date tickers
------------
Most of the date tickers can locate single or multiple values. For
example::
# tick on mondays every week
loc = WeekdayLocator(byweekday=MO, tz=tz)
# tick on mondays and saturdays
loc = WeekdayLocator(byweekday=(MO, SA))
In addition, most of the constructors take an interval argument::
# tick on mondays every second week
loc = WeekdayLocator(byweekday=MO, interval=2)
The rrule locator allows completely general date ticking::
# tick every 5th easter
rule = rrulewrapper(YEARLY, byeaster=1, interval=5)
loc = RRuleLocator(rule)
Here are all the date tickers:
* :class:`MinuteLocator`: locate minutes
* :class:`HourLocator`: locate hours
* :class:`DayLocator`: locate specified days of the month
* :class:`WeekdayLocator`: locate days of the week, e.g. MO, TU
* :class:`MonthLocator`: locate months, e.g. 7 for July
* :class:`YearLocator`: locate years that are multiples of base
* :class:`RRuleLocator`: locate using a
:class:`matplotlib.dates.rrulewrapper`. The
:class:`rrulewrapper` is a simple wrapper around a
:class:`dateutil.rrule` (`dateutil
<https://moin.conectiva.com.br/DateUtil>`_) which allows almost
arbitrary date tick specifications. See `rrule example
<../examples/pylab_examples/date_demo_rrule.html>`_.
Date formatters
---------------
Here are all the date formatters:
* :class:`DateFormatter`: use :func:`strftime` format strings
* :class:`IndexDateFormatter`: date plots with implicit *x*
indexing.
"""
import re, time, math, datetime
import pytz
# compatibility for 2008c and older versions
try:
import pytz.zoneinfo
except ImportError:
pytz.zoneinfo = pytz.tzinfo
pytz.zoneinfo.UTC = pytz.UTC
import matplotlib
import numpy as np
import matplotlib.units as units
import matplotlib.cbook as cbook
import matplotlib.ticker as ticker
from pytz import timezone
from dateutil.rrule import rrule, MO, TU, WE, TH, FR, SA, SU, YEARLY, \
MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY, SECONDLY
from dateutil.relativedelta import relativedelta
import dateutil.parser
__all__ = ( 'date2num', 'num2date', 'drange', 'epoch2num',
'num2epoch', 'mx2num', 'DateFormatter',
'IndexDateFormatter', 'DateLocator', 'RRuleLocator',
'YearLocator', 'MonthLocator', 'WeekdayLocator',
'DayLocator', 'HourLocator', 'MinuteLocator',
'SecondLocator', 'rrule', 'MO', 'TU', 'WE', 'TH', 'FR',
'SA', 'SU', 'YEARLY', 'MONTHLY', 'WEEKLY', 'DAILY',
'HOURLY', 'MINUTELY', 'SECONDLY', 'relativedelta',
'seconds', 'minutes', 'hours', 'weeks')
UTC = pytz.timezone('UTC')
def _get_rc_timezone():
s = matplotlib.rcParams['timezone']
return pytz.timezone(s)
HOURS_PER_DAY = 24.
MINUTES_PER_DAY = 60.*HOURS_PER_DAY
SECONDS_PER_DAY = 60.*MINUTES_PER_DAY
MUSECONDS_PER_DAY = 1e6*SECONDS_PER_DAY
SEC_PER_MIN = 60
SEC_PER_HOUR = 3600
SEC_PER_DAY = SEC_PER_HOUR * 24
SEC_PER_WEEK = SEC_PER_DAY * 7
MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY = (
MO, TU, WE, TH, FR, SA, SU)
WEEKDAYS = (MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY)
def _to_ordinalf(dt):
"""
Convert :mod:`datetime` to the Gregorian date as UTC float days,
preserving hours, minutes, seconds and microseconds. Return value
is a :func:`float`.
"""
if hasattr(dt, 'tzinfo') and dt.tzinfo is not None:
delta = dt.tzinfo.utcoffset(dt)
if delta is not None:
dt -= delta
base = float(dt.toordinal())
if hasattr(dt, 'hour'):
base += (dt.hour/HOURS_PER_DAY + dt.minute/MINUTES_PER_DAY +
dt.second/SECONDS_PER_DAY + dt.microsecond/MUSECONDS_PER_DAY
)
return base
def _from_ordinalf(x, tz=None):
"""
Convert Gregorian float of the date, preserving hours, minutes,
seconds and microseconds. Return value is a :class:`datetime`.
"""
if tz is None: tz = _get_rc_timezone()
ix = int(x)
dt = datetime.datetime.fromordinal(ix)
remainder = float(x) - ix
hour, remainder = divmod(24*remainder, 1)
minute, remainder = divmod(60*remainder, 1)
second, remainder = divmod(60*remainder, 1)
microsecond = int(1e6*remainder)
if microsecond<10: microsecond=0 # compensate for rounding errors
dt = datetime.datetime(
dt.year, dt.month, dt.day, int(hour), int(minute), int(second),
microsecond, tzinfo=UTC).astimezone(tz)
if microsecond>999990: # compensate for rounding errors
dt += datetime.timedelta(microseconds=1e6-microsecond)
return dt
class strpdate2num:
"""
Use this class to parse date strings to matplotlib datenums when
you know the date format string of the date you are parsing. See
:file:`examples/load_demo.py`.
"""
def __init__(self, fmt):
""" fmt: any valid strptime format is supported """
self.fmt = fmt
def __call__(self, s):
"""s : string to be converted
return value: a date2num float
"""
return date2num(datetime.datetime(*time.strptime(s, self.fmt)[:6]))
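# A small usage sketch of strpdate2num, assuming ISO-style input dates:
#
#   conv = strpdate2num('%Y-%m-%d')
#   x = conv('2008-06-01')   # a date2num float, usable on a date axis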
def datestr2num(d):
"""
Convert a date string to a datenum using
:func:`dateutil.parser.parse`. *d* can be a single string or a
sequence of strings.
"""
if cbook.is_string_like(d):
dt = dateutil.parser.parse(d)
return date2num(dt)
else:
return date2num([dateutil.parser.parse(s) for s in d])
def date2num(d):
"""
*d* is either a :class:`datetime` instance or a sequence of datetimes.
Return value is a floating point number (or sequence of floats)
which gives number of days (fraction part represents hours,
minutes, seconds) since 0001-01-01 00:00:00 UTC.
"""
if not cbook.iterable(d): return _to_ordinalf(d)
else: return np.asarray([_to_ordinalf(val) for val in d])
def julian2num(j):
'Convert a Julian date (or sequence) to a matplotlib date (or sequence).'
if cbook.iterable(j): j = np.asarray(j)
return j + 1721425.5
def num2julian(n):
'Convert a matplotlib date (or sequence) to a Julian date (or sequence).'
if cbook.iterable(n): n = np.asarray(n)
return n - 1721425.5
def num2date(x, tz=None):
"""
*x* is a float value which gives number of days (fraction part
represents hours, minutes, seconds) since 0001-01-01 00:00:00 UTC.
Return value is a :class:`datetime` instance in timezone *tz* (default to
rcparams TZ value).
If *x* is a sequence, a sequence of :class:`datetime` objects will
be returned.
"""
if tz is None: tz = _get_rc_timezone()
if not cbook.iterable(x): return _from_ordinalf(x, tz)
else: return [_from_ordinalf(val, tz) for val in x]
def drange(dstart, dend, delta):
"""
Return a date range as float Gregorian ordinals. *dstart* and
*dend* are :class:`datetime` instances. *delta* is a
:class:`datetime.timedelta` instance.
"""
step = (delta.days + delta.seconds/SECONDS_PER_DAY +
delta.microseconds/MUSECONDS_PER_DAY)
f1 = _to_ordinalf(dstart)
f2 = _to_ordinalf(dend)
return np.arange(f1, f2, step)
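# For example, delta = datetime.timedelta(hours=6) gives a step of
# 21600/86400 = 0.25 days, so drange returns ordinals spaced 0.25 apart.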
### date tickers and formatters ###
class DateFormatter(ticker.Formatter):
"""
Tick location is seconds since the epoch. Use a :func:`strftime`
format string.
Python only supports :mod:`datetime` :func:`strftime` formatting
for years greater than 1900. Thanks to Andrew Dalke, Dalke
Scientific Software who contributed the :func:`strftime` code
below to include dates earlier than this year.
"""
illegal_s = re.compile(r"((^|[^%])(%%)*%s)")
def __init__(self, fmt, tz=None):
"""
*fmt* is an :func:`strftime` format string; *tz* is the
:class:`tzinfo` instance.
"""
if tz is None: tz = _get_rc_timezone()
self.fmt = fmt
self.tz = tz
def __call__(self, x, pos=0):
dt = num2date(x, self.tz)
return self.strftime(dt, self.fmt)
def set_tzinfo(self, tz):
self.tz = tz
def _findall(self, text, substr):
# Also finds overlaps
sites = []
i = 0
while 1:
j = text.find(substr, i)
if j == -1:
break
sites.append(j)
i=j+1
return sites
# Dalke: I hope I did this math right. Every 28 years the
# calendar repeats, except through century leap years excepting
# the 400 year leap years. But only if you're using the Gregorian
# calendar.
def strftime(self, dt, fmt):
fmt = self.illegal_s.sub(r"\1", fmt)
fmt = fmt.replace("%s", "s")
if dt.year > 1900:
return cbook.unicode_safe(dt.strftime(fmt))
year = dt.year
# For every non-leap year century, advance by
# 6 years to get into the 28-year repeat cycle
delta = 2000 - year
off = 6*(delta // 100 + delta // 400)
year = year + off
# Move to around the year 2000
year = year + ((2000 - year)//28)*28
timetuple = dt.timetuple()
s1 = time.strftime(fmt, (year,) + timetuple[1:])
sites1 = self._findall(s1, str(year))
s2 = time.strftime(fmt, (year+28,) + timetuple[1:])
sites2 = self._findall(s2, str(year+28))
sites = []
for site in sites1:
if site in sites2:
sites.append(site)
s = s1
syear = "%4d" % (dt.year,)
for site in sites:
s = s[:site] + syear + s[site+4:]
return cbook.unicode_safe(s)
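# Worked example of the remapping above: for dt.year = 1011,
# delta = 989 and off = 6*(9 + 2) = 66, giving year = 1077; adding
# ((2000 - 1077)//28)*28 = 896 lands on 1973. The format is then
# rendered for both 1973 and 2001, and "1011" is spliced back in at
# the positions where the two renderings agree on the year.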
class IndexDateFormatter(ticker.Formatter):
"""
Use with :class:`~matplotlib.ticker.IndexLocator` to cycle format
strings by index.
"""
def __init__(self, t, fmt, tz=None):
"""
*t* is a sequence of dates (floating point days). *fmt* is a
:func:`strftime` format string.
"""
if tz is None: tz = _get_rc_timezone()
self.t = t
self.fmt = fmt
self.tz = tz
def __call__(self, x, pos=0):
'Return the label for time *x* at position *pos*'
ind = int(round(x))
if ind>=len(self.t) or ind<=0: return ''
dt = num2date(self.t[ind], self.tz)
return cbook.unicode_safe(dt.strftime(self.fmt))
class AutoDateFormatter(ticker.Formatter):
"""
This class attempts to figure out the best format to use. This is
most useful when used with the :class:`AutoDateLocator`.
"""
# This can be improved by providing some user-level direction on
# how to choose the best format (precedence, etc...)
# Perhaps a 'struct' that has a field for each time-type where a
# zero would indicate "don't show" and a number would indicate
# "show" with some sort of priority. Same priorities could mean
# show all with the same priority.
# Or more simply, perhaps just a format string for each
# possibility...
def __init__(self, locator, tz=None):
self._locator = locator
self._formatter = DateFormatter("%b %d %Y %H:%M:%S %Z", tz)
self._tz = tz
def __call__(self, x, pos=0):
scale = float( self._locator._get_unit() )
if ( scale == 365.0 ):
self._formatter = DateFormatter("%Y", self._tz)
elif ( scale == 30.0 ):
self._formatter = DateFormatter("%b %Y", self._tz)
elif ( (scale == 1.0) or (scale == 7.0) ):
self._formatter = DateFormatter("%b %d %Y", self._tz)
elif ( scale == (1.0/24.0) ):
self._formatter = DateFormatter("%H:%M:%S %Z", self._tz)
elif ( scale == (1.0/(24*60)) ):
self._formatter = DateFormatter("%H:%M:%S %Z", self._tz)
elif ( scale == (1.0/(24*3600)) ):
self._formatter = DateFormatter("%H:%M:%S %Z", self._tz)
else:
self._formatter = DateFormatter("%b %d %Y %H:%M:%S %Z", self._tz)
return self._formatter(x, pos)
class rrulewrapper:
def __init__(self, freq, **kwargs):
self._construct = kwargs.copy()
self._construct["freq"] = freq
self._rrule = rrule(**self._construct)
def set(self, **kwargs):
self._construct.update(kwargs)
self._rrule = rrule(**self._construct)
def __getattr__(self, name):
if name in self.__dict__:
return self.__dict__[name]
return getattr(self._rrule, name)
class DateLocator(ticker.Locator):
hms0d = {'byhour':0, 'byminute':0,'bysecond':0}
def __init__(self, tz=None):
"""
*tz* is a :class:`tzinfo` instance.
"""
if tz is None: tz = _get_rc_timezone()
self.tz = tz
def set_tzinfo(self, tz):
self.tz = tz
def datalim_to_dt(self):
dmin, dmax = self.axis.get_data_interval()
return num2date(dmin, self.tz), num2date(dmax, self.tz)
def viewlim_to_dt(self):
vmin, vmax = self.axis.get_view_interval()
return num2date(vmin, self.tz), num2date(vmax, self.tz)
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1
def nonsingular(self, vmin, vmax):
unit = self._get_unit()
vmin -= 2*unit
vmax += 2*unit
return vmin, vmax
class RRuleLocator(DateLocator):
# use the dateutil rrule instance
def __init__(self, o, tz=None):
DateLocator.__init__(self, tz)
self.rule = o
def __call__(self):
# if no data have been set, this will tank with a ValueError
try: dmin, dmax = self.viewlim_to_dt()
except ValueError: return []
if dmin>dmax:
dmax, dmin = dmin, dmax
delta = relativedelta(dmax, dmin)
self.rule.set(dtstart=dmin-delta, until=dmax+delta)
dates = self.rule.between(dmin, dmax, True)
return date2num(dates)
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
freq = self.rule._rrule._freq
if ( freq == YEARLY ):
return 365
elif ( freq == MONTHLY ):
return 30
elif ( freq == WEEKLY ):
return 7
elif ( freq == DAILY ):
return 1
elif ( freq == HOURLY ):
return (1.0/24.0)
elif ( freq == MINUTELY ):
return (1.0/(24*60))
elif ( freq == SECONDLY ):
return (1.0/(24*3600))
else:
# error
return -1  # or should this just return '1'?
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
if dmin>dmax:
dmax, dmin = dmin, dmax
delta = relativedelta(dmax, dmin)
self.rule.set(dtstart=dmin-delta, until=dmax+delta)
dmin, dmax = self.datalim_to_dt()
vmin = self.rule.before(dmin, True)
if not vmin: vmin=dmin
vmax = self.rule.after(dmax, True)
if not vmax: vmax=dmax
vmin = date2num(vmin)
vmax = date2num(vmax)
return self.nonsingular(vmin, vmax)
class AutoDateLocator(DateLocator):
"""
On autoscale, this class picks the best
:class:`MultipleDateLocator` to set the view limits and the tick
locations.
"""
def __init__(self, tz=None):
DateLocator.__init__(self, tz)
self._locator = YearLocator()
self._freq = YEARLY
def __call__(self):
'Return the locations of the ticks'
self.refresh()
return self._locator()
def set_axis(self, axis):
DateLocator.set_axis(self, axis)
self._locator.set_axis(axis)
def refresh(self):
'Refresh internal information based on current limits.'
dmin, dmax = self.viewlim_to_dt()
self._locator = self.get_locator(dmin, dmax)
def _get_unit(self):
if ( self._freq == YEARLY ):
return 365.0
elif ( self._freq == MONTHLY ):
return 30.0
elif ( self._freq == WEEKLY ):
return 7.0
elif ( self._freq == DAILY ):
return 1.0
elif ( self._freq == HOURLY ):
return 1.0/24
elif ( self._freq == MINUTELY ):
return 1.0/(24*60)
elif ( self._freq == SECONDLY ):
return 1.0/(24*3600)
else:
# error
return -1
def autoscale(self):
'Try to choose the view limits intelligently.'
dmin, dmax = self.datalim_to_dt()
self._locator = self.get_locator(dmin, dmax)
return self._locator.autoscale()
def get_locator(self, dmin, dmax):
'Pick the best locator based on a distance.'
delta = relativedelta(dmax, dmin)
numYears = (delta.years * 1.0)
numMonths = (numYears * 12.0) + delta.months
numDays = (numMonths * 31.0) + delta.days
numHours = (numDays * 24.0) + delta.hours
numMinutes = (numHours * 60.0) + delta.minutes
numSeconds = (numMinutes * 60.0) + delta.seconds
numticks = 5
# self._freq = YEARLY
interval = 1
bymonth = 1
bymonthday = 1
byhour = 0
byminute = 0
bysecond = 0
if ( numYears >= numticks ):
self._freq = YEARLY
elif ( numMonths >= numticks ):
self._freq = MONTHLY
bymonth = range(1, 13)
if ( (0 <= numMonths) and (numMonths <= 14) ):
interval = 1 # show every month
elif ( (15 <= numMonths) and (numMonths <= 29) ):
interval = 3 # show every 3 months
elif ( (30 <= numMonths) and (numMonths <= 44) ):
interval = 4 # show every 4 months
else: # 45 <= numMonths <= 59
interval = 6 # show every 6 months
elif ( numDays >= numticks ):
self._freq = DAILY
bymonth = None
bymonthday = range(1, 32)
if ( (0 <= numDays) and (numDays <= 9) ):
interval = 1 # show every day
elif ( (10 <= numDays) and (numDays <= 19) ):
interval = 2 # show every 2 days
elif ( (20 <= numDays) and (numDays <= 49) ):
interval = 3 # show every 3 days
elif ( (50 <= numDays) and (numDays <= 99) ):
interval = 7 # show every 1 week
else: # 100 <= numDays <= ~150
interval = 14 # show every 2 weeks
elif ( numHours >= numticks ):
self._freq = HOURLY
bymonth = None
bymonthday = None
byhour = range(0, 24) # show every hour
if ( (0 <= numHours) and (numHours <= 14) ):
interval = 1 # show every hour
elif ( (15 <= numHours) and (numHours <= 30) ):
interval = 2 # show every 2 hours
elif ( (30 <= numHours) and (numHours <= 45) ):
interval = 3 # show every 3 hours
elif ( (45 <= numHours) and (numHours <= 68) ):
interval = 4 # show every 4 hours
elif ( (68 <= numHours) and (numHours <= 90) ):
interval = 6 # show every 6 hours
else: # 90 <= numHours <= 120
interval = 12 # show every 12 hours
elif ( numMinutes >= numticks ):
self._freq = MINUTELY
bymonth = None
bymonthday = None
byhour = None
byminute = range(0, 60)
if ( numMinutes > (10.0 * numticks) ):
interval = 10
# end if
elif ( numSeconds >= numticks ):
self._freq = SECONDLY
bymonth = None
bymonthday = None
byhour = None
byminute = None
bysecond = range(0, 60)
if ( numSeconds > (10.0 * numticks) ):
interval = 10
# end if
else:
# do what?
# microseconds as floats, but floats from what reference point?
pass
rrule = rrulewrapper( self._freq, interval=interval, \
dtstart=dmin, until=dmax, \
bymonth=bymonth, bymonthday=bymonthday, \
byhour=byhour, byminute = byminute, \
bysecond=bysecond )
locator = RRuleLocator(rrule, self.tz)
locator.set_axis(self.axis)
locator.set_view_interval(*self.axis.get_view_interval())
locator.set_data_interval(*self.axis.get_data_interval())
return locator
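# Worked example of the selection above: for a span of roughly 40 days
# (say Jan 1 to Feb 10), numDays = 1*31 + 9 = 40 >= numticks, so the
# frequency is DAILY, and 20 <= numDays <= 49 selects interval = 3,
# i.e. a tick every third day.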
class YearLocator(DateLocator):
"""
Make ticks on a given day of each year that is a multiple of base.
Examples::
# Tick every year on Jan 1st
locator = YearLocator()
# Tick every 5 years on July 4th
locator = YearLocator(5, month=7, day=4)
"""
def __init__(self, base=1, month=1, day=1, tz=None):
"""
Mark years that are multiple of base on a given month and day
(default jan 1).
"""
DateLocator.__init__(self, tz)
self.base = ticker.Base(base)
self.replaced = { 'month' : month,
'day' : day,
'hour' : 0,
'minute' : 0,
'second' : 0,
'tzinfo' : tz
}
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 365
def __call__(self):
dmin, dmax = self.viewlim_to_dt()
ymin = self.base.le(dmin.year)
ymax = self.base.ge(dmax.year)
ticks = [dmin.replace(year=ymin, **self.replaced)]
while 1:
dt = ticks[-1]
if dt.year>=ymax: return date2num(ticks)
year = dt.year + self.base.get_base()
ticks.append(dt.replace(year=year, **self.replaced))
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
ymin = self.base.le(dmin.year)
ymax = self.base.ge(dmax.year)
vmin = dmin.replace(year=ymin, **self.replaced)
vmax = dmax.replace(year=ymax, **self.replaced)
vmin = date2num(vmin)
vmax = date2num(vmax)
return self.nonsingular(vmin, vmax)
class MonthLocator(RRuleLocator):
"""
Make ticks on occurrences of each month, e.g. 1, 3, 12.
"""
def __init__(self, bymonth=None, bymonthday=1, interval=1, tz=None):
"""
Mark every month in *bymonth*; *bymonth* can be an int or
sequence. Default is ``range(1,13)``, i.e. every month.
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if bymonth is None: bymonth=range(1,13)
o = rrulewrapper(MONTHLY, bymonth=bymonth, bymonthday=bymonthday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, o, tz)
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 30
class WeekdayLocator(RRuleLocator):
"""
Make ticks on occurrences of each weekday.
"""
def __init__(self, byweekday=1, interval=1, tz=None):
"""
Mark every weekday in *byweekday*; *byweekday* can be a number or
sequence.
Elements of *byweekday* must be one of MO, TU, WE, TH, FR, SA,
SU, the constants from :mod:`dateutil.rrule`.
*interval* specifies the number of weeks to skip. For example,
``interval=2`` plots every second week.
"""
o = rrulewrapper(DAILY, byweekday=byweekday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, o, tz)
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 7
class DayLocator(RRuleLocator):
"""
Make ticks on occurrences of each day of the month. For example,
1, 15, 30.
"""
def __init__(self, bymonthday=None, interval=1, tz=None):
"""
Mark every day in *bymonthday*; *bymonthday* can be an int or
sequence.
Default is to tick every day of the month: ``bymonthday=range(1,32)``
"""
if bymonthday is None: bymonthday=range(1,32)
o = rrulewrapper(DAILY, bymonthday=bymonthday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, o, tz)
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1
class HourLocator(RRuleLocator):
"""
Make ticks on occurrences of each hour.
"""
def __init__(self, byhour=None, interval=1, tz=None):
"""
Mark every hour in *byhour*; *byhour* can be an int or sequence.
Default is to tick every hour: ``byhour=range(24)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if byhour is None: byhour=range(24)
rule = rrulewrapper(HOURLY, byhour=byhour, interval=interval,
byminute=0, bysecond=0)
RRuleLocator.__init__(self, rule, tz)
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1/24.
class MinuteLocator(RRuleLocator):
"""
Make ticks on occurrences of each minute.
"""
def __init__(self, byminute=None, interval=1, tz=None):
"""
Mark every minute in *byminute*; *byminute* can be an int or
sequence. Default is to tick every minute: ``byminute=range(60)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if byminute is None: byminute=range(60)
rule = rrulewrapper(MINUTELY, byminute=byminute, interval=interval,
bysecond=0)
RRuleLocator.__init__(self, rule, tz)
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1./(24*60)
class SecondLocator(RRuleLocator):
"""
Make ticks on occurrences of each second.
"""
def __init__(self, bysecond=None, interval=1, tz=None):
"""
Mark every second in *bysecond*; *bysecond* can be an int or
sequence. Default is to tick every second: ``bysecond = range(60)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if bysecond is None: bysecond=range(60)
rule = rrulewrapper(SECONDLY, bysecond=bysecond, interval=interval)
RRuleLocator.__init__(self, rule, tz)
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1./(24*60*60)
def _close_to_dt(d1, d2, epsilon=5):
'Assert that datetimes *d1* and *d2* are within *epsilon* microseconds.'
delta = d2-d1
mus = abs(delta.days*MUSECONDS_PER_DAY + delta.seconds*1e6 +
delta.microseconds)
assert(mus<epsilon)
def _close_to_num(o1, o2, epsilon=5):
'Assert that float ordinals *o1* and *o2* are within *epsilon* microseconds.'
delta = abs((o2-o1)*MUSECONDS_PER_DAY)
assert(delta<epsilon)
def epoch2num(e):
"""
Convert an epoch or sequence of epochs to the new date format,
that is days since 0001.
"""
spd = 24.*3600.
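# 719163 is the proleptic Gregorian ordinal of 1970-01-01, i.e.
# datetime.date(1970, 1, 1).toordinal(), so epoch 0 maps to that day.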
return 719163 + np.asarray(e)/spd
def num2epoch(d):
"""
Convert days since 0001 to epoch. *d* can be a number or sequence.
"""
spd = 24.*3600.
return (np.asarray(d)-719163)*spd
def mx2num(mxdates):
"""
Convert mx :class:`datetime` instance (or sequence of mx
instances) to the new date format.
"""
scalar = False
if not cbook.iterable(mxdates):
scalar = True
mxdates = [mxdates]
ret = epoch2num([m.ticks() for m in mxdates])
if scalar: return ret[0]
else: return ret
def date_ticker_factory(span, tz=None, numticks=5):
"""
Create a date locator with *numticks* (approx) and a date formatter
for *span* in days. Return value is (locator, formatter).
"""
if span==0: span = 1/24.
minutes = span*24*60
hours = span*24
days = span
weeks = span/7.
months = span/31. # approx
years = span/365.
if years>numticks:
locator = YearLocator(int(years/numticks), tz=tz)
fmt = '%Y'
elif months>numticks:
locator = MonthLocator(tz=tz)
fmt = '%b %Y'
elif weeks>numticks:
locator = WeekdayLocator(tz=tz)
fmt = '%a, %b %d'
elif days>numticks:
locator = DayLocator(interval=int(math.ceil(days/numticks)), tz=tz)
fmt = '%b %d'
elif hours>numticks:
locator = HourLocator(interval=int(math.ceil(hours/numticks)), tz=tz)
fmt = '%H:%M\n%b %d'
elif minutes>numticks:
locator = MinuteLocator(interval=int(math.ceil(minutes/numticks)), tz=tz)
fmt = '%H:%M:%S'
else:
locator = MinuteLocator(tz=tz)
fmt = '%H:%M:%S'
formatter = DateFormatter(fmt, tz=tz)
return locator, formatter
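# For example, span = 3.0 (days) with numticks = 5 falls through to the
# hours branch: hours = 72.0 > 5, so the factory returns an
# HourLocator with interval = int(math.ceil(72.0/5)) = 15 and the
# '%H:%M\n%b %d' format.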
def seconds(s):
'Return seconds as days.'
return float(s)/SEC_PER_DAY
def minutes(m):
'Return minutes as days.'
return float(m)/MINUTES_PER_DAY
def hours(h):
'Return hours as days.'
return h/24.
def weeks(w):
'Return weeks as days.'
return w*7.
class DateConverter(units.ConversionInterface):
def axisinfo(unit):
'return the unit AxisInfo'
if unit=='date':
majloc = AutoDateLocator()
majfmt = AutoDateFormatter(majloc)
return units.AxisInfo(
majloc = majloc,
majfmt = majfmt,
label='',
)
else: return None
axisinfo = staticmethod(axisinfo)
def convert(value, unit):
if units.ConversionInterface.is_numlike(value): return value
return date2num(value)
convert = staticmethod(convert)
def default_units(x):
'Return the default unit for *x* or None'
return 'date'
default_units = staticmethod(default_units)
units.registry[datetime.date] = DateConverter()
units.registry[datetime.datetime] = DateConverter()
if __name__=='__main__':
#tz = None
tz = pytz.timezone('US/Pacific')
#tz = UTC
dt = datetime.datetime(1011, 10, 9, 13, 44, 22, 101010, tzinfo=tz)
x = date2num(dt)
_close_to_dt(dt, num2date(x, tz))
#tz = _get_rc_timezone()
d1 = datetime.datetime( 2000, 3, 1, tzinfo=tz)
d2 = datetime.datetime( 2000, 3, 5, tzinfo=tz)
#d1 = datetime.datetime( 2002, 1, 5, tzinfo=tz)
#d2 = datetime.datetime( 2003, 12, 1, tzinfo=tz)
delta = datetime.timedelta(hours=6)
dates = drange(d1, d2, delta)
# MGDTODO: Broken on transforms branch
#print 'orig', d1
#print 'd2n and back', num2date(date2num(d1), tz)
from _transforms import Value, Interval
v1 = Value(date2num(d1))
v2 = Value(date2num(d2))
dlim = Interval(v1,v2)
vlim = Interval(v1,v2)
#locator = HourLocator(byhour=(3,15), tz=tz)
#locator = MinuteLocator(byminute=(15,30,45), tz=tz)
#locator = YearLocator(base=5, month=7, day=4, tz=tz)
#locator = MonthLocator(bymonthday=15)
locator = DayLocator(tz=tz)
locator.set_data_interval(dlim)
locator.set_view_interval(vlim)
dmin, dmax = locator.autoscale()
vlim.set_bounds(dmin, dmax)
ticks = locator()
fmt = '%Y-%m-%d %H:%M:%S %Z'
formatter = DateFormatter(fmt, tz)
#for t in ticks: print formatter(t)
for t in dates: print formatter(t)
| agpl-3.0 |
daniilsorokin/Web-Mining-Exercises | src/correlations.py | 1 | 1064 | '''
Created on Jun 21, 2015
@author: Daniil Sorokin<sorokin@ukp.informatik.tu-darmstadt.de>
'''
import argparse
from vector_representation import read_vectors_from_csv
from classfiers import NBClassifier
import matplotlib.pyplot as plt
import numpy as np
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('train_file', type=str)
params = parser.parse_args()
train_data = read_vectors_from_csv(params.train_file)
print("Building a model.")
classifier = NBClassifier()
classifier.train(train_data)
means_v, variance_v = classifier.get_model()
num_features = 20
index = np.arange(num_features)
bar_width = 0.05
for i, clss in enumerate(means_v.keys()):
plt.bar(index + (bar_width*i), means_v[clss][:num_features], width=bar_width,
yerr=variance_v[clss][:num_features])
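# Each class gets its own group of bars, shifted by bar_width per class,
# with the per-feature variance drawn as an error bar on top of the mean.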
plt.show()
# obs = np.array([[14452, 4073, 4287], [30864, 11439, 9887]])
# chi2, p, dof, expected = stats.chi2_contingency(obs)
# print(p)
| mit |
kkk669/mxnet | python/mxnet/model.py | 17 | 39894 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=fixme, invalid-name, too-many-arguments, too-many-locals, too-many-lines
# pylint: disable=too-many-branches, too-many-statements
"""MXNet model module"""
from __future__ import absolute_import, print_function
import time
import logging
import warnings
from collections import namedtuple
import numpy as np
from . import io
from . import nd
from . import symbol as sym
from . import optimizer as opt
from . import metric
from . import kvstore as kvs
from .context import Context, cpu
from .initializer import Uniform
from .optimizer import get_updater
from .executor_manager import DataParallelExecutorManager, _check_arguments, _load_data
from .io import DataDesc
from .base import mx_real_t
BASE_ESTIMATOR = object
try:
from sklearn.base import BaseEstimator
BASE_ESTIMATOR = BaseEstimator
except ImportError:
SKLEARN_INSTALLED = False
# Parameter to pass to batch_end_callback
BatchEndParam = namedtuple('BatchEndParams',
['epoch',
'nbatch',
'eval_metric',
'locals'])
def _create_kvstore(kvstore, num_device, arg_params):
"""Create kvstore
This function selects and creates a proper kvstore when given the kvstore type.
Parameters
----------
kvstore : KVStore or str
The kvstore.
num_device : int
The number of devices
arg_params : dict of str to `NDArray`.
Model parameter, dict of name to `NDArray` of net's weights.
"""
update_on_kvstore = True
if kvstore is None:
kv = None
elif isinstance(kvstore, kvs.KVStore):
kv = kvstore
elif isinstance(kvstore, str):
# create kvstore using the string type
if num_device == 1 and 'dist' not in kvstore:
# no need to use kv for single device and single machine
kv = None
else:
kv = kvs.create(kvstore)
if kvstore == 'local':
# automatically select a proper local
max_size = max(np.prod(param.shape) for param in
arg_params.values())
if max_size > 1024 * 1024 * 16:
update_on_kvstore = False
else:
raise TypeError('kvstore must be KVStore, str or None')
if kv is None:
update_on_kvstore = False
return (kv, update_on_kvstore)
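# For example, _create_kvstore('local', 1, arg_params) returns
# (None, False): a single local device needs no kvstore. With more than
# one device and 'local', a kvstore is created and update_on_kvstore
# stays True unless the largest parameter exceeds 16 * 1024 * 1024 entries.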
def _initialize_kvstore(kvstore, param_arrays, arg_params, param_names, update_on_kvstore):
"""Initialize kvstore"""
for idx, param_on_devs in enumerate(param_arrays):
name = param_names[idx]
kvstore.init(name, arg_params[name])
if update_on_kvstore:
kvstore.pull(name, param_on_devs, priority=-idx)
def _update_params_on_kvstore(param_arrays, grad_arrays, kvstore, param_names):
"""Perform update of param_arrays from grad_arrays on kvstore."""
for index, pair in enumerate(zip(param_arrays, grad_arrays)):
arg_list, grad_list = pair
if grad_list[0] is None:
continue
name = param_names[index]
# push gradient, priority is negative index
kvstore.push(name, grad_list, priority=-index)
# pull back the weights
kvstore.pull(name, arg_list, priority=-index)
def _update_params(param_arrays, grad_arrays, updater, num_device,
kvstore=None, param_names=None):
"""Perform update of param_arrays from grad_arrays not on kvstore."""
for i, pair in enumerate(zip(param_arrays, grad_arrays)):
arg_list, grad_list = pair
if grad_list[0] is None:
continue
index = i
if kvstore:
name = param_names[index]
# push gradient, priority is negative index
kvstore.push(name, grad_list, priority=-index)
# pull back the sum gradients, to the same locations.
kvstore.pull(name, grad_list, priority=-index)
for k, p in enumerate(zip(arg_list, grad_list)):
# faked an index here, to make optimizer create diff
# state for the same index but on diff devs, TODO(mli)
# use a better solution later
w, g = p
updater(index*num_device+k, g, w)
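# For example, with num_device = 2 the parameter at index 3 is updated
# under the fake optimizer indices 3*2+0 = 6 and 3*2+1 = 7, one per
# device, so the optimizer keeps separate state for each device's copy.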
def _multiple_callbacks(callbacks, *args, **kwargs):
"""Sends args and kwargs to any configured callbacks.
This handles the cases where the 'callbacks' variable
is ``None``, a single function, or a list.
"""
if isinstance(callbacks, list):
for cb in callbacks:
cb(*args, **kwargs)
return
if callbacks:
callbacks(*args, **kwargs)
def _train_multi_device(symbol, ctx, arg_names, param_names, aux_names,
arg_params, aux_params,
begin_epoch, end_epoch, epoch_size, optimizer,
kvstore, update_on_kvstore,
train_data, eval_data=None, eval_metric=None,
epoch_end_callback=None, batch_end_callback=None,
logger=None, work_load_list=None, monitor=None,
eval_end_callback=None,
eval_batch_end_callback=None, sym_gen=None):
"""Internal training function on multiple devices.
This function will also work for single device as well.
Parameters
----------
symbol : Symbol
The network configuration.
ctx : list of Context
The training devices.
arg_names: list of str
Name of all arguments of the network.
param_names: list of str
Name of all trainable parameters of the network.
aux_names: list of str
Name of all auxiliary states of the network.
arg_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's auxiliary states.
begin_epoch : int
The beginning training epoch.
end_epoch : int
The end training epoch.
epoch_size : int, optional
Number of batches in an epoch. By default, it is set to
``ceil(num_train_examples / batch_size)``.
optimizer : Optimizer
The optimization algorithm
train_data : DataIter
Training data iterator.
eval_data : DataIter
Validation data iterator.
eval_metric : EvalMetric
An evaluation function or a list of evaluation functions.
epoch_end_callback : callable(epoch, symbol, arg_params, aux_states)
A callback that is invoked at end of each epoch.
This can be used to checkpoint model each epoch.
batch_end_callback : callable(BatchEndParams)
A callback that is invoked at end of each batch.
This can be used to measure speed, get results from the evaluation metric, etc.
kvstore : KVStore
The KVStore.
update_on_kvstore : bool
Whether or not perform weight updating on kvstore.
logger : logging logger
When not specified, default logger will be used.
work_load_list : list of float or int, optional
The list of work load for different devices,
in the same order as ``ctx``.
monitor : Monitor, optional
Monitor installed to executor,
for monitoring outputs, weights, and gradients for debugging.
Notes
-----
- This function will update the NDArrays in `arg_params` and `aux_states` in place.
"""
if logger is None:
logger = logging
executor_manager = DataParallelExecutorManager(symbol=symbol,
sym_gen=sym_gen,
ctx=ctx,
train_data=train_data,
param_names=param_names,
arg_names=arg_names,
aux_names=aux_names,
work_load_list=work_load_list,
logger=logger)
if monitor:
executor_manager.install_monitor(monitor)
executor_manager.set_params(arg_params, aux_params)
if not update_on_kvstore:
updater = get_updater(optimizer)
if kvstore:
_initialize_kvstore(kvstore=kvstore,
param_arrays=executor_manager.param_arrays,
arg_params=arg_params,
param_names=executor_manager.param_names,
update_on_kvstore=update_on_kvstore)
if update_on_kvstore:
kvstore.set_optimizer(optimizer)
# Now start training
train_data.reset()
for epoch in range(begin_epoch, end_epoch):
# Training phase
tic = time.time()
eval_metric.reset()
nbatch = 0
# Iterate over training data.
while True:
do_reset = True
for data_batch in train_data:
executor_manager.load_data_batch(data_batch)
if monitor is not None:
monitor.tic()
executor_manager.forward(is_train=True)
executor_manager.backward()
if update_on_kvstore:
_update_params_on_kvstore(executor_manager.param_arrays,
executor_manager.grad_arrays,
kvstore, executor_manager.param_names)
else:
_update_params(executor_manager.param_arrays,
executor_manager.grad_arrays,
updater=updater,
num_device=len(ctx),
kvstore=kvstore,
param_names=executor_manager.param_names)
if monitor is not None:
monitor.toc_print()
# evaluate at end, so we can lazy copy
executor_manager.update_metric(eval_metric, data_batch.label)
nbatch += 1
# batch callback (for print purpose)
if batch_end_callback is not None:
batch_end_params = BatchEndParam(epoch=epoch,
nbatch=nbatch,
eval_metric=eval_metric,
locals=locals())
_multiple_callbacks(batch_end_callback, batch_end_params)
# this epoch is done possibly earlier
if epoch_size is not None and nbatch >= epoch_size:
do_reset = False
break
if do_reset:
logger.info('Epoch[%d] Resetting Data Iterator', epoch)
train_data.reset()
# this epoch is done
if epoch_size is None or nbatch >= epoch_size:
break
toc = time.time()
logger.info('Epoch[%d] Time cost=%.3f', epoch, (toc - tic))
if epoch_end_callback or epoch + 1 == end_epoch:
executor_manager.copy_to(arg_params, aux_params)
_multiple_callbacks(epoch_end_callback, epoch, symbol, arg_params, aux_params)
# evaluation
if eval_data:
eval_metric.reset()
eval_data.reset()
total_num_batch = 0
for i, eval_batch in enumerate(eval_data):
executor_manager.load_data_batch(eval_batch)
executor_manager.forward(is_train=False)
executor_manager.update_metric(eval_metric, eval_batch.label)
if eval_batch_end_callback is not None:
batch_end_params = BatchEndParam(epoch=epoch,
nbatch=i,
eval_metric=eval_metric,
locals=locals())
_multiple_callbacks(eval_batch_end_callback, batch_end_params)
total_num_batch += 1
if eval_end_callback is not None:
eval_end_params = BatchEndParam(epoch=epoch,
nbatch=total_num_batch,
eval_metric=eval_metric,
locals=locals())
_multiple_callbacks(eval_end_callback, eval_end_params)
eval_data.reset()
# end of all epochs
return
def save_checkpoint(prefix, epoch, symbol, arg_params, aux_params):
"""Checkpoint the model data into file.
Parameters
----------
prefix : str
Prefix of model name.
epoch : int
The epoch number of the model.
symbol : Symbol
The input Symbol.
arg_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's auxiliary states.
Notes
-----
- ``prefix-symbol.json`` will be saved for symbol.
- ``prefix-epoch.params`` will be saved for parameters.
"""
if symbol is not None:
symbol.save('%s-symbol.json' % prefix)
save_dict = {('arg:%s' % k) : v.as_in_context(cpu()) for k, v in arg_params.items()}
save_dict.update({('aux:%s' % k) : v.as_in_context(cpu()) for k, v in aux_params.items()})
param_name = '%s-%04d.params' % (prefix, epoch)
nd.save(param_name, save_dict)
logging.info('Saved checkpoint to \"%s\"', param_name)
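# For example, save_checkpoint('model', 3, symbol, arg_params, aux_params)
# writes 'model-symbol.json' and 'model-0003.params'.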
def load_checkpoint(prefix, epoch):
"""Load model checkpoint from file.
Parameters
----------
prefix : str
Prefix of model name.
epoch : int
Epoch number of model we would like to load.
Returns
-------
symbol : Symbol
The symbol configuration of computation network.
arg_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's auxiliary states.
Notes
-----
- Symbol will be loaded from ``prefix-symbol.json``.
- Parameters will be loaded from ``prefix-epoch.params``.
"""
symbol = sym.load('%s-symbol.json' % prefix)
save_dict = nd.load('%s-%04d.params' % (prefix, epoch))
arg_params = {}
aux_params = {}
for k, v in save_dict.items():
tp, name = k.split(':', 1)
if tp == 'arg':
arg_params[name] = v
if tp == 'aux':
aux_params[name] = v
return (symbol, arg_params, aux_params)
from .callback import LogValidationMetricsCallback # pylint: disable=wrong-import-position
class FeedForward(BASE_ESTIMATOR):
"""Model class of MXNet for training and predicting feedforward nets.
This class is designed for a single-data single output supervised network.
Parameters
----------
symbol : Symbol
The symbol configuration of computation network.
ctx : Context or list of Context, optional
The device context of training and prediction.
To use multi GPU training, pass in a list of gpu contexts.
num_epoch : int, optional
Training parameter, the number of training epochs.
epoch_size : int, optional
Number of batches in an epoch. By default, it is set to
``ceil(num_train_examples / batch_size)``.
optimizer : str or Optimizer, optional
Training parameter, name or optimizer object for training.
initializer : initializer function, optional
Training parameter, the initialization scheme used.
numpy_batch_size : int, optional
The batch size of training data.
Only needed when input array is numpy.
arg_params : dict of str to NDArray, optional
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray, optional
Model parameter, dict of name to NDArray of net's auxiliary states.
allow_extra_params : boolean, optional
Whether to allow extra parameters that are not needed by the symbol
to be passed by aux_params and ``arg_params``.
If this is True, no error will be thrown when ``aux_params`` and ``arg_params``
contain more parameters than needed.
begin_epoch : int, optional
The beginning training epoch.
kwargs : dict
The additional keyword arguments passed to optimizer.
"""
def __init__(self, symbol, ctx=None,
num_epoch=None, epoch_size=None, optimizer='sgd',
initializer=Uniform(0.01),
numpy_batch_size=128,
arg_params=None, aux_params=None,
allow_extra_params=False,
begin_epoch=0,
**kwargs):
warnings.warn(
'\033[91mmxnet.model.FeedForward has been deprecated. ' + \
'Please use mxnet.mod.Module instead.\033[0m',
DeprecationWarning, stacklevel=2)
if isinstance(symbol, sym.Symbol):
self.symbol = symbol
self.sym_gen = None
else:
assert(callable(symbol))
self.symbol = None
self.sym_gen = symbol
# model parameters
self.arg_params = arg_params
self.aux_params = aux_params
self.allow_extra_params = allow_extra_params
self.argument_checked = False
if self.sym_gen is None:
self._check_arguments()
# basic configuration
if ctx is None:
ctx = [cpu()]
elif isinstance(ctx, Context):
ctx = [ctx]
self.ctx = ctx
# training parameters
self.num_epoch = num_epoch
self.epoch_size = epoch_size
self.kwargs = kwargs.copy()
self.optimizer = optimizer
self.initializer = initializer
self.numpy_batch_size = numpy_batch_size
# internal helper state
self._pred_exec = None
self.begin_epoch = begin_epoch
def _check_arguments(self):
"""verify the argument of the default symbol and user provided parameters"""
if self.argument_checked:
return
assert(self.symbol is not None)
self.argument_checked = True
# check if symbol contains duplicated names.
_check_arguments(self.symbol)
# rematch parameters to delete useless ones
if self.allow_extra_params:
if self.arg_params:
arg_names = set(self.symbol.list_arguments())
self.arg_params = {k : v for k, v in self.arg_params.items()
if k in arg_names}
if self.aux_params:
aux_names = set(self.symbol.list_auxiliary_states())
self.aux_params = {k : v for k, v in self.aux_params.items()
if k in aux_names}
@staticmethod
def _is_data_arg(name):
"""Check if name is a data argument."""
return name.endswith('data') or name.endswith('label')
def _init_params(self, inputs, overwrite=False):
"""Initialize weight parameters and auxiliary states."""
inputs = [x if isinstance(x, DataDesc) else DataDesc(*x) for x in inputs]
input_shapes = {item.name: item.shape for item in inputs}
arg_shapes, _, aux_shapes = self.symbol.infer_shape(**input_shapes)
assert arg_shapes is not None
input_dtypes = {item.name: item.dtype for item in inputs}
arg_dtypes, _, aux_dtypes = self.symbol.infer_type(**input_dtypes)
assert arg_dtypes is not None
arg_names = self.symbol.list_arguments()
input_names = input_shapes.keys()
param_names = [key for key in arg_names if key not in input_names]
aux_names = self.symbol.list_auxiliary_states()
param_name_attrs = [x for x in zip(arg_names, arg_shapes, arg_dtypes)
if x[0] in param_names]
arg_params = {k : nd.zeros(shape=s, dtype=t)
for k, s, t in param_name_attrs}
aux_name_attrs = [x for x in zip(aux_names, aux_shapes, aux_dtypes)
if x[0] in aux_names]
aux_params = {k : nd.zeros(shape=s, dtype=t)
for k, s, t in aux_name_attrs}
for k, v in arg_params.items():
if self.arg_params and k in self.arg_params and (not overwrite):
arg_params[k][:] = self.arg_params[k][:]
else:
self.initializer(k, v)
for k, v in aux_params.items():
if self.aux_params and k in self.aux_params and (not overwrite):
aux_params[k][:] = self.aux_params[k][:]
else:
self.initializer(k, v)
self.arg_params = arg_params
self.aux_params = aux_params
return (arg_names, list(param_names), aux_names)
def __getstate__(self):
this = self.__dict__.copy()
this['_pred_exec'] = None
return this
def __setstate__(self, state):
self.__dict__.update(state)
def _init_predictor(self, input_shapes, type_dict=None):
"""Initialize the predictor module for running prediction."""
if self._pred_exec is not None:
arg_shapes, _, _ = self.symbol.infer_shape(**dict(input_shapes))
assert arg_shapes is not None, "Incomplete input shapes"
pred_shapes = [x.shape for x in self._pred_exec.arg_arrays]
if arg_shapes == pred_shapes:
return
# for now only use the first device
pred_exec = self.symbol.simple_bind(
self.ctx[0], grad_req='null', type_dict=type_dict, **dict(input_shapes))
pred_exec.copy_params_from(self.arg_params, self.aux_params)
_check_arguments(self.symbol)
self._pred_exec = pred_exec
def _init_iter(self, X, y, is_train):
"""Initialize the iterator given input."""
if isinstance(X, (np.ndarray, nd.NDArray)):
if y is None:
if is_train:
raise ValueError('y must be specified when X is numpy.ndarray')
else:
y = np.zeros(X.shape[0])
if not isinstance(y, (np.ndarray, nd.NDArray)):
raise TypeError('y must be ndarray when X is numpy.ndarray')
if X.shape[0] != y.shape[0]:
raise ValueError("The numbers of data points and labels not equal")
if y.ndim == 2 and y.shape[1] == 1:
y = y.flatten()
if y.ndim != 1:
raise ValueError("Label must be 1D or 2D (with 2nd dimension being 1)")
if is_train:
return io.NDArrayIter(X, y, min(X.shape[0], self.numpy_batch_size),
shuffle=is_train, last_batch_handle='roll_over')
else:
return io.NDArrayIter(X, y, min(X.shape[0], self.numpy_batch_size), shuffle=False)
if not isinstance(X, io.DataIter):
raise TypeError('X must be DataIter, NDArray or numpy.ndarray')
return X
def _init_eval_iter(self, eval_data):
"""Initialize the iterator given eval_data."""
if eval_data is None:
return eval_data
if isinstance(eval_data, (tuple, list)) and len(eval_data) == 2:
if eval_data[0] is not None:
if eval_data[1] is None and isinstance(eval_data[0], io.DataIter):
return eval_data[0]
input_data = (np.array(eval_data[0]) if isinstance(eval_data[0], list)
else eval_data[0])
input_label = (np.array(eval_data[1]) if isinstance(eval_data[1], list)
else eval_data[1])
return self._init_iter(input_data, input_label, is_train=True)
else:
raise ValueError("Eval data is NONE")
if not isinstance(eval_data, io.DataIter):
raise TypeError('Eval data must be DataIter, or ' \
'NDArray/numpy.ndarray/list pair (i.e. tuple/list of length 2)')
return eval_data
def predict(self, X, num_batch=None, return_data=False, reset=True):
"""Run the prediction, always only use one device.
Parameters
----------
X : mxnet.DataIter
num_batch : int or None
The number of batches to run. Go through all batches if ``None``.
return_data : bool, optional
Whether to also return the input data and labels together with the outputs.
reset : bool, optional
Whether to reset the data iterator before running prediction.
Returns
-------
y : numpy.ndarray or a list of numpy.ndarray if the network has multiple outputs.
The predicted value of the output.
"""
X = self._init_iter(X, None, is_train=False)
if reset:
X.reset()
data_shapes = X.provide_data
data_names = [x[0] for x in data_shapes]
type_dict = dict((key, value.dtype) for (key, value) in self.arg_params.items())
for x in X.provide_data:
if isinstance(x, DataDesc):
type_dict[x.name] = x.dtype
else:
type_dict[x[0]] = mx_real_t
self._init_predictor(data_shapes, type_dict)
batch_size = X.batch_size
data_arrays = [self._pred_exec.arg_dict[name] for name in data_names]
output_list = [[] for _ in range(len(self._pred_exec.outputs))]
if return_data:
data_list = [[] for _ in X.provide_data]
label_list = [[] for _ in X.provide_label]
i = 0
for batch in X:
_load_data(batch, data_arrays)
self._pred_exec.forward(is_train=False)
padded = batch.pad
real_size = batch_size - padded
for o_list, o_nd in zip(output_list, self._pred_exec.outputs):
o_list.append(o_nd[0:real_size].asnumpy())
if return_data:
for j, x in enumerate(batch.data):
data_list[j].append(x[0:real_size].asnumpy())
for j, x in enumerate(batch.label):
label_list[j].append(x[0:real_size].asnumpy())
i += 1
if num_batch is not None and i == num_batch:
break
outputs = [np.concatenate(x) for x in output_list]
if len(outputs) == 1:
outputs = outputs[0]
if return_data:
data = [np.concatenate(x) for x in data_list]
label = [np.concatenate(x) for x in label_list]
if len(data) == 1:
data = data[0]
if len(label) == 1:
label = label[0]
return outputs, data, label
else:
return outputs
def score(self, X, eval_metric='acc', num_batch=None, batch_end_callback=None, reset=True):
"""Run the model given an input and calculate the score
as assessed by an evaluation metric.
Parameters
----------
X : mxnet.DataIter
eval_metric : metric.EvalMetric or str
The metric for calculating score.
num_batch : int or None
The number of batches to run. Go through all batches if ``None``.
Returns
-------
s : float
The final score.
"""
# setup metric
if not isinstance(eval_metric, metric.EvalMetric):
eval_metric = metric.create(eval_metric)
X = self._init_iter(X, None, is_train=False)
if reset:
X.reset()
data_shapes = X.provide_data
data_names = [x[0] for x in data_shapes]
type_dict = dict((key, value.dtype) for (key, value) in self.arg_params.items())
for x in X.provide_data:
if isinstance(x, DataDesc):
type_dict[x.name] = x.dtype
else:
type_dict[x[0]] = mx_real_t
self._init_predictor(data_shapes, type_dict)
data_arrays = [self._pred_exec.arg_dict[name] for name in data_names]
for i, batch in enumerate(X):
if num_batch is not None and i == num_batch:
break
_load_data(batch, data_arrays)
self._pred_exec.forward(is_train=False)
eval_metric.update(batch.label, self._pred_exec.outputs)
if batch_end_callback is not None:
batch_end_params = BatchEndParam(epoch=0,
nbatch=i,
eval_metric=eval_metric,
locals=locals())
_multiple_callbacks(batch_end_callback, batch_end_params)
return eval_metric.get()[1]
def fit(self, X, y=None, eval_data=None, eval_metric='acc',
epoch_end_callback=None, batch_end_callback=None, kvstore='local', logger=None,
work_load_list=None, monitor=None, eval_end_callback=LogValidationMetricsCallback(),
eval_batch_end_callback=None):
"""Fit the model.
Parameters
----------
X : DataIter, or numpy.ndarray/NDArray
Training data. If `X` is a `DataIter`, the name or (if name not available)
the position of its outputs should match the corresponding variable
names defined in the symbolic graph.
y : numpy.ndarray/NDArray, optional
Training set label.
If X is ``numpy.ndarray`` or `NDArray`, `y` is required to be set.
While y can be 1D or 2D (with 2nd dimension as 1), its first dimension must be
the same as `X`, i.e. the number of data points and labels should be equal.
eval_data : DataIter or numpy.ndarray/list/NDArray pair
If eval_data is numpy.ndarray/list/NDArray pair,
it should be ``(valid_data, valid_label)``.
eval_metric : metric.EvalMetric or str or callable
The evaluation metric. This could be the name of evaluation metric
or a custom evaluation function that returns statistics
based on a minibatch.
epoch_end_callback : callable(epoch, symbol, arg_params, aux_states)
A callback that is invoked at end of each epoch.
This can be used to checkpoint model each epoch.
batch_end_callback: callable(epoch)
A callback that is invoked at end of each batch for purposes of printing.
kvstore: KVStore or str, optional
The KVStore or a string kvstore type: 'local', 'dist_sync', 'dist_async'
By default uses 'local'; often there is no need to change it on a single machine.
logger : logging logger, optional
When not specified, default logger will be used.
work_load_list : float or int, optional
The list of work load for different devices,
in the same order as `ctx`.
Note
----
KVStore behavior
- 'local', multi-devices on a single machine, will automatically choose best type.
- 'dist_sync', multiple machines communicating via BSP.
- 'dist_async', multiple machines with asynchronous communication.
"""
data = self._init_iter(X, y, is_train=True)
eval_data = self._init_eval_iter(eval_data)
if self.sym_gen:
self.symbol = self.sym_gen(data.default_bucket_key) # pylint: disable=no-member
self._check_arguments()
self.kwargs["sym"] = self.symbol
arg_names, param_names, aux_names = \
self._init_params(data.provide_data+data.provide_label)
# setup metric
if not isinstance(eval_metric, metric.EvalMetric):
eval_metric = metric.create(eval_metric)
# create kvstore
(kvstore, update_on_kvstore) = _create_kvstore(
kvstore, len(self.ctx), self.arg_params)
param_idx2name = {}
if update_on_kvstore:
param_idx2name.update(enumerate(param_names))
else:
for i, n in enumerate(param_names):
for k in range(len(self.ctx)):
param_idx2name[i*len(self.ctx)+k] = n
self.kwargs["param_idx2name"] = param_idx2name
# initialize optimizer
if isinstance(self.optimizer, str):
batch_size = data.batch_size
if kvstore and 'dist' in kvstore.type and not '_async' in kvstore.type:
batch_size *= kvstore.num_workers
optimizer = opt.create(self.optimizer,
rescale_grad=(1.0/batch_size),
**(self.kwargs))
elif isinstance(self.optimizer, opt.Optimizer):
optimizer = self.optimizer
else:
raise TypeError('optimizer must be a str or an opt.Optimizer instance')
# do training
_train_multi_device(self.symbol, self.ctx, arg_names, param_names, aux_names,
self.arg_params, self.aux_params,
begin_epoch=self.begin_epoch, end_epoch=self.num_epoch,
epoch_size=self.epoch_size,
optimizer=optimizer,
train_data=data, eval_data=eval_data,
eval_metric=eval_metric,
epoch_end_callback=epoch_end_callback,
batch_end_callback=batch_end_callback,
kvstore=kvstore, update_on_kvstore=update_on_kvstore,
logger=logger, work_load_list=work_load_list, monitor=monitor,
eval_end_callback=eval_end_callback,
eval_batch_end_callback=eval_batch_end_callback,
sym_gen=self.sym_gen)
def save(self, prefix, epoch=None):
"""Checkpoint the model checkpoint into file.
You can also use `pickle` to do the job if you only work on Python.
The advantage of `load` and `save` (as compared to `pickle`) is that
the resulting file can be loaded from other MXNet language bindings.
One can also directly `load`/`save` from/to cloud storage (S3, HDFS).
Parameters
----------
prefix : str
Prefix of model name.
Notes
-----
- ``prefix-symbol.json`` will be saved for symbol.
- ``prefix-epoch.params`` will be saved for parameters.
"""
if epoch is None:
epoch = self.num_epoch
assert epoch is not None
save_checkpoint(prefix, epoch, self.symbol, self.arg_params, self.aux_params)
@staticmethod
def load(prefix, epoch, ctx=None, **kwargs):
"""Load model checkpoint from file.
Parameters
----------
prefix : str
Prefix of model name.
epoch : int
Epoch number of the model to load.
ctx : Context or list of Context, optional
The device context of training and prediction.
kwargs : dict
Other parameters for the model, including `num_epoch`, `optimizer` and `numpy_batch_size`.
Returns
-------
model : FeedForward
The loaded model that can be used for prediction.
Notes
-----
- Symbol will be loaded from ``prefix-symbol.json``.
- Parameters will be loaded from ``prefix-epoch.params``.
"""
symbol, arg_params, aux_params = load_checkpoint(prefix, epoch)
return FeedForward(symbol, ctx=ctx,
arg_params=arg_params, aux_params=aux_params,
begin_epoch=epoch,
**kwargs)
@staticmethod
def create(symbol, X, y=None, ctx=None,
num_epoch=None, epoch_size=None, optimizer='sgd', initializer=Uniform(0.01),
eval_data=None, eval_metric='acc',
epoch_end_callback=None, batch_end_callback=None,
kvstore='local', logger=None, work_load_list=None,
eval_end_callback=LogValidationMetricsCallback(),
eval_batch_end_callback=None, **kwargs):
"""Functional style to create a model.
This function is more consistent with functional
languages such as R, where mutation is not allowed.
Parameters
----------
symbol : Symbol
The symbol configuration of a computation network.
X : DataIter
Training data.
y : numpy.ndarray, optional
If `X` is a ``numpy.ndarray``, `y` must be set.
ctx : Context or list of Context, optional
The device context of training and prediction.
To use multi-GPU training, pass in a list of GPU contexts.
num_epoch : int, optional
The number of training epochs.
epoch_size : int, optional
Number of batches in an epoch. By default, it is set to
``ceil(num_train_examples / batch_size)``.
optimizer : str or Optimizer, optional
The name of the chosen optimizer, or an optimizer object, used for training.
initializer : initializer function, optional
The initialization scheme used.
eval_data : DataIter or numpy.ndarray pair
If `eval_data` is a ``numpy.ndarray`` pair, it should
be ``(valid_data, valid_label)``.
eval_metric : metric.EvalMetric or str or callable
The evaluation metric. Can be the name of an evaluation metric
or a custom evaluation function that returns statistics
based on a minibatch.
epoch_end_callback : callable(epoch, symbol, arg_params, aux_states)
A callback that is invoked at end of each epoch.
This can be used to checkpoint model each epoch.
batch_end_callback: callable(epoch)
A callback that is invoked at end of each batch for print purposes.
kvstore: KVStore or str, optional
The KVStore or a string kvstore type: 'local', 'dist_sync', 'dist_async'.
Defaults to 'local', often no need to change for single machine.
logger : logging logger, optional
When not specified, default logger will be used.
work_load_list : list of float or int, optional
The list of work load for different devices,
in the same order as `ctx`.
"""
model = FeedForward(symbol, ctx=ctx, num_epoch=num_epoch,
epoch_size=epoch_size,
optimizer=optimizer, initializer=initializer, **kwargs)
model.fit(X, y, eval_data=eval_data, eval_metric=eval_metric,
epoch_end_callback=epoch_end_callback,
batch_end_callback=batch_end_callback,
kvstore=kvstore,
logger=logger,
work_load_list=work_load_list,
eval_end_callback=eval_end_callback,
eval_batch_end_callback=eval_batch_end_callback)
return model
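# Hypothetical end-to-end sketch for the (deprecated) FeedForward API; the
# two-layer network, the random data and the 'example-model' prefix below are
# illustrative only, not part of this module.
def _example_feedforward():  # pragma: no cover
    data = sym.Variable('data')
    net = sym.FullyConnected(data=data, name='fc1', num_hidden=10)
    net = sym.SoftmaxOutput(data=net, name='softmax')
    X = np.random.rand(100, 20).astype('float32')
    y = np.random.randint(0, 10, size=(100,))
    model = FeedForward.create(net, X=X, y=y, num_epoch=2,
                               numpy_batch_size=25)
    preds = model.predict(X)            # (100, 10) class probabilities
    model.save('example-model')         # writes example-model-symbol.json etc.
    restored = FeedForward.load('example-model', epoch=2)
    return preds, restored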
| apache-2.0 |
PredictiveScienceLab/GPy | GPy/testing/likelihood_tests.py | 8 | 35323 | # Copyright (c) 2014, Alan Saul
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
import unittest
import GPy
from GPy.models import GradientChecker
import functools
import inspect
from GPy.likelihoods import link_functions
from functools import partial
fixed_seed = 7
#np.seterr(divide='raise')
def dparam_partial(inst_func, *args):
"""
If we have a instance method that needs to be called but that doesn't
take the parameter we wish to change to checkgrad, then this function
will change the variable using set params.
inst_func: should be an instance method of the object that we would like
to change
param: the param that will be given to set_params
args: anything else that needs to be given to the function (for example
the f or Y that are being used in the function whilst we tweak the
param)
"""
def param_func(param_val, param_name, inst_func, args):
#inst_func.__self__._set_params(param)
#inst_func.__self__.add_parameter(Param(param_name, param_val))
inst_func.__self__[param_name] = param_val
return inst_func(*args)
return functools.partial(param_func, inst_func=inst_func, args=args)
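# A minimal sketch of what dparam_partial produces; the Gaussian likelihood
# and the toy f/Y arrays are illustrative.
def _example_dparam_partial():  # pragma: no cover
    lik = GPy.likelihoods.Gaussian(variance=0.5)
    f = np.random.randn(10, 1)
    Y = np.random.randn(10, 1)
    pdf_at = dparam_partial(lik.logpdf, f, Y)
    # Sets lik['.*variance'] = 0.3, then evaluates lik.logpdf(f, Y):
    return pdf_at(0.3, '.*variance')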
def dparam_checkgrad(func, dfunc, params, params_names, args, constraints=None, randomize=False, verbose=False):
"""
checkgrad expects f: R^N -> R^1 and df: R^N -> R^N.
However, if we are holding other parameters fixed and moving something else,
we need to check the gradient of each of the fixed parameters
(f and y for example) separately, whilst moving another parameter.
Otherwise f gives back R^N and
df gives back R^NxM, where M is
the number of parameters and N is the number of data points.
We need to take a slice out of f and a slice out of df.
"""
print("\n{} likelihood: {} vs {}".format(func.__self__.__class__.__name__,
func.__name__, dfunc.__name__))
partial_f = dparam_partial(func, *args)
partial_df = dparam_partial(dfunc, *args)
gradchecking = True
zipped_params = zip(params, params_names)
for param_ind, (param_val, param_name) in enumerate(zipped_params):
#Check one parameter at a time, make sure it is 2d (as some gradients only return arrays) then strip out the parameter
f_ = partial_f(param_val, param_name)
df_ = partial_df(param_val, param_name)
#Reshape it such that we have a 3d matrix; that is, we want it (?, N, D) regardless of whether ? is num_params or not
f_ = f_.reshape(-1, f_.shape[0], f_.shape[1])
df_ = df_.reshape(-1, f_.shape[0], f_.shape[1])
#Get the number of f and number of dimensions
fnum = f_.shape[-2]
fdim = f_.shape[-1]
dfnum = df_.shape[-2]
for fixed_val in range(dfnum):
#dlik and dlik_dvar give back 1 value for each
f_ind = min(fnum, fixed_val+1) - 1
print("fnum: {} dfnum: {} f_ind: {} fixed_val: {}".format(fnum, dfnum, f_ind, fixed_val))
#Make grad checker with this param moving, note that set_params is NOT being called
#The parameter is being set directly with __setattr__
#Check only the parameter and function value we wish to check at a time
#func = lambda p_val, fnum, fdim, param_ind, f_ind, param_ind: partial_f(p_val, param_name).reshape(-1, fnum, fdim)[param_ind, f_ind, :]
#dfunc_dparam = lambda d_val, fnum, fdim, param_ind, fixed_val: partial_df(d_val, param_name).reshape(-1, fnum, fdim)[param_ind, fixed_val, :]
#First we reshape the output such that it is (num_params, N, D) then we pull out the relevant parameter-findex and checkgrad just this index at a time
func = lambda p_val: partial_f(p_val, param_name).reshape(-1, fnum, fdim)[param_ind, f_ind, :]
dfunc_dparam = lambda d_val: partial_df(d_val, param_name).reshape(-1, fnum, fdim)[param_ind, fixed_val, :]
grad = GradientChecker(func, dfunc_dparam, param_val, [param_name])
if constraints is not None:
for constrain_param, constraint in constraints:
if grad.grep_param_names(constrain_param):
constraint(constrain_param, grad)
else:
print("parameter didn't exist")
print(constrain_param, " ", constraint)
if randomize:
grad.randomize()
if verbose:
print(grad)
if not grad.checkgrad(verbose=True):
gradchecking = False
return gradchecking
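# A minimal sketch of calling dparam_checkgrad directly, in the same way the
# test class below drives it; the likelihood, the start value and the
# parameter name are illustrative.
def _example_dparam_checkgrad():  # pragma: no cover
    lik = GPy.likelihoods.Gaussian(variance=0.5)
    f = np.random.randn(10, 1)
    Y = np.random.randn(10, 1)
    return dparam_checkgrad(lik.logpdf, lik.dlogpdf_dtheta,
                            params=[0.5], params_names=['.*variance'],
                            args=(f, Y, None), verbose=True)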
from nose.tools import with_setup
class TestNoiseModels(object):
"""
Generic model checker
"""
def setUp(self):
np.random.seed(fixed_seed)
self.N = 15
self.D = 3
self.X = np.random.rand(self.N, self.D)*10
self.real_std = 0.1
noise = np.random.randn(*self.X[:, 0].shape)*self.real_std
self.Y = (np.sin(self.X[:, 0]*2*np.pi) + noise)[:, None]
self.f = np.random.rand(self.N, 1)
self.binary_Y = np.asarray(np.random.rand(self.N) > 0.5, dtype=np.int)[:, None]
self.positive_Y = np.exp(self.Y.copy())
tmp = np.round(self.X[:, 0]*3-3)[:, None] + np.random.randint(0,3, self.X.shape[0])[:, None]
self.integer_Y = np.where(tmp > 0, tmp, 0)
self.var = 0.2
self.deg_free = 4.0
#Make a bigger step as lower bound can be quite curved
self.step = 1e-4
"""
Dictionary where we nest models we would like to check
Name: {
"model": model_instance,
"grad_params": {
"names": [names_of_params_we_want, to_grad_check],
"vals": [values_of_params, to_start_at],
"constrain": [constraint_wrappers, listed_here]
},
"laplace": boolean_of_whether_model_should_work_for_laplace,
"ep": boolean_of_whether_model_should_work_for_laplace,
"link_f_constraints": [constraint_wrappers, listed_here]
}
"""
self.noise_models = {"Student_t_default": {
"model": GPy.likelihoods.StudentT(deg_free=self.deg_free, sigma2=self.var),
"grad_params": {
"names": [".*t_scale2"],
"vals": [self.var],
"constraints": [(".*t_scale2", self.constrain_positive), (".*deg_free", self.constrain_fixed)]
},
"laplace": True
},
#"Student_t_deg_free": {
#"model": GPy.likelihoods.StudentT(deg_free=self.deg_free, sigma2=self.var),
#"grad_params": {
#"names": [".*deg_free"],
#"vals": [self.deg_free],
#"constraints": [(".*t_scale2", self.constrain_fixed), (".*deg_free", self.constrain_positive)]
#},
#"laplace": True
#},
"Student_t_1_var": {
"model": GPy.likelihoods.StudentT(deg_free=self.deg_free, sigma2=self.var),
"grad_params": {
"names": [".*t_scale2"],
"vals": [1.0],
"constraints": [(".*t_scale2", self.constrain_positive), (".*deg_free", self.constrain_fixed)]
},
"laplace": True
},
"Student_t_small_deg_free": {
"model": GPy.likelihoods.StudentT(deg_free=1.5, sigma2=self.var),
"grad_params": {
"names": [".*t_scale2"],
"vals": [self.var],
"constraints": [(".*t_scale2", self.constrain_positive), (".*deg_free", self.constrain_fixed)]
},
"laplace": True
},
"Student_t_small_var": {
"model": GPy.likelihoods.StudentT(deg_free=self.deg_free, sigma2=self.var),
"grad_params": {
"names": [".*t_scale2"],
"vals": [0.001],
"constraints": [(".*t_scale2", self.constrain_positive), (".*deg_free", self.constrain_fixed)]
},
"laplace": True
},
"Student_t_large_var": {
"model": GPy.likelihoods.StudentT(deg_free=self.deg_free, sigma2=self.var),
"grad_params": {
"names": [".*t_scale2"],
"vals": [10.0],
"constraints": [(".*t_scale2", self.constrain_positive), (".*deg_free", self.constrain_fixed)]
},
"laplace": True
},
"Student_t_approx_gauss": {
"model": GPy.likelihoods.StudentT(deg_free=1000, sigma2=self.var),
"grad_params": {
"names": [".*t_scale2"],
"vals": [self.var],
"constraints": [(".*t_scale2", self.constrain_positive), (".*deg_free", self.constrain_fixed)]
},
"laplace": True
},
#"Student_t_log": {
#"model": GPy.likelihoods.StudentT(gp_link=link_functions.Log(), deg_free=5, sigma2=self.var),
#"grad_params": {
#"names": [".*t_noise"],
#"vals": [self.var],
#"constraints": [(".*t_noise", self.constrain_positive), (".*deg_free", self.constrain_fixed)]
#},
#"laplace": True
#},
"Gaussian_default": {
"model": GPy.likelihoods.Gaussian(variance=self.var),
"grad_params": {
"names": [".*variance"],
"vals": [self.var],
"constraints": [(".*variance", self.constrain_positive)]
},
"laplace": True,
"ep": False, # FIXME: Should be True when we have it working again
"variational_expectations": True,
},
"Gaussian_log": {
"model": GPy.likelihoods.Gaussian(gp_link=link_functions.Log(), variance=self.var),
"grad_params": {
"names": [".*variance"],
"vals": [self.var],
"constraints": [(".*variance", self.constrain_positive)]
},
"laplace": True,
"variational_expectations": True
},
#"Gaussian_probit": {
#"model": GPy.likelihoods.gaussian(gp_link=link_functions.Probit(), variance=self.var, D=self.D, N=self.N),
#"grad_params": {
#"names": ["noise_model_variance"],
#"vals": [self.var],
#"constraints": [constrain_positive]
#},
#"laplace": True
#},
#"Gaussian_log_ex": {
#"model": GPy.likelihoods.gaussian(gp_link=link_functions.Log_ex_1(), variance=self.var, D=self.D, N=self.N),
#"grad_params": {
#"names": ["noise_model_variance"],
#"vals": [self.var],
#"constraints": [constrain_positive]
#},
#"laplace": True
#},
"Bernoulli_default": {
"model": GPy.likelihoods.Bernoulli(),
"link_f_constraints": [partial(self.constrain_bounded, lower=0, upper=1)],
"laplace": True,
"Y": self.binary_Y,
"ep": False, # FIXME: Should be True when we have it working again
"variational_expectations": True
},
"Exponential_default": {
"model": GPy.likelihoods.Exponential(),
"link_f_constraints": [self.constrain_positive],
"Y": self.positive_Y,
"laplace": True,
},
"Poisson_default": {
"model": GPy.likelihoods.Poisson(),
"link_f_constraints": [self.constrain_positive],
"Y": self.integer_Y,
"laplace": True,
"ep": False #Should work though...
},
#,
#GAMMA needs some work!"Gamma_default": {
#"model": GPy.likelihoods.Gamma(),
#"link_f_constraints": [constrain_positive],
#"Y": self.positive_Y,
#"laplace": True
#}
}
####################################################
# Constraint wrappers so we can just list them off #
####################################################
def constrain_fixed(self, regex, model):
model[regex].constrain_fixed()
def constrain_negative(self, regex, model):
model[regex].constrain_negative()
def constrain_positive(self, regex, model):
model[regex].constrain_positive()
def constrain_fixed_below(self, regex, model, up_to):
model[regex][0:up_to].constrain_fixed()
def constrain_fixed_above(self, regex, model, above):
model[regex][above:].constrain_fixed()
def constrain_bounded(self, regex, model, lower, upper):
"""
Used like: partial(constrain_bounded, lower=0, upper=1)
"""
model[regex].constrain_bounded(lower, upper)
def tearDown(self):
self.Y = None
self.f = None
self.X = None
def test_scale2_models(self):
self.setUp()
for name, attributes in self.noise_models.items():
model = attributes["model"]
if "grad_params" in attributes:
params = attributes["grad_params"]
param_vals = params["vals"]
param_names= params["names"]
param_constraints = params["constraints"]
else:
params = []
param_vals = []
param_names = []
constrain_positive = []
param_constraints = []
if "link_f_constraints" in attributes:
link_f_constraints = attributes["link_f_constraints"]
else:
link_f_constraints = []
if "Y" in attributes:
Y = attributes["Y"].copy()
else:
Y = self.Y.copy()
if "f" in attributes:
f = attributes["f"].copy()
else:
f = self.f.copy()
if "Y_metadata" in attributes:
Y_metadata = attributes["Y_metadata"].copy()
else:
Y_metadata = None
if "laplace" in attributes:
laplace = attributes["laplace"]
else:
laplace = False
if "ep" in attributes:
ep = attributes["ep"]
else:
ep = False
if "variational_expectations" in attributes:
var_exp = attributes["variational_expectations"]
else:
var_exp = False
#if len(param_vals) > 1:
#raise NotImplementedError("Cannot support multiple params in likelihood yet!")
#Required by all
#Normal derivatives
yield self.t_logpdf, model, Y, f, Y_metadata
yield self.t_dlogpdf_df, model, Y, f, Y_metadata
yield self.t_d2logpdf_df2, model, Y, f, Y_metadata
#Link derivatives
yield self.t_dlogpdf_dlink, model, Y, f, Y_metadata, link_f_constraints
yield self.t_d2logpdf_dlink2, model, Y, f, Y_metadata, link_f_constraints
if laplace:
#Laplace only derivatives
yield self.t_d3logpdf_df3, model, Y, f, Y_metadata
yield self.t_d3logpdf_dlink3, model, Y, f, Y_metadata, link_f_constraints
#Params
yield self.t_dlogpdf_dparams, model, Y, f, Y_metadata, param_vals, param_names, param_constraints
yield self.t_dlogpdf_df_dparams, model, Y, f, Y_metadata, param_vals, param_names, param_constraints
yield self.t_d2logpdf2_df2_dparams, model, Y, f, Y_metadata, param_vals, param_names, param_constraints
#Link params
yield self.t_dlogpdf_link_dparams, model, Y, f, Y_metadata, param_vals, param_names, param_constraints
yield self.t_dlogpdf_dlink_dparams, model, Y, f, Y_metadata, param_vals, param_names, param_constraints
yield self.t_d2logpdf2_dlink2_dparams, model, Y, f, Y_metadata, param_vals, param_names, param_constraints
#laplace likelihood gradcheck
yield self.t_laplace_fit_rbf_white, model, self.X, Y, f, Y_metadata, self.step, param_vals, param_names, param_constraints
if ep:
#ep likelihood gradcheck
yield self.t_ep_fit_rbf_white, model, self.X, Y, f, Y_metadata, self.step, param_vals, param_names, param_constraints
if var_exp:
#Need to specify mu and var!
yield self.t_varexp, model, Y, Y_metadata
yield self.t_dexp_dmu, model, Y, Y_metadata
yield self.t_dexp_dvar, model, Y, Y_metadata
self.tearDown()
#############
# dpdf_df's #
#############
@with_setup(setUp, tearDown)
def t_logpdf(self, model, Y, f, Y_metadata):
print("\n{}".format(inspect.stack()[0][3]))
print(model)
#print model._get_params()
np.testing.assert_almost_equal(
model.pdf(f.copy(), Y.copy(), Y_metadata=Y_metadata).prod(),
np.exp(model.logpdf(f.copy(), Y.copy(), Y_metadata=Y_metadata).sum())
)
@with_setup(setUp, tearDown)
def t_dlogpdf_df(self, model, Y, f, Y_metadata):
print("\n{}".format(inspect.stack()[0][3]))
self.description = "\n{}".format(inspect.stack()[0][3])
logpdf = functools.partial(model.logpdf, y=Y, Y_metadata=Y_metadata)
dlogpdf_df = functools.partial(model.dlogpdf_df, y=Y, Y_metadata=Y_metadata)
grad = GradientChecker(logpdf, dlogpdf_df, f.copy(), 'g')
grad.randomize()
print(model)
assert grad.checkgrad(verbose=1)
@with_setup(setUp, tearDown)
def t_d2logpdf_df2(self, model, Y, f, Y_metadata):
print("\n{}".format(inspect.stack()[0][3]))
dlogpdf_df = functools.partial(model.dlogpdf_df, y=Y, Y_metadata=Y_metadata)
d2logpdf_df2 = functools.partial(model.d2logpdf_df2, y=Y, Y_metadata=Y_metadata)
grad = GradientChecker(dlogpdf_df, d2logpdf_df2, f.copy(), 'g')
grad.randomize()
print(model)
assert grad.checkgrad(verbose=1)
@with_setup(setUp, tearDown)
def t_d3logpdf_df3(self, model, Y, f, Y_metadata):
print("\n{}".format(inspect.stack()[0][3]))
d2logpdf_df2 = functools.partial(model.d2logpdf_df2, y=Y, Y_metadata=Y_metadata)
d3logpdf_df3 = functools.partial(model.d3logpdf_df3, y=Y, Y_metadata=Y_metadata)
grad = GradientChecker(d2logpdf_df2, d3logpdf_df3, f.copy(), 'g')
grad.randomize()
print(model)
assert grad.checkgrad(verbose=1)
##############
# df_dparams #
##############
@with_setup(setUp, tearDown)
def t_dlogpdf_dparams(self, model, Y, f, Y_metadata, params, params_names, param_constraints):
print("\n{}".format(inspect.stack()[0][3]))
print(model)
assert (
dparam_checkgrad(model.logpdf, model.dlogpdf_dtheta,
params, params_names, args=(f, Y, Y_metadata), constraints=param_constraints,
randomize=False, verbose=True)
)
@with_setup(setUp, tearDown)
def t_dlogpdf_df_dparams(self, model, Y, f, Y_metadata, params, params_names, param_constraints):
print("\n{}".format(inspect.stack()[0][3]))
print(model)
assert (
dparam_checkgrad(model.dlogpdf_df, model.dlogpdf_df_dtheta,
params, params_names, args=(f, Y, Y_metadata), constraints=param_constraints,
randomize=False, verbose=True)
)
@with_setup(setUp, tearDown)
def t_d2logpdf2_df2_dparams(self, model, Y, f, Y_metadata, params, params_names, param_constraints):
print("\n{}".format(inspect.stack()[0][3]))
print(model)
assert (
dparam_checkgrad(model.d2logpdf_df2, model.d2logpdf_df2_dtheta,
params, params_names, args=(f, Y, Y_metadata), constraints=param_constraints,
randomize=False, verbose=True)
)
################
# dpdf_dlink's #
################
@with_setup(setUp, tearDown)
def t_dlogpdf_dlink(self, model, Y, f, Y_metadata, link_f_constraints):
print("\n{}".format(inspect.stack()[0][3]))
logpdf = functools.partial(model.logpdf_link, y=Y, Y_metadata=Y_metadata)
dlogpdf_dlink = functools.partial(model.dlogpdf_dlink, y=Y, Y_metadata=Y_metadata)
grad = GradientChecker(logpdf, dlogpdf_dlink, f.copy(), 'g')
#Apply constraints to link_f values
for constraint in link_f_constraints:
constraint('g', grad)
grad.randomize()
print(grad)
print(model)
assert grad.checkgrad(verbose=1)
@with_setup(setUp, tearDown)
def t_d2logpdf_dlink2(self, model, Y, f, Y_metadata, link_f_constraints):
print("\n{}".format(inspect.stack()[0][3]))
dlogpdf_dlink = functools.partial(model.dlogpdf_dlink, y=Y, Y_metadata=Y_metadata)
d2logpdf_dlink2 = functools.partial(model.d2logpdf_dlink2, y=Y, Y_metadata=Y_metadata)
grad = GradientChecker(dlogpdf_dlink, d2logpdf_dlink2, f.copy(), 'g')
#Apply constraints to link_f values
for constraint in link_f_constraints:
constraint('g', grad)
grad.randomize()
print(grad)
print(model)
assert grad.checkgrad(verbose=1)
@with_setup(setUp, tearDown)
def t_d3logpdf_dlink3(self, model, Y, f, Y_metadata, link_f_constraints):
print("\n{}".format(inspect.stack()[0][3]))
d2logpdf_dlink2 = functools.partial(model.d2logpdf_dlink2, y=Y, Y_metadata=Y_metadata)
d3logpdf_dlink3 = functools.partial(model.d3logpdf_dlink3, y=Y, Y_metadata=Y_metadata)
grad = GradientChecker(d2logpdf_dlink2, d3logpdf_dlink3, f.copy(), 'g')
#Apply constraints to link_f values
for constraint in link_f_constraints:
constraint('g', grad)
grad.randomize()
print(grad)
print(model)
assert grad.checkgrad(verbose=1)
#################
# dlink_dparams #
#################
@with_setup(setUp, tearDown)
def t_dlogpdf_link_dparams(self, model, Y, f, Y_metadata, params, param_names, param_constraints):
print("\n{}".format(inspect.stack()[0][3]))
print(model)
assert (
dparam_checkgrad(model.logpdf_link, model.dlogpdf_link_dtheta,
params, param_names, args=(f, Y, Y_metadata), constraints=param_constraints,
randomize=False, verbose=True)
)
@with_setup(setUp, tearDown)
def t_dlogpdf_dlink_dparams(self, model, Y, f, Y_metadata, params, param_names, param_constraints):
print("\n{}".format(inspect.stack()[0][3]))
print(model)
assert (
dparam_checkgrad(model.dlogpdf_dlink, model.dlogpdf_dlink_dtheta,
params, param_names, args=(f, Y, Y_metadata), constraints=param_constraints,
randomize=False, verbose=True)
)
@with_setup(setUp, tearDown)
def t_d2logpdf2_dlink2_dparams(self, model, Y, f, Y_metadata, params, param_names, param_constraints):
print("\n{}".format(inspect.stack()[0][3]))
print(model)
assert (
dparam_checkgrad(model.d2logpdf_dlink2, model.d2logpdf_dlink2_dtheta,
params, param_names, args=(f, Y, Y_metadata), constraints=param_constraints,
randomize=False, verbose=True)
)
################
# laplace test #
################
@with_setup(setUp, tearDown)
def t_laplace_fit_rbf_white(self, model, X, Y, f, Y_metadata, step, param_vals, param_names, constraints):
print("\n{}".format(inspect.stack()[0][3]))
#Normalize
Y = Y/Y.max()
white_var = 1e-5
kernel = GPy.kern.RBF(X.shape[1]) + GPy.kern.White(X.shape[1])
laplace_likelihood = GPy.inference.latent_function_inference.Laplace()
m = GPy.core.GP(X.copy(), Y.copy(), kernel, likelihood=model, Y_metadata=Y_metadata, inference_method=laplace_likelihood)
m['.*white'].constrain_fixed(white_var)
#Set constraints
for constrain_param, constraint in constraints:
constraint(constrain_param, m)
print(m)
m.randomize()
m.randomize()
#Set params
for param_num in range(len(param_names)):
name = param_names[param_num]
m[name] = param_vals[param_num]
#m.optimize(max_iters=8)
print(m)
#if not m.checkgrad(step=step):
#m.checkgrad(verbose=1, step=step)
#NOTE this test appears to be stochastic for some likelihoods (student t?)
# appears to all be working in test mode right now...
#if isinstance(model, GPy.likelihoods.StudentT):
assert m.checkgrad(verbose=1, step=step)
###########
# EP test #
###########
@with_setup(setUp, tearDown)
def t_ep_fit_rbf_white(self, model, X, Y, f, Y_metadata, step, param_vals, param_names, constraints):
print("\n{}".format(inspect.stack()[0][3]))
#Normalize
Y = Y/Y.max()
white_var = 1e-6
kernel = GPy.kern.RBF(X.shape[1]) + GPy.kern.White(X.shape[1])
ep_inf = GPy.inference.latent_function_inference.EP()
m = GPy.core.GP(X.copy(), Y.copy(), kernel=kernel, likelihood=model, Y_metadata=Y_metadata, inference_method=ep_inf)
m['.*white'].constrain_fixed(white_var)
for param_num in range(len(param_names)):
name = param_names[param_num]
m[name] = param_vals[param_num]
constraints[param_num](name, m)
m.randomize()
print(m)
assert m.checkgrad(verbose=1, step=step)
################
# variational expectations #
################
@with_setup(setUp, tearDown)
def t_varexp(self, model, Y, Y_metadata):
#Test that the analytic implementation (if it exists) matches the generic gauss
#hermite implementation
print("\n{}".format(inspect.stack()[0][3]))
#Make mu and var (marginal means and variances of q(f)) draws from a GP
k = GPy.kern.RBF(1).K(np.linspace(0,1,Y.shape[0])[:, None])
L = GPy.util.linalg.jitchol(k)
mu = L.dot(np.random.randn(*Y.shape))
#Variance must be positive
var = np.abs(L.dot(np.random.randn(*Y.shape))) + 0.01
expectation = model.variational_expectations(Y=Y, m=mu, v=var, gh_points=None, Y_metadata=Y_metadata)[0]
#Implementation of gauss hermite integration
shape = mu.shape
gh_x, gh_w = np.polynomial.hermite.hermgauss(50)
m,v,Y = mu.flatten(), var.flatten(), Y.flatten()
#make a grid of points
X = gh_x[None,:]*np.sqrt(2.*v[:,None]) + m[:,None]
#evaluate the likelihood for the grid. The first axis indexes the data (and mu, var) and the second indexes the grid.
# broadcasting needs to be handled carefully.
logp = model.logpdf(X, Y[:,None], Y_metadata=Y_metadata)
#average over the grid to get the expectation
#the division by sqrt(pi) comes from the fact that each quadrature point needs to be scaled by 1/sqrt(pi)
expectation_gh = np.dot(logp, gh_w)/np.sqrt(np.pi)
expectation_gh = expectation_gh.reshape(*shape)
np.testing.assert_almost_equal(expectation, expectation_gh, decimal=5)
@with_setup(setUp, tearDown)
def t_dexp_dmu(self, model, Y, Y_metadata):
print("\n{}".format(inspect.stack()[0][3]))
#Make mu and var (marginal means and variances of q(f)) draws from a GP
k = GPy.kern.RBF(1).K(np.linspace(0,1,Y.shape[0])[:, None])
L = GPy.util.linalg.jitchol(k)
mu = L.dot(np.random.randn(*Y.shape))
#Variance must be positive
var = np.abs(L.dot(np.random.randn(*Y.shape))) + 0.01
expectation = functools.partial(model.variational_expectations, Y=Y, v=var, gh_points=None, Y_metadata=Y_metadata)
#Function to get the nth returned value
def F(mu):
return expectation(m=mu)[0]
def dmu(mu):
return expectation(m=mu)[1]
grad = GradientChecker(F, dmu, mu.copy(), 'm')
grad.randomize()
print(grad)
print(model)
assert grad.checkgrad(verbose=1)
@with_setup(setUp, tearDown)
def t_dexp_dvar(self, model, Y, Y_metadata):
print("\n{}".format(inspect.stack()[0][3]))
#Make mu and var (marginal means and variances of q(f)) draws from a GP
k = GPy.kern.RBF(1).K(np.linspace(0,1,Y.shape[0])[:, None])
L = GPy.util.linalg.jitchol(k)
mu = L.dot(np.random.randn(*Y.shape))
#Variance must be positive
var = np.abs(L.dot(np.random.randn(*Y.shape))) + 0.01
expectation = functools.partial(model.variational_expectations, Y=Y, m=mu, gh_points=None, Y_metadata=Y_metadata)
#Function to get the nth returned value
def F(var):
return expectation(v=var)[0]
def dvar(var):
return expectation(v=var)[2]
grad = GradientChecker(F, dvar, var.copy(), 'v')
self.constrain_positive('v', grad)
#grad.randomize()
print(grad)
print(model)
assert grad.checkgrad(verbose=1)
class LaplaceTests(unittest.TestCase):
"""
Specific likelihood tests, not general enough for the above tests
"""
def setUp(self):
np.random.seed(fixed_seed)
self.N = 15
self.D = 1
self.X = np.random.rand(self.N, self.D)*10
self.real_std = 0.1
noise = np.random.randn(*self.X[:, 0].shape)*self.real_std
self.Y = (np.sin(self.X[:, 0]*2*np.pi) + noise)[:, None]
self.f = np.random.rand(self.N, 1)
self.var = 0.2
self.var = np.random.rand(1)
self.stu_t = GPy.likelihoods.StudentT(deg_free=5, sigma2=self.var)
#TODO: Gaussians with a non-identity link, e.g. GPy.likelihoods.Gaussian(gp_link=link_functions.Log(), variance=self.var)
self.gauss = GPy.likelihoods.Gaussian(variance=self.var)
#Make a bigger step as lower bound can be quite curved
self.step = 1e-6
def tearDown(self):
self.stu_t = None
self.gauss = None
self.Y = None
self.f = None
self.X = None
def test_gaussian_d2logpdf_df2_2(self):
print("\n{}".format(inspect.stack()[0][3]))
self.Y = None
self.N = 2
self.D = 1
self.X = np.linspace(0, self.D, self.N)[:, None]
self.real_std = 0.2
noise = np.random.randn(*self.X.shape)*self.real_std
self.Y = np.sin(self.X*2*np.pi) + noise
self.f = np.random.rand(self.N, 1)
dlogpdf_df = functools.partial(self.gauss.dlogpdf_df, y=self.Y)
d2logpdf_df2 = functools.partial(self.gauss.d2logpdf_df2, y=self.Y)
grad = GradientChecker(dlogpdf_df, d2logpdf_df2, self.f.copy(), 'g')
grad.randomize()
self.assertTrue(grad.checkgrad(verbose=1))
def test_laplace_log_likelihood(self):
debug = False
real_std = 0.1
initial_var_guess = 0.5
#Start a function, any function
X = np.linspace(0.0, np.pi*2, 100)[:, None]
Y = np.sin(X) + np.random.randn(*X.shape)*real_std
Y = Y/Y.max()
#Yc = Y.copy()
#Yc[75:80] += 1
kernel1 = GPy.kern.RBF(X.shape[1]) + GPy.kern.White(X.shape[1])
#FIXME: Make sure you can copy kernels when params are fixed
#kernel2 = kernel1.copy()
kernel2 = GPy.kern.RBF(X.shape[1]) + GPy.kern.White(X.shape[1])
gauss_distr1 = GPy.likelihoods.Gaussian(variance=initial_var_guess)
exact_inf = GPy.inference.latent_function_inference.ExactGaussianInference()
m1 = GPy.core.GP(X, Y.copy(), kernel=kernel1, likelihood=gauss_distr1, inference_method=exact_inf)
m1['.*white'].constrain_fixed(1e-6)
m1['.*Gaussian_noise.variance'].constrain_bounded(1e-4, 10)
m1.randomize()
gauss_distr2 = GPy.likelihoods.Gaussian(variance=initial_var_guess)
laplace_inf = GPy.inference.latent_function_inference.Laplace()
m2 = GPy.core.GP(X, Y.copy(), kernel=kernel2, likelihood=gauss_distr2, inference_method=laplace_inf)
m2['.*white'].constrain_fixed(1e-6)
m2['.*Gaussian_noise.variance'].constrain_bounded(1e-4, 10)
m2.randomize()
if debug:
print(m1)
print(m2)
optimizer = 'scg'
print("Gaussian")
m1.optimize(optimizer, messages=debug, ipython_notebook=False)
print("Laplace Gaussian")
m2.optimize(optimizer, messages=debug, ipython_notebook=False)
if debug:
print(m1)
print(m2)
m2[:] = m1[:]
#Predict for training points to get posterior mean and variance
post_mean, post_var = m1.predict(X)
post_mean_approx, post_var_approx = m2.predict(X)
if debug:
from matplotlib import pyplot as pb
pb.figure(5)
pb.title('posterior means')
pb.scatter(X, post_mean, c='g')
pb.scatter(X, post_mean_approx, c='r', marker='x')
pb.figure(6)
pb.title('plot_f')
m1.plot_f(fignum=6)
m2.plot_f(fignum=6)
fig, axes = pb.subplots(1, 2)
fig.suptitle('Covariance matrices')
axes[0].matshow(m1.likelihood.covariance_matrix)
axes[1].matshow(m2.likelihood.covariance_matrix)
pb.figure(8)
pb.scatter(X, m1.likelihood.Y, c='g')
pb.scatter(X, m2.likelihood.Y, c='r', marker='x')
#Check Y's are the same
np.testing.assert_almost_equal(m1.Y, m2.Y, decimal=5)
#Check marginals are the same
np.testing.assert_almost_equal(m1.log_likelihood(), m2.log_likelihood(), decimal=2)
#Check marginals are the same with random
m1.randomize()
m2[:] = m1[:]
np.testing.assert_almost_equal(m1.log_likelihood(), m2.log_likelihood(), decimal=2)
#Check they are checkgradding
#m1.checkgrad(verbose=1)
#m2.checkgrad(verbose=1)
self.assertTrue(m1.checkgrad(verbose=True))
self.assertTrue(m2.checkgrad(verbose=True))
if __name__ == "__main__":
print("Running unit tests")
unittest.main()
| bsd-3-clause |
bmassman/fake_news | fake_news/pipeline/build_df.py | 1 | 1826 | #!/usr/bin/env python3
"""
Module to build dataframe of news articles from sqlite3 database.
"""
import os
import re
from urllib.parse import urlparse
import sqlite3
from contextlib import closing
from datetime import datetime
from typing import Optional
import pandas as pd
DB_FILE_NAME = os.path.join('fake_news', 'articles.db')
def get_url_base(row):
"""Return base url from article row."""
url = row['url']
domain_ending = re.compile(r'\.[a-z]{2,3}$')
domain_prefix = re.compile(r'^[a-z]+\.')
domains_to_keep = {'wsj', 'cnn', 'cbs', 'nbc', 'bbc', 'de'}
net_loc = urlparse(url).netloc
while net_loc.startswith('www.'):
net_loc = net_loc[4:]
while True:
match = domain_ending.search(net_loc)
# match.group(0) includes the leading dot, so strip it before the lookup
if not match or match.group(0)[1:] in domains_to_keep:
break
net_loc = domain_ending.sub('', net_loc)
net_loc = domain_prefix.sub('', net_loc)
return net_loc
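# Illustrative checks for get_url_base; the URLs are made up. Endings listed
# in domains_to_keep survive while generic TLDs and subdomain prefixes are
# stripped (assuming the dot-stripped comparison above).
def _example_get_url_base():  # pragma: no cover
    assert get_url_base({'url': 'https://www.wsj.com/articles/x'}) == 'wsj'
    assert get_url_base({'url': 'http://money.cnn.com/2017/story'}) == 'cnn'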
def count_words(row):
"""Count words in text from article row."""
text = row['text']
return len(text.split())
def build_df(table: str = 'articles',
start_date: Optional[datetime] = None,
end_date: Optional[datetime] = None) -> pd.DataFrame:
"""Build dataframe with derived fields."""
with closing(sqlite3.connect(DB_FILE_NAME)) as conn:
articles = pd.read_sql_query(f'select * from {table}', conn)
articles['date'] = pd.to_datetime(articles['publish_date'])
if start_date:
articles = articles.loc[articles['date'] >= start_date]
if end_date:
articles = articles.loc[articles['date'] <= end_date]
articles = articles.replace([None], [''], regex=True)
articles['base_url'] = articles.apply(get_url_base, axis=1)
articles['word_count'] = articles.apply(count_words, axis=1)
return articles
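# Hypothetical usage sketch; the table name and date range are illustrative
# and assume articles.db has been populated by the scraping pipeline.
def _example_build_df():  # pragma: no cover
    df = build_df('articles',
                  start_date=datetime(2017, 1, 1),
                  end_date=datetime(2017, 6, 30))
    print(df[['base_url', 'word_count']].head())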
| mit |
ssaeger/scikit-learn | benchmarks/bench_isolation_forest.py | 40 | 3136 | """
==========================================
IsolationForest benchmark
==========================================
A test of IsolationForest on classical anomaly detection datasets.
"""
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import IsolationForest
from sklearn.metrics import roc_curve, auc
from sklearn.datasets import fetch_kddcup99, fetch_covtype, fetch_mldata
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils import shuffle as sh
np.random.seed(1)
datasets = ['http']#, 'smtp', 'SA', 'SF', 'shuttle', 'forestcover']
for dat in datasets:
# loading and vectorization
print('loading data')
if dat in ['http', 'smtp', 'SA', 'SF']:
dataset = fetch_kddcup99(subset=dat, shuffle=True, percent10=True)
X = dataset.data
y = dataset.target
if dat == 'shuttle':
dataset = fetch_mldata('shuttle')
X = dataset.data
y = dataset.target
X, y = sh(X, y)  # shuffle returns copies; it does not operate in place
# we remove data with label 4
# normal data are then those of class 1
s = (y != 4)
X = X[s, :]
y = y[s]
y = (y != 1).astype(int)
if dat == 'forestcover':
dataset = fetch_covtype(shuffle=True)
X = dataset.data
y = dataset.target
# normal data are those with attribute 2
# abnormal those with attribute 4
s = (y == 2) + (y == 4)
X = X[s, :]
y = y[s]
y = (y != 2).astype(int)
print('vectorizing data')
if dat == 'SF':
lb = LabelBinarizer()
lb.fit(X[:, 1])
x1 = lb.transform(X[:, 1])
X = np.c_[X[:, :1], x1, X[:, 2:]]
y = (y != 'normal.').astype(int)
if dat == 'SA':
lb = LabelBinarizer()
lb.fit(X[:, 1])
x1 = lb.transform(X[:, 1])
lb.fit(X[:, 2])
x2 = lb.transform(X[:, 2])
lb.fit(X[:, 3])
x3 = lb.transform(X[:, 3])
X = np.c_[X[:, :1], x1, x2, x3, X[:, 4:]]
y = (y != 'normal.').astype(int)
if dat == 'http' or dat == 'smtp':
y = (y != 'normal.').astype(int)
n_samples, n_features = np.shape(X)
n_samples_train = n_samples // 2
n_samples_test = n_samples - n_samples_train
X = X.astype(float)
X_train = X[:n_samples_train, :]
X_test = X[n_samples_train:, :]
y_train = y[:n_samples_train]
y_test = y[n_samples_train:]
print('IsolationForest processing...')
model = IsolationForest(bootstrap=True, n_jobs=-1)
tstart = time()
model.fit(X_train)
fit_time = time() - tstart
tstart = time()
scoring = model.predict(X_test) # the lower, the more normal
predict_time = time() - tstart
fpr, tpr, thresholds = roc_curve(y_test, scoring)
AUC = auc(fpr, tpr)
plt.plot(fpr, tpr, lw=1, label='ROC for %s (area = %0.3f, train-time: %0.2fs, test-time: %0.2fs)' % (dat, AUC, fit_time, predict_time))
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic')
plt.legend(loc="lower right")
plt.show()
| bsd-3-clause |
mattjj/pyhawkes | experiments/harness.py | 2 | 16840 | """
Test harness for fitting the competing models.
"""
import time
import cPickle
import copy
import os
import gzip
import numpy as np
from collections import namedtuple
from pybasicbayes.util.text import progprint_xrange
# Use the Agg backend if running on a server without the DISPLAY variable
if "DISPLAY" not in os.environ:
import matplotlib
matplotlib.use('Agg')
from pyhawkes.utils.utils import convert_discrete_to_continuous
from pyhawkes.models import DiscreteTimeNetworkHawkesModelGammaMixture, \
DiscreteTimeNetworkHawkesModelGammaMixtureSBM, \
DiscreteTimeNetworkHawkesModelSpikeAndSlab, \
ContinuousTimeNetworkHawkesModel
from pyhawkes.standard_models import ReluNonlinearHawkesProcess, StandardHawkesProcess, ExpNonlinearHawkesProcess, HomogeneousPoissonProcess
Results = namedtuple("Results", ["samples", "timestamps", "lps", "test_lls"])
def fit_homogeneous_pp_model(S, S_test, dt, dt_max, output_path,
model_args={}):
T,K = S.shape
# Check for existing Gibbs results
test_model = HomogeneousPoissonProcess(K=K, dt=dt, dt_max=dt_max, **model_args)
test_model.add_data(S)
# Initialize the background rates to their mean
test_model.initialize_to_background_rate()
lps = [test_model.log_likelihood()]
hlls = [test_model.heldout_log_likelihood(S_test)]
# Convert to arrays
lps = np.array(lps)
hlls = np.array(hlls)
timestamps = np.array([0])
# Make results object
results = Results([test_model.copy_sample()], timestamps, lps, hlls)
return results
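# A minimal sketch of driving one of the fitters in this harness; the random
# spike-count arrays and the output path are illustrative stand-ins for real
# data (output_path is unused by the homogeneous fitter).
def _example_fit_homogeneous():  # pragma: no cover
    K, dt, dt_max = 4, 1.0, 10.0
    S = np.random.poisson(0.1, size=(1000, K))
    S_test = np.random.poisson(0.1, size=(200, K))
    results = fit_homogeneous_pp_model(S, S_test, dt, dt_max,
                                       output_path="results/homog.pkl.gz")
    print "Heldout log likelihood:", results.test_lls[-1]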
def fit_standard_hawkes_model_bfgs(S, S_test, dt, dt_max, output_path,
standard_model=None,
model_args={}):
T,K = S.shape
# Check for existing Gibbs results
if os.path.exists(output_path):
with gzip.open(output_path, 'r') as f:
print "Loading standard BFGS results from ", output_path
results = cPickle.load(f)
else:
# Split into test and training
xv_len = 1000
S_train = S[:-xv_len]
# hold out the last xv_len bins for cross-validation
S_xv = S[-xv_len:]
def _fit(lmbda):
print "Fitting a standard Hawkes model using BFGS"
test_model = StandardHawkesProcess(K=K, dt=dt, dt_max=dt_max, lmbda=lmbda, **model_args)
test_model.add_data(S_train)
# Initialize the background rates to their mean
test_model.initialize_to_background_rate()
lps = [test_model.log_likelihood()]
hlls = [test_model.heldout_log_likelihood(S_test)]
# Fit with BFGS
tic = time.clock()
test_model.fit_with_bfgs()
init_time = time.clock() - tic
lps.append(test_model.log_likelihood())
hlls.append(test_model.heldout_log_likelihood(S_test))
# Convert to arrays
lps = np.array(lps)
hlls = np.array(hlls)
timestamps = np.array([0, init_time])
# Make results object
results = Results([test_model.copy_sample()], timestamps, lps, hlls)
# Compute cross validation log likelihood
xv_ll = test_model.heldout_log_likelihood(S_xv)
return xv_ll, results
# Fit models with a range of regularization parameters
lmbdas = [.1, 1., 5., 10.]
xv_lls = []
xv_results = []
for lmbda in lmbdas:
xv_ll, results = _fit(lmbda)
xv_lls.append(xv_ll)
xv_results.append(results)
# Find the best
for lmbda, xv_ll in zip(lmbdas, xv_lls):
print "Lambda: ", lmbda, "\tXV LL: ", xv_ll
best = np.argmax(xv_lls)
results = xv_results[best]
print "Best: ", best
# Save the model
with gzip.open(output_path, 'w') as f:
print "Saving std BFGS results to ", output_path
cPickle.dump(results, f, protocol=-1)
return results
def fit_nonlinear_hawkes_model_bfgs(S, S_test, dt, dt_max, output_path,
model_args={}):
T,K = S.shape
# Check for existing Gibbs results
if os.path.exists(output_path):
with gzip.open(output_path, 'r') as f:
print "Loading nonlinear BFGS results from ", output_path
results = cPickle.load(f)
else:
# Split into test and training
xv_len = int(0.01 * len(S))
S_train = S[:-xv_len]
# hold out the last xv_len bins for cross-validation
S_xv = S[-xv_len:]
def _fit(lmbda):
print "Fitting a nonlinear Hawkes model using BFGS"
test_model = ReluNonlinearHawkesProcess(K=K, dt=dt, dt_max=dt_max, lmbda=lmbda, **model_args)
test_model.add_data(S_train)
# Initialize the background rates to their mean
test_model.initialize_to_background_rate()
lps = [test_model.log_likelihood()]
hlls = [test_model.heldout_log_likelihood(S_test)]
# Fit with BFGS
tic = time.clock()
test_model.fit_with_bfgs()
init_time = time.clock() - tic
lps.append(test_model.log_likelihood())
hlls.append(test_model.heldout_log_likelihood(S_test))
# Convert to arrays
lps = np.array(lps)
hlls = np.array(hlls)
timestamps = np.array([0, init_time])
# Make results object
results = Results([test_model.copy_sample()], timestamps, lps, hlls)
# Compute cross validation log likelihood
xv_ll = test_model.heldout_log_likelihood(S_xv)
return xv_ll, results
# Fit models with a range of regularization parameters
lmbdas = [.1, 1., 5., 10.]
xv_lls = []
xv_results = []
for lmbda in lmbdas:
xv_ll, results = _fit(lmbda)
xv_lls.append(xv_ll)
xv_results.append(results)
# Find the best
for lmbda, xv_ll in zip(lmbdas, xv_lls):
print "Lambda: ", lmbda, "\tXV LL: ", xv_ll
best = np.argmax(xv_lls)
results = xv_results[best]
print "Best: ", best
# Save the model
with gzip.open(output_path, 'w') as f:
print "Saving nonlinear BFGS results to ", output_path
cPickle.dump(results, f, protocol=-1)
return results
def fit_spikeslab_network_hawkes_gibbs(S, S_test, dt, dt_max, output_path,
model_args={}, standard_model=None,
N_samples=100, time_limit=8*60*60):
T,K = S.shape
# Check for existing Gibbs results
if os.path.exists(output_path):
with gzip.open(output_path, 'r') as f:
print "Loading Gibbs results from ", output_path
results = cPickle.load(f)
else:
print "Fitting the data with a network Hawkes model using Gibbs sampling"
test_model = DiscreteTimeNetworkHawkesModelSpikeAndSlab(K=K, dt=dt, dt_max=dt_max, **model_args)
test_model.add_data(S)
# Initialize with the standard model parameters
if standard_model is not None:
test_model.initialize_with_standard_model(standard_model)
# Precompute F_test
F_test = test_model.basis.convolve_with_basis(S_test)
# Gibbs sample
samples = []
lps = [test_model.log_probability()]
hlls = [test_model.heldout_log_likelihood(S_test)]
times = [0]
for _ in progprint_xrange(N_samples, perline=10):
# Update the model
tic = time.time()
test_model.resample_model()
samples.append(copy.deepcopy(test_model.get_parameters()))
times.append(time.time() - tic)
# Compute log probability and heldout log likelihood
# lps.append(test_model.log_probability())
hlls.append(test_model.heldout_log_likelihood(S_test, F=F_test))
# # Save this sample
# with open(output_path + ".gibbs.itr%04d.pkl" % itr, 'w') as f:
# cPickle.dump(samples[-1], f, protocol=-1)
# Check if time limit has been exceeded
if np.sum(times) > time_limit:
break
# Get cumulative timestamps
timestamps = np.cumsum(times)
lps = np.array(lps)
hlls = np.array(hlls)
# Make results object
results = Results(samples, timestamps, lps, hlls)
# Save the Gibbs samples
with gzip.open(output_path, 'w') as f:
print "Saving Gibbs samples to ", output_path
cPickle.dump(results, f, protocol=-1)
return results
def fit_ct_network_hawkes_gibbs(S, S_test, dt, dt_max, output_path,
model_args={}, standard_model=None,
N_samples=100, time_limit=8*60*60):
K = S.shape[1]
S_ct, C_ct, T = convert_discrete_to_continuous(S, dt)
S_test_ct, C_test_ct, T_test = convert_discrete_to_continuous(S_test, dt)
# Check for existing Gibbs results
if os.path.exists(output_path):
with gzip.open(output_path, 'r') as f:
print "Loading Gibbs results from ", output_path
results = cPickle.load(f)
else:
print "Fitting the data with a continuous time network Hawkes model using Gibbs sampling"
test_model = \
ContinuousTimeNetworkHawkesModel(K, dt_max=dt_max, **model_args)
test_model.add_data(S_ct, C_ct, T)
# Initialize with the standard model parameters
if standard_model is not None:
test_model.initialize_with_standard_model(standard_model)
# Gibbs sample
samples = []
lps = [test_model.log_probability()]
hlls = [test_model.heldout_log_likelihood(S_test_ct, C_test_ct, T_test)]
times = [0]
for _ in progprint_xrange(N_samples, perline=25):
# Update the model
tic = time.time()
test_model.resample_model()
times.append(time.time() - tic)
samples.append(copy.deepcopy(test_model.get_parameters()))
# Compute log probability and heldout log likelihood
# lps.append(test_model.log_probability())
hlls.append(test_model.heldout_log_likelihood(S_test_ct, C_test_ct, T_test))
# # Save this sample
# with open(output_path + ".gibbs.itr%04d.pkl" % itr, 'w') as f:
# cPickle.dump(samples[-1], f, protocol=-1)
# Check if time limit has been exceeded
if np.sum(times) > time_limit:
break
# Get cumulative timestamps
timestamps = np.cumsum(times)
lps = np.array(lps)
hlls = np.array(hlls)
# Make results object
results = Results(samples, timestamps, lps, hlls)
# Save the Gibbs samples
with gzip.open(output_path, 'w') as f:
print "Saving Gibbs samples to ", output_path
cPickle.dump(results, f, protocol=-1)
return results
def fit_network_hawkes_vb(S, S_test, dt, dt_max, output_path,
model_args={}, standard_model=None,
N_samples=100, time_limit=8*60*60):
T,K = S.shape
# Check for existing VB results
if os.path.exists(output_path):
with gzip.open(output_path, 'r') as f:
print "Loading VB results from ", output_path
results = cPickle.load(f)
else:
print "Fitting the data with a network Hawkes model using batch VB"
test_model = DiscreteTimeNetworkHawkesModelGammaMixtureSBM(K=K, dt=dt, dt_max=dt_max,
**model_args)
test_model.add_data(S)
# Initialize with the standard model parameters
if standard_model is not None:
test_model.initialize_with_standard_model(standard_model)
# Precompute F_test
F_test = test_model.basis.convolve_with_basis(S_test)
# Batch variational inference
samples = []
lps = [test_model.log_probability()]
hlls = [test_model.heldout_log_likelihood(S_test)]
times = [0]
for itr in progprint_xrange(N_samples):
# Update the model
tic = time.time()
test_model.meanfield_coordinate_descent_step()
times.append(time.time() - tic)
# Resample from variational posterior to compute log prob and hlls
test_model.resample_from_mf()
# samples.append(test_model.copy_sample())
samples.append(copy.deepcopy(test_model.get_parameters()))
# Compute log probability and heldout log likelihood
# lps.append(test_model.log_probability())
hlls.append(test_model.heldout_log_likelihood(S_test, F=F_test))
# Save this sample
# with open(output_path + ".svi.itr%04d.pkl" % itr, 'w') as f:
# cPickle.dump(samples[-1], f, protocol=-1)
# Check if time limit has been exceeded
if np.sum(times) > time_limit:
break
# Get cumulative timestamps
timestamps = np.cumsum(times)
lps = np.array(lps)
hlls = np.array(hlls)
# Make results object
results = Results(samples, timestamps, lps, hlls)
# Save the VB samples
with gzip.open(output_path, 'w') as f:
print "Saving VB samples to ", output_path
cPickle.dump(results, f, protocol=-1)
return results
def fit_network_hawkes_svi(S, S_test, dt, dt_max, output_path,
model_args={}, standard_model=None,
N_samples=100, time_limit=8*60*60,
delay=10.0,
forgetting_rate=0.25):
T,K = S.shape
# Check for existing SVI results
if os.path.exists(output_path):
with gzip.open(output_path, 'r') as f:
print "Loading SVI results from ", output_path
results = cPickle.load(f)
else:
print "Fitting the data with a network Hawkes model using SVI"
test_model = DiscreteTimeNetworkHawkesModelGammaMixtureSBM(K=K, dt=dt, dt_max=dt_max,
**model_args)
test_model.add_data(S)
# Initialize with the standard model parameters
if standard_model is not None:
test_model.initialize_with_standard_model(standard_model)
# Precompute F_test
F_test = test_model.basis.convolve_with_basis(S_test)
# TODO: Add the data in minibatches
minibatchsize = 3000
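# Robbins-Monro step-size schedule: rho_t = (t + delay)**(-forgetting_rate).
# Rates in (0.5, 1] satisfy the usual SVI convergence conditions
# (sum rho_t = inf, sum rho_t**2 < inf); 0.25 decays more slowly than that.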
stepsize = (np.arange(N_samples) + delay)**(-forgetting_rate)
# Stochastic variational inference
samples = []
lps = [test_model.log_probability()]
hlls = [test_model.heldout_log_likelihood(S_test)]
times = [0]
for itr in progprint_xrange(N_samples):
# Update the model
tic = time.time()
test_model.sgd_step(minibatchsize=minibatchsize, stepsize=stepsize[itr])
times.append(time.time() - tic)
# Resample from variational posterior to compute log prob and hlls
test_model.resample_from_mf()
# samples.append(test_model.copy_sample())
samples.append(copy.deepcopy(test_model.get_parameters()))
# Compute log probability and heldout log likelihood
# lps.append(test_model.log_probability())
hlls.append(test_model.heldout_log_likelihood(S_test, F=F_test))
# Save this sample
# with open(output_path + ".svi.itr%04d.pkl" % itr, 'w') as f:
# cPickle.dump(samples[-1], f, protocol=-1)
# Check if time limit has been exceeded
if np.sum(times) > time_limit:
break
# Get cumulative timestamps
timestamps = np.cumsum(times)
lps = np.array(lps)
hlls = np.array(hlls)
# Make results object
results = Results(samples, timestamps, lps, hlls)
# Save the SVI samples
with gzip.open(output_path, 'w') as f:
print "Saving SVI samples to ", output_path
cPickle.dump(results, f, protocol=-1)
return results
def fit_poisson_glm():
raise NotImplementedError | mit |
lokeshpancharia/BuildingMachineLearningSystemsWithPython | ch10/neighbors.py | 21 | 1787 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
import numpy as np
import mahotas as mh
from glob import glob
from features import texture, color_histogram
from matplotlib import pyplot as plt
from sklearn.preprocessing import StandardScaler
from scipy.spatial import distance
basedir = '../SimpleImageDataset/'
haralicks = []
chists = []
print('Computing features...')
# Use glob to get all the images
images = glob('{}/*.jpg'.format(basedir))
# We sort the images to ensure that they are always processed in the same order
# Otherwise, this would introduce some variation just based on the arbitrary
# ordering that the filesystem uses
images.sort()
for fname in images:
imc = mh.imread(fname)
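# Discard a 200-pixel border on every side so that features are computed
# on the central region of each image only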
imc = imc[200:-200,200:-200]
haralicks.append(texture(mh.colors.rgb2grey(imc)))
chists.append(color_histogram(imc))
haralicks = np.array(haralicks)
chists = np.array(chists)
features = np.hstack([chists, haralicks])
print('Computing neighbors...')
sc = StandardScaler()
features = sc.fit_transform(features)
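# pdist returns the condensed pairwise distances; squareform expands them
# into a full N x N symmetric distance matrix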
dists = distance.squareform(distance.pdist(features))
print('Plotting...')
fig, axes = plt.subplots(2, 9, figsize=(16,8))
# Remove ticks from all subplots
for ax in axes.flat:
ax.set_xticks([])
ax.set_yticks([])
for ci,i in enumerate(range(0,90,10)):
left = images[i]
dists_left = dists[i]
right = dists_left.argsort()
# right[0] is the query image itself (distance zero), so pick the next closest element
right = right[1]
right = images[right]
left = mh.imread(left)
right = mh.imread(right)
axes[0, ci].imshow(left)
axes[1, ci].imshow(right)
fig.tight_layout()
fig.savefig('figure_neighbors.png', dpi=300)
| mit |
mmottahedi/neuralnilm_prototype | scripts/e507.py | 2 | 6411 | from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import (standardise, discretize, fdiff, power_and_fdiff,
RandomSegments, RandomSegmentsInMemory,
SameLocation)
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import (MixtureDensityLayer, DeConv1DLayer,
SharedWeightsDenseLayer)
from neuralnilm.objectives import (scaled_cost, mdn_nll,
scaled_cost_ignore_inactive, ignore_inactive,
scaled_cost3)
from neuralnilm.plot import MDNPlotter, CentralOutputPlotter, Plotter, RectangularOutputPlotter, StartEndMeanPlotter
from neuralnilm.updates import clipped_nesterov_momentum
from neuralnilm.disaggregate import disaggregate
from neuralnilm.rectangulariser import rectangularise
from lasagne.nonlinearities import sigmoid, rectify, tanh, identity, softmax
from lasagne.objectives import mse, binary_crossentropy
from lasagne.init import Uniform, Normal
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
import gc
"""
447: first attempt at disaggregation
"""
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
#PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
PATH = "/data/dk3810/figures"
SAVE_PLOT_INTERVAL = 1000
N_SEQ_PER_BATCH = 64
N_SEGMENTS = 3
MAX_TARGET_POWER = 300
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
window=("2013-03-18", None),
train_buildings=[1],
validation_buildings=[1],
n_seq_per_batch=N_SEQ_PER_BATCH,
standardise_input=True,
subsample_target=1,
ignore_incomplete=True,
allow_incomplete=True,
include_all=False,
skip_probability=0.25,
offset_probability=0,
target_is_start_and_end_and_mean=True,
y_processing_func=lambda y: y / MAX_TARGET_POWER
)
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
# loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),
# loss_function=lambda x, t: mdn_nll(x, t).mean(),
# loss_function=lambda x, t: (mse(x, t) * MASK).mean(),
# loss_function=lambda x, t: mse(x, t).mean(),
loss_function=lambda x, t: binary_crossentropy(x, t).mean(),
# loss_function=partial(scaled_cost, loss_func=mse),
# loss_function=ignore_inactive,
# loss_function=partial(scaled_cost3, ignore_inactive=False),
# updates_func=momentum,
updates_func=clipped_nesterov_momentum,
updates_kwargs={'clip_range': (0, 10)},
learning_rate=1e-2,
learning_rate_changes_by_iteration={
1000: 1e-3,
5000: 1e-4
},
do_save_activations=True,
auto_reshape=False,
# plotter=CentralOutputPlotter
# plotter=Plotter(n_seq_to_plot=32)
plotter=StartEndMeanPlotter(n_seq_to_plot=16, max_target_power=MAX_TARGET_POWER)
)
def exp_a(name, target_appliance, seq_length):
global source
source_dict_copy = deepcopy(source_dict)
source_dict_copy.update(dict(
target_appliance=target_appliance,
logger=logging.getLogger(name),
seq_length=seq_length
))
source = SameLocation(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
NUM_FILTERS = 4
target_seq_length = source.output_shape_after_processing()[1]
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'label': 'conv0',
'type': Conv1DLayer, # convolve over the time axis
'num_filters': NUM_FILTERS,
'filter_size': 4,
'stride': 1,
'nonlinearity': None,
'border_mode': 'valid'
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'label': 'dense0',
'type': DenseLayer,
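# A 'valid' convolution with filter_size=4 shortens the time axis to
# seq_length - filter_size + 1 = seq_length - 3 steps per filter.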
'num_units': (seq_length - 3) * NUM_FILTERS,
'nonlinearity': rectify
},
{
'label': 'dense2',
'type': DenseLayer,
'num_units': 128,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': target_seq_length,
'nonlinearity': sigmoid
}
]
net = Net(**net_dict_copy)
return net
def main():
APPLIANCES = [
('a', ['fridge freezer', 'fridge'], 512),
('b', "'coffee maker'", 512),
('c', "'dish washer'", 2000),
('d', "'hair dryer'", 256),
('e', "'kettle'", 256),
('f', "'oven'", 2000),
('g', "'toaster'", 256),
('h', "'light'", 2000),
('i', ['washer dryer', 'washing machine'], 800)
]
for experiment, appliance, seq_length in APPLIANCES[:1]:
full_exp_name = NAME + experiment
func_call = init_experiment(PATH, 'a', full_exp_name)
func_call = func_call[:-1] + ", {}, {})".format(appliance, seq_length)
logger = logging.getLogger(full_exp_name)
try:
net = eval(func_call)
run_experiment(net, epochs=None)
except KeyboardInterrupt:
logger.info("KeyboardInterrupt")
break
except Exception as exception:
logger.exception("Exception")
# raise
else:
del net.source
del net
gc.collect()
finally:
logging.shutdown()
if __name__ == "__main__":
main()
"""
Emacs variables
Local Variables:
compile-command: "cp /home/jack/workspace/python/neuralnilm/scripts/e507.py /mnt/sshfs/imperial/workspace/python/neuralnilm/scripts/"
End:
"""
| mit |
carrillo/scikit-learn | examples/ensemble/plot_adaboost_twoclass.py | 347 | 3268 | """
==================
Two-class AdaBoost
==================
This example fits an AdaBoosted decision stump on a non-linearly separable
classification dataset composed of two "Gaussian quantiles" clusters
(see :func:`sklearn.datasets.make_gaussian_quantiles`) and plots the decision
boundary and decision scores. The distributions of decision scores are shown
separately for samples of class A and B. The predicted class label for each
sample is determined by the sign of the decision score. Samples with decision
scores greater than zero are classified as B, and are otherwise classified
as A. The magnitude of a decision score reflects the degree of confidence in
the predicted class label. Additionally, a new dataset could be constructed
containing a desired purity of class B, for example, by only selecting samples
with a decision score above some value (a short illustrative snippet follows
the model fit below).
"""
print(__doc__)
# Author: Noel Dawe <noel.dawe@gmail.com>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import make_gaussian_quantiles
# Construct dataset
X1, y1 = make_gaussian_quantiles(cov=2.,
n_samples=200, n_features=2,
n_classes=2, random_state=1)
X2, y2 = make_gaussian_quantiles(mean=(3, 3), cov=1.5,
n_samples=300, n_features=2,
n_classes=2, random_state=1)
X = np.concatenate((X1, X2))
y = np.concatenate((y1, - y2 + 1))
# Create and fit an AdaBoosted decision tree
bdt = AdaBoostClassifier(DecisionTreeClassifier(max_depth=1),
algorithm="SAMME",
n_estimators=200)
bdt.fit(X, y)
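# Editorial aside (not part of the original example): as noted in the
# docstring, a higher-purity class-B subset can be obtained by thresholding
# the decision scores; the threshold 0.5 is an arbitrary illustrative choice.
purity_threshold = 0.5
X_pure_B = X[bdt.decision_function(X) > purity_threshold]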
plot_colors = "br"
plot_step = 0.02
class_names = "AB"
plt.figure(figsize=(10, 5))
# Plot the decision boundaries
plt.subplot(121)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
Z = bdt.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis("tight")
# Plot the training points
for i, n, c in zip(range(2), class_names, plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1],
c=c, cmap=plt.cm.Paired,
label="Class %s" % n)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.legend(loc='upper right')
plt.xlabel('x')
plt.ylabel('y')
plt.title('Decision Boundary')
# Plot the two-class decision scores
twoclass_output = bdt.decision_function(X)
plot_range = (twoclass_output.min(), twoclass_output.max())
plt.subplot(122)
for i, n, c in zip(range(2), class_names, plot_colors):
plt.hist(twoclass_output[y == i],
bins=10,
range=plot_range,
facecolor=c,
label='Class %s' % n,
alpha=.5)
x1, x2, y1, y2 = plt.axis()
plt.axis((x1, x2, y1, y2 * 1.2))
plt.legend(loc='upper right')
plt.ylabel('Samples')
plt.xlabel('Score')
plt.title('Decision Scores')
plt.tight_layout()
plt.subplots_adjust(wspace=0.35)
plt.show()
| bsd-3-clause |
paulirish/dadi | dadi/__init__.py | 10 | 1326 | """
For examples of dadi's usage, see the examples directory in the source
distribution.
Documentation of all methods can be found in doc/api/index.html of the source
distribution.
"""
import logging
logging.basicConfig()
import Demographics1D
import Demographics2D
import Inference
import Integration
import Misc
import Numerics
import PhiManip
# Protect import of Plotting in case matplotlib not installed.
try:
import Plotting
except ImportError:
pass
# We do it this way so it's easier to reload.
import Spectrum_mod
Spectrum = Spectrum_mod.Spectrum
try:
# This is to try and ensure we have a nice __SVNVERSION__ attribute, so
# when we get bug reports, we know what version they were using. The
# svnversion file is created by setup.py.
import os
_directory = os.path.dirname(Integration.__file__)
_svn_file = os.path.join(_directory, 'svnversion')
__SVNVERSION__ = open(_svn_file).read().strip()
except:
__SVNVERSION__ = 'Unknown'
# When doing arithmetic with Spectrum objects (which are masked arrays), we
# often have masked values which generate annoying arithmetic warnings. Here
# we tell numpy to ignore such warnings. This puts greater onus on the user to
# check results, but for our use case I think it's the better default.
import numpy
numpy.seterr(all='ignore')
| bsd-3-clause |
alexsavio/scikit-learn | sklearn/datasets/tests/test_base.py | 13 | 8907 | import os
import shutil
import tempfile
import warnings
import numpy
from pickle import loads
from pickle import dumps
from sklearn.datasets import get_data_home
from sklearn.datasets import clear_data_home
from sklearn.datasets import load_files
from sklearn.datasets import load_sample_images
from sklearn.datasets import load_sample_image
from sklearn.datasets import load_digits
from sklearn.datasets import load_diabetes
from sklearn.datasets import load_linnerud
from sklearn.datasets import load_iris
from sklearn.datasets import load_breast_cancer
from sklearn.datasets import load_boston
from sklearn.datasets.base import Bunch
from sklearn.externals.six import b, u
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import with_setup
DATA_HOME = tempfile.mkdtemp(prefix="scikit_learn_data_home_test_")
LOAD_FILES_ROOT = tempfile.mkdtemp(prefix="scikit_learn_load_files_test_")
TEST_CATEGORY_DIR1 = ""
TEST_CATEGORY_DIR2 = ""
def _remove_dir(path):
if os.path.isdir(path):
shutil.rmtree(path)
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
for path in [DATA_HOME, LOAD_FILES_ROOT]:
_remove_dir(path)
def setup_load_files():
global TEST_CATEGORY_DIR1
global TEST_CATEGORY_DIR2
TEST_CATEGORY_DIR1 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
TEST_CATEGORY_DIR2 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
sample_file = tempfile.NamedTemporaryFile(dir=TEST_CATEGORY_DIR1,
delete=False)
sample_file.write(b("Hello World!\n"))
sample_file.close()
def teardown_load_files():
_remove_dir(TEST_CATEGORY_DIR1)
_remove_dir(TEST_CATEGORY_DIR2)
def test_data_home():
# get_data_home will point to a pre-existing folder
data_home = get_data_home(data_home=DATA_HOME)
assert_equal(data_home, DATA_HOME)
assert_true(os.path.exists(data_home))
# clear_data_home will delete both the content and the folder itself
clear_data_home(data_home=data_home)
assert_false(os.path.exists(data_home))
# if the folder is missing it will be created again
data_home = get_data_home(data_home=DATA_HOME)
assert_true(os.path.exists(data_home))
def test_default_empty_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 0)
assert_equal(len(res.target_names), 0)
assert_equal(res.DESCR, None)
@with_setup(setup_load_files, teardown_load_files)
def test_default_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.data, [b("Hello World!\n")])
@with_setup(setup_load_files, teardown_load_files)
def test_load_files_w_categories_desc_and_encoding():
category = os.path.abspath(TEST_CATEGORY_DIR1).split('/').pop()
res = load_files(LOAD_FILES_ROOT, description="test",
categories=category, encoding="utf-8")
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 1)
assert_equal(res.DESCR, "test")
assert_equal(res.data, [u("Hello World!\n")])
@with_setup(setup_load_files, teardown_load_files)
def test_load_files_wo_load_content():
res = load_files(LOAD_FILES_ROOT, load_content=False)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.get('data'), None)
def test_load_sample_images():
try:
res = load_sample_images()
assert_equal(len(res.images), 2)
assert_equal(len(res.filenames), 2)
assert_true(res.DESCR)
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_digits():
digits = load_digits()
assert_equal(digits.data.shape, (1797, 64))
assert_equal(numpy.unique(digits.target).size, 10)
# test return_X_y option
X_y_tuple = load_digits(return_X_y=True)
bunch = load_digits()
assert_true(isinstance(X_y_tuple, tuple))
assert_array_equal(X_y_tuple[0], bunch.data)
assert_array_equal(X_y_tuple[1], bunch.target)
def test_load_digits_n_class_lt_10():
digits = load_digits(9)
assert_equal(digits.data.shape, (1617, 64))
assert_equal(numpy.unique(digits.target).size, 9)
def test_load_sample_image():
try:
china = load_sample_image('china.jpg')
assert_equal(china.dtype, 'uint8')
assert_equal(china.shape, (427, 640, 3))
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_missing_sample_image_error():
have_PIL = True
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
except ImportError:
have_PIL = False
if have_PIL:
assert_raises(AttributeError, load_sample_image,
'blop.jpg')
else:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_diabetes():
res = load_diabetes()
assert_equal(res.data.shape, (442, 10))
assert_equal(res.target.size, 442)
assert_equal(len(res.feature_names), 10)
# test return_X_y option
X_y_tuple = load_diabetes(return_X_y=True)
bunch = load_diabetes()
assert_true(isinstance(X_y_tuple, tuple))
assert_array_equal(X_y_tuple[0], bunch.data)
assert_array_equal(X_y_tuple[1], bunch.target)
def test_load_linnerud():
res = load_linnerud()
assert_equal(res.data.shape, (20, 3))
assert_equal(res.target.shape, (20, 3))
assert_equal(len(res.target_names), 3)
assert_true(res.DESCR)
# test return_X_y option
X_y_tuple = load_linnerud(return_X_y=True)
bunch = load_linnerud()
assert_true(isinstance(X_y_tuple, tuple))
assert_array_equal(X_y_tuple[0], bunch.data)
assert_array_equal(X_y_tuple[1], bunch.target)
def test_load_iris():
res = load_iris()
assert_equal(res.data.shape, (150, 4))
assert_equal(res.target.size, 150)
assert_equal(res.target_names.size, 3)
assert_true(res.DESCR)
# test return_X_y option
X_y_tuple = load_iris(return_X_y=True)
bunch = load_iris()
assert_true(isinstance(X_y_tuple, tuple))
assert_array_equal(X_y_tuple[0], bunch.data)
assert_array_equal(X_y_tuple[1], bunch.target)
def test_load_breast_cancer():
res = load_breast_cancer()
assert_equal(res.data.shape, (569, 30))
assert_equal(res.target.size, 569)
assert_equal(res.target_names.size, 2)
assert_true(res.DESCR)
# test return_X_y option
X_y_tuple = load_breast_cancer(return_X_y=True)
bunch = load_breast_cancer()
assert_true(isinstance(X_y_tuple, tuple))
assert_array_equal(X_y_tuple[0], bunch.data)
assert_array_equal(X_y_tuple[1], bunch.target)
def test_load_boston():
res = load_boston()
assert_equal(res.data.shape, (506, 13))
assert_equal(res.target.size, 506)
assert_equal(res.feature_names.size, 13)
assert_true(res.DESCR)
# test return_X_y option
X_y_tuple = load_boston(return_X_y=True)
bunch = load_boston()
assert_true(isinstance(X_y_tuple, tuple))
assert_array_equal(X_y_tuple[0], bunch.data)
assert_array_equal(X_y_tuple[1], bunch.target)
def test_loads_dumps_bunch():
bunch = Bunch(x="x")
bunch_from_pkl = loads(dumps(bunch))
bunch_from_pkl.x = "y"
assert_equal(bunch_from_pkl['x'], bunch_from_pkl.x)
def test_bunch_pickle_generated_with_0_16_and_read_with_0_17():
bunch = Bunch(key='original')
# This reproduces a problem when Bunch pickles have been created
# with scikit-learn 0.16 and are read with 0.17. Basically there
# is a surprising behaviour because reading bunch.key uses
# bunch.__dict__ (which is non empty for 0.16 Bunch objects)
# whereas assigning into bunch.key uses bunch.__setattr__. See
# https://github.com/scikit-learn/scikit-learn/issues/6196 for
# more details
bunch.__dict__['key'] = 'set from __dict__'
bunch_from_pkl = loads(dumps(bunch))
# After loading from pickle the __dict__ should have been ignored
assert_equal(bunch_from_pkl.key, 'original')
assert_equal(bunch_from_pkl['key'], 'original')
# Making sure that changing the attr does change the value
# associated with __getitem__ as well
bunch_from_pkl.key = 'changed'
assert_equal(bunch_from_pkl.key, 'changed')
assert_equal(bunch_from_pkl['key'], 'changed')
def test_bunch_dir():
# check that dir (important for autocomplete) shows attributes
data = load_iris()
assert_true("data" in dir(data))
| bsd-3-clause |
meee1/pymavlink | examples/mavgraph.py | 1 | 5880 | #!/usr/bin/env python
'''
graph a MAVLink log file
Andrew Tridgell August 2011
'''
import sys, struct, time, os, datetime
import math, re
import pylab, pytz, matplotlib
from math import *
# allow import from the parent directory, where mavlink.py is
sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))
from mavextra import *
locator = None
formatter = None
def plotit(x, y, fields, colors=[]):
'''plot a set of graphs using date for x axis'''
global locator, formatter
pylab.ion()
fig = pylab.figure(num=1, figsize=(12,6))
ax1 = fig.gca()
ax2 = None
xrange = 0.0
for i in range(0, len(fields)):
if len(x[i]) == 0: continue
if x[i][-1] - x[i][0] > xrange:
xrange = x[i][-1] - x[i][0]
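# plot_date x values are matplotlib date numbers measured in days, so
# convert the span to seconds before choosing a tick interval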
xrange *= 24 * 60 * 60
if formatter is None:
if xrange < 1000:
formatter = matplotlib.dates.DateFormatter('%H:%M:%S')
else:
formatter = matplotlib.dates.DateFormatter('%H:%M')
interval = 1
intervals = [ 1, 2, 5, 10, 15, 30, 60, 120, 240, 300, 600,
900, 1800, 3600, 7200, 5*3600, 10*3600, 24*3600 ]
for interval in intervals:
if xrange / interval < 15:
break
locator = matplotlib.dates.SecondLocator(interval=interval)
ax1.xaxis.set_major_locator(locator)
ax1.xaxis.set_major_formatter(formatter)
empty = True
ax1_labels = []
ax2_labels = []
for i in range(0, len(fields)):
if len(x[i]) == 0:
print("Failed to find any values for field %s" % fields[i])
continue
if i < len(colors):
color = colors[i]
else:
color = 'red'
(tz, tzdst) = time.tzname
if axes[i] == 2:
if ax2 is None:
ax2 = ax1.twinx()
ax = ax2
ax2.xaxis.set_major_locator(locator)
ax2.xaxis.set_major_formatter(formatter)
label = fields[i]
if label.endswith(":2"):
label = label[:-2]
ax2_labels.append(label)
else:
ax1_labels.append(fields[i])
ax = ax1
ax.plot_date(x[i], y[i], color=color, label=fields[i],
linestyle='-', marker='None', tz=None)
pylab.draw()
empty = False
if ax1_labels != []:
if ax2_labels == []:
ax1.legend(ax1_labels,loc='upper right')
else:
ax1.legend(ax1_labels,loc='upper left')
if ax2_labels != []:
ax2.legend(ax2_labels,loc='upper right')
if empty:
print("No data to graph")
return
from optparse import OptionParser
parser = OptionParser("mavgraph.py [options] <filename> <fields>")
parser.add_option("--no-timestamps",dest="notimestamps", action='store_true', help="Log doesn't have timestamps")
parser.add_option("--planner",dest="planner", action='store_true', help="use planner file format")
parser.add_option("--condition",dest="condition", default=None, help="select packets by a condition")
parser.add_option("--labels",dest="labels", default=None, help="comma separated field labels")
parser.add_option("--mav10", action='store_true', default=False, help="Use MAVLink protocol 1.0")
(opts, args) = parser.parse_args()
if opts.mav10:
os.environ['MAVLINK10'] = '1'
import mavutil
if len(args) < 2:
print("Usage: mavlogdump.py [options] <LOGFILES...> <fields...>")
sys.exit(1)
filenames = []
fields = []
for f in args:
if os.path.exists(f):
filenames.append(f)
else:
fields.append(f)
msg_types = set()
multiplier = []
field_types = []
colors = [ 'red', 'green', 'blue', 'orange', 'olive', 'black', 'grey' ]
# work out msg types we are interested in
x = []
y = []
axes = []
first_only = []
re_caps = re.compile('[A-Z_]+')
for f in fields:
caps = set(re.findall(re_caps, f))
msg_types = msg_types.union(caps)
field_types.append(caps)
y.append([])
x.append([])
axes.append(1)
first_only.append(False)
def add_data(t, msg, vars):
'''add some data'''
mtype = msg.get_type()
if mtype not in msg_types:
return
for i in range(0, len(fields)):
if mtype not in field_types[i]:
continue
f = fields[i]
if f.endswith(":2"):
axes[i] = 2
f = f[:-2]
if f.endswith(":1"):
first_only[i] = True
f = f[:-2]
v = mavutil.evaluate_expression(f, vars)
if v is None:
continue
y[i].append(v)
x[i].append(t)
def process_file(filename):
'''process one file'''
print("Processing %s" % filename)
mlog = mavutil.mavlink_connection(filename, notimestamps=opts.notimestamps)
vars = {}
while True:
msg = mlog.recv_match(opts.condition)
if msg is None: break
tdays = (msg._timestamp - time.timezone) / (24 * 60 * 60)
tdays += 719163 # pylab wants it since 0001-01-01
add_data(tdays, msg, mlog.messages)
if len(filenames) == 0:
print("No files to process")
sys.exit(1)
if opts.labels is not None:
labels = opts.labels.split(',')
if len(labels) != len(fields)*len(filenames):
print("Number of labels (%u) must match number of fields (%u)" % (
len(labels), len(fields)*len(filenames)))
sys.exit(1)
else:
labels = None
for fi in range(0, len(filenames)):
f = filenames[fi]
process_file(f)
for i in range(0, len(x)):
if first_only[i] and fi != 0:
x[i] = []
y[i] = []
if labels:
lab = labels[fi*len(fields):(fi+1)*len(fields)]
else:
lab = fields[:]
plotit(x, y, lab, colors=colors[fi*len(fields):])
for i in range(0, len(x)):
x[i] = []
y[i] = []
pylab.show()
raw_input('press enter to exit....')
| lgpl-3.0 |
karstenw/nodebox-pyobjc | examples/Extended Application/sklearn/examples/covariance/plot_lw_vs_oas.py | 1 | 3759 | """
=============================
Ledoit-Wolf vs OAS estimation
=============================
The usual covariance maximum likelihood estimate can be regularized
using shrinkage. Ledoit and Wolf proposed a close formula to compute
the asymptotically optimal shrinkage parameter (minimizing a MSE
criterion), yielding the Ledoit-Wolf covariance estimate.
Chen et al. proposed an improvement of the Ledoit-Wolf shrinkage
parameter, the OAS coefficient, whose convergence is significantly
better under the assumption that the data are Gaussian.
This example, inspired from Chen's publication [1], shows a comparison
of the estimated MSE of the LW and OAS methods, using Gaussian
distributed data.
[1] "Shrinkage Algorithms for MMSE Covariance Estimation"
Chen et al., IEEE Trans. on Sign. Proc., Volume 58, Issue 10, October 2010.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy.linalg import toeplitz, cholesky
from sklearn.covariance import LedoitWolf, OAS
# nodebox section
if __name__ == '__builtin__':
# we're in NodeBox
import os
import tempfile
W = 800
inset = 20
size(W, 600)
plt.cla()
plt.clf()
plt.close('all')
def tempimage():
fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
fname = fob.name
fob.close()
return fname
imgx = 20
imgy = 0
def pltshow(plt, dpi=150):
global imgx, imgy
temppath = tempimage()
plt.savefig(temppath, dpi=dpi)
dx,dy = imagesize(temppath)
w = min(W,dx)
image(temppath,imgx,imgy,width=w)
imgy = imgy + dy + 20
os.remove(temppath)
size(W, HEIGHT+dy+40)
else:
def pltshow(mplpyplot):
mplpyplot.show()
# nodebox section end
np.random.seed(0)
###############################################################################
n_features = 100
# simulation covariance matrix (AR(1) process)
r = 0.1
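# toeplitz(r ** np.arange(n_features)) has entry (i, j) = r**abs(i - j),
# the covariance of a stationary AR(1) process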
real_cov = toeplitz(r ** np.arange(n_features))
coloring_matrix = cholesky(real_cov)
n_samples_range = np.arange(6, 31, 1)
repeat = 100
lw_mse = np.zeros((n_samples_range.size, repeat))
oa_mse = np.zeros((n_samples_range.size, repeat))
lw_shrinkage = np.zeros((n_samples_range.size, repeat))
oa_shrinkage = np.zeros((n_samples_range.size, repeat))
for i, n_samples in enumerate(n_samples_range):
for j in range(repeat):
X = np.dot(
np.random.normal(size=(n_samples, n_features)), coloring_matrix.T)
lw = LedoitWolf(store_precision=False, assume_centered=True)
lw.fit(X)
lw_mse[i, j] = lw.error_norm(real_cov, scaling=False)
lw_shrinkage[i, j] = lw.shrinkage_
oa = OAS(store_precision=False, assume_centered=True)
oa.fit(X)
oa_mse[i, j] = oa.error_norm(real_cov, scaling=False)
oa_shrinkage[i, j] = oa.shrinkage_
# plot MSE
plt.subplot(2, 1, 1)
plt.errorbar(n_samples_range, lw_mse.mean(1), yerr=lw_mse.std(1),
label='Ledoit-Wolf', color='navy', lw=2)
plt.errorbar(n_samples_range, oa_mse.mean(1), yerr=oa_mse.std(1),
label='OAS', color='darkorange', lw=2)
plt.ylabel("Squared error")
plt.legend(loc="upper right")
plt.title("Comparison of covariance estimators")
plt.xlim(5, 31)
# plot shrinkage coefficient
plt.subplot(2, 1, 2)
plt.errorbar(n_samples_range, lw_shrinkage.mean(1), yerr=lw_shrinkage.std(1),
label='Ledoit-Wolf', color='navy', lw=2)
plt.errorbar(n_samples_range, oa_shrinkage.mean(1), yerr=oa_shrinkage.std(1),
label='OAS', color='darkorange', lw=2)
plt.xlabel("n_samples")
plt.ylabel("Shrinkage")
plt.legend(loc="lower right")
plt.ylim(plt.ylim()[0], 1. + (plt.ylim()[1] - plt.ylim()[0]) / 10.)
plt.xlim(5, 31)
#plt.show()
pltshow(plt)
| mit |
blancha/abcngspipelines | rnaseq/cuffdiff.py | 1 | 3767 | #!/usr/bin/env python3
# Version 1.1
# Author Alexis Blanchet-Cohen
# Date: 09/06/2014
import argparse
from collections import OrderedDict
import glob
import os
import os.path
import pandas
import subprocess
import util
# Read the command line arguments.
parser = argparse.ArgumentParser(description="Generates Cuffdiff scripts, in the old format.")
parser.add_argument("-s", "--scriptsDirectory", help="Scripts directory.", default="cuffdiff")
parser.add_argument("-i", "--inputDirectory", help="Input directory with BAM files.", default="../results/tophat")
parser.add_argument("-o", "--outputDirectory", help="Output directory with Cuffdiff results.", default="../results/cuffdiff")
parser.add_argument("-q", "--submitJobsToQueue", help="Submit jobs to queue immediately.", choices=["yes", "no", "y", "n"], default="no")
args = parser.parse_args()
# If not in the main scripts directory, cd to the main scripts directory, if it exists.
util.cdMainScriptsDirectory()
# Process the command line arguments.
scriptsDirectory = os.path.abspath(args.scriptsDirectory)
inputDirectory = os.path.abspath(args.inputDirectory)
outputDirectory = os.path.abspath(args.outputDirectory)
# Check if the inputDirectory exists, and is a directory.
util.checkInputDirectory(inputDirectory)
# Read configuration files
config = util.readConfigurationFiles()
header = config.getboolean("server", "PBS_header")
stranded = config.getboolean("project", "stranded")
genome = config.get("project", "genome")
genomeFile = config.get(genome, "genomeFile")
gtfFile = config.get(genome, "gtfFile")
processors = config.get("cuffdiff", "processors")
# Read samples file. Create file first if it doesn't exist.
if not os.path.exists("samples.txt"):
subprocess.call("samples.py", shell=True)
samplesFile = pandas.read_csv("samples.txt", delim_whitespace=True)
# Get samples
samples = samplesFile["sample"]
conditions = samplesFile["group"]
unique_conditions = list(OrderedDict.fromkeys(conditions)) # Remove duplicates.
# Create scripts directory, if it does not exist yet, and cd to it.
if not os.path.exists(scriptsDirectory):
os.mkdir(scriptsDirectory)
os.chdir(scriptsDirectory)
# Create output directory, if it does not exist yet.
if not os.path.exists(outputDirectory):
os.makedirs(outputDirectory)
# Write the cuffdiff script.
scriptName = "cuffdiff.sh"
script = open(scriptName, 'w')
if header:
util.writeHeader(script, config, "cuffdiff")
script.write("cuffdiff" + " \\\n")
script.write("--labels ")
script.write(",".join(unique_conditions) + " \\\n")
script.write("-p " + processors + " \\\n")
script.write("--no-effective-length-correction " + "\\\n")
if stranded:
script.write("--library-type fr-firststrand" + " \\\n")
script.write("-u -b " + genomeFile + " \\\n")
script.write("-o " + os.path.relpath(os.path.join(outputDirectory)) + " \\\n")
script.write(gtfFile + " \\\n")
script.write(os.path.relpath(os.path.join(inputDirectory, samples[0], "accepted_hits.bam")))
previous_condition = conditions[0]
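# Cuffdiff's command line groups replicate BAMs of one condition with commas
# and separates conditions with spaces; the loop below emits a comma when the
# condition repeats and a space (line continuation) when it changes.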
for sample,condition in zip(samples[1:],conditions[1:]):
if condition == previous_condition:
script.write(",\\\n")
else:
script.write(" \\\n")
script.write(os.path.relpath(os.path.join(inputDirectory, sample, "accepted_hits.bam")))
previous_condition = condition
script.write(" \\\n")
script.write("&> " + scriptName + ".log")
script.close()
# Copy the CummeRbund script.
command = "cp -r " + os.path.join(config.get("server", "toolsFolder"), "abcngspipelines/rnaseq/cummerbund") + " " + os.path.dirname(scriptsDirectory)
print(command)
subprocess.call(command, shell=True)
if (args.submitJobsToQueue.lower() == "yes") | (args.submitJobsToQueue.lower() == "y"):
subprocess.call("submitJobs.py", shell=True)
| gpl-3.0 |
bond-/udacity-ml | src/numpy-pandas-tutorials/quiz-avg-bronze-medals.py | 1 | 1951 | from pandas import DataFrame, Series
import numpy
def avg_medal_count():
'''
Compute the average number of bronze medals earned by countries who
earned at least one gold medal.
Save this to a variable named avg_bronze_at_least_one_gold. You do not
need to call the function in your code when running it in the browser -
the grader will do that automatically when you submit or test it.
HINT-1:
You can retrieve all of the values of a Pandas column from a
data frame, "df", as follows:
df['column_name']
HINT-2:
The numpy.mean function can accept as an argument a single
Pandas column.
For example, numpy.mean(df["col_name"]) would return the
mean of the values located in "col_name" of a dataframe df.
'''
countries = ['Russian Fed.', 'Norway', 'Canada', 'United States',
'Netherlands', 'Germany', 'Switzerland', 'Belarus',
'Austria', 'France', 'Poland', 'China', 'Korea',
'Sweden', 'Czech Republic', 'Slovenia', 'Japan',
'Finland', 'Great Britain', 'Ukraine', 'Slovakia',
'Italy', 'Latvia', 'Australia', 'Croatia', 'Kazakhstan']
gold = [13, 11, 10, 9, 8, 8, 6, 5, 4, 4, 4, 3, 3, 2, 2, 2, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0]
silver = [11, 5, 10, 7, 7, 6, 3, 0, 8, 4, 1, 4, 3, 7, 4, 2, 4, 3, 1, 0, 0, 2, 2, 2, 1, 0]
bronze = [9, 10, 5, 12, 9, 5, 2, 1, 5, 7, 1, 2, 2, 6, 2, 4, 3, 1, 2, 1, 0, 6, 2, 1, 0, 1]
olympic_medal_counts = {'country_name':Series(countries),
'gold': Series(gold),
'silver': Series(silver),
'bronze': Series(bronze)}
df = DataFrame(olympic_medal_counts)
# YOUR CODE HERE
at_least_one_gold = df[df['gold'] > 0]
avg_bronze_at_least_one_gold = numpy.mean(at_least_one_gold['bronze'])
return avg_bronze_at_least_one_gold
| apache-2.0 |
icexelloss/spark | python/pyspark/worker.py | 3 | 19219 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Worker that receives input from Piped RDD.
"""
from __future__ import print_function
import os
import sys
import time
# 'resource' is a Unix specific module.
has_resource_module = True
try:
import resource
except ImportError:
has_resource_module = False
import traceback
from pyspark.accumulators import _accumulatorRegistry
from pyspark.broadcast import Broadcast, _broadcastRegistry
from pyspark.java_gateway import local_connect_and_auth
from pyspark.taskcontext import BarrierTaskContext, TaskContext
from pyspark.files import SparkFiles
from pyspark.rdd import PythonEvalType
from pyspark.serializers import write_with_length, write_int, read_long, read_bool, \
write_long, read_int, SpecialLengths, UTF8Deserializer, PickleSerializer, \
BatchedSerializer, ArrowStreamPandasUDFSerializer
from pyspark.sql.types import to_arrow_type, StructType
from pyspark.util import _get_argspec, fail_on_stopiteration
from pyspark import shuffle
if sys.version >= '3':
basestring = str
else:
from itertools import imap as map # use iterator map by default
pickleSer = PickleSerializer()
utf8_deserializer = UTF8Deserializer()
def report_times(outfile, boot, init, finish):
write_int(SpecialLengths.TIMING_DATA, outfile)
write_long(int(1000 * boot), outfile)
write_long(int(1000 * init), outfile)
write_long(int(1000 * finish), outfile)
def add_path(path):
# worker can be reused, so do not add the path multiple times
if path not in sys.path:
# overwrite system packages
sys.path.insert(1, path)
def read_command(serializer, file):
command = serializer._read_with_length(file)
if isinstance(command, Broadcast):
command = serializer.loads(command.value)
return command
def chain(f, g):
"""chain two functions together """
return lambda *a: g(f(*a))
def wrap_udf(f, return_type):
if return_type.needConversion():
toInternal = return_type.toInternal
return lambda *a: toInternal(f(*a))
else:
return lambda *a: f(*a)
def wrap_scalar_pandas_udf(f, return_type):
arrow_return_type = to_arrow_type(return_type)
def verify_result_length(*a):
result = f(*a)
if not hasattr(result, "__len__"):
pd_type = "Pandas.DataFrame" if type(return_type) == StructType else "Pandas.Series"
raise TypeError("Return type of the user-defined function should be "
"{}, but is {}".format(pd_type, type(result)))
if len(result) != len(a[0]):
raise RuntimeError("Result vector from pandas_udf was not the required length: "
"expected %d, got %d" % (len(a[0]), len(result)))
return result
return lambda *a: (verify_result_length(*a), arrow_return_type)
def wrap_grouped_map_pandas_udf(f, return_type, argspec):
def wrapped(key_series, value_series):
import pandas as pd
if len(argspec.args) == 1:
result = f(pd.concat(value_series, axis=1))
elif len(argspec.args) == 2:
key = tuple(s[0] for s in key_series)
result = f(key, pd.concat(value_series, axis=1))
if not isinstance(result, pd.DataFrame):
raise TypeError("Return type of the user-defined function should be "
"pandas.DataFrame, but is {}".format(type(result)))
if not len(result.columns) == len(return_type):
raise RuntimeError(
"Number of columns of the returned pandas.DataFrame "
"doesn't match specified schema. "
"Expected: {} Actual: {}".format(len(return_type), len(result.columns)))
return result
return lambda k, v: [(wrapped(k, v), to_arrow_type(return_type))]
def wrap_grouped_agg_pandas_udf(f, return_type):
arrow_return_type = to_arrow_type(return_type)
def wrapped(*series):
import pandas as pd
result = f(*series)
return pd.Series([result])
return lambda *a: (wrapped(*a), arrow_return_type)
def wrap_window_agg_pandas_udf(f, return_type, runner_conf, udf_index):
window_bound_types_str = runner_conf.get('pandas_window_bound_types')
window_bound_type = [t.strip().lower() for t in window_bound_types_str.split(',')][udf_index]
if window_bound_type == 'bounded':
return wrap_bounded_window_agg_pandas_udf(f, return_type)
elif window_bound_type == 'unbounded':
return wrap_unbounded_window_agg_pandas_udf(f, return_type)
else:
raise RuntimeError("Invalid window bound type: {} ".format(window_bound_type))
def wrap_unbounded_window_agg_pandas_udf(f, return_type):
# This is similar to grouped_agg_pandas_udf, the only difference
# is that window_agg_pandas_udf needs to repeat the return value
# to match window length, where grouped_agg_pandas_udf just returns
# the scalar value.
arrow_return_type = to_arrow_type(return_type)
def wrapped(*series):
import pandas as pd
result = f(*series)
return pd.Series([result]).repeat(len(series[0]))
return lambda *a: (wrapped(*a), arrow_return_type)
def wrap_bounded_window_agg_pandas_udf(f, return_type):
arrow_return_type = to_arrow_type(return_type)
def wrapped(begin_index, end_index, *series):
import pandas as pd
result = []
# Index operation is faster on np.ndarray,
# So we turn the index series into np array
# here for performance
begin_array = begin_index.values
end_array = end_index.values
for i in range(len(begin_array)):
# Note: Create a slice from a series for each window is
# actually pretty expensive. However, there
# is no easy way to reduce cost here.
# Note: s.iloc[i : j] is about 30% faster than s[i: j], with
# the caveat that the created slices share the same
# memory with s. Therefore, users are not allowed to
# change the values of the input series inside the window
# function. It is rare that a user needs to modify the
# input series in the window function, and therefore,
# this is a reasonable restriction.
# Note: Calling reset_index on the slices will increase the cost
# of creating slices by about 100%. Therefore, for performance
# reasons we don't do it here.
series_slices = [s.iloc[begin_array[i]: end_array[i]] for s in series]
result.append(f(*series_slices))
return pd.Series(result)
return lambda *a: (wrapped(*a), arrow_return_type)
def read_single_udf(pickleSer, infile, eval_type, runner_conf, udf_index):
num_arg = read_int(infile)
arg_offsets = [read_int(infile) for i in range(num_arg)]
row_func = None
for i in range(read_int(infile)):
f, return_type = read_command(pickleSer, infile)
if row_func is None:
row_func = f
else:
row_func = chain(row_func, f)
# make sure StopIteration's raised in the user code are not ignored
# when they are processed in a for loop, raise them as RuntimeError's instead
func = fail_on_stopiteration(row_func)
# the last returnType will be the return type of UDF
if eval_type == PythonEvalType.SQL_SCALAR_PANDAS_UDF:
return arg_offsets, wrap_scalar_pandas_udf(func, return_type)
elif eval_type == PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF:
argspec = _get_argspec(row_func) # signature was lost when wrapping it
return arg_offsets, wrap_grouped_map_pandas_udf(func, return_type, argspec)
elif eval_type == PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF:
return arg_offsets, wrap_grouped_agg_pandas_udf(func, return_type)
elif eval_type == PythonEvalType.SQL_WINDOW_AGG_PANDAS_UDF:
return arg_offsets, wrap_window_agg_pandas_udf(func, return_type, runner_conf, udf_index)
elif eval_type == PythonEvalType.SQL_BATCHED_UDF:
return arg_offsets, wrap_udf(func, return_type)
else:
raise ValueError("Unknown eval type: {}".format(eval_type))
def read_udfs(pickleSer, infile, eval_type):
runner_conf = {}
if eval_type in (PythonEvalType.SQL_SCALAR_PANDAS_UDF,
PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF,
PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF,
PythonEvalType.SQL_WINDOW_AGG_PANDAS_UDF):
# Load conf used for pandas_udf evaluation
num_conf = read_int(infile)
for i in range(num_conf):
k = utf8_deserializer.loads(infile)
v = utf8_deserializer.loads(infile)
runner_conf[k] = v
# NOTE: if timezone is set here, that implies respectSessionTimeZone is True
timezone = runner_conf.get("spark.sql.session.timeZone", None)
safecheck = runner_conf.get("spark.sql.execution.pandas.arrowSafeTypeConversion",
"false").lower() == 'true'
# Used by SQL_GROUPED_MAP_PANDAS_UDF and SQL_SCALAR_PANDAS_UDF when returning StructType
assign_cols_by_name = runner_conf.get(
"spark.sql.legacy.execution.pandas.groupedMap.assignColumnsByName", "true")\
.lower() == "true"
# Scalar Pandas UDF handles struct type arguments as pandas DataFrames instead of
# pandas Series. See SPARK-27240.
df_for_struct = eval_type == PythonEvalType.SQL_SCALAR_PANDAS_UDF
ser = ArrowStreamPandasUDFSerializer(timezone, safecheck, assign_cols_by_name,
df_for_struct)
else:
ser = BatchedSerializer(PickleSerializer(), 100)
num_udfs = read_int(infile)
udfs = {}
call_udf = []
mapper_str = ""
if eval_type == PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF:
# Create function like this:
# lambda a: f([a[0]], [a[0], a[1]])
# We assume there is only one UDF here because grouped map doesn't
# support combining multiple UDFs.
assert num_udfs == 1
# See FlatMapGroupsInPandasExec for how arg_offsets are used to
# distinguish between grouping attributes and data attributes
arg_offsets, udf = read_single_udf(
pickleSer, infile, eval_type, runner_conf, udf_index=0)
udfs['f'] = udf
split_offset = arg_offsets[0] + 1
arg0 = ["a[%d]" % o for o in arg_offsets[1: split_offset]]
arg1 = ["a[%d]" % o for o in arg_offsets[split_offset:]]
mapper_str = "lambda a: f([%s], [%s])" % (", ".join(arg0), ", ".join(arg1))
else:
# Create function like this:
# lambda a: (f0(a[0]), f1(a[1], a[2]), f2(a[3]))
# In the special case of a single UDF this will return a single result rather
# than a tuple of results; this is the format that the JVM side expects.
for i in range(num_udfs):
arg_offsets, udf = read_single_udf(
pickleSer, infile, eval_type, runner_conf, udf_index=i)
udfs['f%d' % i] = udf
args = ["a[%d]" % o for o in arg_offsets]
call_udf.append("f%d(%s)" % (i, ", ".join(args)))
mapper_str = "lambda a: (%s)" % (", ".join(call_udf))
mapper = eval(mapper_str, udfs)
func = lambda _, it: map(mapper, it)
# profiling is not supported for UDF
return func, None, ser, ser
def main(infile, outfile):
try:
boot_time = time.time()
split_index = read_int(infile)
if split_index == -1: # for unit tests
sys.exit(-1)
version = utf8_deserializer.loads(infile)
if version != "%d.%d" % sys.version_info[:2]:
raise Exception(("Python in worker has different version %s than that in " +
"driver %s, PySpark cannot run with different minor versions." +
"Please check environment variables PYSPARK_PYTHON and " +
"PYSPARK_DRIVER_PYTHON are correctly set.") %
("%d.%d" % sys.version_info[:2], version))
# read inputs only for a barrier task
isBarrier = read_bool(infile)
boundPort = read_int(infile)
secret = UTF8Deserializer().loads(infile)
# set up memory limits
memory_limit_mb = int(os.environ.get('PYSPARK_EXECUTOR_MEMORY_MB', "-1"))
if memory_limit_mb > 0 and has_resource_module:
total_memory = resource.RLIMIT_AS
try:
(soft_limit, hard_limit) = resource.getrlimit(total_memory)
msg = "Current mem limits: {0} of max {1}\n".format(soft_limit, hard_limit)
print(msg, file=sys.stderr)
# convert to bytes
new_limit = memory_limit_mb * 1024 * 1024
if soft_limit == resource.RLIM_INFINITY or new_limit < soft_limit:
msg = "Setting mem limits to {0} of max {1}\n".format(new_limit, new_limit)
print(msg, file=sys.stderr)
resource.setrlimit(total_memory, (new_limit, new_limit))
except (resource.error, OSError, ValueError) as e:
# not all systems support resource limits, so warn instead of failing
print("WARN: Failed to set memory limit: {0}\n".format(e), file=sys.stderr)
# initialize global state
taskContext = None
if isBarrier:
taskContext = BarrierTaskContext._getOrCreate()
BarrierTaskContext._initialize(boundPort, secret)
else:
taskContext = TaskContext._getOrCreate()
# read inputs for TaskContext info
taskContext._stageId = read_int(infile)
taskContext._partitionId = read_int(infile)
taskContext._attemptNumber = read_int(infile)
taskContext._taskAttemptId = read_long(infile)
taskContext._localProperties = dict()
for i in range(read_int(infile)):
k = utf8_deserializer.loads(infile)
v = utf8_deserializer.loads(infile)
taskContext._localProperties[k] = v
shuffle.MemoryBytesSpilled = 0
shuffle.DiskBytesSpilled = 0
_accumulatorRegistry.clear()
# fetch name of workdir
spark_files_dir = utf8_deserializer.loads(infile)
SparkFiles._root_directory = spark_files_dir
SparkFiles._is_running_on_worker = True
# fetch names of includes (*.zip and *.egg files) and construct PYTHONPATH
add_path(spark_files_dir) # *.py files that were added will be copied here
num_python_includes = read_int(infile)
for _ in range(num_python_includes):
filename = utf8_deserializer.loads(infile)
add_path(os.path.join(spark_files_dir, filename))
if sys.version > '3':
import importlib
importlib.invalidate_caches()
# fetch names and values of broadcast variables
needs_broadcast_decryption_server = read_bool(infile)
num_broadcast_variables = read_int(infile)
if needs_broadcast_decryption_server:
# read the decrypted data from a server in the jvm
port = read_int(infile)
auth_secret = utf8_deserializer.loads(infile)
(broadcast_sock_file, _) = local_connect_and_auth(port, auth_secret)
for _ in range(num_broadcast_variables):
bid = read_long(infile)
if bid >= 0:
if needs_broadcast_decryption_server:
read_bid = read_long(broadcast_sock_file)
assert(read_bid == bid)
_broadcastRegistry[bid] = \
Broadcast(sock_file=broadcast_sock_file)
else:
path = utf8_deserializer.loads(infile)
_broadcastRegistry[bid] = Broadcast(path=path)
else:
bid = -bid - 1
_broadcastRegistry.pop(bid)
if needs_broadcast_decryption_server:
broadcast_sock_file.write(b'1')
broadcast_sock_file.close()
_accumulatorRegistry.clear()
eval_type = read_int(infile)
if eval_type == PythonEvalType.NON_UDF:
func, profiler, deserializer, serializer = read_command(pickleSer, infile)
else:
func, profiler, deserializer, serializer = read_udfs(pickleSer, infile, eval_type)
init_time = time.time()
def process():
iterator = deserializer.load_stream(infile)
serializer.dump_stream(func(split_index, iterator), outfile)
if profiler:
profiler.profile(process)
else:
process()
except Exception:
try:
write_int(SpecialLengths.PYTHON_EXCEPTION_THROWN, outfile)
write_with_length(traceback.format_exc().encode("utf-8"), outfile)
except IOError:
# JVM close the socket
pass
except Exception:
# Write the error to stderr if it happened while serializing
print("PySpark worker failed with exception:", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
sys.exit(-1)
finish_time = time.time()
report_times(outfile, boot_time, init_time, finish_time)
write_long(shuffle.MemoryBytesSpilled, outfile)
write_long(shuffle.DiskBytesSpilled, outfile)
# Mark the beginning of the accumulators section of the output
write_int(SpecialLengths.END_OF_DATA_SECTION, outfile)
write_int(len(_accumulatorRegistry), outfile)
for (aid, accum) in _accumulatorRegistry.items():
pickleSer._write_with_length((aid, accum._value), outfile)
# check end of stream
if read_int(infile) == SpecialLengths.END_OF_STREAM:
write_int(SpecialLengths.END_OF_STREAM, outfile)
else:
# write a different value to tell JVM to not reuse this worker
write_int(SpecialLengths.END_OF_DATA_SECTION, outfile)
sys.exit(-1)
if __name__ == '__main__':
# Read information about how to connect back to the JVM from the environment.
java_port = int(os.environ["PYTHON_WORKER_FACTORY_PORT"])
auth_secret = os.environ["PYTHON_WORKER_FACTORY_SECRET"]
(sock_file, _) = local_connect_and_auth(java_port, auth_secret)
main(sock_file, sock_file)
| apache-2.0 |
GuessWhoSamFoo/pandas | pandas/tests/io/test_packers.py | 1 | 33322 | import datetime
from distutils.version import LooseVersion
import glob
import os
from warnings import catch_warnings
import numpy as np
import pytest
from pandas._libs.tslib import iNaT
from pandas.compat import PY3, u
from pandas.errors import PerformanceWarning
import pandas
from pandas import (
Categorical, DataFrame, Index, Interval, MultiIndex, NaT, Panel, Period,
Series, Timestamp, bdate_range, compat, date_range, period_range)
from pandas.tests.test_panel import assert_panel_equal
import pandas.util.testing as tm
from pandas.util.testing import (
assert_categorical_equal, assert_frame_equal, assert_index_equal,
assert_series_equal, ensure_clean)
from pandas.io.packers import read_msgpack, to_msgpack
nan = np.nan
try:
import blosc # NOQA
except ImportError:
_BLOSC_INSTALLED = False
else:
_BLOSC_INSTALLED = True
try:
import zlib # NOQA
except ImportError:
_ZLIB_INSTALLED = False
else:
_ZLIB_INSTALLED = True
@pytest.fixture(scope='module')
def current_packers_data():
# our current version packers data
from pandas.tests.io.generate_legacy_storage_files import (
create_msgpack_data)
return create_msgpack_data()
@pytest.fixture(scope='module')
def all_packers_data():
# our all of our current version packers data
from pandas.tests.io.generate_legacy_storage_files import (
create_data)
return create_data()
def check_arbitrary(a, b):
if isinstance(a, (list, tuple)) and isinstance(b, (list, tuple)):
assert(len(a) == len(b))
for a_, b_ in zip(a, b):
check_arbitrary(a_, b_)
elif isinstance(a, Panel):
assert_panel_equal(a, b)
elif isinstance(a, DataFrame):
assert_frame_equal(a, b)
elif isinstance(a, Series):
assert_series_equal(a, b)
elif isinstance(a, Index):
assert_index_equal(a, b)
elif isinstance(a, Categorical):
# Temporary workaround:
# Categorical.categories is changed from str to bytes in PY3,
# maybe the same issue as GH 13591
if PY3 and b.categories.inferred_type == 'string':
pass
else:
tm.assert_categorical_equal(a, b)
elif a is NaT:
assert b is NaT
elif isinstance(a, Timestamp):
assert a == b
assert a.freq == b.freq
else:
assert(a == b)
@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
class TestPackers(object):
def setup_method(self, method):
self.path = '__%s__.msg' % tm.rands(10)
def teardown_method(self, method):
pass
def encode_decode(self, x, compress=None, **kwargs):
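# Round-trip helper: write x with to_msgpack and read it back so tests can
# compare the reconstructed object against the original.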
with ensure_clean(self.path) as p:
to_msgpack(p, x, compress=compress, **kwargs)
return read_msgpack(p, **kwargs)
@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
class TestAPI(TestPackers):
def test_string_io(self):
df = DataFrame(np.random.randn(10, 2))
s = df.to_msgpack(None)
result = read_msgpack(s)
tm.assert_frame_equal(result, df)
s = df.to_msgpack()
result = read_msgpack(s)
tm.assert_frame_equal(result, df)
s = df.to_msgpack()
result = read_msgpack(compat.BytesIO(s))
tm.assert_frame_equal(result, df)
s = to_msgpack(None, df)
result = read_msgpack(s)
tm.assert_frame_equal(result, df)
with ensure_clean(self.path) as p:
s = df.to_msgpack()
with open(p, 'wb') as fh:
fh.write(s)
result = read_msgpack(p)
tm.assert_frame_equal(result, df)
def test_path_pathlib(self):
df = tm.makeDataFrame()
result = tm.round_trip_pathlib(df.to_msgpack, read_msgpack)
tm.assert_frame_equal(df, result)
def test_path_localpath(self):
df = tm.makeDataFrame()
result = tm.round_trip_localpath(df.to_msgpack, read_msgpack)
tm.assert_frame_equal(df, result)
def test_iterator_with_string_io(self):
dfs = [DataFrame(np.random.randn(10, 2)) for i in range(5)]
s = to_msgpack(None, *dfs)
for i, result in enumerate(read_msgpack(s, iterator=True)):
tm.assert_frame_equal(result, dfs[i])
def test_invalid_arg(self):
# GH10369
class A(object):
def __init__(self):
self.read = 0
msg = (r"Invalid file path or buffer object type: <(class|type)"
r" '{}'>")
with pytest.raises(ValueError, match=msg.format('NoneType')):
read_msgpack(path_or_buf=None)
with pytest.raises(ValueError, match=msg.format('dict')):
read_msgpack(path_or_buf={})
with pytest.raises(ValueError, match=msg.format(r'.*\.A')):
read_msgpack(path_or_buf=A())
class TestNumpy(TestPackers):
def test_numpy_scalar_float(self):
x = np.float32(np.random.rand())
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_numpy_scalar_complex(self):
x = np.complex64(np.random.rand() + 1j * np.random.rand())
x_rec = self.encode_decode(x)
assert np.allclose(x, x_rec)
def test_scalar_float(self):
x = np.random.rand()
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_scalar_bool(self):
x = np.bool_(1)
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
x = np.bool_(0)
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_scalar_complex(self):
x = np.random.rand() + 1j * np.random.rand()
x_rec = self.encode_decode(x)
assert np.allclose(x, x_rec)
def test_list_numpy_float(self):
x = [np.float32(np.random.rand()) for i in range(5)]
x_rec = self.encode_decode(x)
# current msgpack cannot distinguish list/tuple
tm.assert_almost_equal(tuple(x), x_rec)
x_rec = self.encode_decode(tuple(x))
tm.assert_almost_equal(tuple(x), x_rec)
def test_list_numpy_float_complex(self):
if not hasattr(np, 'complex128'):
pytest.skip('numpy can not handle complex128')
x = [np.float32(np.random.rand()) for i in range(5)] + \
[np.complex128(np.random.rand() + 1j * np.random.rand())
for i in range(5)]
x_rec = self.encode_decode(x)
assert np.allclose(x, x_rec)
def test_list_float(self):
x = [np.random.rand() for i in range(5)]
x_rec = self.encode_decode(x)
# current msgpack cannot distinguish list/tuple
tm.assert_almost_equal(tuple(x), x_rec)
x_rec = self.encode_decode(tuple(x))
tm.assert_almost_equal(tuple(x), x_rec)
def test_list_float_complex(self):
x = [np.random.rand() for i in range(5)] + \
[(np.random.rand() + 1j * np.random.rand()) for i in range(5)]
x_rec = self.encode_decode(x)
assert np.allclose(x, x_rec)
def test_dict_float(self):
x = {'foo': 1.0, 'bar': 2.0}
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_dict_complex(self):
x = {'foo': 1.0 + 1.0j, 'bar': 2.0 + 2.0j}
x_rec = self.encode_decode(x)
tm.assert_dict_equal(x, x_rec)
for key in x:
tm.assert_class_equal(x[key], x_rec[key], obj="complex value")
def test_dict_numpy_float(self):
x = {'foo': np.float32(1.0), 'bar': np.float32(2.0)}
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_dict_numpy_complex(self):
x = {'foo': np.complex128(1.0 + 1.0j),
'bar': np.complex128(2.0 + 2.0j)}
x_rec = self.encode_decode(x)
tm.assert_dict_equal(x, x_rec)
for key in x:
tm.assert_class_equal(x[key], x_rec[key], obj="numpy complex128")
def test_numpy_array_float(self):
# run multiple times
for n in range(10):
x = np.random.rand(10)
for dtype in ['float32', 'float64']:
x = x.astype(dtype)
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_numpy_array_complex(self):
x = (np.random.rand(5) + 1j * np.random.rand(5)).astype(np.complex128)
x_rec = self.encode_decode(x)
assert (all(map(lambda x, y: x == y, x, x_rec)) and
x.dtype == x_rec.dtype)
def test_list_mixed(self):
x = [1.0, np.float32(3.5), np.complex128(4.25), u('foo'), np.bool_(1)]
x_rec = self.encode_decode(x)
# current msgpack cannot distinguish list/tuple
tm.assert_almost_equal(tuple(x), x_rec)
x_rec = self.encode_decode(tuple(x))
tm.assert_almost_equal(tuple(x), x_rec)
class TestBasic(TestPackers):
def test_timestamp(self):
for i in [Timestamp(
'20130101'), Timestamp('20130101', tz='US/Eastern'),
Timestamp('201301010501')]:
i_rec = self.encode_decode(i)
assert i == i_rec
def test_nat(self):
nat_rec = self.encode_decode(NaT)
assert NaT is nat_rec
def test_datetimes(self):
for i in [datetime.datetime(2013, 1, 1),
datetime.datetime(2013, 1, 1, 5, 1),
datetime.date(2013, 1, 1),
np.datetime64(datetime.datetime(2013, 1, 5, 2, 15))]:
i_rec = self.encode_decode(i)
assert i == i_rec
def test_timedeltas(self):
for i in [datetime.timedelta(days=1),
datetime.timedelta(days=1, seconds=10),
np.timedelta64(1000000)]:
i_rec = self.encode_decode(i)
assert i == i_rec
def test_periods(self):
# 13463
for i in [Period('2010-09', 'M'), Period('2014-Q1', 'Q')]:
i_rec = self.encode_decode(i)
assert i == i_rec
def test_intervals(self):
# 19967
for i in [Interval(0, 1), Interval(0, 1, 'left'),
Interval(10, 25., 'right')]:
i_rec = self.encode_decode(i)
assert i == i_rec
class TestIndex(TestPackers):
def setup_method(self, method):
super(TestIndex, self).setup_method(method)
self.d = {
'string': tm.makeStringIndex(100),
'date': tm.makeDateIndex(100),
'int': tm.makeIntIndex(100),
'rng': tm.makeRangeIndex(100),
'float': tm.makeFloatIndex(100),
'empty': Index([]),
'tuple': Index(zip(['foo', 'bar', 'baz'], [1, 2, 3])),
'period': Index(period_range('2012-1-1', freq='M', periods=3)),
'date2': Index(date_range('2013-01-1', periods=10)),
'bdate': Index(bdate_range('2013-01-02', periods=10)),
'cat': tm.makeCategoricalIndex(100),
'interval': tm.makeIntervalIndex(100),
'timedelta': tm.makeTimedeltaIndex(100, 'H')
}
self.mi = {
'reg': MultiIndex.from_tuples([('bar', 'one'), ('baz', 'two'),
('foo', 'two'),
('qux', 'one'), ('qux', 'two')],
names=['first', 'second']),
}
def test_basic_index(self):
for s, i in self.d.items():
i_rec = self.encode_decode(i)
tm.assert_index_equal(i, i_rec)
# datetime with no freq (GH5506)
i = Index([Timestamp('20130101'), Timestamp('20130103')])
i_rec = self.encode_decode(i)
tm.assert_index_equal(i, i_rec)
# datetime with timezone
i = Index([Timestamp('20130101 9:00:00'), Timestamp(
'20130103 11:00:00')]).tz_localize('US/Eastern')
i_rec = self.encode_decode(i)
tm.assert_index_equal(i, i_rec)
def test_multi_index(self):
for s, i in self.mi.items():
i_rec = self.encode_decode(i)
tm.assert_index_equal(i, i_rec)
def test_unicode(self):
i = tm.makeUnicodeIndex(100)
i_rec = self.encode_decode(i)
tm.assert_index_equal(i, i_rec)
def test_categorical_index(self):
# GH15487
df = DataFrame(np.random.randn(10, 2))
df = df.astype({0: 'category'}).set_index(0)
result = self.encode_decode(df)
tm.assert_frame_equal(result, df)
class TestSeries(TestPackers):
def setup_method(self, method):
super(TestSeries, self).setup_method(method)
self.d = {}
s = tm.makeStringSeries()
s.name = 'string'
self.d['string'] = s
s = tm.makeObjectSeries()
s.name = 'object'
self.d['object'] = s
s = Series(iNaT, dtype='M8[ns]', index=range(5))
self.d['date'] = s
data = {
'A': [0., 1., 2., 3., np.nan],
'B': [0, 1, 0, 1, 0],
'C': ['foo1', 'foo2', 'foo3', 'foo4', 'foo5'],
'D': date_range('1/1/2009', periods=5),
'E': [0., 1, Timestamp('20100101'), 'foo', 2.],
'F': [Timestamp('20130102', tz='US/Eastern')] * 2 +
[Timestamp('20130603', tz='CET')] * 3,
'G': [Timestamp('20130102', tz='US/Eastern')] * 5,
'H': Categorical([1, 2, 3, 4, 5]),
'I': Categorical([1, 2, 3, 4, 5], ordered=True),
'J': (np.bool_(1), 2, 3, 4, 5),
}
self.d['float'] = Series(data['A'])
self.d['int'] = Series(data['B'])
self.d['mixed'] = Series(data['E'])
self.d['dt_tz_mixed'] = Series(data['F'])
self.d['dt_tz'] = Series(data['G'])
self.d['cat_ordered'] = Series(data['H'])
self.d['cat_unordered'] = Series(data['I'])
self.d['numpy_bool_mixed'] = Series(data['J'])
def test_basic(self):
# run multiple times here
for n in range(10):
for s, i in self.d.items():
i_rec = self.encode_decode(i)
assert_series_equal(i, i_rec)
class TestCategorical(TestPackers):
def setup_method(self, method):
super(TestCategorical, self).setup_method(method)
self.d = {}
self.d['plain_str'] = Categorical(['a', 'b', 'c', 'd', 'e'])
self.d['plain_str_ordered'] = Categorical(['a', 'b', 'c', 'd', 'e'],
ordered=True)
self.d['plain_int'] = Categorical([5, 6, 7, 8])
self.d['plain_int_ordered'] = Categorical([5, 6, 7, 8], ordered=True)
def test_basic(self):
# run multiple times here
for n in range(10):
for s, i in self.d.items():
i_rec = self.encode_decode(i)
assert_categorical_equal(i, i_rec)
@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
class TestNDFrame(TestPackers):
def setup_method(self, method):
super(TestNDFrame, self).setup_method(method)
data = {
'A': [0., 1., 2., 3., np.nan],
'B': [0, 1, 0, 1, 0],
'C': ['foo1', 'foo2', 'foo3', 'foo4', 'foo5'],
'D': date_range('1/1/2009', periods=5),
'E': [0., 1, Timestamp('20100101'), 'foo', 2.],
'F': [Timestamp('20130102', tz='US/Eastern')] * 5,
'G': [Timestamp('20130603', tz='CET')] * 5,
'H': Categorical(['a', 'b', 'c', 'd', 'e']),
'I': Categorical(['a', 'b', 'c', 'd', 'e'], ordered=True),
}
self.frame = {
'float': DataFrame(dict(A=data['A'], B=Series(data['A']) + 1)),
'int': DataFrame(dict(A=data['B'], B=Series(data['B']) + 1)),
'mixed': DataFrame(data)}
self.panel = {
'float': Panel(dict(ItemA=self.frame['float'],
ItemB=self.frame['float'] + 1))}
def test_basic_frame(self):
for s, i in self.frame.items():
i_rec = self.encode_decode(i)
assert_frame_equal(i, i_rec)
def test_basic_panel(self):
with catch_warnings(record=True):
for s, i in self.panel.items():
i_rec = self.encode_decode(i)
assert_panel_equal(i, i_rec)
def test_multi(self):
i_rec = self.encode_decode(self.frame)
for k in self.frame.keys():
assert_frame_equal(self.frame[k], i_rec[k])
packed_items = tuple([self.frame['float'], self.frame['float'].A,
self.frame['float'].B, None])
l_rec = self.encode_decode(packed_items)
check_arbitrary(packed_items, l_rec)
# this is an oddity in that packed lists will be returned as tuples
packed_items = [self.frame['float'], self.frame['float'].A,
self.frame['float'].B, None]
l_rec = self.encode_decode(packed_items)
assert isinstance(l_rec, tuple)
check_arbitrary(packed_items, l_rec)
def test_iterator(self):
packed_items = [self.frame['float'], self.frame['float'].A,
self.frame['float'].B, None]
with ensure_clean(self.path) as path:
to_msgpack(path, *packed_items)
for i, packed in enumerate(read_msgpack(path, iterator=True)):
check_arbitrary(packed, packed_items[i])
def tests_datetimeindex_freq_issue(self):
# GH 5947
# inferring freq on the datetimeindex
df = DataFrame([1, 2, 3], index=date_range('1/1/2013', '1/3/2013'))
result = self.encode_decode(df)
assert_frame_equal(result, df)
df = DataFrame([1, 2], index=date_range('1/1/2013', '1/2/2013'))
result = self.encode_decode(df)
assert_frame_equal(result, df)
def test_dataframe_duplicate_column_names(self):
# GH 9618
expected_1 = DataFrame(columns=['a', 'a'])
expected_2 = DataFrame(columns=[1] * 100)
expected_2.loc[0] = np.random.randn(100)
expected_3 = DataFrame(columns=[1, 1])
expected_3.loc[0] = ['abc', np.nan]
result_1 = self.encode_decode(expected_1)
result_2 = self.encode_decode(expected_2)
result_3 = self.encode_decode(expected_3)
assert_frame_equal(result_1, expected_1)
assert_frame_equal(result_2, expected_2)
assert_frame_equal(result_3, expected_3)
class TestSparse(TestPackers):
def _check_roundtrip(self, obj, comparator, **kwargs):
# currently these are not implemented
# i_rec = self.encode_decode(obj)
# comparator(obj, i_rec, **kwargs)
msg = r"msgpack sparse (series|frame) is not implemented"
with pytest.raises(NotImplementedError, match=msg):
self.encode_decode(obj)
def test_sparse_series(self):
s = tm.makeStringSeries()
s[3:5] = np.nan
ss = s.to_sparse()
self._check_roundtrip(ss, tm.assert_series_equal,
check_series_type=True)
ss2 = s.to_sparse(kind='integer')
self._check_roundtrip(ss2, tm.assert_series_equal,
check_series_type=True)
ss3 = s.to_sparse(fill_value=0)
self._check_roundtrip(ss3, tm.assert_series_equal,
check_series_type=True)
def test_sparse_frame(self):
s = tm.makeDataFrame()
s.loc[3:5, 1:3] = np.nan
s.loc[8:10, -2] = np.nan
ss = s.to_sparse()
self._check_roundtrip(ss, tm.assert_frame_equal,
check_frame_type=True)
ss2 = s.to_sparse(kind='integer')
self._check_roundtrip(ss2, tm.assert_frame_equal,
check_frame_type=True)
ss3 = s.to_sparse(fill_value=0)
self._check_roundtrip(ss3, tm.assert_frame_equal,
check_frame_type=True)
class TestCompression(TestPackers):
"""See https://github.com/pandas-dev/pandas/pull/9783
"""
def setup_method(self, method):
try:
from sqlalchemy import create_engine
self._create_sql_engine = create_engine
except ImportError:
self._SQLALCHEMY_INSTALLED = False
else:
self._SQLALCHEMY_INSTALLED = True
super(TestCompression, self).setup_method(method)
data = {
'A': np.arange(1000, dtype=np.float64),
'B': np.arange(1000, dtype=np.int32),
'C': list(100 * 'abcdefghij'),
'D': date_range(datetime.datetime(2015, 4, 1), periods=1000),
'E': [datetime.timedelta(days=x) for x in range(1000)],
}
self.frame = {
'float': DataFrame({k: data[k] for k in ['A', 'A']}),
'int': DataFrame({k: data[k] for k in ['B', 'B']}),
'mixed': DataFrame(data),
}
def test_plain(self):
i_rec = self.encode_decode(self.frame)
for k in self.frame.keys():
assert_frame_equal(self.frame[k], i_rec[k])
def _test_compression(self, compress):
i_rec = self.encode_decode(self.frame, compress=compress)
for k in self.frame.keys():
value = i_rec[k]
expected = self.frame[k]
assert_frame_equal(value, expected)
# make sure that we can write to the new frames
for block in value._data.blocks:
assert block.values.flags.writeable
def test_compression_zlib(self):
if not _ZLIB_INSTALLED:
pytest.skip('no zlib')
self._test_compression('zlib')
def test_compression_blosc(self):
if not _BLOSC_INSTALLED:
pytest.skip('no blosc')
self._test_compression('blosc')
def _test_compression_warns_when_decompress_caches(
self, monkeypatch, compress):
not_garbage = []
control = [] # copied data
compress_module = globals()[compress]
real_decompress = compress_module.decompress
def decompress(ob):
"""mock decompress function that delegates to the real
decompress but caches the result and a copy of the result.
"""
res = real_decompress(ob)
not_garbage.append(res) # hold a reference to this bytes object
control.append(bytearray(res)) # copy the data here to check later
return res
# types mapped to values to add in place.
rhs = {
np.dtype('float64'): 1.0,
np.dtype('int32'): 1,
np.dtype('object'): 'a',
np.dtype('datetime64[ns]'): np.timedelta64(1, 'ns'),
np.dtype('timedelta64[ns]'): np.timedelta64(1, 'ns'),
}
with monkeypatch.context() as m, \
tm.assert_produces_warning(PerformanceWarning) as ws:
m.setattr(compress_module, 'decompress', decompress)
i_rec = self.encode_decode(self.frame, compress=compress)
for k in self.frame.keys():
value = i_rec[k]
expected = self.frame[k]
assert_frame_equal(value, expected)
# make sure that we can write to the new frames even though
# we needed to copy the data
for block in value._data.blocks:
assert block.values.flags.writeable
# mutate the data in some way
block.values[0] += rhs[block.dtype]
for w in ws:
# check the messages from our warnings
assert str(w.message) == ('copying data after decompressing; '
'this may mean that decompress is '
'caching its result')
for buf, control_buf in zip(not_garbage, control):
# make sure none of our mutations above affected the
# original buffers
assert buf == control_buf
def test_compression_warns_when_decompress_caches_zlib(self, monkeypatch):
if not _ZLIB_INSTALLED:
pytest.skip('no zlib')
self._test_compression_warns_when_decompress_caches(
monkeypatch, 'zlib')
def test_compression_warns_when_decompress_caches_blosc(self, monkeypatch):
if not _BLOSC_INSTALLED:
pytest.skip('no blosc')
self._test_compression_warns_when_decompress_caches(
monkeypatch, 'blosc')
def _test_small_strings_no_warn(self, compress):
empty = np.array([], dtype='uint8')
with tm.assert_produces_warning(None):
empty_unpacked = self.encode_decode(empty, compress=compress)
tm.assert_numpy_array_equal(empty_unpacked, empty)
assert empty_unpacked.flags.writeable
char = np.array([ord(b'a')], dtype='uint8')
with tm.assert_produces_warning(None):
char_unpacked = self.encode_decode(char, compress=compress)
tm.assert_numpy_array_equal(char_unpacked, char)
assert char_unpacked.flags.writeable
# if this test fails I am sorry because the interpreter is now in a
# bad state where b'a' points to 98 == ord(b'b').
char_unpacked[0] = ord(b'b')
# we compare the ord of bytes b'a' with unicode u'a' because they should
# always be the same (unless we were able to mutate the shared
# character singleton, in which case ord(b'a') == ord(b'b')).
assert ord(b'a') == ord(u'a')
tm.assert_numpy_array_equal(
char_unpacked,
np.array([ord(b'b')], dtype='uint8'),
)
def test_small_strings_no_warn_zlib(self):
if not _ZLIB_INSTALLED:
pytest.skip('no zlib')
self._test_small_strings_no_warn('zlib')
def test_small_strings_no_warn_blosc(self):
if not _BLOSC_INSTALLED:
pytest.skip('no blosc')
self._test_small_strings_no_warn('blosc')
def test_readonly_axis_blosc(self):
# GH11880
if not _BLOSC_INSTALLED:
pytest.skip('no blosc')
df1 = DataFrame({'A': list('abcd')})
df2 = DataFrame(df1, index=[1., 2., 3., 4.])
assert 1 in self.encode_decode(df1['A'], compress='blosc')
assert 1. in self.encode_decode(df2['A'], compress='blosc')
def test_readonly_axis_zlib(self):
# GH11880
df1 = DataFrame({'A': list('abcd')})
df2 = DataFrame(df1, index=[1., 2., 3., 4.])
assert 1 in self.encode_decode(df1['A'], compress='zlib')
assert 1. in self.encode_decode(df2['A'], compress='zlib')
def test_readonly_axis_blosc_to_sql(self):
# GH11880
if not _BLOSC_INSTALLED:
pytest.skip('no blosc')
if not self._SQLALCHEMY_INSTALLED:
pytest.skip('no sqlalchemy')
expected = DataFrame({'A': list('abcd')})
df = self.encode_decode(expected, compress='blosc')
eng = self._create_sql_engine("sqlite:///:memory:")
df.to_sql('test', eng, if_exists='append')
result = pandas.read_sql_table('test', eng, index_col='index')
result.index.names = [None]
assert_frame_equal(expected, result)
def test_readonly_axis_zlib_to_sql(self):
# GH11880
if not _ZLIB_INSTALLED:
pytest.skip('no zlib')
if not self._SQLALCHEMY_INSTALLED:
pytest.skip('no sqlalchemy')
expected = DataFrame({'A': list('abcd')})
df = self.encode_decode(expected, compress='zlib')
eng = self._create_sql_engine("sqlite:///:memory:")
df.to_sql('test', eng, if_exists='append')
result = pandas.read_sql_table('test', eng, index_col='index')
result.index.names = [None]
assert_frame_equal(expected, result)
class TestEncoding(TestPackers):
def setup_method(self, method):
super(TestEncoding, self).setup_method(method)
data = {
'A': [compat.u('\u2019')] * 1000,
'B': np.arange(1000, dtype=np.int32),
'C': list(100 * 'abcdefghij'),
'D': date_range(datetime.datetime(2015, 4, 1), periods=1000),
'E': [datetime.timedelta(days=x) for x in range(1000)],
'G': [400] * 1000
}
self.frame = {
'float': DataFrame({k: data[k] for k in ['A', 'A']}),
'int': DataFrame({k: data[k] for k in ['B', 'B']}),
'mixed': DataFrame(data),
}
self.utf_encodings = ['utf8', 'utf16', 'utf32']
def test_utf(self):
# GH10581
for encoding in self.utf_encodings:
for frame in compat.itervalues(self.frame):
result = self.encode_decode(frame, encoding=encoding)
assert_frame_equal(result, frame)
def test_default_encoding(self):
for frame in compat.itervalues(self.frame):
result = frame.to_msgpack()
expected = frame.to_msgpack(encoding='utf8')
assert result == expected
result = self.encode_decode(frame)
assert_frame_equal(result, frame)
files = glob.glob(os.path.join(os.path.dirname(__file__), "data",
"legacy_msgpack", "*", "*.msgpack"))
@pytest.fixture(params=files)
def legacy_packer(request, datapath):
return datapath(request.param)
@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
class TestMsgpack(object):
"""
How to add msgpack tests:
1. Install pandas version intended to output the msgpack.
2. Execute "generate_legacy_storage_files.py" to create the msgpack.
$ python generate_legacy_storage_files.py <output_dir> msgpack
3. Move the created msgpack file to the "data/legacy_msgpack/<version>" directory.
"""
minimum_structure = {'series': ['float', 'int', 'mixed',
'ts', 'mi', 'dup'],
'frame': ['float', 'int', 'mixed', 'mi'],
'panel': ['float'],
'index': ['int', 'date', 'period'],
'mi': ['reg2']}
def check_min_structure(self, data, version):
for typ, v in self.minimum_structure.items():
assert typ in data, '"{0}" not found in unpacked data'.format(typ)
for kind in v:
msg = '"{0}" not found in data["{1}"]'.format(kind, typ)
assert kind in data[typ], msg
def compare(self, current_data, all_data, vf, version):
# GH12277 encoding default used to be latin-1, now utf-8
if LooseVersion(version) < LooseVersion('0.18.0'):
data = read_msgpack(vf, encoding='latin-1')
else:
data = read_msgpack(vf)
self.check_min_structure(data, version)
for typ, dv in data.items():
assert typ in all_data, ('unpacked data contains '
'extra key "{0}"'
.format(typ))
for dt, result in dv.items():
assert dt in current_data[typ], ('data["{0}"] contains extra '
'key "{1}"'.format(typ, dt))
try:
expected = current_data[typ][dt]
except KeyError:
continue
# use a specific comparator
# if available
comp_method = "compare_{typ}_{dt}".format(typ=typ, dt=dt)
comparator = getattr(self, comp_method, None)
if comparator is not None:
comparator(result, expected, typ, version)
else:
check_arbitrary(result, expected)
return data
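# Note on the dispatch above (added for clarity): the comparator lookup is
# purely name-based, so for typ='series' and dt='dt_tz' the call resolves
# to self.compare_series_dt_tz defined below; anything without a matching
# compare_{typ}_{dt} method falls back to check_arbitrary.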
def compare_series_dt_tz(self, result, expected, typ, version):
# 8260
# dtype is object < 0.17.0
if LooseVersion(version) < LooseVersion('0.17.0'):
expected = expected.astype(object)
tm.assert_series_equal(result, expected)
else:
tm.assert_series_equal(result, expected)
def compare_frame_dt_mixed_tzs(self, result, expected, typ, version):
# 8260
# dtype is object < 0.17.0
if LooseVersion(version) < LooseVersion('0.17.0'):
expected = expected.astype(object)
tm.assert_frame_equal(result, expected)
else:
tm.assert_frame_equal(result, expected)
def test_msgpacks_legacy(self, current_packers_data, all_packers_data,
legacy_packer, datapath):
version = os.path.basename(os.path.dirname(legacy_packer))
# GH12142 0.17 files packed in P2 can't be read in P3
if (compat.PY3 and version.startswith('0.17.') and
legacy_packer.split('.')[-4][-1] == '2'):
msg = "Files packed in Py2 can't be read in Py3 ({})"
pytest.skip(msg.format(version))
try:
with catch_warnings(record=True):
self.compare(current_packers_data, all_packers_data,
legacy_packer, version)
except ImportError:
# blosc not installed
pass
def test_msgpack_period_freq(self):
# https://github.com/pandas-dev/pandas/issues/24135
s = Series(np.random.rand(5), index=date_range('20130101', periods=5))
r = read_msgpack(s.to_msgpack())
repr(r)
| bsd-3-clause |
cybernet14/scikit-learn | sklearn/linear_model/tests/test_coordinate_descent.py | 114 | 25281 | # Authors: Olivier Grisel <olivier.grisel@ensta.org>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
from sys import version_info
import numpy as np
from scipy import interpolate, sparse
from copy import deepcopy
from sklearn.datasets import load_boston
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import TempMemmap
from sklearn.linear_model.coordinate_descent import (
    Lasso, LassoCV, ElasticNet, ElasticNetCV, MultiTaskLasso,
    MultiTaskElasticNet, MultiTaskElasticNetCV, MultiTaskLassoCV,
    lasso_path, enet_path)
from sklearn.linear_model import LassoLarsCV, lars_path
from sklearn.utils import check_array
def check_warnings():
if version_info < (2, 6):
raise SkipTest("Testing for warnings is not supported in versions "
               "older than Python 2.6")
def test_lasso_zero():
# Check that the lasso can handle zero data without crashing
X = [[0], [0], [0]]
y = [0, 0, 0]
clf = Lasso(alpha=0.1).fit(X, y)
pred = clf.predict([[1], [2], [3]])
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_lasso_toy():
# Test Lasso on a toy example for various values of alpha.
# When validating this against glmnet notice that glmnet divides it
# against nobs.
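# For reference (a sketch of the commonly documented objective, stated
# here as an assumption rather than taken from this file): scikit-learn's
# Lasso minimizes
#     (1 / (2 * n_samples)) * ||y - Xw||_2^2 + alpha * ||w||_1
# so its alphas line up with glmnet's lambdas only once both sides scale
# the loss by the number of observations, which is what the note above
# refers to.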
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
T = [[2], [3], [4]] # test sample
clf = Lasso(alpha=1e-8)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.85])
assert_array_almost_equal(pred, [1.7, 2.55, 3.4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.25])
assert_array_almost_equal(pred, [0.5, 0.75, 1.])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy():
# Test ElasticNet for various parameters of alpha and l1_ratio.
# Actually, the parameter alpha = 0 should not be allowed. However,
# we test it as a border case.
# ElasticNet is tested with and without precomputed Gram matrix
X = np.array([[-1.], [0.], [1.]])
Y = [-1, 0, 1] # just a straight line
T = [[2.], [3.], [4.]] # test sample
# this should be the same as lasso
clf = ElasticNet(alpha=1e-8, l1_ratio=1.0)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=100,
precompute=False)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=True)
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=np.dot(X.T, X))
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def build_dataset(n_samples=50, n_features=200, n_informative_features=10,
n_targets=1):
"""
build an ill-posed linear regression problem with many noisy features and
comparatively few samples
"""
random_state = np.random.RandomState(0)
if n_targets > 1:
w = random_state.randn(n_features, n_targets)
else:
w = random_state.randn(n_features)
w[n_informative_features:] = 0.0
X = random_state.randn(n_samples, n_features)
y = np.dot(X, w)
X_test = random_state.randn(n_samples, n_features)
y_test = np.dot(X_test, w)
return X, y, X_test, y_test
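# A minimal usage sketch of build_dataset (shapes shown for illustration):
#     X, y, X_test, y_test = build_dataset()  # X has shape (50, 200)
# Only the first 10 entries of the true coefficient vector are non-zero,
# and n_features >> n_samples, which is what makes the problem ill-posed
# and a good stress test for the regularized estimators below.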
def test_lasso_cv():
X, y, X_test, y_test = build_dataset()
max_iter = 150
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter).fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter, precompute=True)
clf.fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
# Check that the lars and the coordinate descent implementation
# select a similar alpha
lars = LassoLarsCV(normalize=False, max_iter=30).fit(X, y)
# for this we check that the two selected alphas are no more than
# one index apart in the grid of clf.alphas
assert_true(np.abs(
np.searchsorted(clf.alphas_[::-1], lars.alpha_)
- np.searchsorted(clf.alphas_[::-1], clf.alpha_)) <= 1)
# check that they also give a similar MSE
mse_lars = interpolate.interp1d(lars.cv_alphas_, lars.cv_mse_path_.T)
np.testing.assert_approx_equal(mse_lars(clf.alphas_[5]).mean(),
clf.mse_path_[5].mean(), significant=2)
# test set
assert_greater(clf.score(X_test, y_test), 0.99)
def test_lasso_cv_positive_constraint():
X, y, X_test, y_test = build_dataset()
max_iter = 500
# Ensure the unconstrained fit has a negative coefficient
clf_unconstrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter, cv=2,
n_jobs=1)
clf_unconstrained.fit(X, y)
assert_true(min(clf_unconstrained.coef_) < 0)
# On same data, constrained fit has non-negative coefficients
clf_constrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
positive=True, cv=2, n_jobs=1)
clf_constrained.fit(X, y)
assert_true(min(clf_constrained.coef_) >= 0)
def test_lasso_path_return_models_vs_new_return_gives_same_coefficients():
# Test that lasso_path with lars_path style output gives the
# same result
# Some toy data
X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
y = np.array([1, 2, 3.1])
alphas = [5., 1., .5]
# Use lars_path and lasso_path(new output) with 1D linear interpolation
# to compute the same path
alphas_lars, _, coef_path_lars = lars_path(X, y, method='lasso')
coef_path_cont_lars = interpolate.interp1d(alphas_lars[::-1],
coef_path_lars[:, ::-1])
alphas_lasso2, coef_path_lasso2, _ = lasso_path(X, y, alphas=alphas,
return_models=False)
coef_path_cont_lasso = interpolate.interp1d(alphas_lasso2[::-1],
coef_path_lasso2[:, ::-1])
assert_array_almost_equal(
coef_path_cont_lasso(alphas), coef_path_cont_lars(alphas),
decimal=1)
def test_enet_path():
# We use a large number of samples and of informative features so that
# the l1_ratio selected is more toward ridge than lasso
X, y, X_test, y_test = build_dataset(n_samples=200, n_features=100,
n_informative_features=100)
max_iter = 150
# Here we have a small number of iterations, and thus the
# ElasticNet might not converge. This is to speed up tests
clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
l1_ratio=[0.5, 0.7], cv=3,
max_iter=max_iter)
ignore_warnings(clf.fit)(X, y)
# Well-conditioned settings, we should have selected our
# smallest penalty
assert_almost_equal(clf.alpha_, min(clf.alphas_))
# Non-sparse ground truth: we should have selected an elastic-net
# that is closer to ridge than to lasso
assert_equal(clf.l1_ratio_, min(clf.l1_ratio))
clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
l1_ratio=[0.5, 0.7], cv=3,
max_iter=max_iter, precompute=True)
ignore_warnings(clf.fit)(X, y)
# Well-conditioned settings, we should have selected our
# smallest penalty
assert_almost_equal(clf.alpha_, min(clf.alphas_))
# Non-sparse ground truth: we should have selected an elastic-net
# that is closer to ridge than to lasso
assert_equal(clf.l1_ratio_, min(clf.l1_ratio))
# We are in well-conditioned settings with low noise: we should
# have a good test-set performance
assert_greater(clf.score(X_test, y_test), 0.99)
# Multi-output/target case
X, y, X_test, y_test = build_dataset(n_features=10, n_targets=3)
clf = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7],
cv=3, max_iter=max_iter)
ignore_warnings(clf.fit)(X, y)
# We are in well-conditioned settings with low noise: we should
# have a good test-set performance
assert_greater(clf.score(X_test, y_test), 0.99)
assert_equal(clf.coef_.shape, (3, 10))
# Mono-output should have same cross-validated alpha_ and l1_ratio_
# in both cases.
X, y, _, _ = build_dataset(n_features=10)
clf1 = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf1.fit(X, y)
clf2 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf2.fit(X, y[:, np.newaxis])
assert_almost_equal(clf1.l1_ratio_, clf2.l1_ratio_)
assert_almost_equal(clf1.alpha_, clf2.alpha_)
def test_path_parameters():
X, y, _, _ = build_dataset()
max_iter = 100
clf = ElasticNetCV(n_alphas=50, eps=1e-3, max_iter=max_iter,
l1_ratio=0.5, tol=1e-3)
clf.fit(X, y) # new params
assert_almost_equal(0.5, clf.l1_ratio)
assert_equal(50, clf.n_alphas)
assert_equal(50, len(clf.alphas_))
def test_warm_start():
X, y, _, _ = build_dataset()
clf = ElasticNet(alpha=0.1, max_iter=5, warm_start=True)
ignore_warnings(clf.fit)(X, y)
ignore_warnings(clf.fit)(X, y) # do a second round with 5 iterations
clf2 = ElasticNet(alpha=0.1, max_iter=10)
ignore_warnings(clf2.fit)(X, y)
assert_array_almost_equal(clf2.coef_, clf.coef_)
def test_lasso_alpha_warning():
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
clf = Lasso(alpha=0)
assert_warns(UserWarning, clf.fit, X, Y)
def test_lasso_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
lasso = Lasso(alpha=0.1, max_iter=1000, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
lasso = Lasso(alpha=0.1, max_iter=1000, precompute=True, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
def test_enet_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
enet = ElasticNet(alpha=0.1, max_iter=1000, positive=True)
enet.fit(X, y)
assert_true(min(enet.coef_) >= 0)
def test_enet_cv_positive_constraint():
X, y, X_test, y_test = build_dataset()
max_iter = 500
# Ensure the unconstrained fit has a negative coefficient
enetcv_unconstrained = ElasticNetCV(n_alphas=3, eps=1e-1,
max_iter=max_iter,
cv=2, n_jobs=1)
enetcv_unconstrained.fit(X, y)
assert_true(min(enetcv_unconstrained.coef_) < 0)
# On same data, constrained fit has non-negative coefficients
enetcv_constrained = ElasticNetCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
cv=2, positive=True, n_jobs=1)
enetcv_constrained.fit(X, y)
assert_true(min(enetcv_constrained.coef_) >= 0)
def test_uniform_targets():
enet = ElasticNetCV(fit_intercept=True, n_alphas=3)
m_enet = MultiTaskElasticNetCV(fit_intercept=True, n_alphas=3)
lasso = LassoCV(fit_intercept=True, n_alphas=3)
m_lasso = MultiTaskLassoCV(fit_intercept=True, n_alphas=3)
models_single_task = (enet, lasso)
models_multi_task = (m_enet, m_lasso)
rng = np.random.RandomState(0)
X_train = rng.random_sample(size=(10, 3))
X_test = rng.random_sample(size=(10, 3))
y1 = np.empty(10)
y2 = np.empty((10, 2))
for model in models_single_task:
for y_values in (0, 5):
y1.fill(y_values)
assert_array_equal(model.fit(X_train, y1).predict(X_test), y1)
assert_array_equal(model.alphas_, [np.finfo(float).resolution]*3)
for model in models_multi_task:
for y_values in (0, 5):
y2[:, 0].fill(y_values)
y2[:, 1].fill(2 * y_values)
assert_array_equal(model.fit(X_train, y2).predict(X_test), y2)
assert_array_equal(model.alphas_, [np.finfo(float).resolution]*3)
def test_multi_task_lasso_and_enet():
X, y, X_test, y_test = build_dataset()
Y = np.c_[y, y]
# Y_test = np.c_[y_test, y_test]
clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
clf = MultiTaskElasticNet(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
def test_lasso_readonly_data():
X = np.array([[-1], [0], [1]])
Y = np.array([-1, 0, 1]) # just a straight line
T = np.array([[2], [3], [4]]) # test sample
with TempMemmap((X, Y)) as (X, Y):
clf = Lasso(alpha=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.25])
assert_array_almost_equal(pred, [0.5, 0.75, 1.])
assert_almost_equal(clf.dual_gap_, 0)
def test_multi_task_lasso_readonly_data():
X, y, X_test, y_test = build_dataset()
Y = np.c_[y, y]
with TempMemmap((X, Y)) as (X, Y):
Y = np.c_[y, y]
clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
def test_enet_multitarget():
n_targets = 3
X, y, _, _ = build_dataset(n_samples=10, n_features=8,
n_informative_features=10, n_targets=n_targets)
estimator = ElasticNet(alpha=0.01, fit_intercept=True)
estimator.fit(X, y)
coef, intercept, dual_gap = (estimator.coef_, estimator.intercept_,
estimator.dual_gap_)
for k in range(n_targets):
estimator.fit(X, y[:, k])
assert_array_almost_equal(coef[k, :], estimator.coef_)
assert_array_almost_equal(intercept[k], estimator.intercept_)
assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
def test_multioutput_enetcv_error():
X = np.random.randn(10, 2)
y = np.random.randn(10, 2)
clf = ElasticNetCV()
assert_raises(ValueError, clf.fit, X, y)
def test_multitask_enet_and_lasso_cv():
X, y, _, _ = build_dataset(n_features=100, n_targets=3)
clf = MultiTaskElasticNetCV().fit(X, y)
assert_almost_equal(clf.alpha_, 0.00556, 3)
clf = MultiTaskLassoCV().fit(X, y)
assert_almost_equal(clf.alpha_, 0.00278, 3)
X, y, _, _ = build_dataset(n_targets=3)
clf = MultiTaskElasticNetCV(n_alphas=50, eps=1e-3, max_iter=100,
l1_ratio=[0.3, 0.5], tol=1e-3)
clf.fit(X, y)
assert_equal(0.5, clf.l1_ratio_)
assert_equal((3, X.shape[1]), clf.coef_.shape)
assert_equal((3, ), clf.intercept_.shape)
assert_equal((2, 50, 3), clf.mse_path_.shape)
assert_equal((2, 50), clf.alphas_.shape)
X, y, _, _ = build_dataset(n_targets=3)
clf = MultiTaskLassoCV(n_alphas=50, eps=1e-3, max_iter=100, tol=1e-3)
clf.fit(X, y)
assert_equal((3, X.shape[1]), clf.coef_.shape)
assert_equal((3, ), clf.intercept_.shape)
assert_equal((50, 3), clf.mse_path_.shape)
assert_equal(50, len(clf.alphas_))
def test_1d_multioutput_enet_and_multitask_enet_cv():
X, y, _, _ = build_dataset(n_features=10)
y = y[:, np.newaxis]
clf = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf.fit(X, y[:, 0])
clf1 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf1.fit(X, y)
assert_almost_equal(clf.l1_ratio_, clf1.l1_ratio_)
assert_almost_equal(clf.alpha_, clf1.alpha_)
assert_almost_equal(clf.coef_, clf1.coef_[0])
assert_almost_equal(clf.intercept_, clf1.intercept_[0])
def test_1d_multioutput_lasso_and_multitask_lasso_cv():
X, y, _, _ = build_dataset(n_features=10)
y = y[:, np.newaxis]
clf = LassoCV(n_alphas=5, eps=2e-3)
clf.fit(X, y[:, 0])
clf1 = MultiTaskLassoCV(n_alphas=5, eps=2e-3)
clf1.fit(X, y)
assert_almost_equal(clf.alpha_, clf1.alpha_)
assert_almost_equal(clf.coef_, clf1.coef_[0])
assert_almost_equal(clf.intercept_, clf1.intercept_[0])
def test_sparse_input_dtype_enet_and_lassocv():
X, y, _, _ = build_dataset(n_features=10)
clf = ElasticNetCV(n_alphas=5)
clf.fit(sparse.csr_matrix(X), y)
clf1 = ElasticNetCV(n_alphas=5)
clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y)
assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)
clf = LassoCV(n_alphas=5)
clf.fit(sparse.csr_matrix(X), y)
clf1 = LassoCV(n_alphas=5)
clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y)
assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)
def test_precompute_invalid_argument():
X, y, _, _ = build_dataset()
for clf in [ElasticNetCV(precompute="invalid"),
LassoCV(precompute="invalid")]:
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_convergence():
X, y, _, _ = build_dataset()
model = ElasticNet(alpha=1e-3, tol=1e-3).fit(X, y)
n_iter_reference = model.n_iter_
# This dataset is not trivial enough for the model to converge in one pass.
assert_greater(n_iter_reference, 2)
# Check that n_iter_ is invariant to multiple calls to fit
# when warm_start=False, all else being equal.
model.fit(X, y)
n_iter_cold_start = model.n_iter_
assert_equal(n_iter_cold_start, n_iter_reference)
# Fit the same model again, using a warm start: the optimizer just performs
# a single pass before checking that it has already converged
model.set_params(warm_start=True)
model.fit(X, y)
n_iter_warm_start = model.n_iter_
assert_equal(n_iter_warm_start, 1)
def test_warm_start_convergence_with_regularizer_decrement():
boston = load_boston()
X, y = boston.data, boston.target
# Train a model to converge on a lightly regularized problem
final_alpha = 1e-5
low_reg_model = ElasticNet(alpha=final_alpha).fit(X, y)
# Fitting a new model on a more regularized version of the same problem.
# Fitting with high regularization is easier, so it should converge
# faster in general.
high_reg_model = ElasticNet(alpha=final_alpha * 10).fit(X, y)
assert_greater(low_reg_model.n_iter_, high_reg_model.n_iter_)
# Fit the solution to the original, less regularized version of the
# problem but from the solution of the highly regularized variant of
# the problem as a better starting point. This should also converge
# faster than the original model that starts from zero.
warm_low_reg_model = deepcopy(high_reg_model)
warm_low_reg_model.set_params(warm_start=True, alpha=final_alpha)
warm_low_reg_model.fit(X, y)
assert_greater(low_reg_model.n_iter_, warm_low_reg_model.n_iter_)
def test_random_descent():
# Test that both random and cyclic selection give the same results.
# Ensure that the test models fully converge and check a wide
# range of conditions.
# This uses the coordinate descent algo using the gram trick.
X, y, _, _ = build_dataset(n_samples=50, n_features=20)
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X, y)
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(X, y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# This uses the descent algo without the gram trick
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X.T, y[:20])
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(X.T, y[:20])
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Sparse Case
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(sparse.csr_matrix(X), y)
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(sparse.csr_matrix(X), y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Multioutput case.
new_y = np.hstack((y[:, np.newaxis], y[:, np.newaxis]))
clf_cyclic = MultiTaskElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X, new_y)
clf_random = MultiTaskElasticNet(selection='random', tol=1e-8,
random_state=42)
clf_random.fit(X, new_y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Raise error when selection is not in cyclic or random.
clf_random = ElasticNet(selection='invalid')
assert_raises(ValueError, clf_random.fit, X, y)
def test_deprecation_precompute_enet():
# Test that setting precompute="auto" gives a DeprecationWarning.
X, y, _, _ = build_dataset(n_samples=20, n_features=10)
clf = ElasticNet(precompute="auto")
assert_warns(DeprecationWarning, clf.fit, X, y)
clf = Lasso(precompute="auto")
assert_warns(DeprecationWarning, clf.fit, X, y)
def test_enet_path_positive():
# Test that the coefs returned by positive=True in enet_path are positive
X, y, _, _ = build_dataset(n_samples=50, n_features=50)
for path in [enet_path, lasso_path]:
pos_path_coef = path(X, y, positive=True)[1]
assert_true(np.all(pos_path_coef >= 0))
def test_sparse_dense_descent_paths():
# Test that dense and sparse input give the same output for descent paths.
X, y, _, _ = build_dataset(n_samples=50, n_features=20)
csr = sparse.csr_matrix(X)
for path in [enet_path, lasso_path]:
_, coefs, _ = path(X, y, fit_intercept=False)
_, sparse_coefs, _ = path(csr, y, fit_intercept=False)
assert_array_almost_equal(coefs, sparse_coefs)
def test_check_input_false():
X, y, _, _ = build_dataset(n_samples=20, n_features=10)
X = check_array(X, order='F', dtype='float64')
y = check_array(y, order='F', dtype='float64', ensure_2d=False)  # keep y 1-D
clf = ElasticNet(selection='cyclic', tol=1e-8)
# Check that no error is raised if data is provided in the right format
clf.fit(X, y, check_input=False)
X = check_array(X, order='F', dtype='float32')
clf.fit(X, y, check_input=True)
# Check that an error is raised if data is provided in the wrong format,
# because of check bypassing
assert_raises(ValueError, clf.fit, X, y, check_input=False)
# With no input checking, providing X in C order should result in
# incorrect computation
X = check_array(X, order='C', dtype='float64')
clf.fit(X, y, check_input=False)
coef_false = clf.coef_
clf.fit(X, y, check_input=True)
coef_true = clf.coef_
assert_raises(AssertionError, assert_array_almost_equal,
coef_true, coef_false)
def test_overrided_gram_matrix():
X, y, _, _ = build_dataset(n_samples=20, n_features=10)
Gram = X.T.dot(X)
clf = ElasticNet(selection='cyclic', tol=1e-8, precompute=Gram,
fit_intercept=True)
assert_warns_message(UserWarning,
"Gram matrix was provided but X was centered"
" to fit intercept, "
"or X was normalized : recomputing Gram matrix.",
clf.fit, X, y)
| bsd-3-clause |
kazemakase/scikit-learn | examples/decomposition/plot_kernel_pca.py | 353 | 2011 | """
==========
Kernel PCA
==========
This example shows that Kernel PCA is able to find a projection of the data
that makes data linearly separable.
"""
print(__doc__)
# Authors: Mathieu Blondel
# Andreas Mueller
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
np.random.seed(0)
X, y = make_circles(n_samples=400, factor=.3, noise=.05)
kpca = KernelPCA(kernel="rbf", fit_inverse_transform=True, gamma=10)
X_kpca = kpca.fit_transform(X)
X_back = kpca.inverse_transform(X_kpca)
pca = PCA()
X_pca = pca.fit_transform(X)
# Plot results
plt.figure()
plt.subplot(2, 2, 1, aspect='equal')
plt.title("Original space")
reds = y == 0
blues = y == 1
plt.plot(X[reds, 0], X[reds, 1], "ro")
plt.plot(X[blues, 0], X[blues, 1], "bo")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
X1, X2 = np.meshgrid(np.linspace(-1.5, 1.5, 50), np.linspace(-1.5, 1.5, 50))
X_grid = np.array([np.ravel(X1), np.ravel(X2)]).T
# projection on the first principal component (in the phi space)
Z_grid = kpca.transform(X_grid)[:, 0].reshape(X1.shape)
plt.contour(X1, X2, Z_grid, colors='grey', linewidths=1, origin='lower')
plt.subplot(2, 2, 2, aspect='equal')
plt.plot(X_pca[reds, 0], X_pca[reds, 1], "ro")
plt.plot(X_pca[blues, 0], X_pca[blues, 1], "bo")
plt.title("Projection by PCA")
plt.xlabel("1st principal component")
plt.ylabel("2nd component")
plt.subplot(2, 2, 3, aspect='equal')
plt.plot(X_kpca[reds, 0], X_kpca[reds, 1], "ro")
plt.plot(X_kpca[blues, 0], X_kpca[blues, 1], "bo")
plt.title("Projection by KPCA")
plt.xlabel("1st principal component in space induced by $\phi$")
plt.ylabel("2nd component")
plt.subplot(2, 2, 4, aspect='equal')
plt.plot(X_back[reds, 0], X_back[reds, 1], "ro")
plt.plot(X_back[blues, 0], X_back[blues, 1], "bo")
plt.title("Original space after inverse transform")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
plt.subplots_adjust(0.02, 0.10, 0.98, 0.94, 0.04, 0.35)
plt.show()
| bsd-3-clause |
soulmachine/scikit-learn | sklearn/metrics/cluster/unsupervised.py | 10 | 8104 | """ Unsupervised evaluation metrics. """
# Authors: Robert Layton <robertlayton@gmail.com>
#
# License: BSD 3 clause
import numpy as np
from ...utils import check_random_state
from ..pairwise import pairwise_distances
def silhouette_score(X, labels, metric='euclidean', sample_size=None,
random_state=None, **kwds):
"""Compute the mean Silhouette Coefficient of all samples.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``. To clarify, ``b`` is the distance between a sample and the nearest
cluster that the sample is not a part of.
Note that the Silhouette Coefficient is only defined if the number of
labels satisfies 2 <= n_labels <= n_samples - 1.
This function returns the mean Silhouette Coefficient over all samples.
To obtain the values for each sample, use :func:`silhouette_samples`.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters. Negative values generally indicate that a sample has
been assigned to the wrong cluster, as a different cluster is more similar.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
labels : array, shape = [n_samples]
label values for each sample
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`metrics.pairwise.pairwise_distances
<sklearn.metrics.pairwise.pairwise_distances>`. If X is the distance
array itself, use ``metric="precomputed"``.
sample_size : int or None
The size of the sample to use when computing the Silhouette
Coefficient. If ``sample_size is None``, no sampling is used.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : float
Mean Silhouette Coefficient for all samples.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<http://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<http://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
"""
n_labels = len(np.unique(labels))
n_samples = X.shape[0]
if not 2 <= n_labels <= n_samples - 1:
raise ValueError("Number of labels is %d, "
"but valid values are 2 to n_samples - 1 "
"(inclusive)" % n_labels)
if sample_size is not None:
random_state = check_random_state(random_state)
indices = random_state.permutation(X.shape[0])[:sample_size]
if metric == "precomputed":
X, labels = X[indices].T[indices].T, labels[indices]
else:
X, labels = X[indices], labels[indices]
return np.mean(silhouette_samples(X, labels, metric=metric, **kwds))
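# A minimal usage sketch (hypothetical data, added for illustration and
# not part of this module):
#
#     from sklearn.cluster import KMeans
#     from sklearn.datasets import make_blobs
#     X, _ = make_blobs(n_samples=100, centers=3, random_state=0)
#     labels = KMeans(n_clusters=3, random_state=0).fit_predict(X)
#     score = silhouette_score(X, labels)  # near 1 for well-separated blobs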
def silhouette_samples(X, labels, metric='euclidean', **kwds):
"""Compute the Silhouette Coefficient for each sample.
The Silhouette Coefficient is a measure of how well samples are clustered
with samples that are similar to themselves. Clustering models with a high
Silhouette Coefficient are said to be dense, where samples in the same
cluster are similar to each other, and well separated, where samples in
different clusters are not very similar to each other.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``.
Note that the Silhouette Coefficient is only defined if the number of
labels satisfies 2 <= n_labels <= n_samples - 1.
This function returns the Silhouette Coefficient for each sample.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
labels : array, shape = [n_samples]
label values for each sample
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`sklearn.metrics.pairwise.pairwise_distances`. If X is
the distance array itself, use "precomputed" as the metric.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a ``scipy.spatial.distance`` metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : array, shape = [n_samples]
Silhouette Coefficient for each sample.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<http://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<http://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
"""
distances = pairwise_distances(X, metric=metric, **kwds)
n = labels.shape[0]
A = np.array([_intra_cluster_distance(distances[i], labels, i)
for i in range(n)])
B = np.array([_nearest_cluster_distance(distances[i], labels, i)
for i in range(n)])
sil_samples = (B - A) / np.maximum(A, B)
# nan values are for clusters of size 1, and should be 0
return np.nan_to_num(sil_samples)
def _intra_cluster_distance(distances_row, labels, i):
"""Calculate the mean intra-cluster distance for sample i.
Parameters
----------
distances_row : array, shape = [n_samples]
Pairwise distance matrix between sample i and each sample.
labels : array, shape = [n_samples]
label values for each sample
i : int
Sample index being calculated. It is excluded from calculation and
used to determine the current label
Returns
-------
a : float
Mean intra-cluster distance for sample i
"""
mask = labels == labels[i]
mask[i] = False
a = np.mean(distances_row[mask])
return a
def _nearest_cluster_distance(distances_row, labels, i):
"""Calculate the mean nearest-cluster distance for sample i.
Parameters
----------
distances_row : array, shape = [n_samples]
Pairwise distance matrix between sample i and each sample.
labels : array, shape = [n_samples]
label values for each sample
i : int
Sample index being calculated. It is used to determine the current
label.
Returns
-------
b : float
Mean nearest-cluster distance for sample i
"""
label = labels[i]
b = np.min([np.mean(distances_row[labels == cur_label])
for cur_label in set(labels) if not cur_label == label])
return b
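# Worked numeric sketch (illustrative values only): for a sample i with
# mean intra-cluster distance a = 1.0 and mean nearest-cluster distance
# b = 3.0, the silhouette value is (b - a) / max(a, b) = 2.0 / 3.0 ~ 0.67,
# i.e. the sample sits much closer to its own cluster than to the next one.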
| bsd-3-clause |
glouppe/scikit-learn | sklearn/_build_utils/__init__.py | 21 | 1125 | """
Utilities useful during the build.
"""
# author: Andy Mueller, Gael Varoquaux
# license: BSD
from __future__ import division, print_function, absolute_import
HASH_FILE = 'cythonize.dat'
DEFAULT_ROOT = 'sklearn'
# WindowsError is not defined on unix systems
try:
WindowsError
except NameError:
WindowsError = None
from numpy.distutils.system_info import get_info
def get_blas_info():
def atlas_not_found(blas_info_):
def_macros = blas_info.get('define_macros', [])
for x in def_macros:
if x[0] == "NO_ATLAS_INFO":
# if x[1] != 1 we should have lapack
# how do we do that now?
return True
if x[0] == "ATLAS_INFO":
if "None" in x[1]:
# this one turned up on FreeBSD
return True
return False
blas_info = get_info('blas_opt', 0)
if (not blas_info) or atlas_not_found(blas_info):
cblas_libs = ['cblas']
blas_info.pop('libraries', None)
else:
cblas_libs = blas_info.pop('libraries', [])
return cblas_libs, blas_info
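# A usage sketch (hypothetical consumer; real callers live in the
# package's setup.py files):
#     cblas_libs, blas_info = get_blas_info()
#     # pass cblas_libs as `libraries` and the remaining blas_info entries
#     # as extra kwargs to numpy.distutils Extension objects linking CBLAS.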
| bsd-3-clause |
ubic135/odoo-design | addons/resource/faces/timescale.py | 170 | 3902 | ############################################################################
# Copyright (C) 2005 by Reithinger GmbH
# mreithinger@web.de
#
# This file is part of faces.
#
# faces is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# faces is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
############################################################################
import faces.pcalendar as pcal
import matplotlib.cbook as cbook
import datetime
import sys
class TimeScale(object):
def __init__(self, calendar):
self.data_calendar = calendar
self._create_chart_calendar()
self.now = self.to_num(self.data_calendar.now)
def to_datetime(self, xval):
return xval.to_datetime()
def to_num(self, date):
return self.chart_calendar.WorkingDate(date)
def is_free_slot(self, value):
dt1 = self.chart_calendar.to_starttime(value)
dt2 = self.data_calendar.to_starttime\
(self.data_calendar.from_datetime(dt1))
return dt1 != dt2
def is_free_day(self, value):
dt1 = self.chart_calendar.to_starttime(value)
dt2 = self.data_calendar.to_starttime\
(self.data_calendar.from_datetime(dt1))
return dt1.date() != dt2.date()
def _create_chart_calendar(self):
dcal = self.data_calendar
ccal = self.chart_calendar = pcal.Calendar()
ccal.minimum_time_unit = 1
#pad worktime slots of calendar (all days should be equally long)
slot_sum = lambda slots: sum(map(lambda slot: slot[1] - slot[0], slots))
day_sum = lambda day: slot_sum(dcal.get_working_times(day))
max_work_time = max(map(day_sum, range(7)))
        #working time should take up 2/3 of each (equally long) chart day
sum_time = 3 * max_work_time / 2
#now create timeslots for ccal
def create_time_slots(day):
src_slots = dcal.get_working_times(day)
slots = [0, src_slots, 24*60]
slots = tuple(cbook.flatten(slots))
slots = zip(slots[:-1], slots[1:])
#balance non working slots
work_time = slot_sum(src_slots)
non_work_time = sum_time - work_time
non_slots = filter(lambda s: s not in src_slots, slots)
non_slots = map(lambda s: (s[1] - s[0], s), non_slots)
non_slots.sort()
slots = []
i = 0
for l, s in non_slots:
delta = non_work_time / (len(non_slots) - i)
delta = min(l, delta)
non_work_time -= delta
slots.append((s[0], s[0] + delta))
i += 1
slots.extend(src_slots)
slots.sort()
return slots
min_delta = sys.maxint
for i in range(7):
slots = create_time_slots(i)
ccal.working_times[i] = slots
min_delta = min(min_delta, min(map(lambda s: s[1] - s[0], slots)))
ccal._recalc_working_time()
self.slot_delta = min_delta
self.day_delta = sum_time
self.week_delta = ccal.week_time
_default_scale = TimeScale(pcal._default_calendar)
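#Standalone, hedged sketch (not part of faces) of the slot-balancing idea in
#_create_chart_calendar above: pad the gaps between working slots so every
#day sums to the same total length. Times are minutes from midnight.
def balance_day(work_slots, target_total):
    edges = [0] + [t for slot in work_slots for t in slot] + [24 * 60]
    gaps = [(end - start, (start, end))
            for start, end in zip(edges[:-1], edges[1:])
            if (start, end) not in work_slots and end > start]
    gaps.sort()
    remaining = target_total - sum(end - start for start, end in work_slots)
    balanced = list(work_slots)
    for i, (length, (start, end)) in enumerate(gaps):
        #spread the remaining padding evenly over the gaps left to visit
        delta = min(length, remaining / (len(gaps) - i))
        remaining -= delta
        balanced.append((start, start + delta))
    return sorted(balanced)
#e.g. balance_day([(9 * 60, 17 * 60)], 720) pads an 8h workday to 12h total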
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ktaneishi/deepchem | contrib/dragonn/models.py | 6 | 16267 | from __future__ import absolute_import, division, print_function
import matplotlib
import numpy as np
import os
import subprocess
import sys
import tempfile
matplotlib.use('pdf')
import matplotlib.pyplot as plt
from dragonn.metrics import ClassificationResult
from keras.layers.core import (Activation, Dense, Dropout, Flatten, Permute,
Reshape, TimeDistributedDense)
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.layers.recurrent import GRU
from keras.regularizers import l1
from keras.models import Sequential, model_from_json
from keras.callbacks import EarlyStopping
#class SequenceDNN(Model):
# """
# Sequence DNN models.
#
# Parameters
# ----------
# seq_length : int, optional
# length of input sequence.
# keras_model : instance of keras.models.Sequential, optional
# seq_length or keras_model must be specified.
# num_tasks : int, optional
# number of tasks. Default: 1.
# num_filters : list[int] | tuple[int]
# number of convolutional filters in each layer. Default: (15,).
# conv_width : list[int] | tuple[int]
# width of each layer's convolutional filters. Default: (15,).
# pool_width : int
# width of max pooling after the last layer. Default: 35.
# L1 : float
# strength of L1 penalty.
# dropout : float
# dropout probability in every convolutional layer. Default: 0.
# verbose: int
#    Verbosity level during training. Valid values: 0, 1, 2.
#
# Returns
# -------
# Compiled DNN model.
# """
#
# def __init__(self,
# seq_length=None,
# keras_model=None,
# use_RNN=False,
# num_tasks=1,
# num_filters=(15, 15, 15),
# conv_width=(15, 15, 15),
# pool_width=35,
# GRU_size=35,
# TDD_size=15,
# L1=0,
# dropout=0.0,
# num_epochs=100,
# verbose=1):
# self.num_tasks = num_tasks
# self.num_epochs = num_epochs
# self.verbose = verbose
# self.train_metrics = []
# self.valid_metrics = []
# if keras_model is not None and seq_length is None:
# self.model = keras_model
# self.num_tasks = keras_model.layers[-1].output_shape[-1]
# elif seq_length is not None and keras_model is None:
# self.model = Sequential()
# assert len(num_filters) == len(conv_width)
# for i, (nb_filter, nb_col) in enumerate(zip(num_filters, conv_width)):
# conv_height = 4 if i == 0 else 1
# self.model.add(
# Convolution2D(
# nb_filter=nb_filter,
# nb_row=conv_height,
# nb_col=nb_col,
# activation='linear',
# init='he_normal',
# input_shape=(1, 4, seq_length),
# W_regularizer=l1(L1),
# b_regularizer=l1(L1)))
# self.model.add(Activation('relu'))
# self.model.add(Dropout(dropout))
# self.model.add(MaxPooling2D(pool_size=(1, pool_width)))
# if use_RNN:
# num_max_pool_outputs = self.model.layers[-1].output_shape[-1]
# self.model.add(Reshape((num_filters[-1], num_max_pool_outputs)))
# self.model.add(Permute((2, 1)))
# self.model.add(GRU(GRU_size, return_sequences=True))
# self.model.add(TimeDistributedDense(TDD_size, activation='relu'))
# self.model.add(Flatten())
# self.model.add(Dense(output_dim=self.num_tasks))
# self.model.add(Activation('sigmoid'))
# self.model.compile(optimizer='adam', loss='binary_crossentropy')
# else:
# raise ValueError(
# "Exactly one of seq_length or keras_model must be specified!")
#
# def train(self,
# X,
# y,
# validation_data,
# early_stopping_metric='Loss',
# early_stopping_patience=5,
# save_best_model_to_prefix=None):
# if y.dtype != bool:
# assert set(np.unique(y)) == {0, 1}
# y = y.astype(bool)
# multitask = y.shape[1] > 1
# if not multitask:
# num_positives = y.sum()
# num_sequences = len(y)
# num_negatives = num_sequences - num_positives
# if self.verbose >= 1:
# print('Training model (* indicates new best result)...')
# X_valid, y_valid = validation_data
# early_stopping_wait = 0
# best_metric = np.inf if early_stopping_metric == 'Loss' else -np.inf
# for epoch in range(1, self.num_epochs + 1):
# self.model.fit(
# X,
# y,
# batch_size=128,
# nb_epoch=1,
# class_weight={
# True: num_sequences / num_positives,
# False: num_sequences / num_negatives
# } if not multitask else None,
# verbose=self.verbose >= 2)
# epoch_train_metrics = self.test(X, y)
# epoch_valid_metrics = self.test(X_valid, y_valid)
# self.train_metrics.append(epoch_train_metrics)
# self.valid_metrics.append(epoch_valid_metrics)
# if self.verbose >= 1:
# print('Epoch {}:'.format(epoch))
# print('Train {}'.format(epoch_train_metrics))
# print('Valid {}'.format(epoch_valid_metrics), end='')
# current_metric = epoch_valid_metrics[early_stopping_metric].mean()
# if (early_stopping_metric == 'Loss') == (current_metric <= best_metric):
# if self.verbose >= 1:
# print(' *')
# best_metric = current_metric
# best_epoch = epoch
# early_stopping_wait = 0
# if save_best_model_to_prefix is not None:
# self.save(save_best_model_to_prefix)
# else:
# if self.verbose >= 1:
# print()
# if early_stopping_wait >= early_stopping_patience:
# break
# early_stopping_wait += 1
# if self.verbose >= 1:
# print('Finished training after {} epochs.'.format(epoch))
# if save_best_model_to_prefix is not None:
# print("The best model's architecture and weights (from epoch {0}) "
# 'were saved to {1}.arch.json and {1}.weights.h5'.format(
# best_epoch, save_best_model_to_prefix))
#
# def predict(self, X):
# return self.model.predict(X, batch_size=128, verbose=False)
#
# def get_sequence_filters(self):
# """
# Returns 3D array of 2D sequence filters.
# """
# return self.model.layers[0].get_weights()[0].squeeze(axis=1)
#
# def deeplift(self, X, batch_size=200):
# """
# Returns (num_task, num_samples, 1, num_bases, sequence_length) deeplift score array.
# """
# assert len(np.shape(X)) == 4 and np.shape(X)[1] == 1
# from deeplift.conversion import keras_conversion as kc
#
# # convert to deeplift model and get scoring function
# deeplift_model = kc.convert_sequential_model(self.model, verbose=False)
# score_func = deeplift_model.get_target_contribs_func(
# find_scores_layer_idx=0)
# # use a 40% GC reference
# input_references = [np.array([0.3, 0.2, 0.2, 0.3])[None, None, :, None]]
# # get deeplift scores
# deeplift_scores = np.zeros((self.num_tasks,) + X.shape)
# for i in range(self.num_tasks):
# deeplift_scores[i] = score_func(
# task_idx=i,
# input_data_list=[X],
# batch_size=batch_size,
# progress_update=None,
# input_references_list=input_references)
# return deeplift_scores
#
# def in_silico_mutagenesis(self, X):
# """
# Returns (num_task, num_samples, 1, num_bases, sequence_length) ISM score array.
# """
# mutagenesis_scores = np.empty(X.shape + (self.num_tasks,), dtype=np.float32)
# wild_type_predictions = self.predict(X)
# wild_type_predictions = wild_type_predictions[:, np.newaxis, np.newaxis,
# np.newaxis]
# for sequence_index, (sequence, wild_type_prediction) in enumerate(
# zip(X, wild_type_predictions)):
# mutated_sequences = np.repeat(
# sequence[np.newaxis], np.prod(sequence.shape), axis=0)
# # remove wild-type
# arange = np.arange(len(mutated_sequences))
# horizontal_cycle = np.tile(
# np.arange(sequence.shape[-1]), sequence.shape[-2])
# mutated_sequences[arange, :, :, horizontal_cycle] = 0
# # add mutant
# vertical_repeat = np.repeat(
# np.arange(sequence.shape[-2]), sequence.shape[-1])
# mutated_sequences[arange, :, vertical_repeat, horizontal_cycle] = 1
# # make mutant predictions
# mutated_predictions = self.predict(mutated_sequences)
# mutated_predictions = mutated_predictions.reshape(sequence.shape +
# (self.num_tasks,))
# mutagenesis_scores[
# sequence_index] = wild_type_prediction - mutated_predictions
# return np.rollaxis(mutagenesis_scores, -1)
#
# @staticmethod
# def _plot_scores(X, output_directory, peak_width, score_func, score_name):
# from dragonn.plot import plot_bases_on_ax
# scores = score_func(X).squeeze(
# axis=2) # (num_task, num_samples, num_bases, sequence_length)
# try:
# os.makedirs(output_directory)
# except OSError:
# pass
# num_tasks = len(scores)
# for task_index, task_scores in enumerate(scores):
# for sequence_index, sequence_scores in enumerate(task_scores):
# # sequence_scores is num_bases x sequence_length
# basewise_max_sequence_scores = sequence_scores.max(axis=0)
# plt.clf()
# figure, (top_axis, bottom_axis) = plt.subplots(2)
# top_axis.plot(
# range(1,
# len(basewise_max_sequence_scores) + 1),
# basewise_max_sequence_scores)
# top_axis.set_title('{} scores (motif highlighted)'.format(score_name))
# peak_position = basewise_max_sequence_scores.argmax()
# top_axis.axvspan(
# peak_position - peak_width,
# peak_position + peak_width,
# color='grey',
# alpha=0.1)
# peak_sequence_scores = sequence_scores[:, peak_position - peak_width:
# peak_position + peak_width].T
# # Set non-max letter_heights to zero
# letter_heights = np.zeros_like(peak_sequence_scores)
# letter_heights[np.arange(len(letter_heights)),
# peak_sequence_scores.argmax(axis=1)] = \
# basewise_max_sequence_scores[peak_position - peak_width :
# peak_position + peak_width]
# plot_bases_on_ax(letter_heights, bottom_axis)
# bottom_axis.set_xticklabels(
# tuple(
# map(str,
# np.arange(peak_position - peak_width,
# peak_position + peak_width + 1))))
# bottom_axis.tick_params(axis='x', labelsize='small')
# plt.xlabel('Position')
# plt.ylabel('Score')
# plt.savefig(
# os.path.join(output_directory, 'sequence_{}{}'.format(
# sequence_index, '_task_{}'.format(task_index)
# if num_tasks > 1 else '')))
# plt.close()
#
# def plot_deeplift(self, X, output_directory, peak_width=10):
# self._plot_scores(
# X,
# output_directory,
# peak_width,
# score_func=self.deeplift,
# score_name='DeepLift')
#
# def plot_in_silico_mutagenesis(self, X, output_directory, peak_width=10):
# self._plot_scores(
# X,
# output_directory,
# peak_width,
# score_func=self.in_silico_mutagenesis,
# score_name='ISM')
#
# def plot_architecture(self, output_file):
# from dragonn.visualize_util import plot as plot_keras_model
# plot_keras_model(self.model, output_file, show_shape=True)
#
# def save(self, save_best_model_to_prefix):
# arch_fname = save_best_model_to_prefix + '.arch.json'
# weights_fname = save_best_model_to_prefix + '.weights.h5'
# open(arch_fname, 'w').write(self.model.to_json())
# self.model.save_weights(weights_fname, overwrite=True)
#
# @staticmethod
# def load(arch_fname, weights_fname=None):
# model_json_string = open(arch_fname).read()
# sequence_dnn = SequenceDNN(keras_model=model_from_json(model_json_string))
# if weights_fname is not None:
# sequence_dnn.model.load_weights(weights_fname)
# return sequence_dnn
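# Minimal Model base class so the estimators below have a parent to
# inherit from. This is a hedged stand-in: dragonn's actual base class
# (assumed to be defined elsewhere in the package) specifies the shared
# train/predict interface.
class Model(object):
    pass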
class MotifScoreRNN(Model):
def __init__(self, input_shape, gru_size=10, tdd_size=4):
self.model = Sequential()
self.model.add(
GRU(gru_size, return_sequences=True, input_shape=input_shape))
if tdd_size is not None:
self.model.add(TimeDistributedDense(tdd_size))
self.model.add(Flatten())
self.model.add(Dense(1))
self.model.add(Activation('sigmoid'))
print('Compiling model...')
self.model.compile(optimizer='adam', loss='binary_crossentropy')
def train(self, X, y, validation_data):
print('Training model...')
multitask = y.shape[1] > 1
if not multitask:
num_positives = y.sum()
num_sequences = len(y)
num_negatives = num_sequences - num_positives
self.model.fit(
X,
y,
batch_size=128,
nb_epoch=100,
validation_data=validation_data,
class_weight={
True: num_sequences / num_positives,
False: num_sequences / num_negatives
} if not multitask else None,
callbacks=[EarlyStopping(monitor='val_loss', patience=10)],
verbose=True)
def predict(self, X):
return self.model.predict(X, batch_size=128, verbose=False)
class gkmSVM(Model):
def __init__(self,
prefix='./gkmSVM',
word_length=11,
mismatches=3,
C=1,
threads=1,
cache_memory=100,
verbosity=4):
self.word_length = word_length
self.mismatches = mismatches
self.C = C
self.threads = threads
self.prefix = '_'.join(map(str, (prefix, word_length, mismatches, C)))
options_list = zip(
['-l', '-d', '-c', '-T', '-m', '-v'],
map(str,
(word_length, mismatches, C, threads, cache_memory, verbosity)))
self.options = ' '.join([' '.join(option) for option in options_list])
@property
def model_file(self):
model_fname = '{}.model.txt'.format(self.prefix)
return model_fname if os.path.isfile(model_fname) else None
@staticmethod
def encode_sequence_into_fasta_file(sequence_iterator, ofname):
"""writes sequences into fasta file
"""
with open(ofname, "w") as wf:
for i, seq in enumerate(sequence_iterator):
print('>{}'.format(i), file=wf)
print(seq, file=wf)
def train(self, X, y, validation_data=None):
"""
Trains gkm-svm, saves model file.
"""
y = y.squeeze()
pos_sequence = X[y]
neg_sequence = X[~y]
pos_fname = "%s.pos_seq.fa" % self.prefix
neg_fname = "%s.neg_seq.fa" % self.prefix
# create temporary fasta files
self.encode_sequence_into_fasta_file(pos_sequence, pos_fname)
self.encode_sequence_into_fasta_file(neg_sequence, neg_fname)
# run command
command = ' '.join(('gkmtrain', self.options, pos_fname, neg_fname,
self.prefix))
process = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
process.wait() # wait for it to finish
# remove fasta files
os.system("rm %s" % pos_fname)
os.system("rm %s" % neg_fname)
def predict(self, X):
if self.model_file is None:
raise RuntimeError("GkmSvm hasn't been trained!")
# write test fasta file
test_fname = "%s.test.fa" % self.prefix
self.encode_sequence_into_fasta_file(X, test_fname)
# test gkmsvm
temp_ofp = tempfile.NamedTemporaryFile()
threads_option = '-T %s' % (str(self.threads))
command = ' '.join([
'gkmpredict', test_fname, self.model_file, temp_ofp.name, threads_option
])
process = subprocess.Popen(command, shell=True)
process.wait() # wait for it to finish
os.system("rm %s" % test_fname) # remove fasta file
# get classification results
temp_ofp.seek(0)
y = np.array([line.split()[-1] for line in temp_ofp], dtype=float)
temp_ofp.close()
return np.expand_dims(y, 1)
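# Hedged usage sketch (not part of the original module): the intended
# workflow for the gkmSVM wrapper above. It requires the external
# `gkmtrain`/`gkmpredict` binaries on the PATH; the toy sequences and the
# prefix below are illustrative placeholders.
if __name__ == '__main__':
    toy_seqs = np.array(['ACGTACGTACG', 'TTTTTTTTTTT',
                         'ACGTACGTACG', 'GGGGGGGGGGG'])
    toy_labels = np.array([True, False, True, False])
    svm = gkmSVM(prefix='./toy_gkm', word_length=5, mismatches=1)
    svm.train(toy_seqs, toy_labels)  # writes FASTA files, runs gkmtrain
    print(svm.predict(toy_seqs))     # column of decision values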
| mit |
pnedunuri/scikit-learn | examples/cluster/plot_lena_ward_segmentation.py | 271 | 1998 | """
===============================================================
A demo of structured Ward hierarchical clustering on Lena image
===============================================================
Compute the segmentation of a 2D image with Ward hierarchical
clustering. The clustering is spatially constrained in order
for each segmented region to be in one piece.
"""
# Author : Vincent Michel, 2010
# Alexandre Gramfort, 2011
# License: BSD 3 clause
print(__doc__)
import time as time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.cluster import AgglomerativeClustering
###############################################################################
# Generate data
lena = sp.misc.lena()
# Downsample the image by a factor of 4
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
X = np.reshape(lena, (-1, 1))
###############################################################################
# Define the structure A of the data. Pixels connected to their neighbors.
connectivity = grid_to_graph(*lena.shape)
###############################################################################
# Compute clustering
print("Compute structured hierarchical clustering...")
st = time.time()
n_clusters = 15 # number of regions
ward = AgglomerativeClustering(n_clusters=n_clusters,
linkage='ward', connectivity=connectivity).fit(X)
label = np.reshape(ward.labels_, lena.shape)
print("Elapsed time: ", time.time() - st)
print("Number of pixels: ", label.size)
print("Number of clusters: ", np.unique(label).size)
###############################################################################
# Plot the results on an image
plt.figure(figsize=(5, 5))
plt.imshow(lena, cmap=plt.cm.gray)
for l in range(n_clusters):
    plt.contour(label == l, levels=[0.5],
                colors=[plt.cm.spectral(l / float(n_clusters)), ])
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
SamHames/scikit-image | skimage/viewer/tests/test_tools.py | 1 | 6018 | from collections import namedtuple
import numpy as np
from numpy.testing import assert_equal
from numpy.testing.decorators import skipif
from skimage import data
from skimage.viewer import ImageViewer, viewer_available
from skimage.viewer.canvastools import (
LineTool, ThickLineTool, RectangleTool, PaintTool)
from skimage.viewer.canvastools.base import CanvasToolBase
def get_end_points(image):
h, w = image.shape[0:2]
x = [w / 3, 2 * w / 3]
y = [h / 2] * 2
return np.transpose([x, y])
def create_mouse_event(ax, button=1, xdata=0, ydata=0, key=None):
"""
*name*
the event name
*canvas*
the FigureCanvas instance generating the event
*guiEvent*
the GUI event that triggered the matplotlib event
*x*
x position - pixels from left of canvas
*y*
y position - pixels from bottom of canvas
*inaxes*
the :class:`~matplotlib.axes.Axes` instance if mouse is over axes
*xdata*
x coord of mouse in data coords
*ydata*
y coord of mouse in data coords
*button*
button pressed None, 1, 2, 3, 'up', 'down' (up and down are used
for scroll events)
*key*
the key depressed when the mouse event triggered (see
:class:`KeyEvent`)
*step*
number of scroll steps (positive for 'up', negative for 'down')
"""
event = namedtuple('Event',
('name canvas guiEvent x y inaxes xdata ydata '
'button key step'))
event.button = button
event.x, event.y = ax.transData.transform((xdata, ydata))
event.xdata, event.ydata = xdata, ydata
event.inaxes = ax
event.canvas = ax.figure.canvas
event.key = key
event.step = 1
event.guiEvent = None
event.name = 'Custom'
return event
@skipif(not viewer_available)
def test_line_tool():
img = data.camera()
viewer = ImageViewer(img)
tool = LineTool(viewer.ax, maxdist=10)
tool.end_points = get_end_points(img)
assert_equal(tool.end_points, np.array([[170, 256], [341, 256]]))
# grab a handle and move it
grab = create_mouse_event(viewer.ax, xdata=170, ydata=256)
tool.on_mouse_press(grab)
move = create_mouse_event(viewer.ax, xdata=180, ydata=260)
tool.on_move(move)
tool.on_mouse_release(move)
assert_equal(tool.geometry, np.array([[180, 260], [341, 256]]))
# create a new line
new = create_mouse_event(viewer.ax, xdata=10, ydata=10)
tool.on_mouse_press(new)
move = create_mouse_event(viewer.ax, xdata=100, ydata=100)
tool.on_move(move)
tool.on_mouse_release(move)
assert_equal(tool.geometry, np.array([[100, 100], [10, 10]]))
@skipif(not viewer_available)
def test_thick_line_tool():
img = data.camera()
viewer = ImageViewer(img)
tool = ThickLineTool(viewer.ax, maxdist=10)
tool.end_points = get_end_points(img)
scroll_up = create_mouse_event(viewer.ax, button='up')
tool.on_scroll(scroll_up)
assert_equal(tool.linewidth, 2)
scroll_down = create_mouse_event(viewer.ax, button='down')
tool.on_scroll(scroll_down)
assert_equal(tool.linewidth, 1)
key_up = create_mouse_event(viewer.ax, key='+')
tool.on_key_press(key_up)
assert_equal(tool.linewidth, 2)
key_down = create_mouse_event(viewer.ax, key='-')
tool.on_key_press(key_down)
assert_equal(tool.linewidth, 1)
@skipif(not viewer_available)
def test_rect_tool():
img = data.camera()
viewer = ImageViewer(img)
tool = RectangleTool(viewer.ax, maxdist=10)
tool.extents = (100, 150, 100, 150)
assert_equal(tool.corners,
((100, 150, 150, 100), (100, 100, 150, 150)))
assert_equal(tool.extents, (100, 150, 100, 150))
assert_equal(tool.edge_centers,
((100, 125.0, 150, 125.0), (125.0, 100, 125.0, 150)))
assert_equal(tool.geometry, (100, 150, 100, 150))
# grab a corner and move it
grab = create_mouse_event(viewer.ax, xdata=100, ydata=100)
tool.press(grab)
move = create_mouse_event(viewer.ax, xdata=120, ydata=120)
tool.onmove(move)
tool.release(move)
assert_equal(tool.geometry, [120, 150, 120, 150])
# create a new line
new = create_mouse_event(viewer.ax, xdata=10, ydata=10)
tool.press(new)
move = create_mouse_event(viewer.ax, xdata=100, ydata=100)
tool.onmove(move)
tool.release(move)
assert_equal(tool.geometry, [10, 100, 10, 100])
@skipif(not viewer_available)
def test_paint_tool():
img = data.moon()
viewer = ImageViewer(img)
tool = PaintTool(viewer.ax, img.shape)
tool.radius = 10
assert_equal(tool.radius, 10)
tool.label = 2
assert_equal(tool.label, 2)
assert_equal(tool.shape, img.shape)
start = create_mouse_event(viewer.ax, xdata=100, ydata=100)
tool.on_mouse_press(start)
move = create_mouse_event(viewer.ax, xdata=110, ydata=110)
tool.on_move(move)
tool.on_mouse_release(move)
assert_equal(tool.overlay[tool.overlay == 2].size, 761)
tool.label = 5
start = create_mouse_event(viewer.ax, xdata=20, ydata=20)
tool.on_mouse_press(start)
move = create_mouse_event(viewer.ax, xdata=40, ydata=40)
tool.on_move(move)
tool.on_mouse_release(move)
assert_equal(tool.overlay[tool.overlay == 5].size, 881)
assert_equal(tool.overlay[tool.overlay == 2].size, 761)
enter = create_mouse_event(viewer.ax, key='enter')
tool.on_mouse_press(enter)
tool.overlay = tool.overlay * 0
assert_equal(tool.overlay.sum(), 0)
@skipif(not viewer_available)
def test_base_tool():
img = data.moon()
viewer = ImageViewer(img)
tool = CanvasToolBase(viewer.ax)
tool.set_visible(False)
tool.set_visible(True)
enter = create_mouse_event(viewer.ax, key='enter')
tool._on_key_press(enter)
tool.redraw()
tool.remove()
tool = CanvasToolBase(viewer.ax, useblit=False)
tool.redraw()
| bsd-3-clause |
soulmachine/scikit-learn | sklearn/tests/test_base.py | 19 | 6858 |
# Author: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.base import BaseEstimator, clone, is_classifier
from sklearn.svm import SVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.utils import deprecated
#############################################################################
# A few test classes
class MyEstimator(BaseEstimator):
def __init__(self, l1=0, empty=None):
self.l1 = l1
self.empty = empty
class K(BaseEstimator):
def __init__(self, c=None, d=None):
self.c = c
self.d = d
class T(BaseEstimator):
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class DeprecatedAttributeEstimator(BaseEstimator):
def __init__(self, a=None, b=None):
self.a = a
if b is not None:
DeprecationWarning("b is deprecated and renamed 'a'")
self.a = b
@property
@deprecated("Parameter 'b' is deprecated and renamed to 'a'")
def b(self):
return self._b
class Buggy(BaseEstimator):
" A buggy estimator that does not set its parameters right. "
def __init__(self, a=None):
self.a = 1
class NoEstimator(object):
def __init__(self):
pass
def fit(self, X=None, y=None):
return self
def predict(self, X=None):
return None
class VargEstimator(BaseEstimator):
"""Sklearn estimators shouldn't have vargs."""
def __init__(self, *vargs):
pass
#############################################################################
# The tests
def test_clone():
"""Tests that clone creates a correct deep copy.
We create an estimator, make a copy of its original state
(which, in this case, is the current state of the estimator),
and check that the obtained copy is a correct deep copy.
"""
from sklearn.feature_selection import SelectFpr, f_classif
selector = SelectFpr(f_classif, alpha=0.1)
new_selector = clone(selector)
assert_true(selector is not new_selector)
assert_equal(selector.get_params(), new_selector.get_params())
selector = SelectFpr(f_classif, alpha=np.zeros((10, 2)))
new_selector = clone(selector)
assert_true(selector is not new_selector)
def test_clone_2():
"""Tests that clone doesn't copy everything.
We first create an estimator, give it an own attribute, and
make a copy of its original state. Then we check that the copy doesn't
have the specific attribute we manually added to the initial estimator.
"""
from sklearn.feature_selection import SelectFpr, f_classif
selector = SelectFpr(f_classif, alpha=0.1)
selector.own_attribute = "test"
new_selector = clone(selector)
assert_false(hasattr(new_selector, "own_attribute"))
def test_clone_buggy():
"""Check that clone raises an error on buggy estimators."""
buggy = Buggy()
buggy.a = 2
assert_raises(RuntimeError, clone, buggy)
no_estimator = NoEstimator()
assert_raises(TypeError, clone, no_estimator)
varg_est = VargEstimator()
assert_raises(RuntimeError, clone, varg_est)
def test_clone_empty_array():
"""Regression test for cloning estimators with empty arrays"""
clf = MyEstimator(empty=np.array([]))
clf2 = clone(clf)
assert_array_equal(clf.empty, clf2.empty)
clf = MyEstimator(empty=sp.csr_matrix(np.array([[0]])))
clf2 = clone(clf)
assert_array_equal(clf.empty.data, clf2.empty.data)
def test_repr():
"""Smoke test the repr of the base estimator."""
my_estimator = MyEstimator()
repr(my_estimator)
test = T(K(), K())
assert_equal(
repr(test),
"T(a=K(c=None, d=None), b=K(c=None, d=None))"
)
some_est = T(a=["long_params"] * 1000)
assert_equal(len(repr(some_est)), 415)
def test_str():
"""Smoke test the str of the base estimator"""
my_estimator = MyEstimator()
str(my_estimator)
def test_get_params():
test = T(K(), K())
assert_true('a__d' in test.get_params(deep=True))
assert_true('a__d' not in test.get_params(deep=False))
test.set_params(a__d=2)
assert_true(test.a.d == 2)
assert_raises(ValueError, test.set_params, a__a=2)
def test_get_params_deprecated():
# deprecated attribute should not show up as params
est = DeprecatedAttributeEstimator(a=1)
assert_true('a' in est.get_params())
assert_true('a' in est.get_params(deep=True))
assert_true('a' in est.get_params(deep=False))
assert_true('b' not in est.get_params())
assert_true('b' not in est.get_params(deep=True))
assert_true('b' not in est.get_params(deep=False))
def test_is_classifier():
svc = SVC()
assert_true(is_classifier(svc))
assert_true(is_classifier(GridSearchCV(svc, {'C': [0.1, 1]})))
assert_true(is_classifier(Pipeline([('svc', svc)])))
assert_true(is_classifier(Pipeline([('svc_cv',
GridSearchCV(svc, {'C': [0.1, 1]}))])))
def test_set_params():
# test nested estimator parameter setting
clf = Pipeline([("svc", SVC())])
# non-existing parameter in svc
assert_raises(ValueError, clf.set_params, svc__stupid_param=True)
# non-existing parameter of pipeline
assert_raises(ValueError, clf.set_params, svm__stupid_param=True)
# we don't currently catch if the things in pipeline are estimators
#bad_pipeline = Pipeline([("bad", NoEstimator())])
#assert_raises(AttributeError, bad_pipeline.set_params,
#bad__stupid_param=True)
def test_score_sample_weight():
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn import datasets
rng = np.random.RandomState(0)
# test both ClassifierMixin and RegressorMixin
estimators = [DecisionTreeClassifier(max_depth=2),
DecisionTreeRegressor(max_depth=2)]
sets = [datasets.load_iris(),
datasets.load_boston()]
for est, ds in zip(estimators, sets):
est.fit(ds.data, ds.target)
# generate random sample weights
sample_weight = rng.randint(1, 10, size=len(ds.target))
# check that the score with and without sample weights are different
assert_not_equal(est.score(ds.data, ds.target),
est.score(ds.data, ds.target,
sample_weight=sample_weight),
msg="Unweighted and weighted scores "
"are unexpectedly equal")
| bsd-3-clause |
brianlorenz/COSMOS_IMACS_Redshifts | PlotCodes/SED_Ratio.py | 1 | 13733 | #Finds the ratio between our observed spectra and the ULTRAVista ones so that we can flux calibrate the data
#Usage: run SED_Ratio.py a6 to find the ratio for the a6 mask.
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import ascii
import sys, os, string
import pandas as pd
from astropy.io import fits
from astropy.convolution import convolve, Box1DKernel
from scipy.interpolate import splrep, splev
from scipy.signal import medfilt
#Folder to save the figures
figout = '/Users/blorenz/COSMOS/COSMOSData/SED_Ratio_Images/'
#The location with the file for all of our data
ourdatapath = '/Users/blorenz/COSMOS/COSMOSData/all_c_hasinger.txt'
#Location for data files from the SED fitting code
seddatapath = '/Users/blorenz/COSMOS/COSMOSData/SEDmodels/'
#Location for all of the .fits files
fitsdatapath = '/Users/blorenz/COSMOS/COSMOSData/corFitsFileOut/'
#Where the calibrated spectra are stored (not necessary)
caldatapath = '/Users/blorenz/COSMOS/COSMOSData/flxFitsFileOut/'
letnum = sys.argv[1]
#Read in all of our data
ourdata = ascii.read(ourdatapath).to_pandas()
ourdata = ourdata[ourdata.ImageName.str.contains('feb1' + letnum[1] + '_' + letnum[0]) == True]
ourdata = ourdata[ourdata.Unsure == 0]
ourdata = ourdata[ourdata.Bad == 0]
ourdata = ourdata[ourdata.Flag3 == 0]
ourdata = ourdata[ourdata.Flag1 == 0]
#Division function
def divz(X,Y):
return X/np.where(Y,Y,Y+1)*np.not_equal(Y,0)
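#e.g. divz(np.array([1., 2.]), np.array([2., 0.])) returns array([0.5, 0.]):
#zeros in the denominator give 0 instead of a divide-by-zero warning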
#Polynomial fitting function (linear least squares via SVD)
def svdfit(b,y):
decomp = np.linalg.svd(b,full_matrices=False)
sol1 = np.transpose(decomp[2])
sol2 = divz(1.0,decomp[1])
sol3 = np.dot(np.transpose(decomp[0]),y)
if np.sometrue(sol3):
solr = (sol2*sol3)
soll = np.dot(sol1,solr)
else:
soll = np.zeros(sol3.shape)
return soll
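#Hedged sketch (not executed by this script): svdfit solves a linear
#least-squares problem, e.g. fitting a quadratic with a Vandermonde basis:
#    x = np.linspace(0, 1, 50)
#    y = 2*x**2 - x + 0.5
#    coeffs = svdfit(np.vander(x, 3), y)   #approximately [2.0, -1.0, 0.5]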
#Array to store the model ratios
modelrat = []
#Array to store the flats
flatarr = []
#Cycle through each object in the data
for i in range(0,len(ourdata)):
#Find the identifying pieces of the string. e.g. 'a' '6' '026110'
let = ourdata.iloc[i].ImageName[17]
num = ourdata.iloc[i].ImageName[15]
objid = ourdata.iloc[i].ImageName[4:10]
#Read the corresponding SED model, e.g. '026110_a6.dat'
if os.path.exists(seddatapath + objid + '_' + let + num + '.dat'):
sedmodel = ascii.read(seddatapath + objid + '_' + let + num + '.dat').to_pandas()
#Read the corresponding spectrum
hdu = fits.open(fitsdatapath + ourdata.iloc[i].ImageName)[0]
head = hdu.header
fitsdata = hdu.data
#Calculate the wavelength range for the data
crval1 = head["crval1"]
crpix1 = head["crpix1"]
cdelt1 = head["cdelt1"]
naxis1 = head["naxis1"]
dcflag = head["dc-flag"]
wavelength = (1.0+np.arange(naxis1)-crpix1)*cdelt1 + crval1
if dcflag: wavelength = np.power(10.0,wavelength)
        #Split the fits data into its components
spec = fitsdata[0]
orig = fitsdata[1]
noise = fitsdata[2]
flat = fitsdata[3]
sky = fitsdata[4]
tc = fitsdata[5]
mask = fitsdata[6]
fit = fitsdata[7]
#Ranges where we will check the red, green, blue signal to noise
blueidx = np.logical_and(wavelength > 5500, wavelength < 6000)
greenidx = np.logical_and(wavelength > 6500, wavelength < 7500)
redidx = np.logical_and(wavelength > 8500, wavelength < 9000)
#Check if the signal to noise ratio [S/N] is good enough
sn = divz(spec,noise)
bluesn = np.median(sn[blueidx])
greensn = np.median(sn[greenidx])
redsn = np.median(sn[redidx])
if (redsn > 2 and greensn > 10 and bluesn > 5):
#Clip the model to the size of the spectrum:
idxarr = np.where(np.logical_and(sedmodel['lambda']>=np.min(wavelength), sedmodel['lambda']<=np.max(wavelength)))
sedmodel = sedmodel.truncate(np.min(idxarr),np.max(idxarr))
#Make the model the same resolution as the spectrum
sdtck = splrep(sedmodel["lambda"],sedmodel["model"],s=0,k=3)
himodel = splev(wavelength,sdtck)
#Divide the spectrum by the model to get the ratio
modelrat.append(divz(spec,himodel))
#Add the flat to the array only if everything worked
flatarr.append(flat)
else: print 'SN too low for ' + objid + '_' + let + num
else: print 'Cannot Find File for ' + objid + '_' + let + num
#Stack the appended arrays into a single array
modelrat = np.vstack(modelrat)
flatarr = np.vstack(flatarr)
#We only want the median between the good data, so 6500-7500 Angstroms
#Get the indices
goodidx = np.logical_and(wavelength > 6500, wavelength < 7500)
#Find the median of the flat in the right range
medflat = np.median(flat[goodidx])
#Compute the deblazed ratio for each object
deblazarr = divz(modelrat,flatarr/medflat)
deblaze50 = np.median(deblazarr,axis=0)
deblaze16 = np.percentile(deblazarr,16,axis=0)
deblaze84 = np.percentile(deblazarr,84,axis=0)
#Find the median, 16th percentile, and 84th percentile of the models pixel by pixel
perc50 = np.median(modelrat,axis=0)
perc16 = np.percentile(modelrat,16,axis=0)
perc84 = np.percentile(modelrat,84,axis=0)
flat = np.median(flatarr,axis=0)
#Normalize the models first, then find the three percentiles
#Find the median of each model
#modelratmeds = [np.median(i) for i in modelrat]
medlist = []
demedlist = []
for j in range(0,len(modelrat)):
#Compute the median over only the good pixel range
medlist.append(np.median(modelrat[j][goodidx]))
demedlist.append(np.median(deblazarr[j][goodidx]))
#Make the median lists into arrays
meds = np.asarray(medlist)
demeds = np.asarray(demedlist)
#Divide through by the median lists to normalize each line of the array
nmodelrat = np.transpose(np.divide(np.transpose(modelrat),meds))
ndeblaze = np.transpose(np.divide(np.transpose(deblazarr),demeds))
#Find the 50th, 16th, 84th percentile pixel by pixel in each of the two arrays
normperc50 = np.median(nmodelrat,axis=0)
normperc16 = np.percentile(nmodelrat,16,axis=0)
normperc84 = np.percentile(nmodelrat,84,axis=0)
normdeblaze50 = np.median(ndeblaze,axis=0)
normdeblaze16 = np.percentile(ndeblaze,16,axis=0)
normdeblaze84 = np.percentile(ndeblaze,84,axis=0)
#Find their medians
#med50 = np.median(perc50[goodidx])
#med16 = np.median(perc16[goodidx])
#med84 = np.median(perc84[goodidx])
#Divide through by the median
#normperc50 = divz(perc50,med50)
#normperc16 = divz(perc16,med50)
#normperc84 = divz(perc84,med50)
#Deblazing the spectra:
#Divide the particular flat by its median to normalize it
#normflat = divz(flat,medflat)
#Now divide each of the models by the normalized flat
#deblaze50 = divz(perc50,normflat)
#deblaze16 = divz(perc16,normflat)
#deblaze84 = divz(perc84,normflat)
#Finally, normalize them
#meddeblaze = np.median(deblaze50[goodidx])
#normdeblaze50 = divz(deblaze50,meddeblaze)
#normdeblaze16 = divz(deblaze16,meddeblaze)
#normdeblaze84 = divz(deblaze84,meddeblaze)
#Polynomial fitting to the deblazed, normalized spectrum
order = 4
#only fit to data before 9200 angstroms since it is noisy after that
polyidx = np.logical_and((wavelength < 9200),(wavelength > 5250))
#Median filter the spectrum to get rid of the giant peaks
snormdeblaze50 = medfilt(normdeblaze50,51)
#Fit a polynomial to the median-filtered spectrum, but only over the desired wavelength range
poly = np.polyfit(wavelength[polyidx],snormdeblaze50[polyidx],order)
#Get the output of this polynomial
polygraph = np.polyval(poly,wavelength)
#Plot the data, smooth, model, and fit
fig, axarr = plt.subplots(2,2,figsize=(14,7),sharex=True)
ax0,ax1,ax2,ax3 = axarr[0,0],axarr[0,1],axarr[1,0],axarr[1,1]
ax0.plot(wavelength,perc50, color='red', label = 'Median')
ax0.plot(wavelength,perc84, color='black', label = '84th Percentile')
ax0.plot(wavelength,perc16, color='gray', label = '16th Percentile')
ax1.plot(wavelength,normperc50, color='red', label = 'Median')
ax1.plot(wavelength,normperc84, color='black', label = '84th Percentile')
ax1.plot(wavelength,normperc16, color='gray', label = '16th Percentile')
ax2.plot(wavelength,deblaze50, color='red', label = 'Median')
ax2.plot(wavelength,deblaze84, color='black', label = '84th Percentile')
ax2.plot(wavelength,deblaze16, color='gray', label = '16th Percentile')
ax3.plot(wavelength,normdeblaze50, color='red', label = 'Median')
ax3.plot(wavelength,normdeblaze84, color='black', label = '84th Percentile')
ax3.plot(wavelength,normdeblaze16, color='gray', label = '16th Percentile')
ax3.plot(wavelength,polygraph, color='blue', label = 'Polynomial Fit ' + str(order))
ax3.plot([9200,9200],[-1000,1000],color='blue')
ax0.set_title('Raw Ratio, Mask ' + let + num)
ax1.set_title('Normalized Raw Ratio')
ax2.set_title('Deblazed Ratio')
ax3.set_title('Normalized Deblazed Ratio')
ax0.set_ylim(0,3*np.median(perc50))
ax1.set_ylim(0,3*np.median(normperc50))
ax2.set_ylim(0,3*np.median(deblaze50))
ax3.set_ylim(0,3*np.median(normdeblaze50))
ax3.set_xlabel('Wavelength (Angstroms)')
ax0.legend()
ax3.legend()
plt.tight_layout()
fig.savefig(figout + letnum + '_SEDratio.png')
plt.close()
#Flux calibrate the objects (currently a single object, i=9; swap the 'if' for the commented loop to run all)
i=9
if 1 == 1:
#for i in range(1,30):
OBJID = ourdata.iloc[i].ImageName[4:10]
objid = OBJID
#Read the corresponding SED model, e.g. '026110_a6.dat'
if os.path.exists(seddatapath + objid + '_' + let + num + '.dat'):
sedmodel2 = ascii.read(seddatapath + objid + '_' + let + num + '.dat').to_pandas()
#Read the corresponding spectrum
hdu = fits.open(fitsdatapath + ourdata.iloc[i].ImageName)[0]
head = hdu.header
fitsdata = hdu.data
#Calculate the wavelength range for the data
crval1 = head["crval1"]
crpix1 = head["crpix1"]
cdelt1 = head["cdelt1"]
naxis1 = head["naxis1"]
dcflag = head["dc-flag"]
exptime = head['exptime']
wavelength = (1.0+np.arange(naxis1)-crpix1)*cdelt1 + crval1
if dcflag: wavelength = np.power(10.0,wavelength)
        #Split the fits data into its components
spec = fitsdata[0]
orig = fitsdata[1]
noise = fitsdata[2]
flat = fitsdata[3]
sky = fitsdata[4]
tc = fitsdata[5]
mask = fitsdata[6]
fit = fitsdata[7]
#Flux calibrate the spectrum
m = np.median(divz(flat[goodidx],medflat))
corspec = divz(spec/exptime,polygraph*flat*m/medflat)
#Plot the flux-calibrated corrected spectrum for one object
fig, axarr = plt.subplots(2,2,figsize=(14,8))#,sharex=True)
ax1,ax2,ax0,ax3 = axarr[0,0],axarr[0,1],axarr[1,0],axarr[1,1]
#ax0.plot(wavelength,noise, color='noise', label = 'Noise')
#ax1.plot(wavelength,normdeblaze84, color='lightgray', label = '+$\sigma$')
#ax1.plot(wavelength,normdeblaze16, color='gray', label = '-$\sigma$')
#ax1.plot(wavelength,normdeblaze50, color='Black', label = 'Median')
ax1.plot(wavelength,polygraph, color='cyan',lw=3, label = 'Polynomial Fit') #, order ' + str(order))
ax1.plot([9200,9200],[-1000,1000],color='C1',ls='-')
#ax2.plot(wavelength,corspec,color='cornflowerblue',label='Corrected Spectrum')
ax3.plot(wavelength,flat,color='cornflowerblue',label='Flat')
#Plot Dan's flux corrected spectrum if it exists
flxfits = caldatapath + 'flx_' + OBJID + '_feb1' + num + '_' + let + 'big.fits'
if os.path.exists(flxfits):
flxdata = fits.open(flxfits)[0].data
flxspec = flxdata[0]
model = flxdata[3]
#ax2.plot(wavelength,flxspec*10**(-17),color='cornflowerblue',label='Flux-Calibrated Spectrum', alpha=1)
ax2.plot(wavelength,model*10**(-17),color='red',label='Stellar Population Model',alpha=1)
modeldiv = divz(spec,model)
#ax2.plot(sedmodel2['lambda'],sedmodel2['model'],color='red',label='Model Spectrum',alpha=0.5)
        ax0.plot(wavelength,modeldiv, color='cornflowerblue', label = 'Original Spectrum')
titlefont = 18
axisfont = 14
ticklabel = 12
ax0.set_title('Original Spectrum ' + OBJID + '_' + letnum,fontsize=titlefont)
ax1.set_title('Normalized Deblazed Flux Ratio',fontsize=titlefont)
#ax2.set_title('Flux-Calibrated Spectrum',fontsize=titlefont)
ax3.set_title('Flatfield Spectrum',fontsize=titlefont)
scale = 2.5
ax0.set_ylim(0,scale*np.median(modeldiv))
ax3.set_ylim(0,scale*np.median(flat))
ax1.set_ylim(0,scale*np.median(normdeblaze50))
ax2.set_ylim(0,scale*np.median(flxspec*10**(-17)))
#ax2.legend()
#ax1.legend()
ax1.set_xlabel('Wavelength ($\AA$)',fontsize = axisfont)
ax2.set_xlabel('Wavelength ($\AA$)',fontsize = axisfont)
ax0.set_xlabel('Wavelength ($\AA$)',fontsize = axisfont)
ax3.set_xlabel('Wavelength ($\AA$)',fontsize = axisfont)
ax0.set_ylabel('Counts',fontsize = axisfont)
ax3.set_ylabel('Counts',fontsize = axisfont)
ax1.set_ylabel('Normalized Counts',fontsize = axisfont)
ax2.set_ylabel('Flux (erg/s/${cm}^2/\AA$)',fontsize = axisfont)
ax0.set_xlim(4800,9000)
ax1.set_xlim(4800,9000)
ax2.set_xlim(4800,9000)
ax3.set_xlim(4800,9000)
ax0.tick_params(labelsize=ticklabel)
ax1.tick_params(labelsize=ticklabel)
ax2.tick_params(labelsize=ticklabel)
ax3.tick_params(labelsize=ticklabel)
plt.tight_layout()
plt.show()
fig.savefig(figout + 'flx_' + OBJID + '_' + letnum + '.png')
plt.close()
| mit |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/pandas/tseries/tests/test_base.py | 7 | 116023 | from __future__ import print_function
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from pandas import (Series, Index, Int64Index, Timestamp, Period,
DatetimeIndex, PeriodIndex, TimedeltaIndex,
Timedelta, timedelta_range, date_range, Float64Index,
_np_version_under1p10)
import pandas.tslib as tslib
import pandas.tseries.period as period
import pandas.util.testing as tm
from pandas.tests.test_base import Ops
class TestDatetimeIndexOps(Ops):
tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific']
def setUp(self):
super(TestDatetimeIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['date', 'time', 'microsecond', 'nanosecond',
'is_month_start', 'is_month_end',
'is_quarter_start',
'is_quarter_end', 'is_year_start',
'is_year_end', 'weekday_name'],
lambda x: isinstance(x, DatetimeIndex))
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH7206
for op in ['year', 'day', 'second', 'weekday']:
self.assertRaises(TypeError, lambda x: getattr(self.dt_series, op))
# attribute access should still work!
s = Series(dict(year=2000, month=1, day=10))
self.assertEqual(s.year, 2000)
self.assertEqual(s.month, 1)
self.assertEqual(s.day, 10)
self.assertRaises(AttributeError, lambda: s.weekday)
def test_asobject_tolist(self):
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [Timestamp('2013-01-31'),
Timestamp('2013-02-28'),
Timestamp('2013-03-31'),
Timestamp('2013-04-30')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx', tz='Asia/Tokyo')
expected_list = [Timestamp('2013-01-31', tz='Asia/Tokyo'),
Timestamp('2013-02-28', tz='Asia/Tokyo'),
Timestamp('2013-03-31', tz='Asia/Tokyo'),
Timestamp('2013-04-30', tz='Asia/Tokyo')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
pd.NaT, datetime(2013, 1, 4)], name='idx')
expected_list = [Timestamp('2013-01-01'),
Timestamp('2013-01-02'), pd.NaT,
Timestamp('2013-01-04')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
for tz in self.tz:
# monotonic
idx1 = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz=tz)
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], tz=tz)
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timestamp('2011-01-01', tz=tz))
self.assertEqual(idx.max(), Timestamp('2011-01-03', tz=tz))
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = DatetimeIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
self.assertEqual(np.min(dr),
Timestamp('2016-01-15 00:00:00', freq='D'))
self.assertEqual(np.max(dr),
Timestamp('2016-01-20 00:00:00', freq='D'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, dr, out=0)
self.assertEqual(np.argmin(dr), 0)
self.assertEqual(np.argmax(dr), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, dr, out=0)
def test_round(self):
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=5,
freq='30Min', tz=tz)
elt = rng[1]
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 01:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(rng.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with tm.assertRaisesRegexp(ValueError, msg):
rng.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, rng.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_repeat_range(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
for tz in self.tz:
index = pd.date_range('2001-01-01', periods=2, freq='D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-02', '2001-01-02'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.date_range('2001-01-01', periods=2, freq='2D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-03', '2001-01-03'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.DatetimeIndex(['2001-01-01', 'NaT', '2003-01-01'],
tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01', '2001-01-01',
'NaT', 'NaT', 'NaT',
'2003-01-01', '2003-01-01', '2003-01-01'],
tz=tz)
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_repeat(self):
reps = 2
msg = "the 'axis' parameter is not supported"
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=2,
freq='30Min', tz=tz)
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
])
res = rng.repeat(reps)
tm.assert_index_equal(res, expected_rng)
self.assertIsNone(res.freq)
tm.assert_index_equal(np.repeat(rng, reps), expected_rng)
tm.assertRaisesRegexp(ValueError, msg, np.repeat,
rng, reps, axis=1)
def test_representation(self):
idx = []
idx.append(DatetimeIndex([], freq='D'))
idx.append(DatetimeIndex(['2011-01-01'], freq='D'))
idx.append(DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'
], freq='H', tz='Asia/Tokyo'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='US/Eastern'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='UTC'))
exp = []
exp.append("""DatetimeIndex([], dtype='datetime64[ns]', freq='D')""")
exp.append("DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', "
"freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+09:00', "
"'2011-01-01 10:00:00+09:00', '2011-01-01 11:00:00+09:00']"
", dtype='datetime64[ns, Asia/Tokyo]', freq='H')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00-05:00', "
"'2011-01-01 10:00:00-05:00', 'NaT'], "
"dtype='datetime64[ns, US/Eastern]', freq=None)")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+00:00', "
"'2011-01-01 10:00:00+00:00', 'NaT'], "
"dtype='datetime64[ns, UTC]', freq=None)""")
with pd.option_context('display.width', 300):
for indx, expected in zip(idx, exp):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(indx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
idx7 = DatetimeIndex(['2011-01-01 09:00', '2011-01-02 10:15'])
exp1 = """Series([], dtype: datetime64[ns])"""
exp2 = """0 2011-01-01
dtype: datetime64[ns]"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: datetime64[ns]"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: datetime64[ns]"""
exp5 = """0 2011-01-01 09:00:00+09:00
1 2011-01-01 10:00:00+09:00
2 2011-01-01 11:00:00+09:00
dtype: datetime64[ns, Asia/Tokyo]"""
exp6 = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 NaT
dtype: datetime64[ns, US/Eastern]"""
exp7 = """0 2011-01-01 09:00:00
1 2011-01-02 10:15:00
dtype: datetime64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4,
idx5, idx6, idx7],
[exp1, exp2, exp3, exp4,
exp5, exp6, exp7]):
result = repr(Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'],
freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
exp1 = """DatetimeIndex: 0 entries
Freq: D"""
exp2 = """DatetimeIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """DatetimeIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """DatetimeIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = ("DatetimeIndex: 3 entries, 2011-01-01 09:00:00+09:00 "
"to 2011-01-01 11:00:00+09:00\n"
"Freq: H")
exp6 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00-05:00 to NaT"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6],
[exp1, exp2, exp3, exp4, exp5, exp6]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T',
'S', 'L', 'U'],
['day', 'day', 'day', 'day', 'hour',
'minute', 'second', 'millisecond',
'microsecond']):
for tz in self.tz:
idx = pd.date_range(start='2013-04-01', periods=30, freq=freq,
tz=tz)
self.assertEqual(idx.resolution, expected)
def test_union(self):
for tz in self.tz:
# union
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=10, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=8, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
def test_add_iadd(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng + delta
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng + 1
expected = pd.date_range('2000-01-01 10:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add a datelike to a DatetimeIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx + Timestamp('2011-01-01')
with tm.assertRaisesRegexp(TypeError, msg):
Timestamp('2011-01-01') + idx
def test_add_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now raises
# TypeError (GH14164)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
with tm.assertRaises(TypeError):
dti + dti
with tm.assertRaises(TypeError):
dti_tz + dti_tz
with tm.assertRaises(TypeError):
dti_tz + dti
with tm.assertRaises(TypeError):
dti + dti_tz
def test_difference(self):
for tz in self.tz:
# diff
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=3, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_diff = rng.difference(other)
tm.assert_index_equal(result_diff, expected)
def test_sub_isub(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
result = rng - delta
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng - 1
expected = pd.date_range('2000-01-01 08:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_sub_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now changed to
        # return subtraction -> TimedeltaIndex (GH ...)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
dti_tz2 = date_range('20130101', periods=3).tz_localize('UTC')
expected = TimedeltaIndex([0, 0, 0])
result = dti - dti
tm.assert_index_equal(result, expected)
result = dti_tz - dti_tz
tm.assert_index_equal(result, expected)
with tm.assertRaises(TypeError):
dti_tz - dti
with tm.assertRaises(TypeError):
dti - dti_tz
with tm.assertRaises(TypeError):
dti_tz - dti_tz2
# isub
dti -= dti
tm.assert_index_equal(dti, expected)
# different length raises ValueError
dti1 = date_range('20130101', periods=3)
dti2 = date_range('20130101', periods=4)
with tm.assertRaises(ValueError):
dti1 - dti2
# NaN propagation
dti1 = DatetimeIndex(['2012-01-01', np.nan, '2012-01-03'])
dti2 = DatetimeIndex(['2012-01-02', '2012-01-03', np.nan])
expected = TimedeltaIndex(['1 days', np.nan, np.nan])
result = dti2 - dti1
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'D']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_comp_nat(self):
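        # pd.NaT never compares equal to anything (itself included), so
        # ==, < and > against NaT are elementwise False and != is True.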
left = pd.DatetimeIndex([pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')])
right = pd.DatetimeIndex([pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
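        # value_counts() sorts by descending count, which is why the
        # expected index runs backwards from 18:00 at a -1H step.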
for tz in self.tz:
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=10)
            # create repeated values: the n-th element is repeated n+1 times
idx = DatetimeIndex(np.repeat(idx.values, range(1, len(idx) + 1)),
tz=tz)
exp_idx = pd.date_range('2011-01-01 18:00', freq='-1H', periods=10,
tz=tz)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = pd.date_range('2011-01-01 09:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(idx.unique(), expected)
idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 09:00',
'2013-01-01 09:00', '2013-01-01 08:00',
'2013-01-01 08:00', pd.NaT], tz=tz)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00'],
tz=tz)
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00',
pd.NaT], tz=tz)
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False),
expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(DatetimeIndex,
([0, 1, 0], [0, 0, -1], [0, -1, -1],
['2015', '2015', '2016'], ['2015', '2015', '2014'])):
tm.assertIn(idx[0], idx)
def test_order(self):
# with freq
idx1 = DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], freq='D', name='idx')
idx2 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H',
tz='Asia/Tokyo', name='tzidx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assert_numpy_array_equal(indexer,
np.array([2, 1, 0]),
check_dtype=False)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
# without freq
for tz in self.tz:
idx1 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx1')
exp1 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx1')
idx2 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx2')
exp2 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx2')
idx3 = DatetimeIndex([pd.NaT, '2011-01-03', '2011-01-05',
'2011-01-02', pd.NaT], tz=tz, name='idx3')
exp3 = DatetimeIndex([pd.NaT, pd.NaT, '2011-01-02', '2011-01-03',
'2011-01-05'], tz=tz, name='idx3')
for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx[0]
self.assertEqual(result, Timestamp('2011-01-01', tz=idx.tz))
result = idx[0:5]
expected = pd.date_range('2011-01-01', '2011-01-05', freq='D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.date_range('2011-01-01', '2011-01-09', freq='2D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.date_range('2011-01-12', '2011-01-24', freq='3D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = DatetimeIndex(['2011-01-05', '2011-01-04', '2011-01-03',
'2011-01-02', '2011-01-01'],
freq='-1D', tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
# GH 10115
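        # append() invalidates freq; drop_duplicates() restores the original
        # values but leaves freq as None.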
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx.take([0])
self.assertEqual(result, Timestamp('2011-01-01', tz=idx.tz))
result = idx.take([0, 1, 2])
expected = pd.date_range('2011-01-01', '2011-01-03', freq='D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.date_range('2011-01-01', '2011-01-05', freq='2D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.date_range('2011-01-08', '2011-01-02', freq='-3D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = DatetimeIndex(['2011-01-04', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = DatetimeIndex(['2011-01-29', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_take_invalid_kwargs(self):
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
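        # Round-tripping through the raw int64 values with freq='infer'
        # should recover the original frequency string.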
for freq in ['A', '2A', '-2A', 'Q', '-1Q', 'M', '-1M', 'D', '3D',
'-3D', 'W', '-1W', 'H', '2H', '-2H', 'T', '2T', 'S',
'-3S']:
idx = pd.date_range('2011-01-01 09:00:00', freq=freq, periods=10)
result = pd.DatetimeIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
def test_nat_new(self):
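        # _nat_new is internal API: it returns an all-NaT index of the same
        # length (box=True) or the raw iNaT int64 values (box=False).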
idx = pd.date_range('2011-01-01', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.DatetimeIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
for tz in self.tz:
idx = pd.DatetimeIndex([], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
            idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-01 11:00',
                                    '2011-01-01 12:00'], name='xxx', tz=tz)
            tm.assert_index_equal(idx.shift(0, freq='H'), idx)
            exp = pd.DatetimeIndex(['2011-01-01 13:00', '2011-01-01 14:00',
                                    '2011-01-01 15:00'], name='xxx', tz=tz)
            tm.assert_index_equal(idx.shift(3, freq='H'), exp)
            exp = pd.DatetimeIndex(['2011-01-01 07:00', '2011-01-01 08:00',
                                    '2011-01-01 09:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
def test_nat(self):
self.assertIs(pd.DatetimeIndex._na_value, pd.NaT)
self.assertIs(pd.DatetimeIndex([])._na_value, pd.NaT)
for tz in [None, 'US/Eastern', 'UTC']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], tz=tz)
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.DatetimeIndex(['2011-01-01', 'NaT'], tz=tz)
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
for tz in [None, 'UTC', 'US/Eastern', 'Asia/Tokyo']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'])
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'],
tz='US/Pacific')
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
            # same internal values, different tz
idx3 = pd.DatetimeIndex._simple_new(idx.asi8, tz='US/Pacific')
tm.assert_numpy_array_equal(idx.asi8, idx3.asi8)
self.assertFalse(idx.equals(idx3))
self.assertFalse(idx.equals(idx3.copy()))
self.assertFalse(idx.equals(idx3.asobject))
self.assertFalse(idx.asobject.equals(idx3))
self.assertFalse(idx.equals(list(idx3)))
self.assertFalse(idx.equals(pd.Series(idx3)))
class TestTimedeltaIndexOps(Ops):
def setUp(self):
super(TestTimedeltaIndexOps, self).setUp()
mask = lambda x: isinstance(x, TimedeltaIndex)
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = []
def test_ops_properties(self):
self.check_ops_properties(['days', 'hours', 'minutes', 'seconds',
'milliseconds'])
self.check_ops_properties(['microseconds', 'nanoseconds'])
def test_asobject_tolist(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'),
Timedelta('3 days'), Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), pd.NaT,
timedelta(days=4)], name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'), pd.NaT,
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
# monotonic
idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
            self.assertEqual(idx.min(), Timedelta('1 days'))
            self.assertEqual(idx.max(), Timedelta('3 days'))
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = TimedeltaIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
td = TimedeltaIndex(np.asarray(dr))
self.assertEqual(np.min(td), Timedelta('16815 days'))
self.assertEqual(np.max(td), Timedelta('16820 days'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, td, out=0)
self.assertEqual(np.argmin(td), 0)
self.assertEqual(np.argmax(td), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, td, out=0)
def test_round(self):
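        # td covers five 30-minute steps; rounding to 'H' snaps each value
        # to the nearest hour (the :30 ties here resolve to the even hour).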
td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min')
elt = td[1]
expected_rng = TimedeltaIndex([
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 01:00:00'),
Timedelta('16801 days 02:00:00'),
Timedelta('16801 days 02:00:00'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(td.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with self.assertRaisesRegexp(ValueError, msg):
td.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, td.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_representation(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')"""
exp2 = ("TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', "
"freq='D')")
exp3 = ("TimedeltaIndex(['1 days', '2 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp4 = ("TimedeltaIndex(['1 days', '2 days', '3 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp5 = ("TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', "
"'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)")
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """Series([], dtype: timedelta64[ns])"""
exp2 = """0 1 days
dtype: timedelta64[ns]"""
exp3 = """0 1 days
1 2 days
dtype: timedelta64[ns]"""
exp4 = """0 1 days
1 2 days
2 3 days
dtype: timedelta64[ns]"""
exp5 = """0 1 days 00:00:01
1 2 days 00:00:00
2 3 days 00:00:00
dtype: timedelta64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex: 0 entries
Freq: D"""
exp2 = """TimedeltaIndex: 1 entries, 1 days to 1 days
Freq: D"""
exp3 = """TimedeltaIndex: 2 entries, 1 days to 2 days
Freq: D"""
exp4 = """TimedeltaIndex: 3 entries, 1 days to 3 days
Freq: D"""
exp5 = ("TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days "
"00:00:00")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = idx.summary()
self.assertEqual(result, expected)
def test_add_iadd(self):
        # only test adding/subtracting offsets, as '+' is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng + delta
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng + 1
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub_isub(self):
        # only test adding/subtracting offsets, as '-' is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng - delta
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng - 1
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
idx = TimedeltaIndex(['1 day', '2 day'])
msg = "cannot subtract a datelike from a TimedeltaIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx - Timestamp('2011-01-01')
result = Timestamp('2011-01-01') + idx
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
tm.assert_index_equal(result, expected)
def test_ops_compat(self):
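        # Dividing a timedelta by a timedelta-like offset yields a numeric
        # ratio, e.g. Timedelta('1 days') / Timedelta(hours=2) == 12.0, so
        # '1 days'..'10 days' over a 2-hour offset gives 12, 24, ..., 120.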
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
rng = timedelta_range('1 days', '10 days', name='foo')
# multiply
for offset in offsets:
self.assertRaises(TypeError, lambda: rng * offset)
# divide
expected = Int64Index((np.arange(10) + 1) * 12, name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected, exact=False)
# divide with nats
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = Float64Index([12, np.nan, 24], name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected)
        # don't allow division by NaT (maybe we could in the future)
self.assertRaises(TypeError, lambda: rng / pd.NaT)
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
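        # Timestamp/DatetimeIndex differences produce a TimedeltaIndex,
        # while subtracting datetimes from a TimedeltaIndex is a TypeError.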
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
self.assertRaises(TypeError, lambda: tdi - dt)
self.assertRaises(TypeError, lambda: tdi - dti)
self.assertRaises(TypeError, lambda: td - dt)
self.assertRaises(TypeError, lambda: td - dti)
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
self.assertEqual(result, expected)
self.assertIsInstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
self.assertRaises(TypeError, lambda: dt_tz - ts)
self.assertRaises(TypeError, lambda: dt_tz - dt)
self.assertRaises(TypeError, lambda: dt_tz - ts_tz2)
self.assertRaises(TypeError, lambda: dt - dt_tz)
self.assertRaises(TypeError, lambda: ts - dt_tz)
self.assertRaises(TypeError, lambda: ts_tz2 - ts)
self.assertRaises(TypeError, lambda: ts_tz2 - dt)
self.assertRaises(TypeError, lambda: ts_tz - ts_tz2)
# with dti
self.assertRaises(TypeError, lambda: dti - ts_tz)
self.assertRaises(TypeError, lambda: dti_tz - ts)
self.assertRaises(TypeError, lambda: dti_tz - ts_tz2)
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'H']:
idx = pd.TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
self.assertRaises(ValueError, lambda: tdi + dti[0:1])
self.assertRaises(ValueError, lambda: tdi[0:1] + dti)
# random indexes
self.assertRaises(TypeError, lambda: tdi + Int64Index([1, 2, 3]))
# this is a union!
# self.assertRaises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
self.assertEqual(result, expected)
result = td + dt
expected = Timestamp('20130102')
self.assertEqual(result, expected)
def test_comp_nat(self):
left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')])
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
idx = timedelta_range('1 days 09:00:00', freq='H', periods=10)
        # create repeated values: the n-th element is repeated n+1 times
idx = TimedeltaIndex(np.repeat(idx.values, range(1, len(idx) + 1)))
exp_idx = timedelta_range('1 days 18:00:00', freq='-1H', periods=10)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = timedelta_range('1 days 09:00:00', freq='H', periods=10)
tm.assert_index_equal(idx.unique(), expected)
idx = TimedeltaIndex(['1 days 09:00:00', '1 days 09:00:00',
'1 days 09:00:00', '1 days 08:00:00',
'1 days 08:00:00', pd.NaT])
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00'])
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00',
pd.NaT])
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(TimedeltaIndex, ([0, 1, 0], [0, 0, -1], [0, -1, -1],
['00:01:00', '00:01:00', '00:02:00'],
['00:01:00', '00:01:00', '00:00:01'])):
tm.assertIn(idx[0], idx)
def test_unknown_attribute(self):
# GH 9680
tdi = pd.timedelta_range(start=0, periods=10, freq='1s')
ts = pd.Series(np.random.normal(size=10), index=tdi)
self.assertNotIn('foo', ts.__dict__.keys())
self.assertRaises(AttributeError, lambda: ts.foo)
def test_order(self):
# GH 10295
idx1 = TimedeltaIndex(['1 day', '2 day', '3 day'], freq='D',
name='idx')
idx2 = TimedeltaIndex(
['1 hour', '2 hour', '3 hour'], freq='H', name='idx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, idx[::-1])
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
idx1 = TimedeltaIndex(['1 hour', '3 hour', '5 hour',
'2 hour ', '1 hour'], name='idx1')
exp1 = TimedeltaIndex(['1 hour', '1 hour', '2 hour',
'3 hour', '5 hour'], name='idx1')
idx2 = TimedeltaIndex(['1 day', '3 day', '5 day',
'2 day', '1 day'], name='idx2')
        exp2 = TimedeltaIndex(['1 day', '1 day', '2 day',
                               '3 day', '5 day'], name='idx2')
        # TODO(wesm): unused?
        # idx3 = TimedeltaIndex([pd.NaT, '3 minute', '5 minute',
        #                        '2 minute', pd.NaT], name='idx3')
        # exp3 = TimedeltaIndex([pd.NaT, pd.NaT, '2 minute', '3 minute',
        #                        '5 minute'], name='idx3')
        for idx, expected in [(idx1, exp1), (idx2, exp2)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx[0]
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx[0:5]
expected = pd.timedelta_range('1 day', '5 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.timedelta_range('1 day', '9 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.timedelta_range('12 day', '24 day', freq='3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = TimedeltaIndex(['5 day', '4 day', '3 day',
'2 day', '1 day'],
freq='-1D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx.take([0])
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx.take([-1])
self.assertEqual(result, pd.Timedelta('31 day'))
result = idx.take([0, 1, 2])
expected = pd.timedelta_range('1 day', '3 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.timedelta_range('1 day', '5 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.timedelta_range('8 day', '2 day', freq='-3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = TimedeltaIndex(['4 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = TimedeltaIndex(['29 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_take_invalid_kwargs(self):
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
        for freq in ['D', '3D', '-3D', 'H', '2H', '-2H', 'T', '2T', 'S',
                     '-3S']:
idx = pd.timedelta_range('1', freq=freq, periods=10)
result = pd.TimedeltaIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
def test_nat_new(self):
idx = pd.timedelta_range('1', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.TimedeltaIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
idx = pd.TimedeltaIndex([], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
idx = pd.TimedeltaIndex(['5 hours', '6 hours', '9 hours'], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.TimedeltaIndex(['8 hours', '9 hours', '12 hours'], name='xxx')
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = pd.TimedeltaIndex(['2 hours', '3 hours', '6 hours'], name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
tm.assert_index_equal(idx.shift(0, freq='T'), idx)
        exp = pd.TimedeltaIndex(['05:03:00', '06:03:00', '09:03:00'],
name='xxx')
tm.assert_index_equal(idx.shift(3, freq='T'), exp)
        exp = pd.TimedeltaIndex(['04:57:00', '05:57:00', '08:57:00'],
name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='T'), exp)
def test_repeat(self):
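        # repeat() tiles each element n times consecutively and drops freq;
        # np.repeat on an index dispatches to the same behavior.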
index = pd.timedelta_range('1 days', periods=2, freq='D')
exp = pd.TimedeltaIndex(['1 days', '1 days', '2 days', '2 days'])
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = TimedeltaIndex(['1 days', 'NaT', '3 days'])
exp = TimedeltaIndex(['1 days', '1 days', '1 days',
'NaT', 'NaT', 'NaT',
'3 days', '3 days', '3 days'])
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_nat(self):
self.assertIs(pd.TimedeltaIndex._na_value, pd.NaT)
self.assertIs(pd.TimedeltaIndex([])._na_value, pd.NaT)
idx = pd.TimedeltaIndex(['1 days', '2 days'])
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.TimedeltaIndex(['1 days', 'NaT'])
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
idx = pd.TimedeltaIndex(['1 days', '2 days', 'NaT'])
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.TimedeltaIndex(['2 days', '1 days', 'NaT'])
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.asobject.equals(idx2.asobject))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
class TestPeriodIndexOps(Ops):
def setUp(self):
super(TestPeriodIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['qyear'],
lambda x: isinstance(x, PeriodIndex))
def test_asobject_tolist(self):
idx = pd.period_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [pd.Period('2013-01-31', freq='M'),
pd.Period('2013-02-28', freq='M'),
pd.Period('2013-03-31', freq='M'),
pd.Period('2013-04-30', freq='M')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = PeriodIndex(['2013-01-01', '2013-01-02', 'NaT',
'2013-01-04'], freq='D', name='idx')
expected_list = [pd.Period('2013-01-01', freq='D'),
pd.Period('2013-01-02', freq='D'),
pd.Period('NaT', freq='D'),
pd.Period('2013-01-04', freq='D')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
tm.assert_index_equal(result, expected)
for i in [0, 1, 3]:
self.assertEqual(result[i], expected[i])
self.assertIs(result[2], pd.NaT)
self.assertEqual(result.name, expected.name)
result_list = idx.tolist()
for i in [0, 1, 3]:
self.assertEqual(result_list[i], expected_list[i])
self.assertIs(result_list[2], pd.NaT)
def test_minmax(self):
# monotonic
idx1 = pd.PeriodIndex([pd.NaT, '2011-01-01', '2011-01-02',
'2011-01-03'], freq='D')
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.PeriodIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], freq='D')
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), pd.Period('2011-01-01', freq='D'))
self.assertEqual(idx.max(), pd.Period('2011-01-03', freq='D'))
self.assertEqual(idx1.argmin(), 1)
self.assertEqual(idx2.argmin(), 0)
self.assertEqual(idx1.argmax(), 3)
self.assertEqual(idx2.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = PeriodIndex([], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
obj = PeriodIndex([pd.NaT], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
obj = PeriodIndex([pd.NaT, pd.NaT, pd.NaT], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
def test_numpy_minmax(self):
pr = pd.period_range(start='2016-01-15', end='2016-01-20')
self.assertEqual(np.min(pr), Period('2016-01-15', freq='D'))
self.assertEqual(np.max(pr), Period('2016-01-20', freq='D'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, pr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, pr, out=0)
self.assertEqual(np.argmin(pr), 0)
self.assertEqual(np.argmax(pr), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, pr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, pr, out=0)
def test_representation(self):
# GH 7601
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'],
freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00',
'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
idx10 = PeriodIndex(['2011-01-01', '2011-02-01'], freq='3D')
exp1 = """PeriodIndex([], dtype='period[D]', freq='D')"""
exp2 = """PeriodIndex(['2011-01-01'], dtype='period[D]', freq='D')"""
exp3 = ("PeriodIndex(['2011-01-01', '2011-01-02'], dtype='period[D]', "
"freq='D')")
exp4 = ("PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='period[D]', freq='D')")
exp5 = ("PeriodIndex(['2011', '2012', '2013'], dtype='period[A-DEC]', "
"freq='A-DEC')")
exp6 = ("PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], "
"dtype='period[H]', freq='H')")
exp7 = ("PeriodIndex(['2013Q1'], dtype='period[Q-DEC]', "
"freq='Q-DEC')")
exp8 = ("PeriodIndex(['2013Q1', '2013Q2'], dtype='period[Q-DEC]', "
"freq='Q-DEC')")
exp9 = ("PeriodIndex(['2013Q1', '2013Q2', '2013Q3'], "
"dtype='period[Q-DEC]', freq='Q-DEC')")
exp10 = ("PeriodIndex(['2011-01-01', '2011-02-01'], "
"dtype='period[3D]', freq='3D')")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9, idx10],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9, exp10]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
# GH 10971
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00',
'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
exp1 = """Series([], dtype: object)"""
exp2 = """0 2011-01-01
dtype: object"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: object"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: object"""
exp5 = """0 2011
1 2012
2 2013
dtype: object"""
exp6 = """0 2011-01-01 09:00
1 2012-02-01 10:00
2 NaT
dtype: object"""
exp7 = """0 2013Q1
dtype: object"""
exp8 = """0 2013Q1
1 2013Q2
dtype: object"""
exp9 = """0 2013Q1
1 2013Q2
2 2013Q3
dtype: object"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(
['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
exp1 = """PeriodIndex: 0 entries
Freq: D"""
exp2 = """PeriodIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """PeriodIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """PeriodIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = """PeriodIndex: 3 entries, 2011 to 2013
Freq: A-DEC"""
exp6 = """PeriodIndex: 3 entries, 2011-01-01 09:00 to NaT
Freq: H"""
exp7 = """PeriodIndex: 1 entries, 2013Q1 to 2013Q1
Freq: Q-DEC"""
exp8 = """PeriodIndex: 2 entries, 2013Q1 to 2013Q2
Freq: Q-DEC"""
exp9 = """PeriodIndex: 3 entries, 2013Q1 to 2013Q3
Freq: Q-DEC"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H',
'T', 'S', 'L', 'U'],
['day', 'day', 'day', 'day',
'hour', 'minute', 'second',
'millisecond', 'microsecond']):
idx = pd.period_range(start='2013-04-01', periods=30, freq=freq)
self.assertEqual(idx.resolution, expected)
def test_union(self):
# union
rng1 = pd.period_range('1/1/2000', freq='D', periods=5)
other1 = pd.period_range('1/6/2000', freq='D', periods=5)
expected1 = pd.period_range('1/1/2000', freq='D', periods=10)
rng2 = pd.period_range('1/1/2000', freq='D', periods=5)
other2 = pd.period_range('1/4/2000', freq='D', periods=5)
expected2 = pd.period_range('1/1/2000', freq='D', periods=8)
rng3 = pd.period_range('1/1/2000', freq='D', periods=5)
other3 = pd.PeriodIndex([], freq='D')
expected3 = pd.period_range('1/1/2000', freq='D', periods=5)
rng4 = pd.period_range('2000-01-01 09:00', freq='H', periods=5)
other4 = pd.period_range('2000-01-02 09:00', freq='H', periods=5)
expected4 = pd.PeriodIndex(['2000-01-01 09:00', '2000-01-01 10:00',
'2000-01-01 11:00', '2000-01-01 12:00',
'2000-01-01 13:00', '2000-01-02 09:00',
'2000-01-02 10:00', '2000-01-02 11:00',
'2000-01-02 12:00', '2000-01-02 13:00'],
freq='H')
rng5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
'2000-01-01 09:05'], freq='T')
        other5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:05',
                                 '2000-01-01 09:08'], freq='T')
expected5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
'2000-01-01 09:05', '2000-01-01 09:08'],
freq='T')
rng6 = pd.period_range('2000-01-01', freq='M', periods=7)
other6 = pd.period_range('2000-04-01', freq='M', periods=7)
expected6 = pd.period_range('2000-01-01', freq='M', periods=10)
rng7 = pd.period_range('2003-01-01', freq='A', periods=5)
other7 = pd.period_range('1998-01-01', freq='A', periods=8)
expected7 = pd.period_range('1998-01-01', freq='A', periods=10)
        for rng, other, expected in [(rng1, other1, expected1),
                                     (rng2, other2, expected2),
                                     (rng3, other3, expected3),
                                     (rng4, other4, expected4),
                                     (rng5, other5, expected5),
                                     (rng6, other6, expected6),
                                     (rng7, other7, expected7)]:
result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
def test_add_iadd(self):
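        # Period arithmetic only accepts offsets compatible with the index
        # freq; anything else raises IncompatibleFrequency. Sketch (values
        # assumed): pd.period_range('2014', periods=2, freq='A') +
        # pd.offsets.YearEnd(5) -> PeriodIndex(['2019', '2020'], ...)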
rng = pd.period_range('1/1/2000', freq='D', periods=5)
other = pd.period_range('1/6/2000', freq='D', periods=5)
# previously performed setop union, now raises TypeError (GH14164)
with tm.assertRaises(TypeError):
rng + other
with tm.assertRaises(TypeError):
rng += other
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng + pd.offsets.YearEnd(5)
expected = pd.period_range('2019', '2029', freq='A')
tm.assert_index_equal(result, expected)
rng += pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365), Timedelta(days=365)]:
msg = ('Input has different freq(=.+)? '
'from PeriodIndex\\(freq=A-DEC\\)')
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng + o
rng = pd.period_range('2014-01', '2016-12', freq='M')
result = rng + pd.offsets.MonthEnd(5)
expected = pd.period_range('2014-06', '2017-05', freq='M')
tm.assert_index_equal(result, expected)
rng += pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365), Timedelta(days=365)]:
rng = pd.period_range('2014-01', '2016-12', freq='M')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=M\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng + o
# Tick
offsets = [pd.offsets.Day(3), timedelta(days=3),
np.timedelta64(3, 'D'), pd.offsets.Hour(72),
timedelta(minutes=60 * 24 * 3), np.timedelta64(72, 'h'),
Timedelta('72:00:00')]
for delta in offsets:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
result = rng + delta
expected = pd.period_range('2014-05-04', '2014-05-18', freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(4, 'h'),
timedelta(hours=23), Timedelta('23:00:00')]:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=D\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng + o
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), pd.offsets.Minute(120),
timedelta(minutes=120), np.timedelta64(120, 'm'),
Timedelta(minutes=120)]
for delta in offsets:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
result = rng + delta
expected = pd.period_range('2014-01-01 12:00', '2014-01-05 12:00',
freq='H')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30),
np.timedelta64(30, 's'), Timedelta(seconds=30)]:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=H\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
result = rng + delta
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng += delta
# int
rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10)
result = rng + 1
expected = pd.period_range('2000-01-01 10:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_difference(self):
# diff
rng1 = pd.period_range('1/1/2000', freq='D', periods=5)
other1 = pd.period_range('1/6/2000', freq='D', periods=5)
expected1 = pd.period_range('1/1/2000', freq='D', periods=5)
rng2 = pd.period_range('1/1/2000', freq='D', periods=5)
other2 = pd.period_range('1/4/2000', freq='D', periods=5)
expected2 = pd.period_range('1/1/2000', freq='D', periods=3)
rng3 = pd.period_range('1/1/2000', freq='D', periods=5)
other3 = pd.PeriodIndex([], freq='D')
expected3 = pd.period_range('1/1/2000', freq='D', periods=5)
rng4 = pd.period_range('2000-01-01 09:00', freq='H', periods=5)
other4 = pd.period_range('2000-01-02 09:00', freq='H', periods=5)
expected4 = rng4
rng5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
'2000-01-01 09:05'], freq='T')
other5 = pd.PeriodIndex(
['2000-01-01 09:01', '2000-01-01 09:05'], freq='T')
expected5 = pd.PeriodIndex(['2000-01-01 09:03'], freq='T')
rng6 = pd.period_range('2000-01-01', freq='M', periods=7)
other6 = pd.period_range('2000-04-01', freq='M', periods=7)
expected6 = pd.period_range('2000-01-01', freq='M', periods=3)
rng7 = pd.period_range('2003-01-01', freq='A', periods=5)
other7 = pd.period_range('1998-01-01', freq='A', periods=8)
expected7 = pd.period_range('2006-01-01', freq='A', periods=2)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3),
(rng4, other4, expected4),
(rng5, other5, expected5),
(rng6, other6, expected6),
                                     (rng7, other7, expected7)]:
            result_difference = rng.difference(other)
            tm.assert_index_equal(result_difference, expected)
def test_sub_isub(self):
# previously performed setop, now raises TypeError (GH14164)
# TODO needs to wait on #13077 for decision on result type
rng = pd.period_range('1/1/2000', freq='D', periods=5)
other = pd.period_range('1/6/2000', freq='D', periods=5)
with tm.assertRaises(TypeError):
rng - other
with tm.assertRaises(TypeError):
rng -= other
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng - pd.offsets.YearEnd(5)
expected = pd.period_range('2009', '2019', freq='A')
tm.assert_index_equal(result, expected)
rng -= pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
rng = pd.period_range('2014', '2024', freq='A')
msg = ('Input has different freq(=.+)? '
'from PeriodIndex\\(freq=A-DEC\\)')
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng - o
rng = pd.period_range('2014-01', '2016-12', freq='M')
result = rng - pd.offsets.MonthEnd(5)
expected = pd.period_range('2013-08', '2016-07', freq='M')
tm.assert_index_equal(result, expected)
rng -= pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
rng = pd.period_range('2014-01', '2016-12', freq='M')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=M\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng - o
# Tick
offsets = [pd.offsets.Day(3), timedelta(days=3),
np.timedelta64(3, 'D'), pd.offsets.Hour(72),
timedelta(minutes=60 * 24 * 3), np.timedelta64(72, 'h')]
for delta in offsets:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
result = rng - delta
expected = pd.period_range('2014-04-28', '2014-05-12', freq='D')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(4, 'h'),
timedelta(hours=23)]:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=D\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng - o
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), pd.offsets.Minute(120),
timedelta(minutes=120), np.timedelta64(120, 'm')]
for delta in offsets:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
result = rng - delta
expected = pd.period_range('2014-01-01 08:00', '2014-01-05 08:00',
freq='H')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30),
np.timedelta64(30, 's')]:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=H\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
result = rng + delta
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng += delta
# int
rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10)
result = rng - 1
expected = pd.period_range('2000-01-01 08:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_comp_nat(self):
left = pd.PeriodIndex([pd.Period('2011-01-01'), pd.NaT,
pd.Period('2011-01-03')])
right = pd.PeriodIndex([pd.NaT, pd.NaT, pd.Period('2011-01-03')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = PeriodIndex(np.repeat(idx.values, range(1, len(idx) + 1)),
freq='H')
exp_idx = PeriodIndex(['2011-01-01 18:00', '2011-01-01 17:00',
'2011-01-01 16:00', '2011-01-01 15:00',
'2011-01-01 14:00', '2011-01-01 13:00',
'2011-01-01 12:00', '2011-01-01 11:00',
'2011-01-01 10:00',
'2011-01-01 09:00'], freq='H')
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = pd.period_range('2011-01-01 09:00', freq='H',
periods=10)
tm.assert_index_equal(idx.unique(), expected)
idx = PeriodIndex(['2013-01-01 09:00', '2013-01-01 09:00',
'2013-01-01 09:00', '2013-01-01 08:00',
'2013-01-01 08:00', pd.NaT], freq='H')
exp_idx = PeriodIndex(['2013-01-01 09:00', '2013-01-01 08:00'],
freq='H')
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = PeriodIndex(['2013-01-01 09:00', '2013-01-01 08:00',
pd.NaT], freq='H')
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.period_range('2011-01-01', '2011-01-31', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx) # freq will not be reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.period_range('2011-01-01', '2011-01-31', freq='D',
name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_order_compat(self):
def _check_freq(index, expected_index):
if isinstance(index, PeriodIndex):
self.assertEqual(index.freq, expected_index.freq)
pidx = PeriodIndex(['2011', '2012', '2013'], name='pidx', freq='A')
# for compatibility check
iidx = Index([2011, 2012, 2013], name='idx')
for idx in [pidx, iidx]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
_check_freq(ordered, idx)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, idx[::-1])
_check_freq(ordered, idx[::-1])
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
_check_freq(ordered, idx)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, idx[::-1])
self.assert_numpy_array_equal(indexer,
np.array([2, 1, 0]),
check_dtype=False)
_check_freq(ordered, idx[::-1])
pidx = PeriodIndex(['2011', '2013', '2015', '2012',
'2011'], name='pidx', freq='A')
pexpected = PeriodIndex(
['2011', '2011', '2012', '2013', '2015'], name='pidx', freq='A')
# for compatibility check
iidx = Index([2011, 2013, 2015, 2012, 2011], name='idx')
iexpected = Index([2011, 2011, 2012, 2013, 2015], name='idx')
for idx, expected in [(pidx, pexpected), (iidx, iexpected)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
_check_freq(ordered, idx)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
_check_freq(ordered, idx)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
_check_freq(ordered, idx)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp,
check_dtype=False)
_check_freq(ordered, idx)
pidx = PeriodIndex(['2011', '2013', 'NaT', '2011'], name='pidx',
freq='D')
result = pidx.sort_values()
expected = PeriodIndex(['NaT', '2011', '2011', '2013'],
name='pidx', freq='D')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, 'D')
result = pidx.sort_values(ascending=False)
expected = PeriodIndex(
['2013', '2011', '2011', 'NaT'], name='pidx', freq='D')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, 'D')
def test_order(self):
for freq in ['D', '2D', '4D']:
idx = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'],
freq=freq, name='idx')
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq, freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
self.assertEqual(ordered.freq, freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assert_numpy_array_equal(indexer,
np.array([2, 1, 0]),
check_dtype=False)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq, freq)
idx1 = PeriodIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'], freq='D', name='idx1')
exp1 = PeriodIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'], freq='D', name='idx1')
idx2 = PeriodIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
freq='D', name='idx2')
exp2 = PeriodIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
freq='D', name='idx2')
idx3 = PeriodIndex([pd.NaT, '2011-01-03', '2011-01-05',
'2011-01-02', pd.NaT], freq='D', name='idx3')
exp3 = PeriodIndex([pd.NaT, pd.NaT, '2011-01-02', '2011-01-03',
'2011-01-05'], freq='D', name='idx3')
for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, 'D')
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertEqual(ordered.freq, 'D')
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertEqual(ordered.freq, 'D')
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertEqual(ordered.freq, 'D')
def test_getitem(self):
idx1 = pd.period_range('2011-01-01', '2011-01-31', freq='D',
name='idx')
for idx in [idx1]:
result = idx[0]
self.assertEqual(result, pd.Period('2011-01-01', freq='D'))
result = idx[-1]
self.assertEqual(result, pd.Period('2011-01-31', freq='D'))
result = idx[0:5]
expected = pd.period_range('2011-01-01', '2011-01-05', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
self.assertEqual(result.freq, 'D')
result = idx[0:10:2]
expected = pd.PeriodIndex(['2011-01-01', '2011-01-03',
'2011-01-05',
'2011-01-07', '2011-01-09'],
freq='D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
self.assertEqual(result.freq, 'D')
result = idx[-20:-5:3]
expected = pd.PeriodIndex(['2011-01-12', '2011-01-15',
'2011-01-18',
'2011-01-21', '2011-01-24'],
freq='D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
self.assertEqual(result.freq, 'D')
result = idx[4::-1]
expected = PeriodIndex(['2011-01-05', '2011-01-04', '2011-01-03',
'2011-01-02', '2011-01-01'],
freq='D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
self.assertEqual(result.freq, 'D')
def test_take(self):
# GH 10295
idx1 = pd.period_range('2011-01-01', '2011-01-31', freq='D',
name='idx')
for idx in [idx1]:
result = idx.take([0])
self.assertEqual(result, pd.Period('2011-01-01', freq='D'))
result = idx.take([5])
self.assertEqual(result, pd.Period('2011-01-06', freq='D'))
result = idx.take([0, 1, 2])
expected = pd.period_range('2011-01-01', '2011-01-03', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, 'D')
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.PeriodIndex(['2011-01-01', '2011-01-03',
'2011-01-05'], freq='D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
self.assertEqual(result.freq, 'D')
result = idx.take([7, 4, 1])
expected = pd.PeriodIndex(['2011-01-08', '2011-01-05',
'2011-01-02'],
freq='D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
self.assertEqual(result.freq, 'D')
result = idx.take([3, 2, 5])
expected = PeriodIndex(['2011-01-04', '2011-01-03', '2011-01-06'],
freq='D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
self.assertEqual(result.freq, 'D')
result = idx.take([-3, 2, 5])
expected = PeriodIndex(['2011-01-29', '2011-01-03', '2011-01-06'],
freq='D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
self.assertEqual(result.freq, 'D')
def test_nat_new(self):
idx = pd.period_range('2011-01', freq='M', periods=5, name='x')
result = idx._nat_new()
exp = pd.PeriodIndex([pd.NaT] * 5, freq='M', name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
idx = pd.PeriodIndex([], name='xxx', freq='H')
with tm.assertRaises(TypeError):
# period shift doesn't accept freq
idx.shift(1, freq='H')
tm.assert_index_equal(idx.shift(0), idx)
tm.assert_index_equal(idx.shift(3), idx)
idx = pd.PeriodIndex(['2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00'], name='xxx', freq='H')
tm.assert_index_equal(idx.shift(0), idx)
exp = pd.PeriodIndex(['2011-01-01 13:00', '2011-01-01 14:00',
'2011-01-01 15:00'], name='xxx', freq='H')
tm.assert_index_equal(idx.shift(3), exp)
exp = pd.PeriodIndex(['2011-01-01 07:00', '2011-01-01 08:00',
'2011-01-01 09:00'], name='xxx', freq='H')
tm.assert_index_equal(idx.shift(-3), exp)
def test_repeat(self):
index = pd.period_range('2001-01-01', periods=2, freq='D')
exp = pd.PeriodIndex(['2001-01-01', '2001-01-01',
'2001-01-02', '2001-01-02'], freq='D')
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
index = pd.period_range('2001-01-01', periods=2, freq='2D')
exp = pd.PeriodIndex(['2001-01-01', '2001-01-01',
'2001-01-03', '2001-01-03'], freq='2D')
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
index = pd.PeriodIndex(['2001-01', 'NaT', '2003-01'], freq='M')
exp = pd.PeriodIndex(['2001-01', '2001-01', '2001-01',
'NaT', 'NaT', 'NaT',
'2003-01', '2003-01', '2003-01'], freq='M')
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
def test_nat(self):
self.assertIs(pd.PeriodIndex._na_value, pd.NaT)
self.assertIs(pd.PeriodIndex([], freq='M')._na_value, pd.NaT)
idx = pd.PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.PeriodIndex(['2011-01-01', 'NaT'], freq='D')
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
for freq in ['D', 'M']:
idx = pd.PeriodIndex(['2011-01-01', '2011-01-02', 'NaT'],
freq=freq)
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.PeriodIndex(['2011-01-01', '2011-01-02', 'NaT'],
freq='H')
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
# same internal, different tz
idx3 = pd.PeriodIndex._simple_new(idx.asi8, freq='H')
tm.assert_numpy_array_equal(idx.asi8, idx3.asi8)
self.assertFalse(idx.equals(idx3))
self.assertFalse(idx.equals(idx3.copy()))
self.assertFalse(idx.equals(idx3.asobject))
self.assertFalse(idx.asobject.equals(idx3))
self.assertFalse(idx.equals(list(idx3)))
self.assertFalse(idx.equals(pd.Series(idx3)))
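# --- Illustrative sketch (not part of the original test suite) ---
# The arithmetic tests above encode one rule: a PeriodIndex only accepts
# offsets matching its own frequency. A minimal standalone demonstration,
# assuming a pandas version contemporary with these tests:
def _period_offset_demo():
    rng = pd.period_range('2014-01', '2014-06', freq='M')
    shifted = rng - pd.offsets.MonthEnd(2)  # same frequency: plain shift
    try:
        rng - pd.offsets.Day(3)             # mismatched frequency
    except period.IncompatibleFrequency:
        pass
    return shifted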
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| gpl-3.0 |
dandye/expedia | PythonBenchmark/data_io.py | 3 | 1229 | import csv
from operator import itemgetter
import os
import json
import pickle
import pandas as pd
def get_paths():
paths = json.loads(open("SETTINGS.json").read())
for key in paths:
paths[key] = os.path.expandvars(paths[key])
return paths
def read_train():
train_path = get_paths()["train_path"]
return pd.read_csv(train_path)
def read_test():
test_path = get_paths()["test_path"]
return pd.read_csv(test_path)
def save_model(model):
out_path = get_paths()["model_path"]
pickle.dump(model, open(out_path, "wb"))  # binary mode so pickling also works on Python 3
def load_model():
in_path = get_paths()["model_path"]
return pickle.load(open(in_path, "rb"))
def write_submission(recommendations, submission_file=None):
if submission_file is None:
submission_path = get_paths()["submission_path"]
else:
path, file_name = os.path.split(get_paths()["submission_path"])
submission_path = os.path.join(path, submission_file)
rows = [(srch_id, prop_id)
for srch_id, prop_id, rank_float
in sorted(recommendations, key=itemgetter(0,2))]
writer = csv.writer(open(submission_path, "w"), lineterminator="\n")
writer.writerow(("SearchId", "PropertyId"))
writer.writerows(rows)
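# Illustrative usage sketch (not part of the original benchmark code).
# write_submission expects (srch_id, prop_id, rank_float) triples and writes
# them ordered by search id, then predicted rank; the values below are
# hypothetical:
def _example_submission():
    recommendations = [(1, 301, 0.2), (1, 302, 0.9), (2, 305, 0.1)]
    write_submission(recommendations, submission_file="example.csv")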
| bsd-3-clause |
PrashntS/scikit-learn | examples/neighbors/plot_kde_1d.py | 347 | 5100 | """
===================================
Simple 1D Kernel Density Estimation
===================================
This example uses the :class:`sklearn.neighbors.KernelDensity` class to
demonstrate the principles of Kernel Density Estimation in one dimension.
The first plot shows one of the problems with using histograms to visualize
the density of points in 1D. Intuitively, a histogram can be thought of as a
scheme in which a unit "block" is stacked above each point on a regular grid.
As the top two panels show, however, the choice of gridding for these blocks
can lead to wildly divergent ideas about the underlying shape of the density
distribution. If we instead center each block on the point it represents, we
get the estimate shown in the bottom left panel. This is a kernel density
estimation with a "top hat" kernel. This idea can be generalized to other
kernel shapes: the bottom-right panel of the first figure shows a Gaussian
kernel density estimate over the same distribution.
Scikit-learn implements efficient kernel density estimation using either
a Ball Tree or KD Tree structure, through the
:class:`sklearn.neighbors.KernelDensity` estimator. The available kernels
are shown in the second figure of this example.
The third figure compares kernel density estimates for a distribution of 100
samples in 1 dimension. Though this example uses 1D distributions, kernel
density estimation is easily and efficiently extensible to higher dimensions
as well.
"""
# Author: Jake Vanderplas <jakevdp@cs.washington.edu>
#
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
from sklearn.neighbors import KernelDensity
#----------------------------------------------------------------------
# Plot the progression of histograms to kernels
np.random.seed(1)
N = 20
X = np.concatenate((np.random.normal(0, 1, int(0.3 * N)),
np.random.normal(5, 1, int(0.7 * N))))[:, np.newaxis]
X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]
bins = np.linspace(-5, 10, 10)
fig, ax = plt.subplots(2, 2, sharex=True, sharey=True)
fig.subplots_adjust(hspace=0.05, wspace=0.05)
# histogram 1
ax[0, 0].hist(X[:, 0], bins=bins, fc='#AAAAFF', density=True)
ax[0, 0].text(-3.5, 0.31, "Histogram")
# histogram 2
ax[0, 1].hist(X[:, 0], bins=bins + 0.75, fc='#AAAAFF', density=True)
ax[0, 1].text(-3.5, 0.31, "Histogram, bins shifted")
# tophat KDE
kde = KernelDensity(kernel='tophat', bandwidth=0.75).fit(X)
log_dens = kde.score_samples(X_plot)
ax[1, 0].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF')
ax[1, 0].text(-3.5, 0.31, "Tophat Kernel Density")
# Gaussian KDE
kde = KernelDensity(kernel='gaussian', bandwidth=0.75).fit(X)
log_dens = kde.score_samples(X_plot)
ax[1, 1].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF')
ax[1, 1].text(-3.5, 0.31, "Gaussian Kernel Density")
for axi in ax.ravel():
axi.plot(X[:, 0], np.zeros(X.shape[0]) - 0.01, '+k')
axi.set_xlim(-4, 9)
axi.set_ylim(-0.02, 0.34)
for axi in ax[:, 0]:
axi.set_ylabel('Normalized Density')
for axi in ax[1, :]:
axi.set_xlabel('x')
#----------------------------------------------------------------------
# Plot all available kernels
X_plot = np.linspace(-6, 6, 1000)[:, None]
X_src = np.zeros((1, 1))
fig, ax = plt.subplots(2, 3, sharex=True, sharey=True)
fig.subplots_adjust(left=0.05, right=0.95, hspace=0.05, wspace=0.05)
def format_func(x, loc):
if x == 0:
return '0'
elif x == 1:
return 'h'
elif x == -1:
return '-h'
else:
return '%ih' % x
for i, kernel in enumerate(['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']):
axi = ax.ravel()[i]
log_dens = KernelDensity(kernel=kernel).fit(X_src).score_samples(X_plot)
axi.fill(X_plot[:, 0], np.exp(log_dens), '-k', fc='#AAAAFF')
axi.text(-2.6, 0.95, kernel)
axi.xaxis.set_major_formatter(plt.FuncFormatter(format_func))
axi.xaxis.set_major_locator(plt.MultipleLocator(1))
axi.yaxis.set_major_locator(plt.NullLocator())
axi.set_ylim(0, 1.05)
axi.set_xlim(-2.9, 2.9)
ax[0, 1].set_title('Available Kernels')
#----------------------------------------------------------------------
# Plot a 1D density example
N = 100
np.random.seed(1)
X = np.concatenate((np.random.normal(0, 1, int(0.3 * N)),
np.random.normal(5, 1, int(0.7 * N))))[:, np.newaxis]
X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]
true_dens = (0.3 * norm(0, 1).pdf(X_plot[:, 0])
+ 0.7 * norm(5, 1).pdf(X_plot[:, 0]))
fig, ax = plt.subplots()
ax.fill(X_plot[:, 0], true_dens, fc='black', alpha=0.2,
label='input distribution')
for kernel in ['gaussian', 'tophat', 'epanechnikov']:
kde = KernelDensity(kernel=kernel, bandwidth=0.5).fit(X)
log_dens = kde.score_samples(X_plot)
ax.plot(X_plot[:, 0], np.exp(log_dens), '-',
label="kernel = '{0}'".format(kernel))
ax.text(6, 0.38, "N={0} points".format(N))
ax.legend(loc='upper left')
ax.plot(X[:, 0], -0.005 - 0.01 * np.random.random(X.shape[0]), '+k')
ax.set_xlim(-4, 9)
ax.set_ylim(-0.02, 0.4)
plt.show()
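#----------------------------------------------------------------------
# Companion sketch (not part of the original example): the bandwidths above
# are fixed by hand, but bandwidth is a free parameter normally chosen by
# cross-validation. A minimal sketch, assuming a scikit-learn version that
# ships GridSearchCV under sklearn.model_selection:
from sklearn.model_selection import GridSearchCV

grid = GridSearchCV(KernelDensity(kernel='gaussian'),
                    {'bandwidth': np.logspace(-1, 1, 20)}, cv=5)
grid.fit(X)  # X is the 100-point sample generated above
print("CV-selected bandwidth: {0:.3f}".format(grid.best_params_['bandwidth']))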
| bsd-3-clause |
paris-saclay-cds/ramp-workflow | rampwf/utils/scoring.py | 1 | 5037 | # coding: utf-8
"""
Scoring utilities
"""
import numpy as np
import pandas as pd
from .pretty_print import IS_COLOR_TERM
from .pretty_print import print_warning
def reorder_df_scores(df_scores, score_types):
"""Reorder scores according to the order in score_types.
Parameters
----------
df_scores : pd.DataFrame
the score dataframe
score_types : list of score types
Returns
-------
df_scores : the dataframe with reordered scores
"""
try:
# try to re-order columns/rows in the printed array
# we may not have all train, valid, test, so need to select
index_order = np.array(['train', 'valid', 'test'])
ordered_index = index_order[np.isin(index_order, df_scores.index)]
df_scores = df_scores.loc[
ordered_index, [score_type.name for score_type in score_types]]
except Exception:
print_warning("Couldn't re-order the score matrix.")
return df_scores
def mean_score_matrix(df_scores_list, score_types):
u"""Construct a mean ± std score dataframe from a list of score dataframes.
Parameters
----------
df_scores_list : list of pd.DataFrame
a list of score data frames to average
score_types : list of score types
a list of score types to use
Returns
-------
df_scores : the mean ± std score dataframe
"""
scores = np.array([df_scores.values for df_scores in df_scores_list])
meanss = scores.mean(axis=0)
stdss = scores.std(axis=0)
precisions = [st.precision for st in score_types]
precisions.append(1) # for time
# we use unicode no break space so split in print_df_scores works
if IS_COLOR_TERM:
strs = np.array([[
u'{val}\u00A0±\u00A0{std}'.format(
val=round(mean, prec),
std=round(std, prec + 1))
for mean, std, prec in zip(means, stds, precisions)]
for means, stds in zip(meanss, stdss)])
else:
strs = np.array([[
u'{val} +- {std}'.format(
val=round(mean, prec),
std=round(std, prec + 1))
for mean, std, prec in zip(means, stds, precisions)]
for means, stds in zip(meanss, stdss)])
df_scores = pd.DataFrame(
strs, columns=df_scores_list[0].columns, index=df_scores_list[0].index)
return df_scores
def score_matrix_from_scores(score_types, steps, scoress):
"""Construct a score dataframe from a matrix of scores.
Parameters
----------
score_types : list of score types
a list of score types to use, score_type.name serves as column index
steps : a list of strings
subset of ['train', 'valid', 'test'], serves as row index
Returns
-------
df_scores : the score dataframe
"""
results = []
for step, scores in zip(steps, scoress):
for score_type, score in zip(score_types, scores):
results.append(
{'step': str(step), 'score': score_type.name, 'value': score})
df_scores = pd.DataFrame(results)
df_scores = df_scores.set_index(['step', 'score'])['value']
df_scores = df_scores.unstack()
df_scores = reorder_df_scores(df_scores, score_types)
return df_scores
def score_matrix(score_types, ground_truth, predictions):
"""Construct a score dataframe by scoring predictions against ground truth.
Parameters
----------
score_types : list of score types
a list of score types to use, score_type.name serves as column index
ground_truth : dict of Predictions
the ground truth data
predictions : dict of Predictions
the predicted data
Returns
-------
df_scores : pd.DataFrame
table of scores (rows = train/valid/test steps, columns = scores)
"""
if set(ground_truth.keys()) != set(predictions.keys()):
raise ValueError(('Predictions and ground truth steps '
'do not match:\n'
' * predictions = {} \n'
' * ground_truth = {} ')
.format(set(predictions.keys()),
set(ground_truth.keys())))
steps = ground_truth.keys()
scoress = [[
score_type.score_function(ground_truth[step], predictions[step])
for score_type in score_types] for step in ground_truth]
return score_matrix_from_scores(score_types, steps, scoress)
def round_df_scores(df_scores, score_types):
"""Round scores to the precision set in the score type.
Parameters
----------
df_scores : pd.DataFrame
the score dataframe
score_types : list of score types
Returns
-------
df_scores : the dataframe with rounded scores
"""
df_scores_copy = df_scores.copy()
for column, score_type in zip(df_scores_copy, score_types):
df_scores_copy[column] = [round(score, score_type.precision)
for score in df_scores_copy[column]]
return df_scores_copy
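# --- Usage sketch (not part of the original module) ---
# A minimal end-to-end illustration with a hypothetical score type. Real RAMP
# score types live in rampwf.score_types; all this module assumes of them is
# a `name` and a `precision` attribute, as stubbed here.
def _score_matrix_demo():
    class _DummyScore(object):
        name = 'acc'
        precision = 2

    score_types = [_DummyScore()]
    df = score_matrix_from_scores(score_types, steps=['train', 'valid'],
                                  scoress=[[0.912345], [0.898765]])
    return round_df_scores(df, score_types)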
| bsd-3-clause |
nhuntwalker/astroML | book_figures/chapter5/fig_likelihood_gaussgauss.py | 3 | 3082 | r"""
Gaussian Distribution with Gaussian Errors
------------------------------------------
Figure 5.7
The logarithm of the posterior probability density function for :math:`\mu`
and :math:`\sigma`, :math:`L_p(\mu,\sigma)`, for a Gaussian distribution with
heteroscedastic Gaussian measurement errors (sampled uniformly from the 0-3
interval), given by eq. 5.64. The input values are :math:`\mu = 1` and
:math:`\sigma = 1`, and a randomly generated sample has 10 points. Note that
the posterior pdf is not symmetric with respect to the :math:`\mu = 1` line,
and that the outermost contour, which encloses the region that contains 0.997
of the cumulative (integrated) posterior probability, allows solutions with
:math:`\sigma = 0`.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
from astroML.plotting.mcmc import convert_to_stdev
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
def gaussgauss_logL(xi, ei, mu, sigma):
"""Equation 5.63: gaussian likelihood with gaussian errors"""
ndim = len(np.broadcast(sigma, mu).shape)
xi = xi.reshape(xi.shape + tuple(ndim * [1]))
ei = ei.reshape(ei.shape + tuple(ndim * [1]))
s2_e2 = sigma ** 2 + ei ** 2
return -0.5 * np.sum(np.log(s2_e2) + (xi - mu) ** 2 / s2_e2, 0)
#------------------------------------------------------------
# Define the grid and compute logL
np.random.seed(5)
mu_true = 1.
sigma_true = 1.
N = 10
ei = 3 * np.random.random(N)
xi = np.random.normal(mu_true, np.sqrt(sigma_true ** 2 + ei ** 2))
sigma = np.linspace(0.01, 5, 70)
mu = np.linspace(-3, 5, 70)
logL = gaussgauss_logL(xi, ei, mu, sigma[:, np.newaxis])
logL -= logL.max()
#------------------------------------------------------------
# plot the results
fig = plt.figure(figsize=(5, 3.75))
plt.imshow(logL, origin='lower',
extent=(mu[0], mu[-1], sigma[0], sigma[-1]),
cmap=plt.cm.binary,
aspect='auto')
plt.colorbar().set_label(r'$\log(L)$')
plt.clim(-5, 0)
plt.text(0.5, 0.93,
(r'$L(\mu,\sigma)\ \mathrm{for}\ \bar{x}=1,\ '
r'\sigma_{\rm true}=1,\ n=10$'),
bbox=dict(ec='k', fc='w', alpha=0.9),
ha='center', va='center', transform=plt.gca().transAxes)
plt.contour(mu, sigma, convert_to_stdev(logL),
levels=(0.683, 0.955, 0.997),
colors='k')
plt.xlabel(r'$\mu$')
plt.ylabel(r'$\sigma$')
plt.show()
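#------------------------------------------------------------
# Companion sketch (not part of the original figure script): with the flat
# priors used implicitly here, the logL grid above can be marginalized to a
# 1D posterior for mu by summing over the sigma axis (rows index sigma,
# columns index mu):
L = np.exp(logL)
p_mu = L.sum(axis=0)
p_mu /= p_mu.sum() * (mu[1] - mu[0])  # normalize to a density in mu
mu_mean = np.sum(mu * p_mu) * (mu[1] - mu[0])
print("posterior mean of mu: {0:.2f}".format(mu_mean))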
| bsd-2-clause |
huongttlan/statsmodels | statsmodels/tools/tests/test_tools.py | 26 | 18818 | """
Test functions for models.tools
"""
from statsmodels.compat.python import lrange, range
import numpy as np
from numpy.random import standard_normal
from numpy.testing import (assert_equal, assert_array_equal,
assert_almost_equal, assert_string_equal, TestCase)
from nose.tools import (assert_true, assert_false, assert_raises)
from statsmodels.datasets import longley
from statsmodels.tools import tools
from statsmodels.tools.tools import pinv_extended
from statsmodels.compat.numpy import np_matrix_rank
class TestTools(TestCase):
def test_add_constant_list(self):
x = lrange(1,5)
x = tools.add_constant(x)
y = np.asarray([[1,1,1,1],[1,2,3,4.]]).T
assert_equal(x, y)
def test_add_constant_1d(self):
x = np.arange(1,5)
x = tools.add_constant(x)
y = np.asarray([[1,1,1,1],[1,2,3,4.]]).T
assert_equal(x, y)
def test_add_constant_has_constant1d(self):
x = np.ones(5)
x = tools.add_constant(x, has_constant='skip')
assert_equal(x, np.ones(5))
assert_raises(ValueError, tools.add_constant, x, has_constant='raise')
assert_equal(tools.add_constant(x, has_constant='add'),
np.ones((5, 2)))
def test_add_constant_has_constant2d(self):
x = np.asarray([[1,1,1,1],[1,2,3,4.]]).T
y = tools.add_constant(x, has_constant='skip')
assert_equal(x, y)
assert_raises(ValueError, tools.add_constant, x, has_constant='raise')
assert_equal(tools.add_constant(x, has_constant='add'),
np.column_stack((np.ones(4), x)))
def test_recipr(self):
X = np.array([[2,1],[-1,0]])
Y = tools.recipr(X)
assert_almost_equal(Y, np.array([[0.5,1],[0,0]]))
def test_recipr0(self):
X = np.array([[2,1],[-4,0]])
Y = tools.recipr0(X)
assert_almost_equal(Y, np.array([[0.5,1],[-0.25,0]]))
def test_rank(self):
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
X = standard_normal((40,10))
self.assertEquals(tools.rank(X), np_matrix_rank(X))
X[:,0] = X[:,1] + X[:,2]
self.assertEquals(tools.rank(X), np_matrix_rank(X))
def test_extendedpinv(self):
X = standard_normal((40, 10))
np_inv = np.linalg.pinv(X)
np_sing_vals = np.linalg.svd(X, 0, 0)
sm_inv, sing_vals = pinv_extended(X)
assert_almost_equal(np_inv, sm_inv)
assert_almost_equal(np_sing_vals, sing_vals)
def test_extendedpinv_singular(self):
X = standard_normal((40, 10))
X[:, 5] = X[:, 1] + X[:, 3]
np_inv = np.linalg.pinv(X)
np_sing_vals = np.linalg.svd(X, 0, 0)
sm_inv, sing_vals = pinv_extended(X)
assert_almost_equal(np_inv, sm_inv)
assert_almost_equal(np_sing_vals, sing_vals)
def test_fullrank(self):
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
X = standard_normal((40,10))
X[:,0] = X[:,1] + X[:,2]
Y = tools.fullrank(X)
self.assertEquals(Y.shape, (40,9))
self.assertEquals(tools.rank(Y), 9)
X[:,5] = X[:,3] + X[:,4]
Y = tools.fullrank(X)
self.assertEquals(Y.shape, (40,8))
warnings.simplefilter("ignore")
self.assertEquals(tools.rank(Y), 8)
def test_estimable():
rng = np.random.RandomState(20120713)
N, P = (40, 10)
X = rng.normal(size=(N, P))
C = rng.normal(size=(1, P))
isestimable = tools.isestimable
assert_true(isestimable(C, X))
assert_true(isestimable(np.eye(P), X))
for row in np.eye(P):
assert_true(isestimable(row, X))
X = np.ones((40, 2))
assert_true(isestimable([1, 1], X))
assert_false(isestimable([1, 0], X))
assert_false(isestimable([0, 1], X))
assert_false(isestimable(np.eye(2), X))
halfX = rng.normal(size=(N, 5))
X = np.hstack([halfX, halfX])
assert_false(isestimable(np.hstack([np.eye(5), np.zeros((5, 5))]), X))
assert_false(isestimable(np.hstack([np.zeros((5, 5)), np.eye(5)]), X))
assert_true(isestimable(np.hstack([np.eye(5), np.eye(5)]), X))
# Test array-like for design
XL = X.tolist()
assert_true(isestimable(np.hstack([np.eye(5), np.eye(5)]), XL))
# Test ValueError for incorrect number of columns
X = rng.normal(size=(N, 5))
for n in range(1, 4):
assert_raises(ValueError, isestimable, np.ones((n,)), X)
assert_raises(ValueError, isestimable, np.eye(4), X)
class TestCategoricalNumerical(object):
#TODO: use assert_raises to check that bad inputs are taken care of
def __init__(self):
#import string
stringabc = 'abcdefghijklmnopqrstuvwxy'
self.des = np.random.randn(25,2)
self.instr = np.floor(np.arange(10,60, step=2)/10)
x=np.zeros((25,5))
x[:5,0]=1
x[5:10,1]=1
x[10:15,2]=1
x[15:20,3]=1
x[20:25,4]=1
self.dummy = x
structdes = np.zeros((25,1),dtype=[('var1', 'f4'),('var2', 'f4'),
('instrument','f4'),('str_instr','a10')])
structdes['var1'] = self.des[:,0][:,None]
structdes['var2'] = self.des[:,1][:,None]
structdes['instrument'] = self.instr[:,None]
string_var = [stringabc[0:5], stringabc[5:10],
stringabc[10:15], stringabc[15:20],
stringabc[20:25]]
string_var *= 5
self.string_var = np.array(sorted(string_var))
structdes['str_instr'] = self.string_var[:,None]
self.structdes = structdes
self.recdes = structdes.view(np.recarray)
def test_array2d(self):
des = np.column_stack((self.des, self.instr, self.des))
des = tools.categorical(des, col=2)
assert_array_equal(des[:,-5:], self.dummy)
assert_equal(des.shape[1],10)
def test_array1d(self):
des = tools.categorical(self.instr)
assert_array_equal(des[:,-5:], self.dummy)
assert_equal(des.shape[1],6)
def test_array2d_drop(self):
des = np.column_stack((self.des, self.instr, self.des))
des = tools.categorical(des, col=2, drop=True)
assert_array_equal(des[:,-5:], self.dummy)
assert_equal(des.shape[1],9)
def test_array1d_drop(self):
des = tools.categorical(self.instr, drop=True)
assert_array_equal(des, self.dummy)
assert_equal(des.shape[1],5)
def test_recarray2d(self):
des = tools.categorical(self.recdes, col='instrument')
# better way to do this?
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 9)
def test_recarray2dint(self):
des = tools.categorical(self.recdes, col=2)
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 9)
def test_recarray1d(self):
instr = self.structdes['instrument'].view(np.recarray)
dum = tools.categorical(instr)
test_dum = np.column_stack(([dum[_] for _ in dum.dtype.names[-5:]]))
assert_array_equal(test_dum, self.dummy)
assert_equal(len(dum.dtype.names), 6)
def test_recarray1d_drop(self):
instr = self.structdes['instrument'].view(np.recarray)
dum = tools.categorical(instr, drop=True)
test_dum = np.column_stack(([dum[_] for _ in dum.dtype.names]))
assert_array_equal(test_dum, self.dummy)
assert_equal(len(dum.dtype.names), 5)
def test_recarray2d_drop(self):
des = tools.categorical(self.recdes, col='instrument', drop=True)
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 8)
def test_structarray2d(self):
des = tools.categorical(self.structdes, col='instrument')
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 9)
def test_structarray2dint(self):
des = tools.categorical(self.structdes, col=2)
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 9)
def test_structarray1d(self):
instr = self.structdes['instrument'].view(dtype=[('var1', 'f4')])
dum = tools.categorical(instr)
test_dum = np.column_stack(([dum[_] for _ in dum.dtype.names[-5:]]))
assert_array_equal(test_dum, self.dummy)
assert_equal(len(dum.dtype.names), 6)
def test_structarray2d_drop(self):
des = tools.categorical(self.structdes, col='instrument', drop=True)
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 8)
def test_structarray1d_drop(self):
instr = self.structdes['instrument'].view(dtype=[('var1', 'f4')])
dum = tools.categorical(instr, drop=True)
test_dum = np.column_stack(([dum[_] for _ in dum.dtype.names]))
assert_array_equal(test_dum, self.dummy)
assert_equal(len(dum.dtype.names), 5)
# def test_arraylike2d(self):
# des = tools.categorical(self.structdes.tolist(), col=2)
# test_des = des[:,-5:]
# assert_array_equal(test_des, self.dummy)
# assert_equal(des.shape[1], 9)
# def test_arraylike1d(self):
# instr = self.structdes['instrument'].tolist()
# dum = tools.categorical(instr)
# test_dum = dum[:,-5:]
# assert_array_equal(test_dum, self.dummy)
# assert_equal(dum.shape[1], 6)
# def test_arraylike2d_drop(self):
# des = tools.categorical(self.structdes.tolist(), col=2, drop=True)
# test_des = des[:,-5:]
# assert_array_equal(test_des, self.dummy)
# assert_equal(des.shape[1], 8)
# def test_arraylike1d_drop(self):
# instr = self.structdes['instrument'].tolist()
# dum = tools.categorical(instr, drop=True)
# assert_array_equal(dum, self.dummy)
# assert_equal(dum.shape[1], 5)
class TestCategoricalString(TestCategoricalNumerical):
# comment out until we have type coercion
# def test_array2d(self):
# des = np.column_stack((self.des, self.instr, self.des))
# des = tools.categorical(des, col=2)
# assert_array_equal(des[:,-5:], self.dummy)
# assert_equal(des.shape[1],10)
# def test_array1d(self):
# des = tools.categorical(self.instr)
# assert_array_equal(des[:,-5:], self.dummy)
# assert_equal(des.shape[1],6)
# def test_array2d_drop(self):
# des = np.column_stack((self.des, self.instr, self.des))
# des = tools.categorical(des, col=2, drop=True)
# assert_array_equal(des[:,-5:], self.dummy)
# assert_equal(des.shape[1],9)
def test_array1d_drop(self):
des = tools.categorical(self.string_var, drop=True)
assert_array_equal(des, self.dummy)
assert_equal(des.shape[1],5)
def test_recarray2d(self):
des = tools.categorical(self.recdes, col='str_instr')
# better way to do this?
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 9)
def test_recarray2dint(self):
des = tools.categorical(self.recdes, col=3)
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 9)
def test_recarray1d(self):
instr = self.structdes['str_instr'].view(np.recarray)
dum = tools.categorical(instr)
test_dum = np.column_stack(([dum[_] for _ in dum.dtype.names[-5:]]))
assert_array_equal(test_dum, self.dummy)
assert_equal(len(dum.dtype.names), 6)
def test_recarray1d_drop(self):
instr = self.structdes['str_instr'].view(np.recarray)
dum = tools.categorical(instr, drop=True)
test_dum = np.column_stack(([dum[_] for _ in dum.dtype.names]))
assert_array_equal(test_dum, self.dummy)
assert_equal(len(dum.dtype.names), 5)
def test_recarray2d_drop(self):
des = tools.categorical(self.recdes, col='str_instr', drop=True)
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 8)
def test_structarray2d(self):
des = tools.categorical(self.structdes, col='str_instr')
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 9)
def test_structarray2dint(self):
des = tools.categorical(self.structdes, col=3)
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 9)
def test_structarray1d(self):
instr = self.structdes['str_instr'].view(dtype=[('var1', 'a10')])
dum = tools.categorical(instr)
test_dum = np.column_stack(([dum[_] for _ in dum.dtype.names[-5:]]))
assert_array_equal(test_dum, self.dummy)
assert_equal(len(dum.dtype.names), 6)
def test_structarray2d_drop(self):
des = tools.categorical(self.structdes, col='str_instr', drop=True)
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 8)
def test_structarray1d_drop(self):
instr = self.structdes['str_instr'].view(dtype=[('var1', 'a10')])
dum = tools.categorical(instr, drop=True)
test_dum = np.column_stack(([dum[_] for _ in dum.dtype.names]))
assert_array_equal(test_dum, self.dummy)
assert_equal(len(dum.dtype.names), 5)
def test_arraylike2d(self):
pass
def test_arraylike1d(self):
pass
def test_arraylike2d_drop(self):
pass
def test_arraylike1d_drop(self):
pass
def test_rec_issue302():
arr = np.rec.fromrecords([[10], [11]], names='group')
actual = tools.categorical(arr)
expected = np.rec.array([(10, 1.0, 0.0), (11, 0.0, 1.0)],
dtype=[('group', int), ('group_10', float), ('group_11', float)])
assert_array_equal(actual, expected)
def test_issue302():
arr = np.rec.fromrecords([[10, 12], [11, 13]], names=['group', 'whatever'])
actual = tools.categorical(arr, col=['group'])
expected = np.rec.array([(10, 12, 1.0, 0.0), (11, 13, 0.0, 1.0)],
dtype=[('group', int), ('whatever', int), ('group_10', float),
('group_11', float)])
assert_array_equal(actual, expected)
def test_pandas_const_series():
dta = longley.load_pandas()
series = dta.exog['GNP']
series = tools.add_constant(series, prepend=False)
assert_string_equal('const', series.columns[1])
assert_equal(series.var(0)[1], 0)
def test_pandas_const_series_prepend():
dta = longley.load_pandas()
series = dta.exog['GNP']
series = tools.add_constant(series, prepend=True)
assert_string_equal('const', series.columns[0])
assert_equal(series.var(0)[0], 0)
def test_pandas_const_df():
dta = longley.load_pandas().exog
dta = tools.add_constant(dta, prepend=False)
assert_string_equal('const', dta.columns[-1])
assert_equal(dta.var(0)[-1], 0)
def test_pandas_const_df_prepend():
dta = longley.load_pandas().exog
# regression test for #1025
dta['UNEMP'] /= dta['UNEMP'].std()
dta = tools.add_constant(dta, prepend=True)
assert_string_equal('const', dta.columns[0])
assert_equal(dta.var(0)[0], 0)
def test_chain_dot():
A = np.arange(1,13).reshape(3,4)
B = np.arange(3,15).reshape(4,3)
C = np.arange(5,8).reshape(3,1)
assert_equal(tools.chain_dot(A,B,C), np.array([[1820],[4300],[6780]]))
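# Reference sketch (not part of the original test module): chain_dot is a
# left-to-right reduction with np.dot, so the assertion above is equivalent
# to checking against this minimal reimplementation:
def _chain_dot_reference(*arrs):
    from functools import reduce  # available on both Python 2 and 3
    return reduce(np.dot, arrs)   # e.g. (A, B, C) -> A.dot(B).dot(C)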
class TestNanDot(object):
@classmethod
def setupClass(cls):
nan = np.nan
cls.mx_1 = np.array([[nan, 1.], [2., 3.]])
cls.mx_2 = np.array([[nan, nan], [2., 3.]])
cls.mx_3 = np.array([[0., 0.], [0., 0.]])
cls.mx_4 = np.array([[1., 0.], [1., 0.]])
cls.mx_5 = np.array([[0., 1.], [0., 1.]])
cls.mx_6 = np.array([[1., 2.], [3., 4.]])
def test_11(self):
test_res = tools.nan_dot(self.mx_1, self.mx_1)
expected_res = np.array([[ np.nan, np.nan], [ np.nan, 11.]])
assert_array_equal(test_res, expected_res)
def test_12(self):
nan = np.nan
test_res = tools.nan_dot(self.mx_1, self.mx_2)
expected_res = np.array([[ nan, nan], [ nan, nan]])
assert_array_equal(test_res, expected_res)
def test_13(self):
nan = np.nan
test_res = tools.nan_dot(self.mx_1, self.mx_3)
expected_res = np.array([[ 0., 0.], [ 0., 0.]])
assert_array_equal(test_res, expected_res)
def test_14(self):
nan = np.nan
test_res = tools.nan_dot(self.mx_1, self.mx_4)
expected_res = np.array([[ nan, 0.], [ 5., 0.]])
assert_array_equal(test_res, expected_res)
def test_41(self):
nan = np.nan
test_res = tools.nan_dot(self.mx_4, self.mx_1)
expected_res = np.array([[ nan, 1.], [ nan, 1.]])
assert_array_equal(test_res, expected_res)
def test_23(self):
nan = np.nan
test_res = tools.nan_dot(self.mx_2, self.mx_3)
expected_res = np.array([[ 0., 0.], [ 0., 0.]])
assert_array_equal(test_res, expected_res)
def test_32(self):
nan = np.nan
test_res = tools.nan_dot(self.mx_3, self.mx_2)
expected_res = np.array([[ 0., 0.], [ 0., 0.]])
assert_array_equal(test_res, expected_res)
def test_24(self):
nan = np.nan
test_res = tools.nan_dot(self.mx_2, self.mx_4)
expected_res = np.array([[ nan, 0.], [ 5., 0.]])
assert_array_equal(test_res, expected_res)
def test_25(self):
nan = np.nan
test_res = tools.nan_dot(self.mx_2, self.mx_5)
expected_res = np.array([[ 0., nan], [ 0., 5.]])
assert_array_equal(test_res, expected_res)
def test_66(self):
nan = np.nan
test_res = tools.nan_dot(self.mx_6, self.mx_6)
expected_res = np.array([[ 7., 10.], [ 15., 22.]])
assert_array_equal(test_res, expected_res)
| bsd-3-clause |
sunyihuan326/DeltaLab | Andrew_NG_learning/class_one/week_three/syh_01.py | 1 | 5963 | # coding:utf-8
'''
Created on 2017/11/6
@author: sunyihuan
'''
import numpy as np
import matplotlib.pyplot as plt
from class_one.week_three import testCases
import sklearn
import sklearn.datasets
import sklearn.linear_model
from class_one.week_three.planar_utils import plot_decision_boundary, load_extra_datasets, load_planar_dataset, sigmoid
np.random.seed(1)
# load the dataset
X, Y = load_planar_dataset()
m_x = np.shape(X)
m_y = np.shape(Y)
# print(m_x, m_y)
Y = Y.ravel()
# fit a plain logistic-regression model for comparison
# clf = sklearn.linear_model.LogisticRegressionCV()
# clf.fit(X.T, Y.T)
# plot_decision_boundary(lambda x: clf.predict(x), X, Y)
# plt.title("Logistic Regression")
# compute the logistic model accuracy
# LR_predictions = clf.predict(X.T)
# print(float((np.dot(Y, LR_predictions) + np.dot(1 - Y, 1 - LR_predictions)) / float(Y.size) * 100))
# define the layer sizes
def layer_sizes(X, Y, n_h=4):
n_x = X[0]
n_y = Y[0]
return (n_x, n_h, n_y)
# X_assess, Y_assess = testCases.layer_sizes_test_case()
# (n_x, n_h, n_y) = layer_sizes(X_assess.shape, Y_assess.shape)
# print(str(n_x), str(n_h), str(n_y))
def initialize_parameters(n_x, n_h, n_y):
np.random.seed(2)
W1 = np.random.randn(n_h, n_x) * 0.01
b1 = np.zeros((n_h, 1))
W2 = np.random.randn(n_y, n_h) * 0.01
b2 = np.zeros((n_y, 1))
assert W1.shape == (n_h, n_x)
assert b1.shape == (n_h, 1)
assert W2.shape == (n_y, n_h)
assert b2.shape == (n_y, 1)
parameters = {
"W1": W1,
"b1": b1,
"W2": W2,
"b2": b2
}
return parameters
# test the initialize_parameters function
# n_x, n_h, n_y = testCases.initialize_parameters_test_case()
#
# parameters = initialize_parameters(n_x, n_h, n_y)
# print(str(parameters["W1"]),str(parameters["b2"]))
# define forward propagation
def forward_propagation(X, parameters):
W1 = parameters["W1"]
b1 = parameters["b1"]
W2 = parameters["W2"]
b2 = parameters["b2"]
Z1 = np.dot(W1, X) + b1
A1 = np.tanh(Z1)
Z2 = np.dot(W2, A1) + b2
A2 = sigmoid(Z2)
assert A2.shape == (1, X.shape[1])
cache = {
"Z1": Z1,
"A1": A1,
"Z2": Z2,
"A2": A2
}
return A2, cache
# test the forward_propagation function
# X_assess, parameters = testCases.forward_propagation_test_case()
# A2, cache = forward_propagation(X_assess, parameters)
# print(np.mean(cache["Z2"]))
# compute the cost function
def compute_cost(A2, Y, parameters):
m = Y.shape[1]
logprobs = np.multiply(np.log(A2), Y) + np.multiply(np.log(1 - A2), 1 - Y)
cost = - 1 / m * np.sum(logprobs)  # sum over all examples
cost = np.squeeze(cost)
assert isinstance(cost, float)
return cost
# test the compute_cost function
# A2, Y_assess, parameters = testCases.compute_cost_test_case()
# print(str(compute_cost(A2, Y_assess, parameters)))
# define backward propagation
def backward_propagation(X, Y, parameters, cache):
m = X.shape[1]
W1 = parameters["W1"]
W2 = parameters["W2"]
A1 = cache["A1"]
A2 = cache["A2"]
dZ2 = A2 - Y
dW2 = 1 / m * np.dot(dZ2, A1.T)
db2 = 1 / m * np.sum(dZ2, axis=1, keepdims=True)
dZ1 = np.dot(W2.T, dZ2) * (1 - np.power(A1, 2))
dW1 = 1 / m * np.dot(dZ1, X.T)
db1 = 1 / m * np.sum(dZ1, axis=1, keepdims=True)
grads = {
"dW1": dW1,
"db1": db1,
"dW2": dW2,
"db2": db2
}
return grads
# test the backward_propagation function
# parameters, cache, X_assess, Y_assess = testCases.backward_propagation_test_case()
# grads = backward_propagation(X_assess, Y_assess, parameters, cache)
# print(str(grads["dW2"]), str(grads["db2"]))
def update_parameters(parameters, grads, learning_rate=1.2):
W1 = parameters["W1"]
b1 = parameters["b1"]
W2 = parameters["W2"]
b2 = parameters["b2"]
dW1 = grads["dW1"]
db1 = grads["db1"]
dW2 = grads["dW2"]
db2 = grads["db2"]
W1 = W1 - learning_rate * dW1
b1 = b1 - db1 * learning_rate
W2 = W2 - learning_rate * dW2
b2 = b2 - db2 * learning_rate
parameters = {
"W1": W1,
"b1": b1,
"W2": W2,
"b2": b2
}
return parameters
# test the update_parameters function
# parameters, grads = testCases.update_parameters_test_case()
# parameters = update_parameters(parameters, grads)
# print(str(parameters["b1"]), str(parameters["b2"]))
def nn_model(X, Y, n_h, num_iterations=1000, print_cost=False):
np.random.seed(3)
n_x, n_h, n_y = layer_sizes(X.shape, Y.shape, n_h)
parameters = initialize_parameters(n_x, n_h, n_y)
for i in range(0, num_iterations):
A2, cache = forward_propagation(X, parameters)
cost = compute_cost(A2, Y, parameters)
grads = backward_propagation(X, Y, parameters, cache)
parameters = update_parameters(parameters, grads, learning_rate=1.2)
if print_cost and i % 1000 == 0:
print("Cost after interation %i:%f" % (i, cost))
return parameters
# test the nn_model function
# X_assess, Y_assess = testCases.nn_model_test_case()
#
# parameters = nn_model(X_assess, Y_assess, 4, num_iterations=10000, print_cost=False)
# print(str(parameters["W1"]))
def predict(parameters, X):
"""
Using the learned parameters, predicts a class for each example in X
Arguments:
parameters -- python dictionary containing your parameters
X -- input data of size (n_x, m)
Returns
predictions -- vector of predictions of our model (red: 0 / blue: 1)
"""
# Computes probabilities using forward propagation, and classifies to 0/1 using 0.5 as the threshold.
### START CODE HERE ### (≈ 2 lines of code)
A2, cache = forward_propagation(X, parameters)
predictions = np.array(A2 > 0.5)
### END CODE HERE ###
return predictions
# test the predict function
# parameters, X_assess = testCases.predict_test_case()
#
# predictions = predict(parameters, X_assess)
# print("predictions mean = " + str(np.mean(predictions)))
| mit |
kdebrab/pandas | pandas/tests/extension/base/interface.py | 2 | 2196 | import numpy as np
import pandas as pd
from pandas.compat import StringIO
from pandas.core.dtypes.common import is_extension_array_dtype
from pandas.core.dtypes.dtypes import ExtensionDtype
from .base import BaseExtensionTests
class BaseInterfaceTests(BaseExtensionTests):
"""Tests that the basic interface is satisfied."""
# ------------------------------------------------------------------------
# Interface
# ------------------------------------------------------------------------
def test_len(self, data):
assert len(data) == 100
def test_ndim(self, data):
assert data.ndim == 1
def test_can_hold_na_valid(self, data):
# GH-20761
assert data._can_hold_na is True
def test_memory_usage(self, data):
s = pd.Series(data)
result = s.memory_usage(index=False)
assert result == s.nbytes
def test_array_interface(self, data):
result = np.array(data)
assert result[0] == data[0]
def test_repr(self, data):
ser = pd.Series(data)
assert data.dtype.name in repr(ser)
df = pd.DataFrame({"A": data})
repr(df)
def test_repr_array(self, data):
# some arrays may be able to assert
# attributes in the repr
repr(data)
def test_repr_array_long(self, data):
# some arrays may be able to assert a ... in the repr
with pd.option_context('display.max_seq_items', 1):
repr(data)
def test_dtype_name_in_info(self, data):
buf = StringIO()
pd.DataFrame({"A": data}).info(buf=buf)
result = buf.getvalue()
assert data.dtype.name in result
def test_is_extension_array_dtype(self, data):
assert is_extension_array_dtype(data)
assert is_extension_array_dtype(data.dtype)
assert is_extension_array_dtype(pd.Series(data))
assert isinstance(data.dtype, ExtensionDtype)
def test_no_values_attribute(self, data):
# GH-20735: EA's with .values attribute give problems with internal
# code, disallowing this for now until solved
assert not hasattr(data, 'values')
assert not hasattr(data, '_values')
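# Usage sketch (not part of the original module): extension-array authors
# consume these checks by subclassing and supplying pytest fixtures. The
# names `MyArray` / `make_my_array` below are hypothetical stand-ins:
#
#   import pytest
#
#   class TestMyArrayInterface(BaseInterfaceTests):
#       @pytest.fixture
#       def data(self):
#           # must yield an array of length 100, as test_len assumes
#           return make_my_array(length=100)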
| bsd-3-clause |
ResByte/graph_slam | scripts/robot.py | 1 | 1487 | #!/usr/bin/env python
import roslib
import rospy
import sys
from geometry_msgs.msg import Twist
import numpy as np
from nav_msgs.msg import Odometry
from tf.transformations import euler_from_quaternion
import matplotlib.pyplot as plt
from sensor_msgs.msg import PointCloud2
import sensor_msgs.point_cloud2 as pc2
import itertools
class Robot():
"""This is a generic robot class to implement various machine learning algorithms and """
def __init__(self):
self.pose = [] #check if this needs to be initialized
rospy.init_node('robot',anonymous = False)
def odomCb(self,msg):
#print msg.pose.pose
quaternion = (msg.pose.pose.orientation.x,msg.pose.pose.orientation.y,msg.pose.pose.orientation.z,msg.pose.pose.orientation.w)
euler = euler_from_quaternion(quaternion)
#print euler
self.pose = [msg.pose.pose.position.x,msg.pose.pose.position.y,euler[2]]
print self.pose[0],self.pose[1],self.pose[2]
def odomSub(self):
rospy.Subscriber('/odom',Odometry,self.odomCb)
def cloudCb(self,data):
#data_out = pc2.read_points(data, field_names=None, skip_nans=False, uvs=[[data.width, data.height]])
#cloud = list(itertools.islice(data_out,0,100))
cloud = np.asarray(data)
print
def cloudSub(self):
rospy.Subscriber('/camera/depth/points',PointCloud2,self.cloudCb)
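# Quick standalone check (not part of the original node): the yaw used in
# odomCb comes from tf's euler_from_quaternion; the identity quaternion
# (x, y, z, w) = (0, 0, 0, 1) maps to zero roll, pitch and yaw.
def _euler_demo():
    roll, pitch, yaw = euler_from_quaternion((0.0, 0.0, 0.0, 1.0))
    print roll, pitch, yaw  # -> 0.0 0.0 0.0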
if __name__ == '__main__':
print "init_node"
try:
robot = Robot()
while not rospy.is_shutdown():
#robot.odomSub()
robot.cloudSub()
except:
rospy.loginfo("node terminated.") | gpl-2.0 |