Dataset columns:
    repo_name : string (lengths 6 to 67)
    path      : string (lengths 5 to 185)
    copies    : string (lengths 1 to 3)
    size      : string (lengths 4 to 6)
    content   : string (lengths 1.02k to 962k)
    license   : string (15 classes)
Zsailer/epistasis
epistasis/models/classifiers/base.py
2
2387
import numpy as np
import pandas as pd

# Scikit-learn classifiers
from sklearn.preprocessing import binarize

from epistasis.mapping import EpistasisMap
from epistasis.models.base import BaseModel, use_sklearn
from epistasis.models.utils import (XMatrixException, arghandler)
from epistasis.models.linear import EpistasisLinearRegression

from gpmap import GenotypePhenotypeMap


class EpistasisClassifierMixin:
    """A Mixin class for epistasis classifiers
    """
    def _fit_additive(self, X=None, y=None):
        # Construct an additive model.
        self.Additive = EpistasisLinearRegression(
            order=1, model_type=self.model_type)

        self.Additive.add_gpm(self.gpm)

        # Prepare a high-order model
        self.Additive.epistasis = EpistasisMap(
            sites=self.Additive.Xcolumns,
        )

        # Fit the additive model and infer additive phenotypes
        self.Additive.fit(X=X, y=y)
        return self

    def _fit_classifier(self, X=None, y=None):
        # This method builds x and y from data.
        add_coefs = self.Additive.epistasis.values
        add_X = self.Additive._X(data=X)

        # Project X into padd space.
        X = add_X * add_coefs

        # Label X.
        y = binarize(y.reshape(1, -1), self.threshold)[0]
        self.classes = y

        # Fit classifier.
        super().fit(X=X, y=y)
        return self

    def fit_transform(self, X=None, y=None, **kwargs):
        self.fit(X=X, y=y, **kwargs)
        ypred = self.predict(X=X)

        # Transform map.
        gpm = GenotypePhenotypeMap.read_dataframe(
            dataframe=self.gpm.data[ypred==1],
            wildtype=self.gpm.wildtype,
            mutations=self.gpm.mutations
        )
        return gpm

    def predict(self, X=None):
        Xadd = self.Additive._X(data=X)
        X = Xadd * self.Additive.epistasis.values
        return super().predict(X=X)

    def predict_transform(self, X=None, y=None):
        x = self.predict(X=X)
        y[x <= 0.5] = self.threshold
        return y

    def predict_log_proba(self, X=None):
        Xadd = self.Additive._X(data=X)
        X = Xadd * self.Additive.epistasis.values
        return super().predict_log_proba(X)

    def predict_proba(self, X=None):
        Xadd = self.Additive._X(data=X)
        X = Xadd * self.Additive.epistasis.values
        return super().predict_proba(X=X)
unlicense
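A standalone sketch of the projection-and-binarize step used by EpistasisClassifierMixin above; the design matrix, additive coefficients, phenotypes and threshold below are made-up example values, not output of the epistasis package.

import numpy as np
from sklearn.preprocessing import binarize

add_X = np.array([[1., 0., 1.],
                  [1., 1., 0.],
                  [1., 1., 1.]])          # additive design matrix (hypothetical)
add_coefs = np.array([0.2, 0.5, -0.1])    # fitted additive coefficients (hypothetical)
phenotypes = np.array([0.8, 0.05, 0.6])   # observed phenotypes (hypothetical)

X_proj = add_X * add_coefs                # project X into additive space
labels = binarize(phenotypes.reshape(1, -1), threshold=0.2)[0]  # 1 above threshold, else 0
print(X_proj)
print(labels)                             # [1. 0. 1.]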
OthmanEmpire/project_monies
test/unit/test_unit_visualise.py
1
4289
import unittest
import datetime as dt

import numpy as np
import pandas as pd
from pandas.util.testing import assert_frame_equal

import monies.monies.visualise as vis


def disabled(func):
    def _wrapper(f):
        print(str(f) + " test is disabled!")
    return _wrapper(func)


class VisualiseUnit(unittest.TestCase):

    def setUp(self):
        header = ["DATE", "BALANCE", "AMOUNT", "DESCRIPTION"]
        self.dfInp = self.setupDefault(header)
        self.dfQuery = self.setupQuery(header)
        self.dfPlot = self.setupPlot(header)
        self.dfCategories = self.setupCategorise(header)

    def setupDefault(self, header):
        body = \
        [
            ["29/12/2012", "3472.63", "-10.45",
             "CARD PAYMENT TO WWW.JUST EAT.CO.UK,10.45 GBP, "
             "RATE 1.00/GBP ON 26-12-2012"],
            ["28/12/2012", "3483.08", "-10.00",
             "CARD PAYMENT TO WWW.JUST EAT.CO.UK,10.45 GBP, "
             "RATE 1.00/GBP ON 26-12-2012"],
            ["28/12/2011", "1344.08", "23.00",
             "CARD PAYMENT TO WWW.UCAS.COM,23.00 GBP, RATE 1.00/GBP ON "]
        ]
        return pd.DataFrame(body, columns=header)

    def setupQuery(self, header):
        body = \
        [
            ["29/12/2012", "3472.63", "-10.45",
             "CARD PAYMENT TO WWW.JUST EAT.CO.UK,10.45 GBP, "
             "RATE 1.00/GBP ON 26-12-2012"],
            ["28/12/2012", "3483.08", "-10.00",
             "CARD PAYMENT TO WWW.JUST EAT.CO.UK,10.45 GBP, "
             "RATE 1.00/GBP ON 26-12-2012"],
        ]
        return pd.DataFrame(body, columns=header)

    def setupPlot(self, header):
        body = \
        [
            ["29/12/2012", "3472.63", "-10.45",
             "CARD PAYMENT TO WWW.JUST EAT.CO.UK,10.45 GBP, "
             "RATE 1.00/GBP ON 26-12-2012"],
            ["28/12/2012", "3483.08", "-10.00",
             "CARD PAYMENT TO WWW.JUST EAT.CO.UK,10.45 GBP, "
             "RATE 1.00/GBP ON 26-12-2012"],
            ["27/12/2012", "3483.08", "-14.00",
             "CARD PAYMENT TO WWW.JUST EAT.CO.UK,10.45 GBP, "
             "RATE 1.00/GBP ON 26-12-2012"],
        ]
        return pd.DataFrame(body, columns=header)

    def setupCategorise(self, header):
        body = \
        [
            ["29/12/2012", "3472.63", "-10.45",
             "CARD PAYMENT TO WWW.JUST EAT.CO.UK,10.45 GBP, "
             "RATE 1.00/GBP ON 26-12-2012"],
            ["28/12/2012", "3483.08", "-10.00",
             "CARD PAYMENT TO WWW.JUST EAT.CO.UK,10.45 GBP, "
             "RATE 1.00/GBP ON 26-12-2012"],
        ]
        return [("Food", pd.DataFrame(body, columns=header))]

    def testQuery(self):
        exp = self.dfQuery
        out = vis.query(self.dfInp, "JUST EAT")
        self._assertDataFrames(exp, out)

    def testCategorise(self):
        exp = self.dfCategories
        out = vis.categorise(self.dfInp, [("Food", "JUST EAT")])
        for i in range(max(len(out), len(exp))):
            _, dfOut = out[i]
            _, dfExp = exp[i]
            self._assertDataFrames(dfExp, dfOut)

    def testSliceMonthly(self):

        def _generateMonthData(month):
            days = pd.date_range(dt.datetime(2015, month+1, 1),
                                 dt.datetime(2015, month, 1))
            data = np.random.rand(len(days))
            month = pd.DataFrame({"BALANCE": data,
                                  "AMOUNT": data,
                                  "DESCRIPTION": data},
                                 index=days)
            return month

        jan = _generateMonthData(1)
        feb = _generateMonthData(2)
        mar = _generateMonthData(3)
        allMonths = jan + feb + mar

        months = vis.sliceMonthly(allMonths)
        assert_frame_equal(months[0], jan)
        assert_frame_equal(months[1], feb)
        assert_frame_equal(months[2], mar)

    def _assertDataFrames(self, exp, out):
        if not out.equals(exp):
            self.fail("EXPECTED:\n{}\n\n\n"
                      "ACTUAL:\n{}\n\n\n".format(exp, out))
mit
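A side note on the test file above: pandas.util.testing was later deprecated, and on a current pandas the same frame comparison would be written against pandas.testing. A minimal sketch, assuming only a recent pandas install:

import pandas as pd
from pandas.testing import assert_frame_equal

left = pd.DataFrame({"AMOUNT": ["-10.45"]})
right = pd.DataFrame({"AMOUNT": ["-10.45"]})
assert_frame_equal(left, right)   # raises AssertionError on any mismatch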
anaderi/hep_ml
hep_ml/reweight.py
3
11419
""" **hep_ml.reweight** contains reweighting algorithms. Reweighting is procedure of finding such weights for original distribution, that make distribution of one or several variables identical in original distribution and target distribution. Remark: if each variable has identical distribution in two samples, this doesn't imply that multidimensional distributions are equal (almost surely they aren't). Aim of reweighters is to get identical multidimensional distributions. Algorithms are implemented as estimators, fitting and reweighting stages are split. Fitted reweighter can be applied many times to different data, pickled and so on. Examples ________ The most common use case is reweighting of Monte-Carlo simulations results to sPlotted real data. (original weights are all equal to 1 and could be skipped, but left here for example) >>> from hep_ml.reweight import BinsReweighter, GBReweighter >>> original_weights = numpy.ones(len(MC_data)) >>> reweighter = BinsReweighter(n_bins=100, n_neighs=3) >>> reweighter.fit(original=MC_data, target=RealData, >>> original_weight=original_weights, target_weight=sWeights) >>> MC_weights = reweighter.predict_weights(MC_data, original_weight=original_weights) The same example for `GBReweighter`: >>> reweighter = GBReweighter(max_depth=2, other_args={'subsample': 0.5}) >>> reweighter.fit(original=MC_data, target=RealData, target_weight=sWeights) >>> MC_weights = reweighter.predict_weights(MC_data) """ from __future__ import division, print_function, absolute_import from sklearn.base import BaseEstimator from scipy.ndimage import gaussian_filter from hep_ml.commonutils import check_sample_weight, weighted_quantile from hep_ml import gradientboosting as gb from hep_ml import losses from warnings import warn import numpy __author__ = 'Alex Rogozhnikov' __all__ = ['BinsReweighter', 'GBReweighter'] warn("Module hep_ml.reweight is unstable, it's API may be changed in near future.") def bincount_nd(x, weights, shape): """ Does the same thing as numpy.bincount, but allows binning in several integer variables. :param x: numpy.array of shape [n_samples, n_features] with non-negative integers :param weights: weights of samples, array of shape [n_samples] :param shape: shape of result, should be greater, then maximal value :return: weighted number of event in each bin, of shape=shape """ assert len(weights) == len(x), 'length of weight is different: {} {}'.format(len(x), len(weights)) assert x.shape[1] == len(shape), 'wrong length of shape: {} {}'.format(x.shape[1], len(shape)) maximals = numpy.max(x, axis=0) assert numpy.all(maximals < shape), 'smaller shape: {} {}'.format(maximals, shape) result = numpy.zeros(shape, dtype=float) numpy.add.at(result, tuple(x.T), weights) return result class ReweighterMixin(object): """Supplementary class which shows the interface of reweighter. Reweighters should be derived from this class.""" n_features_ = None def _normalize_input(self, data, weights): """ Normalize input of reweighter :param data: array like of shape [n_samples] or [n_samples, n_features] :param weights: array-like of shape [n_samples] or None :return: tuple with data - numpy.array of shape [n_samples, n_features] weights - numpy.array of shape [n_samples] with mean = 1. 
""" weights = check_sample_weight(data, sample_weight=weights, normalize=True) data = numpy.array(data) if len(data.shape) == 1: data = data[:, numpy.newaxis] if self.n_features_ is None: self.n_features_ = data.shape[1] assert self.n_features_ == data.shape[1], \ 'number of features is wrong: {} {}'.format(self.n_features_, data.shape[1]) return data, weights def fit(self, original, target, original_weight, target_weight): raise NotImplementedError('To be overriden in descendants') def predict_weights(self, original, original_weight=None): raise NotImplementedError('To be overriden in descendants') class BinsReweighter(BaseEstimator, ReweighterMixin): def __init__(self, n_bins=200, n_neighs=3.): """ Use bins for reweighting. Bins' edges are computed using quantiles along each axis (which is better than bins of even size). This method works fine for 1d/2d histograms, while being quite unstable or inaccurate for higher dimensions. :param int n_bins: how many bins to use for each input variable. :param int n_neighs: size of gaussian filter (in bins). This parameter is responsible for tradeoff between stability of rule and accuracy of predictions. With increase of n_neighs the """ self.n_percentiles = n_bins self.n_neighs = n_neighs # if number of events in bins is less than this value, number of events is clipped. self.min_in_the_bin = 1. def compute_bin_indices(self, data): """ Compute id of bin along each axis. :param data: data, array-like of shape [n_samples, n_features] with the same order of features as in training :return: numpy.array of shape [n_samples, n_features] with integers, each from [0, n_bins - 1] """ bin_indices = [] for axis, axis_edges in enumerate(self.edges): bin_indices.append(numpy.searchsorted(axis_edges, data[:, axis])) return numpy.array(bin_indices).T def fit(self, original, target, original_weight=None, target_weight=None): """ Prepare reweighting formula by computing histograms. :param original: values from original distribution, array-like of shape [n_samples, n_features] :param target: values from target distribution, array-like of shape [n_samples, n_features] :param original_weight: weights for samples of original distributions :param target_weight: weights for samples of original distributions :return: self """ self.n_features_ = None original, original_weight = self._normalize_input(original, original_weight) target, target_weight = self._normalize_input(target, target_weight) target_perc = numpy.linspace(0, 1, self.n_percentiles + 1)[1:-1] self.edges = [] for axis in range(self.n_features_): self.edges.append(weighted_quantile(target[:, axis], quantiles=target_perc, sample_weight=target_weight)) bins_weights = [] for data, weights in [(original, original_weight), (target, target_weight)]: bin_indices = self.compute_bin_indices(data) bin_w = bincount_nd(bin_indices, weights=weights, shape=[self.n_percentiles] * self.n_features_) smeared_weights = gaussian_filter(bin_w, sigma=self.n_neighs, truncate=2.5) bins_weights.append(smeared_weights.clip(self.min_in_the_bin)) bin_orig_weights, bin_targ_weights = bins_weights self.transition = bin_targ_weights / bin_orig_weights return self def predict_weights(self, original, original_weight=None): """ Returns corrected weights. Result is computed as original_weight * reweighter_multipliers. :param original: values from original distribution of shape [n_samples, n_features] :param original_weight: weights of samples before reweighting. :return: numpy.array of shape [n_samples] with new weights. 
""" original, original_weight = self._normalize_input(original, original_weight) bin_indices = self.compute_bin_indices(original) results = self.transition[tuple(bin_indices.T)] * original_weight return results class GBReweighter(BaseEstimator, ReweighterMixin): def __init__(self, n_estimators=40, learning_rate=0.2, max_depth=3, min_samples_leaf=200, gb_args=None): """ Gradient Boosted Reweighter - a reweighter algorithm based on ensemble of regression trees. Parameters have the same role, as in gradient boosting. Special loss function is used, trees are trained to maximize symmetrized binned chi-squared statistics. Training takes much more time than for bin-based versions, but `GBReweighter` is capable to work in high dimensions while keeping reweighting rule reliable and precise (and even smooth if many trees are used). :param n_estimators: number of trees :param learning_rate: float from [0, 1]. Lesser learning rate requires more trees, but makes reweighting rule more stable. :param max_depth: maximal depth of trees :param min_samples_leaf: minimal number of events in the leaf. If many :param gb_args: other parameters passed to gradient boosting. See :class:`hep_ml.gradientboosting.UGradientBoostingClassifier` """ self.learning_rate = learning_rate self.n_estimators = n_estimators self.max_depth = max_depth self.min_samples_leaf = min_samples_leaf self.gb_args = gb_args def fit(self, original, target, original_weight=None, target_weight=None): """ Prepare reweighting formula by training sequence of trees. :param original: values from original distribution, array-like of shape [n_samples, n_features] :param target: values from target distribution, array-like of shape [n_samples, n_features] :param original_weight: weights for samples of original distributions :param target_weight: weights for samples of original distributions :return: self """ self.n_features_ = None if self.gb_args is None: self.gb_args = {} original, original_weight = self._normalize_input(original, original_weight) target, target_weight = self._normalize_input(target, target_weight) self.gb = gb.UGradientBoostingClassifier(loss=losses.ReweightLossFunction(), n_estimators=self.n_estimators, max_depth=self.max_depth, min_samples_leaf=self.min_samples_leaf, learning_rate=self.learning_rate, **self.gb_args) data = numpy.vstack([original, target]) target = numpy.array([1] * len(original) + [0] * len(target)) weights = numpy.hstack([original_weight, target_weight]) self.gb.fit(data, target, sample_weight=weights) return self def predict_weights(self, original, original_weight=None): """ Returns corrected weights. Result is computed as original_weight * reweighter_multipliers. :param original: values from original distribution of shape [n_samples, n_features] :param original_weight: weights of samples before reweighting. :return: numpy.array of shape [n_samples] with new weights. """ original, original_weight = self._normalize_input(original, original_weight) multipliers = numpy.exp(self.gb.decision_function(original)) return multipliers * original_weight
apache-2.0
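A minimal sketch of the weighted n-dimensional binning that bincount_nd performs in the module above, using only numpy; the toy bin indices and weights are invented for illustration.

import numpy as np

x = np.array([[0, 1],
              [0, 1],
              [2, 0]])            # integer bin indices, shape (n_samples, n_features)
weights = np.array([0.5, 1.5, 2.0])
shape = (3, 2)                    # must exceed the per-axis maxima of x

result = np.zeros(shape, dtype=float)
np.add.at(result, tuple(x.T), weights)   # unbuffered scatter-add of the weights
print(result)
# [[0. 2.]
#  [0. 0.]
#  [2. 0.]]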
corochann/chainer-hands-on-tutorial
src/05_ptb_rnn/ptb/train_ptb.py
2
5504
""" RNN Training code with Penn Treebank (ptb) dataset Ref: https://github.com/chainer/chainer/blob/master/examples/ptb/train_ptb.py """ from __future__ import print_function import os import sys import argparse import numpy as np import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt import chainer import chainer.functions as F import chainer.links as L from chainer import training, iterators, serializers, optimizers from chainer.training import extensions sys.path.append(os.pardir) from RNN import RNN from RNN2 import RNN2 from RNN3 import RNN3 from RNNForLM import RNNForLM from parallel_sequential_iterator import ParallelSequentialIterator from bptt_updater import BPTTUpdater # Routine to rewrite the result dictionary of LogReport to add perplexity # values def compute_perplexity(result): result['perplexity'] = np.exp(result['main/loss']) if 'validation/main/loss' in result: result['val_perplexity'] = np.exp(result['validation/main/loss']) def main(): archs = { 'rnn': RNN, 'rnn2': RNN2, 'rnn3': RNN3, 'lstm': RNNForLM } parser = argparse.ArgumentParser(description='RNN example') parser.add_argument('--arch', '-a', choices=archs.keys(), default='rnn', help='Net architecture') parser.add_argument('--unit', '-u', type=int, default=100, help='Number of RNN units in each layer') parser.add_argument('--bproplen', '-l', type=int, default=20, help='Number of words in each mini-batch ' '(= length of truncated BPTT)') parser.add_argument('--batchsize', '-b', type=int, default=10, help='Number of images in each mini-batch') parser.add_argument('--epoch', '-e', type=int, default=10, help='Number of sweeps over the dataset to train') parser.add_argument('--gpu', '-g', type=int, default=-1, help='GPU ID (negative value indicates CPU)') parser.add_argument('--out', '-o', default='result', help='Directory to output the result') parser.add_argument('--resume', '-r', default='', help='Resume the training from snapshot') args = parser.parse_args() print('GPU: {}'.format(args.gpu)) print('# Architecture: {}'.format(args.arch)) print('# Minibatch-size: {}'.format(args.batchsize)) print('# epoch: {}'.format(args.epoch)) # 1. Load dataset: Penn Tree Bank long word sequence dataset train, val, test = chainer.datasets.get_ptb_words() n_vocab = max(train) + 1 # train is just an array of integers print('# vocab: {}'.format(n_vocab)) print('') # 2. Setup model model = archs[args.arch](n_vocab=n_vocab, n_units=args.unit) # , activation=F.tanh classifier_model = L.Classifier(model) classifier_model.compute_accuracy = False # we only want the perplexity if args.gpu >= 0: chainer.cuda.get_device(args.gpu).use() # Make a specified GPU current classifier_model.to_gpu() # Copy the model to the GPU eval_classifier_model = classifier_model.copy() # Model with shared params and distinct states eval_model = classifier_model.predictor # 2. Setup an optimizer optimizer = optimizers.Adam(alpha=0.001) #optimizer = optimizers.MomentumSGD() optimizer.setup(classifier_model) # 4. Setup an Iterator train_iter =ParallelSequentialIterator(train, args.batchsize) val_iter = ParallelSequentialIterator(val, 1, repeat=False) test_iter = ParallelSequentialIterator(test, 1, repeat=False) # 5. Setup an Updater updater = BPTTUpdater(train_iter, optimizer, args.bproplen, args.gpu) # 6. 
Setup a trainer (and extensions) trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out) # Evaluate the model with the test dataset for each epoch trainer.extend(extensions.Evaluator(val_iter, eval_classifier_model, device=args.gpu, # Reset the RNN state at the beginning of each evaluation eval_hook=lambda _: eval_model.reset_state()) ) trainer.extend(extensions.dump_graph('main/loss')) trainer.extend(extensions.snapshot(), trigger=(1, 'epoch')) interval = 500 trainer.extend(extensions.LogReport(postprocess=compute_perplexity, trigger=(interval, 'iteration'))) trainer.extend(extensions.PrintReport( ['epoch', 'iteration', 'perplexity', 'val_perplexity', 'elapsed_time'] ), trigger=(interval, 'iteration')) trainer.extend(extensions.PlotReport( ['perplexity', 'val_perplexity'], x_key='epoch', file_name='perplexity.png')) trainer.extend(extensions.ProgressBar(update_interval=10)) # Resume from a snapshot if args.resume: serializers.load_npz(args.resume, trainer) # Run the training trainer.run() serializers.save_npz('{}/{}_ptb.model' .format(args.out, args.arch), model) # Evaluate the final model print('test') eval_model.reset_state() evaluator = extensions.Evaluator(test_iter, eval_classifier_model, device=args.gpu) result = evaluator() print('test perplexity:', np.exp(float(result['main/loss']))) if __name__ == '__main__': main()
mit
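The compute_perplexity hook in the training script above just exponentiates the reported cross-entropy losses; a standalone illustration with a made-up result dictionary:

import numpy as np

result = {'main/loss': 4.6, 'validation/main/loss': 5.1}   # hypothetical logged losses
result['perplexity'] = np.exp(result['main/loss'])
if 'validation/main/loss' in result:
    result['val_perplexity'] = np.exp(result['validation/main/loss'])
print(result['perplexity'], result['val_perplexity'])      # ~99.5, ~164.0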
MonoCloud/zipline
tests/test_tradesimulation.py
21
2735
#
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd

from nose_parameterized import parameterized
from six.moves import range
from unittest import TestCase
from zipline import TradingAlgorithm
from zipline.test_algorithms import NoopAlgorithm
from zipline.utils import factory


class BeforeTradingAlgorithm(TradingAlgorithm):
    def __init__(self, *args, **kwargs):
        self.before_trading_at = []
        super(BeforeTradingAlgorithm, self).__init__(*args, **kwargs)

    def before_trading_start(self, data):
        self.before_trading_at.append(self.datetime)


FREQUENCIES = {'daily': 0, 'minute': 1}  # daily is less frequent than minute


class TestTradeSimulation(TestCase):

    def test_minutely_emissions_generate_performance_stats_for_last_day(self):
        params = factory.create_simulation_parameters(num_days=1,
                                                      data_frequency='minute',
                                                      emission_rate='minute')
        algo = NoopAlgorithm(sim_params=params)
        algo.run(source=[], overwrite_sim_params=False)
        self.assertEqual(algo.perf_tracker.day_count, 1.0)

    @parameterized.expand([('%s_%s_%s' % (num_days, freq, emission_rate),
                            num_days, freq, emission_rate)
                           for freq in FREQUENCIES
                           for emission_rate in FREQUENCIES
                           for num_days in range(1, 4)
                           if FREQUENCIES[emission_rate] <= FREQUENCIES[freq]])
    def test_before_trading_start(self, test_name, num_days, freq,
                                  emission_rate):
        params = factory.create_simulation_parameters(
            num_days=num_days, data_frequency=freq,
            emission_rate=emission_rate)

        algo = BeforeTradingAlgorithm(sim_params=params)
        algo.run(source=[], overwrite_sim_params=False)

        self.assertEqual(algo.perf_tracker.day_count, num_days)

        self.assertTrue(params.trading_days.equals(
            pd.DatetimeIndex(algo.before_trading_at)),
            "Expected %s but was %s." % (params.trading_days,
                                         algo.before_trading_at))
apache-2.0
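A small sketch of how the parameterized.expand call in the test above builds its case list: each tuple starts with a generated test name, followed by the arguments. Plain Python, no zipline needed.

FREQUENCIES = {'daily': 0, 'minute': 1}

cases = [('%s_%s_%s' % (num_days, freq, emission_rate),
          num_days, freq, emission_rate)
         for freq in FREQUENCIES
         for emission_rate in FREQUENCIES
         for num_days in range(1, 4)
         if FREQUENCIES[emission_rate] <= FREQUENCIES[freq]]

print(len(cases))   # 9 cases: 3 day counts x 3 valid (freq, emission_rate) pairs
print(cases[0])     # e.g. ('1_daily_daily', 1, 'daily', 'daily')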
cpcloud/bokeh
bokeh/sampledata/daylight.py
4
2482
"""Daylight hours from http://www.sunrisesunset.com """ import re import datetime import requests from six.moves import xrange from os.path import join, abspath, dirname import pandas as pd url = "http://sunrisesunset.com/calendar.asp" r0 = re.compile("<[^>]+>|&nbsp;|[\r\n\t]") r1 = re.compile(r"(\d+)(DST Begins|DST Ends)?Sunrise: (\d+):(\d\d)Sunset: (\d+):(\d\d)") def fetch_daylight_hours(lat, lon, tz, dst, year): """Fetch daylight hours from sunrisesunset.com for a given location. Parameters ---------- lat : float Location's latitude. lon : float Location's longitude. tz : int or float Time zone offset from UTC. Use floats for half-hour time zones. dst : int Daylight saving type, e.g. 0 -> none, 1 -> North America, 2 -> Europe. See sunrisesunset.com/custom.asp for other possible values. year : int Year (1901..2099). """ daylight = [] summer = 0 if lat >= 0 else 1 for month in xrange(1, 12+1): args = dict(url=url, lat=lat, lon=lon, tz=tz, dst=dst, year=year, month=month) response = requests.get("%(url)s?comb_city_info=_;%(lon)s;%(lat)s;%(tz)s;%(dst)s&month=%(month)s&year=%(year)s&time_type=1&wadj=1" % args) entries = r1.findall(r0.sub("", response.text)) for day, note, sunrise_hour, sunrise_minute, sunset_hour, sunset_minute in entries: if note == "DST Begins": summer = 1 elif note == "DST Ends": summer = 0 date = datetime.date(year, month, int(day)) sunrise = datetime.time(int(sunrise_hour), int(sunrise_minute)) sunset = datetime.time(int(sunset_hour), int(sunset_minute)) daylight.append([date, sunrise, sunset, summer]) return pd.DataFrame(daylight, columns=["Date", "Sunrise", "Sunset", "Summer"]) # daylight_warsaw_2013 = fetch_daylight_hours(52.2297, -21.0122, 1, 2, 2013) # daylight_warsaw_2013.to_csv("bokeh/sampledata/daylight_warsaw_2013.csv", index=False) def load_daylight_hours(file): path = join(dirname(abspath(__file__)), file) df = pd.read_csv(path, parse_dates=["Date", "Sunrise", "Sunset"]) df["Date"] = df.Date.map(lambda x: x.date()) df["Sunrise"] = df.Sunrise.map(lambda x: x.time()) df["Sunset"] = df.Sunset.map(lambda x: x.time()) return df daylight_warsaw_2013 = load_daylight_hours("daylight_warsaw_2013.csv")
bsd-3-clause
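A minimal, offline illustration of the two regexes used in the sampledata module above: r0 strips markup and r1 pulls (day, DST note, sunrise, sunset) tuples out of the remaining text. The sample HTML snippet is invented.

import re

r0 = re.compile("<[^>]+>|&nbsp;|[\r\n\t]")
r1 = re.compile(r"(\d+)(DST Begins|DST Ends)?Sunrise: (\d+):(\d\d)Sunset: (\d+):(\d\d)")

sample = "<td>15</td>Sunrise: 7:32&nbsp;Sunset: 16:45"
print(r1.findall(r0.sub("", sample)))
# [('15', '', '7', '32', '16', '45')]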
leonardolepus/pubmad
experiments/features_20140601/distribution.py
1
1833
import pickle
import os, sys
import itertools
import matplotlib.pyplot as plt
from scipy import stats

sys.path.insert(1, os.path.abspath('../../'))
from toolbox.graph_io.kegg.parse_KGML import KGML2Graph

features = {}
for feature_file in os.listdir('../../data/evex/Homo_Sapiens/features/'):
    with open('../../data/evex/Homo_Sapiens/features/'+feature_file, 'r') as f:
        try:
            features[feature_file] = pickle.load(f)
        except:
            print feature_file, sys.exc_info()

edge_betweenness_centrality = features['edge_betweenness_centrality']
del features['edge_betweenness_centrality']

def distribution(x, label):
    fig = plt.figure()
    ax1 = fig.add_subplot(2, 1, 1)
    ax1.set_title(label+'_hist')
    ax1.hist(x, bins = 100, histtype = 'step')
    ax2 = fig.add_subplot(2, 1, 2)
    ax2.set_title(label+'_cumulative_normalized')
    ax2.hist(x, bins = 100, cumulative = True, normed = True, histtype = 'step')
    plt.savefig(label)

for i in features:
    x = features[i].values()
    distribution(x, i)

pathway_f = '../../data/kegg/hsa04151.xml'
kegg = KGML2Graph(pathway_f)
kegg = kegg.to_undirected()

for i in features:
    ev = features[i]
    ke = [ev[j] for j in kegg.nodes_iter() if j in ev]
    ev = ev.values()
    fig = plt.figure()
    ax1 = fig.add_subplot(2, 1, 1)
    ax1.set_title(i+'_hist')
    ax1.hist(ev, bins = 100, histtype = 'step', label = 'evex', normed = True)
    ax1.hist(ke, bins = 100, histtype = 'step', label = 'kegg', normed = True)
    ax2 = fig.add_subplot(2, 1, 2)
    ax2.set_title(i+'_cumulative_normalized')
    ax2.hist(ev, bins = 100, cumulative = True, normed = True, histtype = 'step', label = 'evex')
    ax2.hist(ke, bins = 100, cumulative = True, normed = True, histtype = 'step', label = 'kegg')
    plt.savefig(i+'_evex_VS_kegg')
gpl-2.0
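The histogram calls in the script above use matplotlib's old normed keyword; on a current matplotlib the equivalent normalised and cumulative histograms use density=True. A self-contained sketch with random data standing in for the pickled feature values:

import numpy as np
import matplotlib.pyplot as plt

x = np.random.rand(1000)
fig, (ax1, ax2) = plt.subplots(2, 1)
ax1.hist(x, bins=100, histtype='step')
ax2.hist(x, bins=100, cumulative=True, density=True, histtype='step')
fig.savefig('distribution_example.png')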
glue-viz/glue-3d-viewer
glue_vispy_viewers/compat/axis.py
3
22545
# -*- coding: utf-8 -*- # ----------------------------------------------------------------------------- # Copyright (c) Vispy Development Team. All Rights Reserved. # Distributed under the (new) BSD License. See LICENSE.txt for more info. # ----------------------------------------------------------------------------- import math import numpy as np from vispy.visuals.visual import CompoundVisual from vispy.visuals.line import LineVisual from vispy.scene.visuals import create_visual_node from glue_vispy_viewers.compat.text import TextVisual # XXX TODO list (see code, plus): # 1. Automated tick direction? # 2. Expand to 3D (only 2D supported currently) # 3. Input validation # 4. Property support # 5. Reactivity to resizing (current tick lengths grow/shrink w/zoom) # 6. Improve tick label naming (str(x) is not good) and tick selection __all__ = ['Axis'] class AxisVisual(CompoundVisual): """Axis visual Parameters ---------- pos : array Co-ordinates of start and end of the axis. domain : tuple The data values at the beginning and end of the axis, used for tick labels. i.e. (5, 10) means the axis starts at 5 and ends at 10. Default is (0, 1). tick_direction : array The tick direction to use (in document coordinates). scale_type : str The type of scale. For now only 'linear' is supported. axis_color : tuple RGBA values for the axis colour. Default is black. tick_color : tuple RGBA values for the tick colours. The colour for the major and minor ticks is currently fixed to be the same. Default is a dark grey. text_color : Color The color to use for drawing tick and axis labels minor_tick_length : float The length of minor ticks, in pixels major_tick_length : float The length of major ticks, in pixels tick_width : float Line width for the ticks tick_label_margin : float Margin between ticks and tick labels tick_font_size : float The font size to use for rendering tick labels. axis_width : float Line width for the axis axis_label : str Text to use for the axis label axis_label_margin : float Margin between ticks and axis labels axis_font_size : float The font size to use for rendering axis labels. font_size : float Font size for both the tick and axis labels. If this is set, tick_font_size and axis_font_size are ignored. anchors : iterable A 2-element iterable (tuple, list, etc.) giving the horizontal and vertical alignment of the tick labels. The first element should be one of 'left', 'center', or 'right', and the second element should be one of 'bottom', 'middle', or 'top'. If this is not specified, it is determined automatically. """ def __init__(self, pos=None, domain=(0., 1.), tick_direction=(-1., 0.), scale_type="linear", axis_color=(1, 1, 1), tick_color=(0.7, 0.7, 0.7), text_color='w', minor_tick_length=5, major_tick_length=10, tick_width=2, tick_label_margin=5, tick_font_size=8, axis_width=3, axis_label=None, axis_label_margin=35, axis_font_size=10, font_size=None, anchors=None): if scale_type != 'linear': raise NotImplementedError('only linear scaling is currently ' 'supported') if font_size is not None: tick_font_size = font_size axis_font_size = font_size self._pos = None self._domain = None # If True, then axis stops at the first / last major tick. 
# If False, then axis extends to edge of *pos* # (private until we come up with a better name for this) self._stop_at_major = (False, False) self.ticker = Ticker(self, anchors=anchors) self.tick_direction = np.array(tick_direction, float) self.tick_direction = self.tick_direction self.scale_type = scale_type self.minor_tick_length = minor_tick_length # px self.major_tick_length = major_tick_length # px self.tick_label_margin = tick_label_margin # px self.axis_label_margin = axis_label_margin # px self._axis_label_text = axis_label self._need_update = True self._line = LineVisual(method='gl', width=axis_width, antialias=True, color=axis_color) self._ticks = LineVisual(method='gl', width=tick_width, connect='segments', antialias=True, color=tick_color) self._text = TextVisual(font_size=tick_font_size, color=text_color) self._axis_label = TextVisual(font_size=axis_font_size, color=text_color) CompoundVisual.__init__(self, [self._line, self._text, self._ticks, self._axis_label]) if pos is not None: self.pos = pos self.domain = domain @property def label_color(self): return self._text.color @label_color.setter def label_color(self, value): self._text.color = value self._axis_label.color = value @property def axis_color(self): return self._line.color @axis_color.setter def axis_color(self, value): self._line.set_data(color=value) @property def tick_color(self): return self._ticks.color @tick_color.setter def tick_color(self, value): self._ticks.set_data(color=value) @property def tick_font_size(self): return self._text.font_size @tick_font_size.setter def tick_font_size(self, value): self._text.font_size = value @property def axis_font_size(self): return self._axis_label.font_size @axis_font_size.setter def axis_font_size(self, value): self._axis_label.font_size = value @property def axis_label(self): return self._axis_label_text @axis_label.setter def axis_label(self, axis_label): self._axis_label_text = axis_label self._need_update = True self.update() @property def pos(self): return self._pos @pos.setter def pos(self, pos): self._pos = np.array(pos, float) self._need_update = True self.update() @property def domain(self): return self._domain @domain.setter def domain(self, d): if self._domain is None or d != self._domain: self._domain = d self._need_update = True self.update() @property def _vec(self): """Vector in the direction of the axis line""" return self.pos[1] - self.pos[0] def _update_subvisuals(self): tick_pos, labels, tick_label_pos, anchors, axis_label_pos = \ self.ticker.get_update() self._line.set_data(pos=self.pos, color=self.axis_color) self._ticks.set_data(pos=tick_pos, color=self.tick_color) self._text.text = list(labels) self._text.pos = tick_label_pos self._text.anchors = anchors if self.axis_label is not None: self._axis_label.text = self.axis_label self._axis_label.pos = axis_label_pos self._need_update = False def _prepare_draw(self, view): if self._pos is None: return False if self.axis_label is not None: # TODO: make sure we only call get_transform if the transform for # the line is updated tr = self._line.get_transform(map_from='visual', map_to='canvas') trpos = tr.map(self.pos) trpos /= trpos[:, 3:] x1, y1, x2, y2 = trpos[:, :2].ravel() if x1 > x2: x1, y1, x2, y2 = x2, y2, x1, y1 self._axis_label.rotation = math.degrees(math.atan2(y2-y1, x2-x1)) if self._need_update: self._update_subvisuals() def _compute_bounds(self, axis, view): if axis == 2: return (0., 0.) 
# now axis in (0, 1) return self.pos[:, axis].min(), self.pos[:, axis].max() class Ticker(object): """Class to determine tick marks Parameters ---------- axis : instance of AxisVisual The AxisVisual to generate ticks for. """ def __init__(self, axis, anchors=None): self.axis = axis self._anchors = anchors def get_update(self): major_tick_fractions, minor_tick_fractions, tick_labels = \ self._get_tick_frac_labels() tick_pos, tick_label_pos, axis_label_pos, anchors = \ self._get_tick_positions(major_tick_fractions, minor_tick_fractions) return tick_pos, tick_labels, tick_label_pos, anchors, axis_label_pos def _get_tick_positions(self, major_tick_fractions, minor_tick_fractions): # tick direction is defined in visual coords, but use document # coords to determine the tick length trs = self.axis.transforms visual_to_document = trs.get_transform('visual', 'document') direction = np.array(self.axis.tick_direction) direction /= np.linalg.norm(direction) if self._anchors is None: # use the document (pixel) coord system to set text anchors anchors = [] if direction[0] < 0: anchors.append('right') elif direction[0] > 0: anchors.append('left') else: anchors.append('center') if direction[1] < 0: anchors.append('bottom') elif direction[1] > 0: anchors.append('top') else: anchors.append('middle') else: anchors = self._anchors # now figure out the tick positions in visual (data) coords doc_unit = visual_to_document.map([[0, 0], direction[:2]]) doc_unit = doc_unit[1] - doc_unit[0] doc_len = np.linalg.norm(doc_unit) vectors = np.array([[0., 0.], direction * self.axis.minor_tick_length / doc_len, direction * self.axis.major_tick_length / doc_len, direction * (self.axis.major_tick_length + self.axis.tick_label_margin) / doc_len ], dtype=float) minor_vector = vectors[1] - vectors[0] major_vector = vectors[2] - vectors[0] label_vector = vectors[3] - vectors[0] axislabel_vector = direction * (self.axis.major_tick_length + self.axis.axis_label_margin) / doc_len major_origins, major_endpoints = self._tile_ticks( major_tick_fractions, major_vector) minor_origins, minor_endpoints = self._tile_ticks( minor_tick_fractions, minor_vector) tick_label_pos = major_origins + label_vector axis_label_pos = 0.5 * (self.axis.pos[0] + self.axis.pos[1]) + axislabel_vector num_major = len(major_tick_fractions) num_minor = len(minor_tick_fractions) c = np.empty([(num_major + num_minor) * 2, 2]) c[0:(num_major-1)*2+1:2] = major_origins c[1:(num_major-1)*2+2:2] = major_endpoints c[(num_major-1)*2+2::2] = minor_origins c[(num_major-1)*2+3::2] = minor_endpoints return c, tick_label_pos, axis_label_pos, anchors def _tile_ticks(self, frac, tickvec): """Tiles tick marks along the axis.""" origins = np.tile(self.axis._vec, (len(frac), 1)) origins = self.axis.pos[0].T + (origins.T*frac).T endpoints = tickvec + origins return origins, endpoints def _get_tick_frac_labels(self): """Get the major ticks, minor ticks, and major labels""" minor_num = 4 # number of minor ticks per major division if (self.axis.scale_type == 'linear'): domain = self.axis.domain if domain[1] < domain[0]: flip = True domain = domain[::-1] else: flip = False offset = domain[0] scale = domain[1] - domain[0] transforms = self.axis.transforms length = self.axis.pos[1] - self.axis.pos[0] # in logical coords n_inches = np.sqrt(np.sum(length ** 2)) / transforms.dpi # major = np.linspace(domain[0], domain[1], num=11) # major = MaxNLocator(10).tick_values(*domain) major = _get_ticks_talbot(domain[0], domain[1], n_inches, 2) labels = ['%g' % x for x in major] majstep = major[1] 
- major[0] minor = [] minstep = majstep / (minor_num + 1) minstart = 0 if self.axis._stop_at_major[0] else -1 minstop = -1 if self.axis._stop_at_major[1] else 0 for i in range(minstart, len(major) + minstop): maj = major[0] + i * majstep minor.extend(np.linspace(maj + minstep, maj + majstep - minstep, minor_num)) major_frac = (major - offset) / scale minor_frac = (np.array(minor) - offset) / scale major_frac = major_frac[::-1] if flip else major_frac use_mask = (major_frac > -0.0001) & (major_frac < 1.0001) major_frac = major_frac[use_mask] labels = [l for li, l in enumerate(labels) if use_mask[li]] minor_frac = minor_frac[(minor_frac > -0.0001) & (minor_frac < 1.0001)] elif self.axis.scale_type == 'logarithmic': return NotImplementedError elif self.axis.scale_type == 'power': return NotImplementedError return major_frac, minor_frac, labels # ############################################################################# # Translated from matplotlib class MaxNLocator(object): """ Select no more than N intervals at nice locations. """ def __init__(self, nbins=10, steps=None, trim=True, integer=False, symmetric=False, prune=None): """ Keyword args: *nbins* Maximum number of intervals; one less than max number of ticks. *steps* Sequence of nice numbers starting with 1 and ending with 10; e.g., [1, 2, 4, 5, 10] *integer* If True, ticks will take only integer values. *symmetric* If True, autoscaling will result in a range symmetric about zero. *prune* ['lower' | 'upper' | 'both' | None] Remove edge ticks -- useful for stacked or ganged plots where the upper tick of one axes overlaps with the lower tick of the axes above it. If prune=='lower', the smallest tick will be removed. If prune=='upper', the largest tick will be removed. If prune=='both', the largest and smallest ticks will be removed. If prune==None, no ticks will be removed. 
""" self._nbins = int(nbins) self._trim = trim self._integer = integer self._symmetric = symmetric if prune is not None and prune not in ['upper', 'lower', 'both']: raise ValueError( "prune must be 'upper', 'lower', 'both', or None") self._prune = prune if steps is None: steps = [1, 2, 2.5, 3, 4, 5, 6, 8, 10] else: if int(steps[-1]) != 10: steps = list(steps) steps.append(10) self._steps = steps self._integer = integer if self._integer: self._steps = [n for n in self._steps if divmod(n, 1)[1] < 0.001] def bin_boundaries(self, vmin, vmax): nbins = self._nbins scale, offset = scale_range(vmin, vmax, nbins) if self._integer: scale = max(1, scale) vmin = vmin - offset vmax = vmax - offset raw_step = (vmax - vmin) / nbins scaled_raw_step = raw_step / scale best_vmax = vmax best_vmin = vmin for step in self._steps: if step < scaled_raw_step: continue step *= scale best_vmin = step * divmod(vmin, step)[0] best_vmax = best_vmin + step * nbins if (best_vmax >= vmax): break if self._trim: extra_bins = int(divmod((best_vmax - vmax), step)[0]) nbins -= extra_bins return (np.arange(nbins + 1) * step + best_vmin + offset) def __call__(self): vmin, vmax = self.axis.get_view_interval() return self.tick_values(vmin, vmax) def tick_values(self, vmin, vmax): locs = self.bin_boundaries(vmin, vmax) prune = self._prune if prune == 'lower': locs = locs[1:] elif prune == 'upper': locs = locs[:-1] elif prune == 'both': locs = locs[1:-1] return locs def view_limits(self, dmin, dmax): if self._symmetric: maxabs = max(abs(dmin), abs(dmax)) dmin = -maxabs dmax = maxabs return np.take(self.bin_boundaries(dmin, dmax), [0, -1]) def scale_range(vmin, vmax, n=1, threshold=100): dv = abs(vmax - vmin) if dv == 0: # maxabsv == 0 is a special case of this. return 1.0, 0.0 # Note: this should never occur because # vmin, vmax should have been checked by nonsingular(), # and spread apart if necessary. meanv = 0.5 * (vmax + vmin) if abs(meanv) / dv < threshold: offset = 0 elif meanv > 0: ex = divmod(np.log10(meanv), 1)[0] offset = 10 ** ex else: ex = divmod(np.log10(-meanv), 1)[0] offset = -10 ** ex ex = divmod(np.log10(dv / n), 1)[0] scale = 10 ** ex return scale, offset # ############################################################################# # Tranlated from http://www.justintalbot.com/research/axis-labeling/ # See "An Extension of Wilkinson's Algorithm for Positioning Tick Labels # on Axes" # by Justin Talbot, Sharon Lin, and Pat Hanrahan, InfoVis 2010. def _coverage(dmin, dmax, lmin, lmax): return 1 - 0.5 * ((dmax - lmax) ** 2 + (dmin - lmin) ** 2) / (0.1 * (dmax - dmin)) ** 2 def _coverage_max(dmin, dmax, span): range_ = dmax - dmin if span <= range_: return 1. else: half = (span - range_) / 2.0 return 1 - half ** 2 / (0.1 * range_) ** 2 def _density(k, m, dmin, dmax, lmin, lmax): r = (k-1.0) / (lmax-lmin) rt = (m-1.0) / (max(lmax, dmax) - min(lmin, dmin)) return 2 - max(r / rt, rt / r) def _density_max(k, m): return 2 - (k-1.0) / (m-1.0) if k >= m else 1. def _simplicity(q, Q, j, lmin, lmax, lstep): eps = 1e-10 n = len(Q) i = Q.index(q) + 1 if ((lmin % lstep) < eps or (lstep - lmin % lstep) < eps) and lmin <= 0 and lmax >= 0: v = 1 else: v = 0 return (n - i) / (n - 1.0) + v - j def _simplicity_max(q, Q, j): n = len(Q) i = Q.index(q) + 1 return (n - i)/(n - 1.0) + 1. 
- j def _get_ticks_talbot(dmin, dmax, n_inches, density=1.): # density * size gives target number of intervals, # density * size + 1 gives target number of tick marks, # the density function converts this back to a density in data units # (not inches) n_inches = max(n_inches, 2.0) # Set minimum otherwise code can crash :( if dmin == dmax: return np.array([dmin, dmax]) m = density * n_inches + 1.0 only_inside = False # we cull values outside ourselves Q = [1, 5, 2, 2.5, 4, 3] w = [0.25, 0.2, 0.5, 0.05] best_score = -2.0 best = None j = 1.0 n_max = 1000 while j < n_max: for q in Q: sm = _simplicity_max(q, Q, j) if w[0] * sm + w[1] + w[2] + w[3] < best_score: j = n_max break k = 2.0 while k < n_max: dm = _density_max(k, n_inches) if w[0] * sm + w[1] + w[2] * dm + w[3] < best_score: break delta = (dmax-dmin)/(k+1.0)/j/q z = np.ceil(np.log10(delta)) while z < float('infinity'): step = j * q * 10 ** z cm = _coverage_max(dmin, dmax, step*(k-1.0)) if (w[0] * sm + w[1] * cm + w[2] * dm + w[3] < best_score): break min_start = np.floor(dmax/step)*j - (k-1.0)*j max_start = np.ceil(dmin/step)*j if min_start > max_start: z = z+1 break for start in range(int(min_start), int(max_start)+1): lmin = start * (step/j) lmax = lmin + step*(k-1.0) lstep = step s = _simplicity(q, Q, j, lmin, lmax, lstep) c = _coverage(dmin, dmax, lmin, lmax) d = _density(k, m, dmin, dmax, lmin, lmax) leg = 1. # _legibility(lmin, lmax, lstep) score = w[0] * s + w[1] * c + w[2] * d + w[3] * leg if (score > best_score and (not only_inside or (lmin >= dmin and lmax <= dmax))): best_score = score best = (lmin, lmax, lstep, q, k) z += 1 k += 1 if k == n_max: raise RuntimeError('could not converge on ticks') j += 1 if j == n_max: raise RuntimeError('could not converge on ticks') if best is None: raise RuntimeError('could not converge on ticks') return np.arange(best[4]) * best[2] + best[0] Axis = create_visual_node(AxisVisual)
bsd-2-clause
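A small sketch of the tick-fraction mapping used in _get_tick_frac_labels above: candidate tick values in data units are mapped to fractions of the axis length, then anything outside [0, 1] is culled. The domain and tick values are invented.

import numpy as np

domain = (3.0, 17.0)
major = np.array([0.0, 5.0, 10.0, 15.0, 20.0])      # candidate major ticks

offset, scale = domain[0], domain[1] - domain[0]
major_frac = (major - offset) / scale
use_mask = (major_frac > -0.0001) & (major_frac < 1.0001)
print(major_frac[use_mask])   # approximately [0.143 0.5 0.857]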
bulik/ldsc
ldscore/regressions.py
1
29518
''' (c) 2014 Brendan Bulik-Sullivan and Hilary Finucane Estimators of heritability and genetic correlation. Shape convention is (n_snp, n_annot) for all classes. Last column = intercept. ''' from __future__ import division import numpy as np import pandas as pd from scipy.stats import norm, chi2 import jackknife as jk from irwls import IRWLS from scipy.stats import t as tdist from collections import namedtuple np.seterr(divide='raise', invalid='raise') s = lambda x: remove_brackets(str(np.matrix(x))) def update_separators(s, ii): '''s are separators with ii masked. Returns unmasked separators.''' maplist = np.arange(len(ii))[np.squeeze(ii)] mask_to_unmask = lambda i: maplist[i] t = np.apply_along_axis(mask_to_unmask, 0, s[1:-1]) t = np.hstack(((0), t, (len(ii)))) return t def p_z_norm(est, se): '''Convert estimate and se to Z-score and P-value.''' try: Z = est / se except (FloatingPointError, ZeroDivisionError): Z = float('inf') P = chi2.sf(Z ** 2, 1, loc=0, scale=1) # 0 if Z=inf return P, Z def remove_brackets(x): '''Get rid of brackets and trailing whitespace in numpy arrays.''' return x.replace('[', '').replace(']', '').strip() def append_intercept(x): ''' Appends an intercept term to the design matrix for a linear regression. Parameters ---------- x : np.matrix with shape (n_row, n_col) Design matrix. Columns are predictors; rows are observations. Returns ------- x_new : np.matrix with shape (n_row, n_col+1) Design matrix with intercept term appended. ''' n_row = x.shape[0] intercept = np.ones((n_row, 1)) x_new = np.concatenate((x, intercept), axis=1) return x_new def remove_intercept(x): '''Removes the last column.''' n_col = x.shape[1] return x[:, 0:n_col - 1] def gencov_obs_to_liab(gencov_obs, P1, P2, K1, K2): ''' Converts genetic covariance on the observed scale in an ascertained sample to genetic covariance on the liability scale in the population Parameters ---------- gencov_obs : float Genetic covariance on the observed scale in an ascertained sample. P1, P2 : float in (0,1) Prevalences of phenotypes 1,2 in the sample. K1, K2 : float in (0,1) Prevalences of phenotypes 1,2 in the population. Returns ------- gencov_liab : float Genetic covariance between liabilities in the population. Note: if a trait is a QT, set P = K = None. ''' c1 = 1 c2 = 1 if P1 is not None and K1 is not None: c1 = np.sqrt(h2_obs_to_liab(1, P1, K1)) if P2 is not None and K2 is not None: c2 = np.sqrt(h2_obs_to_liab(1, P2, K2)) return gencov_obs * c1 * c2 def h2_obs_to_liab(h2_obs, P, K): ''' Converts heritability on the observed scale in an ascertained sample to heritability on the liability scale in the population. Parameters ---------- h2_obs : float Heritability on the observed scale in an ascertained sample. P : float in (0,1) Prevalence of the phenotype in the sample. K : float in (0,1) Prevalence of the phenotype in the population. Returns ------- h2_liab : float Heritability of liability in the population. 
''' if np.isnan(P) and np.isnan(K): return h2_obs if K <= 0 or K >= 1: raise ValueError('K must be in the range (0,1)') if P <= 0 or P >= 1: raise ValueError('P must be in the range (0,1)') thresh = norm.isf(K) conversion_factor = K ** 2 * \ (1 - K) ** 2 / (P * (1 - P) * norm.pdf(thresh) ** 2) return h2_obs * conversion_factor class LD_Score_Regression(object): def __init__(self, y, x, w, N, M, n_blocks, intercept=None, slow=False, step1_ii=None, old_weights=False): for i in [y, x, w, M, N]: try: if len(i.shape) != 2: raise TypeError('Arguments must be 2D arrays.') except AttributeError: raise TypeError('Arguments must be arrays.') n_snp, self.n_annot = x.shape if any(i.shape != (n_snp, 1) for i in [y, w, N]): raise ValueError( 'N, weights and response (z1z2 or chisq) must have shape (n_snp, 1).') if M.shape != (1, self.n_annot): raise ValueError('M must have shape (1, n_annot).') M_tot = float(np.sum(M)) x_tot = np.sum(x, axis=1).reshape((n_snp, 1)) self.constrain_intercept = intercept is not None self.intercept = intercept self.n_blocks = n_blocks tot_agg = self.aggregate(y, x_tot, N, M_tot, intercept) initial_w = self._update_weights( x_tot, w, N, M_tot, tot_agg, intercept) Nbar = np.mean(N) # keep condition number low x = np.multiply(N, x) / Nbar if not self.constrain_intercept: x, x_tot = append_intercept(x), append_intercept(x_tot) yp = y else: yp = y - intercept self.intercept_se = 'NA' del y self.twostep_filtered = None if step1_ii is not None and self.constrain_intercept: raise ValueError( 'twostep is not compatible with constrain_intercept.') elif step1_ii is not None and self.n_annot > 1: raise ValueError( 'twostep not compatible with partitioned LD Score yet.') elif step1_ii is not None: n1 = np.sum(step1_ii) self.twostep_filtered = n_snp - n1 x1 = x[np.squeeze(step1_ii), :] yp1, w1, N1, initial_w1 = map( lambda a: a[step1_ii].reshape((n1, 1)), (yp, w, N, initial_w)) update_func1 = lambda a: self._update_func( a, x1, w1, N1, M_tot, Nbar, ii=step1_ii) step1_jknife = IRWLS( x1, yp1, update_func1, n_blocks, slow=slow, w=initial_w1) step1_int, _ = self._intercept(step1_jknife) yp = yp - step1_int x = remove_intercept(x) x_tot = remove_intercept(x_tot) update_func2 = lambda a: self._update_func( a, x_tot, w, N, M_tot, Nbar, step1_int) s = update_separators(step1_jknife.separators, step1_ii) step2_jknife = IRWLS( x, yp, update_func2, n_blocks, slow=slow, w=initial_w, separators=s) c = np.sum(np.multiply(initial_w, x)) / \ np.sum(np.multiply(initial_w, np.square(x))) jknife = self._combine_twostep_jknives( step1_jknife, step2_jknife, M_tot, c, Nbar) elif old_weights: initial_w = np.sqrt(initial_w) x = IRWLS._weight(x, initial_w) y = IRWLS._weight(yp, initial_w) jknife = jk.LstsqJackknifeFast(x, y, n_blocks) else: update_func = lambda a: self._update_func( a, x_tot, w, N, M_tot, Nbar, intercept) jknife = IRWLS( x, yp, update_func, n_blocks, slow=slow, w=initial_w) self.coef, self.coef_cov, self.coef_se = self._coef(jknife, Nbar) self.cat, self.cat_cov, self.cat_se =\ self._cat(jknife, M, Nbar, self.coef, self.coef_cov) self.tot, self.tot_cov, self.tot_se = self._tot(self.cat, self.cat_cov) self.prop, self.prop_cov, self.prop_se =\ self._prop(jknife, M, Nbar, self.cat, self.tot) self.enrichment, self.M_prop = self._enrichment( M, M_tot, self.cat, self.tot) if not self.constrain_intercept: self.intercept, self.intercept_se = self._intercept(jknife) self.jknife = jknife self.tot_delete_values = self._delete_vals_tot(jknife, Nbar, M) self.part_delete_values = self._delete_vals_part(jknife, 
Nbar, M) if not self.constrain_intercept: self.intercept_delete_values = jknife.delete_values[ :, self.n_annot] self.M = M @classmethod def aggregate(cls, y, x, N, M, intercept=None): if intercept is None: intercept = cls.__null_intercept__ num = M * (np.mean(y) - intercept) denom = np.mean(np.multiply(x, N)) return num / denom def _update_func(self, x, ref_ld_tot, w_ld, N, M, Nbar, intercept=None, ii=None): raise NotImplementedError def _delete_vals_tot(self, jknife, Nbar, M): '''Get delete values for total h2 or gencov.''' n_annot = self.n_annot tot_delete_vals = jknife.delete_values[ :, 0:n_annot] # shape (n_blocks, n_annot) # shape (n_blocks, 1) tot_delete_vals = np.dot(tot_delete_vals, M.T) / Nbar return tot_delete_vals def _delete_vals_part(self, jknife, Nbar, M): '''Get delete values for partitioned h2 or gencov.''' n_annot = self.n_annot return jknife.delete_values[:, 0:n_annot] / Nbar def _coef(self, jknife, Nbar): '''Get coefficient estimates + cov from the jackknife.''' n_annot = self.n_annot coef = jknife.est[0, 0:n_annot] / Nbar coef_cov = jknife.jknife_cov[0:n_annot, 0:n_annot] / Nbar ** 2 coef_se = np.sqrt(np.diag(coef_cov)) return coef, coef_cov, coef_se def _cat(self, jknife, M, Nbar, coef, coef_cov): '''Convert coefficients to per-category h2 or gencov.''' cat = np.multiply(M, coef) cat_cov = np.multiply(np.dot(M.T, M), coef_cov) cat_se = np.sqrt(np.diag(cat_cov)) return cat, cat_cov, cat_se def _tot(self, cat, cat_cov): '''Convert per-category h2 to total h2 or gencov.''' tot = np.sum(cat) tot_cov = np.sum(cat_cov) tot_se = np.sqrt(tot_cov) return tot, tot_cov, tot_se def _prop(self, jknife, M, Nbar, cat, tot): '''Convert total h2 and per-category h2 to per-category proportion h2 or gencov.''' n_annot = self.n_annot n_blocks = jknife.delete_values.shape[0] numer_delete_vals = np.multiply( M, jknife.delete_values[:, 0:n_annot]) / Nbar # (n_blocks, n_annot) denom_delete_vals = np.sum( numer_delete_vals, axis=1).reshape((n_blocks, 1)) denom_delete_vals = np.dot(denom_delete_vals, np.ones((1, n_annot))) prop = jk.RatioJackknife( cat / tot, numer_delete_vals, denom_delete_vals) return prop.est, prop.jknife_cov, prop.jknife_se def _enrichment(self, M, M_tot, cat, tot): '''Compute proportion of SNPs per-category enrichment for h2 or gencov.''' M_prop = M / M_tot enrichment = np.divide(cat, M) / (tot / M_tot) return enrichment, M_prop def _intercept(self, jknife): '''Extract intercept and intercept SE from block jackknife.''' n_annot = self.n_annot intercept = jknife.est[0, n_annot] intercept_se = jknife.jknife_se[0, n_annot] return intercept, intercept_se def _combine_twostep_jknives(self, step1_jknife, step2_jknife, M_tot, c, Nbar=1): '''Combine free intercept and constrained intercept jackknives for --two-step.''' n_blocks, n_annot = step1_jknife.delete_values.shape n_annot -= 1 if n_annot > 2: raise ValueError( 'twostep not yet implemented for partitioned LD Score.') step1_int, _ = self._intercept(step1_jknife) est = np.hstack( (step2_jknife.est, np.array(step1_int).reshape((1, 1)))) delete_values = np.zeros((n_blocks, n_annot + 1)) delete_values[:, n_annot] = step1_jknife.delete_values[:, n_annot] delete_values[:, 0:n_annot] = step2_jknife.delete_values -\ c * (step1_jknife.delete_values[:, n_annot] - step1_int).reshape((n_blocks, n_annot)) # check this pseudovalues = jk.Jackknife.delete_values_to_pseudovalues( delete_values, est) jknife_est, jknife_var, jknife_se, jknife_cov = jk.Jackknife.jknife( pseudovalues) jknife = namedtuple('jknife', ['est', 'jknife_se', 
'jknife_est', 'jknife_var', 'jknife_cov', 'delete_values']) return jknife(est, jknife_se, jknife_est, jknife_var, jknife_cov, delete_values) class Hsq(LD_Score_Regression): __null_intercept__ = 1 def __init__(self, y, x, w, N, M, n_blocks=200, intercept=None, slow=False, twostep=None, old_weights=False): step1_ii = None if twostep is not None: step1_ii = y < twostep LD_Score_Regression.__init__(self, y, x, w, N, M, n_blocks, intercept=intercept, slow=slow, step1_ii=step1_ii, old_weights=old_weights) self.mean_chisq, self.lambda_gc = self._summarize_chisq(y) if not self.constrain_intercept: self.ratio, self.ratio_se = self._ratio( self.intercept, self.intercept_se, self.mean_chisq) def _update_func(self, x, ref_ld_tot, w_ld, N, M, Nbar, intercept=None, ii=None): ''' Update function for IRWLS x is the output of np.linalg.lstsq. x[0] is the regression coefficients x[0].shape is (# of dimensions, 1) the last element of x[0] is the intercept. intercept is None --> free intercept intercept is not None --> constrained intercept ''' hsq = M * x[0][0] / Nbar if intercept is None: intercept = max(x[0][1]) # divide by zero error if intercept < 0 else: if ref_ld_tot.shape[1] > 1: raise ValueError( 'Design matrix has intercept column for constrained intercept regression!') ld = ref_ld_tot[:, 0].reshape(w_ld.shape) # remove intercept w = self.weights(ld, w_ld, N, M, hsq, intercept, ii) return w def _summarize_chisq(self, chisq): '''Compute mean chi^2 and lambda_GC.''' mean_chisq = np.mean(chisq) # median and matrix don't play nice lambda_gc = np.median(np.asarray(chisq)) / 0.4549 return mean_chisq, lambda_gc def _ratio(self, intercept, intercept_se, mean_chisq): '''Compute ratio (intercept - 1) / (mean chi^2 -1 ).''' if mean_chisq > 1: ratio_se = intercept_se / (mean_chisq - 1) ratio = (intercept - 1) / (mean_chisq - 1) else: ratio = 'NA' ratio_se = 'NA' return ratio, ratio_se def _overlap_output(self, category_names, overlap_matrix, M_annot, M_tot, print_coefficients): '''LD Score regression summary for overlapping categories.''' overlap_matrix_prop = np.zeros([self.n_annot,self.n_annot]) for i in range(self.n_annot): overlap_matrix_prop[i, :] = overlap_matrix[i, :] / M_annot prop_hsq_overlap = np.dot( overlap_matrix_prop, self.prop.T).reshape((1, self.n_annot)) prop_hsq_overlap_var = np.diag( np.dot(np.dot(overlap_matrix_prop, self.prop_cov), overlap_matrix_prop.T)) prop_hsq_overlap_se = np.sqrt( np.maximum(0, prop_hsq_overlap_var)).reshape((1, self.n_annot)) one_d_convert = lambda x: np.array(x).reshape(np.prod(x.shape)) prop_M_overlap = M_annot / M_tot enrichment = prop_hsq_overlap / prop_M_overlap enrichment_se = prop_hsq_overlap_se / prop_M_overlap overlap_matrix_diff = np.zeros([self.n_annot,self.n_annot]) for i in range(self.n_annot): if not M_tot == M_annot[0,i]: overlap_matrix_diff[i, :] = overlap_matrix[i,:]/M_annot[0,i] - \ (M_annot - overlap_matrix[i,:]) / (M_tot-M_annot[0,i]) diff_est = np.dot(overlap_matrix_diff,self.coef) diff_cov = np.dot(np.dot(overlap_matrix_diff,self.coef_cov),overlap_matrix_diff.T) diff_se = np.sqrt(np.diag(diff_cov)) diff_p = ['NA' if diff_se[i]==0 else 2*tdist.sf(abs(diff_est[i]/diff_se[i]),self.n_blocks) \ for i in range(self.n_annot)] df = pd.DataFrame({ 'Category': category_names, 'Prop._SNPs': one_d_convert(prop_M_overlap), 'Prop._h2': one_d_convert(prop_hsq_overlap), 'Prop._h2_std_error': one_d_convert(prop_hsq_overlap_se), 'Enrichment': one_d_convert(enrichment), 'Enrichment_std_error': one_d_convert(enrichment_se), 'Enrichment_p':diff_p, 'Coefficient': 
one_d_convert(self.coef), 'Coefficient_std_error': self.coef_se, 'Coefficient_z-score': one_d_convert(self.coef) / one_d_convert(self.coef_se) }) if print_coefficients: df = df[['Category', 'Prop._SNPs', 'Prop._h2', 'Prop._h2_std_error', 'Enrichment','Enrichment_std_error', 'Enrichment_p', 'Coefficient', 'Coefficient_std_error','Coefficient_z-score']] else: df = df[['Category', 'Prop._SNPs', 'Prop._h2', 'Prop._h2_std_error', 'Enrichment','Enrichment_std_error', 'Enrichment_p']] return df def summary(self, ref_ld_colnames=None, P=None, K=None, overlap=False): '''Print summary of the LD Score Regression.''' if P is not None and K is not None: T = 'Liability' c = h2_obs_to_liab(1, P, K) else: T = 'Observed' c = 1 out = ['Total ' + T + ' scale h2: ' + s(c * self.tot) + ' (' + s(c * self.tot_se) + ')'] if self.n_annot > 1: if ref_ld_colnames is None: ref_ld_colnames = ['CAT_' + str(i) for i in xrange(self.n_annot)] out.append('Categories: ' + ' '.join(ref_ld_colnames)) if not overlap: out.append(T + ' scale h2: ' + s(c * self.cat)) out.append(T + ' scale h2 SE: ' + s(c * self.cat_se)) out.append('Proportion of SNPs: ' + s(self.M_prop)) out.append('Proportion of h2g: ' + s(self.prop)) out.append('Enrichment: ' + s(self.enrichment)) out.append('Coefficients: ' + s(self.coef)) out.append('Coefficient SE: ' + s(self.coef_se)) out.append('Lambda GC: ' + s(self.lambda_gc)) out.append('Mean Chi^2: ' + s(self.mean_chisq)) if self.constrain_intercept: out.append( 'Intercept: constrained to {C}'.format(C=s(self.intercept))) else: out.append( 'Intercept: ' + s(self.intercept) + ' (' + s(self.intercept_se) + ')') if self.mean_chisq > 1: if self.ratio < 0: out.append( 'Ratio < 0 (usually indicates GC correction).') else: out.append( 'Ratio: ' + s(self.ratio) + ' (' + s(self.ratio_se) + ')') else: out.append('Ratio: NA (mean chi^2 < 1)') return remove_brackets('\n'.join(out)) def _update_weights(self, ld, w_ld, N, M, hsq, intercept, ii=None): if intercept is None: intercept = self.__null_intercept__ return self.weights(ld, w_ld, N, M, hsq, intercept, ii) @classmethod def weights(cls, ld, w_ld, N, M, hsq, intercept=None, ii=None): ''' Regression weights. Parameters ---------- ld : np.matrix with shape (n_snp, 1) LD Scores (non-partitioned). w_ld : np.matrix with shape (n_snp, 1) LD Scores (non-partitioned) computed with sum r^2 taken over only those SNPs included in the regression. N : np.matrix of ints > 0 with shape (n_snp, 1) Number of individuals sampled for each SNP. M : float > 0 Number of SNPs used for estimating LD Score (need not equal number of SNPs included in the regression). hsq : float in [0,1] Heritability estimate. Returns ------- w : np.matrix with shape (n_snp, 1) Regression weights. Approx equal to reciprocal of conditional variance function. 
''' M = float(M) if intercept is None: intercept = 1 hsq = max(hsq, 0.0) hsq = min(hsq, 1.0) ld = np.fmax(ld, 1.0) w_ld = np.fmax(w_ld, 1.0) c = hsq * N / M het_w = 1.0 / (2 * np.square(intercept + np.multiply(c, ld))) oc_w = 1.0 / w_ld w = np.multiply(het_w, oc_w) return w class Gencov(LD_Score_Regression): __null_intercept__ = 0 def __init__(self, z1, z2, x, w, N1, N2, M, hsq1, hsq2, intercept_hsq1, intercept_hsq2, n_blocks=200, intercept_gencov=None, slow=False, twostep=None): self.intercept_hsq1 = intercept_hsq1 self.intercept_hsq2 = intercept_hsq2 self.hsq1 = hsq1 self.hsq2 = hsq2 self.N1 = N1 self.N2 = N2 y = z1 * z2 step1_ii = None if twostep is not None: step1_ii = np.logical_and(z1**2 < twostep, z2**2 < twostep) LD_Score_Regression.__init__(self, y, x, w, np.sqrt(N1 * N2), M, n_blocks, intercept=intercept_gencov, slow=slow, step1_ii=step1_ii) self.p, self.z = p_z_norm(self.tot, self.tot_se) self.mean_z1z2 = np.mean(np.multiply(z1, z2)) def summary(self, ref_ld_colnames, P=None, K=None): '''Print summary of the LD Score regression.''' out = [] if P is not None and K is not None and\ all((i is not None for i in P)) and all((i is not None for i in K)): T = 'Liability' c = gencov_obs_to_liab(1, P[0], P[1], K[0], K[1]) else: T = 'Observed' c = 1 out.append('Total ' + T + ' scale gencov: ' + s(c * self.tot) + ' (' + s(c * self.tot_se) + ')') if self.n_annot > 1: out.append('Categories: ' + str(' '.join(ref_ld_colnames))) out.append(T + ' scale gencov: ' + s(c * self.cat)) out.append(T + ' scale gencov SE: ' + s(c * self.cat_se)) out.append('Proportion of SNPs: ' + s(self.M_prop)) out.append('Proportion of gencov: ' + s(self.prop)) out.append('Enrichment: ' + s(self.enrichment)) out.append('Mean z1*z2: ' + s(self.mean_z1z2)) if self.constrain_intercept: out.append( 'Intercept: constrained to {C}'.format(C=s(self.intercept))) else: out.append( 'Intercept: ' + s(self.intercept) + ' (' + s(self.intercept_se) + ')') return remove_brackets('\n'.join(out)) def _update_func(self, x, ref_ld_tot, w_ld, N, M, Nbar, intercept=None, ii=None): ''' Update function for IRWLS x is the output of np.linalg.lstsq. x[0] is the regression coefficients x[0].shape is (# of dimensions, 1) the last element of x[0] is the intercept. ''' rho_g = M * x[0][0] / Nbar if intercept is None: # if the regression includes an intercept intercept = x[0][1] # remove intercept if we have one ld = ref_ld_tot[:, 0].reshape(w_ld.shape) if ii is not None: N1 = self.N1[ii].reshape((w_ld.shape)) N2 = self.N2[ii].reshape((w_ld.shape)) else: N1 = self.N1 N2 = self.N2 return self.weights(ld, w_ld, N1, N2, np.sum(M), self.hsq1, self.hsq2, rho_g, intercept, self.intercept_hsq1, self.intercept_hsq2, ii) def _update_weights(self, ld, w_ld, sqrt_n1n2, M, rho_g, intercept, ii=None): '''Weight function with the same signature for Hsq and Gencov.''' w = self.weights(ld, w_ld, self.N1, self.N2, M, self.hsq1, self.hsq2, rho_g, intercept, self.intercept_hsq1, self.intercept_hsq2) return w @classmethod def weights(cls, ld, w_ld, N1, N2, M, h1, h2, rho_g, intercept_gencov=None, intercept_hsq1=None, intercept_hsq2=None, ii=None): ''' Regression weights. Parameters ---------- ld : np.matrix with shape (n_snp, 1) LD Scores (non-partitioned) w_ld : np.matrix with shape (n_snp, 1) LD Scores (non-partitioned) computed with sum r^2 taken over only those SNPs included in the regression. M : float > 0 Number of SNPs used for estimating LD Score (need not equal number of SNPs included in the regression). 
N1, N2 : np.matrix of ints > 0 with shape (n_snp, 1) Number of individuals sampled for each SNP for each study. h1, h2 : float in [0,1] Heritability estimates for each study. rhog : float in [0,1] Genetic covariance estimate. intercept : float Genetic covariance intercept, on the z1*z2 scale (so should be Ns*rho/sqrt(N1*N2)). Returns ------- w : np.matrix with shape (n_snp, 1) Regression weights. Approx equal to reciprocal of conditional variance function. ''' M = float(M) if intercept_gencov is None: intercept_gencov = 0 if intercept_hsq1 is None: intercept_hsq1 = 1 if intercept_hsq2 is None: intercept_hsq2 = 1 h1, h2 = max(h1, 0.0), max(h2, 0.0) h1, h2 = min(h1, 1.0), min(h2, 1.0) rho_g = min(rho_g, 1.0) rho_g = max(rho_g, -1.0) ld = np.fmax(ld, 1.0) w_ld = np.fmax(w_ld, 1.0) a = np.multiply(N1, h1 * ld) / M + intercept_hsq1 b = np.multiply(N2, h2 * ld) / M + intercept_hsq2 sqrt_n1n2 = np.sqrt(np.multiply(N1, N2)) c = np.multiply(sqrt_n1n2, rho_g * ld) / M + intercept_gencov try: het_w = 1.0 / (np.multiply(a, b) + np.square(c)) except FloatingPointError: # bizarre error; should never happen raise FloatingPointError('Why did you set hsq intercept <= 0?') oc_w = 1.0 / w_ld w = np.multiply(het_w, oc_w) return w class RG(object): def __init__(self, z1, z2, x, w, N1, N2, M, intercept_hsq1=None, intercept_hsq2=None, intercept_gencov=None, n_blocks=200, slow=False, twostep=None): self.intercept_gencov = intercept_gencov self._negative_hsq = None n_snp, n_annot = x.shape hsq1 = Hsq(np.square(z1), x, w, N1, M, n_blocks=n_blocks, intercept=intercept_hsq1, slow=slow, twostep=twostep) hsq2 = Hsq(np.square(z2), x, w, N2, M, n_blocks=n_blocks, intercept=intercept_hsq2, slow=slow, twostep=twostep) gencov = Gencov(z1, z2, x, w, N1, N2, M, hsq1.tot, hsq2.tot, hsq1.intercept, hsq2.intercept, n_blocks, intercept_gencov=intercept_gencov, slow=slow, twostep=twostep) gencov.N1 = None # save memory gencov.N2 = None self.hsq1, self.hsq2, self.gencov = hsq1, hsq2, gencov if (hsq1.tot <= 0 or hsq2.tot <= 0): self._negative_hsq = True self.rg_ratio = self.rg = self.rg_se = 'NA' self.p = self.z = 'NA' else: rg_ratio = np.array( gencov.tot / np.sqrt(hsq1.tot * hsq2.tot)).reshape((1, 1)) denom_delete_values = np.sqrt( np.multiply(hsq1.tot_delete_values, hsq2.tot_delete_values)) rg = jk.RatioJackknife( rg_ratio, gencov.tot_delete_values, denom_delete_values) self.rg_jknife = float(rg.jknife_est) self.rg_se = float(rg.jknife_se) self.rg_ratio = float(rg_ratio) self.p, self.z = p_z_norm(self.rg_ratio, self.rg_se) def summary(self, silly=False): '''Print output of Gencor object.''' out = [] if self._negative_hsq: out.append('Genetic Correlation: nan (nan) (h2 out of bounds) ') out.append('Z-score: nan (nan) (h2 out of bounds)') out.append('P: nan (nan) (h2 out of bounds)') out.append('WARNING: One of the h2\'s was out of bounds.') out.append( 'This usually indicates a data-munging error ' + 'or that h2 or N is low.') elif (self.rg_ratio > 1.2 or self.rg_ratio < -1.2) and not silly: out.append('Genetic Correlation: nan (nan) (rg out of bounds) ') out.append('Z-score: nan (nan) (rg out of bounds)') out.append('P: nan (nan) (rg out of bounds)') out.append('WARNING: rg was out of bounds.') if self.intercept_gencov is None: out.append( 'This often means that h2 is not significantly ' + 'different from zero.') else: out.append( 'This often means that you have constrained' + ' the intercepts to the wrong values.') else: out.append( 'Genetic Correlation: ' + s(self.rg_ratio) + ' (' + s(self.rg_se) + ')') out.append('Z-score: ' + 
s(self.z)) out.append('P: ' + s(self.p)) return remove_brackets('\n'.join(out))
gpl-3.0
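The Hsq.weights method in the entry above combines a heteroskedasticity term, 1 / (2 * (intercept + N*h2*l/M)^2), with an overcounting term 1 / w_ld. Below is a minimal standalone sketch of that weighting; it is illustrative only (not the ldsc implementation), and the scalar N and the toy LD Scores are made-up values.

import numpy as np

def hsq_regression_weights(ld, w_ld, N, M, hsq, intercept=1.0):
    # clip the heritability estimate and floor the LD Scores, as above
    hsq = min(max(hsq, 0.0), 1.0)
    ld = np.fmax(ld, 1.0)
    w_ld = np.fmax(w_ld, 1.0)
    c = hsq * N / float(M)
    het_w = 1.0 / (2.0 * (intercept + c * ld) ** 2)   # heteroskedasticity term
    return het_w / w_ld                               # times 1 / w_ld

ld = np.array([5.0, 20.0, 80.0])
w = hsq_regression_weights(ld, w_ld=ld, N=50000, M=1e6, hsq=0.3)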
sanja7s/EEDC
src/distributions/job_steps_distribution.py
1
3676
#!/usr/bin/env python # -*- coding: UTF-8 -*- """ author: sanja7s --------------- plot the distribution """ import os import datetime as dt import pandas as pd import numpy as np import matplotlib.pyplot as plt import matplotlib from collections import defaultdict from matplotlib import colors from mpl_toolkits.axes_grid import inset_locator #matplotlib.style.use('ggplot') IN_DIR = "../data/jobs" os.chdir(IN_DIR) font = {'family' : 'sans-serif', 'variant' : 'normal', 'weight' : 'light', 'size' : 14} grid = {'color' : 'gray', 'alpha' : 0.5, 'linestyle' : '-.'} lines = {'color' : 'gray'} #xticks = {'color' : 'gray'} matplotlib.rc('font', **font) matplotlib.rc('grid', **grid) matplotlib.rc('lines', **lines) #matplotlib.rc('ticks', **ticks) def read_in_data(f_in = 'all_job_ids.csv'): i = 0 distr = defaultdict(int) with open(f_in, 'r') as f: for line in f: job_id = line.strip()[1:-1] try: main_id, step_id = job_id.split('.') except ValueError as e: main_id = job_id distr[main_id] += 1 return distr.values() def hist_plot_data(): distr = read_in_data() plt.hist(distr) plt.show() def create_distribution(x): d = defaultdict(int) for el in x: d[el] += 1 return d def simple_plot_data(): d = create_distribution(read_in_data()) #print d[0] d_zero = [i for i in d.keys() if d[i] == 0] print d_zero fig = plt.figure() ax = fig.add_subplot(111) x = np.array(d.keys()) y = np.array(d.values()) print 'xaxis lenght is ', np.amax(x) print 'yaxis lenght is ' , np.amax(y), \ ' and total elements are ', np.sum(y) mu = np.mean(x) sigma = np.std(x) median = np.median(x) ax.scatter(x,y) ax.set_xscale('log') ax.set_yscale('log') plt.legend(frameon=False, fontsize=16) plt.show() def test_plot_data(): distr = read_in_data() dd = {'steps': distr} data = pd.DataFrame(data=dd) print(data.describe()) #data.boxplot() data.hist() plt.show() def plot_data(lab='', xlab='job steps', ylab = '# of jobs', \ fname = 'all/distr_of_distr_all_job_steps.eps', col='green', \ s=12, Move=0.00003, input_file='all_job_ids.csv'): d = create_distribution(read_in_data(input_file)) d = create_distribution(d.values()) fig = plt.figure() ax = fig.add_subplot(111) x = np.array(d.keys()) y = np.array(d.values()) print 'xaxis lenght is ', np.amax(x) print 'yaxis lenght is ' , np.amax(y), \ ' and total elements are ', np.sum(y) mu = np.mean(x) sigma = np.std(x) median = np.median(x) ax.scatter(x+1,y,color=col,s=10.5,edgecolor='none') ax.set_xscale('log') ax.set_yscale('log') plt.xlabel(xlab) plt.ylabel(ylab) plt.grid(True) plt.legend(frameon=False, fontsize=14) def adjust_spines(ax, spines): for loc, spine in ax.spines.items(): if loc in spines: #spine.set_position(('outward', 10)) # outward by 10 points spine.set_smart_bounds(False) else: spine.set_color('none') # don't draw spine # turn off ticks where there is no spine if 'left' in spines: ax.yaxis.set_ticks_position('left') else: # no yaxis ticks ax.yaxis.set_ticks([]) if 'bottom' in spines: ax.xaxis.set_ticks_position('bottom') else: # no xaxis ticks ax.xaxis.set_ticks([]) adjust_spines(ax, ['left', 'bottom']) textstr = '$\mu=%.2f$\n$\mathrm{m}=%.2f$\n$\sigma=%.2f$'\ %(mu, median, sigma) # place a text box in upper left in axes coords # these are matplotlib.patch.Patch properties ax.text(0.2+Move, 0.95, textstr, transform=ax.transAxes,\ fontsize=14, verticalalignment='top', color=col) #bbox=props) plt.grid(True) plt.savefig(fname) print 'Saved in figure ', fname plot_data(input_file='all_job_ids.csv')
apache-2.0
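The script above counts how many '.step' suffixes each main job id carries and then histograms those counts. For reference, a small sketch of the same counting with collections.Counter, on made-up ids (assuming the surrounding quotes are already stripped):

from collections import Counter

def job_step_counts(job_ids):
    # 'main.step' contributes to 'main'; a bare id counts as one step
    return Counter(jid.split('.', 1)[0] for jid in job_ids)

counts = job_step_counts(['12.0', '12.1', '13', '14.batch'])
# Counter({'12': 2, '13': 1, '14': 1})
distribution = Counter(counts.values())   # how many jobs have k steps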
vybstat/scikit-learn
examples/ensemble/plot_gradient_boosting_oob.py
230
4762
""" ====================================== Gradient Boosting Out-of-Bag estimates ====================================== Out-of-bag (OOB) estimates can be a useful heuristic to estimate the "optimal" number of boosting iterations. OOB estimates are almost identical to cross-validation estimates but they can be computed on-the-fly without the need for repeated model fitting. OOB estimates are only available for Stochastic Gradient Boosting (i.e. ``subsample < 1.0``), the estimates are derived from the improvement in loss based on the examples not included in the bootstrap sample (the so-called out-of-bag examples). The OOB estimator is a pessimistic estimator of the true test loss, but remains a fairly good approximation for a small number of trees. The figure shows the cumulative sum of the negative OOB improvements as a function of the boosting iteration. As you can see, it tracks the test loss for the first hundred iterations but then diverges in a pessimistic way. The figure also shows the performance of 3-fold cross validation which usually gives a better estimate of the test loss but is computationally more demanding. """ print(__doc__) # Author: Peter Prettenhofer <peter.prettenhofer@gmail.com> # # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt from sklearn import ensemble from sklearn.cross_validation import KFold from sklearn.cross_validation import train_test_split # Generate data (adapted from G. Ridgeway's gbm example) n_samples = 1000 random_state = np.random.RandomState(13) x1 = random_state.uniform(size=n_samples) x2 = random_state.uniform(size=n_samples) x3 = random_state.randint(0, 4, size=n_samples) p = 1 / (1.0 + np.exp(-(np.sin(3 * x1) - 4 * x2 + x3))) y = random_state.binomial(1, p, size=n_samples) X = np.c_[x1, x2, x3] X = X.astype(np.float32) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=9) # Fit classifier with out-of-bag estimates params = {'n_estimators': 1200, 'max_depth': 3, 'subsample': 0.5, 'learning_rate': 0.01, 'min_samples_leaf': 1, 'random_state': 3} clf = ensemble.GradientBoostingClassifier(**params) clf.fit(X_train, y_train) acc = clf.score(X_test, y_test) print("Accuracy: {:.4f}".format(acc)) n_estimators = params['n_estimators'] x = np.arange(n_estimators) + 1 def heldout_score(clf, X_test, y_test): """compute deviance scores on ``X_test`` and ``y_test``. 
""" score = np.zeros((n_estimators,), dtype=np.float64) for i, y_pred in enumerate(clf.staged_decision_function(X_test)): score[i] = clf.loss_(y_test, y_pred) return score def cv_estimate(n_folds=3): cv = KFold(n=X_train.shape[0], n_folds=n_folds) cv_clf = ensemble.GradientBoostingClassifier(**params) val_scores = np.zeros((n_estimators,), dtype=np.float64) for train, test in cv: cv_clf.fit(X_train[train], y_train[train]) val_scores += heldout_score(cv_clf, X_train[test], y_train[test]) val_scores /= n_folds return val_scores # Estimate best n_estimator using cross-validation cv_score = cv_estimate(3) # Compute best n_estimator for test data test_score = heldout_score(clf, X_test, y_test) # negative cumulative sum of oob improvements cumsum = -np.cumsum(clf.oob_improvement_) # min loss according to OOB oob_best_iter = x[np.argmin(cumsum)] # min loss according to test (normalize such that first loss is 0) test_score -= test_score[0] test_best_iter = x[np.argmin(test_score)] # min loss according to cv (normalize such that first loss is 0) cv_score -= cv_score[0] cv_best_iter = x[np.argmin(cv_score)] # color brew for the three curves oob_color = list(map(lambda x: x / 256.0, (190, 174, 212))) test_color = list(map(lambda x: x / 256.0, (127, 201, 127))) cv_color = list(map(lambda x: x / 256.0, (253, 192, 134))) # plot curves and vertical lines for best iterations plt.plot(x, cumsum, label='OOB loss', color=oob_color) plt.plot(x, test_score, label='Test loss', color=test_color) plt.plot(x, cv_score, label='CV loss', color=cv_color) plt.axvline(x=oob_best_iter, color=oob_color) plt.axvline(x=test_best_iter, color=test_color) plt.axvline(x=cv_best_iter, color=cv_color) # add three vertical lines to xticks xticks = plt.xticks() xticks_pos = np.array(xticks[0].tolist() + [oob_best_iter, cv_best_iter, test_best_iter]) xticks_label = np.array(list(map(lambda t: int(t), xticks[0])) + ['OOB', 'CV', 'Test']) ind = np.argsort(xticks_pos) xticks_pos = xticks_pos[ind] xticks_label = xticks_label[ind] plt.xticks(xticks_pos, xticks_label) plt.legend(loc='upper right') plt.ylabel('normalized loss') plt.xlabel('number of iterations') plt.show()
bsd-3-clause
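The module docstring above describes picking the number of boosting iterations from the cumulative sum of out-of-bag improvements. A minimal sketch of that heuristic on synthetic data follows; it assumes a scikit-learn version where GradientBoostingClassifier exposes oob_improvement_ whenever subsample < 1.0 (the example above imports the older cross_validation module, which newer releases replace with model_selection).

import numpy as np
from sklearn.datasets import make_classification
from sklearn.ensemble import GradientBoostingClassifier

X, y = make_classification(n_samples=500, random_state=0)
clf = GradientBoostingClassifier(n_estimators=300, subsample=0.5,
                                 learning_rate=0.05, random_state=0)
clf.fit(X, y)

# cumulative negative OOB improvement; its minimum is the OOB estimate
# of the best iteration
cum_oob = -np.cumsum(clf.oob_improvement_)
best_iter = int(np.argmin(cum_oob)) + 1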
flyingpoops/kaggle-digit-recognizer-team-learning
submit.py
1
1922
import os os.environ["THEANO_FLAGS"] = "mode=FAST_RUN,device=gpu,floatX=float32,lib.cnmem=1,dnn.enabled=False" import pandas as pd import time ########################################################## # Input varialbles flag = 1 #0 for sklearn and 1 for keras classifier_file_name = 'model/cnn2.json' #'C:\kaggle\dr\rfab.pkl' for sklearn weight_file_name = 'model/weights.020-0.992.hdf5' output_file_name = '789.csv' if flag == 0: from sklearn.externals import joblib ########################################################## # Reading Data print ("Reading Test Data") test = pd.read_csv('input/test.csv') ########################################################## # Load Classifier print ("Loading Classifier") clf = joblib.load(classifier_file_name) ########################################################## # Test classifier print ("Generating results to submit") y = clf.predict(test) else: from keras.models import model_from_json ########################################################## # Reading Data print ("Reading Test Data") test = pd.read_csv('input/test.csv').values testX = test.reshape(test.shape[0], 1, 28, 28) testX = testX.astype('float32') testX /= 255.0 ########################################################## # Load Classifier print ("Loading Classifier") fo = open(classifier_file_name, "r") model = model_from_json(fo.read()) fo.close() model.load_weights(weight_file_name) model.compile(loss="categorical_crossentropy", optimizer="adadelta", metrics=["accuracy"]) ########################################################## # Test classifier print ("Generating results to submit") y = model.predict_classes(testX) ########################################################## print ("Writing to file") predictions = pd.DataFrame(data=y,columns=["label"]) predictions["ImageId"] = list(range(1,len(test)+1)) predictions.to_csv(output_file_name,index=False)
apache-2.0
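The Keras branch above rebuilds a model from a JSON architecture file plus a weights file and calls predict_classes, which later Keras releases removed. A hedged sketch of the same flow that works either way takes argmax over predict instead. The file names are the ones from the entry and stand in for whatever artifacts you actually have; the random testX below is only a placeholder for real 28x28 images scaled to [0, 1].

import numpy as np
from keras.models import model_from_json

with open('model/cnn2.json') as fo:
    model = model_from_json(fo.read())
model.load_weights('model/weights.020-0.992.hdf5')
model.compile(loss='categorical_crossentropy', optimizer='adadelta',
              metrics=['accuracy'])

testX = np.random.rand(4, 1, 28, 28).astype('float32')  # placeholder input
labels = np.argmax(model.predict(testX), axis=1)         # replaces predict_classes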
joshloyal/scikit-learn
sklearn/tests/test_metaestimators.py
52
4990
"""Common tests for metaestimators""" import functools import numpy as np from sklearn.base import BaseEstimator from sklearn.externals.six import iterkeys from sklearn.datasets import make_classification from sklearn.utils.testing import assert_true, assert_false, assert_raises from sklearn.pipeline import Pipeline from sklearn.model_selection import GridSearchCV, RandomizedSearchCV from sklearn.feature_selection import RFE, RFECV from sklearn.ensemble import BaggingClassifier class DelegatorData(object): def __init__(self, name, construct, skip_methods=(), fit_args=make_classification()): self.name = name self.construct = construct self.fit_args = fit_args self.skip_methods = skip_methods DELEGATING_METAESTIMATORS = [ DelegatorData('Pipeline', lambda est: Pipeline([('est', est)])), DelegatorData('GridSearchCV', lambda est: GridSearchCV( est, param_grid={'param': [5]}, cv=2), skip_methods=['score']), DelegatorData('RandomizedSearchCV', lambda est: RandomizedSearchCV( est, param_distributions={'param': [5]}, cv=2, n_iter=1), skip_methods=['score']), DelegatorData('RFE', RFE, skip_methods=['transform', 'inverse_transform', 'score']), DelegatorData('RFECV', RFECV, skip_methods=['transform', 'inverse_transform', 'score']), DelegatorData('BaggingClassifier', BaggingClassifier, skip_methods=['transform', 'inverse_transform', 'score', 'predict_proba', 'predict_log_proba', 'predict']) ] def test_metaestimator_delegation(): # Ensures specified metaestimators have methods iff subestimator does def hides(method): @property def wrapper(obj): if obj.hidden_method == method.__name__: raise AttributeError('%r is hidden' % obj.hidden_method) return functools.partial(method, obj) return wrapper class SubEstimator(BaseEstimator): def __init__(self, param=1, hidden_method=None): self.param = param self.hidden_method = hidden_method def fit(self, X, y=None, *args, **kwargs): self.coef_ = np.arange(X.shape[1]) return True def _check_fit(self): if not hasattr(self, 'coef_'): raise RuntimeError('Estimator is not fit') @hides def inverse_transform(self, X, *args, **kwargs): self._check_fit() return X @hides def transform(self, X, *args, **kwargs): self._check_fit() return X @hides def predict(self, X, *args, **kwargs): self._check_fit() return np.ones(X.shape[0]) @hides def predict_proba(self, X, *args, **kwargs): self._check_fit() return np.ones(X.shape[0]) @hides def predict_log_proba(self, X, *args, **kwargs): self._check_fit() return np.ones(X.shape[0]) @hides def decision_function(self, X, *args, **kwargs): self._check_fit() return np.ones(X.shape[0]) @hides def score(self, X, *args, **kwargs): self._check_fit() return 1.0 methods = [k for k in iterkeys(SubEstimator.__dict__) if not k.startswith('_') and not k.startswith('fit')] methods.sort() for delegator_data in DELEGATING_METAESTIMATORS: delegate = SubEstimator() delegator = delegator_data.construct(delegate) for method in methods: if method in delegator_data.skip_methods: continue assert_true(hasattr(delegate, method)) assert_true(hasattr(delegator, method), msg="%s does not have method %r when its delegate does" % (delegator_data.name, method)) # delegation before fit raises an exception assert_raises(Exception, getattr(delegator, method), delegator_data.fit_args[0]) delegator.fit(*delegator_data.fit_args) for method in methods: if method in delegator_data.skip_methods: continue # smoke test delegation getattr(delegator, method)(delegator_data.fit_args[0]) for method in methods: if method in delegator_data.skip_methods: continue delegate = 
SubEstimator(hidden_method=method) delegator = delegator_data.construct(delegate) assert_false(hasattr(delegate, method)) assert_false(hasattr(delegator, method), msg="%s has method %r when its delegate does not" % (delegator_data.name, method))
bsd-3-clause
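The test above relies on a small trick: wrapping a method in a property that raises AttributeError makes hasattr() report the method as absent, so the delegating metaestimator can be exercised both with and without the delegated method. A minimal standalone version of that trick:

import functools

def hides(method):
    # expose `method` as a property that disappears when the instance
    # marks it as hidden, so hasattr() returns False for it
    @property
    def wrapper(self):
        if self.hidden == method.__name__:
            raise AttributeError('%r is hidden' % self.hidden)
        return functools.partial(method, self)
    return wrapper

class Widget(object):
    def __init__(self, hidden=None):
        self.hidden = hidden

    @hides
    def transform(self, x):
        return x

hasattr(Widget(), 'transform')                    # True
hasattr(Widget(hidden='transform'), 'transform')  # False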
ShipJ/Code
Projects/M2020/pass.py
1
1169
import sys import pandas as pd import numpy as np from Code.config import get_path from collections import Counter pd.set_option('display.width', 500) def main(): path = get_path() # File path to data store sessions = pd.DataFrame(pd.read_csv(path+'/PassGroup/sessions.csv')) names = pd.DataFrame(pd.read_csv(path+'/PassGroup/names.csv')) combined = sessions.merge(names, on='SessionCode', how='left') dels = pd.DataFrame(pd.read_csv(path + '/PassGroup/dels.csv')) # dels = dels.dropna(subset=['ID']).reset_index(drop=True) print dels a = Counter(dels['ID']) b = [i for i in a.keys() if a[i] > 1] c = dels[dels['ID'].isin(b)] d = c[c['PaymentStatus'] == 'Paid'].reset_index(drop=True) rev = d.groupby('ID')['Revenue'].sum().reset_index() dels = dels.drop_duplicates(subset=['ID'], keep='first') print dels for i in range(len(rev)): dels[dels['ID'] == rev['ID'].ix[i]]['Revenue'] = rev['Revenue'].ix[i] all = combined.merge(dels, on='ID', how='left') # all.to_csv(path+'/PassGroup/pass_sessions.csv', index=None, encoding='utf-8-sig') if __name__ == '__main__': main()
mit
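The script above sums Revenue over paid duplicates of an ID and then writes the totals back row by row; the assignment inside that loop is a chained-indexing write, which pandas does not propagate to the original frame. Below is a sketch of the same aggregation done with groupby plus merge, on toy column values invented for illustration:

import pandas as pd

dels = pd.DataFrame({
    'ID': [1, 1, 2, 3],
    'PaymentStatus': ['Paid', 'Paid', 'Paid', 'Unpaid'],
    'Revenue': [10.0, 5.0, 7.0, 0.0],
})

paid = dels[dels['PaymentStatus'] == 'Paid']
rev = paid.groupby('ID', as_index=False)['Revenue'].sum()

dedup = (dels.drop_duplicates(subset=['ID'], keep='first')
             .drop(columns=['Revenue'])
             .merge(rev, on='ID', how='left'))
dedup['Revenue'] = dedup['Revenue'].fillna(0.0)   # IDs with no paid rows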
StefReck/Km3-Autoencoder
scripts/plotting/plot_occurance_in_dataset.py
1
2648
# -*- coding: utf-8 -*- """ Plot number of events vs Energy, binned. and # up events vs Energy, binned. """ import numpy as np import h5py import matplotlib.pyplot as plt import sys sys.path.append('scripts/util/') #sys.path.append('../util/') from saved_setups_for_plot_statistics import get_plot_statistics_plot_size datafile="data/xzt/train_muon-CC_and_elec-CC_each_240_xzt_shuffled.h5" save_to="results/plots/stats_train_muon-CC_and_elec-CC_each_240_xzt_shuffled.pdf" save_to_down="results/plots/stats_train_muon-CC_and_elec-CC_each_240_xzt_shuffled_down.pdf" debug=0 if debug: energies=np.random.rand(1000)*100 downgoing=np.random.randint(0,2,size=1000) else: file=h5py.File(datafile, "r") mc_info=file["y"] #event_id -> 0, particle_type -> 1, energy -> 2, isCC -> 3, bjorkeny -> 4, #dir_x/y/z -> 5/6/7, time -> 8] energies=mc_info[:,2] downgoing = mc_info[:,7]<0 def make_plot(energies): ylog=True xlog=False figsize, fontsize = get_plot_statistics_plot_size("two_in_one_line") plt.rcParams.update({'font.size': fontsize}) fig, ax = plt.subplots(figsize=figsize) if ylog: plt.yscale("log") if xlog: plt.xscale("log") energy_bins=np.logspace(np.log10(3),np.log10(100),98) else: energy_bins=np.linspace(3,100,98) ax.hist(energies, energy_bins, histtype="step", lw=2) plt.grid() plt.xlabel("Energy (GeV)") plt.ylabel("Number of Events") plt.subplots_adjust(left=0.2) return fig def make_plot_down(downgoing, energy): figsize, fontsize = get_plot_statistics_plot_size("two_in_one_line") plt.rcParams.update({'font.size': fontsize}) fig, ax = plt.subplots(figsize=figsize) energy_bins=np.linspace(3,100,98) hist_1d_energy = np.histogram(energy, bins=98, range=(3,100)) #häufigkeit von energien hist_1d_energy_correct = np.histogram(energy[downgoing.astype(bool)], bins=98, range=(3,100)) #häufigkeit von richtigen energien hist_1d_energy_accuracy_bins = 100*np.divide(hist_1d_energy_correct[0], hist_1d_energy[0], dtype=np.float32) #rel häufigkeit von richtigen energien print("Total fraction of down going events:", np.mean(downgoing)) ax.step(energy_bins, hist_1d_energy_accuracy_bins, lw=2, ) plt.grid() plt.xlabel("Energy (GeV)") plt.ylabel("Down-going (%)") plt.subplots_adjust(left=0.2) return fig fig = make_plot(energies) fig_down = make_plot_down(downgoing, energies) plt.show(fig_down) fig.savefig(save_to) print("Plot saved to", save_to) fig_down.savefig(save_to_down) print("Plot saved to", save_to_down)
mit
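make_plot_down above divides a histogram of down-going events by a histogram of all events to get a per-bin percentage. A short sketch of that binned-fraction computation on synthetic values (the random 'downgoing' mask stands in for dir_z < 0), guarding against empty bins:

import numpy as np

energies = np.random.uniform(3, 100, size=10000)
downgoing = np.random.rand(10000) < 0.5            # placeholder for dir_z < 0

bins = np.linspace(3, 100, 98)
total, _ = np.histogram(energies, bins=bins)
down, _ = np.histogram(energies[downgoing], bins=bins)

# per-bin percentage; empty bins become nan instead of raising
frac = 100.0 * np.divide(down, total,
                         out=np.full(total.shape, np.nan),
                         where=total > 0)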
RPGOne/Skynet
scikit-learn-c604ac39ad0e5b066d964df3e8f31ba7ebda1e0e/sklearn/externals/joblib/parallel.py
3
28122
""" Helpers for embarrassingly parallel code. """ # Author: Gael Varoquaux < gael dot varoquaux at normalesup dot org > # Copyright: 2010, Gael Varoquaux # License: BSD 3 clause import os import sys import gc import warnings from collections import Sized from math import sqrt import functools import time import threading import itertools try: import cPickle as pickle except: import pickle from ._multiprocessing_helpers import mp if mp is not None: from .pool import MemmapingPool from multiprocessing.pool import ThreadPool from .format_stack import format_exc, format_outer_frames from .logger import Logger, short_format_time from .my_exceptions import TransportableException, _mk_exception from .disk import memstr_to_kbytes from ._compat import _basestring VALID_BACKENDS = ['multiprocessing', 'threading'] # Environment variables to protect against bad situations when nesting JOBLIB_SPAWNED_PROCESS = "__JOBLIB_SPAWNED_PARALLEL__" ############################################################################### # CPU that works also when multiprocessing is not installed (python2.5) def cpu_count(): """ Return the number of CPUs. """ if mp is None: return 1 return mp.cpu_count() ############################################################################### # For verbosity def _verbosity_filter(index, verbose): """ Returns False for indices increasingly apart, the distance depending on the value of verbose. We use a lag increasing as the square of index """ if not verbose: return True elif verbose > 10: return False if index == 0: return False verbose = .5 * (11 - verbose) ** 2 scale = sqrt(index / verbose) next_scale = sqrt((index + 1) / verbose) return (int(next_scale) == int(scale)) ############################################################################### class WorkerInterrupt(Exception): """ An exception that is not KeyboardInterrupt to allow subprocesses to be interrupted. """ pass ############################################################################### class SafeFunction(object): """ Wraps a function to make it exception with full traceback in their representation. Useful for parallel computing with multiprocessing, for which exceptions cannot be captured. """ def __init__(self, func): self.func = func def __call__(self, *args, **kwargs): try: return self.func(*args, **kwargs) except KeyboardInterrupt: # We capture the KeyboardInterrupt and reraise it as # something different, as multiprocessing does not # interrupt processing for a KeyboardInterrupt raise WorkerInterrupt() except: e_type, e_value, e_tb = sys.exc_info() text = format_exc(e_type, e_value, e_tb, context=10, tb_offset=1) raise TransportableException(text, e_type) ############################################################################### def delayed(function): """ Decorator used to capture the arguments of a function. """ # Try to pickle the input function, to catch the problems early when # using with multiprocessing pickle.dumps(function) def delayed_function(*args, **kwargs): return function, args, kwargs try: delayed_function = functools.wraps(function)(delayed_function) except AttributeError: " functools.wraps fails on some callable objects " return delayed_function ############################################################################### class ImmediateApply(object): """ A non-delayed apply function. 
""" def __init__(self, func, args, kwargs): # Don't delay the application, to avoid keeping the input # arguments in memory self.results = func(*args, **kwargs) def get(self): return self.results ############################################################################### class CallBack(object): """ Callback used by parallel: it is used for progress reporting, and to add data to be processed """ def __init__(self, index, parallel): self.parallel = parallel self.index = index def __call__(self, out): self.parallel.print_progress(self.index) if self.parallel._original_iterable: self.parallel.dispatch_next() class LockedIterator(object): """Wrapper to protect a thread-unsafe iterable against concurrent access. A Python generator is not thread-safe by default and will raise ValueError("generator already executing") if two threads consume it concurrently. In joblib this could typically happen when the passed iterator is a generator expression and pre_dispatch != 'all'. In that case a callback is passed to the multiprocessing apply_async call and helper threads will trigger the consumption of the source iterable in the dispatch_next method. """ def __init__(self, it): self._lock = threading.Lock() self._it = iter(it) def __iter__(self): return self def next(self): with self._lock: return next(self._it) # For Python 3 compat __next__ = next ############################################################################### class Parallel(Logger): ''' Helper class for readable parallel mapping. Parameters ----------- n_jobs: int The number of jobs to use for the computation. If -1 all CPUs are used. If 1 is given, no parallel computing code is used at all, which is useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one are used. backend: str or None Specify the parallelization backend implementation. Supported backends are: - "multiprocessing" used by default, can induce some communication and memory overhead when exchanging input and output data with the with the worker Python processes. - "threading" is a very low-overhead backend but it suffers from the Python Global Interpreter Lock if the called function relies a lot on Python objects. "threading" is mostly useful when the execution bottleneck is a compiled extension that explicitly releases the GIL (for instance a Cython loop wrapped in a "with nogil" block or an expensive call to a library such as NumPy). verbose: int, optional The verbosity level: if non zero, progress messages are printed. Above 50, the output is sent to stdout. The frequency of the messages increases with the verbosity level. If it more than 10, all iterations are reported. pre_dispatch: {'all', integer, or expression, as in '3*n_jobs'} The amount of jobs to be pre-dispatched. Default is 'all', but it may be memory consuming, for instance if each job involves a lot of a data. temp_folder: str, optional Folder to be used by the pool for memmaping large arrays for sharing memory with worker processes. If None, this will try in order: - a folder pointed by the JOBLIB_TEMP_FOLDER environment variable, - /dev/shm if the folder exists and is writable: this is a RAMdisk filesystem available by default on modern Linux distributions, - the default system temporary folder that can be overridden with TMP, TMPDIR or TEMP environment variables, typically /tmp under Unix operating systems. Only active when backend="multiprocessing". 
max_nbytes int, str, or None, optional, 100e6 (100MB) by default Threshold on the size of arrays passed to the workers that triggers automated memory mapping in temp_folder. Can be an int in Bytes, or a human-readable string, e.g., '1M' for 1 megabyte. Use None to disable memmaping of large arrays. Only active when backend="multiprocessing". Notes ----- This object uses the multiprocessing module to compute in parallel the application of a function to many different arguments. The main functionality it brings in addition to using the raw multiprocessing API are (see examples for details): * More readable code, in particular since it avoids constructing list of arguments. * Easier debugging: - informative tracebacks even when the error happens on the client side - using 'n_jobs=1' enables to turn off parallel computing for debugging without changing the codepath - early capture of pickling errors * An optional progress meter. * Interruption of multiprocesses jobs with 'Ctrl-C' * Flexible pickling control for the communication to and from the worker processes. * Ability to use shared memory efficiently with worker processes for large numpy-based datastructures. Examples -------- A simple example: >>> from math import sqrt >>> from sklearn.externals.joblib import Parallel, delayed >>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10)) [0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0] Reshaping the output when the function has several return values: >>> from math import modf >>> from sklearn.externals.joblib import Parallel, delayed >>> r = Parallel(n_jobs=1)(delayed(modf)(i/2.) for i in range(10)) >>> res, i = zip(*r) >>> res (0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5) >>> i (0.0, 0.0, 1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0) The progress meter: the higher the value of `verbose`, the more messages:: >>> from time import sleep >>> from sklearn.externals.joblib import Parallel, delayed >>> r = Parallel(n_jobs=2, verbose=5)(delayed(sleep)(.1) for _ in range(10)) #doctest: +SKIP [Parallel(n_jobs=2)]: Done 1 out of 10 | elapsed: 0.1s remaining: 0.9s [Parallel(n_jobs=2)]: Done 3 out of 10 | elapsed: 0.2s remaining: 0.5s [Parallel(n_jobs=2)]: Done 6 out of 10 | elapsed: 0.3s remaining: 0.2s [Parallel(n_jobs=2)]: Done 9 out of 10 | elapsed: 0.5s remaining: 0.1s [Parallel(n_jobs=2)]: Done 10 out of 10 | elapsed: 0.5s finished Traceback example, note how the line of the error is indicated as well as the values of the parameter passed to the function that triggered the exception, even though the traceback happens in the child process:: >>> from heapq import nlargest >>> from sklearn.externals.joblib import Parallel, delayed >>> Parallel(n_jobs=2)(delayed(nlargest)(2, n) for n in (range(4), 'abcde', 3)) #doctest: +SKIP #... --------------------------------------------------------------------------- Sub-process traceback: --------------------------------------------------------------------------- TypeError Mon Nov 12 11:37:46 2012 PID: 12934 Python 2.7.3: /usr/bin/python ........................................................................... 
/usr/lib/python2.7/heapq.pyc in nlargest(n=2, iterable=3, key=None) 419 if n >= size: 420 return sorted(iterable, key=key, reverse=True)[:n] 421 422 # When key is none, use simpler decoration 423 if key is None: --> 424 it = izip(iterable, count(0,-1)) # decorate 425 result = _nlargest(n, it) 426 return map(itemgetter(0), result) # undecorate 427 428 # General case, slowest method TypeError: izip argument #1 must support iteration ___________________________________________________________________________ Using pre_dispatch in a producer/consumer situation, where the data is generated on the fly. Note how the producer is first called a 3 times before the parallel loop is initiated, and then called to generate new data on the fly. In this case the total number of iterations cannot be reported in the progress messages:: >>> from math import sqrt >>> from sklearn.externals.joblib import Parallel, delayed >>> def producer(): ... for i in range(6): ... print('Produced %s' % i) ... yield i >>> out = Parallel(n_jobs=2, verbose=100, pre_dispatch='1.5*n_jobs')( ... delayed(sqrt)(i) for i in producer()) #doctest: +SKIP Produced 0 Produced 1 Produced 2 [Parallel(n_jobs=2)]: Done 1 jobs | elapsed: 0.0s Produced 3 [Parallel(n_jobs=2)]: Done 2 jobs | elapsed: 0.0s Produced 4 [Parallel(n_jobs=2)]: Done 3 jobs | elapsed: 0.0s Produced 5 [Parallel(n_jobs=2)]: Done 4 jobs | elapsed: 0.0s [Parallel(n_jobs=2)]: Done 5 out of 6 | elapsed: 0.0s remaining: 0.0s [Parallel(n_jobs=2)]: Done 6 out of 6 | elapsed: 0.0s finished ''' def __init__(self, n_jobs=1, backend=None, verbose=0, pre_dispatch='all', temp_folder=None, max_nbytes=100e6, mmap_mode='r'): self.verbose = verbose self._mp_context = None if backend is None: backend = "multiprocessing" elif hasattr(backend, 'Pool') and hasattr(backend, 'Lock'): # Make it possible to pass a custom multiprocessing context as # backend to change the start method to forkserver or spawn or # preload modules on the forkserver helper process. self._mp_context = backend backend = "multiprocessing" if backend not in VALID_BACKENDS: raise ValueError("Invalid backend: %s, expected one of %r" % (backend, VALID_BACKENDS)) self.backend = backend self.n_jobs = n_jobs self.pre_dispatch = pre_dispatch self._pool = None self._temp_folder = temp_folder if isinstance(max_nbytes, _basestring): self._max_nbytes = 1024 * memstr_to_kbytes(max_nbytes) else: self._max_nbytes = max_nbytes self._mmap_mode = mmap_mode # Not starting the pool in the __init__ is a design decision, to be # able to close it ASAP, and not burden the user with closing it. 
self._output = None self._jobs = list() # A flag used to abort the dispatching of jobs in case an # exception is found self._aborting = False def dispatch(self, func, args, kwargs): """ Queue the function for computing, with or without multiprocessing """ if self._pool is None: job = ImmediateApply(func, args, kwargs) index = len(self._jobs) if not _verbosity_filter(index, self.verbose): self._print('Done %3i jobs | elapsed: %s', (index + 1, short_format_time(time.time() - self._start_time) )) self._jobs.append(job) self.n_dispatched += 1 else: # If job.get() catches an exception, it closes the queue: if self._aborting: return try: self._lock.acquire() job = self._pool.apply_async(SafeFunction(func), args, kwargs, callback=CallBack(self.n_dispatched, self)) self._jobs.append(job) self.n_dispatched += 1 except AssertionError: print('[Parallel] Pool seems closed') finally: self._lock.release() def dispatch_next(self): """ Dispatch more data for parallel processing """ self._dispatch_amount += 1 while self._dispatch_amount: try: # XXX: possible race condition shuffling the order of # dispatches in the next two lines. func, args, kwargs = next(self._original_iterable) self.dispatch(func, args, kwargs) self._dispatch_amount -= 1 except ValueError: """ Race condition in accessing a generator, we skip, the dispatch will be done later. """ except StopIteration: self._iterating = False self._original_iterable = None return def _print(self, msg, msg_args): """ Display the message on stout or stderr depending on verbosity """ # XXX: Not using the logger framework: need to # learn to use logger better. if not self.verbose: return if self.verbose < 50: writer = sys.stderr.write else: writer = sys.stdout.write msg = msg % msg_args writer('[%s]: %s\n' % (self, msg)) def print_progress(self, index): """Display the process of the parallel execution only a fraction of time, controlled by self.verbose. 
""" if not self.verbose: return elapsed_time = time.time() - self._start_time # This is heuristic code to print only 'verbose' times a messages # The challenge is that we may not know the queue length if self._original_iterable: if _verbosity_filter(index, self.verbose): return self._print('Done %3i jobs | elapsed: %s', (index + 1, short_format_time(elapsed_time), )) else: # We are finished dispatching queue_length = self.n_dispatched # We always display the first loop if not index == 0: # Display depending on the number of remaining items # A message as soon as we finish dispatching, cursor is 0 cursor = (queue_length - index + 1 - self._pre_dispatch_amount) frequency = (queue_length // self.verbose) + 1 is_last_item = (index + 1 == queue_length) if (is_last_item or cursor % frequency): return remaining_time = (elapsed_time / (index + 1) * (self.n_dispatched - index - 1.)) self._print('Done %3i out of %3i | elapsed: %s remaining: %s', (index + 1, queue_length, short_format_time(elapsed_time), short_format_time(remaining_time), )) def retrieve(self): self._output = list() while self._iterating or len(self._jobs) > 0: if len(self._jobs) == 0: # Wait for an async callback to dispatch new jobs time.sleep(0.01) continue # We need to be careful: the job queue can be filling up as # we empty it if hasattr(self, '_lock'): self._lock.acquire() job = self._jobs.pop(0) if hasattr(self, '_lock'): self._lock.release() try: self._output.append(job.get()) except tuple(self.exceptions) as exception: try: self._aborting = True self._lock.acquire() if isinstance(exception, (KeyboardInterrupt, WorkerInterrupt)): # We have captured a user interruption, clean up # everything if hasattr(self, '_pool'): self._pool.close() self._pool.terminate() # We can now allow subprocesses again os.environ.pop('__JOBLIB_SPAWNED_PARALLEL__', 0) raise exception elif isinstance(exception, TransportableException): # Capture exception to add information on the local # stack in addition to the distant stack this_report = format_outer_frames(context=10, stack_start=1) report = """Multiprocessing exception: %s --------------------------------------------------------------------------- Sub-process traceback: --------------------------------------------------------------------------- %s""" % ( this_report, exception.message, ) # Convert this to a JoblibException exception_type = _mk_exception(exception.etype)[0] raise exception_type(report) raise exception finally: self._lock.release() def __call__(self, iterable): if self._jobs: raise ValueError('This Parallel instance is already running') n_jobs = self.n_jobs if n_jobs == 0: raise ValueError('n_jobs == 0 in Parallel has no meaning') if n_jobs < 0 and mp is not None: n_jobs = max(mp.cpu_count() + 1 + n_jobs, 1) # The list of exceptions that we will capture self.exceptions = [TransportableException] self._lock = threading.Lock() # Whether or not to set an environment flag to track # multiple process spawning set_environ_flag = False if (n_jobs is None or mp is None or n_jobs == 1): n_jobs = 1 self._pool = None elif self.backend == 'threading': self._pool = ThreadPool(n_jobs) elif self.backend == 'multiprocessing': if mp.current_process().daemon: # Daemonic processes cannot have children n_jobs = 1 self._pool = None warnings.warn( 'Multiprocessing-backed parallel loops cannot be nested,' ' setting n_jobs=1', stacklevel=2) elif threading.current_thread().name != 'MainThread': # Prevent posix fork inside in non-main posix threads n_jobs = 1 self._pool = None warnings.warn( 
'Multiprocessing backed parallel loops cannot be nested' ' below threads, setting n_jobs=1', stacklevel=2) else: already_forked = int(os.environ.get('__JOBLIB_SPAWNED_PARALLEL__', 0)) if already_forked: raise ImportError('[joblib] Attempting to do parallel computing ' 'without protecting your import on a system that does ' 'not support forking. To use parallel-computing in a ' 'script, you must protect you main loop using "if ' "__name__ == '__main__'" '". Please see the joblib documentation on Parallel ' 'for more information' ) # Make sure to free as much memory as possible before forking gc.collect() # Set an environment variable to avoid infinite loops set_environ_flag = True poolargs = dict( max_nbytes=self._max_nbytes, mmap_mode=self._mmap_mode, temp_folder=self._temp_folder, verbose=max(0, self.verbose - 50), context_id=0, # the pool is used only for one call ) if self._mp_context is not None: # Use Python 3.4+ multiprocessing context isolation poolargs['context'] = self._mp_context self._pool = MemmapingPool(n_jobs, **poolargs) # We are using multiprocessing, we also want to capture # KeyboardInterrupts self.exceptions.extend([KeyboardInterrupt, WorkerInterrupt]) else: raise ValueError("Unsupported backend: %s" % self.backend) pre_dispatch = self.pre_dispatch if isinstance(iterable, Sized): # We are given a sized (an object with len). No need to be lazy. pre_dispatch = 'all' if pre_dispatch == 'all' or n_jobs == 1: self._original_iterable = None self._pre_dispatch_amount = 0 else: # The dispatch mechanism relies on multiprocessing helper threads # to dispatch tasks from the original iterable concurrently upon # job completions. As Python generators are not thread-safe we # need to wrap it with a lock iterable = LockedIterator(iterable) self._original_iterable = iterable self._dispatch_amount = 0 if hasattr(pre_dispatch, 'endswith'): pre_dispatch = eval(pre_dispatch) self._pre_dispatch_amount = pre_dispatch = int(pre_dispatch) # The main thread will consume the first pre_dispatch items and # the remaining items will later be lazily dispatched by async # callbacks upon task completions iterable = itertools.islice(iterable, pre_dispatch) self._start_time = time.time() self.n_dispatched = 0 try: if set_environ_flag: # Set an environment variable to avoid infinite loops os.environ[JOBLIB_SPAWNED_PROCESS] = '1' self._iterating = True for function, args, kwargs in iterable: self.dispatch(function, args, kwargs) if pre_dispatch == "all" or n_jobs == 1: # The iterable was consumed all at once by the above for loop. # No need to wait for async callbacks to trigger to # consumption. self._iterating = False self.retrieve() # Make sure that we get a last message telling us we are done elapsed_time = time.time() - self._start_time self._print('Done %3i out of %3i | elapsed: %s finished', (len(self._output), len(self._output), short_format_time(elapsed_time) )) finally: if n_jobs > 1: self._pool.close() self._pool.terminate() # terminate does a join() if self.backend == 'multiprocessing': os.environ.pop(JOBLIB_SPAWNED_PROCESS, 0) self._jobs = list() output = self._output self._output = None return output def __repr__(self): return '%s(n_jobs=%s)' % (self.__class__.__name__, self.n_jobs)
bsd-3-clause
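The Parallel docstring above already shows verbose and pre_dispatch; one option it documents but does not exemplify is the threading backend, which skips process start-up and memmapping and pays off when the called function releases the GIL. A small hedged usage sketch, using the vendored import path from this entry (with a standalone joblib install the import is simply `from joblib import Parallel, delayed`):

from math import sqrt
from sklearn.externals.joblib import Parallel, delayed

out = Parallel(n_jobs=2, backend='threading')(
    delayed(sqrt)(i ** 2) for i in range(10))
# [0.0, 1.0, 2.0, ..., 9.0]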
hsiaoyi0504/scikit-learn
sklearn/linear_model/randomized_l1.py
95
23365
""" Randomized Lasso/Logistic: feature selection based on Lasso and sparse Logistic Regression """ # Author: Gael Varoquaux, Alexandre Gramfort # # License: BSD 3 clause import itertools from abc import ABCMeta, abstractmethod import warnings import numpy as np from scipy.sparse import issparse from scipy import sparse from scipy.interpolate import interp1d from .base import center_data from ..base import BaseEstimator, TransformerMixin from ..externals import six from ..externals.joblib import Memory, Parallel, delayed from ..utils import (as_float_array, check_random_state, check_X_y, check_array, safe_mask, ConvergenceWarning) from ..utils.validation import check_is_fitted from .least_angle import lars_path, LassoLarsIC from .logistic import LogisticRegression ############################################################################### # Randomized linear model: feature selection def _resample_model(estimator_func, X, y, scaling=.5, n_resampling=200, n_jobs=1, verbose=False, pre_dispatch='3*n_jobs', random_state=None, sample_fraction=.75, **params): random_state = check_random_state(random_state) # We are generating 1 - weights, and not weights n_samples, n_features = X.shape if not (0 < scaling < 1): raise ValueError( "'scaling' should be between 0 and 1. Got %r instead." % scaling) scaling = 1. - scaling scores_ = 0.0 for active_set in Parallel(n_jobs=n_jobs, verbose=verbose, pre_dispatch=pre_dispatch)( delayed(estimator_func)( X, y, weights=scaling * random_state.random_integers( 0, 1, size=(n_features,)), mask=(random_state.rand(n_samples) < sample_fraction), verbose=max(0, verbose - 1), **params) for _ in range(n_resampling)): scores_ += active_set scores_ /= n_resampling return scores_ class BaseRandomizedLinearModel(six.with_metaclass(ABCMeta, BaseEstimator, TransformerMixin)): """Base class to implement randomized linear models for feature selection This implements the strategy by Meinshausen and Buhlman: stability selection with randomized sampling, and random re-weighting of the penalty. """ @abstractmethod def __init__(self): pass _center_data = staticmethod(center_data) def fit(self, X, y): """Fit the model using X, y as training data. Parameters ---------- X : array-like, sparse matrix shape = [n_samples, n_features] Training data. y : array-like, shape = [n_samples] Target values. Returns ------- self : object Returns an instance of self. 
""" X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], y_numeric=True) X = as_float_array(X, copy=False) n_samples, n_features = X.shape X, y, X_mean, y_mean, X_std = self._center_data(X, y, self.fit_intercept, self.normalize) estimator_func, params = self._make_estimator_and_params(X, y) memory = self.memory if isinstance(memory, six.string_types): memory = Memory(cachedir=memory) scores_ = memory.cache( _resample_model, ignore=['verbose', 'n_jobs', 'pre_dispatch'] )( estimator_func, X, y, scaling=self.scaling, n_resampling=self.n_resampling, n_jobs=self.n_jobs, verbose=self.verbose, pre_dispatch=self.pre_dispatch, random_state=self.random_state, sample_fraction=self.sample_fraction, **params) if scores_.ndim == 1: scores_ = scores_[:, np.newaxis] self.all_scores_ = scores_ self.scores_ = np.max(self.all_scores_, axis=1) return self def _make_estimator_and_params(self, X, y): """Return the parameters passed to the estimator""" raise NotImplementedError def get_support(self, indices=False): """Return a mask, or list, of the features/indices selected.""" check_is_fitted(self, 'scores_') mask = self.scores_ > self.selection_threshold return mask if not indices else np.where(mask)[0] # XXX: the two function below are copy/pasted from feature_selection, # Should we add an intermediate base class? def transform(self, X): """Transform a new matrix using the selected features""" mask = self.get_support() X = check_array(X) if len(mask) != X.shape[1]: raise ValueError("X has a different shape than during fitting.") return check_array(X)[:, safe_mask(X, mask)] def inverse_transform(self, X): """Transform a new matrix using the selected features""" support = self.get_support() if X.ndim == 1: X = X[None, :] Xt = np.zeros((X.shape[0], support.size)) Xt[:, support] = X return Xt ############################################################################### # Randomized lasso: regression settings def _randomized_lasso(X, y, weights, mask, alpha=1., verbose=False, precompute=False, eps=np.finfo(np.float).eps, max_iter=500): X = X[safe_mask(X, mask)] y = y[mask] # Center X and y to avoid fit the intercept X -= X.mean(axis=0) y -= y.mean() alpha = np.atleast_1d(np.asarray(alpha, dtype=np.float)) X = (1 - weights) * X with warnings.catch_warnings(): warnings.simplefilter('ignore', ConvergenceWarning) alphas_, _, coef_ = lars_path(X, y, Gram=precompute, copy_X=False, copy_Gram=False, alpha_min=np.min(alpha), method='lasso', verbose=verbose, max_iter=max_iter, eps=eps) if len(alpha) > 1: if len(alphas_) > 1: # np.min(alpha) < alpha_min interpolator = interp1d(alphas_[::-1], coef_[:, ::-1], bounds_error=False, fill_value=0.) scores = (interpolator(alpha) != 0.0) else: scores = np.zeros((X.shape[1], len(alpha)), dtype=np.bool) else: scores = coef_[:, -1] != 0.0 return scores class RandomizedLasso(BaseRandomizedLinearModel): """Randomized Lasso. Randomized Lasso works by resampling the train data and computing a Lasso on each resampling. In short, the features selected more often are good features. It is also known as stability selection. Read more in the :ref:`User Guide <randomized_l1>`. Parameters ---------- alpha : float, 'aic', or 'bic', optional The regularization parameter alpha parameter in the Lasso. Warning: this is not the alpha parameter in the stability selection article which is scaling. scaling : float, optional The alpha parameter in the stability selection article used to randomly scale the features. Should be between 0 and 1. 
sample_fraction : float, optional The fraction of samples to be used in each randomized design. Should be between 0 and 1. If 1, all samples are used. n_resampling : int, optional Number of randomized models. selection_threshold: float, optional The score above which features should be selected. fit_intercept : boolean, optional whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). verbose : boolean or integer, optional Sets the verbosity amount normalize : boolean, optional, default True If True, the regressors X will be normalized before regression. precompute : True | False | 'auto' Whether to use a precomputed Gram matrix to speed up calculations. If set to 'auto' let us decide. The Gram matrix can also be passed as argument. max_iter : integer, optional Maximum number of iterations to perform in the Lars algorithm. eps : float, optional The machine-precision regularization in the computation of the Cholesky diagonal factors. Increase this for very ill-conditioned systems. Unlike the 'tol' parameter in some iterative optimization-based algorithms, this parameter does not control the tolerance of the optimization. n_jobs : integer, optional Number of CPUs to use during the resampling. If '-1', use all the CPUs random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. pre_dispatch : int, or string, optional Controls the number of jobs that get dispatched during parallel execution. Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: - None, in which case all the jobs are immediately created and spawned. Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs - An int, giving the exact number of total jobs that are spawned - A string, giving an expression as a function of n_jobs, as in '2*n_jobs' memory : Instance of joblib.Memory or string Used for internal caching. By default, no caching is done. If a string is given, it is the path to the caching directory. Attributes ---------- scores_ : array, shape = [n_features] Feature scores between 0 and 1. all_scores_ : array, shape = [n_features, n_reg_parameter] Feature scores between 0 and 1 for all values of the regularization \ parameter. The reference article suggests ``scores_`` is the max of \ ``all_scores_``. Examples -------- >>> from sklearn.linear_model import RandomizedLasso >>> randomized_lasso = RandomizedLasso() Notes ----- See examples/linear_model/plot_sparse_recovery.py for an example. 
References ---------- Stability selection Nicolai Meinshausen, Peter Buhlmann Journal of the Royal Statistical Society: Series B Volume 72, Issue 4, pages 417-473, September 2010 DOI: 10.1111/j.1467-9868.2010.00740.x See also -------- RandomizedLogisticRegression, LogisticRegression """ def __init__(self, alpha='aic', scaling=.5, sample_fraction=.75, n_resampling=200, selection_threshold=.25, fit_intercept=True, verbose=False, normalize=True, precompute='auto', max_iter=500, eps=np.finfo(np.float).eps, random_state=None, n_jobs=1, pre_dispatch='3*n_jobs', memory=Memory(cachedir=None, verbose=0)): self.alpha = alpha self.scaling = scaling self.sample_fraction = sample_fraction self.n_resampling = n_resampling self.fit_intercept = fit_intercept self.max_iter = max_iter self.verbose = verbose self.normalize = normalize self.precompute = precompute self.eps = eps self.random_state = random_state self.n_jobs = n_jobs self.selection_threshold = selection_threshold self.pre_dispatch = pre_dispatch self.memory = memory def _make_estimator_and_params(self, X, y): assert self.precompute in (True, False, None, 'auto') alpha = self.alpha if alpha in ('aic', 'bic'): model = LassoLarsIC(precompute=self.precompute, criterion=self.alpha, max_iter=self.max_iter, eps=self.eps) model.fit(X, y) self.alpha_ = alpha = model.alpha_ return _randomized_lasso, dict(alpha=alpha, max_iter=self.max_iter, eps=self.eps, precompute=self.precompute) ############################################################################### # Randomized logistic: classification settings def _randomized_logistic(X, y, weights, mask, C=1., verbose=False, fit_intercept=True, tol=1e-3): X = X[safe_mask(X, mask)] y = y[mask] if issparse(X): size = len(weights) weight_dia = sparse.dia_matrix((1 - weights, 0), (size, size)) X = X * weight_dia else: X *= (1 - weights) C = np.atleast_1d(np.asarray(C, dtype=np.float)) scores = np.zeros((X.shape[1], len(C)), dtype=np.bool) for this_C, this_scores in zip(C, scores.T): # XXX : would be great to do it with a warm_start ... clf = LogisticRegression(C=this_C, tol=tol, penalty='l1', dual=False, fit_intercept=fit_intercept) clf.fit(X, y) this_scores[:] = np.any( np.abs(clf.coef_) > 10 * np.finfo(np.float).eps, axis=0) return scores class RandomizedLogisticRegression(BaseRandomizedLinearModel): """Randomized Logistic Regression Randomized Regression works by resampling the train data and computing a LogisticRegression on each resampling. In short, the features selected more often are good features. It is also known as stability selection. Read more in the :ref:`User Guide <randomized_l1>`. Parameters ---------- C : float, optional, default=1 The regularization parameter C in the LogisticRegression. scaling : float, optional, default=0.5 The alpha parameter in the stability selection article used to randomly scale the features. Should be between 0 and 1. sample_fraction : float, optional, default=0.75 The fraction of samples to be used in each randomized design. Should be between 0 and 1. If 1, all samples are used. n_resampling : int, optional, default=200 Number of randomized models. selection_threshold : float, optional, default=0.25 The score above which features should be selected. fit_intercept : boolean, optional, default=True whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). 
verbose : boolean or integer, optional Sets the verbosity amount normalize : boolean, optional, default=True If True, the regressors X will be normalized before regression. tol : float, optional, default=1e-3 tolerance for stopping criteria of LogisticRegression n_jobs : integer, optional Number of CPUs to use during the resampling. If '-1', use all the CPUs random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. pre_dispatch : int, or string, optional Controls the number of jobs that get dispatched during parallel execution. Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: - None, in which case all the jobs are immediately created and spawned. Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs - An int, giving the exact number of total jobs that are spawned - A string, giving an expression as a function of n_jobs, as in '2*n_jobs' memory : Instance of joblib.Memory or string Used for internal caching. By default, no caching is done. If a string is given, it is the path to the caching directory. Attributes ---------- scores_ : array, shape = [n_features] Feature scores between 0 and 1. all_scores_ : array, shape = [n_features, n_reg_parameter] Feature scores between 0 and 1 for all values of the regularization \ parameter. The reference article suggests ``scores_`` is the max \ of ``all_scores_``. Examples -------- >>> from sklearn.linear_model import RandomizedLogisticRegression >>> randomized_logistic = RandomizedLogisticRegression() Notes ----- See examples/linear_model/plot_sparse_recovery.py for an example. 
    References
    ----------
    Stability selection
    Nicolai Meinshausen, Peter Buhlmann
    Journal of the Royal Statistical Society: Series B
    Volume 72, Issue 4, pages 417-473, September 2010
    DOI: 10.1111/j.1467-9868.2010.00740.x

    See also
    --------
    RandomizedLasso, Lasso, ElasticNet
    """
    def __init__(self, C=1, scaling=.5, sample_fraction=.75,
                 n_resampling=200, selection_threshold=.25, tol=1e-3,
                 fit_intercept=True, verbose=False, normalize=True,
                 random_state=None, n_jobs=1, pre_dispatch='3*n_jobs',
                 memory=Memory(cachedir=None, verbose=0)):
        self.C = C
        self.scaling = scaling
        self.sample_fraction = sample_fraction
        self.n_resampling = n_resampling
        self.fit_intercept = fit_intercept
        self.verbose = verbose
        self.normalize = normalize
        self.tol = tol
        self.random_state = random_state
        self.n_jobs = n_jobs
        self.selection_threshold = selection_threshold
        self.pre_dispatch = pre_dispatch
        self.memory = memory

    def _make_estimator_and_params(self, X, y):
        params = dict(C=self.C, tol=self.tol,
                      fit_intercept=self.fit_intercept)
        return _randomized_logistic, params

    def _center_data(self, X, y, fit_intercept, normalize=False):
        """Center the data in X but not in y"""
        X, _, Xmean, _, X_std = center_data(X, y, fit_intercept,
                                            normalize=normalize)
        return X, y, Xmean, y, X_std


###############################################################################
# Stability paths

def _lasso_stability_path(X, y, mask, weights, eps):
    "Inner loop of lasso_stability_path"
    X = X * weights[np.newaxis, :]
    X = X[safe_mask(X, mask), :]
    y = y[mask]

    alpha_max = np.max(np.abs(np.dot(X.T, y))) / X.shape[0]
    alpha_min = eps * alpha_max  # set for early stopping in path
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', ConvergenceWarning)
        alphas, _, coefs = lars_path(X, y, method='lasso', verbose=False,
                                     alpha_min=alpha_min)
    # Scale alpha by alpha_max
    alphas /= alphas[0]
    # Sort alphas in ascending order
    alphas = alphas[::-1]
    coefs = coefs[:, ::-1]
    # Get rid of the alphas that are too small
    mask = alphas >= eps
    # We also want to keep the first one: it should be close to the OLS
    # solution
    mask[0] = True
    alphas = alphas[mask]
    coefs = coefs[:, mask]
    return alphas, coefs


def lasso_stability_path(X, y, scaling=0.5, random_state=None,
                         n_resampling=200, n_grid=100,
                         sample_fraction=0.75,
                         eps=4 * np.finfo(np.float).eps, n_jobs=1,
                         verbose=False):
    """Stability path based on randomized Lasso estimates

    Read more in the :ref:`User Guide <randomized_l1>`.

    Parameters
    ----------
    X : array-like, shape = [n_samples, n_features]
        training data.

    y : array-like, shape = [n_samples]
        target values.

    scaling : float, optional, default=0.5
        The alpha parameter in the stability selection article used to
        randomly scale the features. Should be between 0 and 1.

    random_state : integer or numpy.random.RandomState, optional
        The generator used to randomize the design.

    n_resampling : int, optional, default=200
        Number of randomized models.

    n_grid : int, optional, default=100
        Number of grid points. The path is linearly reinterpolated
        on a grid between 0 and 1 before computing the scores.

    sample_fraction : float, optional, default=0.75
        The fraction of samples to be used in each randomized design.
        Should be between 0 and 1. If 1, all samples are used.

    eps : float, optional
        Smallest value of alpha / alpha_max considered

    n_jobs : integer, optional
        Number of CPUs to use during the resampling.
If '-1', use all the CPUs verbose : boolean or integer, optional Sets the verbosity amount Returns ------- alphas_grid : array, shape ~ [n_grid] The grid points between 0 and 1: alpha/alpha_max scores_path : array, shape = [n_features, n_grid] The scores for each feature along the path. Notes ----- See examples/linear_model/plot_sparse_recovery.py for an example. """ rng = check_random_state(random_state) if not (0 < scaling < 1): raise ValueError("Parameter 'scaling' should be between 0 and 1." " Got %r instead." % scaling) n_samples, n_features = X.shape paths = Parallel(n_jobs=n_jobs, verbose=verbose)( delayed(_lasso_stability_path)( X, y, mask=rng.rand(n_samples) < sample_fraction, weights=1. - scaling * rng.random_integers(0, 1, size=(n_features,)), eps=eps) for k in range(n_resampling)) all_alphas = sorted(list(set(itertools.chain(*[p[0] for p in paths])))) # Take approximately n_grid values stride = int(max(1, int(len(all_alphas) / float(n_grid)))) all_alphas = all_alphas[::stride] if not all_alphas[-1] == 1: all_alphas.append(1.) all_alphas = np.array(all_alphas) scores_path = np.zeros((n_features, len(all_alphas))) for alphas, coefs in paths: if alphas[0] != 0: alphas = np.r_[0, alphas] coefs = np.c_[np.ones((n_features, 1)), coefs] if alphas[-1] != all_alphas[-1]: alphas = np.r_[alphas, all_alphas[-1]] coefs = np.c_[coefs, np.zeros((n_features, 1))] scores_path += (interp1d(alphas, coefs, kind='nearest', bounds_error=False, fill_value=0, axis=-1)(all_alphas) != 0) scores_path /= n_resampling return all_alphas, scores_path
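

###############################################################################
# Usage sketch (added for illustration; not part of the original module).
# A minimal, hedged example of stability selection on synthetic data: only
# the first five of twenty features carry signal, so RandomizedLasso should
# assign them selection scores close to 1, and lasso_stability_path should
# show their paths rising early. The data and parameter values below are
# arbitrary illustrative choices, not recommendations.

if __name__ == '__main__':
    rng = check_random_state(0)
    X_demo = rng.randn(200, 20)
    true_coef = np.zeros(20)
    true_coef[:5] = 5.0
    y_demo = np.dot(X_demo, true_coef) + 0.1 * rng.randn(200)

    rlasso = RandomizedLasso(alpha=0.025, n_resampling=100, random_state=0)
    rlasso.fit(X_demo, y_demo)
    print("scores_ (selection frequencies in [0, 1]): %s" % rlasso.scores_)
    print("support (first five should be True): %s" % rlasso.get_support())

    alphas_grid, scores_path = lasso_stability_path(X_demo, y_demo,
                                                    random_state=0)
    # scores_path has one row per feature, one column per grid point
    print("stability path shape: %s" % (scores_path.shape,))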
bsd-3-clause
peraktong/Cannon-Experiment
DR13_red_clump/0324_read_table_rc_atelast4_plot.py
1
7213
import numpy as np
from astropy.table import Table
from astropy.io import fits
import matplotlib.pyplot as plt
import matplotlib
import pickle
from matplotlib import cm
from numpy.random import randn

# table path
path = "/Users/caojunzhi/Downloads/upload_20170322/red_clump_dr13.fits"
star = fits.open(path)
table = Table.read(path)

"""
There are 15 columns in the table:
1. 'APOGEEID' -- The name of the star
2. 'VISIT' -- The name of the visit file
3. BJD -- Barycentric JD

Inferred labels are from the Cannon. The spectra we use are the first of the
two combined spectra for each star, which are obtained by two different
methods: (1) global weighting, where each visit spectrum is weighted by its
(S/N)^2, and (2) pixel-by-pixel weighting, where each pixel is weighted by
its (S/N)^2.

4. TEFF
5. LOGG
6. FEH

The abc parameters for each visit:
7. A -- parameter a
8. B -- parameter b
9. C -- parameter c

10. CHIINF -- chi-squared for the inferred flux from the Cannon (a=0,b=1,c=0)
11. CHIMIX -- chi-squared for the mixed flux from the abc fit.

12. VBARY -- The barycentric velocity (km/s) from the APOGEE team.
13. VSHIFT -- The velocity shift from the abc fit (km/s)

14. FIBER -- Fiber ID
15. SNR -- SNR of the visit

#### The covariance matrix of the abc fit is in HDU0 data, which is a 3*3*N
3-d matrix. N is the number of visits. ###
"""

# read covariance matrix from the abc fit:
un_cov = star[0].data[:, :, 0]
#print(un_cov)

# read the velocity shift from the abc fit
v_shift = table["VSHIFT"]
#print(v_shift.shape)

########################
# Read table and plot to check.

class plot():

    def read_table(self):
        path = "/Users/caojunzhi/Downloads/upload_20170322/red_clump_atleast_4.fits"
        star = fits.open(path)
        table = Table.read(path)

        # read it:
        name = table["APOGEEID"]
        self.name = name

        un_cov = star[0].data
        self.un_cov = un_cov

        a = table["A"]
        b = table["B"]
        c = table["C"]

        self.a = a
        self.b = b
        self.c = c

        mask = 2*b > a+c
        self.mask = mask

        VBARY = table["VBARY"]
        self.VBARY = VBARY

        SHIFT = table["VSHIFT"]
        self.shift = SHIFT

        teff = table["TEFF"]
        self.teff = teff

        logg = table["LOGG"]
        self.logg = logg

        feh = table["FEH"]
        self.feh = feh

        self.chi_inf = table["CHIINF"]
        self.chi_mix = table["CHIMIX"]
        self.BJD = table["BJD"]
        self.fiber = table["FIBER"]
        self.SNR = table["SNR"]

    def plot_std_before_after(self):

        # Apply the 2b > a + c mask from read_table here
        mask = self.mask

        # group visits belonging to the same star
        name = self.name[mask]
        target = list(set(name))

        VBARY = self.VBARY[mask]
        shift = self.shift[mask]
        SNR = self.SNR[mask]

        fusion_new = []

        # name + std_old + std_new + SNR
        for i in range(0, len(target)):
            print("Doing %.2f %%" % (i/len(target)*100))
            index = np.where(name == target[i])
            index = np.array(index)
            index = index.ravel()

            std_old_i = np.std(VBARY[index])
            std_new_i = np.std(VBARY[index]+shift[index])
            SNR_i = np.nanmedian(SNR[index])

            fusion_new.append([target[i], std_old_i, std_new_i, SNR_i])

        fusion_new = np.array(fusion_new)

        # columns: name, std_old, std_new, SNR. np.array() of mixed strings
        # and floats yields a string array, so cast the numeric columns back
        # to float before doing arithmetic or plotting.
        name = fusion_new[:, 0]
        std_old = np.array(fusion_new[:, 1], dtype=float)
        std_new = np.array(fusion_new[:, 2], dtype=float)

        self.std_old = std_old
        self.std_new = std_new

        SNR = np.array(fusion_new[:, 3], dtype=float)

        print("check shape")
        print(name.shape, std_old.shape, std_new.shape, SNR.shape)

        # let's plot ('normal' is not a valid font family, so only set
        # weight and size to avoid matplotlib findfont warnings)
        font = {'weight': 'bold',
                'size': 14}

        matplotlib.rc('font', **font)

        plt.subplot(1, 1, 1)

        plt.plot(std_old, "ko", label="Before the correction", markersize=3, alpha=0.3)
        plt.xlabel("Visit")
        plt.ylabel("Standard deviation $km/s$")

        plt.plot(std_new, "rx", label="After the correction", markersize=3, alpha=0.3)
        plt.legend()

        # ax1
        """
        ax1.scatter(std_old, marker='x', c=SNR,
                    vmin=np.min(SNR), vmax=np.max(SNR), alpha=alpha,
                    cmap=cm.coolwarm)

        ax1.scatter(std_new, marker='o', c=SNR,
                    vmin=np.min(SNR), vmax=np.max(SNR), alpha=alpha,
                    cmap=cm.coolwarm)
        """

        plt.suptitle("Standard deviations of RV before and after the correction", fontsize=30)

        # save them:
        fig = matplotlib.pyplot.gcf()

        # adjust the size based on the number of visits
        fig.set_size_inches(14.5, 8.5)

        save_path = "/Users/caojunzhi/Downloads/upload_20170322/" + "std_before_after_rc" + ".png"
        fig.savefig(save_path, dpi=500)
        plt.close()

    def diagnostic_plot(self):

        VBARY = self.std_old
        V_c = self.std_new

        # use different colors so the two series can be told apart
        plt.plot(VBARY, "ro", label="before correction")
        plt.plot(V_c, "bo", label="after correction")
        plt.legend()
        plt.show()

    def hist_rv_std(self):

        VBARY = self.std_old
        V_c = self.std_new

        VBARY = np.array(VBARY, dtype=float).ravel()
        V_c = np.array(V_c, dtype=float).ravel()

        font = {'weight': 'bold',
                'size': 15}

        matplotlib.rc('font', **font)

        f, (ax1, ax2) = \
            plt.subplots(1, 2)

        colors = ["cyan", "r"]
        name = ["RV std before correction", "RV std after correction"]

        rms_old = (np.sum(VBARY*VBARY)/len(VBARY))**0.5
        rms_new = (np.sum(V_c*V_c)/len(V_c))**0.5

        ax1.hist(VBARY, bins=60, range=[-0.2, 3], color=colors[0],
                 label="%s RMS = %.2f $km/s$" % (name[0], rms_old))
        ax2.hist(V_c, bins=60, range=[-0.2, 3], color=colors[1],
                 label="%s RMS = %.2f $km/s$" % (name[1], rms_new))

        #ax1.set_title('Histogram of Radial velocity shifts', fontsize=30)
        ax1.set_xlabel('values of radial velocity std $km/s$', fontsize=15)
        ax1.set_ylabel('Number', fontsize=15)

        ax2.set_xlabel('values of radial velocity std $km/s$', fontsize=15)
        ax2.set_ylabel('Number', fontsize=15)

        ax1.legend(prop={'size': 15})
        ax2.legend(prop={'size': 15})

        # add vertical grey line
        # ax1.plot((wl[index], wl[index]), (0.5, 1 + 0.5 * N), 'k-', linewidth=1.5)

        f.suptitle("Histogram of RV std before and after the correction", fontsize=25)
        #f.suptitle("Histogram of RV shifts, a, b and c by using the absorption lines")

        # save them:
        fig = matplotlib.pyplot.gcf()

        # adjust the size based on the number of visits
        fig.set_size_inches(18.5, 8.5)

        save_path = "/Users/caojunzhi/Downloads/upload_20170322/" + "histogram_rv_std_before_after" + ".png"
        fig.savefig(save_path, dpi=500)
        plt.close()


model = plot()
model.read_table()
model.plot_std_before_after()
model.hist_rv_std()

#model.diagnostic_plot()
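
# -----------------------------------------------------------------------------
# Quick numerical summary (added for illustration; not part of the original
# script). After plot_std_before_after() has populated model.std_old and
# model.std_new, this prints the same RMS values that hist_rv_std() puts in
# its legends, so the size of the improvement can be read off without opening
# the saved figure.
std_old = np.array(model.std_old, dtype=float).ravel()
std_new = np.array(model.std_new, dtype=float).ravel()
print("RMS of per-star RV std before correction: %.3f km/s"
      % np.sqrt(np.mean(std_old ** 2)))
print("RMS of per-star RV std after correction:  %.3f km/s"
      % np.sqrt(np.mean(std_new ** 2)))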
mit
drammock/mne-python
mne/decoding/tests/test_csp.py
13
13483
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
#         Romain Trachel <trachelr@gmail.com>
#         Alexandre Barachant <alexandre.barachant@gmail.com>
#         Jean-Remi King <jeanremi.king@gmail.com>
#
# License: BSD (3-clause)

import os.path as op

import numpy as np
import pytest
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
                           assert_equal)

from mne import io, Epochs, read_events, pick_types
from mne.decoding.csp import CSP, _ajd_pham, SPoC
from mne.utils import requires_sklearn

data_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(data_dir, 'test_raw.fif')
event_name = op.join(data_dir, 'test-eve.fif')
tmin, tmax = -0.2, 0.5
event_id = dict(aud_l=1, vis_l=3)
# if stop is too small pca may fail in some cases, but we're okay on this file
start, stop = 0, 8


def simulate_data(target, n_trials=100, n_channels=10, random_state=42):
    """Simulate data according to an instantaneous mixing model.

    Data are simulated in the statistical source space, where one source is
    modulated according to a target variable, before being mixed with a
    random mixing matrix.
    """
    rs = np.random.RandomState(random_state)

    # generate an orthogonal mixing matrix
    mixing_mat = np.linalg.svd(rs.randn(n_channels, n_channels))[0]

    S = rs.randn(n_trials, n_channels, 50)
    S[:, 0] *= np.atleast_2d(np.sqrt(target)).T
    S[:, 1:] *= 0.01  # less noise

    X = np.dot(mixing_mat, S).transpose((1, 0, 2))

    return X, mixing_mat


def deterministic_toy_data(classes=('class_a', 'class_b')):
    """Generate a small deterministic toy data set.

    Four independent sources are modulated by the target class and mixed
    into signal space.
    """
    sources_a = np.array([[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1],
                          [0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1],
                          [0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1],
                          [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1]],
                         dtype=float) * 2 - 1

    sources_b = np.array([[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1],
                          [0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1],
                          [0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1],
                          [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1]],
                         dtype=float) * 2 - 1

    sources_a[0, :] *= 1
    sources_a[1, :] *= 2

    sources_b[2, :] *= 3
    sources_b[3, :] *= 4

    mixing = np.array([[1.0, 0.8, 0.6, 0.4],
                       [0.8, 1.0, 0.8, 0.6],
                       [0.6, 0.8, 1.0, 0.8],
                       [0.4, 0.6, 0.8, 1.0]])

    x_class_a = mixing @ sources_a
    x_class_b = mixing @ sources_b

    x = np.stack([x_class_a, x_class_b])
    y = np.array(classes)

    return x, y


@pytest.mark.slowtest
def test_csp():
    """Test Common Spatial Patterns algorithm on epochs."""
    raw = io.read_raw_fif(raw_fname, preload=False)
    events = read_events(event_name)
    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                       eog=False, exclude='bads')
    picks = picks[2:12:3]  # subselect channels -> disable proj!
    raw.add_proj([], remove_existing=True)
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0), preload=True, proj=False)
    epochs_data = epochs.get_data()
    n_channels = epochs_data.shape[1]
    y = epochs.events[:, -1]

    # Init
    pytest.raises(ValueError, CSP, n_components='foo', norm_trace=False)
    for reg in ['foo', -0.1, 1.1]:
        csp = CSP(reg=reg, norm_trace=False)
        pytest.raises(ValueError, csp.fit, epochs_data,
                      epochs.events[:, -1])
    for reg in ['oas', 'ledoit_wolf', 0, 0.5, 1.]:
        CSP(reg=reg, norm_trace=False)
    for cov_est in ['foo', None]:
        pytest.raises(ValueError, CSP, cov_est=cov_est, norm_trace=False)
    with pytest.raises(TypeError, match='instance of bool'):
        CSP(norm_trace='foo')
    for cov_est in ['concat', 'epoch']:
        CSP(cov_est=cov_est, norm_trace=False)

    n_components = 3
    # Fit
    for norm_trace in [True, False]:
        csp = CSP(n_components=n_components, norm_trace=norm_trace)
        csp.fit(epochs_data, epochs.events[:, -1])

    assert_equal(len(csp.mean_), n_components)
    assert_equal(len(csp.std_), n_components)

    # Transform
    X = csp.fit_transform(epochs_data, y)
    sources = csp.transform(epochs_data)
    assert (sources.shape[1] == n_components)
    assert (csp.filters_.shape == (n_channels, n_channels))
    assert (csp.patterns_.shape == (n_channels, n_channels))
    assert_array_almost_equal(sources, X)

    # Test data exception
    pytest.raises(ValueError, csp.fit, epochs_data,
                  np.zeros_like(epochs.events))
    pytest.raises(ValueError, csp.fit, epochs, y)
    pytest.raises(ValueError, csp.transform, epochs)

    # Test plots
    epochs.pick_types(meg='mag')
    cmap = ('RdBu', True)
    components = np.arange(n_components)
    for plot in (csp.plot_patterns, csp.plot_filters):
        plot(epochs.info, components=components, res=12, show=False,
             cmap=cmap)

    # Test with more than 2 classes
    epochs = Epochs(raw, events, tmin=tmin, tmax=tmax, picks=picks,
                    event_id=dict(aud_l=1, aud_r=2, vis_l=3, vis_r=4),
                    baseline=(None, 0), proj=False, preload=True)
    epochs_data = epochs.get_data()
    n_channels = epochs_data.shape[1]

    for cov_est in ['concat', 'epoch']:
        csp = CSP(n_components=n_components, cov_est=cov_est,
                  norm_trace=False)
        csp.fit(epochs_data, epochs.events[:, 2]).transform(epochs_data)
        assert_equal(len(csp._classes), 4)
        assert_array_equal(csp.filters_.shape, [n_channels, n_channels])
        assert_array_equal(csp.patterns_.shape, [n_channels, n_channels])

    # Test average power transform
    n_components = 2
    assert (csp.transform_into == 'average_power')
    feature_shape = [len(epochs_data), n_components]
    X_trans = dict()
    for log in (None, True, False):
        csp = CSP(n_components=n_components, log=log, norm_trace=False)
        assert (csp.log is log)
        Xt = csp.fit_transform(epochs_data, epochs.events[:, 2])
        assert_array_equal(Xt.shape, feature_shape)
        X_trans[str(log)] = Xt
    # log=None => log=True
    assert_array_almost_equal(X_trans['None'], X_trans['True'])
    # Different normalizations return different transforms
    assert (np.sum((X_trans['True'] - X_trans['False']) ** 2) > 1.)
    # Check wrong inputs
    pytest.raises(ValueError, CSP, transform_into='average_power',
                  log='foo')

    # Test csp space transform
    csp = CSP(transform_into='csp_space', norm_trace=False)
    assert (csp.transform_into == 'csp_space')
    for log in ('foo', True, False):
        pytest.raises(ValueError, CSP, transform_into='csp_space', log=log,
                      norm_trace=False)
    n_components = 2
    csp = CSP(n_components=n_components, transform_into='csp_space',
              norm_trace=False)
    Xt = csp.fit(epochs_data, epochs.events[:, 2]).transform(epochs_data)
    feature_shape = [len(epochs_data), n_components, epochs_data.shape[2]]
    assert_array_equal(Xt.shape, feature_shape)

    # Check mixing matrix on simulated data
    y = np.array([100] * 50 + [1] * 50)
    X, A = simulate_data(y)

    for cov_est in ['concat', 'epoch']:
        # fit csp
        csp = CSP(n_components=1, cov_est=cov_est, norm_trace=False)
        csp.fit(X, y)

        # check that the first pattern matches the mixing matrix
        # the sign might change
        corr = np.abs(np.corrcoef(csp.patterns_[0, :].T, A[:, 0])[0, 1])
        assert np.abs(corr) > 0.99

        # check output
        out = csp.transform(X)
        corr = np.abs(np.corrcoef(out[:, 0], y)[0, 1])
        assert np.abs(corr) > 0.95


@requires_sklearn
def test_regularized_csp():
    """Test Common Spatial Patterns algorithm using regularized covariance."""
    raw = io.read_raw_fif(raw_fname)
    events = read_events(event_name)
    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                       eog=False, exclude='bads')
    picks = picks[1:13:3]
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0), preload=True)
    epochs_data = epochs.get_data()
    n_channels = epochs_data.shape[1]

    n_components = 3
    reg_cov = [None, 0.05, 'ledoit_wolf', 'oas']
    for reg in reg_cov:
        csp = CSP(n_components=n_components, reg=reg, norm_trace=False,
                  rank=None)
        csp.fit(epochs_data, epochs.events[:, -1])
        y = epochs.events[:, -1]
        X = csp.fit_transform(epochs_data, y)
        assert (csp.filters_.shape == (n_channels, n_channels))
        assert (csp.patterns_.shape == (n_channels, n_channels))
        assert_array_almost_equal(csp.fit(epochs_data, y).
                                  transform(epochs_data), X)

        # test init exception
        pytest.raises(ValueError, csp.fit, epochs_data,
                      np.zeros_like(epochs.events))
        pytest.raises(ValueError, csp.fit, epochs, y)
        pytest.raises(ValueError, csp.transform, epochs)

        csp.n_components = n_components
        sources = csp.transform(epochs_data)
        assert (sources.shape[1] == n_components)


@requires_sklearn
def test_csp_pipeline():
    """Test if CSP works in a pipeline."""
    from sklearn.svm import SVC
    from sklearn.pipeline import Pipeline
    csp = CSP(reg=1, norm_trace=False)
    svc = SVC()
    pipe = Pipeline([("CSP", csp), ("SVC", svc)])
    pipe.set_params(CSP__reg=0.2)
    assert (pipe.get_params()["CSP__reg"] == 0.2)


def test_ajd():
    """Test approximate joint diagonalization."""
    # The implementation should obtain the same
    # results as the Matlab implementation by Pham Dinh-Tuan.
    # Generate a set of covariance matrices for test purposes
    n_times, n_channels = 10, 3
    seed = np.random.RandomState(0)
    diags = 2.0 + 0.1 * seed.randn(n_times, n_channels)
    A = 2 * seed.rand(n_channels, n_channels) - 1
    A /= np.atleast_2d(np.sqrt(np.sum(A ** 2, 1))).T
    covmats = np.empty((n_times, n_channels, n_channels))
    for i in range(n_times):
        covmats[i] = np.dot(np.dot(A, np.diag(diags[i])), A.T)
    V, D = _ajd_pham(covmats)
    # Results obtained with original matlab implementation
    V_matlab = [[-3.507280775058041, -5.498189967306344, 7.720624541198574],
                [0.694689013234610, 0.775690358505945, -1.162043086446043],
                [-0.592603135588066, -0.598996925696260, 1.009550086271192]]
    assert_array_almost_equal(V, V_matlab)


def test_spoc():
    """Test SPoC."""
    X = np.random.randn(10, 10, 20)
    y = np.random.randn(10)

    spoc = SPoC(n_components=4)
    spoc.fit(X, y)
    Xt = spoc.transform(X)
    assert_array_equal(Xt.shape, [10, 4])
    spoc = SPoC(n_components=4, transform_into='csp_space')
    spoc.fit(X, y)
    Xt = spoc.transform(X)
    assert_array_equal(Xt.shape, [10, 4, 20])
    assert_array_equal(spoc.filters_.shape, [10, 10])
    assert_array_equal(spoc.patterns_.shape, [10, 10])

    # check y
    pytest.raises(ValueError, spoc.fit, X, y * 0)

    # Check that it doesn't take CSP-specific input
    pytest.raises(TypeError, SPoC, cov_est='epoch')

    # Check mixing matrix on simulated data
    rs = np.random.RandomState(42)
    y = rs.rand(100) * 50 + 1
    X, A = simulate_data(y)

    # fit spoc
    spoc = SPoC(n_components=1)
    spoc.fit(X, y)

    # check that the first pattern matches the mixing matrix
    corr = np.abs(np.corrcoef(spoc.patterns_[0, :].T, A[:, 0])[0, 1])
    assert np.abs(corr) > 0.99

    # check output
    out = spoc.transform(X)
    corr = np.abs(np.corrcoef(out[:, 0], y)[0, 1])
    assert np.abs(corr) > 0.85


def test_csp_twoclass_symmetry():
    """Test that CSP is symmetric when swapping classes."""
    x, y = deterministic_toy_data(['class_a', 'class_b'])
    csp = CSP(norm_trace=False, transform_into='average_power', log=True)
    log_power = csp.fit_transform(x, y)
    log_power_ratio_ab = log_power[0] - log_power[1]

    x, y = deterministic_toy_data(['class_b', 'class_a'])
    csp = CSP(norm_trace=False, transform_into='average_power', log=True)
    log_power = csp.fit_transform(x, y)
    log_power_ratio_ba = log_power[0] - log_power[1]

    assert_array_almost_equal(log_power_ratio_ab,
                              log_power_ratio_ba)


def test_csp_component_ordering():
    """Test that CSP component ordering works as expected."""
    x, y = deterministic_toy_data(['class_a', 'class_b'])

    pytest.raises(ValueError, CSP, component_order='invalid')

    # component_order='alternate' only works with two classes
    csp = CSP(component_order='alternate')
    with pytest.raises(ValueError):
        csp.fit(np.zeros((3, 0, 0)), ['a', 'b', 'c'])

    p_alt = CSP(component_order='alternate').fit(x, y).patterns_
    p_mut = CSP(component_order='mutual_info').fit(x, y).patterns_

    # This permutation of p_alt and p_mut is explained by the particular
    # eigenvalues of the toy data: [0.06, 0.1, 0.5, 0.8].
    # p_alt arranges them to [0.8, 0.06, 0.5, 0.1]
    # p_mut arranges them to [0.06, 0.1, 0.8, 0.5]
    assert_array_almost_equal(p_alt, p_mut[[2, 0, 3, 1]])
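

# -----------------------------------------------------------------------------
# Usage sketch (added for illustration; not one of the original tests).
# It shows the typical CSP workflow on the deterministic toy data defined
# above: fit on (n_epochs, n_channels, n_times) data with class labels, then
# transform the epochs into log band-power features. The parameter values are
# arbitrary illustrative choices, not recommendations.

def _demo_csp_workflow():
    x, y = deterministic_toy_data(['class_a', 'class_b'])
    csp = CSP(n_components=2, norm_trace=False,
              transform_into='average_power', log=True)
    # One row of log-power features per epoch, one column per CSP component.
    features = csp.fit_transform(x, y)
    assert features.shape == (2, 2)
    return features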
bsd-3-clause
bullocke/ge-cdd
python/postprocess/train.py
1
3201
""" Train classifier from multiple images, multiple shapefiles for training a classifier Usage: train.py <train_data_path> <output_fname> [--verbose] [--logfile=<logfile>] [--bands=<bands>] classify.py -h | --help The <input_list> argument must be the path to a csv file containing paths to output CDD raster files. The <train_data_path> argument must be a path to a directory with vector data files (in shapefile format). These vectors must specify the target class of the training pixels. One file per class. The base filename (without extension) is taken as class name. The <output_fname> argument must be af ilename where the trained XY data can be stored Options: -h --help Show this screen. --verbose If given, debug output is writen to stdout. --logfile=<logfile> Optional, log file to output logging to --bands=<bands> A list of bands to use for training and classification """ import logging import numpy as np import os import pandas as pd from docopt import docopt from osgeo import gdal, ogr from sklearn import metrics from sklearn.ensemble import RandomForestClassifier from sklearn.svm import SVC from sklearn.externals import joblib logger = logging.getLogger(__name__) def make_class_dict(path): # Set up dict to save Xs and Ys driver = ogr.GetDriverByName('ESRI Shapefile') data_source = driver.Open(path, 0) if data_source is None: report_and_exit("File read failed: %s", path) layer = data_source.GetLayer(0) class_labels = [] data = [] for feature in layer: try: var1 = float(feature.GetField('NFDI_mag')) var2 = float(feature.GetField('NFDI_rmse')) var3 = float(feature.GetField('NFDI_sin')) var4 = float(feature.GetField('NFDI_cos')) var5 = float(feature.GetField('Gv_mag')) var6 = float(feature.GetField('Shade_mag')) var7 = float(feature.GetField('NPV_mag')) var8 = float(feature.GetField('Soil_mag')) label = feature.GetField('class') except: continue class_labels.append(label) data.append([var1, var2, var3, var4, var5, var6, var7, var8]) # data.append([var1, var3, var4, var5, var6, var7, var8]) return class_labels, data def report_and_exit(txt, *args, **kwargs): logger.error(txt, *args, **kwargs) exit(1) return xys if __name__ == "__main__": opts = docopt(__doc__) train_data_path = opts["<train_data_path>"] labels, data = make_class_dict(train_data_path) output_fname = opts["<output_fname>"] log_level = logging.DEBUG if opts["--verbose"] else logging.INFO if opts['--logfile']: logfile = opts['--logfile'] fh = logging.FileHandler(logfile) fh.setLevel(logging.DEBUG) logger.addHandler(fh) logging.basicConfig(level=log_level, format='%(asctime)-15s\t %(message)s') # Perform classification # classifier = RandomForestClassifier(n_jobs=4, n_estimators=10, class_weight='balanced') logger.debug("Train the classifier: %s", str(classifier)) classifier.fit(data, labels) joblib.dump(classifier, output_fname, compress=3)
mit
etkirsch/scikit-learn
sklearn/metrics/pairwise.py
49
44088
# -*- coding: utf-8 -*- # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr> # Mathieu Blondel <mathieu@mblondel.org> # Robert Layton <robertlayton@gmail.com> # Andreas Mueller <amueller@ais.uni-bonn.de> # Philippe Gervais <philippe.gervais@inria.fr> # Lars Buitinck <larsmans@gmail.com> # Joel Nothman <joel.nothman@gmail.com> # License: BSD 3 clause import itertools import numpy as np from scipy.spatial import distance from scipy.sparse import csr_matrix from scipy.sparse import issparse from ..utils import check_array from ..utils import gen_even_slices from ..utils import gen_batches from ..utils.fixes import partial from ..utils.extmath import row_norms, safe_sparse_dot from ..preprocessing import normalize from ..externals.joblib import Parallel from ..externals.joblib import delayed from ..externals.joblib.parallel import cpu_count from .pairwise_fast import _chi2_kernel_fast, _sparse_manhattan # Utility Functions def _return_float_dtype(X, Y): """ 1. If dtype of X and Y is float32, then dtype float32 is returned. 2. Else dtype float is returned. """ if not issparse(X) and not isinstance(X, np.ndarray): X = np.asarray(X) if Y is None: Y_dtype = X.dtype elif not issparse(Y) and not isinstance(Y, np.ndarray): Y = np.asarray(Y) Y_dtype = Y.dtype else: Y_dtype = Y.dtype if X.dtype == Y_dtype == np.float32: dtype = np.float32 else: dtype = np.float return X, Y, dtype def check_pairwise_arrays(X, Y, precomputed=False): """ Set X and Y appropriately and checks inputs If Y is None, it is set as a pointer to X (i.e. not a copy). If Y is given, this does not happen. All distance metrics should use this function first to assert that the given parameters are correct and safe to use. Specifically, this function first ensures that both X and Y are arrays, then checks that they are at least two dimensional while ensuring that their elements are floats. Finally, the function checks that the size of the second dimension of the two arrays is equal, or the equivalent check for a precomputed distance matrix. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples_a, n_features) Y : {array-like, sparse matrix}, shape (n_samples_b, n_features) precomputed : bool True if X is to be treated as precomputed distances to the samples in Y. Returns ------- safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features) An array equal to X, guaranteed to be a numpy array. safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features) An array equal to Y if Y was not None, guaranteed to be a numpy array. If Y was None, safe_Y will be a pointer to X. """ X, Y, dtype = _return_float_dtype(X, Y) if Y is X or Y is None: X = Y = check_array(X, accept_sparse='csr', dtype=dtype) else: X = check_array(X, accept_sparse='csr', dtype=dtype) Y = check_array(Y, accept_sparse='csr', dtype=dtype) if precomputed: if X.shape[1] != Y.shape[0]: raise ValueError("Precomputed metric requires shape " "(n_queries, n_indexed). Got (%d, %d) " "for %d indexed." % (X.shape[0], X.shape[1], Y.shape[0])) elif X.shape[1] != Y.shape[1]: raise ValueError("Incompatible dimension for X and Y matrices: " "X.shape[1] == %d while Y.shape[1] == %d" % ( X.shape[1], Y.shape[1])) return X, Y def check_paired_arrays(X, Y): """ Set X and Y appropriately and checks inputs for paired distances All paired distance metrics should use this function first to assert that the given parameters are correct and safe to use. 
    Specifically, this function first ensures that both X and Y are arrays,
    then checks that they are at least two dimensional while ensuring that
    their elements are floats. Finally, the function checks that the size
    of the dimensions of the two arrays are equal.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples_a, n_features)

    Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)

    Returns
    -------
    safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
        An array equal to X, guaranteed to be a numpy array.

    safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
        An array equal to Y if Y was not None, guaranteed to be a numpy array.
        If Y was None, safe_Y will be a pointer to X.

    """
    X, Y = check_pairwise_arrays(X, Y)
    if X.shape != Y.shape:
        raise ValueError("X and Y should be of same shape. They were "
                         "respectively %r and %r long." % (X.shape, Y.shape))
    return X, Y


# Pairwise distances
def euclidean_distances(X, Y=None, Y_norm_squared=None, squared=False,
                        X_norm_squared=None):
    """
    Considering the rows of X (and Y=X) as vectors, compute the
    distance matrix between each pair of vectors.

    For efficiency reasons, the euclidean distance between a pair of row
    vector x and y is computed as::

        dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))

    This formulation has two advantages over other ways of computing
    distances. First, it is computationally efficient when dealing with
    sparse data. Second, if one argument varies but the other remains
    unchanged, then `dot(x, x)` and/or `dot(y, y)` can be pre-computed.

    However, this is not the most precise way of doing this computation, and
    the distance matrix returned by this function may not be exactly
    symmetric as required by, e.g., ``scipy.spatial.distance`` functions.

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples_1, n_features)

    Y : {array-like, sparse matrix}, shape (n_samples_2, n_features)

    Y_norm_squared : array-like, shape (n_samples_2, ), optional
        Pre-computed dot-products of vectors in Y (e.g.,
        ``(Y**2).sum(axis=1)``)

    squared : boolean, optional
        Return squared Euclidean distances.

    X_norm_squared : array-like, shape = [n_samples_1], optional
        Pre-computed dot-products of vectors in X (e.g.,
        ``(X**2).sum(axis=1)``)

    Returns
    -------
    distances : {array, sparse matrix}, shape (n_samples_1, n_samples_2)

    Examples
    --------
    >>> from sklearn.metrics.pairwise import euclidean_distances
    >>> X = [[0, 1], [1, 1]]
    >>> # distance between rows of X
    >>> euclidean_distances(X, X)
    array([[ 0.,  1.],
           [ 1.,  0.]])
    >>> # get distance to origin
    >>> euclidean_distances(X, [[0, 0]])
    array([[ 1.        ],
           [ 1.41421356]])

    See also
    --------
    paired_distances : distances between pairs of elements of X and Y.
""" X, Y = check_pairwise_arrays(X, Y) if X_norm_squared is not None: XX = check_array(X_norm_squared) if XX.shape == (1, X.shape[0]): XX = XX.T elif XX.shape != (X.shape[0], 1): raise ValueError( "Incompatible dimensions for X and X_norm_squared") else: XX = row_norms(X, squared=True)[:, np.newaxis] if X is Y: # shortcut in the common case euclidean_distances(X, X) YY = XX.T elif Y_norm_squared is not None: YY = np.atleast_2d(Y_norm_squared) if YY.shape != (1, Y.shape[0]): raise ValueError( "Incompatible dimensions for Y and Y_norm_squared") else: YY = row_norms(Y, squared=True)[np.newaxis, :] distances = safe_sparse_dot(X, Y.T, dense_output=True) distances *= -2 distances += XX distances += YY np.maximum(distances, 0, out=distances) if X is Y: # Ensure that distances between vectors and themselves are set to 0.0. # This may not be the case due to floating point rounding errors. distances.flat[::distances.shape[0] + 1] = 0.0 return distances if squared else np.sqrt(distances, out=distances) def pairwise_distances_argmin_min(X, Y, axis=1, metric="euclidean", batch_size=500, metric_kwargs=None): """Compute minimum distances between one point and a set of points. This function computes for each row in X, the index of the row of Y which is closest (according to the specified distance). The minimal distances are also returned. This is mostly equivalent to calling: (pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis), pairwise_distances(X, Y=Y, metric=metric).min(axis=axis)) but uses much less memory, and is faster for large arrays. Parameters ---------- X, Y : {array-like, sparse matrix} Arrays containing points. Respective shapes (n_samples1, n_features) and (n_samples2, n_features) batch_size : integer To reduce memory consumption over the naive solution, data are processed in batches, comprising batch_size rows of X and batch_size rows of Y. The default value is quite conservative, but can be changed for fine-tuning. The larger the number, the larger the memory usage. metric : string or callable, default 'euclidean' metric to use for distance computation. Any metric from scikit-learn or scipy.spatial.distance can be used. If metric is a callable function, it is called on each pair of instances (rows) and the resulting value recorded. The callable should take two arrays as input and return one value indicating the distance between them. This works for Scipy's metrics, but is less efficient than passing the metric name as a string. Distance matrices are not supported. Valid values for metric are: - from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2', 'manhattan'] - from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev', 'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule'] See the documentation for scipy.spatial.distance for details on these metrics. metric_kwargs : dict, optional Keyword arguments to pass to specified metric function. axis : int, optional, default 1 Axis along which the argmin and distances are to be computed. Returns ------- argmin : numpy.ndarray Y[argmin[i], :] is the row in Y that is closest to X[i, :]. distances : numpy.ndarray distances[i] is the distance between the i-th row in X and the argmin[i]-th row in Y. 
See also -------- sklearn.metrics.pairwise_distances sklearn.metrics.pairwise_distances_argmin """ dist_func = None if metric in PAIRWISE_DISTANCE_FUNCTIONS: dist_func = PAIRWISE_DISTANCE_FUNCTIONS[metric] elif not callable(metric) and not isinstance(metric, str): raise ValueError("'metric' must be a string or a callable") X, Y = check_pairwise_arrays(X, Y) if metric_kwargs is None: metric_kwargs = {} if axis == 0: X, Y = Y, X # Allocate output arrays indices = np.empty(X.shape[0], dtype=np.intp) values = np.empty(X.shape[0]) values.fill(np.infty) for chunk_x in gen_batches(X.shape[0], batch_size): X_chunk = X[chunk_x, :] for chunk_y in gen_batches(Y.shape[0], batch_size): Y_chunk = Y[chunk_y, :] if dist_func is not None: if metric == 'euclidean': # special case, for speed d_chunk = safe_sparse_dot(X_chunk, Y_chunk.T, dense_output=True) d_chunk *= -2 d_chunk += row_norms(X_chunk, squared=True)[:, np.newaxis] d_chunk += row_norms(Y_chunk, squared=True)[np.newaxis, :] np.maximum(d_chunk, 0, d_chunk) else: d_chunk = dist_func(X_chunk, Y_chunk, **metric_kwargs) else: d_chunk = pairwise_distances(X_chunk, Y_chunk, metric=metric, **metric_kwargs) # Update indices and minimum values using chunk min_indices = d_chunk.argmin(axis=1) min_values = d_chunk[np.arange(chunk_x.stop - chunk_x.start), min_indices] flags = values[chunk_x] > min_values indices[chunk_x][flags] = min_indices[flags] + chunk_y.start values[chunk_x][flags] = min_values[flags] if metric == "euclidean" and not metric_kwargs.get("squared", False): np.sqrt(values, values) return indices, values def pairwise_distances_argmin(X, Y, axis=1, metric="euclidean", batch_size=500, metric_kwargs=None): """Compute minimum distances between one point and a set of points. This function computes for each row in X, the index of the row of Y which is closest (according to the specified distance). This is mostly equivalent to calling: pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis) but uses much less memory, and is faster for large arrays. This function works with dense 2D arrays only. Parameters ---------- X : array-like Arrays containing points. Respective shapes (n_samples1, n_features) and (n_samples2, n_features) Y : array-like Arrays containing points. Respective shapes (n_samples1, n_features) and (n_samples2, n_features) batch_size : integer To reduce memory consumption over the naive solution, data are processed in batches, comprising batch_size rows of X and batch_size rows of Y. The default value is quite conservative, but can be changed for fine-tuning. The larger the number, the larger the memory usage. metric : string or callable metric to use for distance computation. Any metric from scikit-learn or scipy.spatial.distance can be used. If metric is a callable function, it is called on each pair of instances (rows) and the resulting value recorded. The callable should take two arrays as input and return one value indicating the distance between them. This works for Scipy's metrics, but is less efficient than passing the metric name as a string. Distance matrices are not supported. 
Valid values for metric are: - from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2', 'manhattan'] - from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev', 'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule'] See the documentation for scipy.spatial.distance for details on these metrics. metric_kwargs : dict keyword arguments to pass to specified metric function. axis : int, optional, default 1 Axis along which the argmin and distances are to be computed. Returns ------- argmin : numpy.ndarray Y[argmin[i], :] is the row in Y that is closest to X[i, :]. See also -------- sklearn.metrics.pairwise_distances sklearn.metrics.pairwise_distances_argmin_min """ if metric_kwargs is None: metric_kwargs = {} return pairwise_distances_argmin_min(X, Y, axis, metric, batch_size, metric_kwargs)[0] def manhattan_distances(X, Y=None, sum_over_features=True, size_threshold=5e8): """ Compute the L1 distances between the vectors in X and Y. With sum_over_features equal to False it returns the componentwise distances. Read more in the :ref:`User Guide <metrics>`. Parameters ---------- X : array_like An array with shape (n_samples_X, n_features). Y : array_like, optional An array with shape (n_samples_Y, n_features). sum_over_features : bool, default=True If True the function returns the pairwise distance matrix else it returns the componentwise L1 pairwise-distances. Not supported for sparse matrix inputs. size_threshold : int, default=5e8 Unused parameter. Returns ------- D : array If sum_over_features is False shape is (n_samples_X * n_samples_Y, n_features) and D contains the componentwise L1 pairwise-distances (ie. absolute difference), else shape is (n_samples_X, n_samples_Y) and D contains the pairwise L1 distances. Examples -------- >>> from sklearn.metrics.pairwise import manhattan_distances >>> manhattan_distances([[3]], [[3]])#doctest:+ELLIPSIS array([[ 0.]]) >>> manhattan_distances([[3]], [[2]])#doctest:+ELLIPSIS array([[ 1.]]) >>> manhattan_distances([[2]], [[3]])#doctest:+ELLIPSIS array([[ 1.]]) >>> manhattan_distances([[1, 2], [3, 4]],\ [[1, 2], [0, 3]])#doctest:+ELLIPSIS array([[ 0., 2.], [ 4., 4.]]) >>> import numpy as np >>> X = np.ones((1, 2)) >>> y = 2 * np.ones((2, 2)) >>> manhattan_distances(X, y, sum_over_features=False)#doctest:+ELLIPSIS array([[ 1., 1.], [ 1., 1.]]...) """ X, Y = check_pairwise_arrays(X, Y) if issparse(X) or issparse(Y): if not sum_over_features: raise TypeError("sum_over_features=%r not supported" " for sparse matrices" % sum_over_features) X = csr_matrix(X, copy=False) Y = csr_matrix(Y, copy=False) D = np.zeros((X.shape[0], Y.shape[0])) _sparse_manhattan(X.data, X.indices, X.indptr, Y.data, Y.indices, Y.indptr, X.shape[1], D) return D if sum_over_features: return distance.cdist(X, Y, 'cityblock') D = X[:, np.newaxis, :] - Y[np.newaxis, :, :] D = np.abs(D, D) return D.reshape((-1, X.shape[1])) def cosine_distances(X, Y=None): """ Compute cosine distance between samples in X and Y. Cosine distance is defined as 1.0 minus the cosine similarity. Read more in the :ref:`User Guide <metrics>`. Parameters ---------- X : array_like, sparse matrix with shape (n_samples_X, n_features). Y : array_like, sparse matrix (optional) with shape (n_samples_Y, n_features). Returns ------- distance matrix : array An array with shape (n_samples_X, n_samples_Y). 
See also -------- sklearn.metrics.pairwise.cosine_similarity scipy.spatial.distance.cosine (dense matrices only) """ # 1.0 - cosine_similarity(X, Y) without copy S = cosine_similarity(X, Y) S *= -1 S += 1 return S # Paired distances def paired_euclidean_distances(X, Y): """ Computes the paired euclidean distances between X and Y Read more in the :ref:`User Guide <metrics>`. Parameters ---------- X : array-like, shape (n_samples, n_features) Y : array-like, shape (n_samples, n_features) Returns ------- distances : ndarray (n_samples, ) """ X, Y = check_paired_arrays(X, Y) return row_norms(X - Y) def paired_manhattan_distances(X, Y): """Compute the L1 distances between the vectors in X and Y. Read more in the :ref:`User Guide <metrics>`. Parameters ---------- X : array-like, shape (n_samples, n_features) Y : array-like, shape (n_samples, n_features) Returns ------- distances : ndarray (n_samples, ) """ X, Y = check_paired_arrays(X, Y) diff = X - Y if issparse(diff): diff.data = np.abs(diff.data) return np.squeeze(np.array(diff.sum(axis=1))) else: return np.abs(diff).sum(axis=-1) def paired_cosine_distances(X, Y): """ Computes the paired cosine distances between X and Y Read more in the :ref:`User Guide <metrics>`. Parameters ---------- X : array-like, shape (n_samples, n_features) Y : array-like, shape (n_samples, n_features) Returns ------- distances : ndarray, shape (n_samples, ) Notes ------ The cosine distance is equivalent to the half the squared euclidean distance if each sample is normalized to unit norm """ X, Y = check_paired_arrays(X, Y) return .5 * row_norms(normalize(X) - normalize(Y), squared=True) PAIRED_DISTANCES = { 'cosine': paired_cosine_distances, 'euclidean': paired_euclidean_distances, 'l2': paired_euclidean_distances, 'l1': paired_manhattan_distances, 'manhattan': paired_manhattan_distances, 'cityblock': paired_manhattan_distances} def paired_distances(X, Y, metric="euclidean", **kwds): """ Computes the paired distances between X and Y. Computes the distances between (X[0], Y[0]), (X[1], Y[1]), etc... Read more in the :ref:`User Guide <metrics>`. Parameters ---------- X : ndarray (n_samples, n_features) Array 1 for distance computation. Y : ndarray (n_samples, n_features) Array 2 for distance computation. metric : string or callable The metric to use when calculating distance between instances in a feature array. If metric is a string, it must be one of the options specified in PAIRED_DISTANCES, including "euclidean", "manhattan", or "cosine". Alternatively, if metric is a callable function, it is called on each pair of instances (rows) and the resulting value recorded. The callable should take two arrays from X as input and return a value indicating the distance between them. Returns ------- distances : ndarray (n_samples, ) Examples -------- >>> from sklearn.metrics.pairwise import paired_distances >>> X = [[0, 1], [1, 1]] >>> Y = [[0, 1], [2, 1]] >>> paired_distances(X, Y) array([ 0., 1.]) See also -------- pairwise_distances : pairwise distances. """ if metric in PAIRED_DISTANCES: func = PAIRED_DISTANCES[metric] return func(X, Y) elif callable(metric): # Check the matrix first (it is usually done by the metric) X, Y = check_paired_arrays(X, Y) distances = np.zeros(len(X)) for i in range(len(X)): distances[i] = metric(X[i], Y[i]) return distances else: raise ValueError('Unknown distance %s' % metric) # Kernels def linear_kernel(X, Y=None): """ Compute the linear kernel between X and Y. Read more in the :ref:`User Guide <linear_kernel>`. 
Parameters ---------- X : array of shape (n_samples_1, n_features) Y : array of shape (n_samples_2, n_features) Returns ------- Gram matrix : array of shape (n_samples_1, n_samples_2) """ X, Y = check_pairwise_arrays(X, Y) return safe_sparse_dot(X, Y.T, dense_output=True) def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1): """ Compute the polynomial kernel between X and Y:: K(X, Y) = (gamma <X, Y> + coef0)^degree Read more in the :ref:`User Guide <polynomial_kernel>`. Parameters ---------- X : ndarray of shape (n_samples_1, n_features) Y : ndarray of shape (n_samples_2, n_features) coef0 : int, default 1 degree : int, default 3 Returns ------- Gram matrix : array of shape (n_samples_1, n_samples_2) """ X, Y = check_pairwise_arrays(X, Y) if gamma is None: gamma = 1.0 / X.shape[1] K = safe_sparse_dot(X, Y.T, dense_output=True) K *= gamma K += coef0 K **= degree return K def sigmoid_kernel(X, Y=None, gamma=None, coef0=1): """ Compute the sigmoid kernel between X and Y:: K(X, Y) = tanh(gamma <X, Y> + coef0) Read more in the :ref:`User Guide <sigmoid_kernel>`. Parameters ---------- X : ndarray of shape (n_samples_1, n_features) Y : ndarray of shape (n_samples_2, n_features) coef0 : int, default 1 Returns ------- Gram matrix: array of shape (n_samples_1, n_samples_2) """ X, Y = check_pairwise_arrays(X, Y) if gamma is None: gamma = 1.0 / X.shape[1] K = safe_sparse_dot(X, Y.T, dense_output=True) K *= gamma K += coef0 np.tanh(K, K) # compute tanh in-place return K def rbf_kernel(X, Y=None, gamma=None): """ Compute the rbf (gaussian) kernel between X and Y:: K(x, y) = exp(-gamma ||x-y||^2) for each pair of rows x in X and y in Y. Read more in the :ref:`User Guide <rbf_kernel>`. Parameters ---------- X : array of shape (n_samples_X, n_features) Y : array of shape (n_samples_Y, n_features) gamma : float Returns ------- kernel_matrix : array of shape (n_samples_X, n_samples_Y) """ X, Y = check_pairwise_arrays(X, Y) if gamma is None: gamma = 1.0 / X.shape[1] K = euclidean_distances(X, Y, squared=True) K *= -gamma np.exp(K, K) # exponentiate K in-place return K def cosine_similarity(X, Y=None, dense_output=True): """Compute cosine similarity between samples in X and Y. Cosine similarity, or the cosine kernel, computes similarity as the normalized dot product of X and Y: K(X, Y) = <X, Y> / (||X||*||Y||) On L2-normalized data, this function is equivalent to linear_kernel. Read more in the :ref:`User Guide <cosine_similarity>`. Parameters ---------- X : ndarray or sparse array, shape: (n_samples_X, n_features) Input data. Y : ndarray or sparse array, shape: (n_samples_Y, n_features) Input data. If ``None``, the output will be the pairwise similarities between all samples in ``X``. dense_output : boolean (optional), default True Whether to return dense output even when the input is sparse. If ``False``, the output is sparse if both input arrays are sparse. Returns ------- kernel matrix : array An array with shape (n_samples_X, n_samples_Y). """ # to avoid recursive import X, Y = check_pairwise_arrays(X, Y) X_normalized = normalize(X, copy=True) if X is Y: Y_normalized = X_normalized else: Y_normalized = normalize(Y, copy=True) K = safe_sparse_dot(X_normalized, Y_normalized.T, dense_output=dense_output) return K def additive_chi2_kernel(X, Y=None): """Computes the additive chi-squared kernel between observations in X and Y The chi-squared kernel is computed between each pair of rows in X and Y. X and Y have to be non-negative. This kernel is most commonly applied to histograms. 
The chi-squared kernel is given by:: k(x, y) = -Sum [(x - y)^2 / (x + y)] It can be interpreted as a weighted difference per entry. Read more in the :ref:`User Guide <chi2_kernel>`. Notes ----- As the negative of a distance, this kernel is only conditionally positive definite. Parameters ---------- X : array-like of shape (n_samples_X, n_features) Y : array of shape (n_samples_Y, n_features) Returns ------- kernel_matrix : array of shape (n_samples_X, n_samples_Y) References ---------- * Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C. Local features and kernels for classification of texture and object categories: A comprehensive study International Journal of Computer Vision 2007 http://research.microsoft.com/en-us/um/people/manik/projects/trade-off/papers/ZhangIJCV06.pdf See also -------- chi2_kernel : The exponentiated version of the kernel, which is usually preferable. sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation to this kernel. """ if issparse(X) or issparse(Y): raise ValueError("additive_chi2 does not support sparse matrices.") X, Y = check_pairwise_arrays(X, Y) if (X < 0).any(): raise ValueError("X contains negative values.") if Y is not X and (Y < 0).any(): raise ValueError("Y contains negative values.") result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype) _chi2_kernel_fast(X, Y, result) return result def chi2_kernel(X, Y=None, gamma=1.): """Computes the exponential chi-squared kernel X and Y. The chi-squared kernel is computed between each pair of rows in X and Y. X and Y have to be non-negative. This kernel is most commonly applied to histograms. The chi-squared kernel is given by:: k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)]) It can be interpreted as a weighted difference per entry. Read more in the :ref:`User Guide <chi2_kernel>`. Parameters ---------- X : array-like of shape (n_samples_X, n_features) Y : array of shape (n_samples_Y, n_features) gamma : float, default=1. Scaling parameter of the chi2 kernel. Returns ------- kernel_matrix : array of shape (n_samples_X, n_samples_Y) References ---------- * Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C. Local features and kernels for classification of texture and object categories: A comprehensive study International Journal of Computer Vision 2007 http://research.microsoft.com/en-us/um/people/manik/projects/trade-off/papers/ZhangIJCV06.pdf See also -------- additive_chi2_kernel : The additive version of this kernel sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation to the additive version of this kernel. """ K = additive_chi2_kernel(X, Y) K *= gamma return np.exp(K, K) # Helper functions - distance PAIRWISE_DISTANCE_FUNCTIONS = { # If updating this dictionary, update the doc in both distance_metrics() # and also in pairwise_distances()! 'cityblock': manhattan_distances, 'cosine': cosine_distances, 'euclidean': euclidean_distances, 'l2': euclidean_distances, 'l1': manhattan_distances, 'manhattan': manhattan_distances, 'precomputed': None, # HACK: precomputed is always allowed, never called } def distance_metrics(): """Valid metrics for pairwise_distances. This function simply returns the valid pairwise distance metrics. It exists to allow for a description of the mapping for each of the valid strings. 
The valid distance metrics, and the function they map to, are: ============ ==================================== metric Function ============ ==================================== 'cityblock' metrics.pairwise.manhattan_distances 'cosine' metrics.pairwise.cosine_distances 'euclidean' metrics.pairwise.euclidean_distances 'l1' metrics.pairwise.manhattan_distances 'l2' metrics.pairwise.euclidean_distances 'manhattan' metrics.pairwise.manhattan_distances ============ ==================================== Read more in the :ref:`User Guide <metrics>`. """ return PAIRWISE_DISTANCE_FUNCTIONS def _parallel_pairwise(X, Y, func, n_jobs, **kwds): """Break the pairwise matrix in n_jobs even slices and compute them in parallel""" if n_jobs < 0: n_jobs = max(cpu_count() + 1 + n_jobs, 1) if Y is None: Y = X if n_jobs == 1: # Special case to avoid picklability checks in delayed return func(X, Y, **kwds) # TODO: in some cases, backend='threading' may be appropriate fd = delayed(func) ret = Parallel(n_jobs=n_jobs, verbose=0)( fd(X, Y[s], **kwds) for s in gen_even_slices(Y.shape[0], n_jobs)) return np.hstack(ret) def _pairwise_callable(X, Y, metric, **kwds): """Handle the callable case for pairwise_{distances,kernels} """ X, Y = check_pairwise_arrays(X, Y) if X is Y: # Only calculate metric for upper triangle out = np.zeros((X.shape[0], Y.shape[0]), dtype='float') iterator = itertools.combinations(range(X.shape[0]), 2) for i, j in iterator: out[i, j] = metric(X[i], Y[j], **kwds) # Make symmetric # NB: out += out.T will produce incorrect results out = out + out.T # Calculate diagonal # NB: nonzero diagonals are allowed for both metrics and kernels for i in range(X.shape[0]): x = X[i] out[i, i] = metric(x, x, **kwds) else: # Calculate all cells out = np.empty((X.shape[0], Y.shape[0]), dtype='float') iterator = itertools.product(range(X.shape[0]), range(Y.shape[0])) for i, j in iterator: out[i, j] = metric(X[i], Y[j], **kwds) return out _VALID_METRICS = ['euclidean', 'l2', 'l1', 'manhattan', 'cityblock', 'braycurtis', 'canberra', 'chebyshev', 'correlation', 'cosine', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule', "wminkowski"] def pairwise_distances(X, Y=None, metric="euclidean", n_jobs=1, **kwds): """ Compute the distance matrix from a vector array X and optional Y. This method takes either a vector array or a distance matrix, and returns a distance matrix. If the input is a vector array, the distances are computed. If the input is a distances matrix, it is returned instead. This method provides a safe way to take a distance matrix as input, while preserving compatibility with many other algorithms that take a vector array. If Y is given (default is None), then the returned matrix is the pairwise distance between the arrays from both X and Y. Valid values for metric are: - From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2', 'manhattan']. These metrics support sparse matrix inputs. - From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev', 'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule'] See the documentation for scipy.spatial.distance for details on these metrics. These metrics do not support sparse matrix inputs. 
Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are valid scipy.spatial.distance metrics), the scikit-learn implementation will be used, which is faster and has support for sparse matrices (except for 'cityblock'). For a verbose description of the metrics from scikit-learn, see the __doc__ of the sklearn.pairwise.distance_metrics function. Read more in the :ref:`User Guide <metrics>`. Parameters ---------- X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \ [n_samples_a, n_features] otherwise Array of pairwise distances between samples, or a feature array. Y : array [n_samples_b, n_features], optional An optional second feature array. Only allowed if metric != "precomputed". metric : string, or callable The metric to use when calculating distance between instances in a feature array. If metric is a string, it must be one of the options allowed by scipy.spatial.distance.pdist for its metric parameter, or a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS. If metric is "precomputed", X is assumed to be a distance matrix. Alternatively, if metric is a callable function, it is called on each pair of instances (rows) and the resulting value recorded. The callable should take two arrays from X as input and return a value indicating the distance between them. n_jobs : int The number of jobs to use for the computation. This works by breaking down the pairwise matrix into n_jobs even slices and computing them in parallel. If -1 all CPUs are used. If 1 is given, no parallel computing code is used at all, which is useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one are used. `**kwds` : optional keyword parameters Any further parameters are passed directly to the distance function. If using a scipy.spatial.distance metric, the parameters are still metric dependent. See the scipy docs for usage examples. Returns ------- D : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b] A distance matrix D such that D_{i, j} is the distance between the ith and jth vectors of the given matrix X, if Y is None. If Y is not None, then D_{i, j} is the distance between the ith array from X and the jth array from Y. """ if (metric not in _VALID_METRICS and not callable(metric) and metric != "precomputed"): raise ValueError("Unknown metric %s. " "Valid metrics are %s, or 'precomputed', or a " "callable" % (metric, _VALID_METRICS)) if metric == "precomputed": X, _ = check_pairwise_arrays(X, Y, precomputed=True) return X elif metric in PAIRWISE_DISTANCE_FUNCTIONS: func = PAIRWISE_DISTANCE_FUNCTIONS[metric] elif callable(metric): func = partial(_pairwise_callable, metric=metric, **kwds) else: if issparse(X) or issparse(Y): raise TypeError("scipy distance metrics do not" " support sparse matrices.") X, Y = check_pairwise_arrays(X, Y) if n_jobs == 1 and X is Y: return distance.squareform(distance.pdist(X, metric=metric, **kwds)) func = partial(distance.cdist, metric=metric, **kwds) return _parallel_pairwise(X, Y, func, n_jobs, **kwds) # Helper functions - distance PAIRWISE_KERNEL_FUNCTIONS = { # If updating this dictionary, update the doc in both distance_metrics() # and also in pairwise_distances()! 
'additive_chi2': additive_chi2_kernel, 'chi2': chi2_kernel, 'linear': linear_kernel, 'polynomial': polynomial_kernel, 'poly': polynomial_kernel, 'rbf': rbf_kernel, 'sigmoid': sigmoid_kernel, 'cosine': cosine_similarity, } def kernel_metrics(): """ Valid metrics for pairwise_kernels This function simply returns the valid pairwise distance metrics. It exists, however, to allow for a verbose description of the mapping for each of the valid strings. The valid distance metrics, and the function they map to, are: =============== ======================================== metric Function =============== ======================================== 'additive_chi2' sklearn.pairwise.additive_chi2_kernel 'chi2' sklearn.pairwise.chi2_kernel 'linear' sklearn.pairwise.linear_kernel 'poly' sklearn.pairwise.polynomial_kernel 'polynomial' sklearn.pairwise.polynomial_kernel 'rbf' sklearn.pairwise.rbf_kernel 'sigmoid' sklearn.pairwise.sigmoid_kernel 'cosine' sklearn.pairwise.cosine_similarity =============== ======================================== Read more in the :ref:`User Guide <metrics>`. """ return PAIRWISE_KERNEL_FUNCTIONS KERNEL_PARAMS = { "additive_chi2": (), "chi2": (), "cosine": (), "exp_chi2": frozenset(["gamma"]), "linear": (), "poly": frozenset(["gamma", "degree", "coef0"]), "polynomial": frozenset(["gamma", "degree", "coef0"]), "rbf": frozenset(["gamma"]), "sigmoid": frozenset(["gamma", "coef0"]), } def pairwise_kernels(X, Y=None, metric="linear", filter_params=False, n_jobs=1, **kwds): """Compute the kernel between arrays X and optional array Y. This method takes either a vector array or a kernel matrix, and returns a kernel matrix. If the input is a vector array, the kernels are computed. If the input is a kernel matrix, it is returned instead. This method provides a safe way to take a kernel matrix as input, while preserving compatibility with many other algorithms that take a vector array. If Y is given (default is None), then the returned matrix is the pairwise kernel between the arrays from both X and Y. Valid values for metric are:: ['rbf', 'sigmoid', 'polynomial', 'poly', 'linear', 'cosine'] Read more in the :ref:`User Guide <metrics>`. Parameters ---------- X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \ [n_samples_a, n_features] otherwise Array of pairwise kernels between samples, or a feature array. Y : array [n_samples_b, n_features] A second feature array only if X has shape [n_samples_a, n_features]. metric : string, or callable The metric to use when calculating kernel between instances in a feature array. If metric is a string, it must be one of the metrics in pairwise.PAIRWISE_KERNEL_FUNCTIONS. If metric is "precomputed", X is assumed to be a kernel matrix. Alternatively, if metric is a callable function, it is called on each pair of instances (rows) and the resulting value recorded. The callable should take two arrays from X as input and return a value indicating the distance between them. n_jobs : int The number of jobs to use for the computation. This works by breaking down the pairwise matrix into n_jobs even slices and computing them in parallel. If -1 all CPUs are used. If 1 is given, no parallel computing code is used at all, which is useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one are used. filter_params: boolean Whether to filter invalid parameters or not. `**kwds` : optional keyword parameters Any further parameters are passed directly to the kernel function. 
Returns ------- K : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b] A kernel matrix K such that K_{i, j} is the kernel between the ith and jth vectors of the given matrix X, if Y is None. If Y is not None, then K_{i, j} is the kernel between the ith array from X and the jth array from Y. Notes ----- If metric is 'precomputed', Y is ignored and X is returned. """ if metric == "precomputed": X, _ = check_pairwise_arrays(X, Y, precomputed=True) return X elif metric in PAIRWISE_KERNEL_FUNCTIONS: if filter_params: kwds = dict((k, kwds[k]) for k in kwds if k in KERNEL_PARAMS[metric]) func = PAIRWISE_KERNEL_FUNCTIONS[metric] elif callable(metric): func = partial(_pairwise_callable, metric=metric, **kwds) else: raise ValueError("Unknown kernel %r" % metric) return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
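

# ---------------------------------------------------------------------------
# Editor's addition: a minimal, hedged usage sketch of the dispatchers defined
# above. It relies only on functions in this module and on numpy; the input
# arrays are arbitrary demo values, not part of the original file.
def _demo_pairwise_dispatch():
    X = np.array([[0., 1.], [1., 1.]])
    Y = np.array([[1., 0.], [2., 2.]])
    # String metrics dispatch through PAIRWISE_DISTANCE_FUNCTIONS (or scipy).
    D = pairwise_distances(X, Y, metric="euclidean")      # shape (2, 2)
    # Kernels dispatch through PAIRWISE_KERNEL_FUNCTIONS; rbf uses
    # gamma = 1 / n_features when gamma is None.
    K = pairwise_kernels(X, Y, metric="rbf", gamma=0.5)   # shape (2, 2)
    return D, K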
bsd-3-clause
MaciCrowell/TCGA_DataScience
TCGAlogReg.py
1
13158
import random import glm import re import thinkstats2 import thinkplot import math import numpy as np import matplotlib.pyplot as pyplot import DataUtilities def run_regression_and_print(survey, version, means): """Runs a logistic regression and prints results survey: Survey version: which model to run means: map from variables to nominal values """ print 'Version', version print 'N', survey.len() regs = run_regression(survey, version) regs.print_regression_reports(means) regs.summarize(means) return regs def run_regression(survey, version): """Runs logistic regressions. survey: Survey version: which model to run Returns: Regressions object """ dep, control = DataUtilities.get_version(version) print dep, control reg = survey.make_logistic_regression(dep, control) #print reg return Regressions([reg]) def read_complete(version,patients): survey = read_survey(patients) # give respondents random values #[r.clean_random() for r in survey.respondents()] # select complete records dep, control = DataUtilities.get_version(version) #for var in [dep] + control: # print r'\verb"%s",' % var attrs = [dep] + control complete = survey.subsample(lambda r: r.is_complete(attrs)) return survey, complete class Survey(object): """Represents a set of respondents as a map from caseid to Respondent.""" def __init__(self, rs=None): if rs is None: self.rs = {} else: self.rs = rs self.cdf = None def add_respondent(self, r): """Adds a respondent to this survey.""" self.rs[r.caseid] = r def add_respondents(self, rs): """Adds respondents to this survey.""" [self.add_respondent(r) for r in rs] def len(self): """Number of respondents.""" return len(self.rs) def respondents(self): """Returns an iterator over the respondents.""" return self.rs.itervalues() def lookup(self, caseid): """Looks up a caseid and returns the Respondent object.""" return self.rs[caseid] def loadPatients(self, patients): self.rs = patients def subsample(self, filter_func): """Form a new cohort by filtering respondents filter_func: function that takes a respondent and returns boolean Returns: Survey """ pairs = [(r.caseid, r) for r in self.respondents() if filter_func(r)] rs = dict(pairs) return Survey(rs) def make_logistic_regression(self, dep, control, exp_vars=[]): """Runs a logistic regression. dep: string dependent variable name control: list of string control variables exp_vars: list of string independent variable names Returns: LogRegression object """ s = ' + '.join(control + exp_vars) model = '%s ~ %s' % (dep, s) reg = self.logistic_regression(model) null_model = make_null_model(self, dep) reg.null_sip = self.self_information_partition(dep, null_model) reg.model_sip = self.self_information_partition(dep, reg) reg.sip = reg.null_sip - reg.model_sip #print reg.null_sip, reg.model_sip, reg.sip return reg def logistic_regression(self, model, print_flag=False): """Performs a regression. 
model: string model in r format print_flag: boolean, whether to print results Returns: LogRegression object """ def clean(attr): m = re.match('as.factor\((.*)\)', attr) if m: return m.group(1) return attr # pull out the attributes in the model rows = [] t = model.split() attrs = [clean(attr) for attr in model.split() if len(attr)>1] for r in self.respondents(): row = [getattr(r, attr) for attr in attrs] rows.append(row) rows = [row for row in rows if 'NA' not in row] # inject the data and runs the model col_dict = dict(zip(attrs, zip(*rows))) glm.inject_col_dict(col_dict) res = glm.logit_model(model, print_flag=print_flag) return LogRegression(res) def make_pmf(self, attr, na_flag=False): """Make a PMF for an attribute. Uses compwt to weight respondents. attr: string attr name na_flag: boolean, whether to remove NAs Returns: normalized PMF """ pmf = thinkstats2.Pmf() for r in self.respondents(): val = getattr(r, attr) wt = 1 #r.compwt pmf.Incr(val, wt) if na_flag: pmf.Set('NA', 0) pmf.Normalize() return pmf def self_information_partition(self, attr, model): """Computes the self information of a partition. Does not take into account compwt attr: string binary attribute model: object with a fit_prob method that takes a respondent Returns: float number of bits """ def log2(x, denom=math.log(2)): return math.log(x) / denom total = 0.0 n = 0.0 for r in self.respondents(): x = getattr(r, attr) if x == 'NA': # if we don't know the answer, we got zero bits of info continue p = model.fit_prob(r) assert p != 'NA' if x == 1: total += -log2(p) elif x == 0: total += -log2(1-p) else: raise ValueError('Values must be 0, 1 or NA') return total def fraction_one(pmf): yes, no = pmf.Prob(1), pmf.Prob(0) return float(yes) / (yes+no) def cumulative_odds(estimates, means): """Computes cumulative odds based on a sequence of estimates. Iterates the attributes and computes the odds ratio, for the given value, and the probability that corresponds to the cumulative odds. estimates: list of (name, est, error, z) means: map from attribute to value Returns: list of (name, odds, p) """ total_odds = 1.0 res = [] for name, est, _, _ in estimates: mean = means.get(name, 1) odds = math.exp(est * mean) total_odds *= odds p = 100 * total_odds / (1 + total_odds) res.append((name, odds, p)) return res def print_cumulative_odds(cumulative_odds): """Prints a summary of the estimated parameters. cumulative_odds: list of (name, odds, p) """ print '\t\todds\tcumulative' print '\t\tratio\tprobability\tdiff' prev = None for name, odds, p in cumulative_odds: if prev: diff = p - prev print '%11s\t%0.2g\t%0.2g\t%0.2g' % (name, odds, p, diff) else: print '%11s\t%0.2g\t%0.2g' % (name, odds, p) prev = p def compute_ci(col): """Computes a 95% confidence interval. col: sequence of values Returns: CI tuple, p-value """ n = len(col) index = n / 40 mid = n / 2 t = list(col) t.sort() median = t[mid] pval = compute_pvalue(median, t) low, high = t[index], t[-index-1] ci = np.array([median, low, high]) return ci, pval def compute_pvalue(median, t): """Computes the p-value for a list of outcomes. Counts the fraction of outcomes with the opposite sign from the median (or 0). 
median: median value from the list t: list of outcomes Returns: float prob """ if median > 0: opp = [x for x in t if x <= 0] else: opp = [x for x in t if x >= 0] fraction = float(len(opp)) / len(t) return fraction class LogRegression(object): def __init__(self, res): """Makes a LogRegression object res: result object from rpy2 estimates: list of (name, est, error, z) """ self.res = res self.estimates, self.aic = glm.get_coeffs(res) def fit_prob(self, r): """Computes the fitted probability for the given respondent. r: Respondent Returns: float prob """ log_odds = 0 for name, est, error, z in self.estimates: if name == '(Intercept)': log_odds += est else: x = getattr(r, name) if x == 'NA': print name return 'NA' log_odds += est * x odds = math.exp(log_odds) p = odds / (1 + odds) return p def validate(self, respondents, attr): for r in respondents: dv = getattr(r, attr) p = self.fit_prob(r) #print r.caseid, dv, p def report(self): """Prints a summary of the glm results.""" if self.res is None: print 'No summary' glm.print_summary(self.res) def report_odds(self, means, printCum = True): """Prints a summary of the estimated parameters. Iterates the attributes and computes the odds ratio, for the given value, and the probability that corresponds to the cumulative odds. means: map from attribute to value """ cumulative = cumulative_odds(self.estimates, means) if printCum: print_cumulative_odds(cumulative) return cumulative def make_pickleable(self): self.res = None def make_null_model(survey, attr): """Computes the self information of an attribute. Total surprisal of the attr if we knew nothing about the respondents. survey: Survey attr: string attribute name """ pmf = survey.make_pmf(attr) p = fraction_one(pmf) model = NullModel(p) return model class NullModel(object): def __init__(self, p): """Make a NullModel. p: probability that a respondent has some property """ self.p = p def fit_prob(self, r): """Computes the fitted probability for the given respondent. r: Respondent Returns: float prob """ return self.p class Regressions(object): def __init__(self, regs): self.regs = regs reg = regs[0] self.names = [name for name, _, _, _ in reg.estimates] def get(self, i): return self.regs[i] def print_regression_reports(self, means): for reg in self.regs: reg.report() reg.report_odds(means) print 'AIC', reg.aic print 'SIP', reg.sip def median_model(self): rows = [] # for each regression, make a list of estimates for reg in self.regs: row = [est for name, est, _, _ in reg.estimates] rows.append(row) # cols is one column per variable cols = zip(*rows) # compute cis for the estimates medians = [] for col in cols: ci, pval = compute_ci(col) median, low, high = ci medians.append(median) for name, median in zip(self.names, medians): print name, median def summarize(self, means): self.summarize_estimates(means) self.summarize_cumulatives(means) self.summarize_information() def summarize_estimates(self, means): """Generate summary statistics for a set of regressions. 
regs: list of LogRegression means: map from variable to reference value """ rows = [] # for each regression, make a list of estimates for reg in self.regs: row = [est * means.get(name, 1) for name, est, _, _ in reg.estimates] rows.append(row) # cols is one column per variable cols = zip(*rows) # compute cis for the estimates cis = [] pvals = [] for col in cols: ci, pval = compute_ci(col) cis.append(ci) pvals.append(pval) self.estimate_cis = cis self.pvals = pvals def summarize_cumulatives(self, means): """Generate summary statistics for a set of regressions. means: map from variable to reference value """ rows = [] for reg in self.regs: cumulative = cumulative_odds(reg.estimates, means) rows.append([p for _, _, p in cumulative]) # compute cis for the cumulative probabilities cols = zip(*rows) cis = [] for col in cols: ci, pval = compute_ci(col) cis.append(ci) self.cumulative_cis = cis def summarize_information(self): """Generate summary statistics for a set of regressions. regs: list of LogRegression means: map from variable to reference value """ self.aics = [reg.aic for reg in self.regs] self.sips = [reg.sip for reg in self.regs] def print_table(self): """Prints the table in human-readable form.""" data = zip(self.names, self.estimate_cis, self.pvals, self.cumulative_cis) for name, ci, pval, cumulative in data: odds_ci = np.exp(ci) print '%15.15s \t' % name, print format_range(odds_ci), ' \t', print format_range(cumulative), '\t', print format_pvalue(pval) ci, pval = compute_ci(self.sips) print 'SIP:', format_range(ci, 3), format_pvalue(pval) def write_table(self, filename): """Writes the table in latex.""" data = zip(self.names, self.estimate_cis, self.pvals, self.cumulative_cis) header = ['Variable', 'Odds ratio', 'Probability', 'p-value', ] rows = [] for name, ci, pval, cumulative in data: odds_ci = np.exp(ci) row = [ r'\verb"%s"' % name, format_range(odds_ci), format_range(cumulative), format_pvalue(pval), ] rows.append(row) fp = open(filename, 'w') format = '|l|r|r|r|' write_latex_table(fp, header, rows, format) fp.close() def read_survey(patients): survey = Survey() survey.loadPatients(patients) return survey def test_models(version=(30,-1), resample_flag=False, patients = -1, printReg = True): means = dict(educ_from_12=4, born_from_1960=10) if patients == -1: patients = DataUtilities.getDictReadofPatientsFilled() #patients = getDictReadofPatientsFilled() # read the survey survey, complete = read_complete(version, patients) print DataUtilities.get_version(version) #compare_survey_and_complete(survey, complete) #print 'all respondents', survey.len() #print 'complete', complete.len() # run the models if printReg: regs = run_regression_and_print(survey, version=version, means=means) else: regs = run_regression(survey, version) return regs def main(script): test_models(version = (30,-1)) return if __name__ == '__main__': import sys main(*sys.argv)
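

# -----------------------------------------------------------------------------
# Editor's addition: a small, self-contained sketch of the arithmetic performed
# by cumulative_odds() above. The estimate tuples and reference values here are
# hypothetical, not taken from the TCGA data.
def _demo_cumulative_odds():
    estimates = [('(Intercept)', -1.0, 0.0, 0.0),     # (name, est, error, z)
                 ('some_covariate', 0.5, 0.0, 0.0)]   # hypothetical coefficient
    means = {'some_covariate': 2}                     # hypothetical reference value
    total_odds = 1.0
    for name, est, _, _ in estimates:
        total_odds *= math.exp(est * means.get(name, 1))
    # cumulative probability in percent: exp(-1 + 0.5*2) = 1, so p = 50 here
    return 100 * total_odds / (1 + total_odds)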
mit
MohammedWasim/scikit-learn
examples/ensemble/plot_voting_decision_regions.py
230
2386
""" ================================================== Plot the decision boundaries of a VotingClassifier ================================================== Plot the decision boundaries of a `VotingClassifier` for two features of the Iris dataset. Plot the class probabilities of the first sample in a toy dataset predicted by three different classifiers and averaged by the `VotingClassifier`. First, three examplary classifiers are initialized (`DecisionTreeClassifier`, `KNeighborsClassifier`, and `SVC`) and used to initialize a soft-voting `VotingClassifier` with weights `[2, 1, 2]`, which means that the predicted probabilities of the `DecisionTreeClassifier` and `SVC` count 5 times as much as the weights of the `KNeighborsClassifier` classifier when the averaged probability is calculated. """ print(__doc__) from itertools import product import numpy as np import matplotlib.pyplot as plt from sklearn import datasets from sklearn.tree import DecisionTreeClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.svm import SVC from sklearn.ensemble import VotingClassifier # Loading some example data iris = datasets.load_iris() X = iris.data[:, [0, 2]] y = iris.target # Training classifiers clf1 = DecisionTreeClassifier(max_depth=4) clf2 = KNeighborsClassifier(n_neighbors=7) clf3 = SVC(kernel='rbf', probability=True) eclf = VotingClassifier(estimators=[('dt', clf1), ('knn', clf2), ('svc', clf3)], voting='soft', weights=[2, 1, 2]) clf1.fit(X, y) clf2.fit(X, y) clf3.fit(X, y) eclf.fit(X, y) # Plotting decision regions x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1 y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1 xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.1), np.arange(y_min, y_max, 0.1)) f, axarr = plt.subplots(2, 2, sharex='col', sharey='row', figsize=(10, 8)) for idx, clf, tt in zip(product([0, 1], [0, 1]), [clf1, clf2, clf3, eclf], ['Decision Tree (depth=4)', 'KNN (k=7)', 'Kernel SVM', 'Soft Voting']): Z = clf.predict(np.c_[xx.ravel(), yy.ravel()]) Z = Z.reshape(xx.shape) axarr[idx[0], idx[1]].contourf(xx, yy, Z, alpha=0.4) axarr[idx[0], idx[1]].scatter(X[:, 0], X[:, 1], c=y, alpha=0.8) axarr[idx[0], idx[1]].set_title(tt) plt.show()
bsd-3-clause
timothy1191xa/project-epsilon-1
code/utils/scripts/convolution_normal_script.py
3
3081
""" Purpose: ----------------------------------------------------------------------------------- We generate convolved hemodynamic neural prediction into seperated txt files for all four conditions (task, gain, lost, distance), and also generate plots for 4 BOLD signals over time for each of them too. Steps: ----------------------------------------------------------------------------------- 1. Extract 4 conditions of each subject's run 2. Load the data to get the 4th dimension shape 3. Convolve with hrf 4. Plot sampled HRFs with the high resolution neural time course 5. Save the convolved data into txt files """ from __future__ import absolute_import, division, print_function import sys, os sys.path.append(os.path.join(os.path.dirname(__file__), "../functions/")) import numpy as np import matplotlib.pyplot as plt import nibabel as nib from stimuli import * from scipy.stats import gamma from organize_behavior_data import * # Create the necessary directories if they do not exist dirs = ['../../../txt_output', '../../../txt_output/conv_normal',\ '../../../fig','../../../fig/conv_normal'] for d in dirs: if not os.path.exists(d): os.makedirs(d) # Locate the different paths project_path = '../../../' data_path = project_path+'data/ds005/' #change here to get your subject ! subject_list = [str(i) for i in range(1,17)] #subject_list = ['1','5'] #change here to get your run number ! run_list = [str(i) for i in range(1,4)] cond_list = [str(i) for i in range(1,5)] # Loop through conditions by subject and by run condition_paths = [('ds005_sub' + s.zfill(3) + '_t1r' + r +'_conv'+ c.zfill(3), \ data_path + 'sub' + s.zfill(3) + '/model/model001/onsets/task001_run' \ + r.zfill(3) + '/cond'+ c.zfill(3) + '.txt') for c in cond_list \ for r in run_list \ for s in subject_list] condition = ['task','gain','loss','dist'] #Use the first image to get the data dimensions image_path = data_path + 'sub001/BOLD/task001_run001/bold.nii.gz' img = nib.load(image_path) data_int = img.get_data() data = data_int.astype(float) #set the TR TR = 2.0 #get canonical hrf tr_times = np.arange(0, data.shape[2], TR) hrf_at_trs = hrf(tr_times) n_vols = data.shape[-1] vol_shape = data.shape[:-1] all_tr_times = np.arange(data.shape[-1]) * TR for cond_path in condition_paths: name = cond_path[0] path = cond_path[1] cond = np.loadtxt(path, skiprows = 1) neural_prediction = events2neural(cond,TR,n_vols) convolved = np.convolve(neural_prediction, hrf_at_trs) convolved = convolved[:-(len(hrf_at_trs)-1)] #plot plt.plot(all_tr_times, neural_prediction, label="neural_prediction") plt.plot(all_tr_times, convolved, label="convolved") plt.title(name+'_%s'%(condition[int(name[24])-1])) plt.xlabel('Time (seconds)') plt.ylabel('Convolved values at TR onsets (condition: %s)'%(condition[int(name[24])-1])) plt.legend(loc='lower right') plt.savefig(dirs[3]+'/'+ name +'_canonical.png') plt.close() #save the txt file np.savetxt(dirs[1] +'/'+ name +'_canonical.txt', convolved)
bsd-3-clause
theoryno3/scikit-learn
sklearn/covariance/__init__.py
389
1157
""" The :mod:`sklearn.covariance` module includes methods and algorithms to robustly estimate the covariance of features given a set of points. The precision matrix defined as the inverse of the covariance is also estimated. Covariance estimation is closely related to the theory of Gaussian Graphical Models. """ from .empirical_covariance_ import empirical_covariance, EmpiricalCovariance, \ log_likelihood from .shrunk_covariance_ import shrunk_covariance, ShrunkCovariance, \ ledoit_wolf, ledoit_wolf_shrinkage, \ LedoitWolf, oas, OAS from .robust_covariance import fast_mcd, MinCovDet from .graph_lasso_ import graph_lasso, GraphLasso, GraphLassoCV from .outlier_detection import EllipticEnvelope __all__ = ['EllipticEnvelope', 'EmpiricalCovariance', 'GraphLasso', 'GraphLassoCV', 'LedoitWolf', 'MinCovDet', 'OAS', 'ShrunkCovariance', 'empirical_covariance', 'fast_mcd', 'graph_lasso', 'ledoit_wolf', 'ledoit_wolf_shrinkage', 'log_likelihood', 'oas', 'shrunk_covariance']
bsd-3-clause
AstroVPK/libcarma
tests/test_pickle.py
2
2259
import math import numpy as np import copy import unittest import random import psutil import os import sys import cPickle as pickle import tempfile import shutil import pdb import matplotlib.pyplot as plt import matplotlib.cm as colormap try: import kali.carma except ImportError: print 'Cannot import kali.carma! kali is not setup. Setup kali by sourcing bin/setup.sh' sys.exit(1) try: import kali.s82 except ImportError: print 'Cannot import kali.s82! kali is not setup. Setup kali by sourcing bin/setup.sh' sys.exit(1) plt.ion() skipWorking = False BURNSEED = 731647386 DISTSEED = 219038190 NOISESEED = 87238923 SAMPLESEED = 36516342 ZSSEED = 384789247 WALKERSEED = 738472981 MOVESEED = 131343786 XSEED = 2348713647 @unittest.skipUnless(kali.s82.ONLINE, 's82 server offline! Skipping test...') class TestPickleLC(unittest.TestCase): def setUp(self): self.dirpath = tempfile.mkdtemp() def tearDown(self): shutil.rmtree(self.dirpath) def test_pickleLightcurve(self): aNewSDSSlc = kali.s82.sdssLC(name='', band='r') lcName = aNewSDSSlc.name pickle.dump(aNewSDSSlc, open(os.path.join(self.dirpath, '%s.pkl'%(aNewSDSSlc.name)), 'wb')) del aNewSDSSlc aNewSDSSlcReborn = pickle.load(open(os.path.join(self.dirpath, '%s.pkl'%(lcName)), 'rb')) aNewSDSSlcDoppelganger = kali.s82.sdssLC(name='%s'%(lcName), band='r') np.testing.assert_array_equal(aNewSDSSlcReborn.t, aNewSDSSlcDoppelganger.t) @unittest.skipUnless(kali.s82.ONLINE, 's82 server offline! Skipping test...') class TestPickleTask(unittest.TestCase): def setUp(self): self.dirpath = tempfile.mkdtemp() def tearDown(self): shutil.rmtree(self.dirpath) def test_pickleLightcurve(self): aNewSDSSlc = kali.s82.sdssLC(name='', band='r') aNewTask = kali.carma.CARMATask(1, 0) aNewTask.fit(aNewSDSSlc) aNewTask.bestTau pickle.dump(aNewTask, open(os.path.join(self.dirpath, 'Task.pkl'), 'wb')) aNewTaskReborn = pickle.load(open(os.path.join(self.dirpath, 'Task.pkl'), 'rb')) np.testing.assert_array_equal(aNewTaskReborn.timescaleChain, aNewTask.timescaleChain) if __name__ == "__main__": unittest.main()
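

# ------------------------------------------------------------------------------
# Editor's addition: the generic pickle round-trip pattern these tests exercise,
# shown on a plain dictionary so it runs without the s82 server or kali objects.
def _demo_pickle_roundtrip():
    dirpath = tempfile.mkdtemp()
    try:
        payload = {'t': np.arange(5), 'y': np.random.rand(5)}  # stand-in for a lightcurve
        path = os.path.join(dirpath, 'demo.pkl')
        pickle.dump(payload, open(path, 'wb'))
        reborn = pickle.load(open(path, 'rb'))
        np.testing.assert_array_equal(reborn['t'], payload['t'])
    finally:
        shutil.rmtree(dirpath)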
gpl-2.0
bgroveben/python3_machine_learning_projects
oreilly_GANs_for_beginners/oreilly_GANs_for_beginners/introduction_to_ml_with_python/mglearn/mglearn/make_blobs.py
6
3190
import numbers import numpy as np from sklearn.utils import check_array, check_random_state from sklearn.utils import shuffle as shuffle_ def make_blobs(n_samples=100, n_features=2, centers=2, cluster_std=1.0, center_box=(-10.0, 10.0), shuffle=True, random_state=None): """Generate isotropic Gaussian blobs for clustering. Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- n_samples : int, or tuple, optional (default=100) The total number of points equally divided among clusters. n_features : int, optional (default=2) The number of features for each sample. centers : int or array of shape [n_centers, n_features], optional (default=3) The number of centers to generate, or the fixed center locations. cluster_std: float or sequence of floats, optional (default=1.0) The standard deviation of the clusters. center_box: pair of floats (min, max), optional (default=(-10.0, 10.0)) The bounding box for each cluster center when centers are generated at random. shuffle : boolean, optional (default=True) Shuffle the samples. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- X : array of shape [n_samples, n_features] The generated samples. y : array of shape [n_samples] The integer labels for cluster membership of each sample. Examples -------- >>> from sklearn.datasets.samples_generator import make_blobs >>> X, y = make_blobs(n_samples=10, centers=3, n_features=2, ... random_state=0) >>> print(X.shape) (10, 2) >>> y array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0]) See also -------- make_classification: a more intricate variant """ generator = check_random_state(random_state) if isinstance(centers, numbers.Integral): centers = generator.uniform(center_box[0], center_box[1], size=(centers, n_features)) else: centers = check_array(centers) n_features = centers.shape[1] if isinstance(cluster_std, numbers.Real): cluster_std = np.ones(len(centers)) * cluster_std X = [] y = [] n_centers = centers.shape[0] if isinstance(n_samples, numbers.Integral): n_samples_per_center = [int(n_samples // n_centers)] * n_centers for i in range(n_samples % n_centers): n_samples_per_center[i] += 1 else: n_samples_per_center = n_samples for i, (n, std) in enumerate(zip(n_samples_per_center, cluster_std)): X.append(centers[i] + generator.normal(scale=std, size=(n, n_features))) y += [i] * n X = np.concatenate(X) y = np.array(y) if shuffle: X, y = shuffle_(X, y, random_state=generator) return X, y
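
# ---------------------------------------------------------------------------
# Editor's addition: a hedged sketch of the per-cluster options handled above
# (explicit centers, one cluster_std per center, per-cluster sample counts).
# The numbers are illustrative only.
#
# >>> X, y = make_blobs(n_samples=[5, 15], centers=[[0, 0], [5, 5]],
# ...                   cluster_std=[0.5, 2.0], random_state=0)
# >>> X.shape
# (20, 2)
# >>> np.bincount(y)
# array([ 5, 15])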
mit
Achuth17/scikit-learn
sklearn/ensemble/__init__.py
217
1307
""" The :mod:`sklearn.ensemble` module includes ensemble-based methods for classification and regression. """ from .base import BaseEnsemble from .forest import RandomForestClassifier from .forest import RandomForestRegressor from .forest import RandomTreesEmbedding from .forest import ExtraTreesClassifier from .forest import ExtraTreesRegressor from .bagging import BaggingClassifier from .bagging import BaggingRegressor from .weight_boosting import AdaBoostClassifier from .weight_boosting import AdaBoostRegressor from .gradient_boosting import GradientBoostingClassifier from .gradient_boosting import GradientBoostingRegressor from .voting_classifier import VotingClassifier from . import bagging from . import forest from . import weight_boosting from . import gradient_boosting from . import partial_dependence __all__ = ["BaseEnsemble", "RandomForestClassifier", "RandomForestRegressor", "RandomTreesEmbedding", "ExtraTreesClassifier", "ExtraTreesRegressor", "BaggingClassifier", "BaggingRegressor", "GradientBoostingClassifier", "GradientBoostingRegressor", "AdaBoostClassifier", "AdaBoostRegressor", "VotingClassifier", "bagging", "forest", "gradient_boosting", "partial_dependence", "weight_boosting"]
bsd-3-clause
robertwb/incubator-beam
sdks/python/apache_beam/runners/interactive/utils.py
4
9728
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Utilities to be used in Interactive Beam. """ import functools import hashlib import json import logging import pandas as pd from apache_beam.portability.api.beam_runner_api_pb2 import TestStreamPayload from apache_beam.testing.test_stream import WindowedValueHolder from apache_beam.typehints.schemas import named_fields_from_element_type _LOGGER = logging.getLogger(__name__) def to_element_list( reader, # type: Generator[Union[TestStreamPayload.Event, WindowedValueHolder]] coder, # type: Coder include_window_info, # type: bool n=None, # type: int include_time_events=False, # type: bool ): # type: (...) -> List[WindowedValue] """Returns an iterator that properly decodes the elements from the reader. """ # Defining a generator like this makes it easier to limit the count of # elements read. Otherwise, the count limit would need to be duplicated. def elements(): for e in reader: if isinstance(e, TestStreamPayload.Event): if (e.HasField('watermark_event') or e.HasField('processing_time_event')): if include_time_events: yield e else: for tv in e.element_event.elements: decoded = coder.decode(tv.encoded_element) yield ( decoded.windowed_value if include_window_info else decoded.windowed_value.value) elif isinstance(e, WindowedValueHolder): yield ( e.windowed_value if include_window_info else e.windowed_value.value) else: yield e # Because we can yield multiple elements from a single TestStreamFileRecord, # we have to limit the count here to ensure that `n` is fulfilled. count = 0 for e in elements(): if n and count >= n: break yield e if not isinstance(e, TestStreamPayload.Event): count += 1 def elements_to_df(elements, include_window_info=False, element_type=None): # type: (List[WindowedValue], bool, Any) -> DataFrame """Parses the given elements into a Dataframe. If the elements are a list of WindowedValues, then it will break out the elements into their own DataFrame and return it. If include_window_info is True, then it will concatenate the windowing information onto the elements DataFrame. 
""" try: columns_names = [ name for name, _ in named_fields_from_element_type(element_type) ] except TypeError: columns_names = None rows = [] windowed_info = [] for e in elements: rows.append(e.value) if include_window_info: windowed_info.append([e.timestamp.micros, e.windows, e.pane_info]) using_dataframes = isinstance(element_type, pd.DataFrame) using_series = isinstance(element_type, pd.Series) if using_dataframes or using_series: rows_df = pd.concat(rows) else: rows_df = pd.DataFrame(rows, columns=columns_names) if include_window_info and not using_series: windowed_info_df = pd.DataFrame( windowed_info, columns=['event_time', 'windows', 'pane_info']) final_df = pd.concat([rows_df, windowed_info_df], axis=1) else: final_df = rows_df return final_df def register_ipython_log_handler(): # type: () -> None """Adds the IPython handler to a dummy parent logger (named 'apache_beam.runners.interactive') of all interactive modules' loggers so that if is_in_notebook, logging displays the logs as HTML in frontends. """ # apache_beam.runners.interactive is not a module, thus this "root" logger is # a dummy one created to hold the IPython log handler. When children loggers # have propagate as True (by default) and logging level as NOTSET (by default, # so the "root" logger's logging level takes effect), the IPython log handler # will be triggered at the "root"'s own logging level. And if a child logger # sets its logging level, it can take control back. interactive_root_logger = logging.getLogger('apache_beam.runners.interactive') if any([isinstance(h, IPythonLogHandler) for h in interactive_root_logger.handlers]): return interactive_root_logger.setLevel(logging.INFO) interactive_root_logger.addHandler(IPythonLogHandler()) # Disable the propagation so that logs emitted from interactive modules should # only be handled by loggers and handlers defined within interactive packages. interactive_root_logger.propagate = False class IPythonLogHandler(logging.Handler): """A logging handler to display logs as HTML in IPython backed frontends.""" # TODO(BEAM-7923): Switch to Google hosted CDN once # https://code.google.com/archive/p/google-ajax-apis/issues/637 is resolved. log_template = """ <link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.4.1/css/bootstrap.min.css" integrity="sha384-Vkoo8x4CGsO3+Hhxv8T/Q5PaXtkKtu6ug5TOeNV6gBiFeWPGFN9MuhOf23Q9Ifjh" crossorigin="anonymous"> <div class="alert alert-{level}">{msg}</div>""" logging_to_alert_level_map = { logging.CRITICAL: 'danger', logging.ERROR: 'danger', logging.WARNING: 'warning', logging.INFO: 'info', logging.DEBUG: 'dark', logging.NOTSET: 'light' } def emit(self, record): try: from html import escape from IPython.core.display import HTML from IPython.core.display import display display( HTML( self.log_template.format( level=self.logging_to_alert_level_map[record.levelno], msg=escape(record.msg % record.args)))) except ImportError: pass # NOOP when dependencies are not available. def obfuscate(*inputs): # type: (*Any) -> str """Obfuscates any inputs into a hexadecimal string.""" str_inputs = [str(input) for input in inputs] merged_inputs = '_'.join(str_inputs) return hashlib.md5(merged_inputs.encode('utf-8')).hexdigest() class ProgressIndicator(object): """An indicator visualizing code execution in progress.""" # TODO(BEAM-7923): Switch to Google hosted CDN once # https://code.google.com/archive/p/google-ajax-apis/issues/637 is resolved. 
spinner_template = """ <link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.4.1/css/bootstrap.min.css" integrity="sha384-Vkoo8x4CGsO3+Hhxv8T/Q5PaXtkKtu6ug5TOeNV6gBiFeWPGFN9MuhOf23Q9Ifjh" crossorigin="anonymous"> <div id="{id}" class="spinner-border text-info" role="status"> </div>""" spinner_removal_template = """ $("#{id}").remove();""" def __init__(self, enter_text, exit_text): # type: (str, str) -> None self._id = 'progress_indicator_{}'.format(obfuscate(id(self))) self._enter_text = enter_text self._exit_text = exit_text def __enter__(self): try: from IPython.core.display import HTML from IPython.core.display import display from apache_beam.runners.interactive import interactive_environment as ie if ie.current_env().is_in_notebook: display(HTML(self.spinner_template.format(id=self._id))) else: display(self._enter_text) except ImportError as e: _LOGGER.error( 'Please use interactive Beam features in an IPython' 'or notebook environment: %s' % e) def __exit__(self, exc_type, exc_value, traceback): try: from IPython.core.display import Javascript from IPython.core.display import display from IPython.core.display import display_javascript from apache_beam.runners.interactive import interactive_environment as ie if ie.current_env().is_in_notebook: script = self.spinner_removal_template.format(id=self._id) display_javascript( Javascript( ie._JQUERY_WITH_DATATABLE_TEMPLATE.format( customized_script=script))) else: display(self._exit_text) except ImportError as e: _LOGGER.error( 'Please use interactive Beam features in an IPython' 'or notebook environment: %s' % e) def progress_indicated(func): # type: (Callable[..., Any]) -> Callable[..., Any] """A decorator using a unique progress indicator as a context manager to execute the given function within.""" @functools.wraps(func) def run_within_progress_indicator(*args, **kwargs): with ProgressIndicator('Processing...', 'Done.'): return func(*args, **kwargs) return run_within_progress_indicator def as_json(func): # type: (Callable[..., Any]) -> Callable[..., str] """A decorator convert python objects returned by callables to json string. The decorated function should always return an object parsable by json.dumps. If the object is not parsable, the str() of original object is returned instead. """ def return_as_json(*args, **kwargs): try: return_value = func(*args, **kwargs) return json.dumps(return_value) except TypeError: return str(return_value) return return_as_json
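

# Editor's addition: a minimal sketch of two helpers defined above (obfuscate
# and as_json). The demo values are arbitrary; nothing here is part of the
# original module.
def _demo_utils():
  # obfuscate(): md5 hexdigest of the '_'-joined str() of its inputs.
  token = obfuscate('pcoll', 42)
  assert len(token) == 32

  # as_json(): serialize a callable's return value, falling back to str().
  @as_json
  def _meta():
    return {'n': 3}

  assert _meta() == '{"n": 3}'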
apache-2.0
hantek/deeplearn_hsi
ksc_joint_SdA.py
1
12752
__author__ = "Zhouhan LIN" __date__ = "June 2013" __version__ = "1.0" import os import sys import time import pdb import scipy.io as sio import numpy import scipy import theano import theano.tensor as T from scipy.stats import t from sklearn import svm from sklearn.metrics import confusion_matrix from theano.tensor.shared_randomstreams import RandomStreams from SdA import SdA from hsi_utils import * cmap = numpy.asarray( [[0, 0, 0], [95, 205, 50], [255, 0, 255], [215, 115, 0], [180, 30, 0], [0, 50, 0], [75, 0, 0], [255, 255, 255], [145, 130, 135], [255, 255, 170], [255, 200, 80], [60, 200, 255], [10, 65, 125], [0, 0, 255]], dtype='int32') def run_sda(datasets=None, batch_size=100, window_size=7, n_principle=4, pretraining_epochs=2000, pretrain_lr=0.02, training_epochs=10000, finetune_lr=0.008, hidden_layers_sizes=[310, 100], corruption_levels = [0., 0.]): """ This function maps spatial PCs to a deep representation. Parameters: datasets: A list containing 3 tuples. Each tuple have 2 entries, which are theano.shared variables. They stands for train, valid, test data. batch_size: Batch size. pretraining_epochs: Pretraining epoches. pretrain_lr: Pretraining learning rate. training_epochs: Fine-tuning epoches. finetune_lr: Fine-tuning learning rate. hidden_layers_sizes:A list containing integers. Each intger specifies a size of a hidden layer. corruption_levels: A list containing floats in the inteval [0, 1]. Each number specifies the corruption level of its corresponding hidden layer. Return: spatial_rep: 2-D numpy.array. Deep representation for each spatial sample. test_score: Accuracy this representations yield on the trained SdA. """ print 'finetuning learning rate=', finetune_lr print 'pretraining learning rate=', pretrain_lr print 'pretraining epoches=', pretraining_epochs print 'fine tuning epoches=', training_epochs print 'batch size=', batch_size print 'hidden layers sizes=', hidden_layers_sizes print 'corruption levels=', corruption_levels # compute number of minibatches for training, validation and testing n_train_batches = datasets[0][0].get_value(borrow=True).shape[0] n_train_batches /= batch_size # numpy random generator numpy_rng = numpy.random.RandomState(89677) print '... building the model' # construct the stacked denoising autoencoder class sda = SdA(numpy_rng=numpy_rng, n_ins=datasets[0][0].get_value(borrow=True).shape[1], hidden_layers_sizes=hidden_layers_sizes, n_outs=gnd_img.max()) ################################################################################ # PRETRAINING THE MODEL # ######################### print '... getting the pretraining functions' pretraining_fns = sda.pretraining_functions(train_set_x=datasets[0][0], batch_size=batch_size) print '... 
pre-training the model' start_time = time.clock() ## Pre-train layer-wise for i in xrange(sda.n_layers): # go through pretraining epochs for epoch in xrange(pretraining_epochs): # go through the training set c = [] for batch_index in xrange(n_train_batches): c.append(pretraining_fns[i](index=batch_index, corruption=corruption_levels[i], lr=pretrain_lr)) if epoch % 100 == 0: print 'Pre-training layer %i, epoch %d, cost ' % (i, epoch), print numpy.mean(c) end_time = time.clock() print >> sys.stderr, ('The pretraining code for file ' + os.path.split(__file__)[1] + ' ran for %.2fm' % ((end_time - start_time) / 60.)) ################################################################################ # FINETUNING THE MODEL # ######################## # get the training, validation and testing function for the model print '... getting the finetuning functions' train_fn, validate_model, test_model = sda.build_finetune_functions( datasets=datasets, batch_size=batch_size, learning_rate=finetune_lr) print '... finetunning the model' # early-stopping parameters patience = 100 * n_train_batches # look as this many examples regardless patience_increase = 2. # wait this much longer when a new best is # found improvement_threshold = 0.995 # a relative improvement of this much is # considered significant validation_frequency = min(10 * n_train_batches, patience / 2) # go through this many # minibatche before checking the network # on the validation set; in this case we # check every epoch best_params = None best_validation_loss = numpy.inf test_score = 0. start_time = time.clock() done_looping = False epoch = 0 while (epoch < training_epochs) and (not done_looping): epoch = epoch + 1 for minibatch_index in xrange(n_train_batches): minibatch_avg_cost = train_fn(minibatch_index) iter = (epoch - 1) * n_train_batches + minibatch_index if (iter + 1) % validation_frequency == 0: validation_losses = validate_model() this_validation_loss = numpy.mean(validation_losses) print('epoch %i, minibatch %i/%i, validation error %f %%' % (epoch, minibatch_index + 1, n_train_batches, this_validation_loss * 100.)) # if we got the best validation score until now if this_validation_loss < best_validation_loss: # improve patience if loss improvement is good enough if (this_validation_loss < best_validation_loss * improvement_threshold): patience = max(patience, iter * patience_increase) # save best validation score and iteration number best_validation_loss = this_validation_loss best_iter = iter # test it on the test set test_losses = test_model() test_score = numpy.mean(test_losses) print((' epoch %i, minibatch %i/%i, test error of ' 'best model %f %%') % (epoch, minibatch_index + 1, n_train_batches, test_score * 100.)) end_time = time.clock() print(('Optimization complete with best validation score of %f %%,' 'with test performance %f %%') % (best_validation_loss * 100., test_score * 100.)) print >> sys.stdout, ('The training code for file ' + os.path.split(__file__)[1] + ' ran for %.2fm' % ((end_time - start_time) / 60.)) # keep the following line consistent with line 227, function "prepare_data" filename = 'ksc_l1sda_pt%d_ft%d_lrp%.4f_f%.4f_bs%d_pca%d_ws%d' % \ (pretraining_epochs, training_epochs, pretrain_lr, finetune_lr, batch_size, n_principle, window_size) print '... 
classifying test set with learnt model:' pred_func = theano.function(inputs=[sda.x], outputs=sda.logLayer.y_pred) pred_test = pred_func(datasets[2][0].get_value(borrow=True)) true_test = datasets[2][1].get_value(borrow=True) true_valid = datasets[1][1].get_value(borrow=True) true_train = datasets[0][1].get_value(borrow=True) result_analysis(pred_test, true_train, true_valid, true_test) print '... classifying the whole image with learnt model:' print '...... extracting data' data_spectral, data_spatial, _, _ = \ T_pca_constructor(hsi_img=img, gnd_img=gnd_img, n_principle=n_principle, window_size=window_size, flag='unsupervised', merge=True) start_time = time.clock() print '...... begin ' y = pred_func(data_spectral) + 1 print '...... done ' end_time = time.clock() print 'finished, running time:%fs' % (end_time - start_time) y_rgb = cmap[y, :] margin = (window_size / 2) * 2 # floor it to a multiple of 2 y_image = y_rgb.reshape(width - margin, height - margin, 3) scipy.misc.imsave(filename + 'wholeimg.png' , y_image) print 'Saving classification results' sio.savemat(filename + 'wholeimg.mat', {'y': y.reshape(width - margin, height - margin)}) ############################################################################ print '... performing Student\'s t-test' best_c = 10000. best_g = 10. svm_classifier = svm.SVC(C=best_c, gamma=best_g, kernel='rbf') svm_classifier.fit(datasets[0][0].get_value(), datasets[0][1].get_value()) data = [numpy.vstack((datasets[1][0].get_value(), datasets[2][0].get_value())), numpy.hstack((datasets[1][1].get_value(), datasets[2][1].get_value()))] numpy_rng = numpy.random.RandomState(89677) num_test = 100 print 'Total number of tests: %d' % num_test k_sae = [] k_svm = [] for i in xrange(num_test): [_, _], [_, _], [test_x, test_y], _ = \ train_valid_test(data, ratio=[0, 1, 1], batch_size=1, random_state=numpy_rng.random_integers(1e10)) test_y = test_y + 1 # fix the label scale problem pred_y = pred_func(test_x) cm = confusion_matrix(test_y, pred_y) pr_a = cm.trace()*1.0 / test_y.size pr_e = ((cm.sum(axis=0)*1.0/test_y.size) * \ (cm.sum(axis=1)*1.0/test_y.size)).sum() k_sae.append( (pr_a - pr_e) / (1 - pr_e) ) pred_y = svm_classifier.predict(test_x) cm = confusion_matrix(test_y, pred_y) pr_a = cm.trace()*1.0 / test_y.size pr_e = ((cm.sum(axis=0)*1.0/test_y.size) * \ (cm.sum(axis=1)*1.0/test_y.size)).sum() k_svm.append( (pr_a - pr_e) / (1 - pr_e) ) std_k_sae = numpy.std(k_sae) std_k_svm = numpy.std(k_svm) mean_k_sae = numpy.mean(k_sae) mean_k_svm = numpy.mean(k_svm) left = ( (mean_k_sae - mean_k_svm) * numpy.sqrt(num_test*2-2)) \ / ( numpy.sqrt(2./num_test) * num_test * (std_k_sae**2 + std_k_svm**2) ) rv = t(num_test*2.0 - 2) right = rv.ppf(0.95) print '\tstd\t\tmean' print 'k_sae\t%f\t%f' % (std_k_sae, mean_k_sae) print 'k_svm\t%f\t%f' % (std_k_svm, mean_k_svm) if left > right: print 'left = %f, right = %f, test PASSED.' % (left, right) else: print 'left = %f, right = %f, test FAILED.' % (left, right) return test_score if __name__ == '__main__': print '... loanding data' hsi_file = u'/home/hantek/data/hsi_data/kennedy/KSC.mat' gnd_file = u'/home/hantek/data/hsi_data/kennedy/KSC_gt.mat' data = sio.loadmat(hsi_file) img = scale_to_unit_interval(data['KSC'].astype(theano.config.floatX)) width = img.shape[0] height = img.shape[1] bands = img.shape[2] data = sio.loadmat(gnd_file) gnd_img = data['KSC_gt'] gnd_img = gnd_img.astype(numpy.int32) print '... 
extracting train-valid-test sets' datasets, _, _, _ = \ prepare_data(hsi_img=img, gnd_img=gnd_img, merge=True, window_size=7, n_principle=3, batch_size=50) print '... Running hybrid feature extraction on SdA' spatial_accuracy = run_sda(datasets=datasets, batch_size=100, window_size=7, n_principle=3, pretraining_epochs=500, pretrain_lr=0.5, training_epochs=100000, finetune_lr=0.05, hidden_layers_sizes=[280, 100], corruption_levels = [0., 0.])
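

# ------------------------------------------------------------------------------
# Editor's addition: the Cohen's kappa arithmetic used in the significance-test
# loop above, spelled out on a made-up 2x2 confusion matrix.
def _demo_kappa():
    cm = numpy.array([[40., 10.],
                      [5., 45.]])          # hypothetical confusion matrix
    n = cm.sum()
    pr_a = cm.trace() / n                                       # observed agreement
    pr_e = ((cm.sum(axis=0) / n) * (cm.sum(axis=1) / n)).sum()  # chance agreement
    return (pr_a - pr_e) / (1 - pr_e)                           # kappa = 0.7 here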
bsd-2-clause
henrykironde/scikit-learn
sklearn/neighbors/tests/test_dist_metrics.py
230
5234
import itertools
import pickle

import numpy as np
from numpy.testing import assert_array_almost_equal

import scipy
from scipy.spatial.distance import cdist
from sklearn.neighbors.dist_metrics import DistanceMetric
from nose import SkipTest


def dist_func(x1, x2, p):
    return np.sum((x1 - x2) ** p) ** (1. / p)


def cmp_version(version1, version2):
    version1 = tuple(map(int, version1.split('.')[:2]))
    version2 = tuple(map(int, version2.split('.')[:2]))

    if version1 < version2:
        return -1
    elif version1 > version2:
        return 1
    else:
        return 0


class TestMetrics:
    def __init__(self, n1=20, n2=25, d=4, zero_frac=0.5,
                 rseed=0, dtype=np.float64):
        np.random.seed(rseed)
        self.X1 = np.random.random((n1, d)).astype(dtype)
        self.X2 = np.random.random((n2, d)).astype(dtype)

        # make boolean arrays: ones and zeros
        self.X1_bool = self.X1.round(0)
        self.X2_bool = self.X2.round(0)

        V = np.random.random((d, d))
        VI = np.dot(V, V.T)

        self.metrics = {'euclidean': {},
                        'cityblock': {},
                        'minkowski': dict(p=(1, 1.5, 2, 3)),
                        'chebyshev': {},
                        'seuclidean': dict(V=(np.random.random(d),)),
                        'wminkowski': dict(p=(1, 1.5, 3),
                                           w=(np.random.random(d),)),
                        'mahalanobis': dict(VI=(VI,)),
                        'hamming': {},
                        'canberra': {},
                        'braycurtis': {}}

        self.bool_metrics = ['matching', 'jaccard', 'dice',
                             'kulsinski', 'rogerstanimoto', 'russellrao',
                             'sokalmichener', 'sokalsneath']

    def test_cdist(self):
        for metric, argdict in self.metrics.items():
            keys = argdict.keys()
            for vals in itertools.product(*argdict.values()):
                kwargs = dict(zip(keys, vals))
                D_true = cdist(self.X1, self.X2, metric, **kwargs)
                yield self.check_cdist, metric, kwargs, D_true

        for metric in self.bool_metrics:
            D_true = cdist(self.X1_bool, self.X2_bool, metric)
            yield self.check_cdist_bool, metric, D_true

    def check_cdist(self, metric, kwargs, D_true):
        if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
            raise SkipTest("Canberra distance incorrect in scipy < 0.9")
        dm = DistanceMetric.get_metric(metric, **kwargs)
        D12 = dm.pairwise(self.X1, self.X2)
        assert_array_almost_equal(D12, D_true)

    def check_cdist_bool(self, metric, D_true):
        dm = DistanceMetric.get_metric(metric)
        D12 = dm.pairwise(self.X1_bool, self.X2_bool)
        assert_array_almost_equal(D12, D_true)

    def test_pdist(self):
        for metric, argdict in self.metrics.items():
            keys = argdict.keys()
            for vals in itertools.product(*argdict.values()):
                kwargs = dict(zip(keys, vals))
                D_true = cdist(self.X1, self.X1, metric, **kwargs)
                yield self.check_pdist, metric, kwargs, D_true

        for metric in self.bool_metrics:
            D_true = cdist(self.X1_bool, self.X1_bool, metric)
            yield self.check_pdist_bool, metric, D_true

    def check_pdist(self, metric, kwargs, D_true):
        if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
            raise SkipTest("Canberra distance incorrect in scipy < 0.9")
        dm = DistanceMetric.get_metric(metric, **kwargs)
        D12 = dm.pairwise(self.X1)
        assert_array_almost_equal(D12, D_true)

    def check_pdist_bool(self, metric, D_true):
        dm = DistanceMetric.get_metric(metric)
        D12 = dm.pairwise(self.X1_bool)
        assert_array_almost_equal(D12, D_true)


def test_haversine_metric():
    def haversine_slow(x1, x2):
        return 2 * np.arcsin(np.sqrt(np.sin(0.5 * (x1[0] - x2[0])) ** 2
                                     + np.cos(x1[0]) * np.cos(x2[0]) *
                                     np.sin(0.5 * (x1[1] - x2[1])) ** 2))

    X = np.random.random((10, 2))

    haversine = DistanceMetric.get_metric("haversine")

    D1 = haversine.pairwise(X)
    D2 = np.zeros_like(D1)
    for i, x1 in enumerate(X):
        for j, x2 in enumerate(X):
            D2[i, j] = haversine_slow(x1, x2)

    assert_array_almost_equal(D1, D2)
    assert_array_almost_equal(haversine.dist_to_rdist(D1),
                              np.sin(0.5 * D2) ** 2)


def test_pyfunc_metric():
    X = np.random.random((10, 3))

    euclidean = DistanceMetric.get_metric("euclidean")
    pyfunc = DistanceMetric.get_metric("pyfunc", func=dist_func, p=2)

    # Check if both callable metric and predefined metric initialized
    # DistanceMetric object is picklable
    euclidean_pkl = pickle.loads(pickle.dumps(euclidean))
    pyfunc_pkl = pickle.loads(pickle.dumps(pyfunc))

    D1 = euclidean.pairwise(X)
    D2 = pyfunc.pairwise(X)

    D1_pkl = euclidean_pkl.pairwise(X)
    D2_pkl = pyfunc_pkl.pairwise(X)

    assert_array_almost_equal(D1, D2)
    assert_array_almost_equal(D1_pkl, D2_pkl)
bsd-3-clause
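For reference, a minimal usage sketch of the DistanceMetric API exercised by the tests above. It assumes a scikit-learn version where DistanceMetric is still importable from sklearn.neighbors, as in this file; the data here is synthetic.

import numpy as np
from scipy.spatial.distance import cdist
from sklearn.neighbors import DistanceMetric

rng = np.random.RandomState(0)
X1 = rng.random_sample((5, 3))
X2 = rng.random_sample((7, 3))

# Build a metric object and compute the full (5, 7) pairwise distance matrix.
dm = DistanceMetric.get_metric("minkowski", p=3)
D = dm.pairwise(X1, X2)

# The result should agree with scipy's cdist for the same metric and parameters.
assert np.allclose(D, cdist(X1, X2, "minkowski", p=3))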
Djabbz/scikit-learn
sklearn/gaussian_process/gpc.py
9
31542
"""Gaussian processes classification.""" # Authors: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de> # # License: BSD 3 clause import warnings from operator import itemgetter import numpy as np from scipy.linalg import cholesky, cho_solve, solve from scipy.optimize import fmin_l_bfgs_b from scipy.special import erf from sklearn.base import BaseEstimator, ClassifierMixin, clone from sklearn.gaussian_process.kernels \ import RBF, CompoundKernel, ConstantKernel as C from sklearn.utils.validation import check_X_y, check_is_fitted, check_array from sklearn.utils import check_random_state from sklearn.preprocessing import LabelEncoder from sklearn.multiclass import OneVsRestClassifier, OneVsOneClassifier # Values required for approximating the logistic sigmoid by # error functions. coefs are obtained via: # x = np.array([0, 0.6, 2, 3.5, 4.5, np.inf]) # b = logistic(x) # A = (erf(np.dot(x, self.lambdas)) + 1) / 2 # coefs = lstsq(A, b)[0] LAMBDAS = np.array([0.41, 0.4, 0.37, 0.44, 0.39])[:, np.newaxis] COEFS = np.array([-1854.8214151, 3516.89893646, 221.29346712, 128.12323805, -2010.49422654])[:, np.newaxis] class _BinaryGaussianProcessClassifierLaplace(BaseEstimator): """Binary Gaussian process classification based on Laplace approximation. The implementation is based on Algorithm 3.1, 3.2, and 5.1 of ``Gaussian Processes for Machine Learning'' (GPML) by Rasmussen and Williams. Internally, the Laplace approximation is used for approximating the non-Gaussian posterior by a Gaussian. Currently, the implementation is restricted to using the logistic link function. Parameters ---------- kernel : kernel object The kernel specifying the covariance function of the GP. If None is passed, the kernel "1.0 * RBF(1.0)" is used as default. Note that the kernel's hyperparameters are optimized during fitting. optimizer : string or callable, optional (default: "fmin_l_bfgs_b") Can either be one of the internally supported optimizers for optimizing the kernel's parameters, specified by a string, or an externally defined optimizer passed as a callable. If a callable is passed, it must have the signature:: def optimizer(obj_func, initial_theta, bounds): # * 'obj_func' is the objective function to be maximized, which # takes the hyperparameters theta as parameter and an # optional flag eval_gradient, which determines if the # gradient is returned additionally to the function value # * 'initial_theta': the initial value for theta, which can be # used by local optimizers # * 'bounds': the bounds on the values of theta .... # Returned are the best found hyperparameters theta and # the corresponding value of the target function. return theta_opt, func_min Per default, the 'fmin_l_bfgs_b' algorithm from scipy.optimize is used. If None is passed, the kernel's parameters are kept fixed. Available internal optimizers are:: 'fmin_l_bfgs_b' n_restarts_optimizer: int, optional (default: 0) The number of restarts of the optimizer for finding the kernel's parameters which maximize the log-marginal likelihood. The first run of the optimizer is performed from the kernel's initial parameters, the remaining ones (if any) from thetas sampled log-uniform randomly from the space of allowed theta-values. If greater than 0, all bounds must be finite. Note that n_restarts_optimizer=0 implies that one run is performed. max_iter_predict: int, optional (default: 100) The maximum number of iterations in Newton's method for approximating the posterior during predict. Smaller values will reduce computation time at the cost of worse results. 
warm_start : bool, optional (default: False) If warm-starts are enabled, the solution of the last Newton iteration on the Laplace approximation of the posterior mode is used as initialization for the next call of _posterior_mode(). This can speed up convergence when _posterior_mode is called several times on similar problems as in hyperparameter optimization. copy_X_train : bool, optional (default: True) If True, a persistent copy of the training data is stored in the object. Otherwise, just a reference to the training data is stored, which might cause predictions to change if the data is modified externally. random_state : integer or numpy.RandomState, optional The generator used to initialize the centers. If an integer is given, it fixes the seed. Defaults to the global numpy random number generator. Attributes ---------- X_train_ : array-like, shape = (n_samples, n_features) Feature values in training data (also required for prediction) y_train_: array-like, shape = (n_samples,) Target values in training data (also required for prediction) classes_ : array-like, shape = (n_classes,) Unique class labels. kernel_: kernel object The kernel used for prediction. The structure of the kernel is the same as the one passed as parameter but with optimized hyperparameters L_: array-like, shape = (n_samples, n_samples) Lower-triangular Cholesky decomposition of the kernel in X_train_ pi_: array-like, shape = (n_samples,) The probabilities of the positive class for the training points X_train_ W_sr_: array-like, shape = (n_samples,) Square root of W, the Hessian of log-likelihood of the latent function values for the observed labels. Since W is diagonal, only the diagonal of sqrt(W) is stored. log_marginal_likelihood_value_: float The log-marginal-likelihood of self.kernel_.theta """ def __init__(self, kernel=None, optimizer="fmin_l_bfgs_b", n_restarts_optimizer=0, max_iter_predict=100, warm_start=False, copy_X_train=True, random_state=None): self.kernel = kernel self.optimizer = optimizer self.n_restarts_optimizer = n_restarts_optimizer self.max_iter_predict = max_iter_predict self.warm_start = warm_start self.copy_X_train = copy_X_train self.random_state = random_state def fit(self, X, y): """Fit Gaussian process classification model Parameters ---------- X : array-like, shape = (n_samples, n_features) Training data y : array-like, shape = (n_samples,) Target values, must be binary Returns ------- self : returns an instance of self. """ if self.kernel is None: # Use an RBF kernel as default self.kernel_ = C(1.0, constant_value_bounds="fixed") \ * RBF(1.0, length_scale_bounds="fixed") else: self.kernel_ = clone(self.kernel) self.rng = check_random_state(self.random_state) self.X_train_ = np.copy(X) if self.copy_X_train else X # Encode class labels and check that it is a binary classification # problem label_encoder = LabelEncoder() self.y_train_ = label_encoder.fit_transform(y) self.classes_ = label_encoder.classes_ if self.classes_.size > 2: raise ValueError("%s supports only binary classification. 
" "y contains classes %s" % (self.__class__.__name__, self.classes_)) elif self.classes_.size == 1: raise ValueError("{0:s} requires 2 classes.".format( self.__class__.__name__)) if self.optimizer is not None and self.kernel_.n_dims > 0: # Choose hyperparameters based on maximizing the log-marginal # likelihood (potentially starting from several initial values) def obj_func(theta, eval_gradient=True): if eval_gradient: lml, grad = self.log_marginal_likelihood( theta, eval_gradient=True) return -lml, -grad else: return -self.log_marginal_likelihood(theta) # First optimize starting from theta specified in kernel optima = [self._constrained_optimization(obj_func, self.kernel_.theta, self.kernel_.bounds)] # Additional runs are performed from log-uniform chosen initial # theta if self.n_restarts_optimizer > 0: if not np.isfinite(self.kernel_.bounds).all(): raise ValueError( "Multiple optimizer restarts (n_restarts_optimizer>0) " "requires that all bounds are finite.") bounds = self.kernel_.bounds for iteration in range(self.n_restarts_optimizer): theta_initial = np.exp(self.rng.uniform(bounds[:, 0], bounds[:, 1])) optima.append( self._constrained_optimization(obj_func, theta_initial, bounds)) # Select result from run with minimal (negative) log-marginal # likelihood lml_values = list(map(itemgetter(1), optima)) self.kernel_.theta = optima[np.argmin(lml_values)][0] self.log_marginal_likelihood_value_ = -np.min(lml_values) else: self.log_marginal_likelihood_value_ = \ self.log_marginal_likelihood(self.kernel_.theta) # Precompute quantities required for predictions which are independent # of actual query points K = self.kernel_(self.X_train_) _, (self.pi_, self.W_sr_, self.L_, _, _) = \ self._posterior_mode(K, return_temporaries=True) return self def predict(self, X): """Perform classification on an array of test vectors X. Parameters ---------- X : array-like, shape = (n_samples, n_features) Returns ------- C : array, shape = (n_samples,) Predicted target values for X, values are from classes_ """ check_is_fitted(self, ["X_train_", "y_train_", "pi_", "W_sr_", "L_"]) # As discussed on Section 3.4.2 of GPML, for making hard binary # decisions, it is enough to compute the MAP of the posterior and # pass it through the link function K_star = self.kernel_(self.X_train_, X) # K_star =k(x_star) f_star = K_star.T.dot(self.y_train_ - self.pi_) # Algorithm 3.2,Line 4 return np.where(f_star > 0, self.classes_[1], self.classes_[0]) def predict_proba(self, X): """Return probability estimates for the test vector X. Parameters ---------- X : array-like, shape = (n_samples, n_features) Returns ------- C : array-like, shape = (n_samples, n_classes) Returns the probability of the samples for each class in the model. The columns correspond to the classes in sorted order, as they appear in the attribute `classes_`. """ check_is_fitted(self, ["X_train_", "y_train_", "pi_", "W_sr_", "L_"]) # Based on Algorithm 3.2 of GPML K_star = self.kernel_(self.X_train_, X) # K_star =k(x_star) f_star = K_star.T.dot(self.y_train_ - self.pi_) # Line 4 v = solve(self.L_, self.W_sr_[:, np.newaxis] * K_star) # Line 5 # Line 6 (compute np.diag(v.T.dot(v)) via einsum) var_f_star = self.kernel_.diag(X) - np.einsum("ij,ij->j", v, v) # Line 7: # Approximate \int log(z) * N(z | f_star, var_f_star) # Approximation is due to Williams & Barber, "Bayesian Classification # with Gaussian Processes", Appendix A: Approximate the logistic # sigmoid by a linear combination of 5 error functions. 
# For information on how this integral can be computed see # blitiri.blogspot.de/2012/11/gaussian-integral-of-error-function.html alpha = 1 / (2 * var_f_star) gamma = LAMBDAS * f_star integrals = np.sqrt(np.pi / alpha) \ * erf(gamma * np.sqrt(alpha / (alpha + LAMBDAS**2))) \ / (2 * np.sqrt(var_f_star * 2 * np.pi)) pi_star = (COEFS * integrals).sum(axis=0) + .5 * COEFS.sum() return np.vstack((1 - pi_star, pi_star)).T def log_marginal_likelihood(self, theta=None, eval_gradient=False): """Returns log-marginal likelihood of theta for training data. Parameters ---------- theta : array-like, shape = (n_kernel_params,) or None Kernel hyperparameters for which the log-marginal likelihood is evaluated. If None, the precomputed log_marginal_likelihood of self.kernel_.theta is returned. eval_gradient : bool, default: False If True, the gradient of the log-marginal likelihood with respect to the kernel hyperparameters at position theta is returned additionally. If True, theta must not be None. Returns ------- log_likelihood : float Log-marginal likelihood of theta for training data. log_likelihood_gradient : array, shape = (n_kernel_params,), optional Gradient of the log-marginal likelihood with respect to the kernel hyperparameters at position theta. Only returned when eval_gradient is True. """ if theta is None: if eval_gradient: raise ValueError( "Gradient can only be evaluated for theta!=None") return self.log_marginal_likelihood_value_ kernel = self.kernel_.clone_with_theta(theta) if eval_gradient: K, K_gradient = kernel(self.X_train_, eval_gradient=True) else: K = kernel(self.X_train_) # Compute log-marginal-likelihood Z and also store some temporaries # which can be reused for computing Z's gradient Z, (pi, W_sr, L, b, a) = \ self._posterior_mode(K, return_temporaries=True) if not eval_gradient: return Z # Compute gradient based on Algorithm 5.1 of GPML d_Z = np.empty(theta.shape[0]) # XXX: Get rid of the np.diag() in the next line R = W_sr[:, np.newaxis] * cho_solve((L, True), np.diag(W_sr)) # Line 7 C = solve(L, W_sr[:, np.newaxis] * K) # Line 8 # Line 9: (use einsum to compute np.diag(C.T.dot(C)))) s_2 = -0.5 * (np.diag(K) - np.einsum('ij, ij -> j', C, C)) \ * (pi * (1 - pi) * (1 - 2 * pi)) # third derivative for j in range(d_Z.shape[0]): C = K_gradient[:, :, j] # Line 11 # Line 12: (R.T.ravel().dot(C.ravel()) = np.trace(R.dot(C))) s_1 = .5 * a.T.dot(C).dot(a) - .5 * R.T.ravel().dot(C.ravel()) b = C.dot(self.y_train_ - pi) # Line 13 s_3 = b - K.dot(R.dot(b)) # Line 14 d_Z[j] = s_1 + s_2.T.dot(s_3) # Line 15 return Z, d_Z def _posterior_mode(self, K, return_temporaries=False): """Mode-finding for binary Laplace GPC and fixed kernel. This approximates the posterior of the latent function values for given inputs and target observations with a Gaussian approximation and uses Newton's iteration to find the mode of this approximation. 
""" # Based on Algorithm 3.1 of GPML # If warm_start are enabled, we reuse the last solution for the # posterior mode as initialization; otherwise, we initialize with 0 if self.warm_start and hasattr(self, "f_cached") \ and self.f_cached.shape == self.y_train_.shape: f = self.f_cached else: f = np.zeros_like(self.y_train_, dtype=np.float64) # Use Newton's iteration method to find mode of Laplace approximation log_marginal_likelihood = -np.inf for _ in range(self.max_iter_predict): # Line 4 pi = 1 / (1 + np.exp(-f)) W = pi * (1 - pi) # Line 5 W_sr = np.sqrt(W) W_sr_K = W_sr[:, np.newaxis] * K B = np.eye(W.shape[0]) + W_sr_K * W_sr L = cholesky(B, lower=True) # Line 6 b = W * f + (self.y_train_ - pi) # Line 7 a = b - W_sr * cho_solve((L, True), W_sr_K.dot(b)) # Line 8 f = K.dot(a) # Line 10: Compute log marginal likelihood in loop and use as # convergence criterion lml = -0.5 * a.T.dot(f) \ - np.log(1 + np.exp(-(self.y_train_ * 2 - 1) * f)).sum() \ - np.log(np.diag(L)).sum() # Check if we have converged (log marginal likelihood does # not decrease) # XXX: more complex convergence criterion if lml - log_marginal_likelihood < 1e-10: break log_marginal_likelihood = lml self.f_cached = f # Remember solution for later warm-starts if return_temporaries: return log_marginal_likelihood, (pi, W_sr, L, b, a) else: return log_marginal_likelihood def _constrained_optimization(self, obj_func, initial_theta, bounds): if self.optimizer == "fmin_l_bfgs_b": theta_opt, func_min, convergence_dict = \ fmin_l_bfgs_b(obj_func, initial_theta, bounds=bounds) if convergence_dict["warnflag"] != 0: warnings.warn("fmin_l_bfgs_b terminated abnormally with the " " state: %s" % convergence_dict) elif callable(self.optimizer): theta_opt, func_min = \ self.optimizer(obj_func, initial_theta, bounds=bounds) else: raise ValueError("Unknown optimizer %s." % self.optimizer) return theta_opt, func_min class GaussianProcessClassifier(BaseEstimator, ClassifierMixin): """Gaussian process classification (GPC) based on Laplace approximation. The implementation is based on Algorithm 3.1, 3.2, and 5.1 of ``Gaussian Processes for Machine Learning'' (GPML) by Rasmussen and Williams. Internally, the Laplace approximation is used for approximating the non-Gaussian posterior by a Gaussian. Currently, the implementation is restricted to using the logistic link function. For multi-class classification, several binary one-versus rest classifiers are fitted. Note that this class thus does not implement a true multi-class Laplace approximation. Parameters ---------- kernel : kernel object The kernel specifying the covariance function of the GP. If None is passed, the kernel "1.0 * RBF(1.0)" is used as default. Note that the kernel's hyperparameters are optimized during fitting. optimizer : string or callable, optional (default: "fmin_l_bfgs_b") Can either be one of the internally supported optimizers for optimizing the kernel's parameters, specified by a string, or an externally defined optimizer passed as a callable. If a callable is passed, it must have the signature:: def optimizer(obj_func, initial_theta, bounds): # * 'obj_func' is the objective function to be maximized, which # takes the hyperparameters theta as parameter and an # optional flag eval_gradient, which determines if the # gradient is returned additionally to the function value # * 'initial_theta': the initial value for theta, which can be # used by local optimizers # * 'bounds': the bounds on the values of theta .... 
# Returned are the best found hyperparameters theta and # the corresponding value of the target function. return theta_opt, func_min Per default, the 'fmin_l_bfgs_b' algorithm from scipy.optimize is used. If None is passed, the kernel's parameters are kept fixed. Available internal optimizers are:: 'fmin_l_bfgs_b' n_restarts_optimizer: int, optional (default: 0) The number of restarts of the optimizer for finding the kernel's parameters which maximize the log-marginal likelihood. The first run of the optimizer is performed from the kernel's initial parameters, the remaining ones (if any) from thetas sampled log-uniform randomly from the space of allowed theta-values. If greater than 0, all bounds must be finite. Note that n_restarts_optimizer=0 implies that one run is performed. max_iter_predict: int, optional (default: 100) The maximum number of iterations in Newton's method for approximating the posterior during predict. Smaller values will reduce computation time at the cost of worse results. warm_start : bool, optional (default: False) If warm-starts are enabled, the solution of the last Newton iteration on the Laplace approximation of the posterior mode is used as initialization for the next call of _posterior_mode(). This can speed up convergence when _posterior_mode is called several times on similar problems as in hyperparameter optimization. copy_X_train : bool, optional (default: True) If True, a persistent copy of the training data is stored in the object. Otherwise, just a reference to the training data is stored, which might cause predictions to change if the data is modified externally. random_state : integer or numpy.RandomState, optional The generator used to initialize the centers. If an integer is given, it fixes the seed. Defaults to the global numpy random number generator. multi_class: string, default: "one_vs_rest" Specifies how multi-class classification problems are handled. Supported are "one_vs_rest" and "one_vs_one". In "one_vs_rest", one binary Gaussian process classifier is fitted for each class, which is trained to separate this class from the rest. In "one_vs_one", one binary Gaussian process classifier is fitted for each pair of classes, which is trained to separate these two classes. The predictions of these binary predictors are combined into multi-class predictions. Note that "one_vs_one" does not support predicting probability estimates. n_jobs : int, optional, default: 1 The number of jobs to use for the computation. If -1 all CPUs are used. If 1 is given, no parallel computing code is used at all, which is useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one are used. Attributes ---------- kernel_ : kernel object The kernel used for prediction. In case of binary classification, the structure of the kernel is the same as the one passed as parameter but with optimized hyperparameters. In case of multi-class classification, a CompoundKernel is returned which consists of the different kernels used in the one-versus-rest classifiers. log_marginal_likelihood_value_: float The log-marginal-likelihood of self.kernel_.theta classes_ : array-like, shape = (n_classes,) Unique class labels. 
n_classes_ : int The number of classes in the training data """ def __init__(self, kernel=None, optimizer="fmin_l_bfgs_b", n_restarts_optimizer=0, max_iter_predict=100, warm_start=False, copy_X_train=True, random_state=None, multi_class="one_vs_rest", n_jobs=1): self.kernel = kernel self.optimizer = optimizer self.n_restarts_optimizer = n_restarts_optimizer self.max_iter_predict = max_iter_predict self.warm_start = warm_start self.copy_X_train = copy_X_train self.random_state = random_state self.multi_class = multi_class self.n_jobs = n_jobs def fit(self, X, y): """Fit Gaussian process classification model Parameters ---------- X : array-like, shape = (n_samples, n_features) Training data y : array-like, shape = (n_samples,) Target values, must be binary Returns ------- self : returns an instance of self. """ X, y = check_X_y(X, y, multi_output=False) self.base_estimator_ = _BinaryGaussianProcessClassifierLaplace( self.kernel, self.optimizer, self.n_restarts_optimizer, self.max_iter_predict, self.warm_start, self.copy_X_train, self.random_state) self.classes_ = np.unique(y) self.n_classes_ = self.classes_.size if self.n_classes_ == 1: raise ValueError("GaussianProcessClassifier requires 2 or more " "distinct classes. Only class %s present." % self.classes_[0]) if self.n_classes_ > 2: if self.multi_class == "one_vs_rest": self.base_estimator_ = \ OneVsRestClassifier(self.base_estimator_, n_jobs=self.n_jobs) elif self.multi_class == "one_vs_one": self.base_estimator_ = \ OneVsOneClassifier(self.base_estimator_, n_jobs=self.n_jobs) else: raise ValueError("Unknown multi-class mode %s" % self.multi_class) self.base_estimator_.fit(X, y) if self.n_classes_ > 2: self.log_marginal_likelihood_value_ = np.mean( [estimator.log_marginal_likelihood() for estimator in self.base_estimator_.estimators_]) else: self.log_marginal_likelihood_value_ = \ self.base_estimator_.log_marginal_likelihood() return self def predict(self, X): """Perform classification on an array of test vectors X. Parameters ---------- X : array-like, shape = (n_samples, n_features) Returns ------- C : array, shape = (n_samples,) Predicted target values for X, values are from classes_ """ check_is_fitted(self, ["classes_", "n_classes_"]) X = check_array(X) return self.base_estimator_.predict(X) def predict_proba(self, X): """Return probability estimates for the test vector X. Parameters ---------- X : array-like, shape = (n_samples, n_features) Returns ------- C : array-like, shape = (n_samples, n_classes) Returns the probability of the samples for each class in the model. The columns correspond to the classes in sorted order, as they appear in the attribute `classes_`. """ check_is_fitted(self, ["classes_", "n_classes_"]) if self.n_classes_ > 2 and self.multi_class == "one_vs_one": raise ValueError("one_vs_one multi-class mode does not support " "predicting probability estimates. Use " "one_vs_rest mode instead.") X = check_array(X) return self.base_estimator_.predict_proba(X) @property def kernel_(self): if self.n_classes_ == 2: return self.base_estimator_.kernel_ else: return CompoundKernel( [estimator.kernel_ for estimator in self.base_estimator_.estimators_]) def log_marginal_likelihood(self, theta=None, eval_gradient=False): """Returns log-marginal likelihood of theta for training data. In the case of multi-class classification, the mean log-marginal likelihood of the one-versus-rest classifiers are returned. 
Parameters ---------- theta : array-like, shape = (n_kernel_params,) or none Kernel hyperparameters for which the log-marginal likelihood is evaluated. In the case of multi-class classification, theta may be the hyperparameters of the compound kernel or of an individual kernel. In the latter case, all individual kernel get assigned the same theta values. If None, the precomputed log_marginal_likelihood of self.kernel_.theta is returned. eval_gradient : bool, default: False If True, the gradient of the log-marginal likelihood with respect to the kernel hyperparameters at position theta is returned additionally. Note that gradient computation is not supported for non-binary classification. If True, theta must not be None. Returns ------- log_likelihood : float Log-marginal likelihood of theta for training data. log_likelihood_gradient : array, shape = (n_kernel_params,), optional Gradient of the log-marginal likelihood with respect to the kernel hyperparameters at position theta. Only returned when eval_gradient is True. """ check_is_fitted(self, ["classes_", "n_classes_"]) if theta is None: if eval_gradient: raise ValueError( "Gradient can only be evaluated for theta!=None") return self.log_marginal_likelihood_value_ theta = np.asarray(theta) if self.n_classes_ == 2: return self.base_estimator_.log_marginal_likelihood( theta, eval_gradient) else: if eval_gradient: raise NotImplementedError( "Gradient of log-marginal-likelhood not implemented for " "multi-class GPC.") estimators = self.base_estimator_.estimators_ n_dims = estimators[0].kernel_.n_dims if theta.shape[0] == n_dims: # use same theta for all sub-kernels return np.mean( [estimator.log_marginal_likelihood(theta) for i, estimator in enumerate(estimators)]) elif theta.shape[0] == n_dims * self.classes_.shape[0]: # theta for compound kernel return np.mean( [estimator.log_marginal_likelihood( theta[n_dims*i:n_dims*(i+1)]) for i, estimator in enumerate(estimators)]) else: raise ValueError("Shape of theta must be either %d or %d. " "Obtained theta with shape %d." % (n_dims, n_dims * self.classes_.shape[0], theta.shape[0]))
bsd-3-clause
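As an illustration only (not part of the file above), a small hedged sketch of how this classifier is typically driven through scikit-learn's public API; the toy data and the kernel choice are assumptions for the example.

import numpy as np
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C

rng = np.random.RandomState(0)
X = rng.uniform(-3, 3, size=(60, 1))
y = (np.sin(X[:, 0]) > 0).astype(int)          # binary labels from a smooth function

kernel = C(1.0) * RBF(length_scale=1.0)        # hyperparameters are re-optimized in fit()
gpc = GaussianProcessClassifier(kernel=kernel, n_restarts_optimizer=2)
gpc.fit(X, y)

print(gpc.kernel_)                              # kernel with optimized hyperparameters
print(gpc.log_marginal_likelihood_value_)       # log-marginal likelihood at the optimum
print(gpc.predict_proba(X[:3]))                 # per-class probabilities, shape (3, 2)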
Rinoahu/POEM
deprecate/lib/deep_operon.py
1
21697
#! /usr/bin/env python # -*- coding: utf-8 -*- # vim:fenc=utf-8 # CreateTime: 2016-09-21 16:51:48 import numpy as np from Bio import SeqIO, Seq, SeqUtils #from Bio.SeqUtils.CodonUsage import CodonAdaptationIndex from Bio.SeqUtils import GC from Bio.SeqUtils.CodonUsage import SynonymousCodons import math from math import log, sqrt from collections import Counter import pickle from sklearn import cross_validation, metrics # Additional scklearn functions #from sklearn.grid_search import GridSearchCV # Perforing grid search from keras import backend as K def f1_score(y_true, y_pred): def recall(y_true, y_pred): """Recall metric. Only computes a batch-wise average of recall. Computes the recall, a metric for multi-label classification of how many relevant items are selected. """ true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1))) possible_positives = K.sum(K.round(K.clip(y_true, 0, 1))) recall = true_positives / (possible_positives + K.epsilon()) return recall def precision(y_true, y_pred): """Precision metric. Only computes a batch-wise average of precision. Computes the precision, a metric for multi-label classification of how many selected items are relevant. """ true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1))) predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1))) precision = true_positives / (predicted_positives + K.epsilon()) return precision precision = precision(y_true, y_pred) recall = recall(y_true, y_pred) return 2*((precision*recall)/(precision+recall)) ########################################################################## # overlap of two gene ########################################################################## overlap = lambda s0, e0, s1, e1: min(e0, e1) - max(s0, s1) + 1 ########################################################################## # share kmer between 2 sequence ########################################################################## def pearson(x, y): N, M = len(x), len(y) assert N == M x_m, y_m = sum(x) * 1. / N, sum(y) * 1. / M a, b, c = 0., 0., 0. 
for i in xrange(N): xi, yi = x[i] - x_m, y[i] - y_m a += xi * yi b += xi ** 2 c += yi ** 2 try: return a / sqrt(b * c) except: return 0 def sharekmer(s1, s2): # SynonymousCodons n1, n2 = map(len, [s1, s2]) k1 = [s1[elem: elem + 3] for elem in xrange(0, n1, 3)] k2 = [s1[elem: elem + 3] for elem in xrange(0, n2, 3)] fq1 = Counter(k1) fq2 = Counter(k2) flag = 0 kmers = [] for i in SynonymousCodons: j = SynonymousCodons[i] if len(j) < 2: continue c1 = [[fq1[elem], elem] for elem in j] best1 = max(c1, key=lambda x: x[0]) c2 = [[fq2[elem], elem] for elem in j] best2 = max(c2, key=lambda x: x[0]) #c1.sort(key = lambda x: x[0], reverse = True) #c2.sort(key = lambda x: x[0], reverse = True) if best1[1] == best2[1]: kmers.append(best1[1]) # for val in SynonymousCodons.values(): # if len(val) > 5: # kmers.extend(val) # print 'the kmer', kmers, len(kmers) vec1 = [fq1[elem] for elem in kmers] vec2 = [fq2[elem] for elem in kmers] return pearson(vec1, vec2) ########################################################################## # the motif found ########################################################################## box_up10 = ['TATAAT', [77, 76, 60, 61, 56, 82]] box_up35 = ['TTGACA', [69, 79, 61, 56, 54, 54]] # find the best region that may be a candidate of a motif def find_motif(seq, motif, bg=None): if bg is None: bg = {} l = len(motif[0]) #best = float('-inf') best = -100 idx = -1 for i in xrange(0, len(seq) - l + 1): lmer = seq[i: i + l] score = 0 for a, b, c in zip(lmer, motif[0], motif[1]): if a == b: score += log(float(c) / bg.get(a, 1.)) else: score += log((100. - c) / bg.get(a, 1.)) # try: # score += log((100. - c) / bg.get(a, 1.)) # except: # print c, bg.get(a, 1.) if score >= best: idx = i best = score return [seq[idx: idx + l], len(seq) - idx, best] ########################################################################## # cai, from biopython ########################################################################## index = Counter({'GCT': 1, 'CGT': 1, 'AAC': 1, 'GAC': 1, 'TGC': 1, 'CAG': 1, 'GAA': 1, 'GGT': 1, 'CAC': 1, 'ATC': 1, 'CTG': 1, 'AAA': 1, 'ATG': 1, 'TTC': 1, 'CCG': 1, 'TCT': 1, 'ACC': 1, 'TGG': 1, 'TAC': 1, 'GTT': 1, 'ACT': 0.965, 'TCC': 0.744, 'GGC': 0.724, 'GCA': 0.586, 'TGT': 0.5, 'GTA': 0.495, 'GAT': 0.434, 'GCG': 0.424, 'AGC': 0.41, 'CGC': 0.356, 'TTT': 0.296, 'CAT': 0.291, 'GAG': 0.259, 'AAG': 0.253, 'TAT': 0.239, 'GTG': 0.221, 'ATT': 0.185, 'CCA': 0.135, 'CAA': 0.124, 'GCC': 0.122, 'ACG': 0.099, 'AGT': 0.085, 'TCA': 0.077, 'ACA': 0.076, 'CCT': 0.07, 'GTC': 0.066, 'AAT': 0.051, 'CTT': 0.042, 'CTC': 0.037, 'TTA': 0.02, 'TTG': 0.02, 'GGG': 0.019, 'TCG': 0.017, 'CCC': 0.012, 'GGA': 0.01, 'CTA': 0.007, 'AGA': 0.004, 'CGA': 0.004, 'CGG': 0.004, 'ATA': 0.003, 'AGG': 0.002}) def cai(seq): if seq.islower(): seq = seq.upper() N = len(seq) cai_value, cai_length = 0, 0 for i in xrange(0, N, 3): codon = seq[i: i + 3] if codon in index: if codon not in ['ATG', 'TGG']: cai_value += math.log(index[codon]) cai_length += 1 elif codon not in ['TGA', 'TAA', 'TAG']: continue else: continue if cai_length > 0: return math.exp(cai_value / cai_length) else: return 0 ########################################################################## # get the features ########################################################################## # convert ATCG based kmer number #code = {'A': 1, 'a': 1, 'T': 2, 't': 2, 'G': 3, 'g': 3, 'C': 4, 'c': 4} code = [0] * 256 code5 = [0] * 256 flag = 0 for i in 'ATGC': code[ord(i.lower())] = code[ord(i)] = flag code5[ord(i.lower())] = code5[ord(i)] = flag + 1 
flag += 1 # convert string to number def s2n(s, code=code, scale=None): if scale == None: scale = max(code) + 1 N = 0 output = 0 for i in s[::-1]: #output += code.get(i, 0) * scale ** N output += code[ord(i)] * scale ** N N += 1 return output # reverse of s2n def n2s(n, length, alpha='ATGC', scale=None): if scale == None: scale = max(code) + 1 N = n s = [] for i in xrange(length): s.append(alpha[N % scale]) N /= scale return ''.join(s[::-1]) # convert the dna sequence to kmer-position matrix. # if length of dna < given, then add NNN in the center of the sequence. # else if length of dna > given, then trim the center of the sequence. # the new kpm, reshape def kpm(S, d=64, k=3, code=code, scale=None): if scale == None: scale = max(code) + 1 N = scale ** k assert isinstance(d, int) L = len(S) if d < L: F = d // 2 R = d - F seq = ''.join([S[: F], S[-R:]]) elif d > L: F = L // 2 R = L - F seq = ''.join([S[: F], 'N' * (d - L), S[-R:]]) else: seq = S mat = [[0] * (d // 3) for elem in xrange(N * 3)] for i in xrange(0, d - k + 1): kmer = seq[i: i + k] if 'N' in kmer or 'n' in kmer: continue R = s2n(kmer, code=code, scale=scale) mat[R + i % 3 * N][i // 3] = 1 mat = np.asarray(mat, 'int8') return mat # get features by give loc1, start and end: # get xx def get_xx(j, seq_dict, kmer=2, dim=128, mode='train', context=False): loc1, scf1, std1, st1, ed1, loc2, scf2, std2, st2, ed2 = j[: 10] if scf1 != scf2 or std1 != std2: if context: X0 = np.ones((4 ** kmer * 3, dim // 3 * 3)) else: X0 = np.ones((4 ** kmer * 3, dim // 3)) X1 = [10**4] * 11 X2 = [127] * dim return [X0], X1, X2 # get the sequence st1, ed1, st2, ed2 = map(int, [st1, ed1, st2, ed2]) st1 -= 1 st2 -= 1 if st1 > st2: loc1, scf1, std1, st1, ed1, loc2, scf2, std2, st2, ed2 = loc2, scf2, std2, st2, ed2, loc1, scf1, std1, st1, ed1 seq1 = seq_dict[scf1][st1: ed1] seq1 = std1 == '+' and seq1 or seq1.reverse_complement() seq2 = seq_dict[scf2][st2: ed2] seq2 = std1 == '+' and seq2 or seq2.reverse_complement() start, end = ed1, st2 seq12 = seq_dict[scf1][start: end] seq12 = std1 == '+' and seq12 or seq12.reverse_complement() seq1, seq2, seq12 = map(str, [seq1.seq, seq2.seq, seq12.seq]) seq1, seq2, seq12 = seq1.upper(), seq2.upper(), seq12.upper() # 1D features such as gc, dist cai1, cai2, cai12 = map(cai, [seq1, seq2, seq12]) dist = st2 - ed1 distn = (st2 - ed1) * 1. / (ed2 - st1) ratio = math.log((ed1 - st1) * 1. / (ed2 - st2)) ratio = std1 == '+' and ratio or -ratio idx = -100 bgs = Counter(seq12[idx:]) up10, up35 = find_motif(seq12[idx:], box_up10, bgs), find_motif( seq12[idx:], box_up35, bgs) if seq12[idx:]: gc = SeqUtils.GC(seq12[idx:]) try: skew = SeqUtils.GC_skew(seq12[idx:])[0] except: skew = 0. else: gc = skew = 0. 
bias = sharekmer(seq1, seq2) if st1 == st2 == '+': X1 = [cai1, cai2, bias, distn, ratio, gc, skew] + up10[1:] + up35[1:] else: X1 = [cai2, cai1, bias, distn, ratio, gc, skew] + up10[1:] + up35[1:] # 2D features of kmer matrix if context: seqmat12 = kpm(seq12, d=dim, k=kmer, scale=4) seqmat1 = kpm(seq1, d=dim, k=kmer, scale=4) seqmat2 = kpm(seq2, d=dim, k=kmer, scale=4) seqmat = np.concatenate((seqmat1, seqmat12, seqmat2), 1) else: seqmat = kpm(seq12, d=dim, k=kmer, scale=4) if ed1 > st2: seqmat[:] = 0 X0 = [seqmat] n12 = len(seq12) X2 = [s2n(seq12[elem: elem + kmer], code5) for elem in xrange(n12 - kmer + 1)] return X0, X1, X2 # get single line of features def get_xx_one(j, seq_dict, kmer = 2, dim = 128, mode = 'train'): X0, X1, X2 = get_xx(j, seq_dict, kmer, dim, mode) x0, x1, x2 = map(np.asarray, [[X0], [X1], [X2]]) return x0, x1, X2 # generate training and testing data def get_xxy(f, seq_dict, kmer = 2, dim = 128): # get the training data X0, X1, X2, y = [], [], [], [] for i in f: j = i[:-1].split('\t') x0, x1, x2 = get_xx(j, seq_dict, kmer, dim) X0.append(x0) X1.append(x1) X2.append(x2) y.append(j[-1] == 'True' and 1 or 0) X0 = np.asarray(X0, 'int8') X1 = np.asarray(X1, 'float32') X2 = np.asarray(X2) y = np.asarray(y, 'int8') return X0, X1, X2, y # split the X0, X1, y data to training and testing def split_xxy(X0, X1, X2, y, train_size=1. / 3, seed=42): N = X0.shape[0] idx = np.arange(N) np.random.seed(seed) np.random.shuffle(idx) start = int(train_size * N) idx_train, idx_test = idx[: start], idx[start:] X0_train, X1_train, X2_train, y_train = X0[ idx_train], X1[idx_train], X2[idx_train], y[idx_train] X0_test, X1_test, X2_test, y_test = X0[idx_test], X1[ idx_test], X2[idx_test], y[idx_test] return X0_train, X1_train, X2_train, y_train, X0_test, X1_test, X2_test, y_test ########################################################################## # the CNN class ########################################################################## class CNN: def __init__(self, nb_filter=64, nb_pool=3, nb_conv=2, nb_epoch=10, batch_size=64, maxlen=128, save_path='./weights.hdf5'): self.nb_filter = nb_filter self.nb_pool = nb_pool self.nb_conv = nb_conv self.nb_epoch = nb_epoch self.batch_size = batch_size self.maxlen = maxlen self.opt = Adam(lr=5e-4, beta_1=0.995, beta_2=0.999, epsilon=1e-09) self.checkpointer = [ModelCheckpoint(filepath=save_path, verbose=1, save_best_only=True, mode='max', monitor='val_fbeta_score')] self.metric = keras.metrics.fbeta_score #self.metric = f1_score self.cross_val = 1 / 3. 
def fit_2d(self, X_train, y_train, X_test=None, y_test=None): Y_train = np_utils.to_categorical(y_train) if type(y_test) == type(None): Y_test = None else: Y_test = np_utils.to_categorical(y_test) nb_classes = Y_train.shape[1] # set parameter for cnn loss = nb_classes > 2 and 'categorical_crossentropy' or 'binary_crossentropy' print 'loss function is', loss # number of convolutional filters to use nb_filters = self.nb_filter # size of pooling area for max pooling nb_pool = self.nb_pool # convolution kernel size nb_conv = self.nb_conv # traning iteration nb_epoch = self.nb_epoch batch_size = self.batch_size a, b, img_rows, img_cols = X_train.shape # set the conv model model = Sequential() model.add(Convolution2D(nb_filters, nb_conv, nb_conv, border_mode='same', input_shape=(b, img_rows, img_cols), activation='relu', name='conv1_1')) #model.add(Conv2D(64, (2, 2), padding="same", activation="relu", name="conv1_1", input_shape=(1, 192, 4))) model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_2')) model.add(MaxPooling2D((2, 2), strides=(2, 2))) model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_1')) model.add(MaxPooling2D((2, 2), strides=(2, 2))) model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_1')) model.add(MaxPooling2D((2, 2), strides=(2, 2))) model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_1')) model.add(Flatten()) model.add(Dense(512, activation='relu')) model.add(Dropout(0.5)) model.add(Dense(nb_classes, activation='sigmoid')) opt = self.opt model.compile(loss=loss, optimizer='adam', metrics=[self.metric]) # set the check pointer to save the best model if type(X_test) != type(None) and type(Y_test) != type(None): model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, validation_data=(X_test, Y_test), shuffle=True, validation_split=1e-4, callbacks=self.checkpointer) else: model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, shuffle=True, validation_split = self.cross_val, callbacks=self.checkpointer) self.model_2d = model def predict_2d(self, X): return self.model_2d.predict(X).argmax(1) # load an training model def load(self, name, mode='2d'): model = keras.models.load_model(name) if mode == '2d': self.model_2d = model else: pass # save the model def save(self, name, model='2d'): if model == '2d': self.model_2d.save(name+'_'+model) else: pass # run training def run_train(train, seq_dict, clf, mode='2d'): # get the training data split_rate = 1. 
/ 3 if mode == '2d': f = open(train, 'r') X, X1, X2, y = get_xxy(f, seq_dict, 3, 128) X_train, X1_train, X2_train, y_train, X_test, X1_test, X2_test, y_test = split_xxy( X, X1, X2, y, split_rate) f.close() clf.fit_2d(X_train, y_train, X_test, y_test) # the test score Y_test = np_utils.to_categorical(y_test) score = clf.model_2d.evaluate(X_test, Y_test, verbose=0) print(' Test score:', score[0]) print('Test accuracy:', score[1]) # validate y_test_pred = clf.predict_2d(X_test) #clf.save(train, mode) precise = metrics.precision_score(y_test, y_test_pred) recall = metrics.recall_score(y_test, y_test_pred) f1 = metrics.f1_score(y_test, y_test_pred) print 'Precise:', precise print ' Recall:', recall print ' F1:', f1 # run the adjacent prediction def run_adjacent_predict(adjacent, seq_dict, model, clf, mode='2d'): adjacent, model = sys.argv[3: 5] seq_dict = SeqIO.to_dict(SeqIO.parse(fasta, 'fasta')) clf.load(model, mode) # get the locus of genes f = open(adjacent, 'r') for i in f: j = i[:-1].split('\t') x0, x1, x2 = get_xx_one(j, seq_dict, 3, 128, 'test') # print 'data shape', x0.shape, x1.shape if mode == '2d': res = clf.predict_2d(x0)[0] else: pass res = res == 1 and 'True' or 'False' print i[: -1] + '\t' + str(res) f.close() # run the whole genome prediction # generate adjacent gene pairs from the gene list def adjacent_genes(f): locus_list = [] for i in f: j = i[: -1].split('\t') if len(j) < 7: j.extend([0] * 7) locus, scaf, strand, start, end = j[: 5] start, end = map(int, [start, end]) locus_list.append([locus, scaf, strand, start, end]) locus_list.sort(key=lambda x: x[1: 5]) return locus_list def run_genome_predict(genome, seq_dict, model, clf, mode='2d'): genome, model = sys.argv[3: 5] seq_dict = SeqIO.to_dict(SeqIO.parse(fasta, 'fasta')) clf.load(model, mode) # get the locus of genes f = open(genome, 'r') locus_list = adjacent_genes(f) f.close() for a, b in zip(locus_list[: -1], locus_list[1:]): j = a + b x0, x1, x2 = get_xx_one(j, seq_dict, 3, 128, 'test') if mode == '2d': if a[1] == b[1] and a[2] == b[2]: res = clf.predict_2d(x0)[0] else: res = 0 else: pass res = res == 1 and 'True' or 'False' i = '\t'.join(map(str, j)) print i + '\t' + str(res) if __name__ == '__main__': import sys if len(sys.argv[1:]) < 3: print '#' * 79 print '# To train a model:' print '#' * 79 print 'python this.py train foo.fasta foo.train.txt [mode]\n' print 'foo.train.txt is the gene location in the format:' print ' locus1\tscf1\tstrand1\tstart1\tend1\tlocus2\tscf2\tstrand2\tstart2\tend2\tcat\n' print '#' * 79 print '# To make a adjacent genes prediction' print '#' * 79 print 'python this.py adjacent foo.fasta foo.adjacent.txt foo.model [mode]\n' print 'foo.adjacent.txt is the gene location in the format:' print ' locus1\tscf1\tstrand1\tstart1\tend1\tlocus2\tscf2\tstrand2\tstart2\tend2\n' print '#' * 79 print '# To make a whole genome prediction' print '#' * 79 print 'python this.py genome foo.fasta foo.genome.txt foo.model [mode]' print 'foo.genome.txt is the gene location in the format:' print ' locus1\tscf1\tstrand1\tstart1\tend1' print '' print '#' * 79 print 'start1/2: start of the gene in the genome, start > 0 need be adjust in the program' print ' cat: indicate whether operon or not' print ' mode: 2d' raise SystemExit() import keras from keras.models import Sequential from keras.preprocessing import sequence from keras.layers import Dense, Dropout, Activation, Flatten, Embedding from keras.layers import Input, Merge, LSTM, GRU, Bidirectional, UpSampling2D, InputLayer from keras.optimizers import SGD, 
Adam, RMSprop from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D, Conv2D from keras.utils import np_utils from keras.callbacks import ModelCheckpoint, TensorBoard from keras.models import Model from keras import backend as K from keras import objectives from keras.layers import Input, Dense, Lambda import numpy as np model, fasta = sys.argv[1: 3] # save the genome to an dict seq_dict = SeqIO.to_dict(SeqIO.parse(fasta, 'fasta')) if model.startswith('train'): train = sys.argv[3] try: mode = sys.argv[4] except: mode = '2d' clf = CNN(nb_epoch = 16, maxlen = 128, save_path = train + '_' + mode + '.hdf5') run_train(train, seq_dict, clf, mode) elif model.startswith('predict'): if len(sys.argv[1:]) < 4: print '#' * 79 print '# To make a adjacent genes prediction' print '#' * 79 print 'python this.py predict foo.fasta foo.adjacent.txt foo.model\n' print 'foo.adjacent.txt is the gene location in the format:' print ' locus1\tscf1\tstrand1\tstart1\tend1\tlocus2\tscf2\tstrand2\tstart2\tend2\n' raise SystemExit() test, model = sys.argv[3: 5] try: mode = sys.argv[5] except: mode = '2d' clf = CNN(nb_epoch = 128, maxlen = 128) # determine the number of col f = open(test, 'r') header = f.next().split('\t') f.close() if header.count('+') + header.count('-') > 1: run_adjacent_predict(test, seq_dict, model, clf, mode) else: run_genome_predict(test, seq_dict, model, clf, mode) else: pass
gpl-3.0
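A small, self-contained Python 3 sketch of the k-mer encoding idea behind s2n()/kpm() in the file above (the function name and mapping dict here are illustrative, not taken from the source): each base maps to a digit and the k-mer is read as a base-4 number, with the last base least significant.

CODE = {"A": 0, "T": 1, "G": 2, "C": 3}

def kmer_to_int(kmer, scale=4):
    """Encode an ACGT k-mer as an integer; unknown bases count as 0."""
    value = 0
    for base in kmer.upper():
        value = value * scale + CODE.get(base, 0)
    return value

assert kmer_to_int("AAA") == 0
assert kmer_to_int("ATG") == 6      # 0*16 + 1*4 + 2
assert kmer_to_int("CCC") == 63     # 4**3 - 1, so 64 distinct 3-mers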
BigDataforYou/movie_recommendation_workshop_1
big_data_4_you_demo_1/venv/lib/python2.7/site-packages/pandas/indexes/numeric.py
1
12538
import numpy as np import pandas.lib as lib import pandas.algos as _algos import pandas.index as _index from pandas import compat from pandas.indexes.base import Index, InvalidIndexError from pandas.util.decorators import Appender, cache_readonly import pandas.core.common as com from pandas.core.common import (is_dtype_equal, isnull, pandas_dtype, is_float_dtype, is_object_dtype, is_integer_dtype) import pandas.indexes.base as ibase class NumericIndex(Index): """ Provide numeric type operations This is an abstract class """ _is_numeric_dtype = True def _maybe_cast_slice_bound(self, label, side, kind): """ This function should be overloaded in subclasses that allow non-trivial casting on label-slice bounds, e.g. datetime-like indices allowing strings containing formatted datetimes. Parameters ---------- label : object side : {'left', 'right'} kind : {'ix', 'loc', 'getitem'} Returns ------- label : object Notes ----- Value of `side` parameter should be validated in caller. """ assert kind in ['ix', 'loc', 'getitem', None] # we will try to coerce to integers return self._maybe_cast_indexer(label) def _convert_tolerance(self, tolerance): try: return float(tolerance) except ValueError: raise ValueError('tolerance argument for %s must be numeric: %r' % (type(self).__name__, tolerance)) class Int64Index(NumericIndex): """ Immutable ndarray implementing an ordered, sliceable set. The basic object storing axis labels for all pandas objects. Int64Index is a special case of `Index` with purely integer labels. This is the default index type used by the DataFrame and Series ctors when no explicit index is provided by the user. Parameters ---------- data : array-like (1-dimensional) dtype : NumPy dtype (default: int64) copy : bool Make a copy of input ndarray name : object Name to be stored in the index Notes ----- An Index instance can **only** contain hashable objects """ _typ = 'int64index' _groupby = _algos.groupby_int64 _arrmap = _algos.arrmap_int64 _left_indexer_unique = _algos.left_join_indexer_unique_int64 _left_indexer = _algos.left_join_indexer_int64 _inner_indexer = _algos.inner_join_indexer_int64 _outer_indexer = _algos.outer_join_indexer_int64 _can_hold_na = False _engine_type = _index.Int64Engine def __new__(cls, data=None, dtype=None, copy=False, name=None, fastpath=False, **kwargs): if fastpath: return cls._simple_new(data, name=name) # isscalar, generators handled in coerce_to_ndarray data = cls._coerce_to_ndarray(data) if issubclass(data.dtype.type, compat.string_types): cls._string_data_error(data) elif issubclass(data.dtype.type, np.integer): dtype = np.int64 subarr = np.array(data, dtype=dtype, copy=copy) else: subarr = np.array(data, dtype=np.int64, copy=copy) if len(data) > 0: if (subarr != data).any(): raise TypeError('Unsafe NumPy casting to integer, you must' ' explicitly cast') return cls._simple_new(subarr, name=name) @property def inferred_type(self): return 'integer' @property def asi8(self): # do not cache or you'll create a memory leak return self.values.view('i8') @property def is_all_dates(self): """ Checks that all the labels are datetime objects """ return False def _convert_scalar_indexer(self, key, kind=None): """ convert a scalar indexer Parameters ---------- key : label of the slice bound kind : {'ix', 'loc', 'getitem'} or None """ assert kind in ['ix', 'loc', 'getitem', 'iloc', None] # don't coerce ilocs to integers if kind != 'iloc': key = self._maybe_cast_indexer(key) return (super(Int64Index, self) ._convert_scalar_indexer(key, kind=kind)) def equals(self, 
other): """ Determines if two Index objects contain the same elements. """ if self.is_(other): return True try: return com.array_equivalent(com._values_from_object(self), com._values_from_object(other)) except TypeError: # e.g. fails in numpy 1.6 with DatetimeIndex #1681 return False def _wrap_joined_index(self, joined, other): name = self.name if self.name == other.name else None return Int64Index(joined, name=name) Int64Index._add_numeric_methods() Int64Index._add_logical_methods() class Float64Index(NumericIndex): """ Immutable ndarray implementing an ordered, sliceable set. The basic object storing axis labels for all pandas objects. Float64Index is a special case of `Index` with purely floating point labels. Parameters ---------- data : array-like (1-dimensional) dtype : NumPy dtype (default: object) copy : bool Make a copy of input ndarray name : object Name to be stored in the index Notes ----- An Float64Index instance can **only** contain hashable objects """ _typ = 'float64index' _engine_type = _index.Float64Engine _groupby = _algos.groupby_float64 _arrmap = _algos.arrmap_float64 _left_indexer_unique = _algos.left_join_indexer_unique_float64 _left_indexer = _algos.left_join_indexer_float64 _inner_indexer = _algos.inner_join_indexer_float64 _outer_indexer = _algos.outer_join_indexer_float64 def __new__(cls, data=None, dtype=None, copy=False, name=None, fastpath=False, **kwargs): if fastpath: return cls._simple_new(data, name) data = cls._coerce_to_ndarray(data) if issubclass(data.dtype.type, compat.string_types): cls._string_data_error(data) if dtype is None: dtype = np.float64 dtype = np.dtype(dtype) # allow integer / object dtypes to be passed, but coerce to float64 if dtype.kind in ['i', 'O', 'f']: dtype = np.float64 else: raise TypeError("cannot support {0} dtype in " "Float64Index".format(dtype)) try: subarr = np.array(data, dtype=dtype, copy=copy) except: raise TypeError('Unsafe NumPy casting, you must explicitly cast') # coerce to float64 for storage if subarr.dtype != np.float64: subarr = subarr.astype(np.float64) return cls._simple_new(subarr, name) @property def inferred_type(self): return 'floating' def astype(self, dtype): dtype = pandas_dtype(dtype) if is_float_dtype(dtype) or is_integer_dtype(dtype): values = self._values.astype(dtype) elif is_object_dtype(dtype): values = self._values else: raise TypeError('Setting %s dtype to anything other than ' 'float64 or object is not supported' % self.__class__) return Index(values, name=self.name, dtype=dtype) def _convert_scalar_indexer(self, key, kind=None): """ convert a scalar indexer Parameters ---------- key : label of the slice bound kind : {'ix', 'loc', 'getitem'} or None """ assert kind in ['ix', 'loc', 'getitem', 'iloc', None] if kind == 'iloc': return self._validate_indexer('positional', key, kind) return key def _convert_slice_indexer(self, key, kind=None): """ convert a slice indexer, by definition these are labels unless we are iloc Parameters ---------- key : label of the slice bound kind : optional, type of the indexing operation (loc/ix/iloc/None) """ # if we are not a slice, then we are done if not isinstance(key, slice): return key if kind == 'iloc': return super(Float64Index, self)._convert_slice_indexer(key, kind=kind) # translate to locations return self.slice_indexer(key.start, key.stop, key.step, kind=kind) def _format_native_types(self, na_rep='', float_format=None, decimal='.', quoting=None, **kwargs): from pandas.formats.format import FloatArrayFormatter formatter = FloatArrayFormatter(self.values, 
na_rep=na_rep, float_format=float_format, decimal=decimal, quoting=quoting, fixed_width=False) return formatter.get_result_as_array() def get_value(self, series, key): """ we always want to get an index value, never a value """ if not lib.isscalar(key): raise InvalidIndexError from pandas.core.indexing import maybe_droplevels from pandas.core.series import Series k = com._values_from_object(key) loc = self.get_loc(k) new_values = com._values_from_object(series)[loc] if lib.isscalar(new_values) or new_values is None: return new_values new_index = self[loc] new_index = maybe_droplevels(new_index, k) return Series(new_values, index=new_index, name=series.name) def equals(self, other): """ Determines if two Index objects contain the same elements. """ if self is other: return True # need to compare nans locations and make sure that they are the same # since nans don't compare equal this is a bit tricky try: if not isinstance(other, Float64Index): other = self._constructor(other) if (not is_dtype_equal(self.dtype, other.dtype) or self.shape != other.shape): return False left, right = self._values, other._values return ((left == right) | (self._isnan & other._isnan)).all() except TypeError: # e.g. fails in numpy 1.6 with DatetimeIndex #1681 return False def __contains__(self, other): if super(Float64Index, self).__contains__(other): return True try: # if other is a sequence this throws a ValueError return np.isnan(other) and self.hasnans except ValueError: try: return len(other) <= 1 and ibase._try_get_item(other) in self except TypeError: return False except: return False def get_loc(self, key, method=None, tolerance=None): try: if np.all(np.isnan(key)): nan_idxs = self._nan_idxs try: return nan_idxs.item() except (ValueError, IndexError): # should only need to catch ValueError here but on numpy # 1.7 .item() can raise IndexError when NaNs are present return nan_idxs except (TypeError, NotImplementedError): pass return super(Float64Index, self).get_loc(key, method=method, tolerance=tolerance) @property def is_all_dates(self): """ Checks that all the labels are datetime objects """ return False @cache_readonly def is_unique(self): return super(Float64Index, self).is_unique and self._nan_idxs.size < 2 @Appender(Index.isin.__doc__) def isin(self, values, level=None): value_set = set(values) if level is not None: self._validate_index_level(level) return lib.ismember_nans(np.array(self), value_set, isnull(list(value_set)).any()) Float64Index._add_numeric_methods() Float64Index._add_logical_methods_disabled()
mit
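A brief usage sketch of the two index classes defined above, assuming a pandas release from the same era as this vendored copy (where Int64Index and Float64Index are still public names):

import numpy as np
import pandas as pd

idx_int = pd.Int64Index([1, 2, 3])
print(idx_int.dtype)               # int64; unsafe casts raise instead of truncating silently

# Integer input to Float64Index is coerced to float64 for storage.
idx_flt = pd.Float64Index([1, 2, np.nan])
print(idx_flt.dtype)               # float64
print(idx_flt.get_loc(np.nan))     # NaN lookups are handled explicitly (see get_loc above)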
annayqho/TheCannon
code/lamost/abundances/check_alpha.py
1
1522
import numpy as np
import matplotlib.pyplot as plt
import glob
from matplotlib.colors import LogNorm
from matplotlib import rc

plt.rc('text', usetex=True)
plt.rc('font', family='serif')

files = glob.glob("output/*all_cannon_labels.npz")
chisq = glob.glob("output/*cannon_label_chisq.npz")

feh_all = []
#teff_all = []
alpha_all = []
chisq_all = []

for i,f in enumerate(files):
    labels = np.load(f)['arr_0']
    chisq_val = np.load(chisq[i])['arr_0']
    chisq_all.extend(chisq_val)
    feh = labels[:,2]
    feh_all.extend(feh)
    alpha = labels[:,3]
    alpha_all.extend(alpha)
    #teff = labels[:,0]
    #teff_all.extend(teff)

tr_feh = np.load("tr_label.npz")['arr_0'][:,2]
tr_afe = np.load("tr_label.npz")['arr_0'][:,3]

feh_all = np.array(feh_all)
alpha_all = np.array(alpha_all)
#teff_all = np.array(teff_all)

print("%s objects so far" %len(feh_all))

#plt.hist2d(feh_all, alpha_all, norm=LogNorm(), cmap="gray_r", bins=50)
plt.hist2d(feh_all, alpha_all, norm=LogNorm(), cmap="gray_r", bins=60,
           range=[[-2.2,.9],[-.2,.6]])
#choose = teff_all < 4000
#plt.scatter(feh_all, alpha_all, c=teff_all, edgecolor='none', s=1, vmin=3500, vmax=5500)
#plt.scatter(tr_feh, tr_afe, edgecolor='none', c='red', s=1, label="training set")
plt.xlabel(r"$[Fe/H]$" + r" (dex) from Cannon/LAMOST", fontsize=16)
plt.ylabel(r"$[\alpha/M]$" + r" (dex) from Cannon/LAMOST", fontsize=16)
plt.ylim(-0.2,0.5)
plt.tick_params(axis='x', labelsize=14)
plt.tick_params(axis='y', labelsize=14)
#plt.legend()
plt.savefig("feh_alpha_temp.png")
mit
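Below is a stand-alone sketch of the same log-density histogram pattern with synthetic data; the real script reads Cannon/LAMOST .npz label files, which are not assumed here.

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm

rng = np.random.RandomState(0)
feh = rng.normal(-0.3, 0.4, 10000)                      # stand-in [Fe/H] values
alpha = 0.1 - 0.15 * feh + rng.normal(0, 0.05, 10000)   # stand-in [alpha/M] values

plt.hist2d(feh, alpha, bins=60, norm=LogNorm(), cmap="gray_r",
           range=[[-2.2, 0.9], [-0.2, 0.6]])
plt.xlabel("[Fe/H] (dex)")
plt.ylabel("[alpha/M] (dex)")
plt.savefig("feh_alpha_demo.png")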
tswast/google-cloud-python
translate/docs/conf.py
2
11927
# -*- coding: utf-8 -*- # # google-cloud-translate documentation build configuration file # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os import shlex # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath("..")) __version__ = "0.1.0" # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. needs_sphinx = "1.6.3" # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ "sphinx.ext.autodoc", "sphinx.ext.autosummary", "sphinx.ext.doctest", "sphinx.ext.intersphinx", "sphinx.ext.coverage", "sphinx.ext.napoleon", "sphinx.ext.todo", "sphinx.ext.viewcode", ] # autodoc/autosummary flags autoclass_content = "both" autodoc_default_flags = ["members"] autosummary_generate = True # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] # Allow markdown includes (so releases.md can include CHANGLEOG.md) # http://www.sphinx-doc.org/en/master/markdown.html source_parsers = {".md": "recommonmark.parser.CommonMarkParser"} # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = ['.rst', '.md'] source_suffix = [".rst", ".md"] # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = "index" # General information about the project. project = u"google-cloud-translate" copyright = u"2017, Google" author = u"Google APIs" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The full version, including alpha/beta/rc tags. release = __version__ # The short X.Y version. version = ".".join(release.split(".")[0:2]) # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ["_build"] # The reST default role (used for this markup: `text`) to use for all # documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. 
pygments_style = "sphinx" # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. # keep_warnings = False # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = True # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = "alabaster" # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. html_theme_options = { "description": "Google Cloud Client Libraries for Python", "github_user": "googleapis", "github_repo": "google-cloud-python", "github_banner": True, "font_family": "'Roboto', Georgia, sans", "head_font_family": "'Roboto', Georgia, serif", "code_font_family": "'Roboto Mono', 'Consolas', monospace", } # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ["_static"] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. # html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. # html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Language to be used for generating the HTML full-text search index. 
# Sphinx supports the following languages: # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' # html_search_language = 'en' # A dictionary with options for the search language support, empty by default. # Now only 'ja' uses this config value # html_search_options = {'type': 'default'} # The name of a javascript file (relative to the configuration directory) that # implements a search results scorer. If empty, the default will be used. # html_search_scorer = 'scorer.js' # Output file base name for HTML help builder. htmlhelp_basename = "google-cloud-translate-doc" # -- Options for warnings ------------------------------------------------------ suppress_warnings = [ # Temporarily suppress this to avoid "more than one target found for # cross-reference" warning, which are intractable for us to avoid while in # a mono-repo. # See https://github.com/sphinx-doc/sphinx/blob # /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843 "ref.python" ] # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', # Latex figure (float) alignment #'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ( master_doc, "google-cloud-translate.tex", u"google-cloud-translate Documentation", author, "manual", ) ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ( master_doc, "google-cloud-translate", u"google-cloud-translate Documentation", [author], 1, ) ] # If true, show URL addresses after external links. # man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ( master_doc, "google-cloud-translate", u"google-cloud-translate Documentation", author, "google-cloud-translate", "GAPIC library for the {metadata.shortName} v3beta1 service", "APIs", ) ] # Documents to append as an appendix to all manuals. # texinfo_appendices = [] # If false, no module index is generated. # texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. # texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. # texinfo_no_detailmenu = False # Example configuration for intersphinx: refer to the Python standard library. 
intersphinx_mapping = { "python": ("http://python.readthedocs.org/en/latest/", None), "gax": ("https://gax-python.readthedocs.org/en/latest/", None), "google-auth": ("https://google-auth.readthedocs.io/en/stable", None), "google-gax": ("https://gax-python.readthedocs.io/en/latest/", None), "google.api_core": ("https://googleapis.dev/python/google-api-core/latest", None), "grpc": ("https://grpc.io/grpc/python/", None), "requests": ("https://requests.kennethreitz.org/en/stable/", None), "fastavro": ("https://fastavro.readthedocs.io/en/stable/", None), "pandas": ("https://pandas.pydata.org/pandas-docs/stable/", None), } # Napoleon settings napoleon_google_docstring = True napoleon_numpy_docstring = True napoleon_include_private_with_doc = False napoleon_include_special_with_doc = True napoleon_use_admonition_for_examples = False napoleon_use_admonition_for_notes = False napoleon_use_admonition_for_references = False napoleon_use_ivar = False napoleon_use_param = True napoleon_use_rtype = True
apache-2.0
rbharath/deepchem
examples/muv/muv_sklearn.py
3
1150
""" Script that trains Sklearn multitask models on MUV dataset. """ from __future__ import print_function from __future__ import division from __future__ import unicode_literals import os import shutil import numpy as np import deepchem as dc from muv_datasets import load_muv from sklearn.ensemble import RandomForestClassifier np.random.seed(123) # Load MUV dataset muv_tasks, muv_datasets, transformers = load_muv() (train_dataset, valid_dataset, test_dataset) = muv_datasets # Fit models metric = dc.metrics.Metric( dc.metrics.roc_auc_score, np.mean, mode="classification") def model_builder(model_dir): sklearn_model = RandomForestClassifier( class_weight="balanced", n_estimators=500) return dc.models.SklearnModel(sklearn_model, model_dir) model = dc.models.SingletaskToMultitask(muv_tasks, model_builder) # Fit trained model model.fit(train_dataset) model.save() # Evaluate train/test scores train_scores = model.evaluate(train_dataset, [metric], transformers) valid_scores = model.evaluate(valid_dataset, [metric], transformers) print("Train scores") print(train_scores) print("Validation scores") print(valid_scores)
mit
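The script above carves out a test_dataset but only reports train and validation scores. A minimal, hypothetical follow-up, assuming the same objects are still in scope and mirroring the evaluate() calls already shown, would be:

# Hypothetical follow-up: score the held-out test split as well.
test_scores = model.evaluate(test_dataset, [metric], transformers)
print("Test scores")
print(test_scores)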
boland1992/seissuite_iran
seissuite/ant/psdepthmodel.py
6
9233
""" Module taking care of the forward modelling: theoretical dispersion curve given a 1D crustal model of velocities and densities. Uses the binaries of the Computer Programs in Seismology, with must be installed in *COMPUTER_PROGRAMS_IN_SEISMOLOGY_DIR* """ import numpy as np import matplotlib.pyplot as plt import os import shutil import itertools as it from easyprocess import EasyProcess import tempfile import pickle # getting the dir of the binaries of the Computer Programs in Seismology # import CONFIG class initalised in ./configs/tmp_config.pickle config_pickle = 'configs/tmp_config.pickle' f = open(name=config_pickle, mode='rb') CONFIG = pickle.load(f) f.close() # import variables from initialised CONFIG class. COMPUTER_PROGRAMS_IN_SEISMOLOGY_DIR=CONFIG.COMPUTER_PROGRAMS_IN_SEISMOLOGY_DIR # default header of the model file: # isotropic, 1D, flat Earth with layers of constant velocity MODEL_HEADER = """MODEL.01 TEST ISOTROPIC KGS FLAT EARTH 1-D CONSTANT VELOCITY LINE08 LINE09 LINE10 LINE11 H VP VS RHO QP QS ETAP ETAS FREFP FREFS""" class VsModel: """ Class holding a layered model of Vs function of depth, with Vp/Vs and rho/Vs ratio fixed. """ def __init__(self, vs, dz, ratio_vp_vs, ratio_rho_vs, name='', store_vg_at_periods=None): """ Initializes model with layers' Vs (vs), layers' thickness (dz), and layers' ratio Vp/Vs and rho/Vs (ratio_vp_vs, ratio_rho_vs). """ # checking shapes nlayers = np.size(vs) if np.size(dz) != nlayers - 1: raise Exception("Size of dz should be nb of layers minus 1") if not np.size(ratio_vp_vs) in [1, nlayers]: raise Exception("Size of ratio_vp_vs should be nb of layers or 1") if not np.size(ratio_rho_vs) in [1, nlayers]: raise Exception("Size of ratio_rho_vs should be nb of layers or 1") self.name = name self.vs = np.array(vs) self.dz = np.array(dz) self.ratio_vp_vs = np.array(ratio_vp_vs) self.ratio_rho_vs = np.array(ratio_rho_vs) # storing vg model at selected periods if required self.stored_vgperiods = store_vg_at_periods if not store_vg_at_periods is None: self.stored_vg = self.vg_model(store_vg_at_periods) else: self.stored_vg = None def misfit_to_vg(self, periods, vg, sigmavg, squared=True, use_storedvg=True, storevg=False): """ Misfit of modelled vg to observed vg [vg_model - vg]**2 = Sum ------------------ over periods 2 x sigmavg**2 """ # using stored vg model if required and available, else re-calculating it if use_storedvg and np.all(periods == self.stored_vgperiods): vg_model = self.stored_vg else: vg_model = self.vg_model(periods, store=storevg) misfit = np.sum(((vg_model - vg) / sigmavg)**2) / 2.0 if squared: misfit = np.sqrt(misfit) return misfit def vg_model(self, periods, store=False): """ Modelled group velocities, vg, function of period """ vs = self.vs vp = self.ratio_vp_vs * self.vs rho = self.ratio_rho_vs * self.vs dz = np.r_[self.dz, 0] # we append a fake thickness vg = Rayleigh_group_velocities(periods, dz=dz, vp=vp, vs=vs, rho=rho) if store: # storing group velocities if required self.stored_vgperiods = periods self.stored_vg = vg return vg def get_vs_at(self, z): """ Returns Vs ad depth(s) *z* """ indices = np.searchsorted(np.r_[0, self.dz.cumsum()], z, side='right') - 1 if np.any(indices) < 0: raise Exception("Depth out of range") return self.vs[indices] def plot(self, periods, obsvgarrays=None, fig=None, color='r'): """ Plots modelled and observed group velocity function of period (top) and the model itself, i.e. 
Vs vs depth (bottom) """ if not fig: fig = plt.figure(figsize=(6.5, 10), tight_layout=True) axlist = [fig.add_subplot(211), fig.add_subplot(212)] legend = True else: axlist = fig.get_axes() legend = False # no need to add legend to existing fig # 1st subplot: group velocity vs period ax = axlist[0] self.plot_vg(periods, obsvgarrays=obsvgarrays, ax=ax, legend=legend, color=color) ax.set_title(self.name) # 2nd subplot: Vs vs depth ax = axlist[1] self.plot_model(ax=ax, color=color) fig.canvas.draw() fig.show() return fig def plot_vg(self, periods, obsvgarrays=None, ax=None, legend=True, color='r'): """ Plots modelled and observed group velocity function of period """ # creating figure if not given as input fig = None if not ax: fig = plt.figure() ax = fig.add_subplot(111) vg_model = self.vg_model(periods) ax.plot(periods, vg_model, lw=1.5, color=color, label=self.name) if obsvgarrays: for i, vgarray in enumerate(obsvgarrays): label = 'Observed dispersion curves' if not i else None ax.plot(periods, vgarray, lw=0.5, color='k', label=label) ax.set_xlabel('Period (sec)') ax.set_ylabel('Group velocity (km/s)') if legend: ax.legend(loc='best', fontsize=11, framealpha=0.8) ax.grid(True) if fig: fig.show() def plot_model(self, ax=None, color='r', format_axes=True): """ Plots the model, i.e. Vs vs depth """ # creating figure if not given as input fig = None if not ax: fig = plt.figure() ax = fig.add_subplot(111) x = list(it.chain.from_iterable([[v, v] for v in self.vs])) y = [0.0] + list(it.chain.from_iterable([[z, z] for z in np.cumsum(self.dz)])) + \ [self.dz.sum() + 15] ax.plot(x, y, lw=1.5, color=color) if format_axes: ax.set_ylim(sorted(ax.get_ylim(), reverse=True)) ax.set_xlabel('Vs (km/s)') ax.set_ylabel('Depth (km)') ax.grid(True) if fig: fig.show() def Rayleigh_group_velocities(periods, dz, vp, vs, rho, verbose=False): """ Returns the array of Rayleigh wave group velocities at selected periods, from the 1-D layered Earth model contained in *dz* (thicknesses), *vp* (P wave velocities), *vs* (S wave velocities) and *rho* (densities). The Computer Programs in Seismology, located in dir *COMPUTER_PROGRAMS_IN_SEISMOLOGY_DIR*, are used for the computation. 
""" if not COMPUTER_PROGRAMS_IN_SEISMOLOGY_DIR: raise Exception("Please provide the dir of the Computer Programs in Seismology") # making and moving to temporary dir current_dir = os.getcwd() tmp_dir = tempfile.mkdtemp() os.chdir(tmp_dir) # preparing input files if verbose: print 'Preparing model and periods files' create_model_file('model', dz, vp, vs, rho) f = open('periods', 'w') f.write('\n'.join([str(p) for p in periods])) f.close() # preparing model if verbose: print "Calling sprep96" cmd = os.path.join(COMPUTER_PROGRAMS_IN_SEISMOLOGY_DIR, 'sprep96') # Rayleigh wave, fundamental mode p = EasyProcess('"{}" -M model -PARR periods -NMOD 1 -R'.format(cmd)).call() if verbose: print p.stdout # phase dispersion curve if verbose: print "Calling sdisp96" cmd = os.path.join(COMPUTER_PROGRAMS_IN_SEISMOLOGY_DIR, 'sdisp96') p = EasyProcess('"{}" -v'.format(cmd)).call() if verbose: print p.stdout # group dispersion curve if verbose: print "Calling sregn96" cmd = os.path.join(COMPUTER_PROGRAMS_IN_SEISMOLOGY_DIR, 'sregn96') p = EasyProcess('"{}"'.format(cmd)).call() if verbose: print p.stdout # exporting group velocities (-U) of Rayleigh waves (-R) in ascii file if verbose: print "Calling sdpegn96" cmd = os.path.join(COMPUTER_PROGRAMS_IN_SEISMOLOGY_DIR, 'sdpegn96') p = EasyProcess('"{}" -R -S -U -XLOG -PER -ASC'.format(cmd)).call() if verbose: print p.stdout # loading group velocities from 6th column of ascii file vg = np.loadtxt('SREGN.ASC', skiprows=1, usecols=(5,)) # removing temp dir os.chdir(current_dir) shutil.rmtree(tmp_dir) return vg def create_model_file(path, dz, vp, vs, rho): """ Writing the 1D model to ascci file, to be used as input by the Computer Programs in Seismology """ qp = np.zeros_like(dz) qs = np.zeros_like(dz) etap = np.zeros_like(dz) etas = np.zeros_like(dz) frefp = np.ones_like(dz) frefs = np.ones_like(dz) f = open(path, mode='w') f.write(MODEL_HEADER) a = np.vstack((dz, vp, vs, rho, qp, qs, etap, etas, frefp, frefs)) for col in a.T: f.write('\n') col.tofile(f, sep=' ') f.close()
gpl-3.0
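A rough usage sketch of the VsModel class defined above; the layer velocities, thicknesses, ratios and periods are invented for illustration, and vg_model() only runs if the Computer Programs in Seismology binaries are installed and configured as described in the module docstring.

import numpy as np

# Illustrative values only: 3 layers of Vs (km/s), 2 thicknesses (km).
model = VsModel(vs=[3.0, 3.6, 4.4],
                dz=[15.0, 20.0],
                ratio_vp_vs=1.73,
                ratio_rho_vs=0.77,
                name='3-layer test model')
periods = np.array([10.0, 20.0, 40.0])  # seconds
vg = model.vg_model(periods)            # modelled Rayleigh group velocities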
travisfcollins/gnuradio
gnuradio-runtime/examples/volk_benchmark/volk_plot.py
78
6117
#!/usr/bin/env python import sys, math import argparse from volk_test_funcs import * try: import matplotlib import matplotlib.pyplot as plt except ImportError: sys.stderr.write("Could not import Matplotlib (http://matplotlib.sourceforge.net/)\n") sys.exit(1) def main(): desc='Plot Volk performance results from a SQLite database. ' + \ 'Run one of the volk tests first (e.g, volk_math.py)' parser = argparse.ArgumentParser(description=desc) parser.add_argument('-D', '--database', type=str, default='volk_results.db', help='Database file to read data from [default: %(default)s]') parser.add_argument('-E', '--errorbars', action='store_true', default=False, help='Show error bars (1 standard dev.)') parser.add_argument('-P', '--plot', type=str, choices=['mean', 'min', 'max'], default='mean', help='Set the type of plot to produce [default: %(default)s]') parser.add_argument('-%', '--percent', type=str, default=None, metavar="table", help='Show percent difference to the given type [default: %(default)s]') args = parser.parse_args() # Set up global plotting properties matplotlib.rcParams['figure.subplot.bottom'] = 0.2 matplotlib.rcParams['figure.subplot.top'] = 0.95 matplotlib.rcParams['figure.subplot.right'] = 0.98 matplotlib.rcParams['ytick.labelsize'] = 16 matplotlib.rcParams['xtick.labelsize'] = 16 matplotlib.rcParams['legend.fontsize'] = 18 # Get list of tables to compare conn = create_connection(args.database) tables = list_tables(conn) M = len(tables) # Colors to distinguish each table in the bar graph # More than 5 tables will wrap around to the start. colors = ['b', 'r', 'g', 'm', 'k'] # Set up figure for plotting f0 = plt.figure(0, facecolor='w', figsize=(14,10)) s0 = f0.add_subplot(1,1,1) # Create a register of names that exist in all tables tmp_regs = [] for table in tables: # Get results from the next table res = get_results(conn, table[0]) tmp_regs.append(list()) for r in res: try: tmp_regs[-1].index(r['kernel']) except ValueError: tmp_regs[-1].append(r['kernel']) # Get only those names that are common in all tables name_reg = tmp_regs[0] for t in tmp_regs[1:]: name_reg = list(set(name_reg) & set(t)) name_reg.sort() # Pull the data out for each table into a dictionary # we can ref the table by it's name and the data associated # with a given kernel in name_reg by it's name. # This ensures there is no sorting issue with the data in the # dictionary, so the kernels are plotted against each other. 
table_data = dict() for i,table in enumerate(tables): # Get results from the next table res = get_results(conn, table[0]) data = dict() for r in res: data[r['kernel']] = r table_data[table[0]] = data if args.percent is not None: for i,t in enumerate(table_data): if args.percent == t: norm_data = [] for name in name_reg: if(args.plot == 'max'): norm_data.append(table_data[t][name]['max']) elif(args.plot == 'min'): norm_data.append(table_data[t][name]['min']) elif(args.plot == 'mean'): norm_data.append(table_data[t][name]['avg']) # Plot the results x0 = xrange(len(name_reg)) i = 0 for t in (table_data): ydata = [] stds = [] for name in name_reg: stds.append(math.sqrt(table_data[t][name]['var'])) if(args.plot == 'max'): ydata.append(table_data[t][name]['max']) elif(args.plot == 'min'): ydata.append(table_data[t][name]['min']) elif(args.plot == 'mean'): ydata.append(table_data[t][name]['avg']) if args.percent is not None: ydata = [-100*(y-n)/y for y,n in zip(ydata,norm_data)] if(args.percent != t): # makes x values for this data set placement # width of bars depends on number of comparisons wdth = 0.80/(M-1) x1 = [x + i*wdth for x in x0] i += 1 s0.bar(x1, ydata, width=wdth, color=colors[(i-1)%M], label=t, edgecolor='k', linewidth=2) else: # makes x values for this data set placement # width of bars depends on number of comparisons wdth = 0.80/M x1 = [x + i*wdth for x in x0] i += 1 if(args.errorbars is False): s0.bar(x1, ydata, width=wdth, color=colors[(i-1)%M], label=t, edgecolor='k', linewidth=2) else: s0.bar(x1, ydata, width=wdth, yerr=stds, color=colors[i%M], label=t, edgecolor='k', linewidth=2, error_kw={"ecolor": 'k', "capsize":5, "linewidth":2}) nitems = res[0]['nitems'] if args.percent is None: s0.set_ylabel("Processing time (sec) [{0:G} items]".format(nitems), fontsize=22, fontweight='bold', horizontalalignment='center') else: s0.set_ylabel("% Improvement over {0} [{1:G} items]".format( args.percent, nitems), fontsize=22, fontweight='bold') s0.legend() s0.set_xticks(x0) s0.set_xticklabels(name_reg) for label in s0.xaxis.get_ticklabels(): label.set_rotation(45) label.set_fontsize(16) plt.show() if __name__ == "__main__": main()
gpl-3.0
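Before plotting, it can help to check which result tables the script will find. A standard-library-only peek at the SQLite file (using the script's default database name; the exact column layout comes from volk_test_funcs and is not shown above) might look like:

import sqlite3

conn = sqlite3.connect('volk_results.db')
rows = conn.execute("SELECT name FROM sqlite_master WHERE type='table'")
print([r[0] for r in rows])  # names of the benchmark tables to be compared
conn.close()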
vkscool/nupic
examples/audiostream/audiostream_tp.py
12
9994
#!/usr/bin/env python # ---------------------------------------------------------------------- # Numenta Platform for Intelligent Computing (NuPIC) # Copyright (C) 2013, Numenta, Inc. Unless you have an agreement # with Numenta, Inc., for a separate license for this software code, the # following terms and conditions apply: # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License version 3 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see http://www.gnu.org/licenses. # # http://numenta.org/licenses/ # ---------------------------------------------------------------------- """ See README.md for details. """ """ numpy - the language of pyaudio (& everything else) pyaudio - access to the mic via the soundcard pyplot - to plot the sound frequencies bitmaparray - encodes an array of indices into an SDR TP10X2 - the C++ optimized temporal pooler (TP) """ import numpy import pyaudio import matplotlib.pyplot as plt from nupic.encoders.sparse_pass_through_encoder import SparsePassThroughEncoder from nupic.research.TP10X2 import TP10X2 as TP class Visualizations: def calcAnomaly(self, actual, predicted): """ Calculates the anomaly of two SDRs Uses the equation presented on the wiki: https://github.com/numenta/nupic/wiki/Anomaly-Score-Memo To put this in terms of the temporal pooler: A is the actual input array at a given timestep P is the predicted array that was produced from the previous timestep(s) [A - (A && P)] / [A] Rephrasing as questions: What bits are on in A that are not on in P? How does that compare to total on bits in A? Outputs 0 is there's no difference between P and A. Outputs 1 if P and A are totally distinct. Not a perfect metric - it doesn't credit proximity Next step: combine with a metric for a spatial pooler """ combined = numpy.logical_and(actual, predicted) delta = numpy.logical_xor(actual,combined) delta_score = sum(delta) actual_score = float(sum(actual)) return delta_score / actual_score def compareArray(self, actual, predicted): """ Produce an array that compares the actual & predicted 'A' - actual 'P' - predicted 'E' - expected (both actual & predicted ' ' - neither an input nor predicted """ compare = [] for i in range(actual.size): if actual[i] and predicted[i]: compare.append('E') elif actual[i]: compare.append('A') elif predicted[i]: compare.append('P') else: compare.append(' ') return compare def hashtagAnomaly(self, anomaly): """ Basic printout method to visualize the anomaly score (scale: 1 - 50 #'s) """ hashcount = '#' for i in range(int(anomaly / 0.02)): hashcount += '#' for j in range(int((1 - anomaly) / 0.02)): hashcount += '.' 
return hashcount class AudioStream: def __init__(self): """ Instantiate temporal pooler, encoder, audio sampler, filter, & freq plot """ self.vis = Visualizations() """ The number of columns in the input and therefore the TP 2**9 = 512 Trial and error pulled that out numCols should be tested during benchmarking """ self.numCols = 2**9 sparsity = 0.10 self.numInput = int(self.numCols * sparsity) """ Create a bit map encoder From the encoder's __init__ method: 1st arg: the total bits in input 2nd arg: the number of bits used to encode each input bit """ self.e = SparsePassThroughEncoder(self.numCols, 1) """ Sampling details rate: The sampling rate in Hz of my soundcard buffersize: The size of the array to which we will save audio segments (2^12 = 4096 is very good) secToRecord: The length of each sampling buffersToRecord: how many multiples of buffers are we recording? """ rate=44100 secToRecord=.1 self.buffersize=2**12 self.buffersToRecord=int(rate*secToRecord/self.buffersize) if not self.buffersToRecord: self.buffersToRecord=1 """ Filters in Hertz highHertz: lower limit of the bandpass filter, in Hertz lowHertz: upper limit of the bandpass filter, in Hertz max lowHertz = (buffersize / 2 - 1) * rate / buffersize """ highHertz = 500 lowHertz = 10000 """ Convert filters from Hertz to bins highpass: convert the highHertz into a bin for the FFT lowpass: convert the lowHertz into a bin for the FFt NOTES: highpass is at least the 1st bin since most mics only pick up >=20Hz lowpass is no higher than buffersize/2 - 1 (highest array index) passband needs to be wider than size of numInput - not checking for that """ self.highpass = max(int(highHertz * self.buffersize / rate),1) self.lowpass = min(int(lowHertz * self.buffersize / rate), self.buffersize/2 - 1) """ The call to create the temporal pooler region """ self.tp = TP(numberOfCols=self.numCols, cellsPerColumn=4, initialPerm=0.5, connectedPerm=0.5, minThreshold=10, newSynapseCount=10, permanenceInc=0.1, permanenceDec=0.07, activationThreshold=8, globalDecay=0.02, burnIn=2, checkSynapseConsistency=False, pamLength=100) """ Creating the audio stream from our mic """ p = pyaudio.PyAudio() self.inStream = p.open(format=pyaudio.paInt32,channels=1,rate=rate,input=True,frames_per_buffer=self.buffersize) """ Setting up the array that will handle the timeseries of audio data from our input """ self.audio = numpy.empty((self.buffersToRecord*self.buffersize),dtype="uint32") """ Print out the inputs """ print "Number of columns:\t" + str(self.numCols) print "Max size of input:\t" + str(self.numInput) print "Sampling rate (Hz):\t" + str(rate) print "Passband filter (Hz):\t" + str(highHertz) + " - " + str(lowHertz) print "Passband filter (bin):\t" + str(self.highpass) + " - " + str(self.lowpass) print "Bin difference:\t\t" + str(self.lowpass - self.highpass) print "Buffersize:\t\t" + str(self.buffersize) """ Setup the plot Use the bandpass filter frequency range as the x-axis Rescale the y-axis """ plt.ion() bin = range(self.highpass,self.lowpass) xs = numpy.arange(len(bin))*rate/self.buffersize + highHertz self.freqPlot = plt.plot(xs,xs)[0] plt.ylim(0, 10**12) while True: self.processAudio() def processAudio (self): """ Sample audio, encode, send it to the TP Pulls the audio from the mic Conditions that audio as an SDR Computes a prediction via the TP Update the visualizations """ """ Cycle through the multiples of the buffers we're sampling Sample audio to store for each frame in buffersize Mic voltage-level timeseries is saved as 32-bit binary Convert that 
32-bit binary into integers, and save to array for the FFT """ for i in range(self.buffersToRecord): try: audioString = self.inStream.read(self.buffersize) except IOError: print "Overflow error from 'audiostring = inStream.read(buffersize)'. Try decreasing buffersize." quit() self.audio[i*self.buffersize:(i + 1)*self.buffersize] = numpy.fromstring(audioString,dtype = "uint32") """ Get int array of strength for each bin of frequencies via fast fourier transform Get the indices of the strongest frequencies (the top 'numInput') Scale the indices so that the frequencies fit to within numCols Pick out the unique indices (we've reduced the mapping, so we likely have multiples) Encode those indices into an SDR via the SparsePassThroughEncoder Cast the SDR as a float for the TP """ ys = self.fft(self.audio, self.highpass, self.lowpass) fs = numpy.sort(ys.argsort()[-self.numInput:]) rfs = fs.astype(numpy.float32) / (self.lowpass - self.highpass) * self.numCols ufs = numpy.unique(rfs) actualInt = self.e.encode(ufs) actual = actualInt.astype(numpy.float32) """ Pass the SDR to the TP Collect the prediction SDR from the TP Pass the prediction & actual SDRS to the anomaly calculator & array comparer Update the frequency plot """ self.tp.compute(actual, enableLearn = True, computeInfOutput = True) predictedInt = self.tp.getPredictedState().max(axis=1) compare = self.vis.compareArray(actualInt, predictedInt) anomaly = self.vis.calcAnomaly(actualInt, predictedInt) print "." . join(compare) print self.vis.hashtagAnomaly(anomaly) self.freqPlot.set_ydata(ys) plt.show(block = False) plt.draw() def fft(self, audio, highpass, lowpass): """ Fast fourier transform conditioning Output: 'output' contains the strength of each frequency in the audio signal frequencies are marked by its position in 'output': frequency = index * rate / buffesize output.size = buffersize/2 Method: Use numpy's FFT (numpy.fft.fft) Find the magnitude of the complex numbers returned (abs value) Split the FFT array in half, because we have mirror frequencies (they're the complex conjugates) Use just the first half to apply the bandpass filter Great info here: http://stackoverflow.com/questions/4364823/how-to-get-frequency-from-fft-result """ left,right = numpy.split(numpy.abs(numpy.fft.fft(audio)),2) output = left[highpass:lowpass] return output audiostream = AudioStream()
gpl-3.0
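The anomaly score used above, [A - (A && P)] / [A], can be tried without a microphone or a NuPIC install; the sketch below restates the same formula on two made-up SDRs.

import numpy

def calc_anomaly(actual, predicted):
    # What fraction of the active bits in A were not predicted in P?
    combined = numpy.logical_and(actual, predicted)
    delta = numpy.logical_xor(actual, combined)
    return float(numpy.sum(delta)) / float(numpy.sum(actual))

actual = numpy.zeros(16, dtype=bool)
predicted = numpy.zeros(16, dtype=bool)
actual[[1, 3, 5]] = True
predicted[[3, 5, 7]] = True
print(calc_anomaly(actual, predicted))  # 1 of 3 active bits unpredicted -> ~0.33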
xapharius/mrEnsemble
Engine/src/algorithms/neuralnetwork/convolutional/conv_net.py
2
8771
""" Created on Jul 22, 2014 @author: Simon Hohberg """ import numpy as np from algorithms.neuralnetwork.feedforward.multilayer_perceptron import MultilayerPerceptron, \ SimpleUpdate import utils.numpyutils as nputils import copy import time from layers import ConvLayer, MaxPoolLayer from utils import logging from algorithms.AbstractAlgorithm import AbstractAlgorithm from datahandler.numerical.NumericalDataSet import NumericalDataSet import matplotlib.pyplot as plt class ConvNet(AbstractAlgorithm): def __init__(self, iterations=1, learning_rate=0.5, topo=[('c', 3, 4), ('p', 2), ('c', 3, 4), ('p', 9), ('mlp', 4, 4, 2)], activation_func=(np.tanh, nputils.tanh_deriv)): """ Creates a new convolutional neural network with the given topology (architecture), learning rate and number of iterations. :param iterations: number of iterations for training. :param learning_rate: rate for updating the weights :param topo: defines the architecture of the net. It is a list of tuples. Each tuple represents a layer, where the first element is a character that specifies the type of layer. E.g. 'c' convolutional layer, 'p' pooling layer, 'mlp' fully connected conventional neural network. The next elements in the tuple are layer specific. Convolutional: 2nd element defines the kernel size, e.g. 3 for a 3x3 kernel. 3rd element specifies the number of maps in the layer. Pooling: 2nd element defines the pool patch size, e.g. 2 for a pool patch size of 2x2. MLP: each element defines the layer size for the network. A complete example looks like this: [('c', 3, 4), ('p', 2), ('c', 3, 4), ('p', 9), ('mlp', 4, 4, 2)] """ self.split_ratio = 0.8 self.iterations = iterations self.learning_rate = learning_rate self.layers = [] self.activ_func = activation_func[0] self.deriv_acitv_func = activation_func[1] num_prev_maps = 1 self.topo = topo # parse topology for layer in topo: # convolutional layer if layer[0] == 'c': conv_layer = ConvLayer(num_prev_maps=num_prev_maps, kernel_size=layer[1], num_maps=layer[2]) self.add_layer(conv_layer) num_prev_maps = layer[2] # pooling layer elif layer[0] == 'p': self.add_layer(MaxPoolLayer(layer[1], num_prev_maps)) # multilayer perceptron elif layer[0] == 'mlp': self.mlp = MultilayerPerceptron(list(layer[1:]), do_classification=True, update_method=SimpleUpdate(self.learning_rate), activ_func=(self.activ_func, self.deriv_acitv_func)) def add_layer(self, layer): """ Adds the given layer to this network. :param layer: layer that is added """ self.layers.append(layer) def feedforward(self, inputs): """ Feed input forward through net calculating the ouput of each layer. :param inputs: 3D numpy array (usually a list of images) :return: List of 3D numpy arrays each representing the output of a layer except the first array in the list which is the input. """ outputs = [inputs] for layer in self.layers: outputs.append(layer.feedforward(outputs[-1])) outputs.extend(self.mlp.feedforward(outputs[-1])[1:]) return outputs def predict(self, inputs): predictions = self.predict_extended(inputs) if predictions[0].shape == (1,1): #binary output predictions = np.array(predictions).ravel() predictions[predictions <= 0] = 0 predictions[predictions > 0] = 1 return predictions[:, np.newaxis].astype(int) # multiclass sparse = np.zeros((len(predictions), predictions[0].shape[1])) for ix, _ in enumerate(sparse): sparse[ix][predictions[ix].argmax()] = 1 assert sparse.sum() == len(predictions) return sparse def predict_extended(self, inputs): """ Predicts targets for given data set. 
@param data_set: data Set inheriting AbstractDataSet :return: List of predictions, i.e. output of this net for each observation in the data set. """ data_set = NumericalDataSet(inputs) predictions = [] # loop through dataset for observation, _ in data_set.gen_observations( ): # make sure it is a numpy array input_arr = np.array(observation) outputs = self.feedforward(input_arr) predictions.append(outputs[-1]) return predictions def predict_single(self, input_arr): """ Predict class for a single observation. :param input_arr: Observation :return: Prediction for given observation """ return self.feedforward(input_arr)[-1] def fit(self, inputs, targets): """ Train net with given data set. :param data_set: Data set for training. n times random sampling for online learning """ split_point = int(len(inputs) * self.split_ratio) data_set = NumericalDataSet(inputs[:split_point], targets[:split_point]) val_in = inputs[split_point:] val_targets = targets[split_point:] prev_layers = None prev_mlp = None self.train_acc_err = [] self.val_acc_err = [] for it in range(self.iterations): # randomly select observations as many times as there are # observations it_error = 0 start = time.time() for _ in range(data_set.get_nr_observations()): input_arr, target_arr = data_set.rand_observation() # feed-forward outputs = self.feedforward(input_arr) current_error = nputils.calc_squared_error(target_arr, outputs[-1]) it_error += current_error # mlp backpropagation and gradient descent mlp_outputs = outputs[-len(self.mlp.arr_layer_sizes):] mlp_deltas = self.mlp.backpropagation(mlp_outputs, target_arr) mlp_weight_updates = self.mlp.calculate_weight_updates(mlp_deltas, mlp_outputs) self.mlp.update_method.perform_update(self.mlp.weights_arr, mlp_weight_updates, current_error) # layer backpropagation and gradient descent # calculate backpropagated error of first mlp layer backprop_error = np.array([[x] for x in np.dot(self.mlp.weights_arr[0], mlp_deltas[0].transpose())]) for layer in reversed(self.layers): backprop_error = layer.backpropagate(backprop_error) # calculate the weight gradients and update the weights for layer in self.layers: layer.calc_gradients() layer.update(self.learning_rate) avg_error = it_error / data_set.nrObservations acc_err = self._accuracy_err(inputs, targets) self.train_acc_err.append(acc_err) #validation error acc_err = self._accuracy_err(val_in, val_targets) self.val_acc_err.append(acc_err) logging.info("Iteration #{} MSE: {}, TrainErr: {:.6f}, ValErr: {:.6f} ({:.2f}s)\n"\ .format(it + 1, avg_error, self.train_acc_err[-1], self.val_acc_err[-1], time.time()-start)) #break cond if it > 3 and val_in is not None and self.val_acc_err[-1] > self.val_acc_err[-4]: # revert self.layers = prev_layers self.mlp = prev_mlp plt.figure() plt.plot(self.train_acc_err) plt.plot(self.val_acc_err) plt.show(block=False) break #prev if it > 0: prev_layers = copy.deepcopy(self.layers) prev_mlp = copy.deepcopy(self.mlp) def _accuracy_err(self, inputs, targets): if targets.shape[1] == 1: predictions = self.predict(inputs) acc_err = 1 - (predictions == targets).sum() / float(len(inputs)) else: predictions = self.predict_extended(inputs) acc_err = 1 - ((np.vstack(predictions)).argmax(axis=1)==targets.argmax(axis=1)).sum() / float(len(inputs)) return acc_err def set_params(self, parameters): pass def get_params(self): dct = {} dct["learning_rate"] = self.learning_rate dct["topo"] = self.topo return dct
mit
mattgiguere/scikit-learn
sklearn/utils/extmath.py
142
21102
""" Extended math utilities. """ # Authors: Gael Varoquaux # Alexandre Gramfort # Alexandre T. Passos # Olivier Grisel # Lars Buitinck # Stefan van der Walt # Kyle Kastner # License: BSD 3 clause from __future__ import division from functools import partial import warnings import numpy as np from scipy import linalg from scipy.sparse import issparse from . import check_random_state from .fixes import np_version from ._logistic_sigmoid import _log_logistic_sigmoid from ..externals.six.moves import xrange from .sparsefuncs_fast import csr_row_norms from .validation import check_array, NonBLASDotWarning def norm(x): """Compute the Euclidean or Frobenius norm of x. Returns the Euclidean norm when x is a vector, the Frobenius norm when x is a matrix (2-d array). More precise than sqrt(squared_norm(x)). """ x = np.asarray(x) nrm2, = linalg.get_blas_funcs(['nrm2'], [x]) return nrm2(x) # Newer NumPy has a ravel that needs less copying. if np_version < (1, 7, 1): _ravel = np.ravel else: _ravel = partial(np.ravel, order='K') def squared_norm(x): """Squared Euclidean or Frobenius norm of x. Returns the Euclidean norm when x is a vector, the Frobenius norm when x is a matrix (2-d array). Faster than norm(x) ** 2. """ x = _ravel(x) return np.dot(x, x) def row_norms(X, squared=False): """Row-wise (squared) Euclidean norm of X. Equivalent to np.sqrt((X * X).sum(axis=1)), but also supports CSR sparse matrices and does not create an X.shape-sized temporary. Performs no input validation. """ if issparse(X): norms = csr_row_norms(X) else: norms = np.einsum('ij,ij->i', X, X) if not squared: np.sqrt(norms, norms) return norms def fast_logdet(A): """Compute log(det(A)) for A symmetric Equivalent to : np.log(nl.det(A)) but more robust. It returns -Inf if det(A) is non positive or is not defined. """ sign, ld = np.linalg.slogdet(A) if not sign > 0: return -np.inf return ld def _impose_f_order(X): """Helper Function""" # important to access flags instead of calling np.isfortran, # this catches corner cases. if X.flags.c_contiguous: return check_array(X.T, copy=False, order='F'), True else: return check_array(X, copy=False, order='F'), False def _fast_dot(A, B): if B.shape[0] != A.shape[A.ndim - 1]: # check adopted from '_dotblas.c' raise ValueError if A.dtype != B.dtype or any(x.dtype not in (np.float32, np.float64) for x in [A, B]): warnings.warn('Data must be of same type. Supported types ' 'are 32 and 64 bit float. ' 'Falling back to np.dot.', NonBLASDotWarning) raise ValueError if min(A.shape) == 1 or min(B.shape) == 1 or A.ndim != 2 or B.ndim != 2: raise ValueError # scipy 0.9 compliant API dot = linalg.get_blas_funcs(['gemm'], (A, B))[0] A, trans_a = _impose_f_order(A) B, trans_b = _impose_f_order(B) return dot(alpha=1.0, a=A, b=B, trans_a=trans_a, trans_b=trans_b) def _have_blas_gemm(): try: linalg.get_blas_funcs(['gemm']) return True except (AttributeError, ValueError): warnings.warn('Could not import BLAS, falling back to np.dot') return False # Only use fast_dot for older NumPy; newer ones have tackled the speed issue. if np_version < (1, 7, 2) and _have_blas_gemm(): def fast_dot(A, B): """Compute fast dot products directly calling BLAS. This function calls BLAS directly while warranting Fortran contiguity. This helps avoiding extra copies `np.dot` would have created. For details see section `Linear Algebra on large Arrays`: http://wiki.scipy.org/PerformanceTips Parameters ---------- A, B: instance of np.ndarray Input arrays. Arrays are supposed to be of the same dtype and to have exactly 2 dimensions. 
Currently only floats are supported. In case these requirements aren't met np.dot(A, B) is returned instead. To activate the related warning issued in this case execute the following lines of code: >> import warnings >> from sklearn.utils.validation import NonBLASDotWarning >> warnings.simplefilter('always', NonBLASDotWarning) """ try: return _fast_dot(A, B) except ValueError: # Maltyped or malformed data. return np.dot(A, B) else: fast_dot = np.dot def density(w, **kwargs): """Compute density of a sparse vector Return a value between 0 and 1 """ if hasattr(w, "toarray"): d = float(w.nnz) / (w.shape[0] * w.shape[1]) else: d = 0 if w is None else float((w != 0).sum()) / w.size return d def safe_sparse_dot(a, b, dense_output=False): """Dot product that handle the sparse matrix case correctly Uses BLAS GEMM as replacement for numpy.dot where possible to avoid unnecessary copies. """ if issparse(a) or issparse(b): ret = a * b if dense_output and hasattr(ret, "toarray"): ret = ret.toarray() return ret else: return fast_dot(a, b) def randomized_range_finder(A, size, n_iter, random_state=None): """Computes an orthonormal matrix whose range approximates the range of A. Parameters ---------- A: 2D array The input data matrix size: integer Size of the return array n_iter: integer Number of power iterations used to stabilize the result random_state: RandomState or an int seed (0 by default) A random number generator instance Returns ------- Q: 2D array A (size x size) projection matrix, the range of which approximates well the range of the input matrix A. Notes ----- Follows Algorithm 4.3 of Finding structure with randomness: Stochastic algorithms for constructing approximate matrix decompositions Halko, et al., 2009 (arXiv:909) http://arxiv.org/pdf/0909.4061 """ random_state = check_random_state(random_state) # generating random gaussian vectors r with shape: (A.shape[1], size) R = random_state.normal(size=(A.shape[1], size)) # sampling the range of A using by linear projection of r Y = safe_sparse_dot(A, R) del R # perform power iterations with Y to further 'imprint' the top # singular vectors of A in Y for i in xrange(n_iter): Y = safe_sparse_dot(A, safe_sparse_dot(A.T, Y)) # extracting an orthonormal basis of the A range samples Q, R = linalg.qr(Y, mode='economic') return Q def randomized_svd(M, n_components, n_oversamples=10, n_iter=0, transpose='auto', flip_sign=True, random_state=0): """Computes a truncated randomized SVD Parameters ---------- M: ndarray or sparse matrix Matrix to decompose n_components: int Number of singular values and vectors to extract. n_oversamples: int (default is 10) Additional number of random vectors to sample the range of M so as to ensure proper conditioning. The total number of random vectors used to find the range of M is n_components + n_oversamples. n_iter: int (default is 0) Number of power iterations (can be used to deal with very noisy problems). transpose: True, False or 'auto' (default) Whether the algorithm should be applied to M.T instead of M. The result should approximately be the same. The 'auto' mode will trigger the transposition if M.shape[1] > M.shape[0] since this implementation of randomized SVD tend to be a little faster in that case). flip_sign: boolean, (True by default) The output of a singular value decomposition is only unique up to a permutation of the signs of the singular vectors. If `flip_sign` is set to `True`, the sign ambiguity is resolved by making the largest loadings for each component in the left singular vectors positive. 
random_state: RandomState or an int seed (0 by default) A random number generator instance to make behavior Notes ----- This algorithm finds a (usually very good) approximate truncated singular value decomposition using randomization to speed up the computations. It is particularly fast on large matrices on which you wish to extract only a small number of components. References ---------- * Finding structure with randomness: Stochastic algorithms for constructing approximate matrix decompositions Halko, et al., 2009 http://arxiv.org/abs/arXiv:0909.4061 * A randomized algorithm for the decomposition of matrices Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert """ random_state = check_random_state(random_state) n_random = n_components + n_oversamples n_samples, n_features = M.shape if transpose == 'auto' and n_samples > n_features: transpose = True if transpose: # this implementation is a bit faster with smaller shape[1] M = M.T Q = randomized_range_finder(M, n_random, n_iter, random_state) # project M to the (k + p) dimensional space using the basis vectors B = safe_sparse_dot(Q.T, M) # compute the SVD on the thin matrix: (k + p) wide Uhat, s, V = linalg.svd(B, full_matrices=False) del B U = np.dot(Q, Uhat) if flip_sign: U, V = svd_flip(U, V) if transpose: # transpose back the results according to the input convention return V[:n_components, :].T, s[:n_components], U[:, :n_components].T else: return U[:, :n_components], s[:n_components], V[:n_components, :] def logsumexp(arr, axis=0): """Computes the sum of arr assuming arr is in the log domain. Returns log(sum(exp(arr))) while minimizing the possibility of over/underflow. Examples -------- >>> import numpy as np >>> from sklearn.utils.extmath import logsumexp >>> a = np.arange(10) >>> np.log(np.sum(np.exp(a))) 9.4586297444267107 >>> logsumexp(a) 9.4586297444267107 """ arr = np.rollaxis(arr, axis) # Use the max to normalize, as with the log this is what accumulates # the less errors vmax = arr.max(axis=0) out = np.log(np.sum(np.exp(arr - vmax), axis=0)) out += vmax return out def weighted_mode(a, w, axis=0): """Returns an array of the weighted modal (most common) value in a If there is more than one such value, only the first is returned. The bin-count for the modal bins is also returned. This is an extension of the algorithm in scipy.stats.mode. Parameters ---------- a : array_like n-dimensional array of which to find mode(s). w : array_like n-dimensional array of weights for each value axis : int, optional Axis along which to operate. Default is 0, i.e. the first axis. Returns ------- vals : ndarray Array of modal values. score : ndarray Array of weighted counts for each mode. Examples -------- >>> from sklearn.utils.extmath import weighted_mode >>> x = [4, 1, 4, 2, 4, 2] >>> weights = [1, 1, 1, 1, 1, 1] >>> weighted_mode(x, weights) (array([ 4.]), array([ 3.])) The value 4 appears three times: with uniform weights, the result is simply the mode of the distribution. >>> weights = [1, 3, 0.5, 1.5, 1, 2] # deweight the 4's >>> weighted_mode(x, weights) (array([ 2.]), array([ 3.5])) The value 2 has the highest score: it appears twice with weights of 1.5 and 2: the sum of these is 3. 
See Also -------- scipy.stats.mode """ if axis is None: a = np.ravel(a) w = np.ravel(w) axis = 0 else: a = np.asarray(a) w = np.asarray(w) axis = axis if a.shape != w.shape: w = np.zeros(a.shape, dtype=w.dtype) + w scores = np.unique(np.ravel(a)) # get ALL unique values testshape = list(a.shape) testshape[axis] = 1 oldmostfreq = np.zeros(testshape) oldcounts = np.zeros(testshape) for score in scores: template = np.zeros(a.shape) ind = (a == score) template[ind] = w[ind] counts = np.expand_dims(np.sum(template, axis), axis) mostfrequent = np.where(counts > oldcounts, score, oldmostfreq) oldcounts = np.maximum(counts, oldcounts) oldmostfreq = mostfrequent return mostfrequent, oldcounts def pinvh(a, cond=None, rcond=None, lower=True): """Compute the (Moore-Penrose) pseudo-inverse of a hermetian matrix. Calculate a generalized inverse of a symmetric matrix using its eigenvalue decomposition and including all 'large' eigenvalues. Parameters ---------- a : array, shape (N, N) Real symmetric or complex hermetian matrix to be pseudo-inverted cond : float or None, default None Cutoff for 'small' eigenvalues. Singular values smaller than rcond * largest_eigenvalue are considered zero. If None or -1, suitable machine precision is used. rcond : float or None, default None (deprecated) Cutoff for 'small' eigenvalues. Singular values smaller than rcond * largest_eigenvalue are considered zero. If None or -1, suitable machine precision is used. lower : boolean Whether the pertinent array data is taken from the lower or upper triangle of a. (Default: lower) Returns ------- B : array, shape (N, N) Raises ------ LinAlgError If eigenvalue does not converge Examples -------- >>> import numpy as np >>> a = np.random.randn(9, 6) >>> a = np.dot(a, a.T) >>> B = pinvh(a) >>> np.allclose(a, np.dot(a, np.dot(B, a))) True >>> np.allclose(B, np.dot(B, np.dot(a, B))) True """ a = np.asarray_chkfinite(a) s, u = linalg.eigh(a, lower=lower) if rcond is not None: cond = rcond if cond in [None, -1]: t = u.dtype.char.lower() factor = {'f': 1E3, 'd': 1E6} cond = factor[t] * np.finfo(t).eps # unlike svd case, eigh can lead to negative eigenvalues above_cutoff = (abs(s) > cond * np.max(abs(s))) psigma_diag = np.zeros_like(s) psigma_diag[above_cutoff] = 1.0 / s[above_cutoff] return np.dot(u * psigma_diag, np.conjugate(u).T) def cartesian(arrays, out=None): """Generate a cartesian product of input arrays. Parameters ---------- arrays : list of array-like 1-D arrays to form the cartesian product of. out : ndarray Array to place the cartesian product in. Returns ------- out : ndarray 2-D array of shape (M, len(arrays)) containing cartesian products formed of input arrays. Examples -------- >>> cartesian(([1, 2, 3], [4, 5], [6, 7])) array([[1, 4, 6], [1, 4, 7], [1, 5, 6], [1, 5, 7], [2, 4, 6], [2, 4, 7], [2, 5, 6], [2, 5, 7], [3, 4, 6], [3, 4, 7], [3, 5, 6], [3, 5, 7]]) """ arrays = [np.asarray(x) for x in arrays] shape = (len(x) for x in arrays) dtype = arrays[0].dtype ix = np.indices(shape) ix = ix.reshape(len(arrays), -1).T if out is None: out = np.empty_like(ix, dtype=dtype) for n, arr in enumerate(arrays): out[:, n] = arrays[n][ix[:, n]] return out def svd_flip(u, v, u_based_decision=True): """Sign correction to ensure deterministic output from SVD. Adjusts the columns of u and the rows of v such that the loadings in the columns in u that are largest in absolute value are always positive. 
Parameters ---------- u, v : ndarray u and v are the output of `linalg.svd` or `sklearn.utils.extmath.randomized_svd`, with matching inner dimensions so one can compute `np.dot(u * s, v)`. u_based_decision : boolean, (default=True) If True, use the columns of u as the basis for sign flipping. Otherwise, use the rows of v. The choice of which variable to base the decision on is generally algorithm dependent. Returns ------- u_adjusted, v_adjusted : arrays with the same dimensions as the input. """ if u_based_decision: # columns of u, rows of v max_abs_cols = np.argmax(np.abs(u), axis=0) signs = np.sign(u[max_abs_cols, xrange(u.shape[1])]) u *= signs v *= signs[:, np.newaxis] else: # rows of v, columns of u max_abs_rows = np.argmax(np.abs(v), axis=1) signs = np.sign(v[xrange(v.shape[0]), max_abs_rows]) u *= signs v *= signs[:, np.newaxis] return u, v def log_logistic(X, out=None): """Compute the log of the logistic function, ``log(1 / (1 + e ** -x))``. This implementation is numerically stable because it splits positive and negative values:: -log(1 + exp(-x_i)) if x_i > 0 x_i - log(1 + exp(x_i)) if x_i <= 0 For the ordinary logistic function, use ``sklearn.utils.fixes.expit``. Parameters ---------- X: array-like, shape (M, N) Argument to the logistic function out: array-like, shape: (M, N), optional: Preallocated output array. Returns ------- out: array, shape (M, N) Log of the logistic function evaluated at every point in x Notes ----- See the blog post describing this implementation: http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression/ """ is_1d = X.ndim == 1 X = check_array(X, dtype=np.float) n_samples, n_features = X.shape if out is None: out = np.empty_like(X) _log_logistic_sigmoid(n_samples, n_features, X, out) if is_1d: return np.squeeze(out) return out def safe_min(X): """Returns the minimum value of a dense or a CSR/CSC matrix. Adapated from http://stackoverflow.com/q/13426580 """ if issparse(X): if len(X.data) == 0: return 0 m = X.data.min() return m if X.getnnz() == X.size else min(m, 0) else: return X.min() def make_nonnegative(X, min_value=0): """Ensure `X.min()` >= `min_value`.""" min_ = safe_min(X) if min_ < min_value: if issparse(X): raise ValueError("Cannot make the data matrix" " nonnegative because it is sparse." " Adding a value to every entry would" " make it no longer sparse.") X = X + (min_value - min_) return X def _batch_mean_variance_update(X, old_mean, old_variance, old_sample_count): """Calculate an average mean update and a Youngs and Cramer variance update. From the paper "Algorithms for computing the sample variance: analysis and recommendations", by Chan, Golub, and LeVeque. Parameters ---------- X : array-like, shape (n_samples, n_features) Data to use for variance update old_mean : array-like, shape: (n_features,) old_variance : array-like, shape: (n_features,) old_sample_count : int Returns ------- updated_mean : array, shape (n_features,) updated_variance : array, shape (n_features,) updated_sample_count : int References ---------- T. Chan, G. Golub, R. LeVeque. Algorithms for computing the sample variance: recommendations, The American Statistician, Vol. 37, No. 3, pp. 
242-247 """ new_sum = X.sum(axis=0) new_variance = X.var(axis=0) * X.shape[0] old_sum = old_mean * old_sample_count n_samples = X.shape[0] updated_sample_count = old_sample_count + n_samples partial_variance = old_sample_count / (n_samples * updated_sample_count) * ( n_samples / old_sample_count * old_sum - new_sum) ** 2 unnormalized_variance = old_variance * old_sample_count + new_variance + \ partial_variance return ((old_sum + new_sum) / updated_sample_count, unnormalized_variance / updated_sample_count, updated_sample_count) def _deterministic_vector_sign_flip(u): """Modify the sign of vectors for reproducibility Flips the sign of elements of all the vectors (rows of u) such that the absolute maximum element of each vector is positive. Parameters ---------- u : ndarray Array with vectors as its rows. Returns ------- u_flipped : ndarray with same shape as u Array with the sign flipped vectors as its rows. """ max_abs_rows = np.argmax(np.abs(u), axis=1) signs = np.sign(u[range(u.shape[0]), max_abs_rows]) u *= signs[:, np.newaxis] return u
bsd-3-clause
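A small, self-contained exercise of randomized_svd from the module above; the matrix and component count are arbitrary, chosen only to show the call shape.

import numpy as np
from sklearn.utils.extmath import randomized_svd

rng = np.random.RandomState(0)
M = rng.rand(100, 20)                      # arbitrary dense test matrix
U, s, Vt = randomized_svd(M, n_components=5, random_state=0)
print(U.shape, s.shape, Vt.shape)          # (100, 5) (5,) (5, 20)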
trankmichael/scikit-learn
sklearn/tests/test_cross_validation.py
70
41943
"""Test the cross_validation module""" from __future__ import division import warnings import numpy as np from scipy.sparse import coo_matrix from scipy import stats from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_false from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_less from sklearn.utils.testing import assert_not_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_warns_message from sklearn.utils.testing import ignore_warnings from sklearn.utils.mocking import CheckingClassifier, MockDataFrame from sklearn import cross_validation as cval from sklearn.datasets import make_regression from sklearn.datasets import load_boston from sklearn.datasets import load_digits from sklearn.datasets import load_iris from sklearn.metrics import explained_variance_score from sklearn.metrics import make_scorer from sklearn.metrics import precision_score from sklearn.externals import six from sklearn.externals.six.moves import zip from sklearn.linear_model import Ridge from sklearn.neighbors import KNeighborsClassifier from sklearn.svm import SVC from sklearn.cluster import KMeans from sklearn.preprocessing import Imputer, LabelBinarizer from sklearn.pipeline import Pipeline class MockClassifier(object): """Dummy classifier to test the cross-validation""" def __init__(self, a=0, allow_nd=False): self.a = a self.allow_nd = allow_nd def fit(self, X, Y=None, sample_weight=None, class_prior=None, sparse_sample_weight=None, sparse_param=None, dummy_int=None, dummy_str=None, dummy_obj=None, callback=None): """The dummy arguments are to test that this fit function can accept non-array arguments through cross-validation, such as: - int - str (this is actually array-like) - object - function """ self.dummy_int = dummy_int self.dummy_str = dummy_str self.dummy_obj = dummy_obj if callback is not None: callback(self) if self.allow_nd: X = X.reshape(len(X), -1) if X.ndim >= 3 and not self.allow_nd: raise ValueError('X cannot be d') if sample_weight is not None: assert_true(sample_weight.shape[0] == X.shape[0], 'MockClassifier extra fit_param sample_weight.shape[0]' ' is {0}, should be {1}'.format(sample_weight.shape[0], X.shape[0])) if class_prior is not None: assert_true(class_prior.shape[0] == len(np.unique(y)), 'MockClassifier extra fit_param class_prior.shape[0]' ' is {0}, should be {1}'.format(class_prior.shape[0], len(np.unique(y)))) if sparse_sample_weight is not None: fmt = ('MockClassifier extra fit_param sparse_sample_weight' '.shape[0] is {0}, should be {1}') assert_true(sparse_sample_weight.shape[0] == X.shape[0], fmt.format(sparse_sample_weight.shape[0], X.shape[0])) if sparse_param is not None: fmt = ('MockClassifier extra fit_param sparse_param.shape ' 'is ({0}, {1}), should be ({2}, {3})') assert_true(sparse_param.shape == P_sparse.shape, fmt.format(sparse_param.shape[0], sparse_param.shape[1], P_sparse.shape[0], P_sparse.shape[1])) return self def predict(self, T): if self.allow_nd: T = T.reshape(len(T), -1) return T[:, 0] def score(self, X=None, Y=None): return 1. 
/ (1 + np.abs(self.a)) def get_params(self, deep=False): return {'a': self.a, 'allow_nd': self.allow_nd} X = np.ones((10, 2)) X_sparse = coo_matrix(X) W_sparse = coo_matrix((np.array([1]), (np.array([1]), np.array([0]))), shape=(10, 1)) P_sparse = coo_matrix(np.eye(5)) y = np.arange(10) // 2 ############################################################################## # Tests def check_valid_split(train, test, n_samples=None): # Use python sets to get more informative assertion failure messages train, test = set(train), set(test) # Train and test split should not overlap assert_equal(train.intersection(test), set()) if n_samples is not None: # Check that the union of train an test split cover all the indices assert_equal(train.union(test), set(range(n_samples))) def check_cv_coverage(cv, expected_n_iter=None, n_samples=None): # Check that a all the samples appear at least once in a test fold if expected_n_iter is not None: assert_equal(len(cv), expected_n_iter) else: expected_n_iter = len(cv) collected_test_samples = set() iterations = 0 for train, test in cv: check_valid_split(train, test, n_samples=n_samples) iterations += 1 collected_test_samples.update(test) # Check that the accumulated test samples cover the whole dataset assert_equal(iterations, expected_n_iter) if n_samples is not None: assert_equal(collected_test_samples, set(range(n_samples))) def test_kfold_valueerrors(): # Check that errors are raised if there is not enough samples assert_raises(ValueError, cval.KFold, 3, 4) # Check that a warning is raised if the least populated class has too few # members. y = [3, 3, -1, -1, 2] cv = assert_warns_message(Warning, "The least populated class", cval.StratifiedKFold, y, 3) # Check that despite the warning the folds are still computed even # though all the classes are not necessarily represented at on each # side of the split at each split check_cv_coverage(cv, expected_n_iter=3, n_samples=len(y)) # Error when number of folds is <= 1 assert_raises(ValueError, cval.KFold, 2, 0) assert_raises(ValueError, cval.KFold, 2, 1) assert_raises(ValueError, cval.StratifiedKFold, y, 0) assert_raises(ValueError, cval.StratifiedKFold, y, 1) # When n is not integer: assert_raises(ValueError, cval.KFold, 2.5, 2) # When n_folds is not integer: assert_raises(ValueError, cval.KFold, 5, 1.5) assert_raises(ValueError, cval.StratifiedKFold, y, 1.5) def test_kfold_indices(): # Check all indices are returned in the test folds kf = cval.KFold(300, 3) check_cv_coverage(kf, expected_n_iter=3, n_samples=300) # Check all indices are returned in the test folds even when equal-sized # folds are not possible kf = cval.KFold(17, 3) check_cv_coverage(kf, expected_n_iter=3, n_samples=17) def test_kfold_no_shuffle(): # Manually check that KFold preserves the data ordering on toy datasets splits = iter(cval.KFold(4, 2)) train, test = next(splits) assert_array_equal(test, [0, 1]) assert_array_equal(train, [2, 3]) train, test = next(splits) assert_array_equal(test, [2, 3]) assert_array_equal(train, [0, 1]) splits = iter(cval.KFold(5, 2)) train, test = next(splits) assert_array_equal(test, [0, 1, 2]) assert_array_equal(train, [3, 4]) train, test = next(splits) assert_array_equal(test, [3, 4]) assert_array_equal(train, [0, 1, 2]) def test_stratified_kfold_no_shuffle(): # Manually check that StratifiedKFold preserves the data ordering as much # as possible on toy datasets in order to avoid hiding sample dependencies # when possible splits = iter(cval.StratifiedKFold([1, 1, 0, 0], 2)) train, test = next(splits) 
assert_array_equal(test, [0, 2]) assert_array_equal(train, [1, 3]) train, test = next(splits) assert_array_equal(test, [1, 3]) assert_array_equal(train, [0, 2]) splits = iter(cval.StratifiedKFold([1, 1, 1, 0, 0, 0, 0], 2)) train, test = next(splits) assert_array_equal(test, [0, 1, 3, 4]) assert_array_equal(train, [2, 5, 6]) train, test = next(splits) assert_array_equal(test, [2, 5, 6]) assert_array_equal(train, [0, 1, 3, 4]) def test_stratified_kfold_ratios(): # Check that stratified kfold preserves label ratios in individual splits # Repeat with shuffling turned off and on n_samples = 1000 labels = np.array([4] * int(0.10 * n_samples) + [0] * int(0.89 * n_samples) + [1] * int(0.01 * n_samples)) for shuffle in [False, True]: for train, test in cval.StratifiedKFold(labels, 5, shuffle=shuffle): assert_almost_equal(np.sum(labels[train] == 4) / len(train), 0.10, 2) assert_almost_equal(np.sum(labels[train] == 0) / len(train), 0.89, 2) assert_almost_equal(np.sum(labels[train] == 1) / len(train), 0.01, 2) assert_almost_equal(np.sum(labels[test] == 4) / len(test), 0.10, 2) assert_almost_equal(np.sum(labels[test] == 0) / len(test), 0.89, 2) assert_almost_equal(np.sum(labels[test] == 1) / len(test), 0.01, 2) def test_kfold_balance(): # Check that KFold returns folds with balanced sizes for kf in [cval.KFold(i, 5) for i in range(11, 17)]: sizes = [] for _, test in kf: sizes.append(len(test)) assert_true((np.max(sizes) - np.min(sizes)) <= 1) assert_equal(np.sum(sizes), kf.n) def test_stratifiedkfold_balance(): # Check that KFold returns folds with balanced sizes (only when # stratification is possible) # Repeat with shuffling turned off and on labels = [0] * 3 + [1] * 14 for shuffle in [False, True]: for skf in [cval.StratifiedKFold(labels[:i], 3, shuffle=shuffle) for i in range(11, 17)]: sizes = [] for _, test in skf: sizes.append(len(test)) assert_true((np.max(sizes) - np.min(sizes)) <= 1) assert_equal(np.sum(sizes), skf.n) def test_shuffle_kfold(): # Check the indices are shuffled properly, and that all indices are # returned in the different test folds kf = cval.KFold(300, 3, shuffle=True, random_state=0) ind = np.arange(300) all_folds = None for train, test in kf: sorted_array = np.arange(100) assert_true(np.any(sorted_array != ind[train])) sorted_array = np.arange(101, 200) assert_true(np.any(sorted_array != ind[train])) sorted_array = np.arange(201, 300) assert_true(np.any(sorted_array != ind[train])) if all_folds is None: all_folds = ind[test].copy() else: all_folds = np.concatenate((all_folds, ind[test])) all_folds.sort() assert_array_equal(all_folds, ind) def test_shuffle_stratifiedkfold(): # Check that shuffling is happening when requested, and for proper # sample coverage labels = [0] * 20 + [1] * 20 kf0 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=0)) kf1 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=1)) for (_, test0), (_, test1) in zip(kf0, kf1): assert_true(set(test0) != set(test1)) check_cv_coverage(kf0, expected_n_iter=5, n_samples=40) def test_kfold_can_detect_dependent_samples_on_digits(): # see #2372 # The digits samples are dependent: they are apparently grouped by authors # although we don't have any information on the groups segment locations # for this data. 
We can highlight this fact be computing k-fold cross- # validation with and without shuffling: we observe that the shuffling case # wrongly makes the IID assumption and is therefore too optimistic: it # estimates a much higher accuracy (around 0.96) than than the non # shuffling variant (around 0.86). digits = load_digits() X, y = digits.data[:800], digits.target[:800] model = SVC(C=10, gamma=0.005) n = len(y) cv = cval.KFold(n, 5, shuffle=False) mean_score = cval.cross_val_score(model, X, y, cv=cv).mean() assert_greater(0.88, mean_score) assert_greater(mean_score, 0.85) # Shuffling the data artificially breaks the dependency and hides the # overfitting of the model with regards to the writing style of the authors # by yielding a seriously overestimated score: cv = cval.KFold(n, 5, shuffle=True, random_state=0) mean_score = cval.cross_val_score(model, X, y, cv=cv).mean() assert_greater(mean_score, 0.95) cv = cval.KFold(n, 5, shuffle=True, random_state=1) mean_score = cval.cross_val_score(model, X, y, cv=cv).mean() assert_greater(mean_score, 0.95) # Similarly, StratifiedKFold should try to shuffle the data as little # as possible (while respecting the balanced class constraints) # and thus be able to detect the dependency by not overestimating # the CV score either. As the digits dataset is approximately balanced # the estimated mean score is close to the score measured with # non-shuffled KFold cv = cval.StratifiedKFold(y, 5) mean_score = cval.cross_val_score(model, X, y, cv=cv).mean() assert_greater(0.88, mean_score) assert_greater(mean_score, 0.85) def test_shuffle_split(): ss1 = cval.ShuffleSplit(10, test_size=0.2, random_state=0) ss2 = cval.ShuffleSplit(10, test_size=2, random_state=0) ss3 = cval.ShuffleSplit(10, test_size=np.int32(2), random_state=0) for typ in six.integer_types: ss4 = cval.ShuffleSplit(10, test_size=typ(2), random_state=0) for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4): assert_array_equal(t1[0], t2[0]) assert_array_equal(t2[0], t3[0]) assert_array_equal(t3[0], t4[0]) assert_array_equal(t1[1], t2[1]) assert_array_equal(t2[1], t3[1]) assert_array_equal(t3[1], t4[1]) def test_stratified_shuffle_split_init(): y = np.asarray([0, 1, 1, 1, 2, 2, 2]) # Check that error is raised if there is a class with only one sample assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.2) # Check that error is raised if the test set size is smaller than n_classes assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 2) # Check that error is raised if the train set size is smaller than # n_classes assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 3, 2) y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2]) # Check that errors are raised if there is not enough samples assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.5, 0.6) assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 8, 0.6) assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.6, 8) # Train size or test size too small assert_raises(ValueError, cval.StratifiedShuffleSplit, y, train_size=2) assert_raises(ValueError, cval.StratifiedShuffleSplit, y, test_size=2) def test_stratified_shuffle_split_iter(): ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]), np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]), np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]), np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]), np.array([-1] * 800 + [1] * 50) ] for y in ys: sss = cval.StratifiedShuffleSplit(y, 6, test_size=0.33, random_state=0) for train, test in sss: 
assert_array_equal(np.unique(y[train]), np.unique(y[test])) # Checks if folds keep classes proportions p_train = (np.bincount(np.unique(y[train], return_inverse=True)[1]) / float(len(y[train]))) p_test = (np.bincount(np.unique(y[test], return_inverse=True)[1]) / float(len(y[test]))) assert_array_almost_equal(p_train, p_test, 1) assert_equal(y[train].size + y[test].size, y.size) assert_array_equal(np.lib.arraysetops.intersect1d(train, test), []) def test_stratified_shuffle_split_even(): # Test the StratifiedShuffleSplit, indices are drawn with a # equal chance n_folds = 5 n_iter = 1000 def assert_counts_are_ok(idx_counts, p): # Here we test that the distribution of the counts # per index is close enough to a binomial threshold = 0.05 / n_splits bf = stats.binom(n_splits, p) for count in idx_counts: p = bf.pmf(count) assert_true(p > threshold, "An index is not drawn with chance corresponding " "to even draws") for n_samples in (6, 22): labels = np.array((n_samples // 2) * [0, 1]) splits = cval.StratifiedShuffleSplit(labels, n_iter=n_iter, test_size=1. / n_folds, random_state=0) train_counts = [0] * n_samples test_counts = [0] * n_samples n_splits = 0 for train, test in splits: n_splits += 1 for counter, ids in [(train_counts, train), (test_counts, test)]: for id in ids: counter[id] += 1 assert_equal(n_splits, n_iter) assert_equal(len(train), splits.n_train) assert_equal(len(test), splits.n_test) assert_equal(len(set(train).intersection(test)), 0) label_counts = np.unique(labels) assert_equal(splits.test_size, 1.0 / n_folds) assert_equal(splits.n_train + splits.n_test, len(labels)) assert_equal(len(label_counts), 2) ex_test_p = float(splits.n_test) / n_samples ex_train_p = float(splits.n_train) / n_samples assert_counts_are_ok(train_counts, ex_train_p) assert_counts_are_ok(test_counts, ex_test_p) def test_predefinedsplit_with_kfold_split(): # Check that PredefinedSplit can reproduce a split generated by Kfold. 
folds = -1 * np.ones(10) kf_train = [] kf_test = [] for i, (train_ind, test_ind) in enumerate(cval.KFold(10, 5, shuffle=True)): kf_train.append(train_ind) kf_test.append(test_ind) folds[test_ind] = i ps_train = [] ps_test = [] ps = cval.PredefinedSplit(folds) for train_ind, test_ind in ps: ps_train.append(train_ind) ps_test.append(test_ind) assert_array_equal(ps_train, kf_train) assert_array_equal(ps_test, kf_test) def test_leave_label_out_changing_labels(): # Check that LeaveOneLabelOut and LeavePLabelOut work normally if # the labels variable is changed before calling __iter__ labels = np.array([0, 1, 2, 1, 1, 2, 0, 0]) labels_changing = np.array(labels, copy=True) lolo = cval.LeaveOneLabelOut(labels) lolo_changing = cval.LeaveOneLabelOut(labels_changing) lplo = cval.LeavePLabelOut(labels, p=2) lplo_changing = cval.LeavePLabelOut(labels_changing, p=2) labels_changing[:] = 0 for llo, llo_changing in [(lolo, lolo_changing), (lplo, lplo_changing)]: for (train, test), (train_chan, test_chan) in zip(llo, llo_changing): assert_array_equal(train, train_chan) assert_array_equal(test, test_chan) def test_cross_val_score(): clf = MockClassifier() for a in range(-10, 10): clf.a = a # Smoke test scores = cval.cross_val_score(clf, X, y) assert_array_equal(scores, clf.score(X, y)) # test with multioutput y scores = cval.cross_val_score(clf, X_sparse, X) assert_array_equal(scores, clf.score(X_sparse, X)) scores = cval.cross_val_score(clf, X_sparse, y) assert_array_equal(scores, clf.score(X_sparse, y)) # test with multioutput y scores = cval.cross_val_score(clf, X_sparse, X) assert_array_equal(scores, clf.score(X_sparse, X)) # test with X and y as list list_check = lambda x: isinstance(x, list) clf = CheckingClassifier(check_X=list_check) scores = cval.cross_val_score(clf, X.tolist(), y.tolist()) clf = CheckingClassifier(check_y=list_check) scores = cval.cross_val_score(clf, X, y.tolist()) assert_raises(ValueError, cval.cross_val_score, clf, X, y, scoring="sklearn") # test with 3d X and X_3d = X[:, :, np.newaxis] clf = MockClassifier(allow_nd=True) scores = cval.cross_val_score(clf, X_3d, y) clf = MockClassifier(allow_nd=False) assert_raises(ValueError, cval.cross_val_score, clf, X_3d, y) def test_cross_val_score_pandas(): # check cross_val_score doesn't destroy pandas dataframe types = [(MockDataFrame, MockDataFrame)] try: from pandas import Series, DataFrame types.append((Series, DataFrame)) except ImportError: pass for TargetType, InputFeatureType in types: # X dataframe, y series X_df, y_ser = InputFeatureType(X), TargetType(y) check_df = lambda x: isinstance(x, InputFeatureType) check_series = lambda x: isinstance(x, TargetType) clf = CheckingClassifier(check_X=check_df, check_y=check_series) cval.cross_val_score(clf, X_df, y_ser) def test_cross_val_score_mask(): # test that cross_val_score works with boolean masks svm = SVC(kernel="linear") iris = load_iris() X, y = iris.data, iris.target cv_indices = cval.KFold(len(y), 5) scores_indices = cval.cross_val_score(svm, X, y, cv=cv_indices) cv_indices = cval.KFold(len(y), 5) cv_masks = [] for train, test in cv_indices: mask_train = np.zeros(len(y), dtype=np.bool) mask_test = np.zeros(len(y), dtype=np.bool) mask_train[train] = 1 mask_test[test] = 1 cv_masks.append((train, test)) scores_masks = cval.cross_val_score(svm, X, y, cv=cv_masks) assert_array_equal(scores_indices, scores_masks) def test_cross_val_score_precomputed(): # test for svm with precomputed kernel svm = SVC(kernel="precomputed") iris = load_iris() X, y = iris.data, iris.target 
linear_kernel = np.dot(X, X.T) score_precomputed = cval.cross_val_score(svm, linear_kernel, y) svm = SVC(kernel="linear") score_linear = cval.cross_val_score(svm, X, y) assert_array_equal(score_precomputed, score_linear) # Error raised for non-square X svm = SVC(kernel="precomputed") assert_raises(ValueError, cval.cross_val_score, svm, X, y) # test error is raised when the precomputed kernel is not array-like # or sparse assert_raises(ValueError, cval.cross_val_score, svm, linear_kernel.tolist(), y) def test_cross_val_score_fit_params(): clf = MockClassifier() n_samples = X.shape[0] n_classes = len(np.unique(y)) DUMMY_INT = 42 DUMMY_STR = '42' DUMMY_OBJ = object() def assert_fit_params(clf): # Function to test that the values are passed correctly to the # classifier arguments for non-array type assert_equal(clf.dummy_int, DUMMY_INT) assert_equal(clf.dummy_str, DUMMY_STR) assert_equal(clf.dummy_obj, DUMMY_OBJ) fit_params = {'sample_weight': np.ones(n_samples), 'class_prior': np.ones(n_classes) / n_classes, 'sparse_sample_weight': W_sparse, 'sparse_param': P_sparse, 'dummy_int': DUMMY_INT, 'dummy_str': DUMMY_STR, 'dummy_obj': DUMMY_OBJ, 'callback': assert_fit_params} cval.cross_val_score(clf, X, y, fit_params=fit_params) def test_cross_val_score_score_func(): clf = MockClassifier() _score_func_args = [] def score_func(y_test, y_predict): _score_func_args.append((y_test, y_predict)) return 1.0 with warnings.catch_warnings(record=True): scoring = make_scorer(score_func) score = cval.cross_val_score(clf, X, y, scoring=scoring) assert_array_equal(score, [1.0, 1.0, 1.0]) assert len(_score_func_args) == 3 def test_cross_val_score_errors(): class BrokenEstimator: pass assert_raises(TypeError, cval.cross_val_score, BrokenEstimator(), X) def test_train_test_split_errors(): assert_raises(ValueError, cval.train_test_split) assert_raises(ValueError, cval.train_test_split, range(3), train_size=1.1) assert_raises(ValueError, cval.train_test_split, range(3), test_size=0.6, train_size=0.6) assert_raises(ValueError, cval.train_test_split, range(3), test_size=np.float32(0.6), train_size=np.float32(0.6)) assert_raises(ValueError, cval.train_test_split, range(3), test_size="wrong_type") assert_raises(ValueError, cval.train_test_split, range(3), test_size=2, train_size=4) assert_raises(TypeError, cval.train_test_split, range(3), some_argument=1.1) assert_raises(ValueError, cval.train_test_split, range(3), range(42)) def test_train_test_split(): X = np.arange(100).reshape((10, 10)) X_s = coo_matrix(X) y = np.arange(10) # simple test split = cval.train_test_split(X, y, test_size=None, train_size=.5) X_train, X_test, y_train, y_test = split assert_equal(len(y_test), len(y_train)) # test correspondence of X and y assert_array_equal(X_train[:, 0], y_train * 10) assert_array_equal(X_test[:, 0], y_test * 10) # conversion of lists to arrays (deprecated?) 
with warnings.catch_warnings(record=True): split = cval.train_test_split(X, X_s, y.tolist(), allow_lists=False) X_train, X_test, X_s_train, X_s_test, y_train, y_test = split assert_array_equal(X_train, X_s_train.toarray()) assert_array_equal(X_test, X_s_test.toarray()) # don't convert lists to anything else by default split = cval.train_test_split(X, X_s, y.tolist()) X_train, X_test, X_s_train, X_s_test, y_train, y_test = split assert_true(isinstance(y_train, list)) assert_true(isinstance(y_test, list)) # allow nd-arrays X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2) y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11) split = cval.train_test_split(X_4d, y_3d) assert_equal(split[0].shape, (7, 5, 3, 2)) assert_equal(split[1].shape, (3, 5, 3, 2)) assert_equal(split[2].shape, (7, 7, 11)) assert_equal(split[3].shape, (3, 7, 11)) # test stratification option y = np.array([1, 1, 1, 1, 2, 2, 2, 2]) for test_size, exp_test_size in zip([2, 4, 0.25, 0.5, 0.75], [2, 4, 2, 4, 6]): train, test = cval.train_test_split(y, test_size=test_size, stratify=y, random_state=0) assert_equal(len(test), exp_test_size) assert_equal(len(test) + len(train), len(y)) # check the 1:1 ratio of ones and twos in the data is preserved assert_equal(np.sum(train == 1), np.sum(train == 2)) def train_test_split_pandas(): # check cross_val_score doesn't destroy pandas dataframe types = [MockDataFrame] try: from pandas import DataFrame types.append(DataFrame) except ImportError: pass for InputFeatureType in types: # X dataframe X_df = InputFeatureType(X) X_train, X_test = cval.train_test_split(X_df) assert_true(isinstance(X_train, InputFeatureType)) assert_true(isinstance(X_test, InputFeatureType)) def train_test_split_mock_pandas(): # X mock dataframe X_df = MockDataFrame(X) X_train, X_test = cval.train_test_split(X_df) assert_true(isinstance(X_train, MockDataFrame)) assert_true(isinstance(X_test, MockDataFrame)) X_train_arr, X_test_arr = cval.train_test_split(X_df, allow_lists=False) assert_true(isinstance(X_train_arr, np.ndarray)) assert_true(isinstance(X_test_arr, np.ndarray)) def test_cross_val_score_with_score_func_classification(): iris = load_iris() clf = SVC(kernel='linear') # Default score (should be the accuracy score) scores = cval.cross_val_score(clf, iris.data, iris.target, cv=5) assert_array_almost_equal(scores, [0.97, 1., 0.97, 0.97, 1.], 2) # Correct classification score (aka. zero / one score) - should be the # same as the default estimator score zo_scores = cval.cross_val_score(clf, iris.data, iris.target, scoring="accuracy", cv=5) assert_array_almost_equal(zo_scores, [0.97, 1., 0.97, 0.97, 1.], 2) # F1 score (class are balanced so f1_score should be equal to zero/one # score f1_scores = cval.cross_val_score(clf, iris.data, iris.target, scoring="f1_weighted", cv=5) assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2) def test_cross_val_score_with_score_func_regression(): X, y = make_regression(n_samples=30, n_features=20, n_informative=5, random_state=0) reg = Ridge() # Default score of the Ridge regression estimator scores = cval.cross_val_score(reg, X, y, cv=5) assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2) # R2 score (aka. 
determination coefficient) - should be the # same as the default estimator score r2_scores = cval.cross_val_score(reg, X, y, scoring="r2", cv=5) assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2) # Mean squared error; this is a loss function, so "scores" are negative mse_scores = cval.cross_val_score(reg, X, y, cv=5, scoring="mean_squared_error") expected_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99]) assert_array_almost_equal(mse_scores, expected_mse, 2) # Explained variance scoring = make_scorer(explained_variance_score) ev_scores = cval.cross_val_score(reg, X, y, cv=5, scoring=scoring) assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2) def test_permutation_score(): iris = load_iris() X = iris.data X_sparse = coo_matrix(X) y = iris.target svm = SVC(kernel='linear') cv = cval.StratifiedKFold(y, 2) score, scores, pvalue = cval.permutation_test_score( svm, X, y, n_permutations=30, cv=cv, scoring="accuracy") assert_greater(score, 0.9) assert_almost_equal(pvalue, 0.0, 1) score_label, _, pvalue_label = cval.permutation_test_score( svm, X, y, n_permutations=30, cv=cv, scoring="accuracy", labels=np.ones(y.size), random_state=0) assert_true(score_label == score) assert_true(pvalue_label == pvalue) # check that we obtain the same results with a sparse representation svm_sparse = SVC(kernel='linear') cv_sparse = cval.StratifiedKFold(y, 2) score_label, _, pvalue_label = cval.permutation_test_score( svm_sparse, X_sparse, y, n_permutations=30, cv=cv_sparse, scoring="accuracy", labels=np.ones(y.size), random_state=0) assert_true(score_label == score) assert_true(pvalue_label == pvalue) # test with custom scoring object def custom_score(y_true, y_pred): return (((y_true == y_pred).sum() - (y_true != y_pred).sum()) / y_true.shape[0]) scorer = make_scorer(custom_score) score, _, pvalue = cval.permutation_test_score( svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0) assert_almost_equal(score, .93, 2) assert_almost_equal(pvalue, 0.01, 3) # set random y y = np.mod(np.arange(len(y)), 3) score, scores, pvalue = cval.permutation_test_score( svm, X, y, n_permutations=30, cv=cv, scoring="accuracy") assert_less(score, 0.5) assert_greater(pvalue, 0.2) def test_cross_val_generator_with_indices(): X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]]) y = np.array([1, 1, 2, 2]) labels = np.array([1, 2, 3, 4]) # explicitly passing indices value is deprecated loo = cval.LeaveOneOut(4) lpo = cval.LeavePOut(4, 2) kf = cval.KFold(4, 2) skf = cval.StratifiedKFold(y, 2) lolo = cval.LeaveOneLabelOut(labels) lopo = cval.LeavePLabelOut(labels, 2) ps = cval.PredefinedSplit([1, 1, 2, 2]) ss = cval.ShuffleSplit(2) for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]: for train, test in cv: assert_not_equal(np.asarray(train).dtype.kind, 'b') assert_not_equal(np.asarray(train).dtype.kind, 'b') X[train], X[test] y[train], y[test] @ignore_warnings def test_cross_val_generator_with_default_indices(): X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]]) y = np.array([1, 1, 2, 2]) labels = np.array([1, 2, 3, 4]) loo = cval.LeaveOneOut(4) lpo = cval.LeavePOut(4, 2) kf = cval.KFold(4, 2) skf = cval.StratifiedKFold(y, 2) lolo = cval.LeaveOneLabelOut(labels) lopo = cval.LeavePLabelOut(labels, 2) ss = cval.ShuffleSplit(2) ps = cval.PredefinedSplit([1, 1, 2, 2]) for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]: for train, test in cv: assert_not_equal(np.asarray(train).dtype.kind, 'b') assert_not_equal(np.asarray(train).dtype.kind, 'b') X[train], X[test] y[train], y[test] def 
test_shufflesplit_errors(): assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=2.0) assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=1.0) assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=0.1, train_size=0.95) assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=11) assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=10) assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=8, train_size=3) assert_raises(ValueError, cval.ShuffleSplit, 10, train_size=1j) assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=None, train_size=None) def test_shufflesplit_reproducible(): # Check that iterating twice on the ShuffleSplit gives the same # sequence of train-test when the random_state is given ss = cval.ShuffleSplit(10, random_state=21) assert_array_equal(list(a for a, b in ss), list(a for a, b in ss)) def test_safe_split_with_precomputed_kernel(): clf = SVC() clfp = SVC(kernel="precomputed") iris = load_iris() X, y = iris.data, iris.target K = np.dot(X, X.T) cv = cval.ShuffleSplit(X.shape[0], test_size=0.25, random_state=0) tr, te = list(cv)[0] X_tr, y_tr = cval._safe_split(clf, X, y, tr) K_tr, y_tr2 = cval._safe_split(clfp, K, y, tr) assert_array_almost_equal(K_tr, np.dot(X_tr, X_tr.T)) X_te, y_te = cval._safe_split(clf, X, y, te, tr) K_te, y_te2 = cval._safe_split(clfp, K, y, te, tr) assert_array_almost_equal(K_te, np.dot(X_te, X_tr.T)) def test_cross_val_score_allow_nans(): # Check that cross_val_score allows input data with NaNs X = np.arange(200, dtype=np.float64).reshape(10, -1) X[2, :] = np.nan y = np.repeat([0, 1], X.shape[0] / 2) p = Pipeline([ ('imputer', Imputer(strategy='mean', missing_values='NaN')), ('classifier', MockClassifier()), ]) cval.cross_val_score(p, X, y, cv=5) def test_train_test_split_allow_nans(): # Check that train_test_split allows input data with NaNs X = np.arange(200, dtype=np.float64).reshape(10, -1) X[2, :] = np.nan y = np.repeat([0, 1], X.shape[0] / 2) cval.train_test_split(X, y, test_size=0.2, random_state=42) def test_permutation_test_score_allow_nans(): # Check that permutation_test_score allows input data with NaNs X = np.arange(200, dtype=np.float64).reshape(10, -1) X[2, :] = np.nan y = np.repeat([0, 1], X.shape[0] / 2) p = Pipeline([ ('imputer', Imputer(strategy='mean', missing_values='NaN')), ('classifier', MockClassifier()), ]) cval.permutation_test_score(p, X, y, cv=5) def test_check_cv_return_types(): X = np.ones((9, 2)) cv = cval.check_cv(3, X, classifier=False) assert_true(isinstance(cv, cval.KFold)) y_binary = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1]) cv = cval.check_cv(3, X, y_binary, classifier=True) assert_true(isinstance(cv, cval.StratifiedKFold)) y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2]) cv = cval.check_cv(3, X, y_multiclass, classifier=True) assert_true(isinstance(cv, cval.StratifiedKFold)) X = np.ones((5, 2)) y_seq_of_seqs = [[], [1, 2], [3], [0, 1, 3], [2]] with warnings.catch_warnings(record=True): # deprecated sequence of sequence format cv = cval.check_cv(3, X, y_seq_of_seqs, classifier=True) assert_true(isinstance(cv, cval.KFold)) y_indicator_matrix = LabelBinarizer().fit_transform(y_seq_of_seqs) cv = cval.check_cv(3, X, y_indicator_matrix, classifier=True) assert_true(isinstance(cv, cval.KFold)) y_multioutput = np.array([[1, 2], [0, 3], [0, 0], [3, 1], [2, 0]]) cv = cval.check_cv(3, X, y_multioutput, classifier=True) assert_true(isinstance(cv, cval.KFold)) def test_cross_val_score_multilabel(): X = np.array([[-3, 4], [2, 4], [3, 3], [0, 2], [-3, 1], [-2, 1], [0, 0], [-2, -1], [-1, 
-2], [1, -2]]) y = np.array([[1, 1], [0, 1], [0, 1], [0, 1], [1, 1], [0, 1], [1, 0], [1, 1], [1, 0], [0, 0]]) clf = KNeighborsClassifier(n_neighbors=1) scoring_micro = make_scorer(precision_score, average='micro') scoring_macro = make_scorer(precision_score, average='macro') scoring_samples = make_scorer(precision_score, average='samples') score_micro = cval.cross_val_score(clf, X, y, scoring=scoring_micro, cv=5) score_macro = cval.cross_val_score(clf, X, y, scoring=scoring_macro, cv=5) score_samples = cval.cross_val_score(clf, X, y, scoring=scoring_samples, cv=5) assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3]) assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4]) assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4]) def test_cross_val_predict(): boston = load_boston() X, y = boston.data, boston.target cv = cval.KFold(len(boston.target)) est = Ridge() # Naive loop (should be same as cross_val_predict): preds2 = np.zeros_like(y) for train, test in cv: est.fit(X[train], y[train]) preds2[test] = est.predict(X[test]) preds = cval.cross_val_predict(est, X, y, cv=cv) assert_array_almost_equal(preds, preds2) preds = cval.cross_val_predict(est, X, y) assert_equal(len(preds), len(y)) cv = cval.LeaveOneOut(len(y)) preds = cval.cross_val_predict(est, X, y, cv=cv) assert_equal(len(preds), len(y)) Xsp = X.copy() Xsp *= (Xsp > np.median(Xsp)) Xsp = coo_matrix(Xsp) preds = cval.cross_val_predict(est, Xsp, y) assert_array_almost_equal(len(preds), len(y)) preds = cval.cross_val_predict(KMeans(), X) assert_equal(len(preds), len(y)) def bad_cv(): for i in range(4): yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8]) assert_raises(ValueError, cval.cross_val_predict, est, X, y, cv=bad_cv()) def test_cross_val_predict_input_types(): clf = Ridge() # Smoke test predictions = cval.cross_val_predict(clf, X, y) assert_equal(predictions.shape, (10,)) # test with multioutput y predictions = cval.cross_val_predict(clf, X_sparse, X) assert_equal(predictions.shape, (10, 2)) predictions = cval.cross_val_predict(clf, X_sparse, y) assert_array_equal(predictions.shape, (10,)) # test with multioutput y predictions = cval.cross_val_predict(clf, X_sparse, X) assert_array_equal(predictions.shape, (10, 2)) # test with X and y as list list_check = lambda x: isinstance(x, list) clf = CheckingClassifier(check_X=list_check) predictions = cval.cross_val_predict(clf, X.tolist(), y.tolist()) clf = CheckingClassifier(check_y=list_check) predictions = cval.cross_val_predict(clf, X, y.tolist()) # test with 3d X and X_3d = X[:, :, np.newaxis] check_3d = lambda x: x.ndim == 3 clf = CheckingClassifier(check_X=check_3d) predictions = cval.cross_val_predict(clf, X_3d, y) assert_array_equal(predictions.shape, (10,)) def test_cross_val_predict_pandas(): # check cross_val_score doesn't destroy pandas dataframe types = [(MockDataFrame, MockDataFrame)] try: from pandas import Series, DataFrame types.append((Series, DataFrame)) except ImportError: pass for TargetType, InputFeatureType in types: # X dataframe, y series X_df, y_ser = InputFeatureType(X), TargetType(y) check_df = lambda x: isinstance(x, InputFeatureType) check_series = lambda x: isinstance(x, TargetType) clf = CheckingClassifier(check_X=check_df, check_y=check_series) cval.cross_val_predict(clf, X_df, y_ser) def test_sparse_fit_params(): iris = load_iris() X, y = iris.data, iris.target clf = MockClassifier() fit_params = {'sparse_sample_weight': coo_matrix(np.eye(X.shape[0]))} a = cval.cross_val_score(clf, X, y, fit_params=fit_params) 
    assert_array_equal(a, np.ones(3))


def test_check_is_partition():
    p = np.arange(100)
    assert_true(cval._check_is_partition(p, 100))
    assert_false(cval._check_is_partition(np.delete(p, 23), 100))
    p[0] = 23
    assert_false(cval._check_is_partition(p, 100))
bsd-3-clause
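The behaviours exercised by these tests (full fold coverage, stratification, shuffling, per-fold scoring, stratified splitting) all belong to the public API of the sklearn.cross_validation module under test. A short sketch of that usage, assuming a scikit-learn release old enough to still ship this module; it was later superseded by sklearn.model_selection.

import numpy as np
from sklearn import cross_validation as cval
from sklearn.datasets import load_iris
from sklearn.svm import SVC

iris = load_iris()
X, y = iris.data, iris.target

# Stratified folds keep every class represented in each train/test split.
cv = cval.StratifiedKFold(y, n_folds=5, shuffle=True, random_state=0)
for train, test in cv:
    assert set(np.unique(y[train])) == set(np.unique(y[test]))

# cross_val_score fits and scores the estimator once per fold.
scores = cval.cross_val_score(SVC(kernel="linear"), X, y, cv=cv)
print(scores.mean(), scores.std())

# A single stratified hold-out split preserving the class ratios of y.
X_train, X_test, y_train, y_test = cval.train_test_split(
    X, y, test_size=0.25, stratify=y, random_state=0)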
reinierbsv/PythonScripts
Scraping.py
1
2498
import requests
from bs4 import BeautifulSoup
import pandas as pd

# page = requests.get("http://dataquestio.github.io/web-scraping-pages/simple.html")
# print(page.content)
# soup = BeautifulSoup(page.content, 'html.parser')
# print(soup)
# print(list(soup.children))
# print([type(item) for item in list(soup.children)])
# html = list(soup.children)[2]
# print(html)
# print(list(html.children))
# body = list(html.children)[3]
# print(body)
# print(list(body.children))
# p = list(body.children)[1]
# print(p)
# print(p.get_text())
# print(soup.find_all('p'))
# print(soup.find_all('p')[0].get_text())

# page = requests.get("http://dataquestio.github.io/web-scraping-pages/ids_and_classes.html")
# soup = BeautifulSoup(page.content, 'html.parser')
# print(soup)
# soup.find_all('p', class_='outer-text')  # any p tag that has the class outer-text
# soup.find_all(class_="outer-text")  # look for any tag that has the class outer-text
# soup.find_all(id="first")  # search for elements by id
# print(soup.select("div p"))  # find all the p tags in our page that are inside of a div

page = requests.get("http://forecast.weather.gov/MapClick.php?lat=37.7772&lon=-122.4168#.WTdxFWg1-Uk")
soup = BeautifulSoup(page.content, 'html.parser')
# print(soup)

seven_day = soup.find(id='seven-day-forecast-body')
forecast_items = seven_day.find_all(class_="tombstone-container")
tonight = forecast_items[0]
print(tonight.prettify())

period = tonight.find(class_="period-name").get_text()
short_desc = tonight.find(class_="short-desc")
temp = tonight.find(class_="temp").get_text()

# Adding spaces between strings in short_desc
for r in short_desc:
    if (r.string is None):
        r.string = ' '

print(period)
print(short_desc.get_text())
print(temp)

# extract the title attribute from the img tag
img = tonight.find("img")
desc = img['title']
print(desc)

period_tags = seven_day.select(".tombstone-container .period-name")
periods = [pt.get_text() for pt in period_tags]
print(periods)

short_descs = [sd.get_text() for sd in seven_day.select(".tombstone-container .short-desc")]
temps = [t.get_text() for t in seven_day.select(".tombstone-container .temp")]
descs = [d["title"] for d in seven_day.select(".tombstone-container img")]
print(short_descs)
print(temps)
print(descs)

weather = pd.DataFrame({
    "period": periods,
    "short_desc": short_descs,
    "temp": temps,
    "desc": descs
})
print(weather)
gpl-3.0
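The script stops at a DataFrame of raw strings; a natural follow-up is to pull the numeric value out of each temperature string. A sketch under the assumption that, as on the scraped forecast page, those strings look like "Low: 49 °F" or "High: 63 °F"; the two-row frame below just stands in for the real scraped one.

import pandas as pd

# Stand-in for the "weather" frame built above.
weather = pd.DataFrame({
    "period": ["Tonight", "Friday"],
    "short_desc": ["Partly Cloudy", "Sunny"],
    "temp": ["Low: 49 °F", "High: 63 °F"],
    "desc": ["Tonight: Partly cloudy.", "Friday: Sunny."],
})

# Extract the integer from each temp string and keep it as a numeric column.
weather["temp_num"] = weather["temp"].str.extract(r"(\d+)", expand=False).astype(int)

# Night-time rows report lows rather than highs.
weather["is_night"] = weather["temp"].str.contains("Low")

print(weather[["period", "temp_num", "is_night"]])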
luo66/scikit-learn
examples/ensemble/plot_adaboost_hastie_10_2.py
355
3576
""" ============================= Discrete versus Real AdaBoost ============================= This example is based on Figure 10.2 from Hastie et al 2009 [1] and illustrates the difference in performance between the discrete SAMME [2] boosting algorithm and real SAMME.R boosting algorithm. Both algorithms are evaluated on a binary classification task where the target Y is a non-linear function of 10 input features. Discrete SAMME AdaBoost adapts based on errors in predicted class labels whereas real SAMME.R uses the predicted class probabilities. .. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical Learning Ed. 2", Springer, 2009. .. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009. """ print(__doc__) # Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>, # Noel Dawe <noel.dawe@gmail.com> # # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt from sklearn import datasets from sklearn.tree import DecisionTreeClassifier from sklearn.metrics import zero_one_loss from sklearn.ensemble import AdaBoostClassifier n_estimators = 400 # A learning rate of 1. may not be optimal for both SAMME and SAMME.R learning_rate = 1. X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1) X_test, y_test = X[2000:], y[2000:] X_train, y_train = X[:2000], y[:2000] dt_stump = DecisionTreeClassifier(max_depth=1, min_samples_leaf=1) dt_stump.fit(X_train, y_train) dt_stump_err = 1.0 - dt_stump.score(X_test, y_test) dt = DecisionTreeClassifier(max_depth=9, min_samples_leaf=1) dt.fit(X_train, y_train) dt_err = 1.0 - dt.score(X_test, y_test) ada_discrete = AdaBoostClassifier( base_estimator=dt_stump, learning_rate=learning_rate, n_estimators=n_estimators, algorithm="SAMME") ada_discrete.fit(X_train, y_train) ada_real = AdaBoostClassifier( base_estimator=dt_stump, learning_rate=learning_rate, n_estimators=n_estimators, algorithm="SAMME.R") ada_real.fit(X_train, y_train) fig = plt.figure() ax = fig.add_subplot(111) ax.plot([1, n_estimators], [dt_stump_err] * 2, 'k-', label='Decision Stump Error') ax.plot([1, n_estimators], [dt_err] * 2, 'k--', label='Decision Tree Error') ada_discrete_err = np.zeros((n_estimators,)) for i, y_pred in enumerate(ada_discrete.staged_predict(X_test)): ada_discrete_err[i] = zero_one_loss(y_pred, y_test) ada_discrete_err_train = np.zeros((n_estimators,)) for i, y_pred in enumerate(ada_discrete.staged_predict(X_train)): ada_discrete_err_train[i] = zero_one_loss(y_pred, y_train) ada_real_err = np.zeros((n_estimators,)) for i, y_pred in enumerate(ada_real.staged_predict(X_test)): ada_real_err[i] = zero_one_loss(y_pred, y_test) ada_real_err_train = np.zeros((n_estimators,)) for i, y_pred in enumerate(ada_real.staged_predict(X_train)): ada_real_err_train[i] = zero_one_loss(y_pred, y_train) ax.plot(np.arange(n_estimators) + 1, ada_discrete_err, label='Discrete AdaBoost Test Error', color='red') ax.plot(np.arange(n_estimators) + 1, ada_discrete_err_train, label='Discrete AdaBoost Train Error', color='blue') ax.plot(np.arange(n_estimators) + 1, ada_real_err, label='Real AdaBoost Test Error', color='orange') ax.plot(np.arange(n_estimators) + 1, ada_real_err_train, label='Real AdaBoost Train Error', color='green') ax.set_ylim((0.0, 0.5)) ax.set_xlabel('n_estimators') ax.set_ylabel('error rate') leg = ax.legend(loc='upper right', fancybox=True) leg.get_frame().set_alpha(0.7) plt.show()
bsd-3-clause
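The docstring's distinction comes down to which output of the weak learner each variant feeds back into the boosting step: discrete SAMME reweights from hard class labels, while real SAMME.R works from class-probability estimates. A small sketch showing the two outputs for a single depth-1 stump; the data generation mirrors the script above and the three-row slice is arbitrary.

from sklearn.datasets import make_hastie_10_2
from sklearn.tree import DecisionTreeClassifier

X, y = make_hastie_10_2(n_samples=2000, random_state=1)
stump = DecisionTreeClassifier(max_depth=1, min_samples_leaf=1).fit(X, y)

# What SAMME consumes: hard labels in {-1, 1}.
print(stump.predict(X[:3]))

# What SAMME.R consumes: per-class probability estimates (each row sums to 1).
print(stump.predict_proba(X[:3]))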
RPGOne/scikit-learn
examples/mixture/plot_concentration_prior.py
25
5631
""" ======================================================================== Concentration Prior Type Analysis of Variation Bayesian Gaussian Mixture ======================================================================== This example plots the ellipsoids obtained from a toy dataset (mixture of three Gaussians) fitted by the ``BayesianGaussianMixture`` class models with a Dirichlet distribution prior (``weight_concentration_prior_type='dirichlet_distribution'``) and a Dirichlet process prior (``weight_concentration_prior_type='dirichlet_process'``). On each figure, we plot the results for three different values of the weight concentration prior. The ``BayesianGaussianMixture`` class can adapt its number of mixture componentsautomatically. The parameter ``weight_concentration_prior`` has a direct link with the resulting number of components with non-zero weights. Specifying a low value for the concentration prior will make the model put most of the weight on few components set the remaining components weights very close to zero. High values of the concentration prior will allow a larger number of components to be active in the mixture. The Dirichlet process prior allows to define an infinite number of components and automatically selects the correct number of components: it activates a component only if it is necessary. On the contrary the classical finite mixture model with a Dirichlet distribution prior will favor more uniformly weighted components and therefore tends to divide natural clusters into unnecessary sub-components. """ # Author: Thierry Guillemot <thierry.guillemot.work@gmail.com> # License: BSD 3 clause import numpy as np import matplotlib as mpl import matplotlib.pyplot as plt import matplotlib.gridspec as gridspec from sklearn.mixture import BayesianGaussianMixture print(__doc__) def plot_ellipses(ax, weights, means, covars): for n in range(means.shape[0]): eig_vals, eig_vecs = np.linalg.eigh(covars[n]) unit_eig_vec = eig_vecs[0] / np.linalg.norm(eig_vecs[0]) angle = np.arctan2(unit_eig_vec[1], unit_eig_vec[0]) # Ellipse needs degrees angle = 180 * angle / np.pi # eigenvector normalization eig_vals = 2 * np.sqrt(2) * np.sqrt(eig_vals) ell = mpl.patches.Ellipse(means[n], eig_vals[0], eig_vals[1], 180 + angle) ell.set_clip_box(ax.bbox) ell.set_alpha(weights[n]) ell.set_facecolor('#56B4E9') ax.add_artist(ell) def plot_results(ax1, ax2, estimator, X, y, title, plot_title=False): ax1.set_title(title) ax1.scatter(X[:, 0], X[:, 1], s=5, marker='o', color=colors[y], alpha=0.8) ax1.set_xlim(-2., 2.) ax1.set_ylim(-3., 3.) 
ax1.set_xticks(()) ax1.set_yticks(()) plot_ellipses(ax1, estimator.weights_, estimator.means_, estimator.covariances_) ax2.get_xaxis().set_tick_params(direction='out') ax2.yaxis.grid(True, alpha=0.7) for k, w in enumerate(estimator.weights_): ax2.bar(k - .45, w, width=0.9, color='#56B4E9', zorder=3) ax2.text(k, w + 0.007, "%.1f%%" % (w * 100.), horizontalalignment='center') ax2.set_xlim(-.6, 2 * n_components - .4) ax2.set_ylim(0., 1.1) ax2.tick_params(axis='y', which='both', left='off', right='off', labelleft='off') ax2.tick_params(axis='x', which='both', top='off') if plot_title: ax1.set_ylabel('Estimated Mixtures') ax2.set_ylabel('Weight of each component') # Parameters of the dataset random_state, n_components, n_features = 2, 3, 2 colors = np.array(['#0072B2', '#F0E442', '#D55E00']) covars = np.array([[[.7, .0], [.0, .1]], [[.5, .0], [.0, .1]], [[.5, .0], [.0, .1]]]) samples = np.array([200, 500, 200]) means = np.array([[.0, -.70], [.0, .0], [.0, .70]]) # mean_precision_prior= 0.8 to minimize the influence of the prior estimators = [ ("Finite mixture with a Dirichlet distribution\nprior and " r"$\gamma_0=$", BayesianGaussianMixture( weight_concentration_prior_type="dirichlet_distribution", n_components=2 * n_components, reg_covar=0, init_params='random', max_iter=1500, mean_precision_prior=.8, random_state=random_state), [0.001, 1, 1000]), ("Infinite mixture with a Dirichlet process\n prior and" r"$\gamma_0=$", BayesianGaussianMixture( weight_concentration_prior_type="dirichlet_process", n_components=2 * n_components, reg_covar=0, init_params='random', max_iter=1500, mean_precision_prior=.8, random_state=random_state), [1, 1000, 100000])] # Generate data rng = np.random.RandomState(random_state) X = np.vstack([ rng.multivariate_normal(means[j], covars[j], samples[j]) for j in range(n_components)]) y = np.concatenate([j * np.ones(samples[j], dtype=int) for j in range(n_components)]) # Plot results in two different figures for (title, estimator, concentrations_prior) in estimators: plt.figure(figsize=(4.7 * 3, 8)) plt.subplots_adjust(bottom=.04, top=0.90, hspace=.05, wspace=.05, left=.03, right=.99) gs = gridspec.GridSpec(3, len(concentrations_prior)) for k, concentration in enumerate(concentrations_prior): estimator.weight_concentration_prior = concentration estimator.fit(X) plot_results(plt.subplot(gs[0:2, k]), plt.subplot(gs[2, k]), estimator, X, y, r"%s$%.1e$" % (title, concentration), plot_title=k == 0) plt.show()
bsd-3-clause
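The docstring states that a low weight_concentration_prior concentrates the mixture weight on a few components while a high value keeps many components active; this can be checked by counting the components that retain non-negligible weight after fitting. A sketch on synthetic blobs, where the 1% weight cutoff and the blob layout are arbitrary assumptions.

import numpy as np
from sklearn.mixture import BayesianGaussianMixture

rng = np.random.RandomState(0)
# Two well separated blobs, but the model is given room for ten components.
X = np.vstack([rng.randn(300, 2) - 4., rng.randn(300, 2) + 4.])

for prior in (1e-3, 1e3):
    bgm = BayesianGaussianMixture(
        n_components=10,
        weight_concentration_prior_type="dirichlet_process",
        weight_concentration_prior=prior,
        max_iter=500, random_state=0)
    bgm.fit(X)
    n_active = np.sum(bgm.weights_ > 0.01)
    print("weight_concentration_prior=%g: %d active components" % (prior, n_active))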
spbguru/repo1
external/linux32/lib/python2.6/site-packages/matplotlib/projections/polar.py
69
20981
import math import numpy as npy import matplotlib rcParams = matplotlib.rcParams from matplotlib.artist import kwdocd from matplotlib.axes import Axes from matplotlib import cbook from matplotlib.patches import Circle from matplotlib.path import Path from matplotlib.ticker import Formatter, Locator from matplotlib.transforms import Affine2D, Affine2DBase, Bbox, \ BboxTransformTo, IdentityTransform, Transform, TransformWrapper class PolarAxes(Axes): """ A polar graph projection, where the input dimensions are *theta*, *r*. Theta starts pointing east and goes anti-clockwise. """ name = 'polar' class PolarTransform(Transform): """ The base polar transform. This handles projection *theta* and *r* into Cartesian coordinate space *x* and *y*, but does not perform the ultimate affine transformation into the correct position. """ input_dims = 2 output_dims = 2 is_separable = False def __init__(self, resolution): """ Create a new polar transform. Resolution is the number of steps to interpolate between each input line segment to approximate its path in curved polar space. """ Transform.__init__(self) self._resolution = resolution def transform(self, tr): xy = npy.zeros(tr.shape, npy.float_) t = tr[:, 0:1] r = tr[:, 1:2] x = xy[:, 0:1] y = xy[:, 1:2] x[:] = r * npy.cos(t) y[:] = r * npy.sin(t) return xy transform.__doc__ = Transform.transform.__doc__ transform_non_affine = transform transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__ def transform_path(self, path): vertices = path.vertices t = vertices[:, 0:1] t[t != (npy.pi * 2.0)] %= (npy.pi * 2.0) if len(vertices) == 2 and vertices[0, 0] == vertices[1, 0]: return Path(self.transform(vertices), path.codes) ipath = path.interpolated(self._resolution) return Path(self.transform(ipath.vertices), ipath.codes) transform_path.__doc__ = Transform.transform_path.__doc__ transform_path_non_affine = transform_path transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__ def inverted(self): return PolarAxes.InvertedPolarTransform(self._resolution) inverted.__doc__ = Transform.inverted.__doc__ class PolarAffine(Affine2DBase): """ The affine part of the polar projection. Scales the output so that maximum radius rests on the edge of the axes circle. """ def __init__(self, scale_transform, limits): u""" *limits* is the view limit of the data. The only part of its bounds that is used is ymax (for the radius maximum). The theta range is always fixed to (0, 2\u03c0). """ Affine2DBase.__init__(self) self._scale_transform = scale_transform self._limits = limits self.set_children(scale_transform, limits) self._mtx = None def get_matrix(self): if self._invalid: limits_scaled = self._limits.transformed(self._scale_transform) ymax = limits_scaled.ymax affine = Affine2D() \ .scale(0.5 / ymax) \ .translate(0.5, 0.5) self._mtx = affine.get_matrix() self._inverted = None self._invalid = 0 return self._mtx get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__ class InvertedPolarTransform(Transform): """ The inverse of the polar transform, mapping Cartesian coordinate space *x* and *y* back to *theta* and *r*. 
""" input_dims = 2 output_dims = 2 is_separable = False def __init__(self, resolution): Transform.__init__(self) self._resolution = resolution def transform(self, xy): x = xy[:, 0:1] y = xy[:, 1:] r = npy.sqrt(x*x + y*y) theta = npy.arccos(x / r) theta = npy.where(y < 0, 2 * npy.pi - theta, theta) return npy.concatenate((theta, r), 1) transform.__doc__ = Transform.transform.__doc__ def inverted(self): return PolarAxes.PolarTransform(self._resolution) inverted.__doc__ = Transform.inverted.__doc__ class ThetaFormatter(Formatter): u""" Used to format the *theta* tick labels. Converts the native unit of radians into degrees and adds a degree symbol (\u00b0). """ def __call__(self, x, pos=None): # \u00b0 : degree symbol if rcParams['text.usetex'] and not rcParams['text.latex.unicode']: return r"$%0.0f^\circ$" % ((x / npy.pi) * 180.0) else: # we use unicode, rather than mathtext with \circ, so # that it will work correctly with any arbitrary font # (assuming it has a degree sign), whereas $5\circ$ # will only work correctly with one of the supported # math fonts (Computer Modern and STIX) return u"%0.0f\u00b0" % ((x / npy.pi) * 180.0) class RadialLocator(Locator): """ Used to locate radius ticks. Ensures that all ticks are strictly positive. For all other tasks, it delegates to the base :class:`~matplotlib.ticker.Locator` (which may be different depending on the scale of the *r*-axis. """ def __init__(self, base): self.base = base def __call__(self): ticks = self.base() return [x for x in ticks if x > 0] def autoscale(self): return self.base.autoscale() def pan(self, numsteps): return self.base.pan(numsteps) def zoom(self, direction): return self.base.zoom(direction) def refresh(self): return self.base.refresh() RESOLUTION = 75 def __init__(self, *args, **kwargs): """ Create a new Polar Axes for a polar plot. """ self._rpad = 0.05 self.resolution = kwargs.pop('resolution', self.RESOLUTION) Axes.__init__(self, *args, **kwargs) self.set_aspect('equal', adjustable='box', anchor='C') self.cla() __init__.__doc__ = Axes.__init__.__doc__ def cla(self): Axes.cla(self) self.title.set_y(1.05) self.xaxis.set_major_formatter(self.ThetaFormatter()) angles = npy.arange(0.0, 360.0, 45.0) self.set_thetagrids(angles) self.yaxis.set_major_locator(self.RadialLocator(self.yaxis.get_major_locator())) self.grid(rcParams['polaraxes.grid']) self.xaxis.set_ticks_position('none') self.yaxis.set_ticks_position('none') def _set_lim_and_transforms(self): self.transAxes = BboxTransformTo(self.bbox) # Transforms the x and y axis separately by a scale factor # It is assumed that this part will have non-linear components self.transScale = TransformWrapper(IdentityTransform()) # A (possibly non-linear) projection on the (already scaled) data self.transProjection = self.PolarTransform(self.resolution) # An affine transformation on the data, generally to limit the # range of the axes self.transProjectionAffine = self.PolarAffine(self.transScale, self.viewLim) # The complete data transformation stack -- from data all the # way to display coordinates self.transData = self.transScale + self.transProjection + \ (self.transProjectionAffine + self.transAxes) # This is the transform for theta-axis ticks. It is # equivalent to transData, except it always puts r == 1.0 at # the edge of the axis circle. 
self._xaxis_transform = ( self.transProjection + self.PolarAffine(IdentityTransform(), Bbox.unit()) + self.transAxes) # The theta labels are moved from radius == 0.0 to radius == 1.1 self._theta_label1_position = Affine2D().translate(0.0, 1.1) self._xaxis_text1_transform = ( self._theta_label1_position + self._xaxis_transform) self._theta_label2_position = Affine2D().translate(0.0, 1.0 / 1.1) self._xaxis_text2_transform = ( self._theta_label2_position + self._xaxis_transform) # This is the transform for r-axis ticks. It scales the theta # axis so the gridlines from 0.0 to 1.0, now go from 0.0 to # 2pi. self._yaxis_transform = ( Affine2D().scale(npy.pi * 2.0, 1.0) + self.transData) # The r-axis labels are put at an angle and padded in the r-direction self._r_label1_position = Affine2D().translate(22.5, self._rpad) self._yaxis_text1_transform = ( self._r_label1_position + Affine2D().scale(1.0 / 360.0, 1.0) + self._yaxis_transform ) self._r_label2_position = Affine2D().translate(22.5, self._rpad) self._yaxis_text2_transform = ( self._r_label2_position + Affine2D().scale(1.0 / 360.0, 1.0) + self._yaxis_transform ) def get_xaxis_transform(self): return self._xaxis_transform def get_xaxis_text1_transform(self, pad): return self._xaxis_text1_transform, 'center', 'center' def get_xaxis_text2_transform(self, pad): return self._xaxis_text2_transform, 'center', 'center' def get_yaxis_transform(self): return self._yaxis_transform def get_yaxis_text1_transform(self, pad): return self._yaxis_text1_transform, 'center', 'center' def get_yaxis_text2_transform(self, pad): return self._yaxis_text2_transform, 'center', 'center' def _gen_axes_patch(self): return Circle((0.5, 0.5), 0.5) def set_rmax(self, rmax): self.viewLim.y1 = rmax angle = self._r_label1_position.to_values()[4] self._r_label1_position.clear().translate( angle, rmax * self._rpad) self._r_label2_position.clear().translate( angle, -rmax * self._rpad) def get_rmax(self): return self.viewLim.ymax def set_yscale(self, *args, **kwargs): Axes.set_yscale(self, *args, **kwargs) self.yaxis.set_major_locator( self.RadialLocator(self.yaxis.get_major_locator())) set_rscale = Axes.set_yscale set_rticks = Axes.set_yticks def set_thetagrids(self, angles, labels=None, frac=None, **kwargs): """ Set the angles at which to place the theta grids (these gridlines are equal along the theta dimension). *angles* is in degrees. *labels*, if not None, is a ``len(angles)`` list of strings of the labels to use at each angle. If *labels* is None, the labels will be ``fmt %% angle`` *frac* is the fraction of the polar axes radius at which to place the label (1 is the edge). Eg. 1.05 is outside the axes and 0.95 is inside the axes. Return value is a list of tuples (*line*, *label*), where *line* is :class:`~matplotlib.lines.Line2D` instances and the *label* is :class:`~matplotlib.text.Text` instances. 
kwargs are optional text properties for the labels: %(Text)s ACCEPTS: sequence of floats """ angles = npy.asarray(angles, npy.float_) self.set_xticks(angles * (npy.pi / 180.0)) if labels is not None: self.set_xticklabels(labels) if frac is not None: self._theta_label1_position.clear().translate(0.0, frac) self._theta_label2_position.clear().translate(0.0, 1.0 / frac) for t in self.xaxis.get_ticklabels(): t.update(kwargs) return self.xaxis.get_ticklines(), self.xaxis.get_ticklabels() set_thetagrids.__doc__ = cbook.dedent(set_thetagrids.__doc__) % kwdocd def set_rgrids(self, radii, labels=None, angle=None, rpad=None, **kwargs): """ Set the radial locations and labels of the *r* grids. The labels will appear at radial distances *radii* at the given *angle* in degrees. *labels*, if not None, is a ``len(radii)`` list of strings of the labels to use at each radius. If *labels* is None, the built-in formatter will be used. *rpad* is a fraction of the max of *radii* which will pad each of the radial labels in the radial direction. Return value is a list of tuples (*line*, *label*), where *line* is :class:`~matplotlib.lines.Line2D` instances and the *label* is :class:`~matplotlib.text.Text` instances. kwargs are optional text properties for the labels: %(Text)s ACCEPTS: sequence of floats """ radii = npy.asarray(radii) rmin = radii.min() if rmin <= 0: raise ValueError('radial grids must be strictly positive') self.set_yticks(radii) if labels is not None: self.set_yticklabels(labels) if angle is None: angle = self._r_label1_position.to_values()[4] if rpad is not None: self._rpad = rpad rmax = self.get_rmax() self._r_label1_position.clear().translate(angle, self._rpad * rmax) self._r_label2_position.clear().translate(angle, -self._rpad * rmax) for t in self.yaxis.get_ticklabels(): t.update(kwargs) return self.yaxis.get_ticklines(), self.yaxis.get_ticklabels() set_rgrids.__doc__ = cbook.dedent(set_rgrids.__doc__) % kwdocd def set_xscale(self, scale, *args, **kwargs): if scale != 'linear': raise NotImplementedError("You can not set the xscale on a polar plot.") def set_xlim(self, *args, **kargs): # The xlim is fixed, no matter what you do self.viewLim.intervalx = (0.0, npy.pi * 2.0) def format_coord(self, theta, r): """ Return a format string formatting the coordinate using Unicode characters. """ theta /= math.pi # \u03b8: lower-case theta # \u03c0: lower-case pi # \u00b0: degree symbol return u'\u03b8=%0.3f\u03c0 (%0.3f\u00b0), r=%0.3f' % (theta, theta * 180.0, r) def get_data_ratio(self): ''' Return the aspect ratio of the data itself. 
For a polar plot, this should always be 1.0 ''' return 1.0 ### Interactive panning def can_zoom(self): """ Return True if this axes support the zoom box """ return False def start_pan(self, x, y, button): angle = self._r_label1_position.to_values()[4] / 180.0 * npy.pi mode = '' if button == 1: epsilon = npy.pi / 45.0 t, r = self.transData.inverted().transform_point((x, y)) if t >= angle - epsilon and t <= angle + epsilon: mode = 'drag_r_labels' elif button == 3: mode = 'zoom' self._pan_start = cbook.Bunch( rmax = self.get_rmax(), trans = self.transData.frozen(), trans_inverse = self.transData.inverted().frozen(), r_label_angle = self._r_label1_position.to_values()[4], x = x, y = y, mode = mode ) def end_pan(self): del self._pan_start def drag_pan(self, button, key, x, y): p = self._pan_start if p.mode == 'drag_r_labels': startt, startr = p.trans_inverse.transform_point((p.x, p.y)) t, r = p.trans_inverse.transform_point((x, y)) # Deal with theta dt0 = t - startt dt1 = startt - t if abs(dt1) < abs(dt0): dt = abs(dt1) * sign(dt0) * -1.0 else: dt = dt0 * -1.0 dt = (dt / npy.pi) * 180.0 rpad = self._r_label1_position.to_values()[5] self._r_label1_position.clear().translate( p.r_label_angle - dt, rpad) self._r_label2_position.clear().translate( p.r_label_angle - dt, -rpad) elif p.mode == 'zoom': startt, startr = p.trans_inverse.transform_point((p.x, p.y)) t, r = p.trans_inverse.transform_point((x, y)) dr = r - startr # Deal with r scale = r / startr self.set_rmax(p.rmax / scale) # These are a couple of aborted attempts to project a polar plot using # cubic bezier curves. # def transform_path(self, path): # twopi = 2.0 * npy.pi # halfpi = 0.5 * npy.pi # vertices = path.vertices # t0 = vertices[0:-1, 0] # t1 = vertices[1: , 0] # td = npy.where(t1 > t0, t1 - t0, twopi - (t0 - t1)) # maxtd = td.max() # interpolate = npy.ceil(maxtd / halfpi) # if interpolate > 1.0: # vertices = self.interpolate(vertices, interpolate) # vertices = self.transform(vertices) # result = npy.zeros((len(vertices) * 3 - 2, 2), npy.float_) # codes = mpath.Path.CURVE4 * npy.ones((len(vertices) * 3 - 2, ), mpath.Path.code_type) # result[0] = vertices[0] # codes[0] = mpath.Path.MOVETO # kappa = 4.0 * ((npy.sqrt(2.0) - 1.0) / 3.0) # kappa = 0.5 # p0 = vertices[0:-1] # p1 = vertices[1: ] # x0 = p0[:, 0:1] # y0 = p0[:, 1: ] # b0 = ((y0 - x0) - y0) / ((x0 + y0) - x0) # a0 = y0 - b0*x0 # x1 = p1[:, 0:1] # y1 = p1[:, 1: ] # b1 = ((y1 - x1) - y1) / ((x1 + y1) - x1) # a1 = y1 - b1*x1 # x = -(a0-a1) / (b0-b1) # y = a0 + b0*x # xk = (x - x0) * kappa + x0 # yk = (y - y0) * kappa + y0 # result[1::3, 0:1] = xk # result[1::3, 1: ] = yk # xk = (x - x1) * kappa + x1 # yk = (y - y1) * kappa + y1 # result[2::3, 0:1] = xk # result[2::3, 1: ] = yk # result[3::3] = p1 # print vertices[-2:] # print result[-2:] # return mpath.Path(result, codes) # twopi = 2.0 * npy.pi # halfpi = 0.5 * npy.pi # vertices = path.vertices # t0 = vertices[0:-1, 0] # t1 = vertices[1: , 0] # td = npy.where(t1 > t0, t1 - t0, twopi - (t0 - t1)) # maxtd = td.max() # interpolate = npy.ceil(maxtd / halfpi) # print "interpolate", interpolate # if interpolate > 1.0: # vertices = self.interpolate(vertices, interpolate) # result = npy.zeros((len(vertices) * 3 - 2, 2), npy.float_) # codes = mpath.Path.CURVE4 * npy.ones((len(vertices) * 3 - 2, ), mpath.Path.code_type) # result[0] = vertices[0] # codes[0] = mpath.Path.MOVETO # kappa = 4.0 * ((npy.sqrt(2.0) - 1.0) / 3.0) # tkappa = npy.arctan(kappa) # hyp_kappa = npy.sqrt(kappa*kappa + 1.0) # t0 = vertices[0:-1, 0] # t1 = vertices[1: , 
0] # r0 = vertices[0:-1, 1] # r1 = vertices[1: , 1] # td = npy.where(t1 > t0, t1 - t0, twopi - (t0 - t1)) # td_scaled = td / (npy.pi * 0.5) # rd = r1 - r0 # r0kappa = r0 * kappa * td_scaled # r1kappa = r1 * kappa * td_scaled # ravg_kappa = ((r1 + r0) / 2.0) * kappa * td_scaled # result[1::3, 0] = t0 + (tkappa * td_scaled) # result[1::3, 1] = r0*hyp_kappa # # result[1::3, 1] = r0 / npy.cos(tkappa * td_scaled) # npy.sqrt(r0*r0 + ravg_kappa*ravg_kappa) # result[2::3, 0] = t1 - (tkappa * td_scaled) # result[2::3, 1] = r1*hyp_kappa # # result[2::3, 1] = r1 / npy.cos(tkappa * td_scaled) # npy.sqrt(r1*r1 + ravg_kappa*ravg_kappa) # result[3::3, 0] = t1 # result[3::3, 1] = r1 # print vertices[:6], result[:6], t0[:6], t1[:6], td[:6], td_scaled[:6], tkappa # result = self.transform(result) # return mpath.Path(result, codes) # transform_path_non_affine = transform_path
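A minimal usage sketch (not part of the file above) of the theta/r grid helpers defined in this polar-axes code, driven through the public pyplot API; the data and grid positions are made up for illustration.

import numpy as np
import matplotlib.pyplot as plt

# A simple polar plot exercising set_thetagrids / set_rgrids from above.
theta = np.linspace(0.0, 2 * np.pi, 100)
r = np.abs(np.sin(3 * theta))

ax = plt.subplot(111, polar=True)
ax.plot(theta, r)

# Angular labels every 45 degrees and four radial grid circles placed at 22.5 deg.
ax.set_thetagrids(np.arange(0, 360, 45))
ax.set_rgrids([0.25, 0.5, 0.75, 1.0], angle=22.5)
plt.show()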
gpl-3.0
databricks/spark-sklearn
python/doc/conf.py
1
8435
# -*- coding: utf-8 -*- # # spark_sklearn documentation build configuration file, created by # sphinx-quickstart on Wed Dec 16 10:51:51 2015. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'spark_sklearn' copyright = u'2015, Joseph Bradley, Tim Hunter, Vladimir Feinberg' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The full version, including alpha/beta/rc tags. release = os.environ.get('PACKAGE_VERSION', 'Unknown') # The short X.Y version. version = '.'.join(release.split('.')[:2]) # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. 
#html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". #html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. #html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'spark_sklearndoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ('index', 'spark_sklearn.tex', u'spark\\_sklearn Documentation', u'Joseph Bradley, Tim Hunter', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. 
#latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'spark_sklearn', u'spark_sklearn Documentation', [u'Joseph Bradley, Tim Hunter'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'spark_sklearn', u'spark_sklearn Documentation', u'Joseph Bradley, Tim Hunter', 'spark_sklearn', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. #texinfo_no_detailmenu = False # __init__ info to go into class doc autoclass_content = 'both'
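A small sketch of the version logic used in this conf.py: the full release string is read from the PACKAGE_VERSION environment variable and the short X.Y version is derived from it. The "0.2.0" value below is only a placeholder.

import os

os.environ["PACKAGE_VERSION"] = "0.2.0"   # hypothetical release string

release = os.environ.get("PACKAGE_VERSION", "Unknown")
version = ".".join(release.split(".")[:2])
print(release, version)   # -> 0.2.0 0.2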
apache-2.0
fidelram/deepTools
deeptools/computeGCBias.py
1
30954
#!/usr/bin/env python # -*- coding: utf-8 -*- import time import multiprocessing import numpy as np import argparse from scipy.stats import poisson import py2bit import sys from deeptoolsintervals import GTF from deeptools.utilities import tbitToBamChrName, getGC_content from deeptools import parserCommon, mapReduce from deeptools.getFragmentAndReadSize import get_read_and_fragment_length from deeptools import bamHandler debug = 0 old_settings = np.seterr(all='ignore') def parse_arguments(args=None): parentParser = parserCommon.getParentArgParse(binSize=False, blackList=True) requiredArgs = getRequiredArgs() parser = argparse.ArgumentParser( parents=[requiredArgs, parentParser], formatter_class=argparse.ArgumentDefaultsHelpFormatter, description='Computes the GC-bias using Benjamini\'s method ' '[Benjamini & Speed (2012). Nucleic Acids Research, 40(10). doi: 10.1093/nar/gks001]. ' 'The GC-bias is visualized and the resulting table can be used to' 'correct the bias with `correctGCBias`.', usage='\n computeGCBias ' '-b file.bam --effectiveGenomeSize 2150570000 -g mm9.2bit -l 200 --GCbiasFrequenciesFile freq.txt [options]', conflict_handler='resolve', add_help=False) return parser def getRequiredArgs(): parser = argparse.ArgumentParser(add_help=False) required = parser.add_argument_group('Required arguments') required.add_argument('--bamfile', '-b', metavar='bam file', help='Sorted BAM file. ', required=True) required.add_argument('--effectiveGenomeSize', help='The effective genome size is the portion ' 'of the genome that is mappable. Large fractions of ' 'the genome are stretches of NNNN that should be ' 'discarded. Also, if repetitive regions were not ' 'included in the mapping of reads, the effective ' 'genome size needs to be adjusted accordingly. ' 'A table of values is available here: ' 'http://deeptools.readthedocs.io/en/latest/content/feature/effectiveGenomeSize.html .', default=None, type=int, required=True) required.add_argument('--genome', '-g', help='Genome in two bit format. Most genomes can be ' 'found here: http://hgdownload.cse.ucsc.edu/gbdb/ ' 'Search for the .2bit ending. Otherwise, fasta ' 'files can be converted to 2bit using the UCSC ' 'programm called faToTwoBit available for different ' 'plattforms at ' 'http://hgdownload.cse.ucsc.edu/admin/exe/', metavar='2bit FILE', required=True) required.add_argument('--GCbiasFrequenciesFile', '-freq', '-o', help='Path to save the file containing ' 'the observed and expected read frequencies per %%GC-' 'content. This file is needed to run the ' 'correctGCBias tool. This is a text file.', type=argparse.FileType('w'), metavar='FILE', required=True) # define the optional arguments optional = parser.add_argument_group('Optional arguments') optional.add_argument('--fragmentLength', '-l', help='Fragment length used for the sequencing. If ' 'paired-end reads are used, the fragment length is ' 'computed based from the bam file', type=int) optional.add_argument("--help", "-h", action="help", help="show this help message and exit") optional.add_argument('--sampleSize', default=5e7, help='Number of sampling points to be considered. 
(Default: %(default)s)', type=int) optional.add_argument('--extraSampling', help='BED file containing genomic regions for which ' 'extra sampling is required because they are ' 'underrepresented in the genome.', type=argparse.FileType('r'), metavar='BED file') plot = parser.add_argument_group('Diagnostic plot options') plot.add_argument('--biasPlot', metavar='FILE NAME', help='If given, a diagnostic image summarizing ' 'the GC-bias will be saved.') plot.add_argument('--plotFileFormat', metavar='', help='image format type. If given, this ' 'option overrides the ' 'image format based on the plotFile ending. ' 'The available options are: "png", ' '"eps", "pdf", "plotly" and "svg"', choices=['png', 'pdf', 'svg', 'eps', 'plotly']) plot.add_argument('--regionSize', metavar='INT', type=int, default=300, help='To plot the reads per %%GC over a region' 'the size of the region is required. By default, ' 'the bin size is set to 300 bases, which is close to the ' 'standard fragment size for Illumina machines. However, ' 'if the depth of sequencing is low, a larger bin size ' 'will be required, otherwise many bins will not ' 'overlap with any read (Default: %(default)s)') return parser def getPositionsToSample(chrom, start, end, stepSize): """ check if the region submitted to the worker overlaps with the region to take extra effort to sample. If that is the case, the regions to sample array is increased to match each of the positions in the extra effort region sampled at the same stepSize along the interval. If a filter out tree is given, then from positions to sample those regions are cleaned """ positions_to_sample = np.arange(start, end, stepSize) if global_vars['filter_out']: filter_out_tree = GTF(global_vars['filter_out']) else: filter_out_tree = None if global_vars['extra_sampling_file']: extra_tree = GTF(global_vars['extra_sampling_file']) else: extra_tree = None if extra_tree: orig_len = len(positions_to_sample) try: extra_match = extra_tree.findOverlaps(chrom, start, end) except KeyError: extra_match = [] if len(extra_match) > 0: for intval in extra_match: positions_to_sample = np.append(positions_to_sample, list(range(intval[0], intval[1], stepSize))) # remove duplicates positions_to_sample = np.unique(np.sort(positions_to_sample)) if debug: print("sampling increased to {} from {}".format( len(positions_to_sample), orig_len)) # skip regions that are filtered out if filter_out_tree: try: out_match = filter_out_tree.findOverlaps(chrom, start, end) except KeyError: out_match = [] if len(out_match) > 0: for intval in out_match: positions_to_sample = \ positions_to_sample[(positions_to_sample < intval[0]) | (positions_to_sample >= intval[1])] return positions_to_sample def countReadsPerGC_wrapper(args): return countReadsPerGC_worker(*args) def countReadsPerGC_worker(chromNameBam, start, end, stepSize, regionSize, chrNameBamToBit, verbose=False): """given a genome region defined by (start, end), the GC content is quantified for regions of size regionSize that are contiguous """ chromNameBit = chrNameBamToBit[chromNameBam] tbit = py2bit.open(global_vars['2bit']) bam = bamHandler.openBam(global_vars['bam']) c = 1 sub_reads_per_gc = [] positions_to_sample = getPositionsToSample(chromNameBit, start, end, stepSize) for index in range(len(positions_to_sample)): i = positions_to_sample[index] # stop if region extends over the chromosome end if tbit.chroms(chromNameBit) < i + regionSize: break try: gc = getGC_content(tbit, chromNameBit, int(i), int(i + regionSize)) except Exception as detail: if verbose: 
print("{}:{}-{}".format(chromNameBit, i, i + regionSize)) print(detail) continue numberReads = bam.count(chromNameBam, i, i + regionSize) sub_reads_per_gc.append((numberReads, gc)) c += 1 return sub_reads_per_gc def tabulateGCcontent_wrapper(args): return tabulateGCcontent_worker(*args) def tabulateGCcontent_worker(chromNameBam, start, end, stepSize, fragmentLength, chrNameBamToBit, verbose=False): r""" given genome regions, the GC content of the genome is tabulated for fragments of length 'fragmentLength' each 'stepSize' positions. >>> test = Tester() >>> args = test.testTabulateGCcontentWorker() >>> N_gc, F_gc = tabulateGCcontent_worker(*args) The forward read positions are: [1, 4, 10, 10, 16, 18] which correspond to a GC of [1, 1, 1, 1, 2, 1] The evaluated position are [0, 2, 4, 6, 8, 10, 12, 14, 16, 18] the corresponding GC is [2, 1, 1, 2, 2, 1, 2, 3, 2, 1] >>> print(N_gc) [0 4 5 1] >>> print(F_gc) [0 4 1 0] >>> test.set_filter_out_file() >>> chrNameBam2bit = {'2L': 'chr2L'} Test for the filter out option >>> N_gc, F_gc = tabulateGCcontent_worker('2L', 0, 20, 2, ... {'median': 3}, chrNameBam2bit) >>> test.unset_filter_out_file() The evaluated positions are [ 0 2 8 10 12 14 16 18] >>> print(N_gc) [0 3 4 1] >>> print(F_gc) [0 3 1 0] Test for extra_sampling option >>> test.set_extra_sampling_file() >>> chrNameBam2bit = {'2L': 'chr2L'} >>> res = tabulateGCcontent_worker('2L', 0, 20, 2, ... {'median': 3}, chrNameBam2bit) The new positions evaluated are [0, 1, 2, 3, 4, 6, 8, 10, 12, 14, 16, 18] and the GC is [2, 1, 1, 0, 1, 2, 2, 1, 2, 3, 2, 1] >>> print(res[0]) [1 5 5 1] >>> print(res[1]) [0 5 1 0] """ if start > end: raise NameError("start %d bigger that end %d" % (start, end)) chromNameBit = chrNameBamToBit[chromNameBam] # array to keep track of the GC from regions of length 'fragmentLength' # from the genome. The index of the array is used to # indicate the gc content. The values inside the # array are counts. Thus, if N_gc[10] = 3, that means # that 3 regions have a gc_content of 10. subN_gc = np.zeros(fragmentLength['median'] + 1, dtype='int') subF_gc = np.zeros(fragmentLength['median'] + 1, dtype='int') tbit = py2bit.open(global_vars['2bit']) bam = bamHandler.openBam(global_vars['bam']) peak = 0 startTime = time.time() if verbose: print("[{:.3f}] computing positions to " "sample".format(time.time() - startTime)) positions_to_sample = getPositionsToSample(chromNameBit, start, end, stepSize) read_counts = [] # Optimize IO. # if the sample regions are far apart from each # other is faster to go to each location and fetch # the reads found there. # Otherwise, if the regions to sample are close to # each other, is faster to load all the reads in # a large region into memory and consider only # those falling into the positions to sample. 
# The following code gets the reads # that are at sampling positions that lie close together if np.mean(np.diff(positions_to_sample)) < 1000: start_pos = min(positions_to_sample) end_pos = max(positions_to_sample) if verbose: print("[{:.3f}] caching reads".format(time.time() - startTime)) counts = np.bincount([r.pos - start_pos for r in bam.fetch(chromNameBam, start_pos, end_pos + 1) if not r.is_reverse and not r.is_unmapped and r.pos >= start_pos], minlength=end_pos - start_pos + 2) read_counts = counts[positions_to_sample - min(positions_to_sample)] if verbose: print("[{:.3f}] finish caching reads.".format( time.time() - startTime)) countTime = time.time() c = 1 for index in range(len(positions_to_sample)): i = positions_to_sample[index] # stop if the end of the chromosome is reached if i + fragmentLength['median'] > tbit.chroms(chromNameBit): break try: gc = getGC_content(tbit, chromNameBit, int(i), int(i + fragmentLength['median']), fraction=False) except Exception as detail: if verbose: print(detail) continue subN_gc[gc] += 1 # count all reads at position 'i' if len(read_counts) == 0: # case when no cache was done num_reads = len([x.pos for x in bam.fetch(chromNameBam, i, i + 1) if x.is_reverse is False and x.pos == i]) else: num_reads = read_counts[index] if num_reads >= global_vars['max_reads']: peak += 1 continue subF_gc[gc] += num_reads if verbose: if index % 50000 == 0: endTime = time.time() print("%s processing %d (%.1f per sec) @ %s:%s-%s %s" % (multiprocessing.current_process().name, index, index / (endTime - countTime), chromNameBit, start, end, stepSize)) c += 1 if verbose: endTime = time.time() print("%s processing %d (%.1f per sec) @ %s:%s-%s %s" % (multiprocessing.current_process().name, index, index / (endTime - countTime), chromNameBit, start, end, stepSize)) print("%s total time %.1f @ %s:%s-%s %s" % (multiprocessing.current_process().name, (endTime - startTime), chromNameBit, start, end, stepSize)) return(subN_gc, subF_gc) def tabulateGCcontent(fragmentLength, chrNameBitToBam, stepSize, chromSizes, numberOfProcessors=None, verbose=False, region=None): r""" Subdivides the genome or the reads into chunks to be analyzed in parallel using several processors. This codes handles the creation of workers that tabulate the GC content for small regions and then collects and integrates the results >>> test = Tester() >>> arg = test.testTabulateGCcontent() >>> res = tabulateGCcontent(*arg) >>> res array([[ 0. , 18. , 1. ], [ 3. , 63. , 0.45815996], [ 7. , 159. , 0.42358185], [ 25. , 192. , 1.25278115], [ 28. , 215. , 1.25301422], [ 16. , 214. , 0.71935396], [ 12. , 95. , 1.21532959], [ 9. , 24. , 3.60800971], [ 3. , 11. , 2.62400706], [ 0. , 0. , 1. ], [ 0. , 0. , 1. ]]) """ global global_vars chrNameBamToBit = dict([(v, k) for k, v in chrNameBitToBam.items()]) chunkSize = int(min(2e6, 4e5 / global_vars['reads_per_bp'])) chromSizes = [(k, v) for k, v in chromSizes if k in list(chrNameBamToBit.keys())] imap_res = mapReduce.mapReduce((stepSize, fragmentLength, chrNameBamToBit, verbose), tabulateGCcontent_wrapper, chromSizes, genomeChunkLength=chunkSize, numberOfProcessors=numberOfProcessors, region=region) for subN_gc, subF_gc in imap_res: try: F_gc += subF_gc N_gc += subN_gc except NameError: F_gc = subF_gc N_gc = subN_gc if sum(F_gc) == 0: sys.exit("No fragments included in the sampling! 
Consider decreasing (or maybe increasing) the --sampleSize parameter") scaling = float(sum(N_gc)) / float(sum(F_gc)) R_gc = np.array([float(F_gc[x]) / N_gc[x] * scaling if N_gc[x] and F_gc[x] > 0 else 1 for x in range(len(F_gc))]) data = np.transpose(np.vstack((F_gc, N_gc, R_gc))) return data def countReadsPerGC(regionSize, chrNameBitToBam, stepSize, chromSizes, numberOfProcessors=None, verbose=False, region=None): r""" Computes for a region of size regionSize, the GC of the region and the number of reads that overlap it. >>> test = Tester() >>> arg = test.testCountReadsPerGC() >>> reads_per_gc = countReadsPerGC(*arg) >>> reads_per_gc[0:5,:] array([[132. , 0.44 ], [132. , 0.44 ], [133. , 0.44 ], [134. , 0.43666667], [134. , 0.44 ]]) """ global global_vars chrNameBamToBit = dict([(v, k) for k, v in chrNameBitToBam.items()]) chunkSize = int(min(2e6, 4e5 / global_vars['reads_per_bp'])) imap_res = mapReduce.mapReduce((stepSize, regionSize, chrNameBamToBit, verbose), countReadsPerGC_wrapper, chromSizes, genomeChunkLength=chunkSize, numberOfProcessors=numberOfProcessors, region=region) reads_per_gc = [] for sub_reads_per_gc in imap_res: reads_per_gc += sub_reads_per_gc reads_per_gc = np.asarray(reads_per_gc) return reads_per_gc def smooth(x, window_len=3): """ *CURRENTLY* not being used smooths the values from the frequencies by taking the average of 'window_len' values. window_len has to be an odd number """ # do not smooth small arrays if len(x) < window_len * 2: return x i = 0 y = x[:] half_width = (window_len - 1) / 2 for i in range(0, len(x)): if i < half_width or i + half_width + 1 > len(x): continue else: y[i] = np.mean(x[i - half_width:i + half_width + 1]) # clip low values, this avoid problems with zeros return y def bin_by(x, y, nbins=10): """ Bin x by y. 
Returns the binned "x" values and the left edges of the bins """ bins = np.linspace(0, 1, nbins + 1) # To avoid extra bin for the max value bins[-1] += 1 indices = np.digitize(y, bins) output = [] for i in range(1, len(bins)): output.append(x[indices == i]) # Just return the left edges of the bins bins = bins[:-1] return output, bins def plotlyGCbias(file_name, frequencies, reads_per_gc, region_size): import plotly.offline as py import plotly.graph_objs as go import matplotlib.cbook as cbook fig = go.Figure() fig['layout']['xaxis1'] = dict(domain=[0.0, 1.0], anchor="y1", title="GC fraction") fig['layout']['yaxis1'] = dict(domain=[0.55, 1.0], anchor="x1", title="Number of reads") fig['layout']['xaxis2'] = dict(domain=[0.0, 1.0], anchor="y2", title="GC fraction", range=[0.2, 0.7]) fig['layout']['yaxis2'] = dict(domain=[0.0, 0.45], anchor="x2", title="log2(observed/expected)") text = "reads per {} base region".format(region_size) annos = [{'yanchor': 'bottom', 'xref': 'paper', 'xanchor': 'center', 'yref': 'paper', 'text': text, 'y': 1.0, 'x': 0.5, 'font': {'size': 16}, 'showarrow': False}] text = "normalized observed/expected read counts" annos.append({'yanchor': 'bottom', 'xref': 'paper', 'xanchor': 'center', 'yref': 'paper', 'text': text, 'y': 0.5, 'x': 0.5, 'font': {'size': 16}, 'showarrow': False}) # prepare data for boxplot reads, GC = reads_per_gc.T reads_per_gc, bin_labels = bin_by(reads, GC, nbins=100) to_keep = [idx for idx, x in enumerate(bin_labels) if 0.2 <= x <= 0.7] reads_per_gc = [reads_per_gc[x] for x in to_keep] bin_labels = [bin_labels[x] for x in to_keep] # produce the same boxplot as matplotlib as vastly reduce the output file size bins = [] for b in reads_per_gc: s = cbook.boxplot_stats(b)[0] bins.append([s['whislo'], s['q1'], s['q1'], s['med'], s['med'], s['med'], s['q3'], s['q3'], s['whishi']]) data = [] # top plot for x, y in zip(bin_labels, bins): trace = go.Box(x=x, y=y, xaxis='x1', yaxis='y1', boxpoints='outliers', showlegend=False, name="{}".format(x), line=dict(color='rgb(107,174,214)')) data.append(trace) # bottom plot x = np.linspace(0, 1, frequencies.shape[0]) trace = go.Scatter(x=x, y=np.log2(frequencies[:, 2]), xaxis='x2', yaxis='y2', showlegend=False, line=dict(color='rgb(107,174,214)')) data.append(trace) fig['data'] = data fig['layout']['annotations'] = annos py.plot(fig, filename=file_name, auto_open=False) def plotGCbias(file_name, frequencies, reads_per_gc, region_size, image_format=None): import matplotlib matplotlib.use('Agg') matplotlib.rcParams['pdf.fonttype'] = 42 matplotlib.rcParams['svg.fonttype'] = 'none' import matplotlib.pyplot as plt # prepare data for boxplot reads, GC = reads_per_gc.T reads_per_gc, bin_labels = bin_by(reads, GC, nbins=100) to_keep = [idx for idx, x in enumerate(bin_labels) if 0.2 <= x <= 0.7] reads_per_gc = [reads_per_gc[x] for x in to_keep] bin_labels = [bin_labels[x] for x in to_keep] title = "reads per regions of {} bp".format(region_size) fig = plt.figure(figsize=(6, 8)) ax1 = fig.add_subplot(211, title=title) ax2 = fig.add_subplot(212, title='normalized observed/expected read counts') # make boxplot bp = ax1.boxplot(reads_per_gc, notch=0, patch_artist=True) plt.setp(bp['boxes'], color='black', facecolor='LightGreen') plt.setp(bp['medians'], color='black') plt.setp(bp['whiskers'], color='black', linestyle='dashed') plt.setp(bp['fliers'], marker='None') # get the whisker that spands the most y_max = max([x.get_data()[1][1] for x in bp['whiskers']]) ax1.set_ylim(0 - (y_max * 0.05), y_max * 1.05) ax1.set_ylabel('Number of 
reads') ax1.set_xlabel('GC fraction') xticks = [idx for idx, x in enumerate(bin_labels) if int(x * 100) % 10 == 0] ax1.set_xticks(xticks) ax1.set_xticklabels(["{:.1f}".format(bin_labels[x]) for x in xticks]) x = np.linspace(0, 1, frequencies.shape[0]) y = np.log2(frequencies[:, 2]) ax2.plot(x, y, color='#8c96f0') ax2.set_xlabel('GC fraction') ax2.set_ylabel('log2ratio observed/expected') ax2.set_xlim(0.2, 0.7) y_max = max(y[np.where(x >= 0.2)[0][0]:np.where(x <= 0.7)[0][-1] + 1]) y_min = min(y[np.where(x >= 0.2)[0][0]:np.where(x <= 0.7)[0][-1] + 1]) if y_max > 0: y_max *= 1.1 else: y_max *= 0.9 if y_min < 0: y_min *= 1.1 else: y_min *= 0.9 ax2.set_ylim(y_min, y_max) plt.tight_layout() plt.savefig(file_name, bbox_inches='tight', dpi=100, format=image_format) plt.close() def main(args=None): args = parse_arguments().parse_args(args) if args.extraSampling: extra_sampling_file = args.extraSampling.name args.extraSampling.close() else: extra_sampling_file = None global global_vars global_vars = {} global_vars['2bit'] = args.genome global_vars['bam'] = args.bamfile global_vars['filter_out'] = args.blackListFileName global_vars['extra_sampling_file'] = extra_sampling_file tbit = py2bit.open(global_vars['2bit']) bam, mapped, unmapped, stats = bamHandler.openBam(global_vars['bam'], returnStats=True, nThreads=args.numberOfProcessors) if args.fragmentLength: fragment_len_dict = \ {'median': args.fragmentLength} else: fragment_len_dict, __ = \ get_read_and_fragment_length(args.bamfile, None, numberOfProcessors=args.numberOfProcessors, verbose=args.verbose) if not fragment_len_dict: print("\nPlease provide the fragment length used for the " "sample preparation.\n") exit(1) fragment_len_dict = {'median': int(fragment_len_dict['median'])} chrNameBitToBam = tbitToBamChrName(list(tbit.chroms().keys()), bam.references) global_vars['genome_size'] = sum(tbit.chroms().values()) global_vars['total_reads'] = mapped global_vars['reads_per_bp'] = \ float(global_vars['total_reads']) / args.effectiveGenomeSize confidence_p_value = float(1) / args.sampleSize # chromSizes: list of tuples chromSizes = [(bam.references[i], bam.lengths[i]) for i in range(len(bam.references))] chromSizes = [x for x in chromSizes if x[0] in tbit.chroms()] # use poisson distribution to identify peaks that should be discarted. # I multiply by 4, because the real distribution of reads # vary depending on the gc content # and the global number of reads per bp may a be too low. # empirically, a value of at least 4 times as big as the # reads_per_bp was found. # Similarly for the min value, I divide by 4. global_vars['max_reads'] = poisson(4 * global_vars['reads_per_bp'] * fragment_len_dict['median']).isf(confidence_p_value) # this may be of not use, unless the depth of sequencing is really high # as this value is close to 0 global_vars['min_reads'] = poisson(0.25 * global_vars['reads_per_bp'] * fragment_len_dict['median']).ppf(confidence_p_value) for key in global_vars: print("{}: {}".format(key, global_vars[key])) print("computing frequencies") # the GC of the genome is sampled each stepSize bp. 
stepSize = max(int(global_vars['genome_size'] / args.sampleSize), 1) print("stepSize: {}".format(stepSize)) data = tabulateGCcontent(fragment_len_dict, chrNameBitToBam, stepSize, chromSizes, numberOfProcessors=args.numberOfProcessors, verbose=args.verbose, region=args.region) np.savetxt(args.GCbiasFrequenciesFile.name, data) if args.biasPlot: reads_per_gc = countReadsPerGC(args.regionSize, chrNameBitToBam, stepSize * 10, chromSizes, numberOfProcessors=args.numberOfProcessors, verbose=args.verbose, region=args.region) if args.plotFileFormat == "plotly": plotlyGCbias(args.biasPlot, data, reads_per_gc, args.regionSize) else: plotGCbias(args.biasPlot, data, reads_per_gc, args.regionSize, image_format=args.plotFileFormat) class Tester(): def __init__(self): import os self.root = os.path.dirname(os.path.abspath(__file__)) + "/test/test_corrGC/" self.tbitFile = self.root + "sequence.2bit" self.bamFile = self.root + "test.bam" self.mappability = self.root + "mappability.bw" self.chrNameBam = '2L' self.chrNameBit = 'chr2L' bam, mapped, unmapped, stats = bamHandler.openBam(self.bamFile, returnStats=True) tbit = py2bit.open(self.tbitFile) global debug debug = 0 global global_vars global_vars = {'2bit': self.tbitFile, 'bam': self.bamFile, 'filter_out': None, 'mappability': self.mappability, 'extra_sampling_file': None, 'max_reads': 5, 'min_reads': 0, 'min_reads': 0, 'reads_per_bp': 0.3, 'total_reads': mapped, 'genome_size': sum(tbit.chroms().values()) } def testTabulateGCcontentWorker(self): stepSize = 2 fragmentLength = {'min': 1, 'median': 3, 'max': 5} start = 0 end = 20 chrNameBam2bit = {'2L': 'chr2L'} return (self.chrNameBam, start, end, stepSize, fragmentLength, chrNameBam2bit) def set_filter_out_file(self): global global_vars global_vars['filter_out'] = self.root + "filter_out.bed" def unset_filter_out_file(self): global global_vars global_vars['filter_out'] = None def set_extra_sampling_file(self): global global_vars global_vars['extra_sampling_file'] = self.root + "extra_sampling.bed" def testTabulateGCcontent(self): fragmentLength = {'median': 10} chrNameBitToBam = {'chr2L': '2L'} stepSize = 1 bam = bamHandler.openBam(global_vars['bam']) chromSizes = [(bam.references[i], bam.lengths[i]) for i in range(len(bam.references))] return (fragmentLength, chrNameBitToBam, stepSize, chromSizes, 1) def testCountReadsPerGC(self): regionSize = 300 chrNameBitToBam = {'chr2L': '2L'} stepSize = 1 bam = bamHandler.openBam(global_vars['bam']) chromSizes = [(bam.references[i], bam.lengths[i]) for i in range(len(bam.references))] return (regionSize, chrNameBitToBam, stepSize, chromSizes, 1) if __name__ == "__main__": main()
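A toy illustration (values invented for the demo) of the observed/expected ratio assembled in tabulateGCcontent() above: F_gc counts reads per GC value, N_gc counts sampled genomic fragments per GC value, and R_gc is their ratio scaled so that the two totals match.

import numpy as np

F_gc = np.array([0, 4, 1, 0])   # made-up read counts per GC value
N_gc = np.array([0, 4, 5, 1])   # made-up fragment counts per GC value

scaling = float(N_gc.sum()) / float(F_gc.sum())
R_gc = np.array([float(F_gc[x]) / N_gc[x] * scaling
                 if N_gc[x] and F_gc[x] > 0 else 1
                 for x in range(len(F_gc))])
print(np.transpose(np.vstack((F_gc, N_gc, R_gc))))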
gpl-3.0
kernc/scikit-learn
examples/ensemble/plot_adaboost_twoclass.py
347
3268
""" ================== Two-class AdaBoost ================== This example fits an AdaBoosted decision stump on a non-linearly separable classification dataset composed of two "Gaussian quantiles" clusters (see :func:`sklearn.datasets.make_gaussian_quantiles`) and plots the decision boundary and decision scores. The distributions of decision scores are shown separately for samples of class A and B. The predicted class label for each sample is determined by the sign of the decision score. Samples with decision scores greater than zero are classified as B, and are otherwise classified as A. The magnitude of a decision score determines the degree of likeness with the predicted class label. Additionally, a new dataset could be constructed containing a desired purity of class B, for example, by only selecting samples with a decision score above some value. """ print(__doc__) # Author: Noel Dawe <noel.dawe@gmail.com> # # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt from sklearn.ensemble import AdaBoostClassifier from sklearn.tree import DecisionTreeClassifier from sklearn.datasets import make_gaussian_quantiles # Construct dataset X1, y1 = make_gaussian_quantiles(cov=2., n_samples=200, n_features=2, n_classes=2, random_state=1) X2, y2 = make_gaussian_quantiles(mean=(3, 3), cov=1.5, n_samples=300, n_features=2, n_classes=2, random_state=1) X = np.concatenate((X1, X2)) y = np.concatenate((y1, - y2 + 1)) # Create and fit an AdaBoosted decision tree bdt = AdaBoostClassifier(DecisionTreeClassifier(max_depth=1), algorithm="SAMME", n_estimators=200) bdt.fit(X, y) plot_colors = "br" plot_step = 0.02 class_names = "AB" plt.figure(figsize=(10, 5)) # Plot the decision boundaries plt.subplot(121) x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1 y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1 xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step), np.arange(y_min, y_max, plot_step)) Z = bdt.predict(np.c_[xx.ravel(), yy.ravel()]) Z = Z.reshape(xx.shape) cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired) plt.axis("tight") # Plot the training points for i, n, c in zip(range(2), class_names, plot_colors): idx = np.where(y == i) plt.scatter(X[idx, 0], X[idx, 1], c=c, cmap=plt.cm.Paired, label="Class %s" % n) plt.xlim(x_min, x_max) plt.ylim(y_min, y_max) plt.legend(loc='upper right') plt.xlabel('x') plt.ylabel('y') plt.title('Decision Boundary') # Plot the two-class decision scores twoclass_output = bdt.decision_function(X) plot_range = (twoclass_output.min(), twoclass_output.max()) plt.subplot(122) for i, n, c in zip(range(2), class_names, plot_colors): plt.hist(twoclass_output[y == i], bins=10, range=plot_range, facecolor=c, label='Class %s' % n, alpha=.5) x1, x2, y1, y2 = plt.axis() plt.axis((x1, x2, y1, y2 * 1.2)) plt.legend(loc='upper right') plt.ylabel('Samples') plt.xlabel('Score') plt.title('Decision Scores') plt.tight_layout() plt.subplots_adjust(wspace=0.35) plt.show()
bsd-3-clause
sinhrks/pyopendata
pyopendata/io/jstat.py
1
4233
# pylint: disable-msg=E1101,W0613,W0603 from __future__ import unicode_literals import os import requests import numpy as np import pandas as pd import pandas.compat as compat from pyopendata.io.util import _read_content def read_jstat(path_or_buf, typ='frame', squeeze=True): """ Convert a JSON-Stat string to pandas object Parameters ---------- filepath_or_buffer : a valid JSON-Stat string or file-like http://json-stat.org/ typ : {'frame', 'series'} Type of object to recover (series or frame), default 'frame' squeeze : bool, default True If True, return DataFrame or Series when the input has only one dataset. When the input has multiple dataset, returns dictionary of results. If False, always return a dictionary. Returns ------- results : Series, DataFrame, or dictionaly of Series or DataFrame. """ jdata = _read_content(path_or_buf) import json if isinstance(jdata, dict): datasets = jdata else: datasets = json.loads(jdata, object_pairs_hook=compat.OrderedDict) results = {} for dataname, dataset in compat.iteritems(datasets): values = dataset['value'] # mandatory dimensions = dataset['dimension'] # mandatory # Not supported, as the reis no specific meaning # in current format specification # status = dataset.get('status', None) # optional midx = _parse_dimensions(dimensions) values = _parse_values(values, size=len(midx)) result = pd.Series(values, index=midx) if typ == 'frame': if result.index.nlevels > 1: result = result.unstack() else: result = result.to_frame() elif typ == 'series': pass else: raise ValueError("'typ' must be either 'frame' or 'series'") if len(datasets) == 1 and squeeze: return result results[dataname] = result return results def _parse_values(values, size): if isinstance(values, list): return values elif isinstance(values, dict): result = [np.nan] * size for k, v in compat.iteritems(values): result[int(k)] = v return result else: raise ValueError("'values' must be list or dict") def _parse_dimensions(dimensions): names = dimensions['id'] sizes = dimensions['size'] arrays = [] for name in names: dimension = dimensions[name] # mandatory # roles = dimensions.get('role', None) # optional categories = dimension['category'] # mandatory index = None if 'index' not in categories and 'label' not in categories: # index is required unless the dimension is a constant dimension # In the case that a category index is not provided, # a category label must be included. raise ValueError("Input must have 'index' or 'label' attribute") if 'index' in categories: index = categories['index'] if isinstance(index, list): pass elif isinstance(index, dict): sorted_index = [] for k, v in sorted(index.items(), key=lambda x:x[1]): sorted_index.append(k) index = sorted_index else: raise ValueError("'index' must be list or dict, " "{0} given".format(type(index))) if 'label' in categories: labels = categories['label'] if isinstance(labels, dict): if index is None: if len(labels) == 1: index = list(labels.values()) else: raise ValueError("'index' is required to match multiple labels") else: index = [labels[i] for i in index] else: raise ValueError("'label' must be dict, " "{0} given".format(type(labels))) arrays.append(index) midx = pd.MultiIndex.from_product(arrays, names=names) return midx
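A hedged, minimal example of the JSON-Stat layout that read_jstat() above expects: one dataset with two dimensions of size two, so four values in row-major order. The dataset and dimension names are invented for the demo, and the input is passed as a JSON string as the docstring allows.

import json

toy = {
    "demo": {
        "value": [1, 2, 3, 4],
        "dimension": {
            "id": ["sex", "year"],
            "size": [2, 2],
            "sex": {"category": {"index": ["M", "F"]}},
            "year": {"category": {"index": ["2013", "2014"]}},
        },
    }
}
df = read_jstat(json.dumps(toy))   # single dataset + squeeze=True -> DataFrame
print(df)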
bsd-2-clause
jorge2703/scikit-learn
examples/linear_model/plot_sgd_comparison.py
167
1659
""" ================================== Comparing various online solvers ================================== An example showing how different online solvers perform on the hand-written digits dataset. """ # Author: Rob Zinkov <rob at zinkov dot com> # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt from sklearn import datasets from sklearn.cross_validation import train_test_split from sklearn.linear_model import SGDClassifier, Perceptron from sklearn.linear_model import PassiveAggressiveClassifier heldout = [0.95, 0.90, 0.75, 0.50, 0.01] rounds = 20 digits = datasets.load_digits() X, y = digits.data, digits.target classifiers = [ ("SGD", SGDClassifier()), ("ASGD", SGDClassifier(average=True)), ("Perceptron", Perceptron()), ("Passive-Aggressive I", PassiveAggressiveClassifier(loss='hinge', C=1.0)), ("Passive-Aggressive II", PassiveAggressiveClassifier(loss='squared_hinge', C=1.0)), ] xx = 1. - np.array(heldout) for name, clf in classifiers: rng = np.random.RandomState(42) yy = [] for i in heldout: yy_ = [] for r in range(rounds): X_train, X_test, y_train, y_test = \ train_test_split(X, y, test_size=i, random_state=rng) clf.fit(X_train, y_train) y_pred = clf.predict(X_test) yy_.append(1 - np.mean(y_pred == y_test)) yy.append(np.mean(yy_)) plt.plot(xx, yy, label=name) plt.legend(loc="upper right") plt.xlabel("Proportion train") plt.ylabel("Test Error Rate") plt.show()
bsd-3-clause
djgagne/scikit-learn
examples/linear_model/plot_logistic_l1_l2_sparsity.py
384
2601
""" ============================================== L1 Penalty and Sparsity in Logistic Regression ============================================== Comparison of the sparsity (percentage of zero coefficients) of solutions when L1 and L2 penalty are used for different values of C. We can see that large values of C give more freedom to the model. Conversely, smaller values of C constrain the model more. In the L1 penalty case, this leads to sparser solutions. We classify 8x8 images of digits into two classes: 0-4 against 5-9. The visualization shows coefficients of the models for varying C. """ print(__doc__) # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr> # Mathieu Blondel <mathieu@mblondel.org> # Andreas Mueller <amueller@ais.uni-bonn.de> # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt from sklearn.linear_model import LogisticRegression from sklearn import datasets from sklearn.preprocessing import StandardScaler digits = datasets.load_digits() X, y = digits.data, digits.target X = StandardScaler().fit_transform(X) # classify small against large digits y = (y > 4).astype(np.int) # Set regularization parameter for i, C in enumerate((100, 1, 0.01)): # turn down tolerance for short training time clf_l1_LR = LogisticRegression(C=C, penalty='l1', tol=0.01) clf_l2_LR = LogisticRegression(C=C, penalty='l2', tol=0.01) clf_l1_LR.fit(X, y) clf_l2_LR.fit(X, y) coef_l1_LR = clf_l1_LR.coef_.ravel() coef_l2_LR = clf_l2_LR.coef_.ravel() # coef_l1_LR contains zeros due to the # L1 sparsity inducing norm sparsity_l1_LR = np.mean(coef_l1_LR == 0) * 100 sparsity_l2_LR = np.mean(coef_l2_LR == 0) * 100 print("C=%.2f" % C) print("Sparsity with L1 penalty: %.2f%%" % sparsity_l1_LR) print("score with L1 penalty: %.4f" % clf_l1_LR.score(X, y)) print("Sparsity with L2 penalty: %.2f%%" % sparsity_l2_LR) print("score with L2 penalty: %.4f" % clf_l2_LR.score(X, y)) l1_plot = plt.subplot(3, 2, 2 * i + 1) l2_plot = plt.subplot(3, 2, 2 * (i + 1)) if i == 0: l1_plot.set_title("L1 penalty") l2_plot.set_title("L2 penalty") l1_plot.imshow(np.abs(coef_l1_LR.reshape(8, 8)), interpolation='nearest', cmap='binary', vmax=1, vmin=0) l2_plot.imshow(np.abs(coef_l2_LR.reshape(8, 8)), interpolation='nearest', cmap='binary', vmax=1, vmin=0) plt.text(-8, 3, "C = %.2f" % C) l1_plot.set_xticks(()) l1_plot.set_yticks(()) l2_plot.set_xticks(()) l2_plot.set_yticks(()) plt.show()
bsd-3-clause
yyjiang/scikit-learn
doc/sphinxext/numpy_ext/docscrape_sphinx.py
408
8061
import re import inspect import textwrap import pydoc from .docscrape import NumpyDocString from .docscrape import FunctionDoc from .docscrape import ClassDoc class SphinxDocString(NumpyDocString): def __init__(self, docstring, config=None): config = {} if config is None else config self.use_plots = config.get('use_plots', False) NumpyDocString.__init__(self, docstring, config=config) # string conversion routines def _str_header(self, name, symbol='`'): return ['.. rubric:: ' + name, ''] def _str_field_list(self, name): return [':' + name + ':'] def _str_indent(self, doc, indent=4): out = [] for line in doc: out += [' ' * indent + line] return out def _str_signature(self): return [''] if self['Signature']: return ['``%s``' % self['Signature']] + [''] else: return [''] def _str_summary(self): return self['Summary'] + [''] def _str_extended_summary(self): return self['Extended Summary'] + [''] def _str_param_list(self, name): out = [] if self[name]: out += self._str_field_list(name) out += [''] for param, param_type, desc in self[name]: out += self._str_indent(['**%s** : %s' % (param.strip(), param_type)]) out += [''] out += self._str_indent(desc, 8) out += [''] return out @property def _obj(self): if hasattr(self, '_cls'): return self._cls elif hasattr(self, '_f'): return self._f return None def _str_member_list(self, name): """ Generate a member listing, autosummary:: table where possible, and a table where not. """ out = [] if self[name]: out += ['.. rubric:: %s' % name, ''] prefix = getattr(self, '_name', '') if prefix: prefix = '~%s.' % prefix autosum = [] others = [] for param, param_type, desc in self[name]: param = param.strip() if not self._obj or hasattr(self._obj, param): autosum += [" %s%s" % (prefix, param)] else: others.append((param, param_type, desc)) if autosum: # GAEL: Toctree commented out below because it creates # hundreds of sphinx warnings # out += ['.. autosummary::', ' :toctree:', ''] out += ['.. autosummary::', ''] out += autosum if others: maxlen_0 = max([len(x[0]) for x in others]) maxlen_1 = max([len(x[1]) for x in others]) hdr = "=" * maxlen_0 + " " + "=" * maxlen_1 + " " + "=" * 10 fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1) n_indent = maxlen_0 + maxlen_1 + 4 out += [hdr] for param, param_type, desc in others: out += [fmt % (param.strip(), param_type)] out += self._str_indent(desc, n_indent) out += [hdr] out += [''] return out def _str_section(self, name): out = [] if self[name]: out += self._str_header(name) out += [''] content = textwrap.dedent("\n".join(self[name])).split("\n") out += content out += [''] return out def _str_see_also(self, func_role): out = [] if self['See Also']: see_also = super(SphinxDocString, self)._str_see_also(func_role) out = ['.. seealso::', ''] out += self._str_indent(see_also[2:]) return out def _str_warnings(self): out = [] if self['Warnings']: out = ['.. warning::', ''] out += self._str_indent(self['Warnings']) return out def _str_index(self): idx = self['index'] out = [] if len(idx) == 0: return out out += ['.. 
index:: %s' % idx.get('default', '')] for section, references in idx.iteritems(): if section == 'default': continue elif section == 'refguide': out += [' single: %s' % (', '.join(references))] else: out += [' %s: %s' % (section, ','.join(references))] return out def _str_references(self): out = [] if self['References']: out += self._str_header('References') if isinstance(self['References'], str): self['References'] = [self['References']] out.extend(self['References']) out += [''] # Latex collects all references to a separate bibliography, # so we need to insert links to it import sphinx # local import to avoid test dependency if sphinx.__version__ >= "0.6": out += ['.. only:: latex', ''] else: out += ['.. latexonly::', ''] items = [] for line in self['References']: m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I) if m: items.append(m.group(1)) out += [' ' + ", ".join(["[%s]_" % item for item in items]), ''] return out def _str_examples(self): examples_str = "\n".join(self['Examples']) if (self.use_plots and 'import matplotlib' in examples_str and 'plot::' not in examples_str): out = [] out += self._str_header('Examples') out += ['.. plot::', ''] out += self._str_indent(self['Examples']) out += [''] return out else: return self._str_section('Examples') def __str__(self, indent=0, func_role="obj"): out = [] out += self._str_signature() out += self._str_index() + [''] out += self._str_summary() out += self._str_extended_summary() for param_list in ('Parameters', 'Returns', 'Raises', 'Attributes'): out += self._str_param_list(param_list) out += self._str_warnings() out += self._str_see_also(func_role) out += self._str_section('Notes') out += self._str_references() out += self._str_examples() for param_list in ('Methods',): out += self._str_member_list(param_list) out = self._str_indent(out, indent) return '\n'.join(out) class SphinxFunctionDoc(SphinxDocString, FunctionDoc): def __init__(self, obj, doc=None, config={}): self.use_plots = config.get('use_plots', False) FunctionDoc.__init__(self, obj, doc=doc, config=config) class SphinxClassDoc(SphinxDocString, ClassDoc): def __init__(self, obj, doc=None, func_doc=None, config={}): self.use_plots = config.get('use_plots', False) ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config) class SphinxObjDoc(SphinxDocString): def __init__(self, obj, doc=None, config=None): self._f = obj SphinxDocString.__init__(self, doc, config=config) def get_doc_object(obj, what=None, doc=None, config={}): if what is None: if inspect.isclass(obj): what = 'class' elif inspect.ismodule(obj): what = 'module' elif callable(obj): what = 'function' else: what = 'object' if what == 'class': return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc, config=config) elif what in ('function', 'method'): return SphinxFunctionDoc(obj, doc=doc, config=config) else: if doc is None: doc = pydoc.getdoc(obj) return SphinxObjDoc(obj, doc, config=config)
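A minimal sketch of how these wrappers are typically driven: get_doc_object() picks the appropriate class for the object and str() renders the numpydoc docstring as reST. Using numpy.mean is just a convenient choice of a documented callable; any numpydoc-style docstring would do.

import numpy as np

doc = get_doc_object(np.mean)
print(str(doc))   # reST rendering of the numpydoc sections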
bsd-3-clause
sarahgrogan/scikit-learn
examples/covariance/plot_mahalanobis_distances.py
348
6232
r""" ================================================================ Robust covariance estimation and Mahalanobis distances relevance ================================================================ An example to show covariance estimation with the Mahalanobis distances on Gaussian distributed data. For Gaussian distributed data, the distance of an observation :math:`x_i` to the mode of the distribution can be computed using its Mahalanobis distance: :math:`d_{(\mu,\Sigma)}(x_i)^2 = (x_i - \mu)'\Sigma^{-1}(x_i - \mu)` where :math:`\mu` and :math:`\Sigma` are the location and the covariance of the underlying Gaussian distribution. In practice, :math:`\mu` and :math:`\Sigma` are replaced by some estimates. The usual covariance maximum likelihood estimate is very sensitive to the presence of outliers in the data set and therefor, the corresponding Mahalanobis distances are. One would better have to use a robust estimator of covariance to guarantee that the estimation is resistant to "erroneous" observations in the data set and that the associated Mahalanobis distances accurately reflect the true organisation of the observations. The Minimum Covariance Determinant estimator is a robust, high-breakdown point (i.e. it can be used to estimate the covariance matrix of highly contaminated datasets, up to :math:`\frac{n_\text{samples}-n_\text{features}-1}{2}` outliers) estimator of covariance. The idea is to find :math:`\frac{n_\text{samples}+n_\text{features}+1}{2}` observations whose empirical covariance has the smallest determinant, yielding a "pure" subset of observations from which to compute standards estimates of location and covariance. The Minimum Covariance Determinant estimator (MCD) has been introduced by P.J.Rousseuw in [1]. This example illustrates how the Mahalanobis distances are affected by outlying data: observations drawn from a contaminating distribution are not distinguishable from the observations coming from the real, Gaussian distribution that one may want to work with. Using MCD-based Mahalanobis distances, the two populations become distinguishable. Associated applications are outliers detection, observations ranking, clustering, ... For visualization purpose, the cubic root of the Mahalanobis distances are represented in the boxplot, as Wilson and Hilferty suggest [2] [1] P. J. Rousseeuw. Least median of squares regression. J. Am Stat Ass, 79:871, 1984. [2] Wilson, E. B., & Hilferty, M. M. (1931). The distribution of chi-square. Proceedings of the National Academy of Sciences of the United States of America, 17, 684-688. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn.covariance import EmpiricalCovariance, MinCovDet n_samples = 125 n_outliers = 25 n_features = 2 # generate data gen_cov = np.eye(n_features) gen_cov[0, 0] = 2. X = np.dot(np.random.randn(n_samples, n_features), gen_cov) # add some outliers outliers_cov = np.eye(n_features) outliers_cov[np.arange(1, n_features), np.arange(1, n_features)] = 7. 
X[-n_outliers:] = np.dot(np.random.randn(n_outliers, n_features), outliers_cov) # fit a Minimum Covariance Determinant (MCD) robust estimator to data robust_cov = MinCovDet().fit(X) # compare estimators learnt from the full data set with true parameters emp_cov = EmpiricalCovariance().fit(X) ############################################################################### # Display results fig = plt.figure() plt.subplots_adjust(hspace=-.1, wspace=.4, top=.95, bottom=.05) # Show data set subfig1 = plt.subplot(3, 1, 1) inlier_plot = subfig1.scatter(X[:, 0], X[:, 1], color='black', label='inliers') outlier_plot = subfig1.scatter(X[:, 0][-n_outliers:], X[:, 1][-n_outliers:], color='red', label='outliers') subfig1.set_xlim(subfig1.get_xlim()[0], 11.) subfig1.set_title("Mahalanobis distances of a contaminated data set:") # Show contours of the distance functions xx, yy = np.meshgrid(np.linspace(plt.xlim()[0], plt.xlim()[1], 100), np.linspace(plt.ylim()[0], plt.ylim()[1], 100)) zz = np.c_[xx.ravel(), yy.ravel()] mahal_emp_cov = emp_cov.mahalanobis(zz) mahal_emp_cov = mahal_emp_cov.reshape(xx.shape) emp_cov_contour = subfig1.contour(xx, yy, np.sqrt(mahal_emp_cov), cmap=plt.cm.PuBu_r, linestyles='dashed') mahal_robust_cov = robust_cov.mahalanobis(zz) mahal_robust_cov = mahal_robust_cov.reshape(xx.shape) robust_contour = subfig1.contour(xx, yy, np.sqrt(mahal_robust_cov), cmap=plt.cm.YlOrBr_r, linestyles='dotted') subfig1.legend([emp_cov_contour.collections[1], robust_contour.collections[1], inlier_plot, outlier_plot], ['MLE dist', 'robust dist', 'inliers', 'outliers'], loc="upper right", borderaxespad=0) plt.xticks(()) plt.yticks(()) # Plot the scores for each point emp_mahal = emp_cov.mahalanobis(X - np.mean(X, 0)) ** (0.33) subfig2 = plt.subplot(2, 2, 3) subfig2.boxplot([emp_mahal[:-n_outliers], emp_mahal[-n_outliers:]], widths=.25) subfig2.plot(1.26 * np.ones(n_samples - n_outliers), emp_mahal[:-n_outliers], '+k', markeredgewidth=1) subfig2.plot(2.26 * np.ones(n_outliers), emp_mahal[-n_outliers:], '+k', markeredgewidth=1) subfig2.axes.set_xticklabels(('inliers', 'outliers'), size=15) subfig2.set_ylabel(r"$\sqrt[3]{\rm{(Mahal. dist.)}}$", size=16) subfig2.set_title("1. from non-robust estimates\n(Maximum Likelihood)") plt.yticks(()) robust_mahal = robust_cov.mahalanobis(X - robust_cov.location_) ** (0.33) subfig3 = plt.subplot(2, 2, 4) subfig3.boxplot([robust_mahal[:-n_outliers], robust_mahal[-n_outliers:]], widths=.25) subfig3.plot(1.26 * np.ones(n_samples - n_outliers), robust_mahal[:-n_outliers], '+k', markeredgewidth=1) subfig3.plot(2.26 * np.ones(n_outliers), robust_mahal[-n_outliers:], '+k', markeredgewidth=1) subfig3.axes.set_xticklabels(('inliers', 'outliers'), size=15) subfig3.set_ylabel(r"$\sqrt[3]{\rm{(Mahal. dist.)}}$", size=16) subfig3.set_title("2. from robust estimates\n(Minimum Covariance Determinant)") plt.yticks(()) plt.show()
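A sanity check (not part of the original figure code): the estimator's mahalanobis() method matches the textbook formula (x - mu)' Sigma^{-1} (x - mu) computed by hand from the fitted location and covariance.

mu = emp_cov.location_
prec = np.linalg.inv(emp_cov.covariance_)
diff = X - mu
# Squared Mahalanobis distance of every sample, done manually.
manual = np.einsum('ij,jk,ik->i', diff, prec, diff)
print(np.allclose(manual, emp_cov.mahalanobis(X)))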
bsd-3-clause
pratapvardhan/scikit-learn
sklearn/datasets/tests/test_base.py
33
7160
import os
import shutil
import tempfile
import warnings

import nose
import numpy

from pickle import loads
from pickle import dumps

from sklearn.datasets import get_data_home
from sklearn.datasets import clear_data_home
from sklearn.datasets import load_files
from sklearn.datasets import load_sample_images
from sklearn.datasets import load_sample_image
from sklearn.datasets import load_digits
from sklearn.datasets import load_diabetes
from sklearn.datasets import load_linnerud
from sklearn.datasets import load_iris
from sklearn.datasets import load_breast_cancer
from sklearn.datasets import load_boston
from sklearn.datasets.base import Bunch

from sklearn.externals.six import b, u

from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises


DATA_HOME = tempfile.mkdtemp(prefix="scikit_learn_data_home_test_")
LOAD_FILES_ROOT = tempfile.mkdtemp(prefix="scikit_learn_load_files_test_")
TEST_CATEGORY_DIR1 = ""
TEST_CATEGORY_DIR2 = ""


def _remove_dir(path):
    if os.path.isdir(path):
        shutil.rmtree(path)


def teardown_module():
    """Test fixture (clean up) run once after all tests of this module"""
    for path in [DATA_HOME, LOAD_FILES_ROOT]:
        _remove_dir(path)


def setup_load_files():
    global TEST_CATEGORY_DIR1
    global TEST_CATEGORY_DIR2
    TEST_CATEGORY_DIR1 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
    TEST_CATEGORY_DIR2 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
    sample_file = tempfile.NamedTemporaryFile(dir=TEST_CATEGORY_DIR1,
                                              delete=False)
    sample_file.write(b("Hello World!\n"))
    sample_file.close()


def teardown_load_files():
    _remove_dir(TEST_CATEGORY_DIR1)
    _remove_dir(TEST_CATEGORY_DIR2)


def test_data_home():
    # get_data_home will point to a pre-existing folder
    data_home = get_data_home(data_home=DATA_HOME)
    assert_equal(data_home, DATA_HOME)
    assert_true(os.path.exists(data_home))

    # clear_data_home will delete both the content and the folder itself
    clear_data_home(data_home=data_home)
    assert_false(os.path.exists(data_home))

    # if the folder is missing it will be created again
    data_home = get_data_home(data_home=DATA_HOME)
    assert_true(os.path.exists(data_home))


def test_default_empty_load_files():
    res = load_files(LOAD_FILES_ROOT)
    assert_equal(len(res.filenames), 0)
    assert_equal(len(res.target_names), 0)
    assert_equal(res.DESCR, None)


@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_default_load_files():
    res = load_files(LOAD_FILES_ROOT)
    assert_equal(len(res.filenames), 1)
    assert_equal(len(res.target_names), 2)
    assert_equal(res.DESCR, None)
    assert_equal(res.data, [b("Hello World!\n")])


@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_w_categories_desc_and_encoding():
    category = os.path.abspath(TEST_CATEGORY_DIR1).split('/').pop()
    res = load_files(LOAD_FILES_ROOT, description="test",
                     categories=category, encoding="utf-8")
    assert_equal(len(res.filenames), 1)
    assert_equal(len(res.target_names), 1)
    assert_equal(res.DESCR, "test")
    assert_equal(res.data, [u("Hello World!\n")])


@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_wo_load_content():
    res = load_files(LOAD_FILES_ROOT, load_content=False)
    assert_equal(len(res.filenames), 1)
    assert_equal(len(res.target_names), 2)
    assert_equal(res.DESCR, None)
    assert_equal(res.get('data'), None)


def test_load_sample_images():
    try:
        res = load_sample_images()
        assert_equal(len(res.images), 2)
        assert_equal(len(res.filenames), 2)
        assert_true(res.DESCR)
    except ImportError:
        warnings.warn("Could not load sample images, PIL is not available.")


def test_load_digits():
    digits = load_digits()
    assert_equal(digits.data.shape, (1797, 64))
    assert_equal(numpy.unique(digits.target).size, 10)


def test_load_digits_n_class_lt_10():
    digits = load_digits(9)
    assert_equal(digits.data.shape, (1617, 64))
    assert_equal(numpy.unique(digits.target).size, 9)


def test_load_sample_image():
    try:
        china = load_sample_image('china.jpg')
        assert_equal(china.dtype, 'uint8')
        assert_equal(china.shape, (427, 640, 3))
    except ImportError:
        warnings.warn("Could not load sample images, PIL is not available.")


def test_load_missing_sample_image_error():
    have_PIL = True
    try:
        try:
            from scipy.misc import imread
        except ImportError:
            from scipy.misc.pilutil import imread
    except ImportError:
        have_PIL = False
    if have_PIL:
        assert_raises(AttributeError, load_sample_image, 'blop.jpg')
    else:
        warnings.warn("Could not load sample images, PIL is not available.")


def test_load_diabetes():
    res = load_diabetes()
    assert_equal(res.data.shape, (442, 10))
    assert_true(res.target.size, 442)


def test_load_linnerud():
    res = load_linnerud()
    assert_equal(res.data.shape, (20, 3))
    assert_equal(res.target.shape, (20, 3))
    assert_equal(len(res.target_names), 3)
    assert_true(res.DESCR)


def test_load_iris():
    res = load_iris()
    assert_equal(res.data.shape, (150, 4))
    assert_equal(res.target.size, 150)
    assert_equal(res.target_names.size, 3)
    assert_true(res.DESCR)


def test_load_breast_cancer():
    res = load_breast_cancer()
    assert_equal(res.data.shape, (569, 30))
    assert_equal(res.target.size, 569)
    assert_equal(res.target_names.size, 2)
    assert_true(res.DESCR)


def test_load_boston():
    res = load_boston()
    assert_equal(res.data.shape, (506, 13))
    assert_equal(res.target.size, 506)
    assert_equal(res.feature_names.size, 13)
    assert_true(res.DESCR)


def test_loads_dumps_bunch():
    bunch = Bunch(x="x")
    bunch_from_pkl = loads(dumps(bunch))
    bunch_from_pkl.x = "y"
    assert_equal(bunch_from_pkl['x'], bunch_from_pkl.x)


def test_bunch_pickle_generated_with_0_16_and_read_with_0_17():
    bunch = Bunch(key='original')
    # This reproduces a problem when Bunch pickles have been created
    # with scikit-learn 0.16 and are read with 0.17. Basically there
    # is a surprising behaviour because reading bunch.key uses
    # bunch.__dict__ (which is non-empty for 0.16 Bunch objects)
    # whereas assigning into bunch.key uses bunch.__setattr__. See
    # https://github.com/scikit-learn/scikit-learn/issues/6196 for
    # more details
    bunch.__dict__['key'] = 'set from __dict__'
    bunch_from_pkl = loads(dumps(bunch))
    # After loading from pickle the __dict__ should have been ignored
    assert_equal(bunch_from_pkl.key, 'original')
    assert_equal(bunch_from_pkl['key'], 'original')
    # Making sure that changing the attr does change the value
    # associated with __getitem__ as well
    bunch_from_pkl.key = 'changed'
    assert_equal(bunch_from_pkl.key, 'changed')
    assert_equal(bunch_from_pkl['key'], 'changed')
bsd-3-clause
pompiduskus/scikit-learn
examples/cluster/plot_color_quantization.py
297
3443
# -*- coding: utf-8 -*-
"""
==================================
Color Quantization using K-Means
==================================

Performs a pixel-wise Vector Quantization (VQ) of an image of the summer
palace (China), reducing the number of colors required to show the image from
96,615 unique colors to 64, while preserving the overall appearance quality.

In this example, pixels are represented in a 3D-space and K-means is used to
find 64 color clusters. In the image processing literature, the codebook
obtained from K-means (the cluster centers) is called the color palette.
Using a single byte, up to 256 colors can be addressed, whereas an RGB
encoding requires 3 bytes per pixel. The GIF file format, for example, uses
such a palette.

For comparison, a quantized image using a random codebook (colors picked up
randomly) is also shown.
"""
# Authors: Robert Layton <robertlayton@gmail.com>
#          Olivier Grisel <olivier.grisel@ensta.org>
#          Mathieu Blondel <mathieu@mblondel.org>
#
# License: BSD 3 clause

print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances_argmin
from sklearn.datasets import load_sample_image
from sklearn.utils import shuffle
from time import time

n_colors = 64

# Load the Summer Palace photo
china = load_sample_image("china.jpg")

# Convert to floats instead of the default 8 bits integer coding. Dividing by
# 255 is important so that plt.imshow works well on float data (values need
# to be in the range [0-1]).
china = np.array(china, dtype=np.float64) / 255

# Load Image and transform to a 2D numpy array.
w, h, d = original_shape = tuple(china.shape)
assert d == 3
image_array = np.reshape(china, (w * h, d))

print("Fitting model on a small sub-sample of the data")
t0 = time()
image_array_sample = shuffle(image_array, random_state=0)[:1000]
kmeans = KMeans(n_clusters=n_colors, random_state=0).fit(image_array_sample)
print("done in %0.3fs." % (time() - t0))

# Get labels for all points
print("Predicting color indices on the full image (k-means)")
t0 = time()
labels = kmeans.predict(image_array)
print("done in %0.3fs." % (time() - t0))


codebook_random = shuffle(image_array, random_state=0)[:n_colors + 1]
print("Predicting color indices on the full image (random)")
t0 = time()
labels_random = pairwise_distances_argmin(codebook_random,
                                          image_array,
                                          axis=0)
print("done in %0.3fs." % (time() - t0))


def recreate_image(codebook, labels, w, h):
    """Recreate the (compressed) image from the code book & labels"""
    d = codebook.shape[1]
    image = np.zeros((w, h, d))
    label_idx = 0
    for i in range(w):
        for j in range(h):
            image[i][j] = codebook[labels[label_idx]]
            label_idx += 1
    return image

# Display all results, alongside original image
plt.figure(1)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Original image (96,615 colors)')
plt.imshow(china)

plt.figure(2)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Quantized image (64 colors, K-Means)')
plt.imshow(recreate_image(kmeans.cluster_centers_, labels, w, h))

plt.figure(3)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Quantized image (64 colors, Random)')
plt.imshow(recreate_image(codebook_random, labels_random, w, h))
plt.show()
bsd-3-clause
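Editor's sketch (not part of the original example record above): the example rebuilds the quantized image with a per-pixel Python loop in recreate_image(). The fragment below shows, under the same assumptions (the bundled sample image, 64 colors), how the reconstruction can instead be done with a single NumPy indexing step; variable names are illustrative only.

import numpy as np
from sklearn.cluster import KMeans
from sklearn.datasets import load_sample_image
from sklearn.utils import shuffle

n_colors = 64
china = np.array(load_sample_image("china.jpg"), dtype=np.float64) / 255
w, h, d = china.shape
image_array = china.reshape(w * h, d)          # one row per pixel

# Fit the palette on a random sub-sample, then label every pixel.
sample = shuffle(image_array, random_state=0)[:1000]
kmeans = KMeans(n_clusters=n_colors, random_state=0).fit(sample)
labels = kmeans.predict(image_array)

# cluster_centers_[labels] maps each pixel to its palette color in one step,
# replacing the nested loops of recreate_image() in the example above.
quantized = kmeans.cluster_centers_[labels].reshape(w, h, d)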
text-machine-lab/CliRel
src/note.py
1
14806
""" Text-Machine Lab: CliRel File Name : note.py Creation Date : 30-09-2016 Created By : Renan Campos Purpose : Internal data representation for a document set. Each entry consists of a concept pair, a sentence, and a relation label. The entries are indexed by filename and line number. """ import os import re import numpy as np import pandas as pd from pandas import DataFrame, Series def extractConsFromText(line): """ Takes a line in i2b2 concept syntax and returns the data. >>> extractConsFromText('c="This treatment" 1:0 1:1||t="treatment"') (1, 0, 1, 'treatment', 'This treatment') >>> extractConsFromText('c="medical problem" 1:3 1:4||t="problem"') (1, 3, 4, 'problem', 'medical problem') >>> extractConsFromText('c="Treatment" 2:0 2:0||t="treatment"') (2, 0, 0, 'treatment', 'Treatment') """ m = re.match(r"c=\"(.*)\" (\d+):(\d+) \d+:(\d+)\|\|t=\"(.*)\"", line) return (int(m.group(2)), int(m.group(3)), int(m.group(4)), m.group(5), m.group(1)) def extractCons(consFile): """ Takes a concept file and returns a panda datatable. Where every entry is a pair of concepts. >>> print extractCons('./i2b2_examples/concept/health.con').ix[0] lineNum 1 conStart1 0 conEnd1 1 conType1 treatment conText1 This treatment conStart2 3 conEnd2 4 conType2 problem conText2 medical problem fileName health Name: 0, dtype: object >>> print extractCons('./i2b2_examples/concept/health.con').ix[1] lineNum 2 conStart1 0 conEnd1 0 conType1 treatment conText1 Treatment conStart2 2 conEnd2 3 conType2 problem conText2 medical problem fileName health Name: 1, dtype: object """ data = list() with open(consFile, 'r') as f: for line in f: data.append(extractConsFromText(line)) out = list() for i,d1 in enumerate(data): for j,d2 in enumerate(data): if d1[0] == d2[0] and i != j: out.append(d1 + d2[1:]) # No concept pairs if len(out) == 0: return None out = DataFrame(out, columns = ["lineNum", "conStart1", "conEnd1", "conType1", "conText1", "conStart2", "conEnd2", "conType2", "conText2"]) out['fileName'] = os.path.basename(consFile).split(".")[0] return out def extractRelFromText(line): """ Takes a line in i2b2 relation syntax and returns the data. >>> extractRelFromText('c="This treatment" 1:0 1:1||r="TrIP"||c="medical problem" 1:3 1:4') (1, 0, 1, 'This treatment', 3, 4, 'medical problem', 'TrIP') >>> extractRelFromText('c="Treatment" 2:0 2:0||r="TrWP"||c="medical problem" 2:2 2:3') (2, 0, 0, 'Treatment', 2, 3, 'medical problem', 'TrWP') >>> extractRelFromText('c="Treatment" 3:0 3:0||r="TrCP"||c="medical problem" 3:2 3:3') (3, 0, 0, 'Treatment', 2, 3, 'medical problem', 'TrCP') """ m = re.match(r"c=\"(.*)\" (\d+):(\d+) \d+:(\d+)\|\|r=\"(.*)\"\|\|c=\"(.*)\" \d+:(\d+) \d+:(\d+)", line) return (int(m.group(2)), int(m.group(3)), int(m.group(4)), m.group(1), int(m.group(7)), int(m.group(8)), m.group(6), m.group(5)) def extractRels(relFile): """ Takes a relation file and returns a panda datatable. 
>>> print extractRels('./i2b2_examples/rel/health.rel').ix[0] lineNum 1 conStart1 0 conEnd1 1 conText1 This treatment conStart2 3 conEnd2 4 conText2 medical problem relType TrIP fileName health Name: 0, dtype: object >>> print extractRels('./i2b2_examples/rel/health.rel').ix[1] lineNum 2 conStart1 0 conEnd1 0 conText1 Treatment conStart2 2 conEnd2 3 conText2 medical problem relType TrWP fileName health Name: 1, dtype: object """ data = list() with open(relFile, 'r') as f: for line in f: data.append(extractRelFromText(line)) # Handling empty dataframe if len(data) == 0: return None out = DataFrame(data, columns = ["lineNum", "conStart1", "conEnd1", "conText1", "conStart2", "conEnd2", "conText2", "relType"]) out['fileName'] = os.path.basename(relFile).split(".")[0] return out def writeRel(e): """ Takes an entry and returns a string in the i2b2 relation format >>> writeRel(createEntries('./i2b2_examples/concept', './i2b2_examples/txt/', './i2b2_examples/rel/').ix[0]) 'c="This treatment" 1:0 1:1||r="TrIP"||c="medical problem" 1:3 1:4' """ return 'c=\"%s\" %d:%d %d:%d||r=\"%s\"||c=\"%s\" %d:%d %d:%d' % (e.conText1, int(e.lineNum), int(e.conStart1), int(e.lineNum), int(e.conEnd1), e.relType, e.conText2, int(e.lineNum), int(e.conStart2), int(e.lineNum), int(e.conEnd2)) def extractTxts(txtFile): """ Takes a relation file and returns a panda datatable. >>> print extractTxts('./i2b2_examples/txt/health.txt').ix[0] lineNum 1 text This treatment improves medical problem . fileName health Name: 0, dtype: object >>> print extractTxts('./i2b2_examples/txt/health.txt').ix[1] lineNum 2 text Treatment worsens medical problem . fileName health Name: 1, dtype: object """ data = list() with open(txtFile, 'r') as f: for i, line in enumerate(f): data.append((i+1, line.strip())) out = DataFrame(data, columns = ["lineNum", "text"]) out['fileName'] = os.path.basename(txtFile).split(".")[0] return out def createTraining(cFile, tFile, rFile): """ Given the concepts, text and relation files, consilidate that data into a single dataframe. >>> print createTraining('./i2b2_examples/concept/health.con', './i2b2_examples/txt/health.txt', './i2b2_examples/rel/health.rel').ix[0] lineNum 1 conStart1 0 conEnd1 1 conType1 treatment conText1 This treatment conStart2 3 conEnd2 4 conType2 problem conText2 medical problem fileName health relType TrIP text This treatment improves medical problem . Name: 0, dtype: object >>> print createTraining('./i2b2_examples/concept/health.con', './i2b2_examples/txt/health.txt', './i2b2_examples/rel/health.rel').ix[1] lineNum 2 conStart1 0 conEnd1 0 conType1 treatment conText1 Treatment conStart2 2 conEnd2 3 conType2 problem conText2 medical problem fileName health relType TrWP text Treatment worsens medical problem . Name: 1, dtype: object """ concepts = extractCons(cFile) # Handling empty concept pairs (Relations need two concepts) if type(concepts) == type(None): return None text = extractTxts(tFile) relations = extractRels(rFile) # Handling empty relations if type(relations) == type(None): concepts['relType'] = np.nan return pd.merge(concepts, text, how='left') # return None return pd.merge(pd.merge(concepts, relations, how='outer'), text, how='left') def createTesting(cFile, tFile): """ Given the concepts, text and relation files, consilidate that data into a single dataframe. 
>>> print createTesting('./i2b2_examples/concept/health.con', './i2b2_examples/txt/health.txt').ix[0] lineNum 1 conStart1 0 conEnd1 1 conType1 treatment conText1 This treatment conStart2 3 conEnd2 4 conType2 problem conText2 medical problem fileName health relType NaN text This treatment improves medical problem . Name: 0, dtype: object >>> print createTesting('./i2b2_examples/concept/health.con', './i2b2_examples/txt/health.txt').ix[1] lineNum 2 conStart1 0 conEnd1 0 conType1 treatment conText1 Treatment conStart2 2 conEnd2 3 conType2 problem conText2 medical problem fileName health relType NaN text Treatment worsens medical problem . Name: 1, dtype: object """ concepts = extractCons(cFile) # Handling empty concept pairs (Relations need two concepts) if type(concepts) == type(None): return None concepts["relType"] = np.nan text = extractTxts(tFile) return pd.merge(concepts, text, how='left') def filterFiles(d, extension): """ Only list files with the specified extension Helper for create Entries >>> filterFiles('./i2b2_examples/txt', 'txt') ['./i2b2_examples/txt/health.txt', './i2b2_examples/txt/health2.txt'] >>> filterFiles('./i2b2_examples/concept', 'con') ['./i2b2_examples/concept/health.con', './i2b2_examples/concept/health2.con'] """ files = list() for f in os.listdir(d): if f.endswith(extension): files.append(os.path.join(d, f)) files.sort() return files def createEntries(c_dir, t_dir, r_dir=None): """ Creates a complete table of data from the files in the given directories. i2b2_examples contains two files that each have 11 entries. >>> createEntries('./i2b2_examples/concept', './i2b2_examples/txt/').shape (22, 12) >>> createEntries('./i2b2_examples/concept', './i2b2_examples/txt/').ix[0] conEnd1 1 conEnd2 4 conStart1 0 conStart2 3 conText1 This treatment conText2 medical problem conType1 treatment conType2 problem fileName health lineNum 1 relType NaN text This treatment improves medical problem . Name: 0, dtype: object >>> createEntries('./i2b2_examples/concept', './i2b2_examples/txt/', './i2b2_examples/rel/').shape (22, 12) >>> createEntries('./i2b2_examples/concept', './i2b2_examples/txt/', './i2b2_examples/rel/').ix[0] conEnd1 1 conEnd2 4 conStart1 0 conStart2 3 conText1 This treatment conText2 medical problem conType1 treatment conType2 problem fileName health lineNum 1 relType TrIP text This treatment improves medical problem . Name: 0, dtype: object """ entries = DataFrame() txt = filterFiles(t_dir, 'txt') con = filterFiles(c_dir, 'con') if r_dir: rel = filterFiles(r_dir, 'rel') for t,c,r in zip(txt, con, rel): entry = createTraining(c, t, r) if type(entry) != type(None): entries = entries.append(entry, ignore_index=True) else: for t,c in zip(txt, con): entry = createTesting(c, t) if type(entry) != type(None): entries = entries.append(entry, ignore_index=True) return entries if __name__ == "__main__": import doctest doctest.testmod()
apache-2.0
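Editor's sketch (not from the CliRel repository record above): the doctests describe the i2b2 concept-line grammar that extractConsFromText() parses. The standalone fragment below applies the same regular expression to one illustrative line; the sample line and names are assumptions for demonstration only.

import re

# i2b2 concept annotation:  c="<text>" <line>:<start> <line>:<end>||t="<type>"
CON_PATTERN = re.compile(r'c="(.*)" (\d+):(\d+) \d+:(\d+)\|\|t="(.*)"')

line = 'c="This treatment" 1:0 1:1||t="treatment"'
m = CON_PATTERN.match(line)
if m is not None:
    con_text = m.group(1)
    line_num, start, end = int(m.group(2)), int(m.group(3)), int(m.group(4))
    con_type = m.group(5)
    print(line_num, start, end, con_type, con_text)
    # -> 1 0 1 treatment This treatment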
liyu1990/sklearn
sklearn/tree/tests/test_tree.py
13
52365
""" Testing for the tree module (sklearn.tree). """ import pickle from functools import partial from itertools import product import platform import numpy as np from scipy.sparse import csc_matrix from scipy.sparse import csr_matrix from scipy.sparse import coo_matrix from sklearn.random_projection import sparse_random_matrix from sklearn.metrics import accuracy_score from sklearn.metrics import mean_squared_error from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_in from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_greater_equal from sklearn.utils.testing import assert_less from sklearn.utils.testing import assert_less_equal from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_warns from sklearn.utils.testing import raises from sklearn.utils.testing import ignore_warnings from sklearn.utils.validation import check_random_state from sklearn.exceptions import NotFittedError from sklearn.tree import DecisionTreeClassifier from sklearn.tree import DecisionTreeRegressor from sklearn.tree import ExtraTreeClassifier from sklearn.tree import ExtraTreeRegressor from sklearn import tree from sklearn.tree.tree import SPARSE_SPLITTERS from sklearn.tree._tree import TREE_LEAF from sklearn import datasets from sklearn.utils import compute_sample_weight CLF_CRITERIONS = ("gini", "entropy") REG_CRITERIONS = ("mse", ) CLF_TREES = { "DecisionTreeClassifier": DecisionTreeClassifier, "Presort-DecisionTreeClassifier": partial(DecisionTreeClassifier, presort=True), "ExtraTreeClassifier": ExtraTreeClassifier, } REG_TREES = { "DecisionTreeRegressor": DecisionTreeRegressor, "Presort-DecisionTreeRegressor": partial(DecisionTreeRegressor, presort=True), "ExtraTreeRegressor": ExtraTreeRegressor, } ALL_TREES = dict() ALL_TREES.update(CLF_TREES) ALL_TREES.update(REG_TREES) SPARSE_TREES = ["DecisionTreeClassifier", "DecisionTreeRegressor", "ExtraTreeClassifier", "ExtraTreeRegressor"] X_small = np.array([ [0, 0, 4, 0, 0, 0, 1, -14, 0, -4, 0, 0, 0, 0, ], [0, 0, 5, 3, 0, -4, 0, 0, 1, -5, 0.2, 0, 4, 1, ], [-1, -1, 0, 0, -4.5, 0, 0, 2.1, 1, 0, 0, -4.5, 0, 1, ], [-1, -1, 0, -1.2, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 1, ], [-1, -1, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 1, ], [-1, -2, 0, 4, -3, 10, 4, 0, -3.2, 0, 4, 3, -4, 1, ], [2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ], [2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ], [2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ], [2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -1, 0, ], [2, 8, 5, 1, 0.5, -4, 10, 0, 1, -5, 3, 0, 2, 0, ], [2, 0, 1, 1, 1, -1, 1, 0, 0, -2, 3, 0, 1, 0, ], [2, 0, 1, 2, 3, -1, 10, 2, 0, -1, 1, 2, 2, 0, ], [1, 1, 0, 2, 2, -1, 1, 2, 0, -5, 1, 2, 3, 0, ], [3, 1, 0, 3, 0, -4, 10, 0, 1, -5, 3, 0, 3, 1, ], [2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ], [2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 1.5, 1, -1, -1, ], [2.11, 8, -6, -0.5, 0, 10, 0, 0, -3.2, 6, 0.5, 0, -1, -1, ], [2, 0, 5, 1, 0.5, -2, 10, 0, 1, -5, 3, 1, 0, -1, ], [2, 0, 1, 1, 1, -2, 1, 0, 0, -2, 0, 0, 0, 1, ], [2, 1, 1, 1, 2, -1, 10, 2, 0, -1, 0, 2, 1, 1, ], [1, 1, 0, 0, 1, -3, 1, 2, 0, -5, 1, 2, 1, 1, ], [3, 1, 0, 1, 0, -4, 1, 0, 1, -2, 0, 0, 1, 0, ]]) y_small = [1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0] y_small_reg = [1.0, 
2.1, 1.2, 0.05, 10, 2.4, 3.1, 1.01, 0.01, 2.98, 3.1, 1.1, 0.0, 1.2, 2, 11, 0, 0, 4.5, 0.201, 1.06, 0.9, 0] # toy sample X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]] y = [-1, -1, -1, 1, 1, 1] T = [[-1, -1], [2, 2], [3, 2]] true_result = [-1, 1, 1] # also load the iris dataset # and randomly permute it iris = datasets.load_iris() rng = np.random.RandomState(1) perm = rng.permutation(iris.target.size) iris.data = iris.data[perm] iris.target = iris.target[perm] # also load the boston dataset # and randomly permute it boston = datasets.load_boston() perm = rng.permutation(boston.target.size) boston.data = boston.data[perm] boston.target = boston.target[perm] digits = datasets.load_digits() perm = rng.permutation(digits.target.size) digits.data = digits.data[perm] digits.target = digits.target[perm] random_state = check_random_state(0) X_multilabel, y_multilabel = datasets.make_multilabel_classification( random_state=0, n_samples=30, n_features=10) X_sparse_pos = random_state.uniform(size=(20, 5)) X_sparse_pos[X_sparse_pos <= 0.8] = 0. y_random = random_state.randint(0, 4, size=(20, )) X_sparse_mix = sparse_random_matrix(20, 10, density=0.25, random_state=0) DATASETS = { "iris": {"X": iris.data, "y": iris.target}, "boston": {"X": boston.data, "y": boston.target}, "digits": {"X": digits.data, "y": digits.target}, "toy": {"X": X, "y": y}, "clf_small": {"X": X_small, "y": y_small}, "reg_small": {"X": X_small, "y": y_small_reg}, "multilabel": {"X": X_multilabel, "y": y_multilabel}, "sparse-pos": {"X": X_sparse_pos, "y": y_random}, "sparse-neg": {"X": - X_sparse_pos, "y": y_random}, "sparse-mix": {"X": X_sparse_mix, "y": y_random}, "zeros": {"X": np.zeros((20, 3)), "y": y_random} } for name in DATASETS: DATASETS[name]["X_sparse"] = csc_matrix(DATASETS[name]["X"]) def assert_tree_equal(d, s, message): assert_equal(s.node_count, d.node_count, "{0}: inequal number of node ({1} != {2})" "".format(message, s.node_count, d.node_count)) assert_array_equal(d.children_right, s.children_right, message + ": inequal children_right") assert_array_equal(d.children_left, s.children_left, message + ": inequal children_left") external = d.children_right == TREE_LEAF internal = np.logical_not(external) assert_array_equal(d.feature[internal], s.feature[internal], message + ": inequal features") assert_array_equal(d.threshold[internal], s.threshold[internal], message + ": inequal threshold") assert_array_equal(d.n_node_samples.sum(), s.n_node_samples.sum(), message + ": inequal sum(n_node_samples)") assert_array_equal(d.n_node_samples, s.n_node_samples, message + ": inequal n_node_samples") assert_almost_equal(d.impurity, s.impurity, err_msg=message + ": inequal impurity") assert_array_almost_equal(d.value[external], s.value[external], err_msg=message + ": inequal value") def test_classification_toy(): # Check classification on a toy dataset. for name, Tree in CLF_TREES.items(): clf = Tree(random_state=0) clf.fit(X, y) assert_array_equal(clf.predict(T), true_result, "Failed with {0}".format(name)) clf = Tree(max_features=1, random_state=1) clf.fit(X, y) assert_array_equal(clf.predict(T), true_result, "Failed with {0}".format(name)) def test_weighted_classification_toy(): # Check classification on a weighted toy dataset. 
for name, Tree in CLF_TREES.items(): clf = Tree(random_state=0) clf.fit(X, y, sample_weight=np.ones(len(X))) assert_array_equal(clf.predict(T), true_result, "Failed with {0}".format(name)) clf.fit(X, y, sample_weight=np.ones(len(X)) * 0.5) assert_array_equal(clf.predict(T), true_result, "Failed with {0}".format(name)) def test_regression_toy(): # Check regression on a toy dataset. for name, Tree in REG_TREES.items(): reg = Tree(random_state=1) reg.fit(X, y) assert_almost_equal(reg.predict(T), true_result, err_msg="Failed with {0}".format(name)) clf = Tree(max_features=1, random_state=1) clf.fit(X, y) assert_almost_equal(reg.predict(T), true_result, err_msg="Failed with {0}".format(name)) def test_xor(): # Check on a XOR problem y = np.zeros((10, 10)) y[:5, :5] = 1 y[5:, 5:] = 1 gridx, gridy = np.indices(y.shape) X = np.vstack([gridx.ravel(), gridy.ravel()]).T y = y.ravel() for name, Tree in CLF_TREES.items(): clf = Tree(random_state=0) clf.fit(X, y) assert_equal(clf.score(X, y), 1.0, "Failed with {0}".format(name)) clf = Tree(random_state=0, max_features=1) clf.fit(X, y) assert_equal(clf.score(X, y), 1.0, "Failed with {0}".format(name)) def test_iris(): # Check consistency on dataset iris. for (name, Tree), criterion in product(CLF_TREES.items(), CLF_CRITERIONS): clf = Tree(criterion=criterion, random_state=0) clf.fit(iris.data, iris.target) score = accuracy_score(clf.predict(iris.data), iris.target) assert_greater(score, 0.9, "Failed with {0}, criterion = {1} and score = {2}" "".format(name, criterion, score)) clf = Tree(criterion=criterion, max_features=2, random_state=0) clf.fit(iris.data, iris.target) score = accuracy_score(clf.predict(iris.data), iris.target) assert_greater(score, 0.5, "Failed with {0}, criterion = {1} and score = {2}" "".format(name, criterion, score)) def test_boston(): # Check consistency on dataset boston house prices. for (name, Tree), criterion in product(REG_TREES.items(), REG_CRITERIONS): reg = Tree(criterion=criterion, random_state=0) reg.fit(boston.data, boston.target) score = mean_squared_error(boston.target, reg.predict(boston.data)) assert_less(score, 1, "Failed with {0}, criterion = {1} and score = {2}" "".format(name, criterion, score)) # using fewer features reduces the learning ability of this tree, # but reduces training time. reg = Tree(criterion=criterion, max_features=6, random_state=0) reg.fit(boston.data, boston.target) score = mean_squared_error(boston.target, reg.predict(boston.data)) assert_less(score, 2, "Failed with {0}, criterion = {1} and score = {2}" "".format(name, criterion, score)) def test_probability(): # Predict probabilities using DecisionTreeClassifier. for name, Tree in CLF_TREES.items(): clf = Tree(max_depth=1, max_features=1, random_state=42) clf.fit(iris.data, iris.target) prob_predict = clf.predict_proba(iris.data) assert_array_almost_equal(np.sum(prob_predict, 1), np.ones(iris.data.shape[0]), err_msg="Failed with {0}".format(name)) assert_array_equal(np.argmax(prob_predict, 1), clf.predict(iris.data), err_msg="Failed with {0}".format(name)) assert_almost_equal(clf.predict_proba(iris.data), np.exp(clf.predict_log_proba(iris.data)), 8, err_msg="Failed with {0}".format(name)) def test_arrayrepr(): # Check the array representation. # Check resize X = np.arange(10000)[:, np.newaxis] y = np.arange(10000) for name, Tree in REG_TREES.items(): reg = Tree(max_depth=None, random_state=0) reg.fit(X, y) def test_pure_set(): # Check when y is pure. 
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]] y = [1, 1, 1, 1, 1, 1] for name, TreeClassifier in CLF_TREES.items(): clf = TreeClassifier(random_state=0) clf.fit(X, y) assert_array_equal(clf.predict(X), y, err_msg="Failed with {0}".format(name)) for name, TreeRegressor in REG_TREES.items(): reg = TreeRegressor(random_state=0) reg.fit(X, y) assert_almost_equal(clf.predict(X), y, err_msg="Failed with {0}".format(name)) def test_numerical_stability(): # Check numerical stability. X = np.array([ [152.08097839, 140.40744019, 129.75102234, 159.90493774], [142.50700378, 135.81935120, 117.82884979, 162.75781250], [127.28772736, 140.40744019, 129.75102234, 159.90493774], [132.37025452, 143.71923828, 138.35694885, 157.84558105], [103.10237122, 143.71928406, 138.35696411, 157.84559631], [127.71276855, 143.71923828, 138.35694885, 157.84558105], [120.91514587, 140.40744019, 129.75102234, 159.90493774]]) y = np.array( [1., 0.70209277, 0.53896582, 0., 0.90914464, 0.48026916, 0.49622521]) with np.errstate(all="raise"): for name, Tree in REG_TREES.items(): reg = Tree(random_state=0) reg.fit(X, y) reg.fit(X, -y) reg.fit(-X, y) reg.fit(-X, -y) def test_importances(): # Check variable importances. X, y = datasets.make_classification(n_samples=2000, n_features=10, n_informative=3, n_redundant=0, n_repeated=0, shuffle=False, random_state=0) for name, Tree in CLF_TREES.items(): clf = Tree(random_state=0) clf.fit(X, y) importances = clf.feature_importances_ n_important = np.sum(importances > 0.1) assert_equal(importances.shape[0], 10, "Failed with {0}".format(name)) assert_equal(n_important, 3, "Failed with {0}".format(name)) X_new = assert_warns( DeprecationWarning, clf.transform, X, threshold="mean") assert_less(0, X_new.shape[1], "Failed with {0}".format(name)) assert_less(X_new.shape[1], X.shape[1], "Failed with {0}".format(name)) # Check on iris that importances are the same for all builders clf = DecisionTreeClassifier(random_state=0) clf.fit(iris.data, iris.target) clf2 = DecisionTreeClassifier(random_state=0, max_leaf_nodes=len(iris.data)) clf2.fit(iris.data, iris.target) assert_array_equal(clf.feature_importances_, clf2.feature_importances_) @raises(ValueError) def test_importances_raises(): # Check if variable importance before fit raises ValueError. clf = DecisionTreeClassifier() clf.feature_importances_ def test_importances_gini_equal_mse(): # Check that gini is equivalent to mse for binary output variable X, y = datasets.make_classification(n_samples=2000, n_features=10, n_informative=3, n_redundant=0, n_repeated=0, shuffle=False, random_state=0) # The gini index and the mean square error (variance) might differ due # to numerical instability. Since those instabilities mainly occurs at # high tree depth, we restrict this maximal depth. clf = DecisionTreeClassifier(criterion="gini", max_depth=5, random_state=0).fit(X, y) reg = DecisionTreeRegressor(criterion="mse", max_depth=5, random_state=0).fit(X, y) assert_almost_equal(clf.feature_importances_, reg.feature_importances_) assert_array_equal(clf.tree_.feature, reg.tree_.feature) assert_array_equal(clf.tree_.children_left, reg.tree_.children_left) assert_array_equal(clf.tree_.children_right, reg.tree_.children_right) assert_array_equal(clf.tree_.n_node_samples, reg.tree_.n_node_samples) def test_max_features(): # Check max_features. 
for name, TreeRegressor in REG_TREES.items(): reg = TreeRegressor(max_features="auto") reg.fit(boston.data, boston.target) assert_equal(reg.max_features_, boston.data.shape[1]) for name, TreeClassifier in CLF_TREES.items(): clf = TreeClassifier(max_features="auto") clf.fit(iris.data, iris.target) assert_equal(clf.max_features_, 2) for name, TreeEstimator in ALL_TREES.items(): est = TreeEstimator(max_features="sqrt") est.fit(iris.data, iris.target) assert_equal(est.max_features_, int(np.sqrt(iris.data.shape[1]))) est = TreeEstimator(max_features="log2") est.fit(iris.data, iris.target) assert_equal(est.max_features_, int(np.log2(iris.data.shape[1]))) est = TreeEstimator(max_features=1) est.fit(iris.data, iris.target) assert_equal(est.max_features_, 1) est = TreeEstimator(max_features=3) est.fit(iris.data, iris.target) assert_equal(est.max_features_, 3) est = TreeEstimator(max_features=0.01) est.fit(iris.data, iris.target) assert_equal(est.max_features_, 1) est = TreeEstimator(max_features=0.5) est.fit(iris.data, iris.target) assert_equal(est.max_features_, int(0.5 * iris.data.shape[1])) est = TreeEstimator(max_features=1.0) est.fit(iris.data, iris.target) assert_equal(est.max_features_, iris.data.shape[1]) est = TreeEstimator(max_features=None) est.fit(iris.data, iris.target) assert_equal(est.max_features_, iris.data.shape[1]) # use values of max_features that are invalid est = TreeEstimator(max_features=10) assert_raises(ValueError, est.fit, X, y) est = TreeEstimator(max_features=-1) assert_raises(ValueError, est.fit, X, y) est = TreeEstimator(max_features=0.0) assert_raises(ValueError, est.fit, X, y) est = TreeEstimator(max_features=1.5) assert_raises(ValueError, est.fit, X, y) est = TreeEstimator(max_features="foobar") assert_raises(ValueError, est.fit, X, y) def test_error(): # Test that it gives proper exception on deficient input. for name, TreeEstimator in CLF_TREES.items(): # predict before fit est = TreeEstimator() assert_raises(NotFittedError, est.predict_proba, X) est.fit(X, y) X2 = [[-2, -1, 1]] # wrong feature shape for sample assert_raises(ValueError, est.predict_proba, X2) for name, TreeEstimator in ALL_TREES.items(): # Invalid values for parameters assert_raises(ValueError, TreeEstimator(min_samples_leaf=-1).fit, X, y) assert_raises(ValueError, TreeEstimator(min_samples_leaf=.6).fit, X, y) assert_raises(ValueError, TreeEstimator(min_samples_leaf=0.).fit, X, y) assert_raises(ValueError, TreeEstimator(min_weight_fraction_leaf=-1).fit, X, y) assert_raises(ValueError, TreeEstimator(min_weight_fraction_leaf=0.51).fit, X, y) assert_raises(ValueError, TreeEstimator(min_samples_split=-1).fit, X, y) assert_raises(ValueError, TreeEstimator(min_samples_split=0.0).fit, X, y) assert_raises(ValueError, TreeEstimator(min_samples_split=1.1).fit, X, y) assert_raises(ValueError, TreeEstimator(max_depth=-1).fit, X, y) assert_raises(ValueError, TreeEstimator(max_features=42).fit, X, y) # Wrong dimensions est = TreeEstimator() y2 = y[:-1] assert_raises(ValueError, est.fit, X, y2) # Test with arrays that are non-contiguous. 
Xf = np.asfortranarray(X) est = TreeEstimator() est.fit(Xf, y) assert_almost_equal(est.predict(T), true_result) # predict before fitting est = TreeEstimator() assert_raises(NotFittedError, est.predict, T) # predict on vector with different dims est.fit(X, y) t = np.asarray(T) assert_raises(ValueError, est.predict, t[:, 1:]) # wrong sample shape Xt = np.array(X).T est = TreeEstimator() est.fit(np.dot(X, Xt), y) assert_raises(ValueError, est.predict, X) assert_raises(ValueError, est.apply, X) clf = TreeEstimator() clf.fit(X, y) assert_raises(ValueError, clf.predict, Xt) assert_raises(ValueError, clf.apply, Xt) # apply before fitting est = TreeEstimator() assert_raises(NotFittedError, est.apply, T) def test_min_samples_split(): """Test min_samples_split parameter""" X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE)) y = iris.target # test both DepthFirstTreeBuilder and BestFirstTreeBuilder # by setting max_leaf_nodes for max_leaf_nodes, name in product((None, 1000), ALL_TREES.keys()): TreeEstimator = ALL_TREES[name] # test for integer parameter est = TreeEstimator(min_samples_split=10, max_leaf_nodes=max_leaf_nodes, random_state=0) est.fit(X, y) # count samples on nodes, -1 means it is a leaf node_samples = est.tree_.n_node_samples[est.tree_.children_left != -1] assert_greater(np.min(node_samples), 9, "Failed with {0}".format(name)) # test for float parameter est = TreeEstimator(min_samples_split=0.2, max_leaf_nodes=max_leaf_nodes, random_state=0) est.fit(X, y) # count samples on nodes, -1 means it is a leaf node_samples = est.tree_.n_node_samples[est.tree_.children_left != -1] assert_greater(np.min(node_samples), 9, "Failed with {0}".format(name)) def test_min_samples_leaf(): # Test if leaves contain more than leaf_count training examples X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE)) y = iris.target # test both DepthFirstTreeBuilder and BestFirstTreeBuilder # by setting max_leaf_nodes for max_leaf_nodes, name in product((None, 1000), ALL_TREES.keys()): TreeEstimator = ALL_TREES[name] # test integer parameter est = TreeEstimator(min_samples_leaf=5, max_leaf_nodes=max_leaf_nodes, random_state=0) est.fit(X, y) out = est.tree_.apply(X) node_counts = np.bincount(out) # drop inner nodes leaf_count = node_counts[node_counts != 0] assert_greater(np.min(leaf_count), 4, "Failed with {0}".format(name)) # test float parameter est = TreeEstimator(min_samples_leaf=0.1, max_leaf_nodes=max_leaf_nodes, random_state=0) est.fit(X, y) out = est.tree_.apply(X) node_counts = np.bincount(out) # drop inner nodes leaf_count = node_counts[node_counts != 0] assert_greater(np.min(leaf_count), 4, "Failed with {0}".format(name)) def check_min_weight_fraction_leaf(name, datasets, sparse=False): """Test if leaves contain at least min_weight_fraction_leaf of the training set""" if sparse: X = DATASETS[datasets]["X_sparse"].astype(np.float32) else: X = DATASETS[datasets]["X"].astype(np.float32) y = DATASETS[datasets]["y"] weights = rng.rand(X.shape[0]) total_weight = np.sum(weights) TreeEstimator = ALL_TREES[name] # test both DepthFirstTreeBuilder and BestFirstTreeBuilder # by setting max_leaf_nodes for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 6)): est = TreeEstimator(min_weight_fraction_leaf=frac, max_leaf_nodes=max_leaf_nodes, random_state=0) est.fit(X, y, sample_weight=weights) if sparse: out = est.tree_.apply(X.tocsr()) else: out = est.tree_.apply(X) node_weights = np.bincount(out, weights=weights) # drop inner nodes leaf_weights = node_weights[node_weights != 0] 
assert_greater_equal( np.min(leaf_weights), total_weight * est.min_weight_fraction_leaf, "Failed with {0} " "min_weight_fraction_leaf={1}".format( name, est.min_weight_fraction_leaf)) def test_min_weight_fraction_leaf(): # Check on dense input for name in ALL_TREES: yield check_min_weight_fraction_leaf, name, "iris" # Check on sparse input for name in SPARSE_TREES: yield check_min_weight_fraction_leaf, name, "multilabel", True def test_pickle(): for name, TreeEstimator in ALL_TREES.items(): if "Classifier" in name: X, y = iris.data, iris.target else: X, y = boston.data, boston.target est = TreeEstimator(random_state=0) est.fit(X, y) score = est.score(X, y) fitted_attribute = dict() for attribute in ["max_depth", "node_count", "capacity"]: fitted_attribute[attribute] = getattr(est.tree_, attribute) serialized_object = pickle.dumps(est) est2 = pickle.loads(serialized_object) assert_equal(type(est2), est.__class__) score2 = est2.score(X, y) assert_equal(score, score2, "Failed to generate same score after pickling " "with {0}".format(name)) for attribute in fitted_attribute: assert_equal(getattr(est2.tree_, attribute), fitted_attribute[attribute], "Failed to generate same attribute {0} after " "pickling with {1}".format(attribute, name)) def test_multioutput(): # Check estimators on multi-output problems. X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [-2, 1], [-1, 1], [-1, 2], [2, -1], [1, -1], [1, -2]] y = [[-1, 0], [-1, 0], [-1, 0], [1, 1], [1, 1], [1, 1], [-1, 2], [-1, 2], [-1, 2], [1, 3], [1, 3], [1, 3]] T = [[-1, -1], [1, 1], [-1, 1], [1, -1]] y_true = [[-1, 0], [1, 1], [-1, 2], [1, 3]] # toy classification problem for name, TreeClassifier in CLF_TREES.items(): clf = TreeClassifier(random_state=0) y_hat = clf.fit(X, y).predict(T) assert_array_equal(y_hat, y_true) assert_equal(y_hat.shape, (4, 2)) proba = clf.predict_proba(T) assert_equal(len(proba), 2) assert_equal(proba[0].shape, (4, 2)) assert_equal(proba[1].shape, (4, 4)) log_proba = clf.predict_log_proba(T) assert_equal(len(log_proba), 2) assert_equal(log_proba[0].shape, (4, 2)) assert_equal(log_proba[1].shape, (4, 4)) # toy regression problem for name, TreeRegressor in REG_TREES.items(): reg = TreeRegressor(random_state=0) y_hat = reg.fit(X, y).predict(T) assert_almost_equal(y_hat, y_true) assert_equal(y_hat.shape, (4, 2)) def test_classes_shape(): # Test that n_classes_ and classes_ have proper shape. for name, TreeClassifier in CLF_TREES.items(): # Classification, single output clf = TreeClassifier(random_state=0) clf.fit(X, y) assert_equal(clf.n_classes_, 2) assert_array_equal(clf.classes_, [-1, 1]) # Classification, multi-output _y = np.vstack((y, np.array(y) * 2)).T clf = TreeClassifier(random_state=0) clf.fit(X, _y) assert_equal(len(clf.n_classes_), 2) assert_equal(len(clf.classes_), 2) assert_array_equal(clf.n_classes_, [2, 2]) assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]]) def test_unbalanced_iris(): # Check class rebalancing. 
unbalanced_X = iris.data[:125] unbalanced_y = iris.target[:125] sample_weight = compute_sample_weight("balanced", unbalanced_y) for name, TreeClassifier in CLF_TREES.items(): clf = TreeClassifier(random_state=0) clf.fit(unbalanced_X, unbalanced_y, sample_weight=sample_weight) assert_almost_equal(clf.predict(unbalanced_X), unbalanced_y) def test_memory_layout(): # Check that it works no matter the memory layout for (name, TreeEstimator), dtype in product(ALL_TREES.items(), [np.float64, np.float32]): est = TreeEstimator(random_state=0) # Nothing X = np.asarray(iris.data, dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) # C-order X = np.asarray(iris.data, order="C", dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) # F-order X = np.asarray(iris.data, order="F", dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) # Contiguous X = np.ascontiguousarray(iris.data, dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) if not est.presort: # csr matrix X = csr_matrix(iris.data, dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) # csc_matrix X = csc_matrix(iris.data, dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) # Strided X = np.asarray(iris.data[::3], dtype=dtype) y = iris.target[::3] assert_array_equal(est.fit(X, y).predict(X), y) def test_sample_weight(): # Check sample weighting. # Test that zero-weighted samples are not taken into account X = np.arange(100)[:, np.newaxis] y = np.ones(100) y[:50] = 0.0 sample_weight = np.ones(100) sample_weight[y == 0] = 0.0 clf = DecisionTreeClassifier(random_state=0) clf.fit(X, y, sample_weight=sample_weight) assert_array_equal(clf.predict(X), np.ones(100)) # Test that low weighted samples are not taken into account at low depth X = np.arange(200)[:, np.newaxis] y = np.zeros(200) y[50:100] = 1 y[100:200] = 2 X[100:200, 0] = 200 sample_weight = np.ones(200) sample_weight[y == 2] = .51 # Samples of class '2' are still weightier clf = DecisionTreeClassifier(max_depth=1, random_state=0) clf.fit(X, y, sample_weight=sample_weight) assert_equal(clf.tree_.threshold[0], 149.5) sample_weight[y == 2] = .5 # Samples of class '2' are no longer weightier clf = DecisionTreeClassifier(max_depth=1, random_state=0) clf.fit(X, y, sample_weight=sample_weight) assert_equal(clf.tree_.threshold[0], 49.5) # Threshold should have moved # Test that sample weighting is the same as having duplicates X = iris.data y = iris.target duplicates = rng.randint(0, X.shape[0], 100) clf = DecisionTreeClassifier(random_state=1) clf.fit(X[duplicates], y[duplicates]) sample_weight = np.bincount(duplicates, minlength=X.shape[0]) clf2 = DecisionTreeClassifier(random_state=1) clf2.fit(X, y, sample_weight=sample_weight) internal = clf.tree_.children_left != tree._tree.TREE_LEAF assert_array_almost_equal(clf.tree_.threshold[internal], clf2.tree_.threshold[internal]) def test_sample_weight_invalid(): # Check sample weighting raises errors. 
X = np.arange(100)[:, np.newaxis] y = np.ones(100) y[:50] = 0.0 clf = DecisionTreeClassifier(random_state=0) sample_weight = np.random.rand(100, 1) assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight) sample_weight = np.array(0) assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight) sample_weight = np.ones(101) assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight) sample_weight = np.ones(99) assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight) def check_class_weights(name): """Check class_weights resemble sample_weights behavior.""" TreeClassifier = CLF_TREES[name] # Iris is balanced, so no effect expected for using 'balanced' weights clf1 = TreeClassifier(random_state=0) clf1.fit(iris.data, iris.target) clf2 = TreeClassifier(class_weight='balanced', random_state=0) clf2.fit(iris.data, iris.target) assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_) # Make a multi-output problem with three copies of Iris iris_multi = np.vstack((iris.target, iris.target, iris.target)).T # Create user-defined weights that should balance over the outputs clf3 = TreeClassifier(class_weight=[{0: 2., 1: 2., 2: 1.}, {0: 2., 1: 1., 2: 2.}, {0: 1., 1: 2., 2: 2.}], random_state=0) clf3.fit(iris.data, iris_multi) assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_) # Check against multi-output "auto" which should also have no effect clf4 = TreeClassifier(class_weight='balanced', random_state=0) clf4.fit(iris.data, iris_multi) assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_) # Inflate importance of class 1, check against user-defined weights sample_weight = np.ones(iris.target.shape) sample_weight[iris.target == 1] *= 100 class_weight = {0: 1., 1: 100., 2: 1.} clf1 = TreeClassifier(random_state=0) clf1.fit(iris.data, iris.target, sample_weight) clf2 = TreeClassifier(class_weight=class_weight, random_state=0) clf2.fit(iris.data, iris.target) assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_) # Check that sample_weight and class_weight are multiplicative clf1 = TreeClassifier(random_state=0) clf1.fit(iris.data, iris.target, sample_weight ** 2) clf2 = TreeClassifier(class_weight=class_weight, random_state=0) clf2.fit(iris.data, iris.target, sample_weight) assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_) def test_class_weights(): for name in CLF_TREES: yield check_class_weights, name def check_class_weight_errors(name): # Test if class_weight raises errors and warnings when expected. TreeClassifier = CLF_TREES[name] _y = np.vstack((y, np.array(y) * 2)).T # Invalid preset string clf = TreeClassifier(class_weight='the larch', random_state=0) assert_raises(ValueError, clf.fit, X, y) assert_raises(ValueError, clf.fit, X, _y) # Not a list or preset for multi-output clf = TreeClassifier(class_weight=1, random_state=0) assert_raises(ValueError, clf.fit, X, _y) # Incorrect length list for multi-output clf = TreeClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0) assert_raises(ValueError, clf.fit, X, _y) def test_class_weight_errors(): for name in CLF_TREES: yield check_class_weight_errors, name def test_max_leaf_nodes(): # Test greedy trees with max_depth + 1 leafs. 
from sklearn.tree._tree import TREE_LEAF X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) k = 4 for name, TreeEstimator in ALL_TREES.items(): est = TreeEstimator(max_depth=None, max_leaf_nodes=k + 1).fit(X, y) tree = est.tree_ assert_equal((tree.children_left == TREE_LEAF).sum(), k + 1) # max_leaf_nodes in (0, 1) should raise ValueError est = TreeEstimator(max_depth=None, max_leaf_nodes=0) assert_raises(ValueError, est.fit, X, y) est = TreeEstimator(max_depth=None, max_leaf_nodes=1) assert_raises(ValueError, est.fit, X, y) est = TreeEstimator(max_depth=None, max_leaf_nodes=0.1) assert_raises(ValueError, est.fit, X, y) def test_max_leaf_nodes_max_depth(): # Test preceedence of max_leaf_nodes over max_depth. X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) k = 4 for name, TreeEstimator in ALL_TREES.items(): est = TreeEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y) tree = est.tree_ assert_greater(tree.max_depth, 1) def test_arrays_persist(): # Ensure property arrays' memory stays alive when tree disappears # non-regression for #2726 for attr in ['n_classes', 'value', 'children_left', 'children_right', 'threshold', 'impurity', 'feature', 'n_node_samples']: value = getattr(DecisionTreeClassifier().fit([[0], [1]], [0, 1]).tree_, attr) # if pointing to freed memory, contents may be arbitrary assert_true(-3 <= value.flat[0] < 3, 'Array points to arbitrary memory') def test_only_constant_features(): random_state = check_random_state(0) X = np.zeros((10, 20)) y = random_state.randint(0, 2, (10, )) for name, TreeEstimator in ALL_TREES.items(): est = TreeEstimator(random_state=0) est.fit(X, y) assert_equal(est.tree_.max_depth, 0) def test_with_only_one_non_constant_features(): X = np.hstack([np.array([[1.], [1.], [0.], [0.]]), np.zeros((4, 1000))]) y = np.array([0., 1., 0., 1.0]) for name, TreeEstimator in CLF_TREES.items(): est = TreeEstimator(random_state=0, max_features=1) est.fit(X, y) assert_equal(est.tree_.max_depth, 1) assert_array_equal(est.predict_proba(X), 0.5 * np.ones((4, 2))) for name, TreeEstimator in REG_TREES.items(): est = TreeEstimator(random_state=0, max_features=1) est.fit(X, y) assert_equal(est.tree_.max_depth, 1) assert_array_equal(est.predict(X), 0.5 * np.ones((4, ))) def test_big_input(): # Test if the warning for too large inputs is appropriate. X = np.repeat(10 ** 40., 4).astype(np.float64).reshape(-1, 1) clf = DecisionTreeClassifier() try: clf.fit(X, [0, 1, 0, 1]) except ValueError as e: assert_in("float32", str(e)) def test_realloc(): from sklearn.tree._utils import _realloc_test assert_raises(MemoryError, _realloc_test) def test_huge_allocations(): n_bits = int(platform.architecture()[0].rstrip('bit')) X = np.random.randn(10, 2) y = np.random.randint(0, 2, 10) # Sanity check: we cannot request more memory than the size of the address # space. Currently raises OverflowError. huge = 2 ** (n_bits + 1) clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge) assert_raises(Exception, clf.fit, X, y) # Non-regression test: MemoryError used to be dropped by Cython # because of missing "except *". 
huge = 2 ** (n_bits - 1) - 1 clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge) assert_raises(MemoryError, clf.fit, X, y) def check_sparse_input(tree, dataset, max_depth=None): TreeEstimator = ALL_TREES[tree] X = DATASETS[dataset]["X"] X_sparse = DATASETS[dataset]["X_sparse"] y = DATASETS[dataset]["y"] # Gain testing time if dataset in ["digits", "boston"]: n_samples = X.shape[0] // 5 X = X[:n_samples] X_sparse = X_sparse[:n_samples] y = y[:n_samples] for sparse_format in (csr_matrix, csc_matrix, coo_matrix): X_sparse = sparse_format(X_sparse) # Check the default (depth first search) d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y) s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y) assert_tree_equal(d.tree_, s.tree_, "{0} with dense and sparse format gave different " "trees".format(tree)) y_pred = d.predict(X) if tree in CLF_TREES: y_proba = d.predict_proba(X) y_log_proba = d.predict_log_proba(X) for sparse_matrix in (csr_matrix, csc_matrix, coo_matrix): X_sparse_test = sparse_matrix(X_sparse, dtype=np.float32) assert_array_almost_equal(s.predict(X_sparse_test), y_pred) if tree in CLF_TREES: assert_array_almost_equal(s.predict_proba(X_sparse_test), y_proba) assert_array_almost_equal(s.predict_log_proba(X_sparse_test), y_log_proba) def test_sparse_input(): for tree, dataset in product(SPARSE_TREES, ("clf_small", "toy", "digits", "multilabel", "sparse-pos", "sparse-neg", "sparse-mix", "zeros")): max_depth = 3 if dataset == "digits" else None yield (check_sparse_input, tree, dataset, max_depth) # Due to numerical instability of MSE and too strict test, we limit the # maximal depth for tree, dataset in product(REG_TREES, ["boston", "reg_small"]): if tree in SPARSE_TREES: yield (check_sparse_input, tree, dataset, 2) def check_sparse_parameters(tree, dataset): TreeEstimator = ALL_TREES[tree] X = DATASETS[dataset]["X"] X_sparse = DATASETS[dataset]["X_sparse"] y = DATASETS[dataset]["y"] # Check max_features d = TreeEstimator(random_state=0, max_features=1, max_depth=2).fit(X, y) s = TreeEstimator(random_state=0, max_features=1, max_depth=2).fit(X_sparse, y) assert_tree_equal(d.tree_, s.tree_, "{0} with dense and sparse format gave different " "trees".format(tree)) assert_array_almost_equal(s.predict(X), d.predict(X)) # Check min_samples_split d = TreeEstimator(random_state=0, max_features=1, min_samples_split=10).fit(X, y) s = TreeEstimator(random_state=0, max_features=1, min_samples_split=10).fit(X_sparse, y) assert_tree_equal(d.tree_, s.tree_, "{0} with dense and sparse format gave different " "trees".format(tree)) assert_array_almost_equal(s.predict(X), d.predict(X)) # Check min_samples_leaf d = TreeEstimator(random_state=0, min_samples_leaf=X_sparse.shape[0] // 2).fit(X, y) s = TreeEstimator(random_state=0, min_samples_leaf=X_sparse.shape[0] // 2).fit(X_sparse, y) assert_tree_equal(d.tree_, s.tree_, "{0} with dense and sparse format gave different " "trees".format(tree)) assert_array_almost_equal(s.predict(X), d.predict(X)) # Check best-first search d = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X, y) s = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X_sparse, y) assert_tree_equal(d.tree_, s.tree_, "{0} with dense and sparse format gave different " "trees".format(tree)) assert_array_almost_equal(s.predict(X), d.predict(X)) def test_sparse_parameters(): for tree, dataset in product(SPARSE_TREES, ["sparse-pos", "sparse-neg", "sparse-mix", "zeros"]): yield (check_sparse_parameters, tree, dataset) def check_sparse_criterion(tree, 
dataset): TreeEstimator = ALL_TREES[tree] X = DATASETS[dataset]["X"] X_sparse = DATASETS[dataset]["X_sparse"] y = DATASETS[dataset]["y"] # Check various criterion CRITERIONS = REG_CRITERIONS if tree in REG_TREES else CLF_CRITERIONS for criterion in CRITERIONS: d = TreeEstimator(random_state=0, max_depth=3, criterion=criterion).fit(X, y) s = TreeEstimator(random_state=0, max_depth=3, criterion=criterion).fit(X_sparse, y) assert_tree_equal(d.tree_, s.tree_, "{0} with dense and sparse format gave different " "trees".format(tree)) assert_array_almost_equal(s.predict(X), d.predict(X)) def test_sparse_criterion(): for tree, dataset in product(SPARSE_TREES, ["sparse-pos", "sparse-neg", "sparse-mix", "zeros"]): yield (check_sparse_criterion, tree, dataset) def check_explicit_sparse_zeros(tree, max_depth=3, n_features=10): TreeEstimator = ALL_TREES[tree] # n_samples set n_feature to ease construction of a simultaneous # construction of a csr and csc matrix n_samples = n_features samples = np.arange(n_samples) # Generate X, y random_state = check_random_state(0) indices = [] data = [] offset = 0 indptr = [offset] for i in range(n_features): n_nonzero_i = random_state.binomial(n_samples, 0.5) indices_i = random_state.permutation(samples)[:n_nonzero_i] indices.append(indices_i) data_i = random_state.binomial(3, 0.5, size=(n_nonzero_i, )) - 1 data.append(data_i) offset += n_nonzero_i indptr.append(offset) indices = np.concatenate(indices) data = np.array(np.concatenate(data), dtype=np.float32) X_sparse = csc_matrix((data, indices, indptr), shape=(n_samples, n_features)) X = X_sparse.toarray() X_sparse_test = csr_matrix((data, indices, indptr), shape=(n_samples, n_features)) X_test = X_sparse_test.toarray() y = random_state.randint(0, 3, size=(n_samples, )) # Ensure that X_sparse_test owns its data, indices and indptr array X_sparse_test = X_sparse_test.copy() # Ensure that we have explicit zeros assert_greater((X_sparse.data == 0.).sum(), 0) assert_greater((X_sparse_test.data == 0.).sum(), 0) # Perform the comparison d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y) s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y) assert_tree_equal(d.tree_, s.tree_, "{0} with dense and sparse format gave different " "trees".format(tree)) Xs = (X_test, X_sparse_test) for X1, X2 in product(Xs, Xs): assert_array_almost_equal(s.tree_.apply(X1), d.tree_.apply(X2)) assert_array_almost_equal(s.apply(X1), d.apply(X2)) assert_array_almost_equal(s.apply(X1), s.tree_.apply(X1)) assert_array_almost_equal(s.tree_.decision_path(X1).toarray(), d.tree_.decision_path(X2).toarray()) assert_array_almost_equal(s.decision_path(X1).toarray(), d.decision_path(X2).toarray()) assert_array_almost_equal(s.decision_path(X1).toarray(), s.tree_.decision_path(X1).toarray()) assert_array_almost_equal(s.predict(X1), d.predict(X2)) if tree in CLF_TREES: assert_array_almost_equal(s.predict_proba(X1), d.predict_proba(X2)) def test_explicit_sparse_zeros(): for tree in SPARSE_TREES: yield (check_explicit_sparse_zeros, tree) @ignore_warnings def check_raise_error_on_1d_input(name): TreeEstimator = ALL_TREES[name] X = iris.data[:, 0].ravel() X_2d = iris.data[:, 0].reshape((-1, 1)) y = iris.target assert_raises(ValueError, TreeEstimator(random_state=0).fit, X, y) est = TreeEstimator(random_state=0) est.fit(X_2d, y) assert_raises(ValueError, est.predict, [X]) @ignore_warnings def test_1d_input(): for name in ALL_TREES: yield check_raise_error_on_1d_input, name def _check_min_weight_leaf_split_level(TreeEstimator, X, y, 
sample_weight): # Private function to keep pretty printing in nose yielded tests est = TreeEstimator(random_state=0) est.fit(X, y, sample_weight=sample_weight) assert_equal(est.tree_.max_depth, 1) est = TreeEstimator(random_state=0, min_weight_fraction_leaf=0.4) est.fit(X, y, sample_weight=sample_weight) assert_equal(est.tree_.max_depth, 0) def check_min_weight_leaf_split_level(name): TreeEstimator = ALL_TREES[name] X = np.array([[0], [0], [0], [0], [1]]) y = [0, 0, 0, 0, 1] sample_weight = [0.2, 0.2, 0.2, 0.2, 0.2] _check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight) if not TreeEstimator().presort: _check_min_weight_leaf_split_level(TreeEstimator, csc_matrix(X), y, sample_weight) def test_min_weight_leaf_split_level(): for name in ALL_TREES: yield check_min_weight_leaf_split_level, name def check_public_apply(name): X_small32 = X_small.astype(tree._tree.DTYPE) est = ALL_TREES[name]() est.fit(X_small, y_small) assert_array_equal(est.apply(X_small), est.tree_.apply(X_small32)) def check_public_apply_sparse(name): X_small32 = csr_matrix(X_small.astype(tree._tree.DTYPE)) est = ALL_TREES[name]() est.fit(X_small, y_small) assert_array_equal(est.apply(X_small), est.tree_.apply(X_small32)) def test_public_apply(): for name in ALL_TREES: yield (check_public_apply, name) for name in SPARSE_TREES: yield (check_public_apply_sparse, name) def check_presort_sparse(est, X, y): assert_raises(ValueError, est.fit, X, y) def test_presort_sparse(): ests = (DecisionTreeClassifier(presort=True), DecisionTreeRegressor(presort=True)) sparse_matrices = (csr_matrix, csc_matrix, coo_matrix) y, X = datasets.make_multilabel_classification(random_state=0, n_samples=50, n_features=1, n_classes=20) y = y[:, 0] for est, sparse_matrix in product(ests, sparse_matrices): yield check_presort_sparse, est, sparse_matrix(X), y def test_decision_path_hardcoded(): X = iris.data y = iris.target est = DecisionTreeClassifier(random_state=0, max_depth=1).fit(X, y) node_indicator = est.decision_path(X[:2]).toarray() assert_array_equal(node_indicator, [[1, 1, 0], [1, 0, 1]]) def check_decision_path(name): X = iris.data y = iris.target n_samples = X.shape[0] TreeEstimator = ALL_TREES[name] est = TreeEstimator(random_state=0, max_depth=2) est.fit(X, y) node_indicator_csr = est.decision_path(X) node_indicator = node_indicator_csr.toarray() assert_equal(node_indicator.shape, (n_samples, est.tree_.node_count)) # Assert that leaves index are correct leaves = est.apply(X) leave_indicator = [node_indicator[i, j] for i, j in enumerate(leaves)] assert_array_almost_equal(leave_indicator, np.ones(shape=n_samples)) # Ensure only one leave node per sample all_leaves = est.tree_.children_left == TREE_LEAF assert_array_almost_equal(np.dot(node_indicator, all_leaves), np.ones(shape=n_samples)) # Ensure max depth is consistent with sum of indicator max_depth = node_indicator.sum(axis=1).max() assert_less_equal(est.tree_.max_depth, max_depth) def test_decision_path(): for name in ALL_TREES: yield (check_decision_path, name) def check_no_sparse_y_support(name): X, y = X_multilabel, csr_matrix(y_multilabel) TreeEstimator = ALL_TREES[name] assert_raises(ValueError, TreeEstimator(random_state=0).fit, X, y) def test_no_sparse_y_support(): # Currently we don't support sparse y for name in ALL_TREES: yield (check_decision_path, name)
bsd-3-clause
lkloh/aimbat-lite
scripts/egplot.py
1
1177
#!/usr/bin/env python
"""
Example python script for SAC plotting replication: p1, p2, prs.

Xiaoting Lou (xlou@u.northwestern.edu)
03/07/2012
"""

from pylab import *
import matplotlib.transforms as transforms
from pysmo.aimbat.sacpickle import loadData
from pysmo.aimbat.plotphase import getDataOpts, PPConfig, sacp1, sacp2, sacprs

# figure axes
fig = figure(figsize=(9,12))
rectp2 = [.09, .050, .8, .15]
rectp1 = [.09, .245, .8, .33]
rectp0 = [.09, .620, .8, .36]
axp2 = fig.add_axes(rectp2)
axp1 = fig.add_axes(rectp1)
axp0 = fig.add_axes(rectp0)

# read data and plot
gsac, opts = getDataOpts()

# prs
opts.ynorm = .95
saclist = gsac.saclist
prs = sacprs(saclist, opts, axp0)

# p1
opts.ynorm = 1.7
p1 = sacp1(saclist, opts, axp1)

# p2
opts.reltime = 0
p2 = sacp2(saclist, opts, axp2)

# set x limits
axp0.set_xlim(625, 762)
axp1.set_xlim(625, 762)
axp2.set_xlim(-45, 65)

# numbering
axs = [axp0, axp1, axp2]
labs = 'abc'
for ax, lab in zip(axs, labs):
    tt = '(' + lab + ')'
    trans = transforms.blended_transform_factory(ax.transAxes, ax.transAxes)
    ax.text(-.05, 1, tt, transform=trans, va='center', ha='right', size=16)

fig.savefig('egplot.pdf', format='pdf')
show()
gpl-3.0
walterreade/scikit-learn
sklearn/linear_model/tests/test_passive_aggressive.py
169
8809
import numpy as np import scipy.sparse as sp from sklearn.utils.testing import assert_less from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_array_almost_equal, assert_array_equal from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_raises from sklearn.base import ClassifierMixin from sklearn.utils import check_random_state from sklearn.datasets import load_iris from sklearn.linear_model import PassiveAggressiveClassifier from sklearn.linear_model import PassiveAggressiveRegressor iris = load_iris() random_state = check_random_state(12) indices = np.arange(iris.data.shape[0]) random_state.shuffle(indices) X = iris.data[indices] y = iris.target[indices] X_csr = sp.csr_matrix(X) class MyPassiveAggressive(ClassifierMixin): def __init__(self, C=1.0, epsilon=0.01, loss="hinge", fit_intercept=True, n_iter=1, random_state=None): self.C = C self.epsilon = epsilon self.loss = loss self.fit_intercept = fit_intercept self.n_iter = n_iter def fit(self, X, y): n_samples, n_features = X.shape self.w = np.zeros(n_features, dtype=np.float64) self.b = 0.0 for t in range(self.n_iter): for i in range(n_samples): p = self.project(X[i]) if self.loss in ("hinge", "squared_hinge"): loss = max(1 - y[i] * p, 0) else: loss = max(np.abs(p - y[i]) - self.epsilon, 0) sqnorm = np.dot(X[i], X[i]) if self.loss in ("hinge", "epsilon_insensitive"): step = min(self.C, loss / sqnorm) elif self.loss in ("squared_hinge", "squared_epsilon_insensitive"): step = loss / (sqnorm + 1.0 / (2 * self.C)) if self.loss in ("hinge", "squared_hinge"): step *= y[i] else: step *= np.sign(y[i] - p) self.w += step * X[i] if self.fit_intercept: self.b += step def project(self, X): return np.dot(X, self.w) + self.b def test_classifier_accuracy(): for data in (X, X_csr): for fit_intercept in (True, False): clf = PassiveAggressiveClassifier(C=1.0, n_iter=30, fit_intercept=fit_intercept, random_state=0) clf.fit(data, y) score = clf.score(data, y) assert_greater(score, 0.79) def test_classifier_partial_fit(): classes = np.unique(y) for data in (X, X_csr): clf = PassiveAggressiveClassifier(C=1.0, fit_intercept=True, random_state=0) for t in range(30): clf.partial_fit(data, y, classes) score = clf.score(data, y) assert_greater(score, 0.79) def test_classifier_refit(): # Classifier can be retrained on different labels and features. clf = PassiveAggressiveClassifier().fit(X, y) assert_array_equal(clf.classes_, np.unique(y)) clf.fit(X[:, :-1], iris.target_names[y]) assert_array_equal(clf.classes_, iris.target_names) def test_classifier_correctness(): y_bin = y.copy() y_bin[y != 1] = -1 for loss in ("hinge", "squared_hinge"): clf1 = MyPassiveAggressive(C=1.0, loss=loss, fit_intercept=True, n_iter=2) clf1.fit(X, y_bin) for data in (X, X_csr): clf2 = PassiveAggressiveClassifier(C=1.0, loss=loss, fit_intercept=True, n_iter=2, shuffle=False) clf2.fit(data, y_bin) assert_array_almost_equal(clf1.w, clf2.coef_.ravel(), decimal=2) def test_classifier_undefined_methods(): clf = PassiveAggressiveClassifier() for meth in ("predict_proba", "predict_log_proba", "transform"): assert_raises(AttributeError, lambda x: getattr(clf, x), meth) def test_class_weights(): # Test class weights. 
X2 = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0], [1.0, 1.0], [1.0, 0.0]]) y2 = [1, 1, 1, -1, -1] clf = PassiveAggressiveClassifier(C=0.1, n_iter=100, class_weight=None, random_state=100) clf.fit(X2, y2) assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1])) # we give a small weights to class 1 clf = PassiveAggressiveClassifier(C=0.1, n_iter=100, class_weight={1: 0.001}, random_state=100) clf.fit(X2, y2) # now the hyperplane should rotate clock-wise and # the prediction on this point should shift assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1])) def test_partial_fit_weight_class_balanced(): # partial_fit with class_weight='balanced' not supported clf = PassiveAggressiveClassifier(class_weight="balanced") assert_raises(ValueError, clf.partial_fit, X, y, classes=np.unique(y)) def test_equal_class_weight(): X2 = [[1, 0], [1, 0], [0, 1], [0, 1]] y2 = [0, 0, 1, 1] clf = PassiveAggressiveClassifier(C=0.1, n_iter=1000, class_weight=None) clf.fit(X2, y2) # Already balanced, so "balanced" weights should have no effect clf_balanced = PassiveAggressiveClassifier(C=0.1, n_iter=1000, class_weight="balanced") clf_balanced.fit(X2, y2) clf_weighted = PassiveAggressiveClassifier(C=0.1, n_iter=1000, class_weight={0: 0.5, 1: 0.5}) clf_weighted.fit(X2, y2) # should be similar up to some epsilon due to learning rate schedule assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2) assert_almost_equal(clf.coef_, clf_balanced.coef_, decimal=2) def test_wrong_class_weight_label(): # ValueError due to wrong class_weight label. X2 = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0], [1.0, 1.0], [1.0, 0.0]]) y2 = [1, 1, 1, -1, -1] clf = PassiveAggressiveClassifier(class_weight={0: 0.5}) assert_raises(ValueError, clf.fit, X2, y2) def test_wrong_class_weight_format(): # ValueError due to wrong class_weight argument type. X2 = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0], [1.0, 1.0], [1.0, 0.0]]) y2 = [1, 1, 1, -1, -1] clf = PassiveAggressiveClassifier(class_weight=[0.5]) assert_raises(ValueError, clf.fit, X2, y2) clf = PassiveAggressiveClassifier(class_weight="the larch") assert_raises(ValueError, clf.fit, X2, y2) def test_regressor_mse(): y_bin = y.copy() y_bin[y != 1] = -1 for data in (X, X_csr): for fit_intercept in (True, False): reg = PassiveAggressiveRegressor(C=1.0, n_iter=50, fit_intercept=fit_intercept, random_state=0) reg.fit(data, y_bin) pred = reg.predict(data) assert_less(np.mean((pred - y_bin) ** 2), 1.7) def test_regressor_partial_fit(): y_bin = y.copy() y_bin[y != 1] = -1 for data in (X, X_csr): reg = PassiveAggressiveRegressor(C=1.0, fit_intercept=True, random_state=0) for t in range(50): reg.partial_fit(data, y_bin) pred = reg.predict(data) assert_less(np.mean((pred - y_bin) ** 2), 1.7) def test_regressor_correctness(): y_bin = y.copy() y_bin[y != 1] = -1 for loss in ("epsilon_insensitive", "squared_epsilon_insensitive"): reg1 = MyPassiveAggressive(C=1.0, loss=loss, fit_intercept=True, n_iter=2) reg1.fit(X, y_bin) for data in (X, X_csr): reg2 = PassiveAggressiveRegressor(C=1.0, loss=loss, fit_intercept=True, n_iter=2, shuffle=False) reg2.fit(data, y_bin) assert_array_almost_equal(reg1.w, reg2.coef_.ravel(), decimal=2) def test_regressor_undefined_methods(): reg = PassiveAggressiveRegressor() for meth in ("transform",): assert_raises(AttributeError, lambda x: getattr(reg, x), meth)
bsd-3-clause
BillyLiggins/alphaBetaClassifier
machineLearning/script.py
1
2465
""" You have found that this simple logistic regression performs a lot better when you train on the posr and BAB classifier as oppose to the energy. This is werid! however may be explained by the .... Think about it. """ import numpy as np import matplotlib.pyplot as plt from sklearn import linear_model, datasets from sklearn.neural_network import MLPClassifier from sklearn.cross_validation import train_test_split data_bi=np.loadtxt("Classifier_data_bi.dat",skiprows=1,delimiter=",",usecols=(0,2)) target_bi=np.ones(data_bi.shape[0]) data_po=np.loadtxt("Classifier_data_po.dat",skiprows=1,delimiter=",",usecols=(0,2)) target_po=np.zeros(data_po.shape[0]) data=np.array([y for x in map(None,data_bi,data_po[:data_bi.shape[0]]) for y in x if y is not None]) target=np.array([y for x in map(None,target_bi,target_po[:data_bi.shape[0]]) for y in x if y is not None]) print "Data loaded" X= data[:data.shape[0]/2] Y= target[:target.shape[0]/2] testData= data[data.shape[0]/2:] testTarget= target[target.shape[0]/2:] h = 2 # step size in the mesh logreg= MLPClassifier(algorithm='l-bfgs', alpha=1e-5, hidden_layer_sizes=(3, 2), random_state=1) # logreg = linear_model.LogisticRegression(C=1e5) # we create an instance of Neighbours Classifier and fit the data. logreg.fit(X, Y) # Plot the decision boundary. For that, we will assign a color to each # point in the mesh [x_min, m_max]x[y_min, y_max]. x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5 y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5 xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h)) # ZZ = logreg.predict(np.c_[xx.ravel(), yy.ravel()]) Z = logreg.predict(testData) print Z data_plot=(testTarget==Z).astype(int) print data_plot # plt.plot(data_plot) # plt.savefig("compareResults.png") success=data_plot.tolist().count(1) fail=data_plot.tolist().count(0) print "Number of successes = "+ str(success) print "Number of failures = "+ str(fail) print "chance of failure (fail/total) = " +str(float(fail)/(fail+success)) # # Put the result into a color plot # ZZ = ZZ.reshape(xx.shape) # plt.figure(1, figsize=(4, 3)) # plt.pcolormesh(xx, yy, ZZ, cmap=plt.cm.Paired) # # #Plot also the training points # plt.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=plt.cm.Paired) # plt.xlabel('MC Energy (MeV)') # plt.ylabel('BAB') # # plt.xlim(xx.min(), xx.max()) # plt.ylim(yy.min(), yy.max()) # plt.xticks(()) # plt.yticks(()) # # plt.show()
mit
GFDRR/thinkhazard
support_tools/API_parse/ADM2_api.py
1
1956
import grequests
import pandas as pd
from collections import defaultdict

"""
Instructions:

You will need to install requests, grequests, and pandas if you have not
already done so.

conda install -c conda-forge requests grequests pandas

OR

pip install requests grequests pandas
"""

def parse_response(url, resp, df):
    """Parse JSON response and insert into dataframe."""
    # Because we send the requests asynchronously, results may not
    # be returned in the order we sent it, so we get
    # the country code from the URL
    country_code = int(url.rsplit("/",1)[1].split(".")[0])

    row = {}
    for hazard in resp:
        # Hazard type
        haztype = hazard['hazardtype']['mnemonic']

        # Hazard Level
        hazlevel = hazard['hazardlevel']['mnemonic']

        row[haztype] = hazlevel
    # End for

    # Have to loop over columns to ensure values are put in the correct position
    for col in df.columns:
        df.loc[df.index == country_code, col] = row[col]
    # End for


target_url = "http://thinkhazard.org/en/report/{}.json"
file_loc = "ADM2_TH.csv"

code_data = pd.read_csv(file_loc, sep=';')
ADM2_codes = code_data['ADM2_CODE'].tolist()
ADM2_urls = [target_url.format(adm_code) for adm_code in ADM2_codes]

# List of things to do asynchronously
url_responses = []
for url in ADM2_urls:
    url_responses.append(grequests.get(url))

# Send out asynchronous requests
responses = grequests.map(url_responses)

result_df = pd.DataFrame(columns=['FL', 'UF', 'CF', 'EQ', 'LS', 'TS',
                                  'VA', 'CY', 'DG', 'EH', 'WF'],
                         index=ADM2_codes)
result_df.index.name = 'country_code'

for r in responses:
    if r.status_code != 200:
        print("Warning! Request for {} failed!".format(r.url))
        continue

    # Pass the responding request's own URL so the country code is parsed
    # from the matching record (the loop variable `url` above would be stale here)
    parse_response(r.url, r.json(), result_df)
# End for

print(result_df)
result_df.to_csv('TH_ADM2_ext.csv', sep=';')
gpl-3.0
elijah513/scikit-learn
examples/decomposition/plot_pca_vs_lda.py
182
1743
""" ======================================================= Comparison of LDA and PCA 2D projection of Iris dataset ======================================================= The Iris dataset represents 3 kind of Iris flowers (Setosa, Versicolour and Virginica) with 4 attributes: sepal length, sepal width, petal length and petal width. Principal Component Analysis (PCA) applied to this data identifies the combination of attributes (principal components, or directions in the feature space) that account for the most variance in the data. Here we plot the different samples on the 2 first principal components. Linear Discriminant Analysis (LDA) tries to identify attributes that account for the most variance *between classes*. In particular, LDA, in contrast to PCA, is a supervised method, using known class labels. """ print(__doc__) import matplotlib.pyplot as plt from sklearn import datasets from sklearn.decomposition import PCA from sklearn.lda import LDA iris = datasets.load_iris() X = iris.data y = iris.target target_names = iris.target_names pca = PCA(n_components=2) X_r = pca.fit(X).transform(X) lda = LDA(n_components=2) X_r2 = lda.fit(X, y).transform(X) # Percentage of variance explained for each components print('explained variance ratio (first two components): %s' % str(pca.explained_variance_ratio_)) plt.figure() for c, i, target_name in zip("rgb", [0, 1, 2], target_names): plt.scatter(X_r[y == i, 0], X_r[y == i, 1], c=c, label=target_name) plt.legend() plt.title('PCA of IRIS dataset') plt.figure() for c, i, target_name in zip("rgb", [0, 1, 2], target_names): plt.scatter(X_r2[y == i, 0], X_r2[y == i, 1], c=c, label=target_name) plt.legend() plt.title('LDA of IRIS dataset') plt.show()
bsd-3-clause
andyh616/mne-python
mne/decoding/tests/test_time_gen.py
3
11769
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr> # Jean-Remi King <jeanremi.king@gmail.com> # # License: BSD (3-clause) import warnings import copy import os.path as op from nose.tools import assert_equal, assert_true, assert_raises import numpy as np from numpy.testing import assert_array_equal from mne import io, Epochs, read_events, pick_types from mne.utils import requires_sklearn, slow_test, run_tests_if_main from mne.decoding import GeneralizationAcrossTime, TimeDecoding data_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data') raw_fname = op.join(data_dir, 'test_raw.fif') event_name = op.join(data_dir, 'test-eve.fif') tmin, tmax = -0.2, 0.5 event_id = dict(aud_l=1, vis_l=3) event_id_gen = dict(aud_l=2, vis_l=4) def make_epochs(): raw = io.Raw(raw_fname, preload=False) events = read_events(event_name) picks = pick_types(raw.info, meg='mag', stim=False, ecg=False, eog=False, exclude='bads') picks = picks[0:2] decim = 30 # Test on time generalization within one condition with warnings.catch_warnings(record=True): epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks, baseline=(None, 0), preload=True, decim=decim) return epochs @slow_test @requires_sklearn def test_generalization_across_time(): """Test time generalization decoding """ from sklearn.svm import SVC from sklearn.linear_model import RANSACRegressor, LinearRegression from sklearn.preprocessing import LabelEncoder from sklearn.metrics import mean_squared_error from sklearn.cross_validation import LeaveOneLabelOut epochs = make_epochs() # Test default running gat = GeneralizationAcrossTime(picks='foo') assert_equal("<GAT | no fit, no prediction, no score>", "%s" % gat) assert_raises(ValueError, gat.fit, epochs) with warnings.catch_warnings(record=True): # check classic fit + check manual picks gat.picks = [0] gat.fit(epochs) # check optional y as array gat.picks = None gat.fit(epochs, y=epochs.events[:, 2]) # check optional y as list gat.fit(epochs, y=epochs.events[:, 2].tolist()) assert_equal(len(gat.picks_), len(gat.ch_names), 1) assert_equal("<GAT | fitted, start : -0.200 (s), stop : 0.499 (s), no " "prediction, no score>", '%s' % gat) assert_equal(gat.ch_names, epochs.ch_names) gat.predict(epochs) assert_equal("<GAT | fitted, start : -0.200 (s), stop : 0.499 (s), " "predicted 14 epochs, no score>", "%s" % gat) gat.score(epochs) gat.score(epochs, y=epochs.events[:, 2]) gat.score(epochs, y=epochs.events[:, 2].tolist()) assert_equal("<GAT | fitted, start : -0.200 (s), stop : 0.499 (s), " "predicted 14 epochs,\n scored " "(accuracy_score)>", "%s" % gat) with warnings.catch_warnings(record=True): gat.fit(epochs, y=epochs.events[:, 2]) old_mode = gat.predict_mode gat.predict_mode = 'super-foo-mode' assert_raises(ValueError, gat.predict, epochs) gat.predict_mode = old_mode gat.score(epochs, y=epochs.events[:, 2]) assert_true("accuracy_score" in '%s' % gat.scorer_) epochs2 = epochs.copy() # check _DecodingTime class assert_equal("<DecodingTime | start: -0.200 (s), stop: 0.499 (s), step: " "0.047 (s), length: 0.047 (s), n_time_windows: 15>", "%s" % gat.train_times_) assert_equal("<DecodingTime | start: -0.200 (s), stop: 0.499 (s), step: " "0.047 (s), length: 0.047 (s), n_time_windows: 15 x 15>", "%s" % gat.test_times_) # the y-check gat.predict_mode = 'mean-prediction' epochs2.events[:, 2] += 10 gat_ = copy.deepcopy(gat) assert_raises(ValueError, gat_.score, epochs2) gat.predict_mode = 'cross-validation' # Test basics # --- number of trials assert_true(gat.y_train_.shape[0] == 
gat.y_true_.shape[0] == len(gat.y_pred_[0][0]) == 14) # --- number of folds assert_true(np.shape(gat.estimators_)[1] == gat.cv) # --- length training size assert_true(len(gat.train_times_['slices']) == 15 == np.shape(gat.estimators_)[0]) # --- length testing sizes assert_true(len(gat.test_times_['slices']) == 15 == np.shape(gat.scores_)[0]) assert_true(len(gat.test_times_['slices'][0]) == 15 == np.shape(gat.scores_)[1]) # Test longer time window gat = GeneralizationAcrossTime(train_times={'length': .100}) with warnings.catch_warnings(record=True): gat2 = gat.fit(epochs) assert_true(gat is gat2) # return self assert_true(hasattr(gat2, 'cv_')) assert_true(gat2.cv_ != gat.cv) scores = gat.score(epochs) assert_true(isinstance(scores, list)) # type check assert_equal(len(scores[0]), len(scores)) # shape check assert_equal(len(gat.test_times_['slices'][0][0]), 2) # Decim training steps gat = GeneralizationAcrossTime(train_times={'step': .100}) with warnings.catch_warnings(record=True): gat.fit(epochs) gat.score(epochs) assert_true(len(gat.scores_) == len(gat.estimators_) == 8) # training time assert_equal(len(gat.scores_[0]), 15) # testing time # Test start stop training & test cv without n_fold params y_4classes = np.hstack((epochs.events[:7, 2], epochs.events[7:, 2] + 1)) gat = GeneralizationAcrossTime(cv=LeaveOneLabelOut(y_4classes), train_times={'start': 0.090, 'stop': 0.250}) # predict without fit assert_raises(RuntimeError, gat.predict, epochs) with warnings.catch_warnings(record=True): gat.fit(epochs, y=y_4classes) gat.score(epochs) assert_equal(len(gat.scores_), 4) assert_equal(gat.train_times_['times'][0], epochs.times[6]) assert_equal(gat.train_times_['times'][-1], epochs.times[9]) # Test score without passing epochs & Test diagonal decoding gat = GeneralizationAcrossTime(test_times='diagonal') with warnings.catch_warnings(record=True): gat.fit(epochs) assert_raises(RuntimeError, gat.score) gat.predict(epochs) scores = gat.score() assert_true(scores is gat.scores_) assert_equal(np.shape(gat.scores_), (15, 1)) assert_array_equal([tim for ttime in gat.test_times_['times'] for tim in ttime], gat.train_times_['times']) # Test generalization across conditions gat = GeneralizationAcrossTime(predict_mode='mean-prediction') with warnings.catch_warnings(record=True): gat.fit(epochs[0:6]) gat.predict(epochs[7:]) gat.score(epochs[7:]) # Test training time parameters gat_ = copy.deepcopy(gat) # --- start stop outside time range gat_.train_times = dict(start=-999.) assert_raises(ValueError, gat_.fit, epochs) gat_.train_times = dict(start=999.) assert_raises(ValueError, gat_.fit, epochs) # --- impossible slices gat_.train_times = dict(step=.000001) assert_raises(ValueError, gat_.fit, epochs) gat_.train_times = dict(length=.000001) assert_raises(ValueError, gat_.fit, epochs) gat_.train_times = dict(length=999.) assert_raises(ValueError, gat_.fit, epochs) # Test testing time parameters # --- outside time range gat.test_times = dict(start=-999.) assert_raises(ValueError, gat.predict, epochs) gat.test_times = dict(start=999.) 
assert_raises(ValueError, gat.predict, epochs) # --- impossible slices gat.test_times = dict(step=.000001) assert_raises(ValueError, gat.predict, epochs) gat_ = copy.deepcopy(gat) gat_.train_times_['length'] = .000001 gat_.test_times = dict(length=.000001) assert_raises(ValueError, gat_.predict, epochs) # --- test time region of interest gat.test_times = dict(step=.150) gat.predict(epochs) assert_array_equal(np.shape(gat.y_pred_), (15, 5, 14, 1)) # --- silly value gat.test_times = 'foo' assert_raises(ValueError, gat.predict, epochs) assert_raises(RuntimeError, gat.score) # --- unmatched length between training and testing time gat.test_times = dict(length=.150) assert_raises(ValueError, gat.predict, epochs) svc = SVC(C=1, kernel='linear', probability=True) gat = GeneralizationAcrossTime(clf=svc, predict_mode='mean-prediction') with warnings.catch_warnings(record=True): gat.fit(epochs) # sklearn needs it: c.f. # https://github.com/scikit-learn/scikit-learn/issues/2723 # and http://bit.ly/1u7t8UT assert_raises(ValueError, gat.score, epochs2) gat.score(epochs) scores = sum(scores, []) # flatten assert_true(0.0 <= np.min(scores) <= 1.0) assert_true(0.0 <= np.max(scores) <= 1.0) # Test that gets error if train on one dataset, test on another, and don't # specify appropriate cv: gat = GeneralizationAcrossTime() with warnings.catch_warnings(record=True): gat.fit(epochs) gat.predict(epochs) assert_raises(ValueError, gat.predict, epochs[:10]) # Check that still works with classifier that output y_pred with # shape = (n_trials, 1) instead of (n_trials,) gat = GeneralizationAcrossTime(clf=RANSACRegressor(LinearRegression()), cv=2) epochs.crop(None, epochs.times[2]) gat.fit(epochs) gat.predict(epochs) # Test combinations of complex scenarios # 2 or more distinct classes n_classes = [2, 4] # 4 tested # nicely ordered labels or not le = LabelEncoder() y = le.fit_transform(epochs.events[:, 2]) y[len(y) // 2:] += 2 ys = (y, y + 1000) # Univariate and multivariate prediction svc = SVC(C=1, kernel='linear') class SVC_proba(SVC): def predict(self, x): probas = super(SVC_proba, self).predict_proba(x) return probas[:, 0] svcp = SVC_proba(C=1, kernel='linear', probability=True) clfs = [svc, svcp] scorers = [None, mean_squared_error] # Test all combinations for clf, scorer in zip(clfs, scorers): for y in ys: for n_class in n_classes: y_ = y % n_class with warnings.catch_warnings(record=True): gat = GeneralizationAcrossTime(cv=2, clf=clf, scorer=scorer) gat.fit(epochs, y=y_) gat.score(epochs, y=y_) @requires_sklearn def test_decoding_time(): """Test TimeDecoding """ epochs = make_epochs() tg = TimeDecoding() assert_equal("<TimeDecoding | no fit, no prediction, no score>", '%s' % tg) assert_true(hasattr(tg, 'times')) assert_true(not hasattr(tg, 'train_times')) assert_true(not hasattr(tg, 'test_times')) tg.fit(epochs) assert_equal("<TimeDecoding | fitted, start : -0.200 (s), stop : 0.499 " "(s), no prediction, no score>", '%s' % tg) assert_true(not hasattr(tg, 'train_times_')) assert_true(not hasattr(tg, 'test_times_')) assert_raises(RuntimeError, tg.score, epochs=None) tg.predict(epochs) assert_equal("<TimeDecoding | fitted, start : -0.200 (s), stop : 0.499 " "(s), predicted 14 epochs, no score>", '%s' % tg) assert_array_equal(np.shape(tg.y_pred_), [15, 14, 1]) tg.score(epochs) tg.score() assert_array_equal(np.shape(tg.scores_), [15]) assert_equal("<TimeDecoding | fitted, start : -0.200 (s), stop : 0.499 " "(s), predicted 14 epochs,\n scored (accuracy_score)>", '%s' % tg) run_tests_if_main()
bsd-3-clause
sryza/freewaydata
python/playaround.py
1
1776
import pandas as pd
import matplotlib.pyplot as plot
import pylab
import plotonmap
from sklearn.cluster import KMeans

map_template_path = 'html/showfreeways.html.template'

pylab.show()
pylab.ion()

# load stuff
colnames = ['timestamp', 'station', 'district', 'route', 'direction', 'lanetype',
            'stationlen', 'samples', 'percentobserved', 'totalflow', 'avgoccupancy',
            'avgspeed', 'delay35', 'delay40', 'delay45', 'delay50', 'delay55', 'delay60']
for i in range(1,9):
    colnames.extend(['laneflow' + str(i), 'laneavgoccupancy' + str(i), 'laneavgspeed' + str(i)])

rawdata = pd.read_csv('d07_text_station_hour_2013_01.txt', header=None, names=colnames, parse_dates=[0])
station_data = plotonmap.load_station_data('d07_stations_2012_09_06.txt')

bystation = rawdata.groupby('station')

# plot a station across time:
# rawdata[rawdata['station'] == 715933]['avgspeed'].plot()

# TODO: find out which station has the max and min avg speed
# TODO: how many stations report on multiple lanes. make a histogram of number of lanes.

# find out correlation between speed and flow for each station. do a scatterplot of this correlation by station.
station_corrs = {}
for group in bystation:
    stationdata = group[1]
    corr = stationdata[['avgspeed', 'totalflow']].corr()
    station_corrs[group[0]] = corr['totalflow']['avgspeed']

#plot.hist(station_corrs.values(), bins=20, range=(-1, 1))

# plot on map
for (sid, corr) in station_corrs.iteritems():
    station_corrs[sid] = (corr + 1.0) / 2.0
plotonmap.plot_on_map(station_corrs, station_data, map_template_path, 'stationcorrs.html')

# TODO: cluster stations by correlations and plot clusters
# kmeans = KMeans(init='k-means++', n_clusters=5, n_init=10)
# kmeans.fit(corrs)

# TODO
# get time sequences
# plot distribution of
apache-2.0
zaxtax/scikit-learn
sklearn/datasets/tests/test_base.py
33
7160
import os import shutil import tempfile import warnings import nose import numpy from pickle import loads from pickle import dumps from sklearn.datasets import get_data_home from sklearn.datasets import clear_data_home from sklearn.datasets import load_files from sklearn.datasets import load_sample_images from sklearn.datasets import load_sample_image from sklearn.datasets import load_digits from sklearn.datasets import load_diabetes from sklearn.datasets import load_linnerud from sklearn.datasets import load_iris from sklearn.datasets import load_breast_cancer from sklearn.datasets import load_boston from sklearn.datasets.base import Bunch from sklearn.externals.six import b, u from sklearn.utils.testing import assert_false from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_raises DATA_HOME = tempfile.mkdtemp(prefix="scikit_learn_data_home_test_") LOAD_FILES_ROOT = tempfile.mkdtemp(prefix="scikit_learn_load_files_test_") TEST_CATEGORY_DIR1 = "" TEST_CATEGORY_DIR2 = "" def _remove_dir(path): if os.path.isdir(path): shutil.rmtree(path) def teardown_module(): """Test fixture (clean up) run once after all tests of this module""" for path in [DATA_HOME, LOAD_FILES_ROOT]: _remove_dir(path) def setup_load_files(): global TEST_CATEGORY_DIR1 global TEST_CATEGORY_DIR2 TEST_CATEGORY_DIR1 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT) TEST_CATEGORY_DIR2 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT) sample_file = tempfile.NamedTemporaryFile(dir=TEST_CATEGORY_DIR1, delete=False) sample_file.write(b("Hello World!\n")) sample_file.close() def teardown_load_files(): _remove_dir(TEST_CATEGORY_DIR1) _remove_dir(TEST_CATEGORY_DIR2) def test_data_home(): # get_data_home will point to a pre-existing folder data_home = get_data_home(data_home=DATA_HOME) assert_equal(data_home, DATA_HOME) assert_true(os.path.exists(data_home)) # clear_data_home will delete both the content and the folder it-self clear_data_home(data_home=data_home) assert_false(os.path.exists(data_home)) # if the folder is missing it will be created again data_home = get_data_home(data_home=DATA_HOME) assert_true(os.path.exists(data_home)) def test_default_empty_load_files(): res = load_files(LOAD_FILES_ROOT) assert_equal(len(res.filenames), 0) assert_equal(len(res.target_names), 0) assert_equal(res.DESCR, None) @nose.tools.with_setup(setup_load_files, teardown_load_files) def test_default_load_files(): res = load_files(LOAD_FILES_ROOT) assert_equal(len(res.filenames), 1) assert_equal(len(res.target_names), 2) assert_equal(res.DESCR, None) assert_equal(res.data, [b("Hello World!\n")]) @nose.tools.with_setup(setup_load_files, teardown_load_files) def test_load_files_w_categories_desc_and_encoding(): category = os.path.abspath(TEST_CATEGORY_DIR1).split('/').pop() res = load_files(LOAD_FILES_ROOT, description="test", categories=category, encoding="utf-8") assert_equal(len(res.filenames), 1) assert_equal(len(res.target_names), 1) assert_equal(res.DESCR, "test") assert_equal(res.data, [u("Hello World!\n")]) @nose.tools.with_setup(setup_load_files, teardown_load_files) def test_load_files_wo_load_content(): res = load_files(LOAD_FILES_ROOT, load_content=False) assert_equal(len(res.filenames), 1) assert_equal(len(res.target_names), 2) assert_equal(res.DESCR, None) assert_equal(res.get('data'), None) def test_load_sample_images(): try: res = load_sample_images() assert_equal(len(res.images), 2) assert_equal(len(res.filenames), 2) assert_true(res.DESCR) except ImportError: 
warnings.warn("Could not load sample images, PIL is not available.") def test_load_digits(): digits = load_digits() assert_equal(digits.data.shape, (1797, 64)) assert_equal(numpy.unique(digits.target).size, 10) def test_load_digits_n_class_lt_10(): digits = load_digits(9) assert_equal(digits.data.shape, (1617, 64)) assert_equal(numpy.unique(digits.target).size, 9) def test_load_sample_image(): try: china = load_sample_image('china.jpg') assert_equal(china.dtype, 'uint8') assert_equal(china.shape, (427, 640, 3)) except ImportError: warnings.warn("Could not load sample images, PIL is not available.") def test_load_missing_sample_image_error(): have_PIL = True try: try: from scipy.misc import imread except ImportError: from scipy.misc.pilutil import imread except ImportError: have_PIL = False if have_PIL: assert_raises(AttributeError, load_sample_image, 'blop.jpg') else: warnings.warn("Could not load sample images, PIL is not available.") def test_load_diabetes(): res = load_diabetes() assert_equal(res.data.shape, (442, 10)) assert_true(res.target.size, 442) def test_load_linnerud(): res = load_linnerud() assert_equal(res.data.shape, (20, 3)) assert_equal(res.target.shape, (20, 3)) assert_equal(len(res.target_names), 3) assert_true(res.DESCR) def test_load_iris(): res = load_iris() assert_equal(res.data.shape, (150, 4)) assert_equal(res.target.size, 150) assert_equal(res.target_names.size, 3) assert_true(res.DESCR) def test_load_breast_cancer(): res = load_breast_cancer() assert_equal(res.data.shape, (569, 30)) assert_equal(res.target.size, 569) assert_equal(res.target_names.size, 2) assert_true(res.DESCR) def test_load_boston(): res = load_boston() assert_equal(res.data.shape, (506, 13)) assert_equal(res.target.size, 506) assert_equal(res.feature_names.size, 13) assert_true(res.DESCR) def test_loads_dumps_bunch(): bunch = Bunch(x="x") bunch_from_pkl = loads(dumps(bunch)) bunch_from_pkl.x = "y" assert_equal(bunch_from_pkl['x'], bunch_from_pkl.x) def test_bunch_pickle_generated_with_0_16_and_read_with_0_17(): bunch = Bunch(key='original') # This reproduces a problem when Bunch pickles have been created # with scikit-learn 0.16 and are read with 0.17. Basically there # is a suprising behaviour because reading bunch.key uses # bunch.__dict__ (which is non empty for 0.16 Bunch objects) # whereas assigning into bunch.key uses bunch.__setattr__. See # https://github.com/scikit-learn/scikit-learn/issues/6196 for # more details bunch.__dict__['key'] = 'set from __dict__' bunch_from_pkl = loads(dumps(bunch)) # After loading from pickle the __dict__ should have been ignored assert_equal(bunch_from_pkl.key, 'original') assert_equal(bunch_from_pkl['key'], 'original') # Making sure that changing the attr does change the value # associated with __getitem__ as well bunch_from_pkl.key = 'changed' assert_equal(bunch_from_pkl.key, 'changed') assert_equal(bunch_from_pkl['key'], 'changed')
bsd-3-clause
jpmml/sklearn2pmml
sklearn2pmml/preprocessing/xgboost.py
1
1720
from sklearn_pandas import DataFrameMapper
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import LabelBinarizer, OneHotEncoder, OrdinalEncoder
from sklearn2pmml import _is_categorical
from sklearn2pmml.preprocessing import PMMLLabelBinarizer

def make_xgboost_dataframe_mapper(dtypes, missing_value_aware = True):
    """Construct a DataFrameMapper for feeding complex data into an XGBModel.

    Parameters
    ----------
    dtypes: iterable of tuples (column, dtype)

    missing_value_aware: boolean
        If true, use missing value aware transformers.

    Returns
    -------
    DataFrameMapper

    """
    features = list()
    for column, dtype in dtypes.items():
        if _is_categorical(dtype):
            features.append(([column], PMMLLabelBinarizer(sparse_output = True) if missing_value_aware else LabelBinarizer(sparse_output = True)))
        else:
            features.append(([column], None))
    return DataFrameMapper(features)

def make_xgboost_column_transformer(dtypes, missing_value_aware = True):
    """Construct a ColumnTransformer for feeding complex data into an XGBModel.

    Parameters
    ----------
    dtypes: iterable of tuples (column, dtype)

    missing_value_aware: boolean
        If true, use missing value aware transformers.

    Returns
    -------
    ColumnTransformer

    """
    transformers = list()
    for column, dtype in dtypes.items():
        if _is_categorical(dtype):
            transformers.append((str(column), PMMLLabelBinarizer(sparse_output = True) if missing_value_aware else Pipeline([("ordinal_encoder", OrdinalEncoder()), ("one_hot_encoder", OneHotEncoder())]), [column]))
        else:
            transformers.append((str(column), "passthrough", [column]))
    return ColumnTransformer(transformers, remainder = "drop")
agpl-3.0
Ecogenomics/CheckM
setup.py
1
1246
#!/usr/bin/env python3

import os

from setuptools import setup


def version():
    setupDir = os.path.dirname(os.path.realpath(__file__))
    versionFile = open(os.path.join(setupDir, 'checkm', 'VERSION'))
    return versionFile.readline().strip()


setup(
    name='checkm-genome',
    version=version(),
    author='Donovan Parks, Michael Imelfort, Connor Skennerton',
    author_email='donovan.parks@gmail.com',
    packages=['checkm', 'checkm.plot', 'checkm.test', 'checkm.util'],
    scripts=['bin/checkm'],
    package_data={'checkm': ['VERSION', 'DATA_CONFIG']},
    include_package_data=True,
    url='http://pypi.python.org/pypi/checkm/',
    license='GPL3',
    description='Assess the quality of putative genome bins.',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
        'Natural Language :: English',
        'Programming Language :: Python :: 3.6',
        'Topic :: Scientific/Engineering :: Bio-Informatics',
    ],
    install_requires=[
        "numpy >= 1.13.1",
        "scipy >= 0.19.1",
        "matplotlib >= 2.1.0",
        "pysam >= 0.12.0.1",
        "dendropy >= 4.4.0",
        "setuptools"],
    zip_safe=False
)
gpl-3.0
brianmingus/sklearn-emergent
emergent_sklearn.py
1
5839
from pprint import pprint import inspect import socket; socket.setdefaulttimeout(.2) # TODO: may want to tune this import json import numpy from time import sleep import sklearn from sklearn.utils.estimator_checks import check_estimator from sklearn.base import BaseEstimator, RegressorMixin # http://stackoverflow.com/questions/23866833/whats-the-full-specification-for-implementing-a-custom-scikit-learn-estimator class Transport: def __init__(self, address = '127.0.0.1', port = 5360, buffer_size = 1024): self.buf_size = buffer_size self.address='127.0.0.1' self.port=5360 self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.s.connect((self.address, self.port)) def __del__(self): self.s.close() def flush(self): while True: try: data = self.s.recv(self.buf_size) print "Receiving from server: " + str(data); except socket.timeout, se: break if not data: break def read_string(self): total_data=[] while True: try: data = self.s.recv(self.buf_size) except socket.timeout, se: break if not data: break total_data.append(data) print "Receiving from server: " + str(total_data); return ''.join(total_data) def read_json(self): try: result = json.loads(self.read_string()) except: print "Could not parse json" result = {} return result def send_json(self, obj): self.s.send(json.dumps(obj)+'\n') print "Sending to server: " + json.dumps(obj) + '\n' class EmergentSklearnRegressor(BaseEstimator, RegressorMixin): # All estimators should specify all the parameters that can be set # at the class level in their ``__init__`` as explicit keyword # arguments (no ``*args`` or ``**kwargs``). def __init__(self, transport, lrate=.1): self.lrate = lrate self.transport = transport self.banner = self.transport.read_string() print "banner", self.banner def set_member(self, path, member, value): try: self.transport.send_json({"command": "SetMember", "path": path, "member": member, "var_value": value}) except Exception, e: print str(e) return False else: self.transport.flush() return True def set_input_data(self, x, y): try: x = x / x.max(axis=0) input_rows = [] for row in x.tolist(): input_rows.append([row]) pprint(input_rows) output_rows = [] for row_i in range(len(input_rows)): cur_val = numpy.zeros((len(set(y)),1)).tolist() cur_row = [cur_val] cur_row[0][y[row_i]] = 1.0 output_rows.append(cur_row) json_obj = {"command": "SetData", "table": "StdInputData", "create": False, "data": {"columns": [{"name":"Name", "type": "String", "values": ["HEllo World"]}, {"name": "Input", "matrix": True, "type": "float", "values": input_rows}, {"name": "Output", "matrix": True, "type": "float", "values": output_rows}]}} self.transport.send_json(json_obj) except Exception, e: print str(e) return False else: self.transport.flush() return True def run_program(self, prog_name): try: self.transport.send_json({"command": "RunProgramAsync", "program": prog_name}) sleep(0.5) result = self.transport.read_json() if (result["status"] <> "OK"): print (result["error"]) return False self.transport.send_json({"command": "GetRunState"}) result = self.transport.read_json() while (result["result"] == "2"): sleep(.5) self.transport.send_json({"command": "GetRunState"}) result = self.transport.read_json() pprint(result) except Exception, e: print str(e) return False else: self.transport.flush() return True # All logic behind estimator parameters, like translating string # arguments into functions, should be done in fit def fit(self, X, y): self.set_member(".networks.layers.Input.un_geom", "x", X.shape[1]) 
self.set_member(".networks.layers.Input.un_geom", "y", 1) self.set_member(".networks.layers.Output.un_geom", "x", len(set(y))) self.set_member(".networks.layers.Output.un_geom", "y", 1) if (not self.run_program("SklearnConfigNet")): return self.set_input_data(X, y) self.run_program("MasterTrain") return self def predict(self, X): print "Predicting" return numpy.full((len(X[:,1]), 1), 1) emer_sklearn = EmergentSklearnRegressor(transport=Transport()) input_data = sklearn.datasets.load_iris()['data'] output_data = sklearn.datasets.load_iris()['target'] emer_sklearn.fit(input_data, output_data) # result = emer_sklearn.predict(X_test) del emer_sklearn # check_estimator(EmergentSklearnRegressor)
gpl-3.0
hsuantien/scikit-learn
sklearn/decomposition/tests/test_fastica.py
272
7798
""" Test the fastica algorithm. """ import itertools import warnings import numpy as np from scipy import stats from nose.tools import assert_raises from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_less from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_warns from sklearn.decomposition import FastICA, fastica, PCA from sklearn.decomposition.fastica_ import _gs_decorrelation from sklearn.externals.six import moves def center_and_norm(x, axis=-1): """ Centers and norms x **in place** Parameters ----------- x: ndarray Array with an axis of observations (statistical units) measured on random variables. axis: int, optional Axis along which the mean and variance are calculated. """ x = np.rollaxis(x, axis) x -= x.mean(axis=0) x /= x.std(axis=0) def test_gs(): # Test gram schmidt orthonormalization # generate a random orthogonal matrix rng = np.random.RandomState(0) W, _, _ = np.linalg.svd(rng.randn(10, 10)) w = rng.randn(10) _gs_decorrelation(w, W, 10) assert_less((w ** 2).sum(), 1.e-10) w = rng.randn(10) u = _gs_decorrelation(w, W, 5) tmp = np.dot(u, W.T) assert_less((tmp[:5] ** 2).sum(), 1.e-10) def test_fastica_simple(add_noise=False): # Test the FastICA algorithm on very simple data. rng = np.random.RandomState(0) # scipy.stats uses the global RNG: np.random.seed(0) n_samples = 1000 # Generate two sources: s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1 s2 = stats.t.rvs(1, size=n_samples) s = np.c_[s1, s2].T center_and_norm(s) s1, s2 = s # Mixing angle phi = 0.6 mixing = np.array([[np.cos(phi), np.sin(phi)], [np.sin(phi), -np.cos(phi)]]) m = np.dot(mixing, s) if add_noise: m += 0.1 * rng.randn(2, 1000) center_and_norm(m) # function as fun arg def g_test(x): return x ** 3, (3 * x ** 2).mean(axis=-1) algos = ['parallel', 'deflation'] nls = ['logcosh', 'exp', 'cube', g_test] whitening = [True, False] for algo, nl, whiten in itertools.product(algos, nls, whitening): if whiten: k_, mixing_, s_ = fastica(m.T, fun=nl, algorithm=algo) assert_raises(ValueError, fastica, m.T, fun=np.tanh, algorithm=algo) else: X = PCA(n_components=2, whiten=True).fit_transform(m.T) k_, mixing_, s_ = fastica(X, fun=nl, algorithm=algo, whiten=False) assert_raises(ValueError, fastica, X, fun=np.tanh, algorithm=algo) s_ = s_.T # Check that the mixing model described in the docstring holds: if whiten: assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m)) center_and_norm(s_) s1_, s2_ = s_ # Check to see if the sources have been estimated # in the wrong order if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)): s2_, s1_ = s_ s1_ *= np.sign(np.dot(s1_, s1)) s2_ *= np.sign(np.dot(s2_, s2)) # Check that we have estimated the original sources if not add_noise: assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2) assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2) else: assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=1) assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=1) # Test FastICA class _, _, sources_fun = fastica(m.T, fun=nl, algorithm=algo, random_state=0) ica = FastICA(fun=nl, algorithm=algo, random_state=0) sources = ica.fit_transform(m.T) assert_equal(ica.components_.shape, (2, 2)) assert_equal(sources.shape, (1000, 2)) assert_array_almost_equal(sources_fun, sources) assert_array_almost_equal(sources, ica.transform(m.T)) assert_equal(ica.mixing_.shape, (2, 2)) for fn in [np.tanh, 
"exp(-.5(x^2))"]: ica = FastICA(fun=fn, algorithm=algo, random_state=0) assert_raises(ValueError, ica.fit, m.T) assert_raises(TypeError, FastICA(fun=moves.xrange(10)).fit, m.T) def test_fastica_nowhiten(): m = [[0, 1], [1, 0]] # test for issue #697 ica = FastICA(n_components=1, whiten=False, random_state=0) assert_warns(UserWarning, ica.fit, m) assert_true(hasattr(ica, 'mixing_')) def test_non_square_fastica(add_noise=False): # Test the FastICA algorithm on very simple data. rng = np.random.RandomState(0) n_samples = 1000 # Generate two sources: t = np.linspace(0, 100, n_samples) s1 = np.sin(t) s2 = np.ceil(np.sin(np.pi * t)) s = np.c_[s1, s2].T center_and_norm(s) s1, s2 = s # Mixing matrix mixing = rng.randn(6, 2) m = np.dot(mixing, s) if add_noise: m += 0.1 * rng.randn(6, n_samples) center_and_norm(m) k_, mixing_, s_ = fastica(m.T, n_components=2, random_state=rng) s_ = s_.T # Check that the mixing model described in the docstring holds: assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m)) center_and_norm(s_) s1_, s2_ = s_ # Check to see if the sources have been estimated # in the wrong order if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)): s2_, s1_ = s_ s1_ *= np.sign(np.dot(s1_, s1)) s2_ *= np.sign(np.dot(s2_, s2)) # Check that we have estimated the original sources if not add_noise: assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=3) assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=3) def test_fit_transform(): # Test FastICA.fit_transform rng = np.random.RandomState(0) X = rng.random_sample((100, 10)) for whiten, n_components in [[True, 5], [False, None]]: n_components_ = (n_components if n_components is not None else X.shape[1]) ica = FastICA(n_components=n_components, whiten=whiten, random_state=0) Xt = ica.fit_transform(X) assert_equal(ica.components_.shape, (n_components_, 10)) assert_equal(Xt.shape, (100, n_components_)) ica = FastICA(n_components=n_components, whiten=whiten, random_state=0) ica.fit(X) assert_equal(ica.components_.shape, (n_components_, 10)) Xt2 = ica.transform(X) assert_array_almost_equal(Xt, Xt2) def test_inverse_transform(): # Test FastICA.inverse_transform n_features = 10 n_samples = 100 n1, n2 = 5, 10 rng = np.random.RandomState(0) X = rng.random_sample((n_samples, n_features)) expected = {(True, n1): (n_features, n1), (True, n2): (n_features, n2), (False, n1): (n_features, n2), (False, n2): (n_features, n2)} for whiten in [True, False]: for n_components in [n1, n2]: n_components_ = (n_components if n_components is not None else X.shape[1]) ica = FastICA(n_components=n_components, random_state=rng, whiten=whiten) with warnings.catch_warnings(record=True): # catch "n_components ignored" warning Xt = ica.fit_transform(X) expected_shape = expected[(whiten, n_components_)] assert_equal(ica.mixing_.shape, expected_shape) X2 = ica.inverse_transform(Xt) assert_equal(X.shape, X2.shape) # reversibility test in non-reduction case if n_components == X.shape[1]: assert_array_almost_equal(X, X2)
bsd-3-clause
analogdevicesinc/gnuradio
gr-filter/examples/channelize.py
58
7003
#!/usr/bin/env python # # Copyright 2009,2012,2013 Free Software Foundation, Inc. # # This file is part of GNU Radio # # GNU Radio is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # GNU Radio is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GNU Radio; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. # from gnuradio import gr from gnuradio import blocks from gnuradio import filter import sys, time try: from gnuradio import analog except ImportError: sys.stderr.write("Error: Program requires gr-analog.\n") sys.exit(1) try: import scipy from scipy import fftpack except ImportError: sys.stderr.write("Error: Program requires scipy (see: www.scipy.org).\n") sys.exit(1) try: import pylab from pylab import mlab except ImportError: sys.stderr.write("Error: Program requires matplotlib (see: matplotlib.sourceforge.net).\n") sys.exit(1) class pfb_top_block(gr.top_block): def __init__(self): gr.top_block.__init__(self) self._N = 2000000 # number of samples to use self._fs = 1000 # initial sampling rate self._M = M = 9 # Number of channels to channelize self._ifs = M*self._fs # initial sampling rate # Create a set of taps for the PFB channelizer self._taps = filter.firdes.low_pass_2(1, self._ifs, 475.50, 50, attenuation_dB=100, window=filter.firdes.WIN_BLACKMAN_hARRIS) # Calculate the number of taps per channel for our own information tpc = scipy.ceil(float(len(self._taps)) / float(self._M)) print "Number of taps: ", len(self._taps) print "Number of channels: ", self._M print "Taps per channel: ", tpc # Create a set of signals at different frequencies # freqs lists the frequencies of the signals that get stored # in the list "signals", which then get summed together self.signals = list() self.add = blocks.add_cc() freqs = [-70, -50, -30, -10, 10, 20, 40, 60, 80] for i in xrange(len(freqs)): f = freqs[i] + (M/2-M+i+1)*self._fs self.signals.append(analog.sig_source_c(self._ifs, analog.GR_SIN_WAVE, f, 1)) self.connect(self.signals[i], (self.add,i)) self.head = blocks.head(gr.sizeof_gr_complex, self._N) # Construct the channelizer filter self.pfb = filter.pfb.channelizer_ccf(self._M, self._taps, 1) # Construct a vector sink for the input signal to the channelizer self.snk_i = blocks.vector_sink_c() # Connect the blocks self.connect(self.add, self.head, self.pfb) self.connect(self.add, self.snk_i) # Use this to play with the channel mapping #self.pfb.set_channel_map([5,6,7,8,0,1,2,3,4]) # Create a vector sink for each of M output channels of the filter and connect it self.snks = list() for i in xrange(self._M): self.snks.append(blocks.vector_sink_c()) self.connect((self.pfb, i), self.snks[i]) def main(): tstart = time.time() tb = pfb_top_block() tb.run() tend = time.time() print "Run time: %f" % (tend - tstart) if 1: fig_in = pylab.figure(1, figsize=(16,9), facecolor="w") fig1 = pylab.figure(2, figsize=(16,9), facecolor="w") fig2 = pylab.figure(3, figsize=(16,9), facecolor="w") Ns = 1000 Ne = 10000 fftlen = 8192 winfunc = scipy.blackman fs = tb._ifs # Plot the input signal on its own figure d = 
tb.snk_i.data()[Ns:Ne] spin_f = fig_in.add_subplot(2, 1, 1) X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs, window = lambda d: d*winfunc(fftlen), scale_by_freq=True) X_in = 10.0*scipy.log10(abs(X)) f_in = scipy.arange(-fs/2.0, fs/2.0, fs/float(X_in.size)) pin_f = spin_f.plot(f_in, X_in, "b") spin_f.set_xlim([min(f_in), max(f_in)+1]) spin_f.set_ylim([-200.0, 50.0]) spin_f.set_title("Input Signal", weight="bold") spin_f.set_xlabel("Frequency (Hz)") spin_f.set_ylabel("Power (dBW)") Ts = 1.0/fs Tmax = len(d)*Ts t_in = scipy.arange(0, Tmax, Ts) x_in = scipy.array(d) spin_t = fig_in.add_subplot(2, 1, 2) pin_t = spin_t.plot(t_in, x_in.real, "b") pin_t = spin_t.plot(t_in, x_in.imag, "r") spin_t.set_xlabel("Time (s)") spin_t.set_ylabel("Amplitude") Ncols = int(scipy.floor(scipy.sqrt(tb._M))) Nrows = int(scipy.floor(tb._M / Ncols)) if(tb._M % Ncols != 0): Nrows += 1 # Plot each of the channels outputs. Frequencies on Figure 2 and # time signals on Figure 3 fs_o = tb._fs Ts_o = 1.0/fs_o Tmax_o = len(d)*Ts_o for i in xrange(len(tb.snks)): # remove issues with the transients at the beginning # also remove some corruption at the end of the stream # this is a bug, probably due to the corner cases d = tb.snks[i].data()[Ns:Ne] sp1_f = fig1.add_subplot(Nrows, Ncols, 1+i) X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs_o, window = lambda d: d*winfunc(fftlen), scale_by_freq=True) X_o = 10.0*scipy.log10(abs(X)) f_o = scipy.arange(-fs_o/2.0, fs_o/2.0, fs_o/float(X_o.size)) p2_f = sp1_f.plot(f_o, X_o, "b") sp1_f.set_xlim([min(f_o), max(f_o)+1]) sp1_f.set_ylim([-200.0, 50.0]) sp1_f.set_title(("Channel %d" % i), weight="bold") sp1_f.set_xlabel("Frequency (Hz)") sp1_f.set_ylabel("Power (dBW)") x_o = scipy.array(d) t_o = scipy.arange(0, Tmax_o, Ts_o) sp2_o = fig2.add_subplot(Nrows, Ncols, 1+i) p2_o = sp2_o.plot(t_o, x_o.real, "b") p2_o = sp2_o.plot(t_o, x_o.imag, "r") sp2_o.set_xlim([min(t_o), max(t_o)+1]) sp2_o.set_ylim([-2, 2]) sp2_o.set_title(("Channel %d" % i), weight="bold") sp2_o.set_xlabel("Time (s)") sp2_o.set_ylabel("Amplitude") pylab.show() if __name__ == "__main__": try: main() except KeyboardInterrupt: pass
gpl-3.0
michigraber/scikit-learn
doc/sphinxext/numpy_ext/docscrape_sphinx.py
408
8061
import re import inspect import textwrap import pydoc from .docscrape import NumpyDocString from .docscrape import FunctionDoc from .docscrape import ClassDoc class SphinxDocString(NumpyDocString): def __init__(self, docstring, config=None): config = {} if config is None else config self.use_plots = config.get('use_plots', False) NumpyDocString.__init__(self, docstring, config=config) # string conversion routines def _str_header(self, name, symbol='`'): return ['.. rubric:: ' + name, ''] def _str_field_list(self, name): return [':' + name + ':'] def _str_indent(self, doc, indent=4): out = [] for line in doc: out += [' ' * indent + line] return out def _str_signature(self): return [''] if self['Signature']: return ['``%s``' % self['Signature']] + [''] else: return [''] def _str_summary(self): return self['Summary'] + [''] def _str_extended_summary(self): return self['Extended Summary'] + [''] def _str_param_list(self, name): out = [] if self[name]: out += self._str_field_list(name) out += [''] for param, param_type, desc in self[name]: out += self._str_indent(['**%s** : %s' % (param.strip(), param_type)]) out += [''] out += self._str_indent(desc, 8) out += [''] return out @property def _obj(self): if hasattr(self, '_cls'): return self._cls elif hasattr(self, '_f'): return self._f return None def _str_member_list(self, name): """ Generate a member listing, autosummary:: table where possible, and a table where not. """ out = [] if self[name]: out += ['.. rubric:: %s' % name, ''] prefix = getattr(self, '_name', '') if prefix: prefix = '~%s.' % prefix autosum = [] others = [] for param, param_type, desc in self[name]: param = param.strip() if not self._obj or hasattr(self._obj, param): autosum += [" %s%s" % (prefix, param)] else: others.append((param, param_type, desc)) if autosum: # GAEL: Toctree commented out below because it creates # hundreds of sphinx warnings # out += ['.. autosummary::', ' :toctree:', ''] out += ['.. autosummary::', ''] out += autosum if others: maxlen_0 = max([len(x[0]) for x in others]) maxlen_1 = max([len(x[1]) for x in others]) hdr = "=" * maxlen_0 + " " + "=" * maxlen_1 + " " + "=" * 10 fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1) n_indent = maxlen_0 + maxlen_1 + 4 out += [hdr] for param, param_type, desc in others: out += [fmt % (param.strip(), param_type)] out += self._str_indent(desc, n_indent) out += [hdr] out += [''] return out def _str_section(self, name): out = [] if self[name]: out += self._str_header(name) out += [''] content = textwrap.dedent("\n".join(self[name])).split("\n") out += content out += [''] return out def _str_see_also(self, func_role): out = [] if self['See Also']: see_also = super(SphinxDocString, self)._str_see_also(func_role) out = ['.. seealso::', ''] out += self._str_indent(see_also[2:]) return out def _str_warnings(self): out = [] if self['Warnings']: out = ['.. warning::', ''] out += self._str_indent(self['Warnings']) return out def _str_index(self): idx = self['index'] out = [] if len(idx) == 0: return out out += ['.. 
index:: %s' % idx.get('default', '')] for section, references in idx.iteritems(): if section == 'default': continue elif section == 'refguide': out += [' single: %s' % (', '.join(references))] else: out += [' %s: %s' % (section, ','.join(references))] return out def _str_references(self): out = [] if self['References']: out += self._str_header('References') if isinstance(self['References'], str): self['References'] = [self['References']] out.extend(self['References']) out += [''] # Latex collects all references to a separate bibliography, # so we need to insert links to it import sphinx # local import to avoid test dependency if sphinx.__version__ >= "0.6": out += ['.. only:: latex', ''] else: out += ['.. latexonly::', ''] items = [] for line in self['References']: m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I) if m: items.append(m.group(1)) out += [' ' + ", ".join(["[%s]_" % item for item in items]), ''] return out def _str_examples(self): examples_str = "\n".join(self['Examples']) if (self.use_plots and 'import matplotlib' in examples_str and 'plot::' not in examples_str): out = [] out += self._str_header('Examples') out += ['.. plot::', ''] out += self._str_indent(self['Examples']) out += [''] return out else: return self._str_section('Examples') def __str__(self, indent=0, func_role="obj"): out = [] out += self._str_signature() out += self._str_index() + [''] out += self._str_summary() out += self._str_extended_summary() for param_list in ('Parameters', 'Returns', 'Raises', 'Attributes'): out += self._str_param_list(param_list) out += self._str_warnings() out += self._str_see_also(func_role) out += self._str_section('Notes') out += self._str_references() out += self._str_examples() for param_list in ('Methods',): out += self._str_member_list(param_list) out = self._str_indent(out, indent) return '\n'.join(out) class SphinxFunctionDoc(SphinxDocString, FunctionDoc): def __init__(self, obj, doc=None, config={}): self.use_plots = config.get('use_plots', False) FunctionDoc.__init__(self, obj, doc=doc, config=config) class SphinxClassDoc(SphinxDocString, ClassDoc): def __init__(self, obj, doc=None, func_doc=None, config={}): self.use_plots = config.get('use_plots', False) ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config) class SphinxObjDoc(SphinxDocString): def __init__(self, obj, doc=None, config=None): self._f = obj SphinxDocString.__init__(self, doc, config=config) def get_doc_object(obj, what=None, doc=None, config={}): if what is None: if inspect.isclass(obj): what = 'class' elif inspect.ismodule(obj): what = 'module' elif callable(obj): what = 'function' else: what = 'object' if what == 'class': return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc, config=config) elif what in ('function', 'method'): return SphinxFunctionDoc(obj, doc=doc, config=config) else: if doc is None: doc = pydoc.getdoc(obj) return SphinxObjDoc(obj, doc, config=config)
bsd-3-clause
devs1991/test_edx_docmode
venv/lib/python2.7/site-packages/nltk/probability.py
12
81647
# -*- coding: utf-8 -*- # Natural Language Toolkit: Probability and Statistics # # Copyright (C) 2001-2012 NLTK Project # Author: Edward Loper <edloper@gradient.cis.upenn.edu> # Steven Bird <sb@csse.unimelb.edu.au> (additions) # Trevor Cohn <tacohn@cs.mu.oz.au> (additions) # Peter Ljunglöf <peter.ljunglof@heatherleaf.se> (additions) # Liang Dong <ldong@clemson.edu> (additions) # Geoffrey Sampson <sampson@cantab.net> (additions) # # URL: <http://www.nltk.org/> # For license information, see LICENSE.TXT """ Classes for representing and processing probabilistic information. The ``FreqDist`` class is used to encode "frequency distributions", which count the number of times that each outcome of an experiment occurs. The ``ProbDistI`` class defines a standard interface for "probability distributions", which encode the probability of each outcome for an experiment. There are two types of probability distribution: - "derived probability distributions" are created from frequency distributions. They attempt to model the probability distribution that generated the frequency distribution. - "analytic probability distributions" are created directly from parameters (such as variance). The ``ConditionalFreqDist`` class and ``ConditionalProbDistI`` interface are used to encode conditional distributions. Conditional probability distributions can be derived or analytic; but currently the only implementation of the ``ConditionalProbDistI`` interface is ``ConditionalProbDist``, a derived distribution. """ _NINF = float('-1e300') import math import random import warnings from operator import itemgetter from itertools import imap, islice from collections import defaultdict ##////////////////////////////////////////////////////// ## Frequency Distributions ##////////////////////////////////////////////////////// # [SB] inherit from defaultdict? # [SB] for NLTK 3.0, inherit from collections.Counter? class FreqDist(dict): """ A frequency distribution for the outcomes of an experiment. A frequency distribution records the number of times each outcome of an experiment has occurred. For example, a frequency distribution could be used to record the frequency of each word type in a document. Formally, a frequency distribution can be defined as a function mapping from each sample to the number of times that sample occurred as an outcome. Frequency distributions are generally constructed by running a number of experiments, and incrementing the count for a sample every time it is an outcome of an experiment. For example, the following code will produce a frequency distribution that encodes how often each word occurs in a text: >>> from nltk.tokenize import word_tokenize >>> from nltk.probability import FreqDist >>> sent = 'This is an example sentence' >>> fdist = FreqDist() >>> for word in word_tokenize(sent): ... fdist.inc(word.lower()) An equivalent way to do this is with the initializer: >>> fdist = FreqDist(word.lower() for word in word_tokenize(sent)) """ def __init__(self, samples=None): """ Construct a new frequency distribution. If ``samples`` is given, then the frequency distribution will be initialized with the count of each object in ``samples``; otherwise, it will be initialized to be empty. In particular, ``FreqDist()`` returns an empty frequency distribution; and ``FreqDist(samples)`` first creates an empty frequency distribution, and then calls ``update`` with the list ``samples``. :param samples: The samples to initialize the frequency distribution with. 
:type samples: Sequence """ dict.__init__(self) self._N = 0 self._reset_caches() if samples: self.update(samples) def inc(self, sample, count=1): """ Increment this FreqDist's count for the given sample. :param sample: The sample whose count should be incremented. :type sample: any :param count: The amount to increment the sample's count by. :type count: int :rtype: None :raise NotImplementedError: If ``sample`` is not a supported sample type. """ if count == 0: return self[sample] = self.get(sample,0) + count def __setitem__(self, sample, value): """ Set this FreqDist's count for the given sample. :param sample: The sample whose count should be incremented. :type sample: any hashable object :param count: The new value for the sample's count :type count: int :rtype: None :raise TypeError: If ``sample`` is not a supported sample type. """ self._N += (value - self.get(sample, 0)) dict.__setitem__(self, sample, value) # Invalidate the caches self._reset_caches() def N(self): """ Return the total number of sample outcomes that have been recorded by this FreqDist. For the number of unique sample values (or bins) with counts greater than zero, use ``FreqDist.B()``. :rtype: int """ return self._N def B(self): """ Return the total number of sample values (or "bins") that have counts greater than zero. For the total number of sample outcomes recorded, use ``FreqDist.N()``. (FreqDist.B() is the same as len(FreqDist).) :rtype: int """ return len(self) def samples(self): """ Return a list of all samples that have been recorded as outcomes by this frequency distribution. Use ``fd[sample]`` to determine the count for each sample. :rtype: list """ return self.keys() def hapaxes(self): """ Return a list of all samples that occur once (hapax legomena) :rtype: list """ return [item for item in self if self[item] == 1] def Nr(self, r, bins=None): """ Return the number of samples with count r. :type r: int :param r: A sample count. :type bins: int :param bins: The number of possible sample outcomes. ``bins`` is used to calculate Nr(0). In particular, Nr(0) is ``bins-self.B()``. If ``bins`` is not specified, it defaults to ``self.B()`` (so Nr(0) will be 0). :rtype: int """ if r < 0: raise IndexError, 'FreqDist.Nr(): r must be non-negative' # Special case for Nr(0): if r == 0: if bins is None: return 0 else: return bins-self.B() # We have to search the entire distribution to find Nr. Since # this is an expensive operation, and is likely to be used # repeatedly, cache the results. if self._Nr_cache is None: self._cache_Nr_values() if r >= len(self._Nr_cache): return 0 return self._Nr_cache[r] def _cache_Nr_values(self): Nr = [0] for sample in self: c = self.get(sample, 0) if c >= len(Nr): Nr += [0]*(c+1-len(Nr)) Nr[c] += 1 self._Nr_cache = Nr def _cumulative_frequencies(self, samples=None): """ Return the cumulative frequencies of the specified samples. If no samples are specified, all counts are returned, starting with the largest. :param samples: the samples whose frequencies should be returned. :type sample: any :rtype: list(float) """ cf = 0.0 if not samples: samples = self.keys() for sample in samples: cf += self[sample] yield cf # slightly odd nomenclature freq() if FreqDist does counts and ProbDist does probs, # here, freq() does probs def freq(self, sample): """ Return the frequency of a given sample. The frequency of a sample is defined as the count of that sample divided by the total number of sample outcomes that have been recorded by this FreqDist. 
The count of a sample is defined as the number of times that sample outcome was recorded by this FreqDist. Frequencies are always real numbers in the range [0, 1]. :param sample: the sample whose frequency should be returned. :type sample: any :rtype: float """ if self._N is 0: return 0 return float(self[sample]) / self._N def max(self): """ Return the sample with the greatest number of outcomes in this frequency distribution. If two or more samples have the same number of outcomes, return one of them; which sample is returned is undefined. If no outcomes have occurred in this frequency distribution, return None. :return: The sample with the maximum number of outcomes in this frequency distribution. :rtype: any or None """ if self._max_cache is None: if len(self) == 0: raise ValueError('A FreqDist must have at least one sample before max is defined.') self._max_cache = max([(a,b) for (b,a) in self.items()])[1] return self._max_cache def plot(self, *args, **kwargs): """ Plot samples from the frequency distribution displaying the most frequent sample first. If an integer parameter is supplied, stop after this many samples have been plotted. If two integer parameters m, n are supplied, plot a subset of the samples, beginning with m and stopping at n-1. For a cumulative plot, specify cumulative=True. (Requires Matplotlib to be installed.) :param title: The title for the graph :type title: str :param cumulative: A flag to specify whether the plot is cumulative (default = False) :type title: bool """ try: import pylab except ImportError: raise ValueError('The plot function requires the matplotlib package (aka pylab). ' 'See http://matplotlib.sourceforge.net/') if len(args) == 0: args = [len(self)] samples = list(islice(self, *args)) cumulative = _get_kwarg(kwargs, 'cumulative', False) if cumulative: freqs = list(self._cumulative_frequencies(samples)) ylabel = "Cumulative Counts" else: freqs = [self[sample] for sample in samples] ylabel = "Counts" # percents = [f * 100 for f in freqs] only in ProbDist? pylab.grid(True, color="silver") if not "linewidth" in kwargs: kwargs["linewidth"] = 2 if "title" in kwargs: pylab.title(kwargs["title"]) del kwargs["title"] pylab.plot(freqs, **kwargs) pylab.xticks(range(len(samples)), [unicode(s) for s in samples], rotation=90) pylab.xlabel("Samples") pylab.ylabel(ylabel) pylab.show() def tabulate(self, *args, **kwargs): """ Tabulate the given samples from the frequency distribution (cumulative), displaying the most frequent sample first. If an integer parameter is supplied, stop after this many samples have been plotted. If two integer parameters m, n are supplied, plot a subset of the samples, beginning with m and stopping at n-1. (Requires Matplotlib to be installed.) :param samples: The samples to plot (default is all samples) :type samples: list """ if len(args) == 0: args = [len(self)] samples = list(islice(self, *args)) cumulative = _get_kwarg(kwargs, 'cumulative', False) if cumulative: freqs = list(self._cumulative_frequencies(samples)) else: freqs = [self[sample] for sample in samples] # percents = [f * 100 for f in freqs] only in ProbDist? for i in range(len(samples)): print "%4s" % str(samples[i]), print for i in range(len(samples)): print "%4d" % freqs[i], print def _sort_keys_by_value(self): if not self._item_cache: self._item_cache = sorted(dict.items(self), key=lambda x:(-x[1], x[0])) def keys(self): """ Return the samples sorted in decreasing order of frequency. 
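        A small illustrative example (hypothetical data; samples with
        equal counts are ordered by the sample itself):

            >>> fdist = FreqDist(['a', 'b', 'a', 'c', 'a', 'b'])
            >>> fdist.keys()
            ['a', 'b', 'c']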
:rtype: list(any) """ self._sort_keys_by_value() return map(itemgetter(0), self._item_cache) def values(self): """ Return the samples sorted in decreasing order of frequency. :rtype: list(any) """ self._sort_keys_by_value() return map(itemgetter(1), self._item_cache) def items(self): """ Return the items sorted in decreasing order of frequency. :rtype: list(tuple) """ self._sort_keys_by_value() return self._item_cache[:] def __iter__(self): """ Return the samples sorted in decreasing order of frequency. :rtype: iter """ return iter(self.keys()) def iterkeys(self): """ Return the samples sorted in decreasing order of frequency. :rtype: iter """ return iter(self.keys()) def itervalues(self): """ Return the values sorted in decreasing order. :rtype: iter """ return iter(self.values()) def iteritems(self): """ Return the items sorted in decreasing order of frequency. :rtype: iter of any """ self._sort_keys_by_value() return iter(self._item_cache) def copy(self): """ Create a copy of this frequency distribution. :rtype: FreqDist """ return self.__class__(self) def update(self, samples): """ Update the frequency distribution with the provided list of samples. This is a faster way to add multiple samples to the distribution. :param samples: The samples to add. :type samples: list """ try: sample_iter = samples.iteritems() except: sample_iter = imap(lambda x: (x,1), samples) for sample, count in sample_iter: self.inc(sample, count=count) def pop(self, other): self._N -= 1 self._reset_caches() return dict.pop(self, other) def popitem(self): self._N -= 1 self._reset_caches() return dict.popitem(self) def clear(self): self._N = 0 self._reset_caches() dict.clear(self) def _reset_caches(self): self._Nr_cache = None self._max_cache = None self._item_cache = None def __add__(self, other): clone = self.copy() clone.update(other) return clone def __le__(self, other): if not isinstance(other, FreqDist): return False return set(self).issubset(other) and all(self[key] <= other[key] for key in self) def __lt__(self, other): if not isinstance(other, FreqDist): return False return self <= other and self != other def __ge__(self, other): if not isinstance(other, FreqDist): return False return other <= self def __gt__(self, other): if not isinstance(other, FreqDist): return False return other < self def __repr__(self): """ Return a string representation of this FreqDist. :rtype: string """ return '<FreqDist with %d samples and %d outcomes>' % (len(self), self.N()) def __str__(self): """ Return a string representation of this FreqDist. :rtype: string """ items = ['%r: %r' % (s, self[s]) for s in self.keys()[:10]] if len(self) > 10: items.append('...') return '<FreqDist: %s>' % ', '.join(items) def __getitem__(self, sample): return self.get(sample, 0) ##////////////////////////////////////////////////////// ## Probability Distributions ##////////////////////////////////////////////////////// class ProbDistI(object): """ A probability distribution for the outcomes of an experiment. A probability distribution specifies how likely it is that an experiment will have any given outcome. For example, a probability distribution could be used to predict the probability that a token in a document will have a given type. Formally, a probability distribution can be defined as a function mapping from samples to nonnegative real numbers, such that the sum of every number in the function's range is 1.0. A ``ProbDist`` is often used to model the probability distribution of the experiment used to generate a frequency distribution. 
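    A minimal illustrative sketch, using the ``MLEProbDist`` subclass
    defined later in this module (hypothetical counts):

        >>> fdist = FreqDist(['a', 'a', 'a', 'b'])
        >>> pdist = MLEProbDist(fdist)
        >>> pdist.prob('a')
        0.75
        >>> sum(pdist.prob(s) for s in pdist.samples())
        1.0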
""" SUM_TO_ONE = True """True if the probabilities of the samples in this probability distribution will always sum to one.""" def __init__(self): if self.__class__ == ProbDistI: raise NotImplementedError("Interfaces can't be instantiated") def prob(self, sample): """ Return the probability for a given sample. Probabilities are always real numbers in the range [0, 1]. :param sample: The sample whose probability should be returned. :type sample: any :rtype: float """ raise NotImplementedError() def logprob(self, sample): """ Return the base 2 logarithm of the probability for a given sample. :param sample: The sample whose probability should be returned. :type sample: any :rtype: float """ # Default definition, in terms of prob() p = self.prob(sample) if p == 0: # Use some approximation to infinity. What this does # depends on your system's float implementation. return _NINF else: return math.log(p, 2) def max(self): """ Return the sample with the greatest probability. If two or more samples have the same probability, return one of them; which sample is returned is undefined. :rtype: any """ raise NotImplementedError() def samples(self): """ Return a list of all samples that have nonzero probabilities. Use ``prob`` to find the probability of each sample. :rtype: list """ raise NotImplementedError() # cf self.SUM_TO_ONE def discount(self): """ Return the ratio by which counts are discounted on average: c*/c :rtype: float """ return 0.0 # Subclasses should define more efficient implementations of this, # where possible. def generate(self): """ Return a randomly selected sample from this probability distribution. The probability of returning each sample ``samp`` is equal to ``self.prob(samp)``. """ p = random.random() for sample in self.samples(): p -= self.prob(sample) if p <= 0: return sample # allow for some rounding error: if p < .0001: return sample # we *should* never get here if self.SUM_TO_ONE: warnings.warn("Probability distribution %r sums to %r; generate()" " is returning an arbitrary sample." % (self, 1-p)) return random.choice(list(self.samples())) class UniformProbDist(ProbDistI): """ A probability distribution that assigns equal probability to each sample in a given set; and a zero probability to all other samples. """ def __init__(self, samples): """ Construct a new uniform probability distribution, that assigns equal probability to each sample in ``samples``. :param samples: The samples that should be given uniform probability. :type samples: list :raise ValueError: If ``samples`` is empty. """ if len(samples) == 0: raise ValueError('A Uniform probability distribution must '+ 'have at least one sample.') self._sampleset = set(samples) self._prob = 1.0/len(self._sampleset) self._samples = list(self._sampleset) def prob(self, sample): if sample in self._sampleset: return self._prob else: return 0 def max(self): return self._samples[0] def samples(self): return self._samples def __repr__(self): return '<UniformProbDist with %d samples>' % len(self._sampleset) class DictionaryProbDist(ProbDistI): """ A probability distribution whose probabilities are directly specified by a given dictionary. The given dictionary maps samples to probabilities. """ def __init__(self, prob_dict=None, log=False, normalize=False): """ Construct a new probability distribution from the given dictionary, which maps values to probabilities (or to log probabilities, if ``log`` is true). If ``normalize`` is true, then the probability values are scaled by a constant factor such that they sum to 1. 
If called without arguments, the resulting probability distribution assigns zero probabiliy to all values. """ if prob_dict is None: self._prob_dict = {} else: self._prob_dict = prob_dict.copy() self._log = log # Normalize the distribution, if requested. if normalize: if log: value_sum = sum_logs(self._prob_dict.values()) if value_sum <= _NINF: logp = math.log(1.0/len(prob_dict), 2) for x in prob_dict: self._prob_dict[x] = logp else: for (x, p) in self._prob_dict.items(): self._prob_dict[x] -= value_sum else: value_sum = sum(self._prob_dict.values()) if value_sum == 0: p = 1.0/len(prob_dict) for x in prob_dict: self._prob_dict[x] = p else: norm_factor = 1.0/value_sum for (x, p) in self._prob_dict.items(): self._prob_dict[x] *= norm_factor def prob(self, sample): if self._log: if sample not in self._prob_dict: return 0 else: return 2**(self._prob_dict[sample]) else: return self._prob_dict.get(sample, 0) def logprob(self, sample): if self._log: return self._prob_dict.get(sample, _NINF) else: if sample not in self._prob_dict: return _NINF elif self._prob_dict[sample] == 0: return _NINF else: return math.log(self._prob_dict[sample], 2) def max(self): if not hasattr(self, '_max'): self._max = max((p,v) for (v,p) in self._prob_dict.items())[1] return self._max def samples(self): return self._prob_dict.keys() def __repr__(self): return '<ProbDist with %d samples>' % len(self._prob_dict) class MLEProbDist(ProbDistI): """ The maximum likelihood estimate for the probability distribution of the experiment used to generate a frequency distribution. The "maximum likelihood estimate" approximates the probability of each sample as the frequency of that sample in the frequency distribution. """ def __init__(self, freqdist, bins=None): """ Use the maximum likelihood estimate to create a probability distribution for the experiment used to generate ``freqdist``. :type freqdist: FreqDist :param freqdist: The frequency distribution that the probability estimates should be based on. """ self._freqdist = freqdist def freqdist(self): """ Return the frequency distribution that this probability distribution is based on. :rtype: FreqDist """ return self._freqdist def prob(self, sample): return self._freqdist.freq(sample) def max(self): return self._freqdist.max() def samples(self): return self._freqdist.keys() def __repr__(self): """ :rtype: str :return: A string representation of this ``ProbDist``. """ return '<MLEProbDist based on %d samples>' % self._freqdist.N() class LidstoneProbDist(ProbDistI): """ The Lidstone estimate for the probability distribution of the experiment used to generate a frequency distribution. The "Lidstone estimate" is paramaterized by a real number *gamma*, which typically ranges from 0 to 1. The Lidstone estimate approximates the probability of a sample with count *c* from an experiment with *N* outcomes and *B* bins as ``c+gamma)/(N+B*gamma)``. This is equivalant to adding *gamma* to the count for each bin, and taking the maximum likelihood estimate of the resulting frequency distribution. """ SUM_TO_ONE = False def __init__(self, freqdist, gamma, bins=None): """ Use the Lidstone estimate to create a probability distribution for the experiment used to generate ``freqdist``. :type freqdist: FreqDist :param freqdist: The frequency distribution that the probability estimates should be based on. :type gamma: float :param gamma: A real number used to paramaterize the estimate. 
The Lidstone estimate is equivalant to adding *gamma* to the count for each bin, and taking the maximum likelihood estimate of the resulting frequency distribution. :type bins: int :param bins: The number of sample values that can be generated by the experiment that is described by the probability distribution. This value must be correctly set for the probabilities of the sample values to sum to one. If ``bins`` is not specified, it defaults to ``freqdist.B()``. """ if (bins == 0) or (bins is None and freqdist.N() == 0): name = self.__class__.__name__[:-8] raise ValueError('A %s probability distribution ' % name + 'must have at least one bin.') if (bins is not None) and (bins < freqdist.B()): name = self.__class__.__name__[:-8] raise ValueError('\nThe number of bins in a %s distribution ' % name + '(%d) must be greater than or equal to\n' % bins + 'the number of bins in the FreqDist used ' + 'to create it (%d).' % freqdist.N()) self._freqdist = freqdist self._gamma = float(gamma) self._N = self._freqdist.N() if bins is None: bins = freqdist.B() self._bins = bins self._divisor = self._N + bins * gamma if self._divisor == 0.0: # In extreme cases we force the probability to be 0, # which it will be, since the count will be 0: self._gamma = 0 self._divisor = 1 def freqdist(self): """ Return the frequency distribution that this probability distribution is based on. :rtype: FreqDist """ return self._freqdist def prob(self, sample): c = self._freqdist[sample] return (c + self._gamma) / self._divisor def max(self): # For Lidstone distributions, probability is monotonic with # frequency, so the most probable sample is the one that # occurs most frequently. return self._freqdist.max() def samples(self): return self._freqdist.keys() def discount(self): gb = self._gamma * self._bins return gb / (self._N + gb) def __repr__(self): """ Return a string representation of this ``ProbDist``. :rtype: str """ return '<LidstoneProbDist based on %d samples>' % self._freqdist.N() class LaplaceProbDist(LidstoneProbDist): """ The Laplace estimate for the probability distribution of the experiment used to generate a frequency distribution. The "Laplace estimate" approximates the probability of a sample with count *c* from an experiment with *N* outcomes and *B* bins as *(c+1)/(N+B)*. This is equivalant to adding one to the count for each bin, and taking the maximum likelihood estimate of the resulting frequency distribution. """ def __init__(self, freqdist, bins=None): """ Use the Laplace estimate to create a probability distribution for the experiment used to generate ``freqdist``. :type freqdist: FreqDist :param freqdist: The frequency distribution that the probability estimates should be based on. :type bins: int :param bins: The number of sample values that can be generated by the experiment that is described by the probability distribution. This value must be correctly set for the probabilities of the sample values to sum to one. If ``bins`` is not specified, it defaults to ``freqdist.B()``. """ LidstoneProbDist.__init__(self, freqdist, 1, bins) def __repr__(self): """ :rtype: str :return: A string representation of this ``ProbDist``. """ return '<LaplaceProbDist based on %d samples>' % self._freqdist.N() class ELEProbDist(LidstoneProbDist): """ The expected likelihood estimate for the probability distribution of the experiment used to generate a frequency distribution. 
The "expected likelihood estimate" approximates the probability of a sample with count *c* from an experiment with *N* outcomes and *B* bins as *(c+0.5)/(N+B/2)*. This is equivalant to adding 0.5 to the count for each bin, and taking the maximum likelihood estimate of the resulting frequency distribution. """ def __init__(self, freqdist, bins=None): """ Use the expected likelihood estimate to create a probability distribution for the experiment used to generate ``freqdist``. :type freqdist: FreqDist :param freqdist: The frequency distribution that the probability estimates should be based on. :type bins: int :param bins: The number of sample values that can be generated by the experiment that is described by the probability distribution. This value must be correctly set for the probabilities of the sample values to sum to one. If ``bins`` is not specified, it defaults to ``freqdist.B()``. """ LidstoneProbDist.__init__(self, freqdist, 0.5, bins) def __repr__(self): """ Return a string representation of this ``ProbDist``. :rtype: str """ return '<ELEProbDist based on %d samples>' % self._freqdist.N() class HeldoutProbDist(ProbDistI): """ The heldout estimate for the probability distribution of the experiment used to generate two frequency distributions. These two frequency distributions are called the "heldout frequency distribution" and the "base frequency distribution." The "heldout estimate" uses uses the "heldout frequency distribution" to predict the probability of each sample, given its frequency in the "base frequency distribution". In particular, the heldout estimate approximates the probability for a sample that occurs *r* times in the base distribution as the average frequency in the heldout distribution of all samples that occur *r* times in the base distribution. This average frequency is *Tr[r]/(Nr[r].N)*, where: - *Tr[r]* is the total count in the heldout distribution for all samples that occur *r* times in the base distribution. - *Nr[r]* is the number of samples that occur *r* times in the base distribution. - *N* is the number of outcomes recorded by the heldout frequency distribution. In order to increase the efficiency of the ``prob`` member function, *Tr[r]/(Nr[r].N)* is precomputed for each value of *r* when the ``HeldoutProbDist`` is created. :type _estimate: list(float) :ivar _estimate: A list mapping from *r*, the number of times that a sample occurs in the base distribution, to the probability estimate for that sample. ``_estimate[r]`` is calculated by finding the average frequency in the heldout distribution of all samples that occur *r* times in the base distribution. In particular, ``_estimate[r]`` = *Tr[r]/(Nr[r].N)*. :type _max_r: int :ivar _max_r: The maximum number of times that any sample occurs in the base distribution. ``_max_r`` is used to decide how large ``_estimate`` must be. """ SUM_TO_ONE = False def __init__(self, base_fdist, heldout_fdist, bins=None): """ Use the heldout estimate to create a probability distribution for the experiment used to generate ``base_fdist`` and ``heldout_fdist``. :type base_fdist: FreqDist :param base_fdist: The base frequency distribution. :type heldout_fdist: FreqDist :param heldout_fdist: The heldout frequency distribution. :type bins: int :param bins: The number of sample values that can be generated by the experiment that is described by the probability distribution. This value must be correctly set for the probabilities of the sample values to sum to one. If ``bins`` is not specified, it defaults to ``freqdist.B()``. 
""" self._base_fdist = base_fdist self._heldout_fdist = heldout_fdist # The max number of times any sample occurs in base_fdist. self._max_r = base_fdist[base_fdist.max()] # Calculate Tr, Nr, and N. Tr = self._calculate_Tr() Nr = [base_fdist.Nr(r, bins) for r in range(self._max_r+1)] N = heldout_fdist.N() # Use Tr, Nr, and N to compute the probability estimate for # each value of r. self._estimate = self._calculate_estimate(Tr, Nr, N) def _calculate_Tr(self): """ Return the list *Tr*, where *Tr[r]* is the total count in ``heldout_fdist`` for all samples that occur *r* times in ``base_fdist``. :rtype: list(float) """ Tr = [0.0] * (self._max_r+1) for sample in self._heldout_fdist: r = self._base_fdist[sample] Tr[r] += self._heldout_fdist[sample] return Tr def _calculate_estimate(self, Tr, Nr, N): """ Return the list *estimate*, where *estimate[r]* is the probability estimate for any sample that occurs *r* times in the base frequency distribution. In particular, *estimate[r]* is *Tr[r]/(N[r].N)*. In the special case that *N[r]=0*, *estimate[r]* will never be used; so we define *estimate[r]=None* for those cases. :rtype: list(float) :type Tr: list(float) :param Tr: the list *Tr*, where *Tr[r]* is the total count in the heldout distribution for all samples that occur *r* times in base distribution. :type Nr: list(float) :param Nr: The list *Nr*, where *Nr[r]* is the number of samples that occur *r* times in the base distribution. :type N: int :param N: The total number of outcomes recorded by the heldout frequency distribution. """ estimate = [] for r in range(self._max_r+1): if Nr[r] == 0: estimate.append(None) else: estimate.append(Tr[r]/(Nr[r]*N)) return estimate def base_fdist(self): """ Return the base frequency distribution that this probability distribution is based on. :rtype: FreqDist """ return self._base_fdist def heldout_fdist(self): """ Return the heldout frequency distribution that this probability distribution is based on. :rtype: FreqDist """ return self._heldout_fdist def samples(self): return self._base_fdist.keys() def prob(self, sample): # Use our precomputed probability estimate. r = self._base_fdist[sample] return self._estimate[r] def max(self): # Note: the Heldout estimation is *not* necessarily monotonic; # so this implementation is currently broken. However, it # should give the right answer *most* of the time. :) return self._base_fdist.max() def discount(self): raise NotImplementedError() def __repr__(self): """ :rtype: str :return: A string representation of this ``ProbDist``. """ s = '<HeldoutProbDist: %d base samples; %d heldout samples>' return s % (self._base_fdist.N(), self._heldout_fdist.N()) class CrossValidationProbDist(ProbDistI): """ The cross-validation estimate for the probability distribution of the experiment used to generate a set of frequency distribution. The "cross-validation estimate" for the probability of a sample is found by averaging the held-out estimates for the sample in each pair of frequency distributions. """ SUM_TO_ONE = False def __init__(self, freqdists, bins): """ Use the cross-validation estimate to create a probability distribution for the experiment used to generate ``freqdists``. :type freqdists: list(FreqDist) :param freqdists: A list of the frequency distributions generated by the experiment. :type bins: int :param bins: The number of sample values that can be generated by the experiment that is described by the probability distribution. This value must be correctly set for the probabilities of the sample values to sum to one. 
If ``bins`` is not specified, it defaults to ``freqdist.B()``. """ self._freqdists = freqdists # Create a heldout probability distribution for each pair of # frequency distributions in freqdists. self._heldout_probdists = [] for fdist1 in freqdists: for fdist2 in freqdists: if fdist1 is not fdist2: probdist = HeldoutProbDist(fdist1, fdist2, bins) self._heldout_probdists.append(probdist) def freqdists(self): """ Return the list of frequency distributions that this ``ProbDist`` is based on. :rtype: list(FreqDist) """ return self._freqdists def samples(self): # [xx] nb: this is not too efficient return set(sum([fd.keys() for fd in self._freqdists], [])) def prob(self, sample): # Find the average probability estimate returned by each # heldout distribution. prob = 0.0 for heldout_probdist in self._heldout_probdists: prob += heldout_probdist.prob(sample) return prob/len(self._heldout_probdists) def discount(self): raise NotImplementedError() def __repr__(self): """ Return a string representation of this ``ProbDist``. :rtype: str """ return '<CrossValidationProbDist: %d-way>' % len(self._freqdists) class WittenBellProbDist(ProbDistI): """ The Witten-Bell estimate of a probability distribution. This distribution allocates uniform probability mass to as yet unseen events by using the number of events that have only been seen once. The probability mass reserved for unseen events is equal to *T / (N + T)* where *T* is the number of observed event types and *N* is the total number of observed events. This equates to the maximum likelihood estimate of a new type event occurring. The remaining probability mass is discounted such that all probability estimates sum to one, yielding: - *p = T / Z (N + T)*, if count = 0 - *p = c / (N + T)*, otherwise """ def __init__(self, freqdist, bins=None): """ Creates a distribution of Witten-Bell probability estimates. This distribution allocates uniform probability mass to as yet unseen events by using the number of events that have only been seen once. The probability mass reserved for unseen events is equal to *T / (N + T)* where *T* is the number of observed event types and *N* is the total number of observed events. This equates to the maximum likelihood estimate of a new type event occurring. The remaining probability mass is discounted such that all probability estimates sum to one, yielding: - *p = T / Z (N + T)*, if count = 0 - *p = c / (N + T)*, otherwise The parameters *T* and *N* are taken from the ``freqdist`` parameter (the ``B()`` and ``N()`` values). The normalising factor *Z* is calculated using these values along with the ``bins`` parameter. :param freqdist: The frequency counts upon which to base the estimation. :type freqdist: FreqDist :param bins: The number of possible event types. This must be at least as large as the number of bins in the ``freqdist``. 
If None, then it's assumed to be equal to that of the ``freqdist`` :type bins: int """ assert bins is None or bins >= freqdist.B(),\ 'Bins parameter must not be less than freqdist.B()' if bins is None: bins = freqdist.B() self._freqdist = freqdist self._T = self._freqdist.B() self._Z = bins - self._freqdist.B() self._N = self._freqdist.N() # self._P0 is P(0), precalculated for efficiency: if self._N==0: # if freqdist is empty, we approximate P(0) by a UniformProbDist: self._P0 = 1.0 / self._Z else: self._P0 = self._T / float(self._Z * (self._N + self._T)) def prob(self, sample): # inherit docs from ProbDistI c = self._freqdist[sample] if c == 0: return self._P0 else: return c / float(self._N + self._T) def max(self): return self._freqdist.max() def samples(self): return self._freqdist.keys() def freqdist(self): return self._freqdist def discount(self): raise NotImplementedError() def __repr__(self): """ Return a string representation of this ``ProbDist``. :rtype: str """ return '<WittenBellProbDist based on %d samples>' % self._freqdist.N() ##////////////////////////////////////////////////////// ## Good-Turing Probablity Distributions ##////////////////////////////////////////////////////// # Good-Turing frequency estimation was contributed by Alan Turing and # his statistical assistant I.J. Good, during their collaboration in # the WWII. It is a statistical technique for predicting the # probability of occurrence of objects belonging to an unknown number # of species, given past observations of such objects and their # species. (In drawing balls from an urn, the 'objects' would be balls # and the 'species' would be the distinct colors of the balls (finite # but unknown in number). # # The situation frequency zero is quite common in the original # Good-Turing estimation. Bill Gale and Geoffrey Sampson present a # simple and effective approach, Simple Good-Turing. As a smoothing # curve they simply use a power curve: # # Nr = a*r^b (with b < -1 to give the appropriate hyperbolic # relationsihp) # # They estimate a and b by simple linear regression technique on the # logarithmic form of the equation: # # log Nr = a + b*log(r) # # However, they suggest that such a simple curve is probably only # appropriate for high values of r. For low values of r, they use the # measured Nr directly. (see M&S, p.213) # # Gale and Sampson propose to use r while the difference between r and # r* is 1.96 greather than the standar deviation, and switch to r* if # it is less or equal: # # |r - r*| > 1.96 * sqrt((r + 1)^2 (Nr+1 / Nr^2) (1 + Nr+1 / Nr)) # # The 1.96 coefficient correspond to a 0.05 significance criterion, # some implementations can use a coefficient of 1.65 for a 0.1 # significance criterion. # class GoodTuringProbDist(ProbDistI): """ The Good-Turing estimate of a probability distribution. This method calculates the probability mass to assign to events with zero or low counts based on the number of events with higher counts. It does so by using the smoothed count *c\**: - *c\* = (c + 1) N(c + 1) / N(c)* for c >= 1 - *things with frequency zero in training* = N(1) for c == 0 where *c* is the original count, *N(i)* is the number of event types observed with count *i*. We can think the count of unseen as the count of frequency one (see Jurafsky & Martin 2nd Edition, p101). """ def __init__(self, freqdist, bins=None): """ :param freqdist: The frequency counts upon which to base the estimation. :type freqdist: FreqDist :param bins: The number of possible event types. 
This must be at least as large as the number of bins in the ``freqdist``. If None, then it's assumed to be equal to that of the ``freqdist`` :type bins: int """ assert bins is None or bins >= freqdist.B(),\ 'Bins parameter must not be less than freqdist.B()' if bins is None: bins = freqdist.B() self._freqdist = freqdist self._bins = bins def prob(self, sample): count = self._freqdist[sample] # unseen sample's frequency (count zero) uses frequency one's if count == 0 and self._freqdist.N() != 0: p0 = 1.0 * self._freqdist.Nr(1) / self._freqdist.N() if self._bins == self._freqdist.B(): p0 = 0.0 else: p0 = p0 / (1.0 * self._bins - self._freqdist.B()) nc = self._freqdist.Nr(count) ncn = self._freqdist.Nr(count + 1) # avoid divide-by-zero errors for sparse datasets if nc == 0 or self._freqdist.N() == 0: return 0 return 1.0 * (count + 1) * ncn / (nc * self._freqdist.N()) def max(self): return self._freqdist.max() def samples(self): return self._freqdist.keys() def discount(self): """ :return: The probability mass transferred from the seen samples to the unseen samples. :rtype: float """ return 1.0 * self._freqdist.Nr(1) / self._freqdist.N() def freqdist(self): return self._freqdist def __repr__(self): """ Return a string representation of this ``ProbDist``. :rtype: str """ return '<GoodTuringProbDist based on %d samples>' % self._freqdist.N() ##////////////////////////////////////////////////////// ## Simple Good-Turing Probablity Distributions ##////////////////////////////////////////////////////// class SimpleGoodTuringProbDist(ProbDistI): """ SimpleGoodTuring ProbDist approximates from frequency to freqency of frequency into a linear line under log space by linear regression. Details of Simple Good-Turing algorithm can be found in: - Good Turing smoothing without tears" (Gale & Sampson 1995), Journal of Quantitative Linguistics, vol. 2 pp. 217-237. - "Speech and Language Processing (Jurafsky & Martin), 2nd Edition, Chapter 4.5 p103 (log(Nc) = a + b*log(c)) - http://www.grsampson.net/RGoodTur.html Given a set of pair (xi, yi), where the xi denotes the freqency and yi denotes the freqency of freqency, we want to minimize their square variation. E(x) and E(y) represent the mean of xi and yi. - slope: b = sigma ((xi-E(x)(yi-E(y))) / sigma ((xi-E(x))(xi-E(x))) - intercept: a = E(y) - b.E(x) """ def __init__(self, freqdist, bins=None): """ :param freqdist: The frequency counts upon which to base the estimation. :type freqdist: FreqDist :param bins: The number of possible event types. This must be larger than the number of bins in the ``freqdist``. If None, then it's assumed to be equal to ``freqdist``.B() + 1 :type bins: int """ assert bins is None or bins > freqdist.B(),\ 'Bins parameter must not be less than freqdist.B() + 1' if bins is None: bins = freqdist.B() + 1 self._freqdist = freqdist self._bins = bins r, nr = self._r_Nr() self.find_best_fit(r, nr) self._switch(r, nr) self._renormalize(r, nr) def _r_Nr(self): """ Split the frequency distribution in two list (r, Nr), where Nr(r) > 0 """ r, nr = [], [] b, i = 0, 0 while b != self._freqdist.B(): nr_i = self._freqdist.Nr(i) if nr_i > 0: b += nr_i r.append(i) nr.append(nr_i) i += 1 return (r, nr) def find_best_fit(self, r, nr): """ Use simple linear regression to tune parameters self._slope and self._intercept in the log-log space based on count and Nr(count) (Work in log space to avoid floating point underflow.) """ # For higher sample frequencies the data points becomes horizontal # along line Nr=1. 
To create a more evident linear model in log-log # space, we average positive Nr values with the surrounding zero # values. (Church and Gale, 1991) if not r or not nr: # Empty r or nr? return zr = [] for j in range(len(r)): if j > 0: i = r[j-1] else: i = 0 if j != len(r) - 1: k = r[j+1] else: k = 2 * r[j] - i zr_ = 2.0 * nr[j] / (k - i) zr.append(zr_) log_r = [math.log(i) for i in r] log_zr = [math.log(i) for i in zr] xy_cov = x_var = 0.0 x_mean = 1.0 * sum(log_r) / len(log_r) y_mean = 1.0 * sum(log_zr) / len(log_zr) for (x, y) in zip(log_r, log_zr): xy_cov += (x - x_mean) * (y - y_mean) x_var += (x - x_mean)**2 if x_var != 0: self._slope = xy_cov / x_var else: self._slope = 0.0 self._intercept = y_mean - self._slope * x_mean def _switch(self, r, nr): """ Calculate the r frontier where we must switch from Nr to Sr when estimating E[Nr]. """ for i, r_ in enumerate(r): if len(r) == i + 1 or r[i+1] != r_ + 1: # We are at the end of r, or there is a gap in r self._switch_at = r_ break Sr = self.smoothedNr smooth_r_star = (r_ + 1) * Sr(r_+1) / Sr(r_) unsmooth_r_star = 1.0 * (r_ + 1) * nr[i+1] / nr[i] std = math.sqrt(self._variance(r_, nr[i], nr[i+1])) if abs(unsmooth_r_star-smooth_r_star) <= 1.96 * std: self._switch_at = r_ break def _variance(self, r, nr, nr_1): r = float(r) nr = float(nr) nr_1 = float(nr_1) return (r + 1.0)**2 * (nr_1 / nr**2) * (1.0 + nr_1 / nr) def _renormalize(self, r, nr): """ It is necessary to renormalize all the probability estimates to ensure a proper probability distribution results. This can be done by keeping the estimate of the probability mass for unseen items as N(1)/N and renormalizing all the estimates for previously seen items (as Gale and Sampson (1995) propose). (See M&S P.213, 1999) """ prob_cov = 0.0 for r_, nr_ in zip(r, nr): prob_cov += nr_ * self._prob_measure(r_) if prob_cov: self._renormal = (1 - self._prob_measure(0)) / prob_cov def smoothedNr(self, r): """ Return the number of samples with count r. :param r: The amount of freqency. :type r: int :rtype: float """ # Nr = a*r^b (with b < -1 to give the appropriate hyperbolic # relationship) # Estimate a and b by simple linear regression technique on # the logarithmic form of the equation: log Nr = a + b*log(r) return math.exp(self._intercept + self._slope * math.log(r)) def prob(self, sample): """ Return the sample's probability. :param sample: sample of the event :type sample: str :rtype: float """ count = self._freqdist[sample] p = self._prob_measure(count) if count == 0: if self._bins == self._freqdist.B(): p = 0.0 else: p = p / (1.0 * self._bins - self._freqdist.B()) else: p = p * self._renormal return p def _prob_measure(self, count): if count == 0 and self._freqdist.N() == 0 : return 1.0 elif count == 0 and self._freqdist.N() != 0: return 1.0 * self._freqdist.Nr(1) / self._freqdist.N() if self._switch_at > count: Er_1 = 1.0 * self._freqdist.Nr(count+1) Er = 1.0 * self._freqdist.Nr(count) else: Er_1 = self.smoothedNr(count+1) Er = self.smoothedNr(count) r_star = (count + 1) * Er_1 / Er return r_star / self._freqdist.N() def check(self): prob_sum = 0.0 for i in range(0, len(self._Nr)): prob_sum += self._Nr[i] * self._prob_measure(i) / self._renormal print "Probability Sum:", prob_sum #assert prob_sum != 1.0, "probability sum should be one!" def discount(self): """ This function returns the total mass of probability transfers from the seen samples to the unseen samples. 
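        In this implementation it is computed as S(1)/N, where S(1) is the
        smoothed count of samples seen exactly once (``smoothedNr(1)``) and
        N is the total number of observed outcomes.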
""" return 1.0 * self.smoothedNr(1) / self._freqdist.N() def max(self): return self._freqdist.max() def samples(self): return self._freqdist.keys() def freqdist(self): return self._freqdist def __repr__(self): """ Return a string representation of this ``ProbDist``. :rtype: str """ return '<SimpleGoodTuringProbDist based on %d samples>'\ % self._freqdist.N() class MutableProbDist(ProbDistI): """ An mutable probdist where the probabilities may be easily modified. This simply copies an existing probdist, storing the probability values in a mutable dictionary and providing an update method. """ def __init__(self, prob_dist, samples, store_logs=True): """ Creates the mutable probdist based on the given prob_dist and using the list of samples given. These values are stored as log probabilities if the store_logs flag is set. :param prob_dist: the distribution from which to garner the probabilities :type prob_dist: ProbDist :param samples: the complete set of samples :type samples: sequence of any :param store_logs: whether to store the probabilities as logarithms :type store_logs: bool """ try: import numpy except ImportError: print "Error: Please install numpy; for instructions see http://www.nltk.org/" exit() self._samples = samples self._sample_dict = dict((samples[i], i) for i in range(len(samples))) self._data = numpy.zeros(len(samples), numpy.float64) for i in range(len(samples)): if store_logs: self._data[i] = prob_dist.logprob(samples[i]) else: self._data[i] = prob_dist.prob(samples[i]) self._logs = store_logs def samples(self): # inherit documentation return self._samples def prob(self, sample): # inherit documentation i = self._sample_dict.get(sample) if i is not None: if self._logs: return 2**(self._data[i]) else: return self._data[i] else: return 0.0 def logprob(self, sample): # inherit documentation i = self._sample_dict.get(sample) if i is not None: if self._logs: return self._data[i] else: return math.log(self._data[i], 2) else: return float('-inf') def update(self, sample, prob, log=True): """ Update the probability for the given sample. This may cause the object to stop being the valid probability distribution - the user must ensure that they update the sample probabilities such that all samples have probabilities between 0 and 1 and that all probabilities sum to one. :param sample: the sample for which to update the probability :type sample: any :param prob: the new probability :type prob: float :param log: is the probability already logged :type log: bool """ i = self._sample_dict.get(sample) assert i is not None if self._logs: if log: self._data[i] = prob else: self._data[i] = math.log(prob, 2) else: if log: self._data[i] = 2**(prob) else: self._data[i] = prob ##////////////////////////////////////////////////////// ## Probability Distribution Operations ##////////////////////////////////////////////////////// def log_likelihood(test_pdist, actual_pdist): if (not isinstance(test_pdist, ProbDistI) or not isinstance(actual_pdist, ProbDistI)): raise ValueError('expected a ProbDist.') # Is this right? 
return sum(actual_pdist.prob(s) * math.log(test_pdist.prob(s), 2) for s in actual_pdist) def entropy(pdist): probs = [pdist.prob(s) for s in pdist.samples()] return -sum([p * math.log(p,2) for p in probs]) ##////////////////////////////////////////////////////// ## Conditional Distributions ##////////////////////////////////////////////////////// class ConditionalFreqDist(defaultdict): """ A collection of frequency distributions for a single experiment run under different conditions. Conditional frequency distributions are used to record the number of times each sample occurred, given the condition under which the experiment was run. For example, a conditional frequency distribution could be used to record the frequency of each word (type) in a document, given its length. Formally, a conditional frequency distribution can be defined as a function that maps from each condition to the FreqDist for the experiment under that condition. Conditional frequency distributions are typically constructed by repeatedly running an experiment under a variety of conditions, and incrementing the sample outcome counts for the appropriate conditions. For example, the following code will produce a conditional frequency distribution that encodes how often each word type occurs, given the length of that word type: >>> from nltk.probability import ConditionalFreqDist >>> from nltk.tokenize import word_tokenize >>> sent = "the the the dog dog some other words that we do not care about" >>> cfdist = ConditionalFreqDist() >>> for word in word_tokenize(sent): ... condition = len(word) ... cfdist[condition].inc(word) An equivalent way to do this is with the initializer: >>> cfdist = ConditionalFreqDist((len(word), word) for word in word_tokenize(sent)) The frequency distribution for each condition is accessed using the indexing operator: >>> cfdist[3] <FreqDist with 6 outcomes> >>> cfdist[3].freq('the') 0.5 >>> cfdist[3]['dog'] 2 When the indexing operator is used to access the frequency distribution for a condition that has not been accessed before, ``ConditionalFreqDist`` creates a new empty FreqDist for that condition. """ def __init__(self, cond_samples=None): """ Construct a new empty conditional frequency distribution. In particular, the count for every sample, under every condition, is zero. :param cond_samples: The samples to initialize the conditional frequency distribution with :type cond_samples: Sequence of (condition, sample) tuples """ defaultdict.__init__(self, FreqDist) if cond_samples: for (cond, sample) in cond_samples: self[cond].inc(sample) def conditions(self): """ Return a list of the conditions that have been accessed for this ``ConditionalFreqDist``. Use the indexing operator to access the frequency distribution for a given condition. Note that the frequency distributions for some conditions may contain zero sample outcomes. :rtype: list """ return sorted(self.keys()) def N(self): """ Return the total number of sample outcomes that have been recorded by this ``ConditionalFreqDist``. :rtype: int """ return sum(fdist.N() for fdist in self.itervalues()) def plot(self, *args, **kwargs): """ Plot the given samples from the conditional frequency distribution. For a cumulative plot, specify cumulative=True. (Requires Matplotlib to be installed.) 
:param samples: The samples to plot :type samples: list :param title: The title for the graph :type title: str :param conditions: The conditions to plot (default is all) :type conditions: list """ try: import pylab except ImportError: raise ValueError('The plot function requires the matplotlib package (aka pylab).' 'See http://matplotlib.sourceforge.net/') cumulative = _get_kwarg(kwargs, 'cumulative', False) conditions = _get_kwarg(kwargs, 'conditions', self.conditions()) title = _get_kwarg(kwargs, 'title', '') samples = _get_kwarg(kwargs, 'samples', sorted(set(v for c in conditions for v in self[c]))) # this computation could be wasted if not "linewidth" in kwargs: kwargs["linewidth"] = 2 for condition in conditions: if cumulative: freqs = list(self[condition]._cumulative_frequencies(samples)) ylabel = "Cumulative Counts" legend_loc = 'lower right' else: freqs = [self[condition][sample] for sample in samples] ylabel = "Counts" legend_loc = 'upper right' # percents = [f * 100 for f in freqs] only in ConditionalProbDist? kwargs['label'] = str(condition) pylab.plot(freqs, *args, **kwargs) pylab.legend(loc=legend_loc) pylab.grid(True, color="silver") pylab.xticks(range(len(samples)), [unicode(s) for s in samples], rotation=90) if title: pylab.title(title) pylab.xlabel("Samples") pylab.ylabel(ylabel) pylab.show() def tabulate(self, *args, **kwargs): """ Tabulate the given samples from the conditional frequency distribution. :param samples: The samples to plot :type samples: list :param title: The title for the graph :type title: str :param conditions: The conditions to plot (default is all) :type conditions: list """ cumulative = _get_kwarg(kwargs, 'cumulative', False) conditions = _get_kwarg(kwargs, 'conditions', self.conditions()) samples = _get_kwarg(kwargs, 'samples', sorted(set(v for c in conditions for v in self[c]))) # this computation could be wasted condition_size = max(len(str(c)) for c in conditions) print ' ' * condition_size, for s in samples: print "%4s" % str(s), print for c in conditions: print "%*s" % (condition_size, str(c)), if cumulative: freqs = list(self[c]._cumulative_frequencies(samples)) else: freqs = [self[c][sample] for sample in samples] for f in freqs: print "%4d" % f, print def __le__(self, other): if not isinstance(other, ConditionalFreqDist): return False return set(self.conditions()).issubset(other.conditions()) \ and all(self[c] <= other[c] for c in self.conditions()) def __lt__(self, other): if not isinstance(other, ConditionalFreqDist): return False return self <= other and self != other def __ge__(self, other): if not isinstance(other, ConditionalFreqDist): return False return other <= self def __gt__(self, other): if not isinstance(other, ConditionalFreqDist): return False return other < self def __repr__(self): """ Return a string representation of this ``ConditionalFreqDist``. :rtype: str """ return '<ConditionalFreqDist with %d conditions>' % len(self) class ConditionalProbDistI(defaultdict): """ A collection of probability distributions for a single experiment run under different conditions. Conditional probability distributions are used to estimate the likelihood of each sample, given the condition under which the experiment was run. For example, a conditional probability distribution could be used to estimate the probability of each word type in a document, given the length of the word type. 
Formally, a conditional probability distribution can be defined as a function that maps from each condition to the ``ProbDist`` for the experiment under that condition. """ def __init__(self): raise NotImplementedError("Interfaces can't be instantiated") def conditions(self): """ Return a list of the conditions that are represented by this ``ConditionalProbDist``. Use the indexing operator to access the probability distribution for a given condition. :rtype: list """ return self.keys() def __repr__(self): """ Return a string representation of this ``ConditionalProbDist``. :rtype: str """ return '<%s with %d conditions>' % (type(self).__name__, len(self)) class ConditionalProbDist(ConditionalProbDistI): """ A conditional probability distribution modelling the experiments that were used to generate a conditional frequency distribution. A ConditionalProbDist is constructed from a ``ConditionalFreqDist`` and a ``ProbDist`` factory: - The ``ConditionalFreqDist`` specifies the frequency distribution for each condition. - The ``ProbDist`` factory is a function that takes a condition's frequency distribution, and returns its probability distribution. A ``ProbDist`` class's name (such as ``MLEProbDist`` or ``HeldoutProbDist``) can be used to specify that class's constructor. The first argument to the ``ProbDist`` factory is the frequency distribution that it should model; and the remaining arguments are specified by the ``factory_args`` parameter to the ``ConditionalProbDist`` constructor. For example, the following code constructs a ``ConditionalProbDist``, where the probability distribution for each condition is an ``ELEProbDist`` with 10 bins: >>> from nltk.probability import ConditionalProbDist, ELEProbDist >>> cpdist = ConditionalProbDist(cfdist, ELEProbDist, 10) >>> print cpdist['run'].max() 'NN' >>> print cpdist['run'].prob('NN') 0.0813 """ def __init__(self, cfdist, probdist_factory, *factory_args, **factory_kw_args): """ Construct a new conditional probability distribution, based on the given conditional frequency distribution and ``ProbDist`` factory. :type cfdist: ConditionalFreqDist :param cfdist: The ``ConditionalFreqDist`` specifying the frequency distribution for each condition. :type probdist_factory: class or function :param probdist_factory: The function or class that maps a condition's frequency distribution to its probability distribution. The function is called with the frequency distribution as its first argument, ``factory_args`` as its remaining arguments, and ``factory_kw_args`` as keyword arguments. :type factory_args: (any) :param factory_args: Extra arguments for ``probdist_factory``. These arguments are usually used to specify extra properties for the probability distributions of individual conditions, such as the number of bins they contain. :type factory_kw_args: (any) :param factory_kw_args: Extra keyword arguments for ``probdist_factory``. """ # self._probdist_factory = probdist_factory # self._cfdist = cfdist # self._factory_args = factory_args # self._factory_kw_args = factory_kw_args factory = lambda: probdist_factory(FreqDist(), *factory_args, **factory_kw_args) defaultdict.__init__(self, factory) for condition in cfdist: self[condition] = probdist_factory(cfdist[condition], *factory_args, **factory_kw_args) class DictionaryConditionalProbDist(ConditionalProbDistI): """ An alternative ConditionalProbDist that simply wraps a dictionary of ProbDists rather than creating these from FreqDists. 
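    A small illustrative example (hypothetical conditions and outcomes):

        >>> probs = {'fair': DictionaryProbDist({'H': 0.5, 'T': 0.5}),
        ...          'biased': DictionaryProbDist({'H': 0.9, 'T': 0.1})}
        >>> cpdist = DictionaryConditionalProbDist(probs)
        >>> cpdist['biased'].prob('H')
        0.9
        >>> sorted(cpdist.conditions())
        ['biased', 'fair']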
""" def __init__(self, probdist_dict): """ :param probdist_dict: a dictionary containing the probdists indexed by the conditions :type probdist_dict: dict any -> probdist """ defaultdict.__init__(self, DictionaryProbDist) self.update(probdist_dict) ##////////////////////////////////////////////////////// ## Adding in log-space. ##////////////////////////////////////////////////////// # If the difference is bigger than this, then just take the bigger one: _ADD_LOGS_MAX_DIFF = math.log(1e-30, 2) def add_logs(logx, logy): """ Given two numbers ``logx`` = *log(x)* and ``logy`` = *log(y)*, return *log(x+y)*. Conceptually, this is the same as returning ``log(2**(logx)+2**(logy))``, but the actual implementation avoids overflow errors that could result from direct computation. """ if (logx < logy + _ADD_LOGS_MAX_DIFF): return logy if (logy < logx + _ADD_LOGS_MAX_DIFF): return logx base = min(logx, logy) return base + math.log(2**(logx-base) + 2**(logy-base), 2) def sum_logs(logs): if len(logs) == 0: # Use some approximation to infinity. What this does # depends on your system's float implementation. return _NINF else: return reduce(add_logs, logs[1:], logs[0]) ##////////////////////////////////////////////////////// ## Probabilistic Mix-in ##////////////////////////////////////////////////////// class ProbabilisticMixIn(object): """ A mix-in class to associate probabilities with other classes (trees, rules, etc.). To use the ``ProbabilisticMixIn`` class, define a new class that derives from an existing class and from ProbabilisticMixIn. You will need to define a new constructor for the new class, which explicitly calls the constructors of both its parent classes. For example: >>> from nltk.probability import ProbabilisticMixIn >>> class A: ... def __init__(self, x, y): self.data = (x,y) ... >>> class ProbabilisticA(A, ProbabilisticMixIn): ... def __init__(self, x, y, **prob_kwarg): ... A.__init__(self, x, y) ... ProbabilisticMixIn.__init__(self, **prob_kwarg) See the documentation for the ProbabilisticMixIn ``constructor<__init__>`` for information about the arguments it expects. You should generally also redefine the string representation methods, the comparison methods, and the hashing method. """ def __init__(self, **kwargs): """ Initialize this object's probability. This initializer should be called by subclass constructors. ``prob`` should generally be the first argument for those constructors. :param prob: The probability associated with the object. :type prob: float :param logprob: The log of the probability associated with the object. :type logprob: float """ if 'prob' in kwargs: if 'logprob' in kwargs: raise TypeError('Must specify either prob or logprob ' '(not both)') else: ProbabilisticMixIn.set_prob(self, kwargs['prob']) elif 'logprob' in kwargs: ProbabilisticMixIn.set_logprob(self, kwargs['logprob']) else: self.__prob = self.__logprob = None def set_prob(self, prob): """ Set the probability associated with this object to ``prob``. :param prob: The new probability :type prob: float """ self.__prob = prob self.__logprob = None def set_logprob(self, logprob): """ Set the log probability associated with this object to ``logprob``. I.e., set the probability associated with this object to ``2**(logprob)``. :param logprob: The new log probability :type logprob: float """ self.__logprob = logprob self.__prob = None def prob(self): """ Return the probability associated with this object. 
:rtype: float """ if self.__prob is None: if self.__logprob is None: return None self.__prob = 2**(self.__logprob) return self.__prob def logprob(self): """ Return ``log(p)``, where ``p`` is the probability associated with this object. :rtype: float """ if self.__logprob is None: if self.__prob is None: return None self.__logprob = math.log(self.__prob, 2) return self.__logprob class ImmutableProbabilisticMixIn(ProbabilisticMixIn): def set_prob(self, prob): raise ValueError, '%s is immutable' % self.__class__.__name__ def set_logprob(self, prob): raise ValueError, '%s is immutable' % self.__class__.__name__ ## Helper function for processing keyword arguments def _get_kwarg(kwargs, key, default): if key in kwargs: arg = kwargs[key] del kwargs[key] else: arg = default return arg ##////////////////////////////////////////////////////// ## Demonstration ##////////////////////////////////////////////////////// def _create_rand_fdist(numsamples, numoutcomes): """ Create a new frequency distribution, with random samples. The samples are numbers from 1 to ``numsamples``, and are generated by summing two numbers, each of which has a uniform distribution. """ import random fdist = FreqDist() for x in range(numoutcomes): y = (random.randint(1, (1+numsamples)/2) + random.randint(0, numsamples/2)) fdist.inc(y) return fdist def _create_sum_pdist(numsamples): """ Return the true probability distribution for the experiment ``_create_rand_fdist(numsamples, x)``. """ fdist = FreqDist() for x in range(1, (1+numsamples)/2+1): for y in range(0, numsamples/2+1): fdist.inc(x+y) return MLEProbDist(fdist) def demo(numsamples=6, numoutcomes=500): """ A demonstration of frequency distributions and probability distributions. This demonstration creates three frequency distributions with, and uses them to sample a random process with ``numsamples`` samples. Each frequency distribution is sampled ``numoutcomes`` times. These three frequency distributions are then used to build six probability distributions. Finally, the probability estimates of these distributions are compared to the actual probability of each sample. :type numsamples: int :param numsamples: The number of samples to use in each demo frequency distributions. :type numoutcomes: int :param numoutcomes: The total number of outcomes for each demo frequency distribution. These outcomes are divided into ``numsamples`` bins. :rtype: None """ # Randomly sample a stochastic process three times. fdist1 = _create_rand_fdist(numsamples, numoutcomes) fdist2 = _create_rand_fdist(numsamples, numoutcomes) fdist3 = _create_rand_fdist(numsamples, numoutcomes) # Use our samples to create probability distributions. pdists = [ MLEProbDist(fdist1), LidstoneProbDist(fdist1, 0.5, numsamples), HeldoutProbDist(fdist1, fdist2, numsamples), HeldoutProbDist(fdist2, fdist1, numsamples), CrossValidationProbDist([fdist1, fdist2, fdist3], numsamples), GoodTuringProbDist(fdist1), SimpleGoodTuringProbDist(fdist1), SimpleGoodTuringProbDist(fdist1, 7), _create_sum_pdist(numsamples), ] # Find the probability of each sample. vals = [] for n in range(1,numsamples+1): vals.append(tuple([n, fdist1.freq(n)] + [pdist.prob(n) for pdist in pdists])) # Print the results in a formatted table. 
print ('%d samples (1-%d); %d outcomes were sampled for each FreqDist' % (numsamples, numsamples, numoutcomes)) print '='*9*(len(pdists)+2) FORMATSTR = ' FreqDist '+ '%8s '*(len(pdists)-1) + '| Actual' print FORMATSTR % tuple(`pdist`[1:9] for pdist in pdists[:-1]) print '-'*9*(len(pdists)+2) FORMATSTR = '%3d %8.6f ' + '%8.6f '*(len(pdists)-1) + '| %8.6f' for val in vals: print FORMATSTR % val # Print the totals for each column (should all be 1.0) zvals = zip(*vals) def sum(lst): return reduce(lambda x,y:x+y, lst, 0) sums = [sum(val) for val in zvals[1:]] print '-'*9*(len(pdists)+2) FORMATSTR = 'Total ' + '%8.6f '*(len(pdists)) + '| %8.6f' print FORMATSTR % tuple(sums) print '='*9*(len(pdists)+2) # Display the distributions themselves, if they're short enough. if len(`str(fdist1)`) < 70: print ' fdist1:', str(fdist1) print ' fdist2:', str(fdist2) print ' fdist3:', str(fdist3) print print 'Generating:' for pdist in pdists: fdist = FreqDist(pdist.generate() for i in range(5000)) print '%20s %s' % (pdist.__class__.__name__[:20], str(fdist)[:55]) print def gt_demo(): from nltk import corpus emma_words = corpus.gutenberg.words('austen-emma.txt') fd = FreqDist(emma_words) gt = GoodTuringProbDist(fd) sgt = SimpleGoodTuringProbDist(fd) katz = SimpleGoodTuringProbDist(fd, 7) print '%18s %8s %12s %14s %12s' \ % ("word", "freqency", "GoodTuring", "SimpleGoodTuring", "Katz-cutoff" ) for key in fd: print '%18s %8d %12e %14e %12e' \ % (key, fd[key], gt.prob(key), sgt.prob(key), katz.prob(key)) if __name__ == '__main__': demo(6, 10) demo(5, 5000) gt_demo() __all__ = ['ConditionalFreqDist', 'ConditionalProbDist', 'ConditionalProbDistI', 'CrossValidationProbDist', 'DictionaryConditionalProbDist', 'DictionaryProbDist', 'ELEProbDist', 'FreqDist', 'GoodTuringProbDist', 'SimpleGoodTuringProbDist', 'HeldoutProbDist', 'ImmutableProbabilisticMixIn', 'LaplaceProbDist', 'LidstoneProbDist', 'MLEProbDist', 'MutableProbDist', 'ProbDistI', 'ProbabilisticMixIn', 'UniformProbDist', 'WittenBellProbDist', 'add_logs', 'log_likelihood', 'sum_logs', 'entropy']
agpl-3.0
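The probability module in the record above closes with the log-space helpers. Below is a worked example of the same base-2 log-addition idea, kept standalone so it can be run without NLTK; the probe values are made up purely for illustration.

import math

_MAX_DIFF = math.log(1e-30, 2)  # beyond this gap the smaller term no longer changes the sum

def add_logs(logx, logy):
    # Return log2(x + y) given log2(x) and log2(y), without ever forming x or y directly.
    if logx < logy + _MAX_DIFF:
        return logy
    if logy < logx + _MAX_DIFF:
        return logx
    base = min(logx, logy)
    return base + math.log(2 ** (logx - base) + 2 ** (logy - base), 2)

# 0.25 + 0.75 = 1.0, so the log-space sum of their base-2 logs is 0.0:
print(add_logs(math.log(0.25, 2), math.log(0.75, 2)))   # -> ~0.0
# Direct exponentiation of 2**-2000 underflows to 0.0, but the log-space sum stays usable:
print(add_logs(-2000.0, -2000.0))                        # -> -1999.0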
jmschrei/scikit-learn
sklearn/datasets/mlcomp.py
289
3855
# Copyright (c) 2010 Olivier Grisel <olivier.grisel@ensta.org> # License: BSD 3 clause """Glue code to load http://mlcomp.org data as a scikit.learn dataset""" import os import numbers from sklearn.datasets.base import load_files def _load_document_classification(dataset_path, metadata, set_=None, **kwargs): if set_ is not None: dataset_path = os.path.join(dataset_path, set_) return load_files(dataset_path, metadata.get('description'), **kwargs) LOADERS = { 'DocumentClassification': _load_document_classification, # TODO: implement the remaining domain formats } def load_mlcomp(name_or_id, set_="raw", mlcomp_root=None, **kwargs): """Load a datasets as downloaded from http://mlcomp.org Parameters ---------- name_or_id : the integer id or the string name metadata of the MLComp dataset to load set_ : select the portion to load: 'train', 'test' or 'raw' mlcomp_root : the filesystem path to the root folder where MLComp datasets are stored, if mlcomp_root is None, the MLCOMP_DATASETS_HOME environment variable is looked up instead. **kwargs : domain specific kwargs to be passed to the dataset loader. Read more in the :ref:`User Guide <datasets>`. Returns ------- data : Bunch Dictionary-like object, the interesting attributes are: 'filenames', the files holding the raw to learn, 'target', the classification labels (integer index), 'target_names', the meaning of the labels, and 'DESCR', the full description of the dataset. Note on the lookup process: depending on the type of name_or_id, will choose between integer id lookup or metadata name lookup by looking at the unzipped archives and metadata file. TODO: implement zip dataset loading too """ if mlcomp_root is None: try: mlcomp_root = os.environ['MLCOMP_DATASETS_HOME'] except KeyError: raise ValueError("MLCOMP_DATASETS_HOME env variable is undefined") mlcomp_root = os.path.expanduser(mlcomp_root) mlcomp_root = os.path.abspath(mlcomp_root) mlcomp_root = os.path.normpath(mlcomp_root) if not os.path.exists(mlcomp_root): raise ValueError("Could not find folder: " + mlcomp_root) # dataset lookup if isinstance(name_or_id, numbers.Integral): # id lookup dataset_path = os.path.join(mlcomp_root, str(name_or_id)) else: # assume name based lookup dataset_path = None expected_name_line = "name: " + name_or_id for dataset in os.listdir(mlcomp_root): metadata_file = os.path.join(mlcomp_root, dataset, 'metadata') if not os.path.exists(metadata_file): continue with open(metadata_file) as f: for line in f: if line.strip() == expected_name_line: dataset_path = os.path.join(mlcomp_root, dataset) break if dataset_path is None: raise ValueError("Could not find dataset with metadata line: " + expected_name_line) # loading the dataset metadata metadata = dict() metadata_file = os.path.join(dataset_path, 'metadata') if not os.path.exists(metadata_file): raise ValueError(dataset_path + ' is not a valid MLComp dataset') with open(metadata_file) as f: for line in f: if ":" in line: key, value = line.split(":", 1) metadata[key.strip()] = value.strip() format = metadata.get('format', 'unknow') loader = LOADERS.get(format) if loader is None: raise ValueError("No loader implemented for format: " + format) return loader(dataset_path, metadata, set_=set_, **kwargs)
bsd-3-clause
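The loader in the mlcomp.py record above resolves a dataset either by integer id (the folder name) or by scanning each folder's metadata file for a matching "name:" line. A usage sketch follows, assuming the MLComp archive has already been downloaded and unzipped; the directory path and the dataset name '20news-18828' are illustrative and not shipped with scikit-learn.

import os
from sklearn.datasets.mlcomp import load_mlcomp

# Point the loader at the folder holding the unzipped MLComp datasets (assumed location).
os.environ['MLCOMP_DATASETS_HOME'] = os.path.expanduser('~/data/mlcomp')

# Name-based lookup: the loader scans <root>/<dataset>/metadata for "name: 20news-18828".
news_train = load_mlcomp('20news-18828', set_='train')
print(news_train.DESCR)             # free-text description taken from the metadata file
print(len(news_train.filenames))    # one entry per document in the train split
print(news_train.target_names[:5])  # label names, as returned by load_files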
rafwiewiora/msmbuilder
msmbuilder/utils/nearest.py
12
6505
# Author: Matthew Harrigan <matthew.p.harrigan@gmail.com> # Contributors: # Copyright (c) 2015, Stanford University and the Authors # All rights reserved. from __future__ import absolute_import, print_function, division from scipy.spatial import KDTree as sp_KDTree import numpy as np from . import check_iter_of_sequences class KDTree(object): """kd-tree for quick nearest-neighbor lookup This class provides an index into a set of k-dimensional points which can be used to rapidly look up the nearest neighbors of any point. This class wraps sklearn's implementation by taking a list of arrays and returning indices of the form (traj_i, frame_i). Parameters ---------- sequences : list of (N,K) array_like Each array contains data points to be indexed. This array is not copied, and so modifying this data will result in bogus results. leafsize : int, optional The number of points at which the algorithm switches over to brute-force. Has to be positive. Raises ------ RuntimeError The maximum recursion limit can be exceeded for large data sets. If this happens, either increase the value for the `leafsize` parameter or increase the recursion limit by:: >>> import sys >>> sys.setrecursionlimit(10000) Notes ----- The algorithm used is described in Maneewongvatana and Mount 1999. The general idea is that the kd-tree is a binary tree, each of whose nodes represents an axis-aligned hyperrectangle. Each node specifies an axis and splits the set of points based on whether their coordinate along that axis is greater than or less than a particular value. During construction, the axis and splitting point are chosen by the "sliding midpoint" rule, which ensures that the cells do not all become long and thin. The tree can be queried for the r closest neighbors of any given point (optionally returning only those within some maximum distance of the point). It can also be queried, with a substantial gain in efficiency, for the r approximate closest neighbors. For large dimensions (20 is already large) do not expect this to run significantly faster than brute force. High-dimensional nearest-neighbor queries are a substantial open problem in computer science. The tree also supports all-neighbors queries, both with arrays of points and with other kd-trees. These do use a reasonably efficient algorithm, but the kd-tree is not necessarily the best data structure for this sort of calculation. """ _allow_trajectory = False def __init__(self, sequences, leafsize=10): check_iter_of_sequences(sequences, allow_trajectory=self._allow_trajectory) self._kdtree = sp_KDTree(self._concat(sequences), leafsize=leafsize) def query(self, x, k=1, p=2, distance_upper_bound=np.inf): """Query the kd-tree for nearest neighbors Parameters ---------- x : array_like, last dimension self.m An array of points to query. k : int, optional The number of nearest neighbors to return. eps : nonnegative float, optional Return approximate nearest neighbors; the kth returned value is guaranteed to be no further than (1+eps) times the distance to the real kth nearest neighbor. p : float, 1<=p<=infinity, optional Which Minkowski p-norm to use. 1 is the sum-of-absolute-values "Manhattan" distance 2 is the usual Euclidean distance infinity is the maximum-coordinate-difference distance distance_upper_bound : nonnegative float, optional Return only neighbors within this distance. This is used to prune tree searches, so if you are doing a series of nearest-neighbor queries, it may help to supply the distance to the nearest neighbor of the most recent point. 
Returns ------- d : float or array of floats The distances to the nearest neighbors. If x has shape tuple+(self.m,), then d has shape tuple if k is one, or tuple+(k,) if k is larger than one. Missing neighbors (e.g. when k > n or distance_upper_bound is given) are indicated with infinite distances. If k is None, then d is an object array of shape tuple, containing lists of distances. In either case the hits are sorted by distance (nearest first). i : tuple(int, int) or array of tuple(int, int) The locations of the neighbors in self.data. Locations are given by tuples of (traj_i, frame_i) Examples -------- >>> from msmbuilder.utils import KDTree >>> X1 = 0.3 * np.random.RandomState(0).randn(500, 2) >>> X2 = 0.3 * np.random.RandomState(1).randn(1000, 2) + 10 >>> tree = KDTree([X1, X2]) >>> pts = np.array([[0, 0], [10, 10]]) >>> tree.query(pts) (array([ 0.0034, 0.0102]), array([[ 0, 410], [ 1, 670]])) >>> tree.query(pts[0]) (0.0034, array([ 0, 410])) """ cdists, cinds = self._kdtree.query(x, k, p, distance_upper_bound) return cdists, self._split_indices(cinds) # concat and split code lovingly copied from MultiSequenceClusterMixin def _concat(self, sequences): self.__lengths = [len(s) for s in sequences] if len(sequences) > 0 and isinstance(sequences[0], np.ndarray): concat = np.ascontiguousarray(np.concatenate(sequences)) else: raise TypeError('sequences must be a list of numpy arrays') assert sum(self.__lengths) == len(concat) return concat def _split(self, concat): return [concat[cl - l: cl] for (cl, l) in zip(np.cumsum(self.__lengths), self.__lengths)] def _split_indices(self, concat_inds): """Take indices in 'concatenated space' and return as pairs of (traj_i, frame_i) """ clengths = np.append([0], np.cumsum(self.__lengths)) mapping = np.zeros((clengths[-1], 2), dtype=int) for traj_i, (start, end) in enumerate(zip(clengths[:-1], clengths[1:])): mapping[start:end, 0] = traj_i mapping[start:end, 1] = np.arange(end - start) return mapping[concat_inds]
lgpl-2.1
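The msmbuilder KDTree wrapper above concatenates the per-trajectory arrays before building the tree, then translates flat query indices back into (traj_i, frame_i) pairs. Here is a toy reproduction of that index bookkeeping with made-up trajectory lengths.

import numpy as np

lengths = [3, 5, 2]                            # hypothetical frames per trajectory
clengths = np.append([0], np.cumsum(lengths))  # [0, 3, 8, 10]: offsets into the concatenated array

# Build the same (traj_i, frame_i) lookup table that _split_indices constructs.
mapping = np.zeros((clengths[-1], 2), dtype=int)
for traj_i, (start, end) in enumerate(zip(clengths[:-1], clengths[1:])):
    mapping[start:end, 0] = traj_i
    mapping[start:end, 1] = np.arange(end - start)

print(mapping[4])   # -> [1 1]: flat index 4 is frame 1 of trajectory 1
print(mapping[9])   # -> [2 1]: flat index 9 is frame 1 of trajectory 2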
pv/scikit-learn
sklearn/tree/tree.py
113
34767
""" This module gathers tree-based methods, including decision, regression and randomized trees. Single and multi-output problems are both handled. """ # Authors: Gilles Louppe <g.louppe@gmail.com> # Peter Prettenhofer <peter.prettenhofer@gmail.com> # Brian Holt <bdholt1@gmail.com> # Noel Dawe <noel@dawe.me> # Satrajit Gosh <satrajit.ghosh@gmail.com> # Joly Arnaud <arnaud.v.joly@gmail.com> # Fares Hedayati <fares.hedayati@gmail.com> # # Licence: BSD 3 clause from __future__ import division import numbers from abc import ABCMeta, abstractmethod import numpy as np from scipy.sparse import issparse from ..base import BaseEstimator, ClassifierMixin, RegressorMixin from ..externals import six from ..feature_selection.from_model import _LearntSelectorMixin from ..utils import check_array, check_random_state, compute_sample_weight from ..utils.validation import NotFittedError from ._tree import Criterion from ._tree import Splitter from ._tree import DepthFirstTreeBuilder, BestFirstTreeBuilder from ._tree import Tree from . import _tree __all__ = ["DecisionTreeClassifier", "DecisionTreeRegressor", "ExtraTreeClassifier", "ExtraTreeRegressor"] # ============================================================================= # Types and constants # ============================================================================= DTYPE = _tree.DTYPE DOUBLE = _tree.DOUBLE CRITERIA_CLF = {"gini": _tree.Gini, "entropy": _tree.Entropy} CRITERIA_REG = {"mse": _tree.MSE, "friedman_mse": _tree.FriedmanMSE} DENSE_SPLITTERS = {"best": _tree.BestSplitter, "presort-best": _tree.PresortBestSplitter, "random": _tree.RandomSplitter} SPARSE_SPLITTERS = {"best": _tree.BestSparseSplitter, "random": _tree.RandomSparseSplitter} # ============================================================================= # Base decision tree # ============================================================================= class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator, _LearntSelectorMixin)): """Base class for decision trees. Warning: This class should not be used directly. Use derived classes instead. """ @abstractmethod def __init__(self, criterion, splitter, max_depth, min_samples_split, min_samples_leaf, min_weight_fraction_leaf, max_features, max_leaf_nodes, random_state, class_weight=None): self.criterion = criterion self.splitter = splitter self.max_depth = max_depth self.min_samples_split = min_samples_split self.min_samples_leaf = min_samples_leaf self.min_weight_fraction_leaf = min_weight_fraction_leaf self.max_features = max_features self.random_state = random_state self.max_leaf_nodes = max_leaf_nodes self.class_weight = class_weight self.n_features_ = None self.n_outputs_ = None self.classes_ = None self.n_classes_ = None self.tree_ = None self.max_features_ = None def fit(self, X, y, sample_weight=None, check_input=True): """Build a decision tree from the training set (X, y). Parameters ---------- X : array-like or sparse matrix, shape = [n_samples, n_features] The training input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csc_matrix``. y : array-like, shape = [n_samples] or [n_samples, n_outputs] The target values (class labels in classification, real numbers in regression). In the regression case, use ``dtype=np.float64`` and ``order='C'`` for maximum efficiency. sample_weight : array-like, shape = [n_samples] or None Sample weights. If None, then samples are equally weighted. 
Splits that would create child nodes with net zero or negative weight are ignored while searching for a split in each node. In the case of classification, splits are also ignored if they would result in any single class carrying a negative weight in either child node. check_input : boolean, (default=True) Allow to bypass several input checking. Don't use this parameter unless you know what you do. Returns ------- self : object Returns self. """ random_state = check_random_state(self.random_state) if check_input: X = check_array(X, dtype=DTYPE, accept_sparse="csc") if issparse(X): X.sort_indices() if X.indices.dtype != np.intc or X.indptr.dtype != np.intc: raise ValueError("No support for np.int64 index based " "sparse matrices") # Determine output settings n_samples, self.n_features_ = X.shape is_classification = isinstance(self, ClassifierMixin) y = np.atleast_1d(y) expanded_class_weight = None if y.ndim == 1: # reshape is necessary to preserve the data contiguity against vs # [:, np.newaxis] that does not. y = np.reshape(y, (-1, 1)) self.n_outputs_ = y.shape[1] if is_classification: y = np.copy(y) self.classes_ = [] self.n_classes_ = [] if self.class_weight is not None: y_original = np.copy(y) y_store_unique_indices = np.zeros(y.shape, dtype=np.int) for k in range(self.n_outputs_): classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True) self.classes_.append(classes_k) self.n_classes_.append(classes_k.shape[0]) y = y_store_unique_indices if self.class_weight is not None: expanded_class_weight = compute_sample_weight( self.class_weight, y_original) else: self.classes_ = [None] * self.n_outputs_ self.n_classes_ = [1] * self.n_outputs_ self.n_classes_ = np.array(self.n_classes_, dtype=np.intp) if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous: y = np.ascontiguousarray(y, dtype=DOUBLE) # Check parameters max_depth = ((2 ** 31) - 1 if self.max_depth is None else self.max_depth) max_leaf_nodes = (-1 if self.max_leaf_nodes is None else self.max_leaf_nodes) if isinstance(self.max_features, six.string_types): if self.max_features == "auto": if is_classification: max_features = max(1, int(np.sqrt(self.n_features_))) else: max_features = self.n_features_ elif self.max_features == "sqrt": max_features = max(1, int(np.sqrt(self.n_features_))) elif self.max_features == "log2": max_features = max(1, int(np.log2(self.n_features_))) else: raise ValueError( 'Invalid value for max_features. Allowed string ' 'values are "auto", "sqrt" or "log2".') elif self.max_features is None: max_features = self.n_features_ elif isinstance(self.max_features, (numbers.Integral, np.integer)): max_features = self.max_features else: # float if self.max_features > 0.0: max_features = max(1, int(self.max_features * self.n_features_)) else: max_features = 0 self.max_features_ = max_features if len(y) != n_samples: raise ValueError("Number of labels=%d does not match " "number of samples=%d" % (len(y), n_samples)) if self.min_samples_split <= 0: raise ValueError("min_samples_split must be greater than zero.") if self.min_samples_leaf <= 0: raise ValueError("min_samples_leaf must be greater than zero.") if not 0 <= self.min_weight_fraction_leaf <= 0.5: raise ValueError("min_weight_fraction_leaf must in [0, 0.5]") if max_depth <= 0: raise ValueError("max_depth must be greater than zero. 
") if not (0 < max_features <= self.n_features_): raise ValueError("max_features must be in (0, n_features]") if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)): raise ValueError("max_leaf_nodes must be integral number but was " "%r" % max_leaf_nodes) if -1 < max_leaf_nodes < 2: raise ValueError(("max_leaf_nodes {0} must be either smaller than " "0 or larger than 1").format(max_leaf_nodes)) if sample_weight is not None: if (getattr(sample_weight, "dtype", None) != DOUBLE or not sample_weight.flags.contiguous): sample_weight = np.ascontiguousarray( sample_weight, dtype=DOUBLE) if len(sample_weight.shape) > 1: raise ValueError("Sample weights array has more " "than one dimension: %d" % len(sample_weight.shape)) if len(sample_weight) != n_samples: raise ValueError("Number of weights=%d does not match " "number of samples=%d" % (len(sample_weight), n_samples)) if expanded_class_weight is not None: if sample_weight is not None: sample_weight = sample_weight * expanded_class_weight else: sample_weight = expanded_class_weight # Set min_weight_leaf from min_weight_fraction_leaf if self.min_weight_fraction_leaf != 0. and sample_weight is not None: min_weight_leaf = (self.min_weight_fraction_leaf * np.sum(sample_weight)) else: min_weight_leaf = 0. # Set min_samples_split sensibly min_samples_split = max(self.min_samples_split, 2 * self.min_samples_leaf) # Build tree criterion = self.criterion if not isinstance(criterion, Criterion): if is_classification: criterion = CRITERIA_CLF[self.criterion](self.n_outputs_, self.n_classes_) else: criterion = CRITERIA_REG[self.criterion](self.n_outputs_) SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS splitter = self.splitter if not isinstance(self.splitter, Splitter): splitter = SPLITTERS[self.splitter](criterion, self.max_features_, self.min_samples_leaf, min_weight_leaf, random_state) self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_) # Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise if max_leaf_nodes < 0: builder = DepthFirstTreeBuilder(splitter, min_samples_split, self.min_samples_leaf, min_weight_leaf, max_depth) else: builder = BestFirstTreeBuilder(splitter, min_samples_split, self.min_samples_leaf, min_weight_leaf, max_depth, max_leaf_nodes) builder.build(self.tree_, X, y, sample_weight) if self.n_outputs_ == 1: self.n_classes_ = self.n_classes_[0] self.classes_ = self.classes_[0] return self def _validate_X_predict(self, X, check_input): """Validate X whenever one tries to predict, apply, predict_proba""" if self.tree_ is None: raise NotFittedError("Estimator not fitted, " "call `fit` before exploiting the model.") if check_input: X = check_array(X, dtype=DTYPE, accept_sparse="csr") if issparse(X) and (X.indices.dtype != np.intc or X.indptr.dtype != np.intc): raise ValueError("No support for np.int64 index based " "sparse matrices") n_features = X.shape[1] if self.n_features_ != n_features: raise ValueError("Number of features of the model must " " match the input. Model n_features is %s and " " input n_features is %s " % (self.n_features_, n_features)) return X def predict(self, X, check_input=True): """Predict class or regression value for X. For a classification model, the predicted class for each sample in X is returned. For a regression model, the predicted value based on X is returned. Parameters ---------- X : array-like or sparse matrix of shape = [n_samples, n_features] The input samples. 
Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. check_input : boolean, (default=True) Allow to bypass several input checking. Don't use this parameter unless you know what you do. Returns ------- y : array of shape = [n_samples] or [n_samples, n_outputs] The predicted classes, or the predict values. """ X = self._validate_X_predict(X, check_input) proba = self.tree_.predict(X) n_samples = X.shape[0] # Classification if isinstance(self, ClassifierMixin): if self.n_outputs_ == 1: return self.classes_.take(np.argmax(proba, axis=1), axis=0) else: predictions = np.zeros((n_samples, self.n_outputs_)) for k in range(self.n_outputs_): predictions[:, k] = self.classes_[k].take( np.argmax(proba[:, k], axis=1), axis=0) return predictions # Regression else: if self.n_outputs_ == 1: return proba[:, 0] else: return proba[:, :, 0] def apply(self, X, check_input=True): """ Returns the index of the leaf that each sample is predicted as. Parameters ---------- X : array_like or sparse matrix, shape = [n_samples, n_features] The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. check_input : boolean, (default=True) Allow to bypass several input checking. Don't use this parameter unless you know what you do. Returns ------- X_leaves : array_like, shape = [n_samples,] For each datapoint x in X, return the index of the leaf x ends up in. Leaves are numbered within ``[0; self.tree_.node_count)``, possibly with gaps in the numbering. """ X = self._validate_X_predict(X, check_input) return self.tree_.apply(X) @property def feature_importances_(self): """Return the feature importances. The importance of a feature is computed as the (normalized) total reduction of the criterion brought by that feature. It is also known as the Gini importance. Returns ------- feature_importances_ : array, shape = [n_features] """ if self.tree_ is None: raise NotFittedError("Estimator not fitted, call `fit` before" " `feature_importances_`.") return self.tree_.compute_feature_importances() # ============================================================================= # Public estimators # ============================================================================= class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin): """A decision tree classifier. Read more in the :ref:`User Guide <tree>`. Parameters ---------- criterion : string, optional (default="gini") The function to measure the quality of a split. Supported criteria are "gini" for the Gini impurity and "entropy" for the information gain. splitter : string, optional (default="best") The strategy used to choose the split at each node. Supported strategies are "best" to choose the best split and "random" to choose the best random split. max_features : int, float, string or None, optional (default=None) The number of features to consider when looking for the best split: - If int, then consider `max_features` features at each split. - If float, then `max_features` is a percentage and `int(max_features * n_features)` features are considered at each split. - If "auto", then `max_features=sqrt(n_features)`. - If "sqrt", then `max_features=sqrt(n_features)`. - If "log2", then `max_features=log2(n_features)`. - If None, then `max_features=n_features`. 
Note: the search for a split does not stop until at least one valid partition of the node samples is found, even if it requires to effectively inspect more than ``max_features`` features. max_depth : int or None, optional (default=None) The maximum depth of the tree. If None, then nodes are expanded until all leaves are pure or until all leaves contain less than min_samples_split samples. Ignored if ``max_leaf_nodes`` is not None. min_samples_split : int, optional (default=2) The minimum number of samples required to split an internal node. min_samples_leaf : int, optional (default=1) The minimum number of samples required to be at a leaf node. min_weight_fraction_leaf : float, optional (default=0.) The minimum weighted fraction of the input samples required to be at a leaf node. max_leaf_nodes : int or None, optional (default=None) Grow a tree with ``max_leaf_nodes`` in best-first fashion. Best nodes are defined as relative reduction in impurity. If None then unlimited number of leaf nodes. If not None then ``max_depth`` will be ignored. class_weight : dict, list of dicts, "balanced" or None, optional (default=None) Weights associated with classes in the form ``{class_label: weight}``. If not given, all classes are supposed to have weight one. For multi-output problems, a list of dicts can be provided in the same order as the columns of y. The "balanced" mode uses the values of y to automatically adjust weights inversely proportional to class frequencies in the input data as ``n_samples / (n_classes * np.bincount(y))`` For multi-output, the weights of each column of y will be multiplied. Note that these weights will be multiplied with sample_weight (passed through the fit method) if sample_weight is specified. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Attributes ---------- classes_ : array of shape = [n_classes] or a list of such arrays The classes labels (single output problem), or a list of arrays of class labels (multi-output problem). feature_importances_ : array of shape = [n_features] The feature importances. The higher, the more important the feature. The importance of a feature is computed as the (normalized) total reduction of the criterion brought by that feature. It is also known as the Gini importance [4]_. max_features_ : int, The inferred value of max_features. n_classes_ : int or list The number of classes (for single output problems), or a list containing the number of classes for each output (for multi-output problems). n_features_ : int The number of features when ``fit`` is performed. n_outputs_ : int The number of outputs when ``fit`` is performed. tree_ : Tree object The underlying Tree object. See also -------- DecisionTreeRegressor References ---------- .. [1] http://en.wikipedia.org/wiki/Decision_tree_learning .. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification and Regression Trees", Wadsworth, Belmont, CA, 1984. .. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical Learning", Springer, 2009. .. [4] L. Breiman, and A. 
Cutler, "Random Forests", http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm Examples -------- >>> from sklearn.datasets import load_iris >>> from sklearn.cross_validation import cross_val_score >>> from sklearn.tree import DecisionTreeClassifier >>> clf = DecisionTreeClassifier(random_state=0) >>> iris = load_iris() >>> cross_val_score(clf, iris.data, iris.target, cv=10) ... # doctest: +SKIP ... array([ 1. , 0.93..., 0.86..., 0.93..., 0.93..., 0.93..., 0.93..., 1. , 0.93..., 1. ]) """ def __init__(self, criterion="gini", splitter="best", max_depth=None, min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0., max_features=None, random_state=None, max_leaf_nodes=None, class_weight=None): super(DecisionTreeClassifier, self).__init__( criterion=criterion, splitter=splitter, max_depth=max_depth, min_samples_split=min_samples_split, min_samples_leaf=min_samples_leaf, min_weight_fraction_leaf=min_weight_fraction_leaf, max_features=max_features, max_leaf_nodes=max_leaf_nodes, class_weight=class_weight, random_state=random_state) def predict_proba(self, X, check_input=True): """Predict class probabilities of the input samples X. The predicted class probability is the fraction of samples of the same class in a leaf. check_input : boolean, (default=True) Allow to bypass several input checking. Don't use this parameter unless you know what you do. Parameters ---------- X : array-like or sparse matrix of shape = [n_samples, n_features] The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. Returns ------- p : array of shape = [n_samples, n_classes], or a list of n_outputs such arrays if n_outputs > 1. The class probabilities of the input samples. The order of the classes corresponds to that in the attribute `classes_`. """ X = self._validate_X_predict(X, check_input) proba = self.tree_.predict(X) if self.n_outputs_ == 1: proba = proba[:, :self.n_classes_] normalizer = proba.sum(axis=1)[:, np.newaxis] normalizer[normalizer == 0.0] = 1.0 proba /= normalizer return proba else: all_proba = [] for k in range(self.n_outputs_): proba_k = proba[:, k, :self.n_classes_[k]] normalizer = proba_k.sum(axis=1)[:, np.newaxis] normalizer[normalizer == 0.0] = 1.0 proba_k /= normalizer all_proba.append(proba_k) return all_proba def predict_log_proba(self, X): """Predict class log-probabilities of the input samples X. Parameters ---------- X : array-like or sparse matrix of shape = [n_samples, n_features] The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. Returns ------- p : array of shape = [n_samples, n_classes], or a list of n_outputs such arrays if n_outputs > 1. The class log-probabilities of the input samples. The order of the classes corresponds to that in the attribute `classes_`. """ proba = self.predict_proba(X) if self.n_outputs_ == 1: return np.log(proba) else: for k in range(self.n_outputs_): proba[k] = np.log(proba[k]) return proba class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin): """A decision tree regressor. Read more in the :ref:`User Guide <tree>`. Parameters ---------- criterion : string, optional (default="mse") The function to measure the quality of a split. The only supported criterion is "mse" for the mean squared error, which is equal to variance reduction as feature selection criterion. splitter : string, optional (default="best") The strategy used to choose the split at each node. 
Supported strategies are "best" to choose the best split and "random" to choose the best random split. max_features : int, float, string or None, optional (default=None) The number of features to consider when looking for the best split: - If int, then consider `max_features` features at each split. - If float, then `max_features` is a percentage and `int(max_features * n_features)` features are considered at each split. - If "auto", then `max_features=n_features`. - If "sqrt", then `max_features=sqrt(n_features)`. - If "log2", then `max_features=log2(n_features)`. - If None, then `max_features=n_features`. Note: the search for a split does not stop until at least one valid partition of the node samples is found, even if it requires to effectively inspect more than ``max_features`` features. max_depth : int or None, optional (default=None) The maximum depth of the tree. If None, then nodes are expanded until all leaves are pure or until all leaves contain less than min_samples_split samples. Ignored if ``max_leaf_nodes`` is not None. min_samples_split : int, optional (default=2) The minimum number of samples required to split an internal node. min_samples_leaf : int, optional (default=1) The minimum number of samples required to be at a leaf node. min_weight_fraction_leaf : float, optional (default=0.) The minimum weighted fraction of the input samples required to be at a leaf node. max_leaf_nodes : int or None, optional (default=None) Grow a tree with ``max_leaf_nodes`` in best-first fashion. Best nodes are defined as relative reduction in impurity. If None then unlimited number of leaf nodes. If not None then ``max_depth`` will be ignored. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Attributes ---------- feature_importances_ : array of shape = [n_features] The feature importances. The higher, the more important the feature. The importance of a feature is computed as the (normalized) total reduction of the criterion brought by that feature. It is also known as the Gini importance [4]_. max_features_ : int, The inferred value of max_features. n_features_ : int The number of features when ``fit`` is performed. n_outputs_ : int The number of outputs when ``fit`` is performed. tree_ : Tree object The underlying Tree object. See also -------- DecisionTreeClassifier References ---------- .. [1] http://en.wikipedia.org/wiki/Decision_tree_learning .. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification and Regression Trees", Wadsworth, Belmont, CA, 1984. .. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical Learning", Springer, 2009. .. [4] L. Breiman, and A. Cutler, "Random Forests", http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm Examples -------- >>> from sklearn.datasets import load_boston >>> from sklearn.cross_validation import cross_val_score >>> from sklearn.tree import DecisionTreeRegressor >>> boston = load_boston() >>> regressor = DecisionTreeRegressor(random_state=0) >>> cross_val_score(regressor, boston.data, boston.target, cv=10) ... # doctest: +SKIP ... 
array([ 0.61..., 0.57..., -0.34..., 0.41..., 0.75..., 0.07..., 0.29..., 0.33..., -1.42..., -1.77...]) """ def __init__(self, criterion="mse", splitter="best", max_depth=None, min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0., max_features=None, random_state=None, max_leaf_nodes=None): super(DecisionTreeRegressor, self).__init__( criterion=criterion, splitter=splitter, max_depth=max_depth, min_samples_split=min_samples_split, min_samples_leaf=min_samples_leaf, min_weight_fraction_leaf=min_weight_fraction_leaf, max_features=max_features, max_leaf_nodes=max_leaf_nodes, random_state=random_state) class ExtraTreeClassifier(DecisionTreeClassifier): """An extremely randomized tree classifier. Extra-trees differ from classic decision trees in the way they are built. When looking for the best split to separate the samples of a node into two groups, random splits are drawn for each of the `max_features` randomly selected features and the best split among those is chosen. When `max_features` is set 1, this amounts to building a totally random decision tree. Warning: Extra-trees should only be used within ensemble methods. Read more in the :ref:`User Guide <tree>`. See also -------- ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor References ---------- .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees", Machine Learning, 63(1), 3-42, 2006. """ def __init__(self, criterion="gini", splitter="random", max_depth=None, min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0., max_features="auto", random_state=None, max_leaf_nodes=None, class_weight=None): super(ExtraTreeClassifier, self).__init__( criterion=criterion, splitter=splitter, max_depth=max_depth, min_samples_split=min_samples_split, min_samples_leaf=min_samples_leaf, min_weight_fraction_leaf=min_weight_fraction_leaf, max_features=max_features, max_leaf_nodes=max_leaf_nodes, class_weight=class_weight, random_state=random_state) class ExtraTreeRegressor(DecisionTreeRegressor): """An extremely randomized tree regressor. Extra-trees differ from classic decision trees in the way they are built. When looking for the best split to separate the samples of a node into two groups, random splits are drawn for each of the `max_features` randomly selected features and the best split among those is chosen. When `max_features` is set 1, this amounts to building a totally random decision tree. Warning: Extra-trees should only be used within ensemble methods. Read more in the :ref:`User Guide <tree>`. See also -------- ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor References ---------- .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees", Machine Learning, 63(1), 3-42, 2006. """ def __init__(self, criterion="mse", splitter="random", max_depth=None, min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0., max_features="auto", random_state=None, max_leaf_nodes=None): super(ExtraTreeRegressor, self).__init__( criterion=criterion, splitter=splitter, max_depth=max_depth, min_samples_split=min_samples_split, min_samples_leaf=min_samples_leaf, min_weight_fraction_leaf=min_weight_fraction_leaf, max_features=max_features, max_leaf_nodes=max_leaf_nodes, random_state=random_state)
bsd-3-clause
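The tree.py record above defines the public DecisionTreeClassifier and DecisionTreeRegressor estimators. A minimal end-to-end check of the classifier on made-up one-dimensional data, exercising fit, predict, predict_proba and feature_importances_:

import numpy as np
from sklearn.tree import DecisionTreeClassifier

# Four toy samples: the single feature separates the two classes at roughly 0.5.
X = np.array([[0.0], [0.2], [0.8], [1.0]])
y = np.array([0, 0, 1, 1])

clf = DecisionTreeClassifier(max_depth=2, random_state=0).fit(X, y)

print(clf.predict([[0.1], [0.9]]))      # -> [0 1]
print(clf.predict_proba([[0.1]]))       # leaf class fractions, here [[1. 0.]]
print(clf.feature_importances_)         # the lone feature carries all the importance: [1.]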
ericdill/scikit-xray
doc/sphinxext/tests/test_docscrape.py
12
14257
# -*- encoding:utf-8 -*- import sys import os sys.path.append(os.path.join(os.path.dirname(__file__), '..')) from docscrape import NumpyDocString, FunctionDoc, ClassDoc from docscrape_sphinx import SphinxDocString, SphinxClassDoc from nose.tools import * doc_txt = '''\ numpy.multivariate_normal(mean, cov, shape=None) Draw values from a multivariate normal distribution with specified mean and covariance. The multivariate normal or Gaussian distribution is a generalisation of the one-dimensional normal distribution to higher dimensions. Parameters ---------- mean : (N,) ndarray Mean of the N-dimensional distribution. .. math:: (1+2+3)/3 cov : (N,N) ndarray Covariance matrix of the distribution. shape : tuple of ints Given a shape of, for example, (m,n,k), m*n*k samples are generated, and packed in an m-by-n-by-k arrangement. Because each sample is N-dimensional, the output shape is (m,n,k,N). Returns ------- out : ndarray The drawn samples, arranged according to `shape`. If the shape given is (m,n,...), then the shape of `out` is is (m,n,...,N). In other words, each entry ``out[i,j,...,:]`` is an N-dimensional value drawn from the distribution. Warnings -------- Certain warnings apply. Notes ----- Instead of specifying the full covariance matrix, popular approximations include: - Spherical covariance (`cov` is a multiple of the identity matrix) - Diagonal covariance (`cov` has non-negative elements only on the diagonal) This geometrical property can be seen in two dimensions by plotting generated data-points: >>> mean = [0,0] >>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis >>> x,y = multivariate_normal(mean,cov,5000).T >>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show() Note that the covariance matrix must be symmetric and non-negative definite. References ---------- .. [1] A. Papoulis, "Probability, Random Variables, and Stochastic Processes," 3rd ed., McGraw-Hill Companies, 1991 .. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification," 2nd ed., Wiley, 2001. See Also -------- some, other, funcs otherfunc : relationship Examples -------- >>> mean = (1,2) >>> cov = [[1,0],[1,0]] >>> x = multivariate_normal(mean,cov,(3,3)) >>> print(x.shape) (3, 3, 2) The following is probably true, given that 0.6 is roughly twice the standard deviation: >>> print(list( (x[0,0,:] - mean) < 0.6 )) [True, True] .. 
index:: random :refguide: random;distributions, random;gauss ''' doc = NumpyDocString(doc_txt) def test_signature(): assert doc['Signature'].startswith('numpy.multivariate_normal(') assert doc['Signature'].endswith('shape=None)') def test_summary(): assert doc['Summary'][0].startswith('Draw values') assert doc['Summary'][-1].endswith('covariance.') def test_extended_summary(): assert doc['Extended Summary'][0].startswith('The multivariate normal') def test_parameters(): assert_equal(len(doc['Parameters']), 3) assert_equal( [n for n, _, _ in doc['Parameters']], ['mean', 'cov', 'shape']) arg, arg_type, desc = doc['Parameters'][1] assert_equal(arg_type, '(N,N) ndarray') assert desc[0].startswith('Covariance matrix') assert doc['Parameters'][0][-1][-2] == ' (1+2+3)/3' def test_returns(): assert_equal(len(doc['Returns']), 1) arg, arg_type, desc = doc['Returns'][0] assert_equal(arg, 'out') assert_equal(arg_type, 'ndarray') assert desc[0].startswith('The drawn samples') assert desc[-1].endswith('distribution.') def test_notes(): assert doc['Notes'][0].startswith('Instead') assert doc['Notes'][-1].endswith('definite.') assert_equal(len(doc['Notes']), 17) def test_references(): assert doc['References'][0].startswith('..') assert doc['References'][-1].endswith('2001.') def test_examples(): assert doc['Examples'][0].startswith('>>>') assert doc['Examples'][-1].endswith('True]') def test_index(): assert_equal(doc['index']['default'], 'random') print(doc['index']) assert_equal(len(doc['index']), 2) assert_equal(len(doc['index']['refguide']), 2) def non_blank_line_by_line_compare(a, b): a = [l for l in a.split('\n') if l.strip()] b = [l for l in b.split('\n') if l.strip()] for n, line in enumerate(a): if not line == b[n]: raise AssertionError("Lines %s of a and b differ: " "\n>>> %s\n<<< %s\n" % (n, line, b[n])) def test_str(): non_blank_line_by_line_compare(str(doc), """numpy.multivariate_normal(mean, cov, shape=None) Draw values from a multivariate normal distribution with specified mean and covariance. The multivariate normal or Gaussian distribution is a generalisation of the one-dimensional normal distribution to higher dimensions. Parameters ---------- mean : (N,) ndarray Mean of the N-dimensional distribution. .. math:: (1+2+3)/3 cov : (N,N) ndarray Covariance matrix of the distribution. shape : tuple of ints Given a shape of, for example, (m,n,k), m*n*k samples are generated, and packed in an m-by-n-by-k arrangement. Because each sample is N-dimensional, the output shape is (m,n,k,N). Returns ------- out : ndarray The drawn samples, arranged according to `shape`. If the shape given is (m,n,...), then the shape of `out` is is (m,n,...,N). In other words, each entry ``out[i,j,...,:]`` is an N-dimensional value drawn from the distribution. Warnings -------- Certain warnings apply. See Also -------- `some`_, `other`_, `funcs`_ `otherfunc`_ relationship Notes ----- Instead of specifying the full covariance matrix, popular approximations include: - Spherical covariance (`cov` is a multiple of the identity matrix) - Diagonal covariance (`cov` has non-negative elements only on the diagonal) This geometrical property can be seen in two dimensions by plotting generated data-points: >>> mean = [0,0] >>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis >>> x,y = multivariate_normal(mean,cov,5000).T >>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show() Note that the covariance matrix must be symmetric and non-negative definite. References ---------- .. [1] A. 
Papoulis, "Probability, Random Variables, and Stochastic Processes," 3rd ed., McGraw-Hill Companies, 1991 .. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification," 2nd ed., Wiley, 2001. Examples -------- >>> mean = (1,2) >>> cov = [[1,0],[1,0]] >>> x = multivariate_normal(mean,cov,(3,3)) >>> print(x.shape) (3, 3, 2) The following is probably true, given that 0.6 is roughly twice the standard deviation: >>> print(list( (x[0,0,:] - mean) < 0.6 )) [True, True] .. index:: random :refguide: random;distributions, random;gauss""") def test_sphinx_str(): sphinx_doc = SphinxDocString(doc_txt) non_blank_line_by_line_compare(str(sphinx_doc), """ .. index:: random single: random;distributions, random;gauss Draw values from a multivariate normal distribution with specified mean and covariance. The multivariate normal or Gaussian distribution is a generalisation of the one-dimensional normal distribution to higher dimensions. :Parameters: **mean** : (N,) ndarray Mean of the N-dimensional distribution. .. math:: (1+2+3)/3 **cov** : (N,N) ndarray Covariance matrix of the distribution. **shape** : tuple of ints Given a shape of, for example, (m,n,k), m*n*k samples are generated, and packed in an m-by-n-by-k arrangement. Because each sample is N-dimensional, the output shape is (m,n,k,N). :Returns: **out** : ndarray The drawn samples, arranged according to `shape`. If the shape given is (m,n,...), then the shape of `out` is is (m,n,...,N). In other words, each entry ``out[i,j,...,:]`` is an N-dimensional value drawn from the distribution. .. warning:: Certain warnings apply. .. seealso:: :obj:`some`, :obj:`other`, :obj:`funcs` :obj:`otherfunc` relationship .. rubric:: Notes Instead of specifying the full covariance matrix, popular approximations include: - Spherical covariance (`cov` is a multiple of the identity matrix) - Diagonal covariance (`cov` has non-negative elements only on the diagonal) This geometrical property can be seen in two dimensions by plotting generated data-points: >>> mean = [0,0] >>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis >>> x,y = multivariate_normal(mean,cov,5000).T >>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show() Note that the covariance matrix must be symmetric and non-negative definite. .. rubric:: References .. [1] A. Papoulis, "Probability, Random Variables, and Stochastic Processes," 3rd ed., McGraw-Hill Companies, 1991 .. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification," 2nd ed., Wiley, 2001. .. only:: latex [1]_, [2]_ .. rubric:: Examples >>> mean = (1,2) >>> cov = [[1,0],[1,0]] >>> x = multivariate_normal(mean,cov,(3,3)) >>> print(x.shape) (3, 3, 2) The following is probably true, given that 0.6 is roughly twice the standard deviation: >>> print(list( (x[0,0,:] - mean) < 0.6 )) [True, True] """) doc2 = NumpyDocString(""" Returns array of indices of the maximum values of along the given axis. Parameters ---------- a : {array_like} Array to look in. axis : {None, integer} If None, the index is into the flattened array, otherwise along the specified axis""") def test_parameters_without_extended_description(): assert_equal(len(doc2['Parameters']), 2) doc3 = NumpyDocString(""" my_signature(*params, **kwds) Return this and that. 
""") def test_escape_stars(): signature = str(doc3).split('\n')[0] assert_equal(signature, 'my_signature(\*params, \*\*kwds)') doc4 = NumpyDocString( """a.conj() Return an array with all complex-valued elements conjugated.""") def test_empty_extended_summary(): assert_equal(doc4['Extended Summary'], []) doc5 = NumpyDocString( """ a.something() Raises ------ LinAlgException If array is singular. """) def test_raises(): assert_equal(len(doc5['Raises']), 1) name, _, desc = doc5['Raises'][0] assert_equal(name, 'LinAlgException') assert_equal(desc, ['If array is singular.']) def test_see_also(): doc6 = NumpyDocString( """ z(x,theta) See Also -------- func_a, func_b, func_c func_d : some equivalent func foo.func_e : some other func over multiple lines func_f, func_g, :meth:`func_h`, func_j, func_k :obj:`baz.obj_q` :class:`class_j`: fubar foobar """) assert len(doc6['See Also']) == 12 for func, desc, role in doc6['See Also']: if func in ('func_a', 'func_b', 'func_c', 'func_f', 'func_g', 'func_h', 'func_j', 'func_k', 'baz.obj_q'): assert(not desc) else: assert(desc) if func == 'func_h': assert role == 'meth' elif func == 'baz.obj_q': assert role == 'obj' elif func == 'class_j': assert role == 'class' else: assert role is None if func == 'func_d': assert desc == ['some equivalent func'] elif func == 'foo.func_e': assert desc == ['some other func over', 'multiple lines'] elif func == 'class_j': assert desc == ['fubar', 'foobar'] def test_see_also_print(): class Dummy(object): """ See Also -------- func_a, func_b func_c : some relationship goes here func_d """ pass obj = Dummy() s = str(FunctionDoc(obj, role='func')) assert(':func:`func_a`, :func:`func_b`' in s) assert(' some relationship' in s) assert(':func:`func_d`' in s) doc7 = NumpyDocString(""" Doc starts on second line. """) def test_empty_first_line(): assert doc7['Summary'][0].startswith('Doc starts') def test_no_summary(): str(SphinxDocString(""" Parameters ----------""")) def test_unicode(): doc = SphinxDocString(""" öäöäöäöäöåååå öäöäöäööäååå Parameters ---------- ååå : äää ööö Returns ------- ååå : ööö äää """) assert doc['Summary'][0] == u'öäöäöäöäöåååå'.encode('utf-8') def test_plot_examples(): cfg = dict(use_plots=True) doc = SphinxDocString(""" Examples -------- >>> import matplotlib.pyplot as plt >>> plt.plot([1,2,3],[4,5,6]) >>> plt.show() """, config=cfg) assert 'plot::' in str(doc), str(doc) doc = SphinxDocString(""" Examples -------- .. plot:: import matplotlib.pyplot as plt plt.plot([1,2,3],[4,5,6]) plt.show() """, config=cfg) assert str(doc).count('plot::') == 1, str(doc) def test_class_members(): class Dummy(object): """ Dummy class. """ def spam(self, a, b): """Spam\n\nSpam spam.""" pass def ham(self, c, d): """Cheese\n\nNo cheese.""" pass for cls in (ClassDoc, SphinxClassDoc): doc = cls(Dummy, config=dict(show_class_members=False)) assert 'Methods' not in str(doc), (cls, str(doc)) assert 'spam' not in str(doc), (cls, str(doc)) assert 'ham' not in str(doc), (cls, str(doc)) doc = cls(Dummy, config=dict(show_class_members=True)) assert 'Methods' in str(doc), (cls, str(doc)) assert 'spam' in str(doc), (cls, str(doc)) assert 'ham' in str(doc), (cls, str(doc)) if cls is SphinxClassDoc: assert '.. autosummary::' in str(doc), str(doc)
bsd-3-clause
UDST/activitysim
activitysim/abm/models/util/test/test_cdap.py
2
3824
# ActivitySim
# See full license in LICENSE.txt.
import os.path

import pandas as pd
import pandas.util.testing as pdt
import pytest

from .. import cdap
from activitysim.core import simulate


@pytest.fixture(scope='module')
def data_dir():
    return os.path.join(os.path.dirname(__file__), 'data')


@pytest.fixture(scope='module')
def configs_dir():
    return os.path.join(os.path.dirname(__file__), 'configs')


@pytest.fixture(scope='module')
def people(data_dir):
    return pd.read_csv(
        os.path.join(data_dir, 'people.csv'),
        index_col='id')


@pytest.fixture(scope='module')
def cdap_indiv_and_hhsize1(configs_dir):
    return simulate.read_model_spec(file_name='cdap_indiv_and_hhsize1.csv',
                                    spec_dir=configs_dir)


@pytest.fixture(scope='module')
def cdap_interaction_coefficients(configs_dir):
    f = os.path.join(configs_dir, 'cdap_interaction_coefficients.csv')
    coefficients = pd.read_csv(f, comment='#')
    coefficients = cdap.preprocess_interaction_coefficients(coefficients)
    return coefficients


@pytest.fixture(scope='module')
def individual_utils(
        people, cdap_indiv_and_hhsize1):
    return cdap.individual_utilities(people, cdap_indiv_and_hhsize1, locals_d=None)


def test_bad_coefficients(configs_dir):
    f = os.path.join(configs_dir, 'cdap_interaction_coefficients.csv')
    coefficients = pd.read_csv(f, comment='#')

    coefficients.loc[2, 'activity'] = 'AA'

    with pytest.raises(RuntimeError) as excinfo:
        coefficients = cdap.preprocess_interaction_coefficients(coefficients)
    assert "Expect only M, N, or H" in str(excinfo.value)


def test_assign_cdap_rank(people):

    cdap.assign_cdap_rank(people)

    expected = pd.Series(
        [1, 1, 1, 2, 2, 1, 3, 1, 2, 1, 3, 2, 1, 3, 2, 4, 1, 3, 4, 2],
        index=people.index
    )

    pdt.assert_series_equal(people['cdap_rank'], expected,
                            check_dtype=False, check_names=False)


def test_individual_utilities(people, cdap_indiv_and_hhsize1):

    cdap.assign_cdap_rank(people)
    individual_utils = cdap.individual_utilities(people, cdap_indiv_and_hhsize1, locals_d=None)

    individual_utils = individual_utils[['M', 'N', 'H']]

    expected = pd.DataFrame([
        [2, 0, 0],    # person 1
        [0, 0, 1],    # person 2
        [3, 0, 0],    # person 3
        [3, 0, 0],    # person 4
        [0, 1, 0],    # person 5
        [1, 0, 0],    # person 6
        [1, 0, 0],    # person 7
        [0, 2, 0],    # person 8
        [0, 0, 1],    # person 9
        [2, 0, 0],    # person 10
        [0, 0, 3],    # person 11
        [0, 0, 2],    # person 12
        [3, 0, 0],    # person 13
        [1, 0, 0],    # person 14
        [0, 4, 0],    # person 15
        [0, 4, 0],    # person 16
        [0, 0, 4],    # person 17
        [0, 0, 5],    # person 18
        [50, 0, 4],   # person 19
        [2, 0, 0]     # person 20
    ], index=people.index, columns=cdap_indiv_and_hhsize1.columns)

    pdt.assert_frame_equal(
        individual_utils, expected,
        check_dtype=False, check_names=False)


def test_build_cdap_spec_hhsize2(people, cdap_indiv_and_hhsize1,
                                 cdap_interaction_coefficients):

    hhsize = 2

    cdap.assign_cdap_rank(people)
    indiv_utils = cdap.individual_utilities(people, cdap_indiv_and_hhsize1, locals_d=None)

    choosers = cdap.hh_choosers(indiv_utils, hhsize=hhsize)

    spec = cdap.build_cdap_spec(cdap_interaction_coefficients, hhsize=hhsize, cache=False)

    vars = simulate.eval_variables(spec.index, choosers)

    utils = simulate.compute_utilities(vars, spec)

    expected = pd.DataFrame([
        [0, 3, 0, 3, 7, 3, 0, 3, 0],  # household 3
        [0, 0, 1, 1, 1, 2, 0, 0, 2],  # household 4
    ], index=[3, 4],
        columns=['HH', 'HM', 'HN', 'MH', 'MM', 'MN', 'NH', 'NM', 'NN']).astype('float')

    pdt.assert_frame_equal(utils, expected, check_names=False)
bsd-3-clause
0asa/scikit-learn
sklearn/linear_model/tests/test_sparse_coordinate_descent.py
28
10014
import numpy as np import scipy.sparse as sp from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_less from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_greater from sklearn.utils.testing import ignore_warnings from sklearn.linear_model.coordinate_descent import (Lasso, ElasticNet, LassoCV, ElasticNetCV) def test_sparse_coef(): """ Check that the sparse_coef propery works """ clf = ElasticNet() clf.coef_ = [1, 2, 3] assert_true(sp.isspmatrix(clf.sparse_coef_)) assert_equal(clf.sparse_coef_.toarray().tolist()[0], clf.coef_) def test_normalize_option(): """ Check that the normalize option in enet works """ X = sp.csc_matrix([[-1], [0], [1]]) y = [-1, 0, 1] clf_dense = ElasticNet(fit_intercept=True, normalize=True) clf_sparse = ElasticNet(fit_intercept=True, normalize=True) clf_dense.fit(X, y) X = sp.csc_matrix(X) clf_sparse.fit(X, y) assert_almost_equal(clf_dense.dual_gap_, 0) assert_array_almost_equal(clf_dense.coef_, clf_sparse.coef_) def test_lasso_zero(): """Check that the sparse lasso can handle zero data without crashing""" X = sp.csc_matrix((3, 1)) y = [0, 0, 0] T = np.array([[1], [2], [3]]) clf = Lasso().fit(X, y) pred = clf.predict(T) assert_array_almost_equal(clf.coef_, [0]) assert_array_almost_equal(pred, [0, 0, 0]) assert_almost_equal(clf.dual_gap_, 0) def test_enet_toy_list_input(): """Test ElasticNet for various values of alpha and l1_ratio with list X""" X = np.array([[-1], [0], [1]]) X = sp.csc_matrix(X) Y = [-1, 0, 1] # just a straight line T = np.array([[2], [3], [4]]) # test sample # this should be the same as unregularized least squares clf = ElasticNet(alpha=0, l1_ratio=1.0) # catch warning about alpha=0. # this is discouraged but should work. 
ignore_warnings(clf.fit)(X, Y) pred = clf.predict(T) assert_array_almost_equal(clf.coef_, [1]) assert_array_almost_equal(pred, [2, 3, 4]) assert_almost_equal(clf.dual_gap_, 0) clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000) clf.fit(X, Y) pred = clf.predict(T) assert_array_almost_equal(clf.coef_, [0.50819], decimal=3) assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3) assert_almost_equal(clf.dual_gap_, 0) clf = ElasticNet(alpha=0.5, l1_ratio=0.5) clf.fit(X, Y) pred = clf.predict(T) assert_array_almost_equal(clf.coef_, [0.45454], 3) assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3) assert_almost_equal(clf.dual_gap_, 0) def test_enet_toy_explicit_sparse_input(): """Test ElasticNet for various values of alpha and l1_ratio with sparse X""" f = ignore_warnings # training samples X = sp.lil_matrix((3, 1)) X[0, 0] = -1 # X[1, 0] = 0 X[2, 0] = 1 Y = [-1, 0, 1] # just a straight line (the identity function) # test samples T = sp.lil_matrix((3, 1)) T[0, 0] = 2 T[1, 0] = 3 T[2, 0] = 4 # this should be the same as lasso clf = ElasticNet(alpha=0, l1_ratio=1.0) f(clf.fit)(X, Y) pred = clf.predict(T) assert_array_almost_equal(clf.coef_, [1]) assert_array_almost_equal(pred, [2, 3, 4]) assert_almost_equal(clf.dual_gap_, 0) clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000) clf.fit(X, Y) pred = clf.predict(T) assert_array_almost_equal(clf.coef_, [0.50819], decimal=3) assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3) assert_almost_equal(clf.dual_gap_, 0) clf = ElasticNet(alpha=0.5, l1_ratio=0.5) clf.fit(X, Y) pred = clf.predict(T) assert_array_almost_equal(clf.coef_, [0.45454], 3) assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3) assert_almost_equal(clf.dual_gap_, 0) def make_sparse_data(n_samples=100, n_features=100, n_informative=10, seed=42, positive=False, n_targets=1): random_state = np.random.RandomState(seed) # build an ill-posed linear regression problem with many noisy features and # comparatively few samples # generate a ground truth model w = random_state.randn(n_features, n_targets) w[n_informative:] = 0.0 # only the top features are impacting the model if positive: w = np.abs(w) X = random_state.randn(n_samples, n_features) rnd = random_state.uniform(size=(n_samples, n_features)) X[rnd > 0.5] = 0.0 # 50% of zeros in input signal # generate training ground truth labels y = np.dot(X, w) X = sp.csc_matrix(X) if n_targets == 1: y = np.ravel(y) return X, y def _test_sparse_enet_not_as_toy_dataset(alpha, fit_intercept, positive): n_samples, n_features, max_iter = 100, 100, 1000 n_informative = 10 X, y = make_sparse_data(n_samples, n_features, n_informative, positive=positive) X_train, X_test = X[n_samples // 2:], X[:n_samples // 2] y_train, y_test = y[n_samples // 2:], y[:n_samples // 2] s_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept, max_iter=max_iter, tol=1e-7, positive=positive, warm_start=True) s_clf.fit(X_train, y_train) assert_almost_equal(s_clf.dual_gap_, 0, 4) assert_greater(s_clf.score(X_test, y_test), 0.85) # check the convergence is the same as the dense version d_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept, max_iter=max_iter, tol=1e-7, positive=positive, warm_start=True) d_clf.fit(X_train.toarray(), y_train) assert_almost_equal(d_clf.dual_gap_, 0, 4) assert_greater(d_clf.score(X_test, y_test), 0.85) assert_almost_equal(s_clf.coef_, d_clf.coef_, 5) assert_almost_equal(s_clf.intercept_, d_clf.intercept_, 5) # check that the coefs are sparse 
assert_less(np.sum(s_clf.coef_ != 0.0), 2 * n_informative) def test_sparse_enet_not_as_toy_dataset(): _test_sparse_enet_not_as_toy_dataset(alpha=0.1, fit_intercept=False, positive=False) _test_sparse_enet_not_as_toy_dataset(alpha=0.1, fit_intercept=True, positive=False) _test_sparse_enet_not_as_toy_dataset(alpha=1e-3, fit_intercept=False, positive=True) _test_sparse_enet_not_as_toy_dataset(alpha=1e-3, fit_intercept=True, positive=True) def test_sparse_lasso_not_as_toy_dataset(): n_samples = 100 max_iter = 1000 n_informative = 10 X, y = make_sparse_data(n_samples=n_samples, n_informative=n_informative) X_train, X_test = X[n_samples // 2:], X[:n_samples // 2] y_train, y_test = y[n_samples // 2:], y[:n_samples // 2] s_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7) s_clf.fit(X_train, y_train) assert_almost_equal(s_clf.dual_gap_, 0, 4) assert_greater(s_clf.score(X_test, y_test), 0.85) # check the convergence is the same as the dense version d_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7) d_clf.fit(X_train.toarray(), y_train) assert_almost_equal(d_clf.dual_gap_, 0, 4) assert_greater(d_clf.score(X_test, y_test), 0.85) # check that the coefs are sparse assert_equal(np.sum(s_clf.coef_ != 0.0), n_informative) def test_enet_multitarget(): n_targets = 3 X, y = make_sparse_data(n_targets=n_targets) estimator = ElasticNet(alpha=0.01, fit_intercept=True, precompute=None) # XXX: There is a bug when precompute is not None! estimator.fit(X, y) coef, intercept, dual_gap = (estimator.coef_, estimator.intercept_, estimator.dual_gap_) for k in range(n_targets): estimator.fit(X, y[:, k]) assert_array_almost_equal(coef[k, :], estimator.coef_) assert_array_almost_equal(intercept[k], estimator.intercept_) assert_array_almost_equal(dual_gap[k], estimator.dual_gap_) def test_path_parameters(): X, y = make_sparse_data() max_iter = 50 n_alphas = 10 clf = ElasticNetCV(n_alphas=n_alphas, eps=1e-3, max_iter=max_iter, l1_ratio=0.5, fit_intercept=False) ignore_warnings(clf.fit)(X, y) # new params assert_almost_equal(0.5, clf.l1_ratio) assert_equal(n_alphas, clf.n_alphas) assert_equal(n_alphas, len(clf.alphas_)) sparse_mse_path = clf.mse_path_ ignore_warnings(clf.fit)(X.toarray(), y) # compare with dense data assert_almost_equal(clf.mse_path_, sparse_mse_path) def test_same_output_sparse_dense_lasso_and_enet_cv(): X, y = make_sparse_data(n_samples=40, n_features=10) for normalize in [True, False]: clfs = ElasticNetCV(max_iter=100, cv=5, normalize=normalize) ignore_warnings(clfs.fit)(X, y) clfd = ElasticNetCV(max_iter=100, cv=5, normalize=normalize) ignore_warnings(clfd.fit)(X.toarray(), y) assert_almost_equal(clfs.alpha_, clfd.alpha_, 7) assert_almost_equal(clfs.intercept_, clfd.intercept_, 7) assert_array_almost_equal(clfs.mse_path_, clfd.mse_path_) assert_array_almost_equal(clfs.alphas_, clfd.alphas_) clfs = LassoCV(max_iter=100, cv=4, normalize=normalize) ignore_warnings(clfs.fit)(X, y) clfd = LassoCV(max_iter=100, cv=4, normalize=normalize) ignore_warnings(clfd.fit)(X.toarray(), y) assert_almost_equal(clfs.alpha_, clfd.alpha_, 7) assert_almost_equal(clfs.intercept_, clfd.intercept_, 7) assert_array_almost_equal(clfs.mse_path_, clfd.mse_path_) assert_array_almost_equal(clfs.alphas_, clfd.alphas_)
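
# --- Illustrative sketch (added for clarity; not part of the original test
# module). It spells out the pattern the tests above rely on: ElasticNet
# accepts scipy.sparse input and, given a tight tolerance, converges to
# essentially the same coefficients as on the equivalent dense input.
# The data and hyper-parameters below are made up for illustration.
def _sparse_dense_agreement_sketch():
    rng = np.random.RandomState(0)
    X_dense = rng.randn(60, 20)
    X_dense[rng.uniform(size=X_dense.shape) > 0.5] = 0.0  # roughly 50% zeros
    y = X_dense[:, 0] + 0.01 * rng.randn(60)

    params = dict(alpha=0.1, l1_ratio=0.8, max_iter=5000, tol=1e-7)
    clf_sparse = ElasticNet(**params).fit(sp.csc_matrix(X_dense), y)
    clf_dense = ElasticNet(**params).fit(X_dense, y)

    # Both solvers should find essentially the same solution.
    assert_array_almost_equal(clf_sparse.coef_, clf_dense.coef_, decimal=5)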
bsd-3-clause
gfyoung/pandas
pandas/tests/indexes/ranges/test_setops.py
2
12685
from datetime import datetime, timedelta import numpy as np import pytest from pandas import Index, Int64Index, RangeIndex, UInt64Index import pandas._testing as tm class TestRangeIndexSetOps: @pytest.mark.parametrize("klass", [RangeIndex, Int64Index, UInt64Index]) def test_intersection_mismatched_dtype(self, klass): # check that we cast to float, not object index = RangeIndex(start=0, stop=20, step=2, name="foo") index = klass(index) flt = index.astype(np.float64) # bc index.equals(flt), we go through fastpath and get RangeIndex back result = index.intersection(flt) tm.assert_index_equal(result, index, exact=True) result = flt.intersection(index) tm.assert_index_equal(result, flt, exact=True) # neither empty, not-equals result = index.intersection(flt[1:]) tm.assert_index_equal(result, flt[1:], exact=True) result = flt[1:].intersection(index) tm.assert_index_equal(result, flt[1:], exact=True) # empty other result = index.intersection(flt[:0]) tm.assert_index_equal(result, flt[:0], exact=True) result = flt[:0].intersection(index) tm.assert_index_equal(result, flt[:0], exact=True) def test_intersection_empty(self, sort, names): # name retention on empty intersections index = RangeIndex(start=0, stop=20, step=2, name=names[0]) # empty other result = index.intersection(index[:0].rename(names[1]), sort=sort) tm.assert_index_equal(result, index[:0].rename(names[2]), exact=True) # empty self result = index[:0].intersection(index.rename(names[1]), sort=sort) tm.assert_index_equal(result, index[:0].rename(names[2]), exact=True) def test_intersection(self, sort): # intersect with Int64Index index = RangeIndex(start=0, stop=20, step=2) other = Index(np.arange(1, 6)) result = index.intersection(other, sort=sort) expected = Index(np.sort(np.intersect1d(index.values, other.values))) tm.assert_index_equal(result, expected) result = other.intersection(index, sort=sort) expected = Index( np.sort(np.asarray(np.intersect1d(index.values, other.values))) ) tm.assert_index_equal(result, expected) # intersect with increasing RangeIndex other = RangeIndex(1, 6) result = index.intersection(other, sort=sort) expected = Index(np.sort(np.intersect1d(index.values, other.values))) tm.assert_index_equal(result, expected) # intersect with decreasing RangeIndex other = RangeIndex(5, 0, -1) result = index.intersection(other, sort=sort) expected = Index(np.sort(np.intersect1d(index.values, other.values))) tm.assert_index_equal(result, expected) # reversed (GH 17296) result = other.intersection(index, sort=sort) tm.assert_index_equal(result, expected) # GH 17296: intersect two decreasing RangeIndexes first = RangeIndex(10, -2, -2) other = RangeIndex(5, -4, -1) expected = first.astype(int).intersection(other.astype(int), sort=sort) result = first.intersection(other, sort=sort).astype(int) tm.assert_index_equal(result, expected) # reversed result = other.intersection(first, sort=sort).astype(int) tm.assert_index_equal(result, expected) index = RangeIndex(5, name="foo") # intersect of non-overlapping indices other = RangeIndex(5, 10, 1, name="foo") result = index.intersection(other, sort=sort) expected = RangeIndex(0, 0, 1, name="foo") tm.assert_index_equal(result, expected) other = RangeIndex(-1, -5, -1) result = index.intersection(other, sort=sort) expected = RangeIndex(0, 0, 1) tm.assert_index_equal(result, expected) # intersection of empty indices other = RangeIndex(0, 0, 1) result = index.intersection(other, sort=sort) expected = RangeIndex(0, 0, 1) tm.assert_index_equal(result, expected) result = 
other.intersection(index, sort=sort) tm.assert_index_equal(result, expected) def test_intersection_non_overlapping_gcd(self, sort, names): # intersection of non-overlapping values based on start value and gcd index = RangeIndex(1, 10, 2, name=names[0]) other = RangeIndex(0, 10, 4, name=names[1]) result = index.intersection(other, sort=sort) expected = RangeIndex(0, 0, 1, name=names[2]) tm.assert_index_equal(result, expected) def test_union_noncomparable(self, sort): # corner case, non-Int64Index index = RangeIndex(start=0, stop=20, step=2) other = Index([datetime.now() + timedelta(i) for i in range(4)], dtype=object) result = index.union(other, sort=sort) expected = Index(np.concatenate((index, other))) tm.assert_index_equal(result, expected) result = other.union(index, sort=sort) expected = Index(np.concatenate((other, index))) tm.assert_index_equal(result, expected) @pytest.fixture( params=[ ( RangeIndex(0, 10, 1), RangeIndex(0, 10, 1), RangeIndex(0, 10, 1), RangeIndex(0, 10, 1), ), ( RangeIndex(0, 10, 1), RangeIndex(5, 20, 1), RangeIndex(0, 20, 1), Int64Index(range(20)), ), ( RangeIndex(0, 10, 1), RangeIndex(10, 20, 1), RangeIndex(0, 20, 1), Int64Index(range(20)), ), ( RangeIndex(0, -10, -1), RangeIndex(0, -10, -1), RangeIndex(0, -10, -1), RangeIndex(0, -10, -1), ), ( RangeIndex(0, -10, -1), RangeIndex(-10, -20, -1), RangeIndex(-19, 1, 1), Int64Index(range(0, -20, -1)), ), ( RangeIndex(0, 10, 2), RangeIndex(1, 10, 2), RangeIndex(0, 10, 1), Int64Index(list(range(0, 10, 2)) + list(range(1, 10, 2))), ), ( RangeIndex(0, 11, 2), RangeIndex(1, 12, 2), RangeIndex(0, 12, 1), Int64Index(list(range(0, 11, 2)) + list(range(1, 12, 2))), ), ( RangeIndex(0, 21, 4), RangeIndex(-2, 24, 4), RangeIndex(-2, 24, 2), Int64Index(list(range(0, 21, 4)) + list(range(-2, 24, 4))), ), ( RangeIndex(0, -20, -2), RangeIndex(-1, -21, -2), RangeIndex(-19, 1, 1), Int64Index(list(range(0, -20, -2)) + list(range(-1, -21, -2))), ), ( RangeIndex(0, 100, 5), RangeIndex(0, 100, 20), RangeIndex(0, 100, 5), Int64Index(range(0, 100, 5)), ), ( RangeIndex(0, -100, -5), RangeIndex(5, -100, -20), RangeIndex(-95, 10, 5), Int64Index(list(range(0, -100, -5)) + [5]), ), ( RangeIndex(0, -11, -1), RangeIndex(1, -12, -4), RangeIndex(-11, 2, 1), Int64Index(list(range(0, -11, -1)) + [1, -11]), ), (RangeIndex(0), RangeIndex(0), RangeIndex(0), RangeIndex(0)), ( RangeIndex(0, -10, -2), RangeIndex(0), RangeIndex(0, -10, -2), RangeIndex(0, -10, -2), ), ( RangeIndex(0, 100, 2), RangeIndex(100, 150, 200), RangeIndex(0, 102, 2), Int64Index(range(0, 102, 2)), ), ( RangeIndex(0, -100, -2), RangeIndex(-100, 50, 102), RangeIndex(-100, 4, 2), Int64Index(list(range(0, -100, -2)) + [-100, 2]), ), ( RangeIndex(0, -100, -1), RangeIndex(0, -50, -3), RangeIndex(-99, 1, 1), Int64Index(list(range(0, -100, -1))), ), ( RangeIndex(0, 1, 1), RangeIndex(5, 6, 10), RangeIndex(0, 6, 5), Int64Index([0, 5]), ), ( RangeIndex(0, 10, 5), RangeIndex(-5, -6, -20), RangeIndex(-5, 10, 5), Int64Index([0, 5, -5]), ), ( RangeIndex(0, 3, 1), RangeIndex(4, 5, 1), Int64Index([0, 1, 2, 4]), Int64Index([0, 1, 2, 4]), ), ( RangeIndex(0, 10, 1), Int64Index([]), RangeIndex(0, 10, 1), RangeIndex(0, 10, 1), ), ( RangeIndex(0), Int64Index([1, 5, 6]), Int64Index([1, 5, 6]), Int64Index([1, 5, 6]), ), ] ) def unions(self, request): """Inputs and expected outputs for RangeIndex.union tests""" return request.param def test_union_sorted(self, unions): idx1, idx2, expected_sorted, expected_notsorted = unions res1 = idx1.union(idx2, sort=None) tm.assert_index_equal(res1, expected_sorted, 
exact=True) res1 = idx1.union(idx2, sort=False) tm.assert_index_equal(res1, expected_notsorted, exact=True) res2 = idx2.union(idx1, sort=None) res3 = idx1._int64index.union(idx2, sort=None) tm.assert_index_equal(res2, expected_sorted, exact=True) tm.assert_index_equal(res3, expected_sorted) def test_difference(self): # GH#12034 Cases where we operate against another RangeIndex and may # get back another RangeIndex obj = RangeIndex.from_range(range(1, 10), name="foo") result = obj.difference(obj) expected = RangeIndex.from_range(range(0), name="foo") tm.assert_index_equal(result, expected, exact=True) result = obj.difference(expected.rename("bar")) tm.assert_index_equal(result, obj.rename(None), exact=True) result = obj.difference(obj[:3]) tm.assert_index_equal(result, obj[3:], exact=True) result = obj.difference(obj[-3:]) tm.assert_index_equal(result, obj[:-3], exact=True) result = obj[::-1].difference(obj[-3:]) tm.assert_index_equal(result, obj[:-3][::-1], exact=True) result = obj[::-1].difference(obj[-3:][::-1]) tm.assert_index_equal(result, obj[:-3][::-1], exact=True) result = obj.difference(obj[2:6]) expected = Int64Index([1, 2, 7, 8, 9], name="foo") tm.assert_index_equal(result, expected) def test_difference_mismatched_step(self): obj = RangeIndex.from_range(range(1, 10), name="foo") result = obj.difference(obj[::2]) expected = obj[1::2]._int64index tm.assert_index_equal(result, expected, exact=True) result = obj.difference(obj[1::2]) expected = obj[::2]._int64index tm.assert_index_equal(result, expected, exact=True) def test_symmetric_difference(self): # GH#12034 Cases where we operate against another RangeIndex and may # get back another RangeIndex left = RangeIndex.from_range(range(1, 10), name="foo") result = left.symmetric_difference(left) expected = RangeIndex.from_range(range(0), name="foo") tm.assert_index_equal(result, expected) result = left.symmetric_difference(expected.rename("bar")) tm.assert_index_equal(result, left.rename(None)) result = left[:-2].symmetric_difference(left[2:]) expected = Int64Index([1, 2, 8, 9], name="foo") tm.assert_index_equal(result, expected) right = RangeIndex.from_range(range(10, 15)) result = left.symmetric_difference(right) expected = RangeIndex.from_range(range(1, 15)) tm.assert_index_equal(result, expected) result = left.symmetric_difference(right[1:]) expected = Int64Index([1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14]) tm.assert_index_equal(result, expected)
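
# --- Illustrative sketch (added for clarity; not part of the original test
# module). The behaviour exercised above in plain terms: set operations
# between RangeIndex objects are computed from start/stop/step where
# possible; whether the result object is itself a RangeIndex (rather than a
# materialised integer index) depends on the inputs and pandas version, but
# the element values are always as below.
def _range_index_setops_sketch():
    left = RangeIndex(0, 20, 2)    # 0, 2, ..., 18
    right = RangeIndex(4, 40, 4)   # 4, 8, ..., 36

    assert list(left.intersection(right)) == [4, 8, 12, 16]
    assert list(left.difference(left[:3])) == list(range(6, 20, 2))
    assert sorted(left.union(right, sort=None)) == sorted(set(left) | set(right))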
bsd-3-clause
zrhans/python
exemplos/Examples.lnk/bokeh/plotting/file/glucose.py
2
1520
import pandas as pd from bokeh.sampledata.glucose import data from bokeh.plotting import * output_file("glucose.html", title="glucose.py example") TOOLS = "pan,wheel_zoom,box_zoom,reset,save" p1 = figure(x_axis_type="datetime", tools=TOOLS) p1.line(data.index, data['glucose'], color='red', legend='glucose') p1.line(data.index, data['isig'], color='blue', legend='isig') p1.title = "Glucose Measurements" p1.xaxis.axis_label = 'Date' p1.yaxis.axis_label = 'Value' day = data.ix['2010-10-06'] highs = day[day['glucose'] > 180] lows = day[day['glucose'] < 80] p2 = figure(x_axis_type="datetime", tools=TOOLS) p2.line(day.index.to_series(), day['glucose'], line_color="gray", line_dash="4 4", line_width=1, legend="glucose") p2.circle(highs.index, highs['glucose'], size=6, color='tomato', legend="high") p2.circle(lows.index, lows['glucose'], size=6, color='navy', legend="low") p2.title = "Glucose Range" p2.xgrid[0].grid_line_color=None p2.ygrid[0].grid_line_alpha=0.5 p2.xaxis.axis_label = 'Time' p2.yaxis.axis_label = 'Value' data['inrange'] = (data['glucose'] < 180) & (data['glucose'] > 80) window = 30.5*288 #288 is average number of samples in a month inrange = pd.rolling_sum(data.inrange, window) inrange = inrange.dropna() inrange = inrange/float(window) p3 = figure(x_axis_type="datetime", tools=TOOLS) p3.line(inrange.index, inrange, line_color="navy") p3.title = "Glucose In-Range Rolling Sum" p3.xaxis.axis_label = 'Date' p3.yaxis.axis_label = 'Proportion In-Range' show(VBox(p1,p2,p3))
gpl-2.0
oesteban/mriqc
mriqc/classifier/sklearn/_validation.py
1
7844
#!/usr/bin/env python # -*- coding: utf-8 -*- # @Author: oesteban # @Date: 2017-06-21 16:44:27 import warnings import numbers import time import numpy as np # import scipy.sparse as sp from sklearn.base import is_classifier, clone from sklearn.utils import indexable, check_random_state, safe_indexing from sklearn.utils.validation import _num_samples from sklearn.utils.metaestimators import _safe_split from sklearn.externals.joblib import Parallel, delayed, logger from sklearn.metrics.scorer import check_scoring from sklearn.exceptions import FitFailedWarning from sklearn.model_selection._split import check_cv from sklearn.model_selection._validation import _index_param_value # from sklearn.preprocessing import LabelEncoder from ... import logging LOG = logging.getLogger('mriqc.classifier') def cross_val_score(estimator, X, y=None, groups=None, scoring=None, cv=None, n_jobs=1, verbose=0, fit_params=None, pre_dispatch='2*n_jobs'): """ Evaluate a score by cross-validation """ if not isinstance(scoring, (list, tuple)): scoring = [scoring] X, y, groups = indexable(X, y, groups) cv = check_cv(cv, y, classifier=is_classifier(estimator)) splits = list(cv.split(X, y, groups)) scorer = [check_scoring(estimator, scoring=s) for s in scoring] # We clone the estimator to make sure that all the folds are # independent, and that it is pickle-able. parallel = Parallel(n_jobs=n_jobs, verbose=verbose, pre_dispatch=pre_dispatch) scores = parallel(delayed(_fit_and_score)(clone(estimator), X, y, scorer, train, test, verbose, None, fit_params) for train, test in splits) group_order = [] if hasattr(cv, 'groups'): group_order = [np.array(cv.groups)[test].tolist()[0] for _, test in splits] return np.squeeze(np.array(scores)), group_order def _fit_and_score(estimator, X, y, scorer, train, test, verbose, parameters, fit_params, return_train_score=False, return_parameters=False, return_n_test_samples=False, return_times=False, error_score='raise'): """ Fit estimator and compute scores for a given dataset split. """ if verbose > 1: if parameters is None: msg = '' else: msg = '%s' % (', '.join('%s=%s' % (k, v) for k, v in parameters.items())) LOG.info("[CV] %s %s", msg, (64 - len(msg)) * '.') # Adjust length of sample weights fit_params = fit_params if fit_params is not None else {} fit_params = dict([(k, _index_param_value(X, v, train)) for k, v in fit_params.items()]) if parameters is not None: estimator.set_params(**parameters) start_time = time.time() X_train, y_train = _safe_split(estimator, X, y, train) X_test, y_test = _safe_split(estimator, X, y, test, train) try: if y_train is None: estimator.fit(X_train, **fit_params) else: estimator.fit(X_train, y_train, **fit_params) except Exception as e: # Note fit time as time until error fit_time = time.time() - start_time score_time = 0.0 if error_score == 'raise': raise elif isinstance(error_score, numbers.Number): test_score = error_score if return_train_score: train_score = error_score warnings.warn("Classifier fit failed. The score on this train-test" " partition for these parameters will be set to %f. " "Details: \n%r" % (error_score, e), FitFailedWarning) else: raise ValueError("error_score must be the string 'raise' or a" " numeric value. 
(Hint: if using 'raise', please" " make sure that it has been spelled correctly.)") else: fit_time = time.time() - start_time test_score = [_score(estimator, X_test, y_test, s) for s in scorer] score_time = time.time() - start_time - fit_time if return_train_score: train_score = [_score(estimator, X_train, y_train, s) for s in scorer] if verbose > 2: msg += ", score=".join(('%f' % ts for ts in test_score)) if verbose > 1: total_time = score_time + fit_time end_msg = "%s, total=%s" % (msg, logger.short_format_time(total_time)) LOG.info("[CV] %s %s", (64 - len(end_msg)) * '.', end_msg) ret = [train_score, test_score] if return_train_score else [test_score] if return_n_test_samples: ret.append(_num_samples(X_test)) if return_times: ret.extend([fit_time, score_time]) if return_parameters: ret.append(parameters) return ret def _score(estimator, X_test, y_test, scorer): """Compute the score of an estimator on a given test set.""" if y_test is None: score = scorer(estimator, X_test) else: score = scorer(estimator, X_test, y_test) if hasattr(score, 'item'): try: # e.g. unwrap memmapped scalars score = score.item() except ValueError: # non-scalar? pass if not isinstance(score, numbers.Number): raise ValueError("scoring must return a number, got %s (%s) instead." % (str(score), type(score))) return score def permutation_test_score(estimator, X, y, groups=None, cv=None, n_permutations=100, n_jobs=1, random_state=0, verbose=0, scoring=None): """ Evaluate the significance of a cross-validated score with permutations, as in test 1 of [Ojala2010]_. A modification of original sklearn's permutation test score function to evaluate p-value outside this function, so that the score can be reused from outside. .. [Ojala2010] Ojala and Garriga. Permutation Tests for Studying Classifier Performance. The Journal of Machine Learning Research (2010) vol. 11 """ X, y, groups = indexable(X, y, groups) cv = check_cv(cv, y, classifier=is_classifier(estimator)) scorer = check_scoring(estimator, scoring=scoring) random_state = check_random_state(random_state) # We clone the estimator to make sure that all the folds are # independent, and that it is pickle-able. permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)( delayed(_permutation_test_score)( clone(estimator), X, _shuffle(y, groups, random_state), groups, cv, scorer) for _ in range(n_permutations)) permutation_scores = np.array(permutation_scores) return permutation_scores def _permutation_test_score(estimator, X, y, groups, cv, scorer): """Auxiliary function for permutation_test_score""" avg_score = [] for train, test in cv.split(X, y, groups): X_train, y_train = _safe_split(estimator, X, y, train) X_test, y_test = _safe_split(estimator, X, y, test, train) estimator.fit(X_train, y_train) avg_score.append(scorer(estimator, X_test, y_test)) return np.mean(avg_score) def _shuffle(y, groups, random_state): """Return a shuffled copy of y eventually shuffle among same groups.""" if groups is None: indices = random_state.permutation(len(y)) else: indices = np.arange(len(groups)) for group in np.unique(groups): this_mask = (groups == group) indices[this_mask] = random_state.permutation(indices[this_mask]) return safe_indexing(y, indices)
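
# --- Hypothetical usage sketch (added for clarity; not part of the original
# module). It shows how this cross_val_score variant differs from sklearn's:
# `scoring` may be a list of scorers and the raw per-split scores are
# returned (shape: n_splits x n_scorers), together with the group ordering
# when the CV splitter exposes `groups`. The estimator and data are made up.
def _cross_val_score_sketch():
    from sklearn.datasets import make_classification
    from sklearn.ensemble import RandomForestClassifier

    X, y = make_classification(n_samples=60, n_features=10, random_state=0)
    clf = RandomForestClassifier(n_estimators=20, random_state=0)

    scores, group_order = cross_val_score(
        clf, X, y, scoring=['accuracy', 'roc_auc'], cv=3)
    # One row per CV split, one column per scorer; group_order is empty here
    # because a plain (Stratified)KFold splitter has no `groups` attribute.
    assert scores.shape == (3, 2)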
bsd-3-clause
Knuppknou/academia_ai
academia_ai/leafs/leafs.py
1
1435
import numpy as np
import matplotlib.pyplot as plt
import pickle

print("Reloaded leafs!")


class Leaf(object):
    '''Container for a preprocessed leaf image with its label and ID.'''

    def __init__(self, iid=-1, label=-1, matrix=np.zeros((1, 1)), labelstr=''):
        self.image = matrix
        if self.image.shape == (1, 1):
            print('Error: no input matrix given')
        self.label = label
        self.labelstr = labelstr
        self.iid = iid

    def pprint(self):
        print('Leaf with ID ', self.iid, ' and solution ', self.label)


def plot_colours(matriz):
    '''Plot all three colour channels of a leaf.

    Input: matrix (3D).
    Note: this normally does not work because only the green channel is
    preprocessed.
    '''
    plt.subplot(2, 3, 1)
    plt.imshow(matriz[:, :, 0], cmap=plt.cm.Reds)
    plt.subplot(2, 3, 2)
    plt.imshow(matriz[:, :, 1], cmap=plt.cm.Greens)
    plt.subplot(2, 3, 3)
    plt.imshow(matriz[:, :, 2], cmap=plt.cm.Blues)
    plt.tight_layout()
    plt.show()

#############################################


def save_Leafs(Leafs, path="Leafs.pkl"):
    f = open(path, 'wb')
    pickle.dump(Leafs, f)
    f.close()
    print('Saved "Leafs" with', len(Leafs), 'leafs in', path, '.')


def load_Leafs(path="Leafs.pkl"):
    # Load from the already opened file handle (the original opened the file
    # twice and never closed the second handle).
    f = open(path, "rb")
    Leafs = pickle.load(f)
    f.close()
    print('Loaded "Leafs" from path', path, 'with', len(Leafs), 'leafs.')
    return Leafs
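
# --- Hypothetical usage sketch (added for illustration; not part of the
# original module). A random 100x100 array stands in for a real preprocessed
# leaf image, and the label values are made up.
if __name__ == '__main__':
    example = Leaf(iid=1, label=3,
                   matrix=np.random.rand(100, 100), labelstr='example species')
    example.pprint()
    save_Leafs([example], path='example_Leafs.pkl')
    restored = load_Leafs(path='example_Leafs.pkl')
    print(restored[0].image.shape)  # (100, 100)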
mit
chrisburr/scikit-learn
examples/tree/plot_tree_regression_multioutput.py
22
1848
""" =================================================================== Multi-output Decision Tree Regression =================================================================== An example to illustrate multi-output regression with decision tree. The :ref:`decision trees <tree>` is used to predict simultaneously the noisy x and y observations of a circle given a single underlying feature. As a result, it learns local linear regressions approximating the circle. We can see that if the maximum depth of the tree (controlled by the `max_depth` parameter) is set too high, the decision trees learn too fine details of the training data and learn from the noise, i.e. they overfit. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn.tree import DecisionTreeRegressor # Create a random dataset rng = np.random.RandomState(1) X = np.sort(200 * rng.rand(100, 1) - 100, axis=0) y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T y[::5, :] += (0.5 - rng.rand(20, 2)) # Fit regression model regr_1 = DecisionTreeRegressor(max_depth=2) regr_2 = DecisionTreeRegressor(max_depth=5) regr_3 = DecisionTreeRegressor(max_depth=8) regr_1.fit(X, y) regr_2.fit(X, y) regr_3.fit(X, y) # Predict X_test = np.arange(-100.0, 100.0, 0.01)[:, np.newaxis] y_1 = regr_1.predict(X_test) y_2 = regr_2.predict(X_test) y_3 = regr_3.predict(X_test) # Plot the results plt.figure() s = 50 plt.scatter(y[:, 0], y[:, 1], c="navy", s=s, label="data") plt.scatter(y_1[:, 0], y_1[:, 1], c="cornflowerblue", s=s, label="max_depth=2") plt.scatter(y_2[:, 0], y_2[:, 1], c="c", s=s, label="max_depth=5") plt.scatter(y_3[:, 0], y_3[:, 1], c="orange", s=s, label="max_depth=8") plt.xlim([-6, 6]) plt.ylim([-6, 6]) plt.xlabel("data") plt.ylabel("target") plt.title("Multi-output Decision Tree Regression") plt.legend() plt.show()
bsd-3-clause
rajanshah/dx
dx/dx_models.py
3
34610
# # DX Analytics # Base Classes and Model Classes for Simulation # dx_models.py # # DX Analytics is a financial analytics library, mainly for # derviatives modeling and pricing by Monte Carlo simulation # # (c) Dr. Yves J. Hilpisch # The Python Quants GmbH # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see http://www.gnu.org/licenses/. # from dx_frame import * class simulation_class(object): ''' Providing base methods for simulation classes. Attributes ========== name : string name of the object mar_env : instance of market_environment market environment data for simulation corr : boolean True if correlated with other model object Methods ======= generate_time_grid : returns time grid for simulation get_instrument_values: returns the current instrument values (array) ''' def __init__(self, name, mar_env, corr): try: self.name = name self.pricing_date = mar_env.pricing_date self.initial_value = mar_env.get_constant('initial_value') self.volatility = mar_env.get_constant('volatility') self.final_date = mar_env.get_constant('final_date') self.currency = mar_env.get_constant('currency') self.frequency = mar_env.get_constant('frequency') self.paths = mar_env.get_constant('paths') self.discount_curve = mar_env.get_curve('discount_curve') try: # if time_grid in mar_env take this # (for portfolio valuation) self.time_grid = mar_env.get_list('time_grid') except: self.time_grid = None try: # if there are special dates, then add these self.special_dates = mar_env.get_list('special_dates') except: self.special_dates = [] self.instrument_values = None self.correlated = corr if corr is True: # only needed in a portfolio context when # risk factors are correlated self.cholesky_matrix = mar_env.get_list('cholesky_matrix') self.rn_set = mar_env.get_list('rn_set')[self.name] self.random_numbers = mar_env.get_list('random_numbers') except: print "Error parsing market environment." def generate_time_grid(self): start = self.pricing_date end = self.final_date # pandas date_range function # freq = e.g. 'B' for Business Day, # 'W' for Weekly, 'M' for Monthly time_grid = pd.date_range(start=start, end=end, freq=self.frequency).to_pydatetime() time_grid = list(time_grid) # enhance time_grid by start, end and special_dates if start not in time_grid: time_grid.insert(0, start) # insert start date if not in list if end not in time_grid: time_grid.append(end) # insert end date if not in list if len(self.special_dates) > 0: # add all special dates time_grid.extend(self.special_dates) # delete duplicates and sort time_grid = sorted(set(time_grid)) self.time_grid = np.array(time_grid) def get_instrument_values(self, fixed_seed=True): if self.instrument_values is None: # only initiate simulation if there are no instrument values self.generate_paths(fixed_seed=fixed_seed, day_count=365.) elif fixed_seed is False: # also initiate re-simulation when fixed_seed is False self.generate_paths(fixed_seed=fixed_seed, day_count=365.) 
return self.instrument_values class geometric_brownian_motion(simulation_class): ''' Class to generate simulated paths based on the Black-Scholes-Merton geometric Brownian motion model. Attributes ========== name : string name of the object mar_env : instance of market_environment market environment data for simulation corr : boolean True if correlated with other model simulation object Methods ======= update : updates parameters generate_paths : returns Monte Carlo paths given the market environment ''' def __init__(self, name, mar_env, corr=False): super(geometric_brownian_motion, self).__init__(name, mar_env, corr) def update(self, initial_value=None, volatility=None, final_date=None): if initial_value is not None: self.initial_value = initial_value if volatility is not None: self.volatility = volatility if final_date is not None: self.final_date = final_date self.instrument_values = None def generate_paths(self, fixed_seed=False, day_count=365.): if self.time_grid is None: self.generate_time_grid() # method from generic model simulation class # number of dates for time grid M = len(self.time_grid) # number of paths I = self.paths # array initialization for path simulation paths = np.zeros((M, I)) # initialize first date with initial_value paths[0] = self.initial_value if self.correlated is False: # if not correlated generate random numbers rand = sn_random_numbers((1, M, I), fixed_seed=fixed_seed) else: # if correlated use random number object as provided # in market environment rand = self.random_numbers # forward rates for drift of process forward_rates = self.discount_curve.get_forward_rates( self.time_grid, self.paths, dtobjects=True)[1] for t in range(1, len(self.time_grid)): # select the right time slice from the relevant # random number set if self.correlated is False: ran = rand[t] else: ran = np.dot(self.cholesky_matrix, rand[:, t, :]) ran = ran[self.rn_set] dt = (self.time_grid[t] - self.time_grid[t - 1]).days / day_count # difference between two dates as year fraction rt = (forward_rates[t - 1] + forward_rates[t]) / 2 paths[t] = paths[t - 1] * np.exp((rt - 0.5 * self.volatility ** 2) * dt + self.volatility * np.sqrt(dt) * ran) # generate simulated values for the respective date self.instrument_values = paths class jump_diffusion(simulation_class): ''' Class to generate simulated paths based on the Merton (1976) jump diffusion model. Attributes ========== name : string name of the object mar_env : instance of market_environment market environment data for simulation corr : boolean True if correlated with other model object Methods ======= update : updates parameters generate_paths : returns Monte Carlo paths given the market environment ''' def __init__(self, name, mar_env, corr=False): super(jump_diffusion, self).__init__(name, mar_env, corr) try: self.lamb = mar_env.get_constant('lambda') self.mu = mar_env.get_constant('mu') self.delt = mar_env.get_constant('delta') except: print "Error parsing market environment." 
def update(self, initial_value=None, volatility=None, lamb=None, mu=None, delta=None, final_date=None): if initial_value is not None: self.initial_value = initial_value if volatility is not None: self.volatility = volatility if lamb is not None: self.lamb = lamb if mu is not None: self.mu = mu if delta is not None: self.delt = delta if final_date is not None: self.final_date = final_date self.instrument_values = None def generate_paths(self, fixed_seed=False, day_count=365.): if self.time_grid is None: self.generate_time_grid() # method from generic model simulation class # number of dates for time grid M = len(self.time_grid) # number of paths I = self.paths # array initialization for path simulation paths = np.zeros((M, I)) # initialize first date with initial_value paths[0] = self.initial_value if self.correlated is False: # if not correlated generate random numbers sn1 = sn_random_numbers((1, M, I), fixed_seed=fixed_seed) else: # if correlated use random number object as provided # in market environment sn1 = self.random_numbers # Standard normally distributed seudo-random numbers # for the jump component sn2 = sn_random_numbers((1, M, I), fixed_seed=fixed_seed) forward_rates = self.discount_curve.get_forward_rates( self.time_grid, self.paths, dtobjects=True)[1] rj = self.lamb * (np.exp(self.mu + 0.5 * self.delt ** 2) - 1) for t in range(1, len(self.time_grid)): # select the right time slice from the relevant # random number set if self.correlated is False: ran = sn1[t] else: # only with correlation in portfolio context ran = np.dot(self.cholesky_matrix, sn1[:, t, :]) ran = ran[self.rn_set] dt = (self.time_grid[t] - self.time_grid[t - 1]).days / day_count # difference between two dates as year fraction poi = np.random.poisson(self.lamb * dt, I) # Poisson distributed pseudo-random numbers for jump component rt = (forward_rates[t - 1] + forward_rates[t]) / 2 paths[t] = paths[t - 1] * (np.exp((rt - rj- 0.5 * self.volatility ** 2) * dt + self.volatility * np.sqrt(dt) * ran) + (np.exp(self.mu + self.delt * sn2[t]) - 1) * poi) self.instrument_values = paths class stochastic_volatility(simulation_class): ''' Class to generate simulated paths based on the Heston (1993) stochastic volatility model. Attributes ========== name : string name of the object mar_env : instance of market_environment market environment data for simulation corr : boolean True if correlated with other model object Methods ======= update : updates parameters generate_paths : returns Monte Carlo paths given the market environment get_volatility_values : returns array with simulated volatility paths ''' def __init__(self, name, mar_env, corr=False): super(stochastic_volatility, self).__init__(name, mar_env, corr) try: self.kappa = mar_env.get_constant('kappa') self.theta = mar_env.get_constant('theta') self.vol_vol = mar_env.get_constant('vol_vol') self.rho = mar_env.get_constant('rho') self.leverage = np.linalg.cholesky( np.array([[1.0, self.rho], [self.rho, 1.0]])) self.volatility_values = None except: print "Error parsing market environment." 
def update(self, initial_value=None, volatility=None, vol_vol=None, kappa=None, theta=None, final_date=None): if initial_value is not None: self.initial_value = initial_value if volatility is not None: self.volatility = volatility if vol_vol is not None: self.vol_vol = vol_vol if kappa is not None: self.kappa = kappa if theta is not None: self.theta = theta if final_date is not None: self.final_date = final_date self.time_grid = None self.instrument_values = None self.volatility_values = None def generate_paths(self, fixed_seed=True, day_count=365.): if self.time_grid is None: self.generate_time_grid() M = len(self.time_grid) I = self.paths paths = np.zeros((M, I)) va = np.zeros_like(paths) va_ = np.zeros_like(paths) paths[0] = self.initial_value va[0] = self.volatility ** 2 va_[0] = self.volatility ** 2 if self.correlated is False: sn1 = sn_random_numbers((1, M, I), fixed_seed=fixed_seed) else: sn1 = self.random_numbers # Pseudo-random numbers for the stochastic volatility sn2 = sn_random_numbers((1, M, I), fixed_seed=fixed_seed) forward_rates = self.discount_curve.get_forward_rates( self.time_grid, self.paths, dtobjects=True)[1] for t in range(1, len(self.time_grid)): dt = (self.time_grid[t] - self.time_grid[t - 1]).days / day_count if self.correlated is False: ran = sn1[t] else: ran = np.dot(self.cholesky_matrix, sn1[:, t, :]) ran = ran[self.rn_set] rat = np.array([ran, sn2[t]]) rat = np.dot(self.leverage, rat) va_[t] = (va_[t - 1] + self.kappa * (self.theta - np.maximum(0, va_[t - 1])) * dt + np.sqrt(np.maximum(0, va_[t - 1])) * self.vol_vol * np.sqrt(dt) * rat[1]) va[t] = np.maximum(0, va_[t]) rt = (forward_rates[t - 1] + forward_rates[t]) / 2 paths[t] = paths[t - 1] * (np.exp((rt - 0.5 * va[t]) * dt + np.sqrt(va[t]) * np.sqrt(dt) * rat[0])) # moment matching stoch vol part paths[t] -= np.mean(paths[t - 1] * np.sqrt(va[t]) * math.sqrt(dt) * rat[0]) self.instrument_values = paths self.volatility_values = np.sqrt(va) def get_volatility_values(self): if self.volatility_values is None: self.generate_paths(self) return self.volatility_values class stoch_vol_jump_diffusion(simulation_class): ''' Class to generate simulated paths based on the Bates (1996) stochastic volatility jump-diffusion model. Attributes ========== name : string name of the object mar_env : instance of market_environment market environment data for simulation corr : boolean True if correlated with other model object Methods ======= update : updates parameters generate_paths : returns Monte Carlo paths for the market environment get_volatility_values : returns array with simulated volatility paths ''' def __init__(self, name, mar_env, corr=False): super(stoch_vol_jump_diffusion, self).__init__(name, mar_env, corr) try: self.lamb = mar_env.get_constant('lambda') self.mu = mar_env.get_constant('mu') self.delt = mar_env.get_constant('delta') self.rho = mar_env.get_constant('rho') self.leverage = np.linalg.cholesky( np.array([[1.0, self.rho], [self.rho, 1.0]])) self.kappa = mar_env.get_constant('kappa') self.theta = mar_env.get_constant('theta') self.vol_vol = mar_env.get_constant('vol_vol') self.volatility_values = None except: print "Error parsing market environment." 
def update(self, initial_value=None, volatility=None, vol_vol=None, kappa=None, theta=None, lamb=None, mu=None, delta=None, final_date=None): if initial_value is not None: self.initial_value = initial_value if volatility is not None: self.volatility = volatility if vol_vol is not None: self.vol_vol = vol_vol if kappa is not None: self.kappa = kappa if theta is not None: self.theta = theta if lamb is not None: self.lamb = lamb if mu is not None: self.mu = mu if delta is not None: self.delt = delta if final_date is not None: self.final_date = final_date self.time_grid = None self.instrument_values = None self.volatility_values = None def generate_paths(self, fixed_seed=True, day_count=365.): if self.time_grid is None: self.generate_time_grid() M = len(self.time_grid) I = self.paths paths = np.zeros((M, I)) va = np.zeros_like(paths) va_ = np.zeros_like(paths) paths[0] = self.initial_value va[0] = self.volatility ** 2 va_[0] = self.volatility ** 2 if self.correlated is False: sn1 = sn_random_numbers((1, M, I), fixed_seed=fixed_seed) else: sn1 = self.random_numbers # Pseudo-random numbers for the jump component sn2 = sn_random_numbers((1, M, I), fixed_seed=fixed_seed) # Pseudo-random numbers for the stochastic volatility sn3 = sn_random_numbers((1, M, I), fixed_seed=fixed_seed) forward_rates = self.discount_curve.get_forward_rates( self.time_grid, self.paths, dtobjects=True)[1] rj = self.lamb * (np.exp(self.mu + 0.5 * self.delt ** 2) - 1) for t in range(1, len(self.time_grid)): dt = (self.time_grid[t] - self.time_grid[t - 1]).days / day_count if self.correlated is False: ran = sn1[t] else: ran = np.dot(self.cholesky_matrix, sn1[:, t, :]) ran = ran[self.rn_set] rat = np.array([ran, sn3[t]]) rat = np.dot(self.leverage, rat) va_[t] = (va_[t - 1] + self.kappa * (self.theta - np.maximum(0, va_[t - 1])) * dt + np.sqrt(np.maximum(0, va_[t - 1])) * self.vol_vol * np.sqrt(dt) * rat[1]) va[t] = np.maximum(0, va_[t]) poi = np.random.poisson(self.lamb * dt, I) rt = (forward_rates[t - 1] + forward_rates[t]) / 2 paths[t] = paths[t - 1] * (np.exp((rt - rj - 0.5 * va[t]) * dt + np.sqrt(va[t]) * np.sqrt(dt) * rat[0]) + (np.exp(self.mu + self.delt * sn2[t]) - 1) * poi) # moment matching stoch vol part paths[t] -= np.mean(paths[t - 1] * np.sqrt(va[t]) * math.sqrt(dt) * rat[0]) self.instrument_values = paths self.volatility_values = np.sqrt(va) def get_volatility_values(self): if self.volatility_values is None: self.generate_paths(self) return self.volatility_values class square_root_diffusion(simulation_class): ''' Class to generate simulated paths based on the Cox-Ingersoll-Ross (1985) square-root diffusion. Attributes ========== name : string name of the object mar_env : instance of market_environment market environment data for simulation corr : boolean True if correlated with other model object Methods ======= update : updates parameters generate_paths : returns Monte Carlo paths given the market environment ''' def __init__(self, name, mar_env, corr=False): super(square_root_diffusion, self).__init__(name, mar_env, corr) try: self.kappa = mar_env.get_constant('kappa') self.theta = mar_env.get_constant('theta') except: print "Error parsing market environment." 
def update(self, initial_value=None, volatility=None, kappa=None, theta=None, final_date=None): if initial_value is not None: self.initial_value = initial_value if volatility is not None: self.volatility = volatility if kappa is not None: self.kappa = kappa if theta is not None: self.theta = theta if final_date is not None: self.final_date = final_date self.instrument_values = None def generate_paths(self, fixed_seed=True, day_count=365.): if self.time_grid is None: self.generate_time_grid() M = len(self.time_grid) I = self.paths paths = np.zeros((M, I)) paths_ = np.zeros_like(paths) paths[0] = self.initial_value paths_[0] = self.initial_value if self.correlated is False: rand = sn_random_numbers((1, M, I), fixed_seed=fixed_seed) else: rand = self.random_numbers for t in range(1, len(self.time_grid)): dt = (self.time_grid[t] - self.time_grid[t - 1]).days / day_count if self.correlated is False: ran = rand[t] else: ran = np.dot(self.cholesky_matrix, rand[:, t, :]) ran = ran[self.rn_set] # full truncation Euler discretization paths_[t] = (paths_[t - 1] + self.kappa * (self.theta - np.maximum(0, paths_[t - 1])) * dt + np.sqrt(np.maximum(0, paths_[t - 1])) * self.volatility * np.sqrt(dt) * ran) paths[t] = np.maximum(0, paths_[t]) self.instrument_values = paths class stochastic_short_rate(object): ''' Class for discounting based on stochastic short rates based on square-root diffusion process. Attributes ========== name : string name of the object mar_env : market_environment object containing all relevant parameters Methods ======= get_forward_rates : return forward rates given a time list/array get_discount_factors : return discount factors given a time list/array ''' def __init__(self, name, mar_env): self.name = name try: try: mar_env.get_curve('discount_curve') except: mar_env.add_curve('discount_curve', 0.0) # dummy try: mar_env.get_constant('currency') except: mar_env.add_constant('currency', 'CUR') # dummy self.process = square_root_diffusion('process', mar_env) self.process.generate_paths() except: raise ValueError('Error parsing market environment.') def get_forward_rates(self, time_list, paths, dtobjects=True): if len(self.process.time_grid) != len(time_list) \ or self.process.paths != paths: self.process.paths = paths self.process.time_grid = time_list self.process.instrument_values = None rates = self.process.get_instrument_values() return time_list, rates def get_discount_factors(self, time_list, paths, dtobjects=True): discount_factors = [] if dtobjects is True: dlist = get_year_deltas(time_list) else: dlist = time_list forward_rate = self.get_forward_rates(time_list, paths, dtobjects)[1] for no in range(len(dlist)): factor = np.zeros_like(forward_rate[0, :]) for d in range(no, len(dlist) - 1): factor += ((dlist[d + 1] - dlist[d]) * (0.5 * (forward_rate[d + 1] + forward_rate[d]))) discount_factors.append(np.exp(-factor)) return time_list, np.array(discount_factors) def srd_forwards(initial_value, (kappa, theta, sigma), time_grid): ''' Function for forward vols/rates in SRD model. 
Parameters ========== initial_value: float initial value of the process kappa: float mean-reversion factor theta: float long-run mean sigma: float volatility factor (vol-vol) time_grid: list/array of datetime object dates to generate forwards for Returns ======= forwards: array forward vols/rates ''' t = get_year_deltas(time_grid) g = math.sqrt(kappa ** 2 + 2 * sigma ** 2) sum1 = ((kappa * theta * (np.exp(g * t) - 1)) / (2 * g + (kappa + g) * (np.exp(g * t) - 1))) sum2 = initial_value * ((4 * g ** 2 * np.exp(g * t)) / (2 * g + (kappa + g) * (np.exp(g * t) - 1)) ** 2) forwards = sum1 + sum2 return forwards class square_root_jump_diffusion(simulation_class): ''' Class to generate simulated paths based on the square-root jump diffusion model. Attributes ========== name : string name of the object mar_env : instance of market_environment market environment data for simulation corr : boolean True if correlated with other model object Methods ======= update : updates parameters generate_paths : returns Monte Carlo paths for the market environment ''' def __init__(self, name, mar_env, corr=False): super(square_root_jump_diffusion, self).__init__(name, mar_env, corr) try: self.kappa = mar_env.get_constant('kappa') self.theta = mar_env.get_constant('theta') self.lamb = mar_env.get_constant('lambda') self.mu = mar_env.get_constant('mu') self.delt = mar_env.get_constant('delta') except: print "Error parsing market environment." def update(self, initial_value=None, volatility=None, kappa=None, theta=None, lamb=None, mu=None, delt=None, final_date=None): if initial_value is not None: self.initial_value = initial_value if volatility is not None: self.volatility = volatility if kappa is not None: self.kappa = kappa if theta is not None: self.theta = theta if lamb is not None: self.lamb = lamb if mu is not None: self.mu = mu if delt is not None: self.delt = delt if final_date is not None: self.final_date = final_date self.instrument_values = None self.time_grid = None def generate_paths(self, fixed_seed=True, day_count=365.): if self.time_grid is None: self.generate_time_grid() M = len(self.time_grid) I = self.paths paths = np.zeros((M, I)) paths_ = np.zeros_like(paths) paths[0] = self.initial_value paths_[0] = self.initial_value if self.correlated is False: rand = sn_random_numbers((1, M, I), fixed_seed=fixed_seed) else: rand = self.random_numbers snr = sn_random_numbers((1, M, I), fixed_seed=fixed_seed) rj = self.lamb * (np.exp(self.mu + 0.5 * self.delt ** 2) - 1) for t in range(1, len(self.time_grid)): dt = (self.time_grid[t] - self.time_grid[t - 1]).days / day_count if self.correlated is False: ran = rand[t] else: ran = np.dot(self.cholesky_matrix, rand[:, t, :]) ran = ran[self.rn_set] poi = np.random.poisson(self.lamb * dt, I) # full truncation Euler discretization paths_[t, :] = (paths_[t - 1, :] + self.kappa * (self.theta - np.maximum(0, paths_[t - 1, :])) * dt + np.sqrt(np.maximum(0, paths_[t - 1, :])) * self.volatility * np.sqrt(dt) * ran + ((np.exp(self.mu + self.delt * snr[t]) - 1) * poi) * np.maximum(0, paths_[t - 1, :]) - rj * dt) paths[t, :] = np.maximum(0, paths_[t, :]) self.instrument_values = paths class square_root_jump_diffusion_plus(square_root_jump_diffusion): ''' Class to generate simulated paths based on the square-root jump diffusion model with term structure. 
Attributes ========== name : string name of the object mar_env : instance of market_environment market environment data for simulation corr : boolean True if correlated with other model object Methods ======= srd_forward_error : error function for forward rate/vols calibration generate_shift_base : generates a shift base to take term structure into account update : updates parameters update_shift_values : updates shift values for term structure generate_paths : returns Monte Carlo paths for the market environment update_forward_rates : updates forward rates (vol, int. rates) for given time grid ''' def __init__(self, name, mar_env, corr=False): super(square_root_jump_diffusion_plus, self).__init__(name, mar_env, corr) try: self.term_structure = mar_env.get_curve('term_structure') except: self.term_structure = None print "Missing Term Structure." self.forward_rates = [] self.shift_base = None self.shift_values = [] def srd_forward_error(self, p0): if p0[0] < 0 or p0[1] < 0 or p0[2] < 0: return 100 f_model = srd_forwards(self.initial_value, p0, self.term_structure[:, 0]) MSE = np.sum((self.term_structure[:, 1] - f_model) ** 2) / len(f_model) return MSE def generate_shift_base(self, p0): # calibration opt = sco.fmin(self.srd_forward_error, p0) # shift_calculation f_model = srd_forwards(self.initial_value, opt, self.term_structure[:, 0]) shifts = self.term_structure[:, 1] - f_model self.shift_base = np.array((self.term_structure[:, 0], shifts)).T def update_shift_values(self, k=1): if self.shift_base is not None: t = get_year_deltas(self.shift_base[:, 0]) tck = sci.splrep(t, self.shift_base[:, 1], k=k) self.generate_time_grid() st = get_year_deltas(self.time_grid) self.shift_values = np.array(zip(self.time_grid, sci.splev(st, tck, der=0))) else: self.shift_values = np.array(zip(self.time_grid, np.zeros(len(self.time_grid)))) def generate_paths(self, fixed_seed=True, day_count=365.): if self.time_grid is None: self.generate_time_grid() self.update_shift_values() M = len(self.time_grid) I = self.paths paths = np.zeros((M, I)) paths_ = np.zeros_like(paths) paths[0] = self.initial_value paths_[0] = self.initial_value if self.correlated is False: rand = sn_random_numbers((1, M, I), fixed_seed=fixed_seed) else: rand = self.random_numbers snr = sn_random_numbers((1, M, I), fixed_seed=fixed_seed) forward_rates = self.discount_curve.get_forward_rates( self.time_grid, dtobjects=True) rj = self.lamb * (np.exp(self.mu + 0.5 * self.delt ** 2) - 1) for t in range(1, len(self.time_grid)): dt = (self.time_grid[t] - self.time_grid[t - 1]).days / day_count if self.correlated is False: ran = rand[t] else: ran = np.dot(self.cholesky_matrix, rand[:, t, :]) ran = ran[self.rn_set] poi = np.random.poisson(self.lamb * dt, I) # full truncation Euler discretization paths_[t] = (paths_[t - 1] + self.kappa * (self.theta - np.maximum(0, paths_[t - 1])) * dt + np.sqrt(np.maximum(0, paths_[t - 1])) * self.volatility * np.sqrt(dt) * ran + ((np.exp(self.mu + self.delt * snr[t]) - 1) * poi) * np.maximum(0, paths_[t - 1]) - rj * dt) paths[t] = np.maximum(0, paths_[t]) + self.shift_values[t, 1] self.instrument_values = paths def update_forward_rates(self, time_grid=None): if time_grid is None: self.generate_time_grid() time_grid = self.time_grid t = get_year_deltas(time_grid) g = np.sqrt(self.kappa ** 2 + 2 * self.volatility ** 2) sum1 = ((self.kappa * self.theta * (np.exp(g * t) - 1)) / (2 * g + (self.kappa + g) * (np.exp(g * t) - 1))) sum2 = self.initial_value * ((4 * g ** 2 * np.exp(g * t)) / (2 * g + (self.kappa + g) * 
(np.exp(g * t) - 1)) ** 2) self.forward_rates = np.array(zip(time_grid, sum1 + sum2)) class general_underlying(object): ''' Needed for VAR-based portfolio modeling and valuation. ''' def __init__(self, name, data, val_env): self.name = name self.data = data self.paths = val_env.get_constant('paths') self.frequency = 'B' self.discount_curve = val_env.get_curve('discount_curve') self.special_dates = [] self.time_grid = val_env.get_list('time_grid') self.fit_model = None def get_instrument_values(self, fixed_seed=False): return self.data.values
agpl-3.0
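For quick reference, the forward-curve expression used by srd_forwards and update_forward_rates in the file above can be tried in isolation. The sketch below is not part of the library: it assumes time is already given as year fractions (the library derives them from datetime grids via get_year_deltas) and simply mirrors the same closed-form expression.

import numpy as np

def srd_forwards_sketch(initial_value, kappa, theta, sigma, t):
    # Forward rates/vols implied by a square-root diffusion, mirroring the
    # expression in srd_forwards above; t is an array of year fractions.
    t = np.asarray(t, dtype=float)
    g = np.sqrt(kappa ** 2 + 2 * sigma ** 2)
    denom = 2 * g + (kappa + g) * (np.exp(g * t) - 1)
    sum1 = kappa * theta * (np.exp(g * t) - 1) / denom
    sum2 = initial_value * 4 * g ** 2 * np.exp(g * t) / denom ** 2
    return sum1 + sum2

# At t = 0 the formula reproduces the initial value exactly; the parameter
# values here are illustrative only.
print(srd_forwards_sketch(0.02, kappa=2.0, theta=0.05, sigma=0.1,
                          t=[0.0, 1.0, 5.0, 30.0]))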
samueljackson92/major-project
src/tests/test_utils.py
1
1768
import numpy as np
import os.path
import nose.tools
import tests
import pandas as pd
from skimage import filters, io


def assert_lists_equal(a, b):
    """Check if two lists are equal"""
    nose.tools.assert_true(len(a) == len(b))
    nose.tools.assert_true(sorted(a) == sorted(b))


def assert_data_frame_columns_match(df, columns):
    """Check if the columns of a data frame match as list"""
    for name, exp_name in zip(df.columns, columns):
        nose.tools.assert_equal(name, exp_name)


def load_file(file_name):
    """Load a testing image"""
    path = get_file_path(file_name)
    return io.imread(path, as_grey=True)


def get_file_path(file_name):
    """Get the path to a test file"""
    return os.path.abspath(os.path.join(tests.TEST_DATA_FOLDER, file_name))


def load_data_frame(path):
    """Get a reference result as a pandas data frame"""
    return pd.DataFrame.from_csv(get_file_path(path))


def generate_linear_structure(size, with_noise=False):
    """Generate a basic linear structure, optionally with noise"""
    linear_structure = np.zeros(shape=(size, size))
    linear_structure[:, size/2] = np.ones(size)

    if with_noise:
        linear_structure = np.identity(size)
        noise = np.random.rand(size, size) * 0.1
        linear_structure += noise
        linear_structure = filters.gaussian_filter(linear_structure, 1.5)

    return linear_structure


def generate_blob():
    """
    Generate a blob by drawing from the normal distribution across two axes
    and binning it to the required size
    """
    mean = [0, 0]
    cov = [[1, 0], [0, 1]]  # diagonal covariance, points lie on x or y-axis
    x, y = np.random.multivariate_normal(mean, cov, 50000).T
    h, xedges, yedges = np.histogram2d(x, y, bins=100)
    return h
mit
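The generate_blob helper above works by binning a 2-D normal sample into a histogram image. A minimal standalone illustration of that idea (the seed and sizes below are chosen for reproducibility, not taken from the project):

import numpy as np

np.random.seed(0)
mean, cov = [0, 0], [[1, 0], [0, 1]]           # standard 2-D normal
x, y = np.random.multivariate_normal(mean, cov, 50000).T
blob, xedges, yedges = np.histogram2d(x, y, bins=100)
print(blob.shape, int(blob.sum()))             # (100, 100) and 50000 binned points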
Akshay0724/scikit-learn
sklearn/feature_selection/tests/test_base.py
98
3681
import numpy as np from scipy import sparse as sp from numpy.testing import assert_array_equal from sklearn.base import BaseEstimator from sklearn.feature_selection.base import SelectorMixin from sklearn.utils import check_array from sklearn.utils.testing import assert_raises, assert_equal class StepSelector(SelectorMixin, BaseEstimator): """Retain every `step` features (beginning with 0)""" def __init__(self, step=2): self.step = step def fit(self, X, y=None): X = check_array(X, 'csc') self.n_input_feats = X.shape[1] return self def _get_support_mask(self): mask = np.zeros(self.n_input_feats, dtype=bool) mask[::self.step] = True return mask support = [True, False] * 5 support_inds = [0, 2, 4, 6, 8] X = np.arange(20).reshape(2, 10) Xt = np.arange(0, 20, 2).reshape(2, 5) Xinv = X.copy() Xinv[:, 1::2] = 0 y = [0, 1] feature_names = list('ABCDEFGHIJ') feature_names_t = feature_names[::2] feature_names_inv = np.array(feature_names) feature_names_inv[1::2] = '' def test_transform_dense(): sel = StepSelector() Xt_actual = sel.fit(X, y).transform(X) Xt_actual2 = StepSelector().fit_transform(X, y) assert_array_equal(Xt, Xt_actual) assert_array_equal(Xt, Xt_actual2) # Check dtype matches assert_equal(np.int32, sel.transform(X.astype(np.int32)).dtype) assert_equal(np.float32, sel.transform(X.astype(np.float32)).dtype) # Check 1d list and other dtype: names_t_actual = sel.transform([feature_names]) assert_array_equal(feature_names_t, names_t_actual.ravel()) # Check wrong shape raises error assert_raises(ValueError, sel.transform, np.array([[1], [2]])) def test_transform_sparse(): sparse = sp.csc_matrix sel = StepSelector() Xt_actual = sel.fit(sparse(X)).transform(sparse(X)) Xt_actual2 = sel.fit_transform(sparse(X)) assert_array_equal(Xt, Xt_actual.toarray()) assert_array_equal(Xt, Xt_actual2.toarray()) # Check dtype matches assert_equal(np.int32, sel.transform(sparse(X).astype(np.int32)).dtype) assert_equal(np.float32, sel.transform(sparse(X).astype(np.float32)).dtype) # Check wrong shape raises error assert_raises(ValueError, sel.transform, np.array([[1], [2]])) def test_inverse_transform_dense(): sel = StepSelector() Xinv_actual = sel.fit(X, y).inverse_transform(Xt) assert_array_equal(Xinv, Xinv_actual) # Check dtype matches assert_equal(np.int32, sel.inverse_transform(Xt.astype(np.int32)).dtype) assert_equal(np.float32, sel.inverse_transform(Xt.astype(np.float32)).dtype) # Check 1d list and other dtype: names_inv_actual = sel.inverse_transform([feature_names_t]) assert_array_equal(feature_names_inv, names_inv_actual.ravel()) # Check wrong shape raises error assert_raises(ValueError, sel.inverse_transform, np.array([[1], [2]])) def test_inverse_transform_sparse(): sparse = sp.csc_matrix sel = StepSelector() Xinv_actual = sel.fit(sparse(X)).inverse_transform(sparse(Xt)) assert_array_equal(Xinv, Xinv_actual.toarray()) # Check dtype matches assert_equal(np.int32, sel.inverse_transform(sparse(Xt).astype(np.int32)).dtype) assert_equal(np.float32, sel.inverse_transform(sparse(Xt).astype(np.float32)).dtype) # Check wrong shape raises error assert_raises(ValueError, sel.inverse_transform, np.array([[1], [2]])) def test_get_support(): sel = StepSelector() sel.fit(X, y) assert_array_equal(support, sel.get_support()) assert_array_equal(support_inds, sel.get_support(indices=True))
bsd-3-clause
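The StepSelector in the test file above exists only to exercise the selector mixin's mask semantics: transform keeps the supported columns, inverse_transform scatters them back and zero-fills the rest. A dependency-light sketch of that behaviour in plain NumPy (array shapes chosen to match the test fixtures):

import numpy as np

X = np.arange(20).reshape(2, 10)
mask = np.zeros(10, dtype=bool)
mask[::2] = True                 # keep every 2nd feature, like step=2

Xt = X[:, mask]                  # what "transform" produces
Xinv = np.zeros_like(X)
Xinv[:, mask] = Xt               # what "inverse_transform" produces
print(Xt)
print(Xinv)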
sebchalmers/WTGen
SplineGen/SplineGen.py
3
5106
# -*- coding: utf-8 -*- """ Created on Fri Nov 16 20:18:08 2012 @author: sebastien Create a Cp/Ct table interpolation with sensitivity generation """ import numpy as np from scipy import interpolate import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D import scipy.io #Some functions def MakeGrid(argx,argy): gridx = [[argx[i] for j in range(len(argy))] for i in range(len(argx))] gridy = [[argy[i] for j in range(len(argx))] for i in range(len(argy))] return np.array(gridx).T, np.array(gridy) #### Compute data for derivatives def Dx(P,knots_x,m,n,p): Px = np.zeros([m-1,n]) for k1 in range(m-1): for k2 in range(n): Px[k1,k2] = p*(P[k1+1,k2] - P[k1,k2])/(knots_x[k1+p+1]- knots_x[k1+1]) Px = Px.reshape(n*(m-1),) Ux = [[0. for k in range(p)]] Ux.append([knots_x[k] for k in range(p+1,m+1)]) Ux.append([1.0 for k in range(p)]) Ux = np.concatenate(Ux) return Px, Ux def Dy(P,knots_y,m,n,q): Py = np.zeros([m,n-1]) for k1 in range(m): for k2 in range(n-1): Py[k1,k2] = q*(P[k1,k2+1] - P[k1,k2])/(knots_y[k2+q+1]- knots_y[k2+1]) Py = Py.reshape(m*(n-1),) Uy = [[0. for k in range(q)]] Uy.append([knots_y[k] for k in range(q+1,n+1)]) Uy.append([1.0 for k in range(q)]) Uy = np.concatenate(Uy) return Py, Uy def writeData(varname, data, vartype, fileobj): # Function to write a given variable if not(isinstance(data,list)): Line = 'const ' + vartype+' ' + varname + ' = ' + str(data) + ';' else: Line = 'const ' + vartype+' ' + varname + '[' + str(len(data)) + '] = {' lendata = len(data) for k in range(lendata): Line += str(data[k]) if (k < lendata-1): Line += ',' Line += '};' fileobj.write(Line+'\n') def SplineGen(beta,lambda_,C,name): beta_shift = np.min(beta) lambda_shift = np.min(lambda_) beta -= beta_shift lambda_ -= lambda_shift betagrid, lambdagrid = MakeGrid(beta,lambda_) tck = interpolate.bisplrep(betagrid,lambdagrid,C,s=1e-2) #spline order knots_x = tck[0] knots_y = tck[1] checkpoints = tck[2] p = tck[3] q = tck[4] assert(p==3) assert(q==3) #P is size mxn, row major representation m = len(knots_x)-p-1 n = len(knots_y)-q-1 P = tck[2].reshape(m,n) Pline = tck[2] Px, Ux = Dx(P,knots_x,m,n,p) Py, Uy = Dy(P,knots_y,m,n,q) Pxx, Uxx = Dx(Px.reshape(m-1,n), Ux, m-1 ,n , p-1) Pyy, Uyy = Dy(Py.reshape(m,n-1), Uy, m ,n-1 , q-1) Pxy, _ = Dy(Px.reshape(m-1,n), knots_y, m-1 ,n , q) #### Write SplineData.h #### fileobj = open(name+'.h','w') ## write variables ## varDictionary = {'n': [n, 'int'], 'm': [m, 'int'], 'p': [p, 'int'], 'q': [q, 'int'], 'x_shift': [beta_shift, 'float'], 'y_shift': [lambda_shift, 'float'], 'knots_x': [list(knots_x),'float'], 'length_knots_x': [len(knots_x), 'int'], 'knots_y': [list(knots_y),'float'], 'length_knots_y': [len(knots_y), 'int'], 'P': [list(Pline), 'float'], 'length_P': [len(Pline), 'int'], 'Px': [list(Px), 'float'], 'length_Px': [len(Px), 'int'], 'Py': [list(Py), 'float'], 'length_Py': [len(Py), 'int'], 'Ux': [list(Ux), 'float'], 'length_Ux': [len(Ux), 'int'], 'Uy': [list(Uy), 'float'], 'length_Uy': [len(Uy), 'int'], 'Uxx': [list(Uxx), 'float'], 'length_Uxx': [len(Uxx), 'int'], 'Uyy': [list(Uyy), 'float'], 'length_Uyy': [len(Uyy), 'int'], 'Pxx': [list(Pxx), 'float'], 'length_Pxx': [len(Pxx), 'int'], 'Pyy': [list(Pyy), 'float'], 'length_Pyy': [len(Pyy), 'int'], 'Pxy': [list(Pxy), 'float'], 'length_Pxy': [len(Pxy), 'int']} for key in varDictionary.keys(): writeData(key, varDictionary[key][0], varDictionary[key][1], fileobj) fileobj.close() return tck, betagrid, lambdagrid
apache-2.0
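The generator above leans on SciPy's FITPACK wrappers to fit the Cp/Ct surface before extracting knots and control points. A hedged usage sketch on synthetic data (the grid, the test function and the smoothing factor below are illustrative only; FITPACK may warn if the requested smoothing needs many knots):

import numpy as np
from scipy import interpolate

x = np.linspace(0.0, 1.0, 20)
y = np.linspace(0.0, 1.0, 25)
xg, yg = np.meshgrid(x, y, indexing='ij')
z = np.sin(2 * np.pi * xg) * np.cos(2 * np.pi * yg)

# Fit a smoothing bicubic spline; tck packs knots_x, knots_y, coefficients
# and the spline degrees, just as the generator above unpacks them.
tck = interpolate.bisplrep(xg, yg, z, s=1e-2)
z_fit = interpolate.bisplev(x, y, tck)
print(np.max(np.abs(z_fit - z)))   # fit error on the original grid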
melqkiades/yelp
source/python/topicmodeling/context/context_utils.py
1
18904
from collections import Counter import cPickle as pickle import json import math import random import itertools import h5py import networkx import sklearn from networkx.algorithms.approximation import dominating_set from networkx.algorithms.approximation import vertex_cover import nltk from nltk.corpus import wordnet from nltk.corpus.reader import Synset import numpy as np import time import scipy.misc from sklearn.cluster import AffinityPropagation, DBSCAN from datamining import cluster_evaluation from datamining.cluster_evaluation import DunnCalculator from topicmodeling.context.sense_clusterer import BaumanSensesGrouper from topicmodeling.context.senses_group import SenseGroup from topicmodeling.context.review import Review from utils.constants import Constants __author__ = 'fpena' from nltk import tokenize def load_reviews(reviews_file): records = [json.loads(line) for line in open(reviews_file)] reviews = [] for record in records: reviews.append(record['text']) return reviews def get_all_nouns(reviews): nouns = set() for review in reviews: nouns |= set(review.nouns) return nouns def remove_nouns_from_reviews(reviews, nouns): for noun in nouns: for review in reviews: if noun in review.nouns: review.nouns.remove(noun) def generate_senses(review): review.senses = set() for noun in review.nouns: review.senses |= set(wordnet.synsets(noun, pos='n')) def generate_all_senses(reviews): all_senses = set() for review in reviews: generate_senses(review) all_senses |= set(review.senses) return all_senses def calculate_word_weighted_frequency(word, reviews): """ :type word: str :param word: :type reviews: list[Review] :param reviews: :rtype: float :return: """ num_reviews = 0.0 for review in reviews: if word in review.nouns: num_reviews += 1 return num_reviews / len(reviews) def build_groups(nouns): print('building groups', time.strftime("%H:%M:%S")) all_senses = set() sense_word_map = {} for noun in nouns: senses = wordnet.synsets(noun, pos='n') all_senses.update(senses) for sense in senses: if sense.name() not in sense_word_map: sense_word_map[sense.name()] = [] sense_word_map[sense.name()].append(noun) all_senses = list(all_senses) all_senses_names = [sense.name() for sense in all_senses] print('number of senses:', len(all_senses)) senses_similarity_matrix = build_sense_similarity_matrix(all_senses) groups = [] bronk2_synset([], all_senses_names, [], groups, senses_similarity_matrix) # bronk2_synset([], all_senses[:], [], groups, all_senses[:]) sense_groups = [] for group in groups: sense_group = SenseGroup(group) for sense in sense_group.senses: sense_group.nouns |= set(sense_word_map[sense]) sense_groups.append(sense_group) print('number of sense groups:', len(sense_groups)) print('finished groups', time.strftime("%H:%M:%S")) return sense_groups def build_sense_similarity_matrix(senses): """ :type senses: list[Synset] :param senses: """ print('building senses similarity matrix', time.strftime("%H:%M:%S")) similarity_matrix = {} for sense in senses: similarity_matrix[sense.name()] = {} index = 1 num_senses = len(senses) for sense1 in senses: similarity_matrix[sense1.name()][sense1.name()] = 1.0 for sense2 in senses[index:]: similarity = sense1.wup_similarity(sense2) similarity_matrix[sense1.name()][sense2.name()] = similarity similarity_matrix[sense2.name()][sense1.name()] = similarity if not index % 100: print('%s: completed %d/%d senses' % (time.strftime("%Y/%d/%m-%H:%M:%S"), index, num_senses)) index += 1 print('finished senses similarity matrix', time.strftime("%H:%M:%S")) return 
similarity_matrix def get_synset_neighbours(synset, similarity_matrix): neighbours = [] for element in similarity_matrix.keys(): if synset == element: continue if similarity_matrix[synset][element] >= 0.7: neighbours.append(element) # if synset.wup_similarity(element) >= 0.9: # neighbours.append(element) return neighbours def is_similar(number1, number2): if math.fabs(number1 - number2) < 3: return True return False def get_neighbours(number, potential_neighbours): neighbours = [] for element in potential_neighbours: if number == element: continue if is_similar(number, element): neighbours.append(element) return neighbours def is_noun_in_group(noun, group): senses = wordnet.synsets(noun, pos='n') return any(i in senses for i in group) def is_group_in_review(group, review): """ :type group list[Synset] :param group: :type review: Review :param review: """ for noun in review.nouns: if is_noun_in_group(noun, group): return True return False def get_text_from_reviews(reviews): """ Receives a list[Review] and extracts the text contained in each review. Returns a list[str]. :type reviews: list[dict] :param reviews: """ text_reviews = [] for review in reviews: text_reviews.append(review[Constants.TEXT_FIELD]) return text_reviews def calculate_group_weighted_frequency(group, reviews): num_reviews = 0.0 for review in reviews: review_senses = set([sense.name() for sense in review.senses]) if not frozenset(review_senses).isdisjoint(frozenset(group.senses)): num_reviews += 1 return num_reviews / len(reviews) def get_context_similarity(context1, context2, topic_indices): # We filter the topic model, selecting only the topics that contain context filtered_context1 = np.array([context1[i[0]] for i in topic_indices]) filtered_context2 = np.array([context2[i[0]] for i in topic_indices]) return 1 / (1 + np.linalg.norm(filtered_context1-filtered_context2)) def choose_pivot(list1, list2): index = random.randint(0, len(list1) + len(list2) - 1) if index < len(list1): return list1[index] else: return list2[index-len(list1)] def list_difference(list1, list2): return [item for item in list1 if item not in list2] def bronk2_synset(clique, candidates, excluded, clique_list, similarity_matrix): if len(candidates) == 0 and len(excluded) == 0: # print clique clique_list.append(clique) return pivot = choose_pivot(candidates, excluded) neighbours = get_synset_neighbours(pivot, similarity_matrix) p_minus_neighbours = list_difference(candidates, neighbours)[:] for vertex in p_minus_neighbours: vertex_neighbours = get_synset_neighbours(vertex, similarity_matrix) new_candidates = [val for val in candidates if val in vertex_neighbours] # p intersects N(vertex) new_excluded = [val for val in excluded if val in vertex_neighbours] # x intersects N(vertex) bronk2_synset(clique + [vertex], new_candidates, new_excluded, clique_list, similarity_matrix) candidates.remove(vertex) excluded.append(vertex) def generate_stats(specific_reviews, generic_reviews): num_specific = float(len(specific_reviews)) num_generic = float(len(generic_reviews)) num_total_reviews = num_specific + num_generic print('Specific reviews: %d (%f %%)' % (num_specific, (num_specific / num_total_reviews * 100))) stat_reviews(specific_reviews) print('Generic reviews %d (%f %%)' % (num_generic, (num_generic / num_total_reviews * 100))) stat_reviews(generic_reviews) def stat_reviews(reviews): """ :type reviews: list[Review] :param reviews: """ tokenizer = nltk.tokenize.RegexpTokenizer(r'\w+') stats = np.zeros(5) num_reviews = len(reviews) for review in reviews: text 
= review.text num_sentences = len(tokenize.sent_tokenize(text)) num_words = len(tokenizer.tokenize(text.lower())) tagged_words = review.tagged_words tags_count = Counter(tag for word, tag in tagged_words) num_past_verbs = float(tags_count['VBD']) num_verbs = tags_count['VB'] + tags_count['VBD'] + tags_count['VBG'] +\ tags_count['VBN'] + tags_count['VBP'] + tags_count['VBZ'] ratio = (num_past_verbs + 1) / (num_verbs + 1) stats[0] += num_sentences stats[1] += num_words stats[2] += num_past_verbs stats[3] += num_verbs stats[4] += ratio for index in range(len(stats)): stats[index] /= num_reviews print('Average sentences:', stats[0]) print('Average words:', stats[1]) print('Average past verbs:', stats[2]) print('Average verbs:', stats[3]) print('Average past verbs ratio:', stats[4]) def create_graph(reviews_file, graph_file): print('%s: start' % time.strftime("%Y/%d/%m-%H:%M:%S")) with open(reviews_file, 'rb') as read_file: reviews = pickle.load(read_file) reviews = reviews[:13] print('num reviews: %d' % len(reviews)) print('%s: loaded reviews' % time.strftime("%Y/%d/%m-%H:%M:%S")) all_nouns = list(get_all_nouns(reviews)) print('num nouns: %d' % len(all_nouns)) print('%s: obtained nouns' % time.strftime("%Y/%d/%m-%H:%M:%S")) all_senses = generate_all_senses(reviews) total_possible_vertices = scipy.misc.comb(len(all_senses), 2, exact=True) print('num senses: %d' % len(all_senses)) print('total possible senses: %d' % total_possible_vertices) print('%s: obtained senses' % time.strftime("%Y/%d/%m-%H:%M:%S")) graph = networkx.Graph() for sense in all_senses: graph.add_node(sense.name()) print('%s: created graph' % time.strftime("%Y/%d/%m-%H:%M:%S")) print('num nodes: %d' % len(graph.nodes())) cycle = 0 for synset1, synset2 in itertools.combinations(all_senses, 2): cycle += 1 if not cycle % 100000: print('sense cycle: %d/%d\r' % (cycle, total_possible_vertices)), if synset1.wup_similarity(synset2) >= 0.7: graph.add_edge(synset1.name(), synset2.name()) print('%s: added vertices' % time.strftime("%Y/%d/%m-%H:%M:%S")) print('num edges: %d' % len(graph.edges())) with open(graph_file, 'wb') as write_file: pickle.dump(graph, write_file, pickle.HIGHEST_PROTOCOL) with open(graph_file, 'rb') as read_file: graph = pickle.load(read_file) print('num nodes: %d' % len(graph.nodes())) print('num edges: %d' % len(graph.edges())) my_dominating_set = dominating_set.min_weighted_dominating_set(graph) print('%s found dominating set' % time.strftime("%Y/%d/%m-%H:%M:%S")) print('dominating set length: %d' % len(my_dominating_set)) my_vertex_cover = vertex_cover.min_weighted_vertex_cover(graph) print('%s found vertex cover' % time.strftime("%Y/%d/%m-%H:%M:%S")) print('vertex cover length: %d' % len(my_vertex_cover)) def build_hdf5_sense_similarity_matrix(senses): """ :type senses: list[Synset] :param senses: """ num_senses = len(senses) hdf5_file = Constants.DATASET_FOLDER + Constants.ITEM_TYPE +\ '_sense_similarity_matrix.hdf5' f = h5py.File(hdf5_file, 'w') similarity_matrix = f.create_dataset( Constants.ITEM_TYPE + "_sense_similarity_matrix", (num_senses, num_senses) ) sense_index_map = {} sense_index = 0 for sense in senses: if sense in sense_index_map: raise ValueError('There are repeated items in the senses iterable') sense_index_map[sense.name()] = sense_index sense_index += 1 sense_index_map_file = Constants.DATASET_FOLDER + Constants.ITEM_TYPE +\ '_sense_index_map.pkl' with open(sense_index_map_file, 'wb') as write_file: pickle.dump(sense_index_map, write_file, pickle.HIGHEST_PROTOCOL) print('building senses 
similarity matrix', time.strftime("%H:%M:%S")) index = 1 for sense1 in senses: sense1_index = sense_index_map[sense1.name()] similarity_matrix[sense1_index, sense1_index] = 1.0 for sense2 in senses[index:]: similarity = sense1.wup_similarity(sense2) sense2_index = sense_index_map[sense2.name()] similarity_matrix[sense1_index, sense2_index] = similarity similarity_matrix[sense2_index, sense1_index] = similarity if not index % 100: print('%s: completed %d/%d senses' % (time.strftime("%Y/%d/%m-%H:%M:%S"), index, num_senses)) index += 1 print('finished senses similarity matrix', time.strftime("%H:%M:%S")) print(similarity_matrix[1, 1]) print(f[Constants.ITEM_TYPE + "_sense_similarity_matrix"][0][0]) # print(similarity_matrix) f.close() def main(): base_dir = Constants.DATASET_FOLDER dataset = Constants.ITEM_TYPE print('Building %s similarity matrix' % Constants.ITEM_TYPE) reviews_file = base_dir + 'reviews_' + dataset + '_shuffled.pkl' with open(reviews_file, 'rb') as read_file: reviews = pickle.load(read_file) # reviews = reviews[20:30] all_senses = list(generate_all_senses(reviews)) print('num senses: %d' % len(all_senses)) build_hdf5_sense_similarity_matrix(all_senses) def build_groups2(nouns): print('building groups', time.strftime("%H:%M:%S")) all_senses = set() sense_word_map = {} for noun in nouns: senses = wordnet.synsets(noun, pos='n') all_senses.update(senses) for sense in senses: if sense.name() not in sense_word_map: sense_word_map[sense.name()] = [] sense_word_map[sense.name()].append(noun) all_senses = list(all_senses) all_senses_names = [sense.name() for sense in all_senses] print('number of senses:', len(all_senses)) sense_similarity_matrix, sense_similarity_matrix_columns =\ get_sense_similarity_submatrix(all_senses_names) print('submatrix ready', time.strftime("%H:%M:%S")) # affinity_propagation = AffinityPropagation() # labels1 = affinity_propagation.fit_predict(sense_similarity_matrix) # print('affinity propagation ready', time.strftime("%H:%M:%S")) grouper = BaumanSensesGrouper(sense_similarity_matrix, 0.7) groups = grouper.group_senses() print('groups') # print(groups) new_groups = [] for group in groups: new_group = set() for element in group: sense_name = sense_similarity_matrix_columns[element] new_group.add(sense_name) new_groups.append(new_group) print('finished groups', time.strftime("%H:%M:%S")) # print(groups) # print(new_groups) print('num groups: %d' % len(groups)) sense_groups = [] for group in new_groups: sense_group = SenseGroup(group) for sense in sense_group.senses: sense_group.nouns |= set(sense_word_map[sense]) sense_groups.append(sense_group) return sense_groups def get_sense_similarity_submatrix(sense_names_list): columns = [] sense_index_map_file = Constants.DATASET_FOLDER + Constants.ITEM_TYPE +\ '_sense_index_map.pkl' with open(sense_index_map_file, 'rb') as read_file: sense_index_map = pickle.load(read_file) inverse_sense_index_map = {v: k for k, v in sense_index_map.iteritems()} for sense_name in sense_names_list: columns.append(sense_index_map[sense_name]) columns.sort() submatrix_senses = [inverse_sense_index_map[column] for column in columns] hdf5_file = Constants.DATASET_FOLDER + Constants.ITEM_TYPE + '_sense_similarity_matrix.hdf5' f = h5py.File(hdf5_file, 'r') similarity_matrix = f[Constants.ITEM_TYPE + "_sense_similarity_matrix"] matrix_size = len(similarity_matrix) print('sense similarity matrix length: %d' % matrix_size) # print(similarity_matrix[1, 1]) # columns = range(5) submatrix = similarity_matrix[columns, :][:, columns] print('%s: 
obtained submatrix, length: %d' % (time.strftime("%Y/%d/%m-%H:%M:%S"), len(submatrix))) # new_senses = [] return submatrix, submatrix_senses def evaluate_clustering(): similarity_matrix = get_sense_similarity_submatrix(range(10000)) matrix_size = len(similarity_matrix) print('got matrix') affinity_propagation = AffinityPropagation() labels1 = affinity_propagation.fit_predict(similarity_matrix) print('affinity propagation') dbscan = DBSCAN(min_samples=1) labels2 = dbscan.fit_predict(similarity_matrix) print('print dbscan') distance_matrix = np.ndarray((matrix_size, matrix_size)) for i in range(matrix_size): for j in range(matrix_size): distance_matrix[i, j] = 1 - similarity_matrix[i, j] print(distance_matrix[1, 2]) print(distance_matrix[1, 1]) print('created distance matrix') cluster_map1 = cluster_evaluation.fpena_get_clusters(labels1) cluster_map2 = cluster_evaluation.fpena_get_clusters(labels2) print(cluster_map1) print(cluster_map2) sc1 = sklearn.metrics.silhouette_score(distance_matrix, labels1, metric='euclidean') sc2 = sklearn.metrics.silhouette_score(distance_matrix, labels2, metric='euclidean') sc5 = cluster_evaluation.fpena_evaluate(cluster_map1, distance_matrix) sc6 = cluster_evaluation.fpena_evaluate(cluster_map2, distance_matrix) num_elements1 = [len(values) for values in cluster_map1.values()] num_elements2 = [len(values) for values in cluster_map2.values()] print(num_elements1) print(num_elements2) print('Number of clusters Affinity Propagation: %f' % len(cluster_map1)) print('Number of clusters DBSCAN: %f' % len(cluster_map2)) print('Average elements per cluster Affinity Propagation: %f' % np.mean(num_elements1)) print('Average elements per cluster DBSCAN: %f' % np.mean(num_elements2)) print('Standard deviation per cluster Affinity Propagation: %f' % np.std(num_elements1)) print('Standard deviation per cluster DBSCAN: %f' % np.std(num_elements2)) print('Silouhette score Affinity Propagation (distance matrix): %f' % sc1) print('Silouhette score DBSCAN (distance matrix): %f' % sc2) print('Dunn index Affinity Propagation (distance matrix): %f' % sc5) print('Dunn index DBSCAN (distance matrix): %f' % sc6) # start = time.time() # main() # evaluate_clustering() # get_similarity_submatrix() # end = time.time() # total_time = end - start # print("Total time = %f seconds" % total_time)
lgpl-2.1
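bronk2_synset above is a Bron-Kerbosch-with-pivot recursion over WordNet senses, with adjacency defined by a wup_similarity threshold. The self-contained sketch below shows the same recursion on a tiny integer graph so the control flow is easier to follow (vertex names and adjacency are made up for illustration):

def bron_kerbosch(clique, candidates, excluded, adj, out):
    # Report `clique` once it can no longer be extended.
    if not candidates and not excluded:
        out.append(clique)
        return
    pivot = next(iter(candidates or excluded))
    # Only branch on candidates that are not neighbours of the pivot.
    for v in [u for u in candidates if u not in adj[pivot]]:
        bron_kerbosch(clique | {v},
                      candidates & adj[v],
                      excluded & adj[v],
                      adj, out)
        candidates = candidates - {v}
        excluded = excluded | {v}

adj = {0: {1, 2}, 1: {0, 2}, 2: {0, 1, 3}, 3: {2}}
cliques = []
bron_kerbosch(set(), set(adj), set(), adj, cliques)
print(cliques)   # the maximal cliques {0, 1, 2} and {2, 3}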
lucasbrunialti/biclustering-experiments
experiments/run_algo.py
1
23647
import sys import time import h5py import codecs import subprocess import numpy as np import pandas as pd import skfuzzy as fuzz from argparse import ArgumentParser # from fnmtf import fnmtf from davies_bouldin import davies_bouldin_score, calculate_centroids_doc_mean # from onmtf import matrix_factorization_clustering from sklearn.cluster import KMeans from sklearn.datasets import fetch_20newsgroups from sklearn.metrics.cluster import adjusted_rand_score from sklearn.metrics.cluster import from sklearn.feature_extraction.text import TfidfTransformer, CountVectorizer class Dataset(object): @classmethod def fromdataframe(cls, dataframe): s = cls() s.__dataframe = dataframe s.__target_names = dataframe['channel'].unique().tolist() s.__target = s.build_targets() s.__data = dataframe['all'] s.__name = 'ig' return s @classmethod def fromnumpyarray(cls, arr, labels): s = cls() s.__target = labels s.__data = arr s.__name = 'nips' return s def build_targets(self): classes_index = list(range(len(self.target_names))) target_names_to_index = {k: v for k, v in zip(self.target_names, classes_index)} return np.array([target_names_to_index[name] for name in self.dataframe['channel']]) @property def name(self): return self.__name @property def dataframe(self): return self.__dataframe @property def target(self): return self.__target @property def target_names(self): return self.__target_names @property def data(self): return self.__data def get_dataset(dataset_name): if dataset_name == 'newsgroup': return fetch_20newsgroups(subset='all') elif dataset_name == 'ig': ig_df = pd.read_pickle('all_news_df.pkl') return Dataset.fromdataframe(ig_df) elif dataset_name == 'igtoy': arena_news_df = pd.read_pickle('arena_news_df.pkl') sport_news_df = pd.read_pickle('sport_news_df.pkl') jovem_news_df = pd.read_pickle('jovem_news_df.pkl') labels_true = np.array(len(arena_news_df.ix[0:99])*[0] + len(sport_news_df.ix[0:99])*[1] + len(jovem_news_df.ix[0:99])*[2]) count_vect = CountVectorizer(encoding='UTF-8',lowercase=False, min_df=2) X = count_vect.fit_transform(arena_news_df['all'].ix[0:99].tolist() + sport_news_df['all'].ix[0:99].tolist() + jovem_news_df['all'].ix[0:99].tolist()) return Dataset.fromnumpyarray(X, labels_true) elif dataset_name == 'nips': arr = np.load('nips_data') labels = np.load('nips_labels') return Dataset.fromnumpyarray(arr, labels) def preprocess(dataset): if dataset.name == 'ig': vectorizer = CountVectorizer(stop_words='english', min_df=2) X = vectorizer.fit_transform(dataset.data) else: X = dataset.data X_train_norm_tfidf = TfidfTransformer(norm=u'l2', use_idf=True).fit_transform(X) X_train_tfidf = TfidfTransformer(use_idf=True).fit_transform(X) X_train_norm = TfidfTransformer(norm=u'l2', use_idf=False).fit_transform(X) X_train = TfidfTransformer(use_idf=False).fit_transform(X) return X_train, X_train_norm, X_train_tfidf, X_train_norm_tfidf def run_kmeans(X_train, X_train_norm, X_train_tfidf, X_train_norm_tfidf, labels_true, dataset_name, kk, ll): params = { 'newsgroup': { 'k': [10, 15, 20, 25, 30], 'X': ['X_train', 'X_train_norm', 'X_train_tfidf', 'X_train_norm_tfidf'] }, 'ig': { 'k': [13], 'X': ['X_train', 'X_train_norm', 'X_train_tfidf', 'X_train_norm_tfidf'] }, 'igtoy': { 'k': [3], 'l': [2, 3, 4, 5, 6], 'X': ['X_train', 'X_train_norm', 'X_train_tfidf', 'X_train_norm_tfidf'] }, 'nips': { 'k': [9], 'l': [5, 7, 9, 11, 13], 'X': ['X_train', 'X_train_norm', 'X_train_tfidf', 'X_train_norm_tfidf'] } } output_file = codecs.open(dataset_name + '_kmeans_news_results.csv', 'w', 'utf-8') 
output_file.write('X,K,NMI,RAND,DAVIES\n') for k in params[dataset_name]['k']: for data_str in params[dataset_name]['X']: data = eval(data_str) data = data.toarray().astype(np.float64) error_best = np.inf for _ in range(10): tick1 = time.time() datat = data.T # n, _ = data.shape # temp = np.diag(np.squeeze(np.asarray((data.dot(datat).dot(np.ones(n).reshape(n, 1)))))) # d = datat.dot(np.sqrt(temp)) estimator = KMeans(n_clusters=k, max_iter=10000) estimator.fit(data) tick2 = time.time() print(u'Took {} secs to train the {} model...'.format((tick2 - tick1), 'kmeans')) labels_pred = estimator.labels_ centroids = estimator.cluster_centers_ error = estimator.inertia_ nmi_score = normalized_mutual_info_score(labels_true, labels_pred) rand_score = adjusted_rand_score(labels_true, labels_pred) davies_score = davies_bouldin_score(data, labels_pred, centroids) tick3 = time.time() print(u'Took {} secs to calculate {} metrics...'.format((tick3 - tick2), 'kmeans')) output_file.write(u'{},{},{},{},{}\n'.format(data_str, k, nmi_score, rand_score, davies_score)) print('Execution: X: {}, k: {}'.format(data_str, k)) print('NMI score: {}'.format(nmi_score)) print('Rand score: {}'.format(rand_score)) print('Davies score: {}'.format(davies_score)) print('-----------------------------------------------\n') output_file.close() def run_fkmeans(X_train, X_train_norm, X_train_tfidf, X_train_norm_tfidf, labels_true, dataset_name, kk, ll): params = { 'newsgroup': { 'k': [20], 'X': ['X_train', 'X_train_norm', 'X_train_tfidf', 'X_train_norm_tfidf'] }, 'ig': { 'k': [13], 'X': ['X_train', 'X_train_norm', 'X_train_tfidf', 'X_train_norm_tfidf'] }, 'igtoy': { 'k': [3], 'l': [2, 3, 4, 5, 6], 'X': ['X_train', 'X_train_norm', 'X_train_tfidf', 'X_train_norm_tfidf'] }, 'nips': { 'k': [9], 'l': [5, 7, 9, 11, 13], 'X': ['X_train', 'X_train_norm', 'X_train_tfidf', 'X_train_norm_tfidf'] } } output_file = codecs.open(dataset_name + '_fuzzy_cmeans_news_results.csv', 'w', 'utf-8') output_file.write('X,K,NMI,RAND,DAVIES\n') output_file.flush() for k in params[dataset_name]['k']: for data_str in params[dataset_name]['X']: data = eval(data_str) data = data.toarray().astype(np.float64) error_best = np.inf for _ in range(10): tick1 = time.time() centroids, U, _, _, errors, _, _ = fuzz.cluster.cmeans( data.T, k, 2, error=0.00000000001, maxiter=10000) tick2 = time.time() print(u'Took {} secs to train the {} model...'.format((tick2 - tick1), 'fkmeans')) labels_pred = np.argmax(U, axis=0) error = errors[-1] nmi_score = normalized_mutual_info_score(labels_true, labels_pred) rand_score = adjusted_rand_score(labels_true, labels_pred) davies_score = davies_bouldin_score(data, labels_pred, centroids) tick3 = time.time() print(u'Took {} secs to calculate {} metrics...'.format((tick3 - tick2), 'fkmeans')) output_file.write(u'{},{},{},{},{}\n'.format(data_str, k, nmi_score, rand_score, davies_score)) output_file.flush() print('Execution: X: {}, k: {}'.format(data_str, k)) print('NMI score: {}'.format(nmi_score)) print('Rand score: {}'.format(rand_score)) print('Davies score: {}'.format(davies_score)) print('-----------------------------------------------\n') output_file.close() def run_onmtf(X_train, X_train_norm, X_train_tfidf, X_train_norm_tfidf, labels_true, dataset_name, kk, ll): params = { 'newsgroup': { 'k' : [20], 'l' : [15, 20, 25, 30], 'X' : ['X_train', 'X_train_norm', 'X_train_tfidf', 'X_train_norm_tfidf'] }, 'igtoy': { 'k': [3], 'l': [2, 3, 4, 5, 6], 'X': ['X_train', 'X_train_norm', 'X_train_tfidf', 'X_train_norm_tfidf'] }, 'ig': { 'k' : [7, 
10, 13, 16, 19], 'l' : [19], 'X' : ['X_train_norm_tfidf'] # 'X' : ['X_train', 'X_train_norm', 'X_train_tfidf', 'X_train_norm_tfidf'] }, 'nips': { 'k': [9], 'l': [6, 9, 12, 15, 18], 'X': ['X_train', 'X_train_norm', 'X_train_tfidf', 'X_train_norm_tfidf'] } } if kk: filename = dataset_name + '_kk=' + str(kk) + '_ll=' + str(ll) + '_onmtf_news_results.csv' params[dataset_name]['k'] = [int(kk)] params[dataset_name]['l'] = [int(ll)] else: filename = dataset_name + '_onmtf_news_results.csv' out_f = codecs.open(filename, 'w', 'utf-8') out_f.write('X,K,L,NMI,RAND,DAVIES\n') for k in params[dataset_name]['k']: for l in params[dataset_name]['l']: for data_str in params[dataset_name]['X']: data = eval(data_str) data = data.toarray().astype(np.float64) h5f = h5py.File('data.h5', 'w') h5f.create_dataset('X', data=data.T) h5f.close() error_best = np.inf for _ in range(10): tick1 = time.time() proc = subprocess.Popen(['./algos_gpu', 'onmtf', str(k), str(l), '10000'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) (out, err) = proc.communicate() print 'out:', out U = np.genfromtxt('U.csv', delimiter=',') S = np.genfromtxt('S.csv', delimiter=',') V = np.genfromtxt('V.csv', delimiter=',') with open('error.csv') as f: error = float(f.read()) labels_pred = np.argmax(U, axis=1) tick2 = time.time() print(u'Took {} secs to train the {} model...'.format((tick2 - tick1), 'onmtf')) nmi_score = normalized_mutual_info_score(labels_true, labels_pred) rand_score = adjusted_rand_score(labels_true, labels_pred) davies_score = davies_bouldin_score(data, labels_pred, calculate_centroids_doc_mean(data, labels_pred, k)) tick3 = time.time() print(u'Took {} secs to calculate {} metrics...'.format((tick3 - tick2), 'onmtf')) out_f.write(u'{},{},{},{},{},{}\n'.format(data_str, k, l, nmi_score, rand_score, davies_score)) print('Execution: X: {}, k: {}'.format(data_str, k)) print('Algo error: {}'.format(error_best)) print('NMI score: {}'.format(nmi_score)) print('Rand score: {}'.format(rand_score)) print('Davies score: {}'.format(davies_score)) print('-----------------------------------------------\n') out_f.close() def run_fnmtf(X_train, X_train_norm, X_train_tfidf, X_train_norm_tfidf, labels_true, dataset_name, kk, ll): params = { 'newsgroup': { 'k': [20], 'l': [15, 20, 25, 30], 'X': ['X_train', 'X_train_norm', 'X_train_tfidf', 'X_train_norm_tfidf'] }, 'igtoy': { 'k': [3], 'l': [2, 3, 4, 5, 6], 'X': ['X_train', 'X_train_norm', 'X_train_tfidf', 'X_train_norm_tfidf'] }, 'ig': { 'k': [7, 10, 13, 16, 19], 'l': [7, 10, 13, 16, 19], # 'X': ['X_train', 'X_train_norm', 'X_train_tfidf', 'X_train_norm_tfidf'] 'X': ['X_train', 'X_train_norm', 'X_train_tfidf', 'X_train_norm_tfidf'] }, 'nips': { 'k': [9], 'l': [6, 9, 12, 15, 18], 'X': ['X_train', 'X_train_norm', 'X_train_tfidf', 'X_train_norm_tfidf'] # 'X': ['X_train', 'X_train_tfidf'] } } if kk: filename = dataset_name + '_kk=' + str(kk) + '_ll=' + str(ll) + '_fnmtf_news_results.csv' params[dataset_name]['k'] = [int(kk)] params[dataset_name]['l'] = [int(ll)] else: filename = dataset_name + '_fnmtf_news_results.csv' out_f = codecs.open(filename, 'w', 'utf-8') out_f.write('X,K,L,NMI,RAND,DAVIES\n') for k in params[dataset_name]['k']: for l in params[dataset_name]['l']: for data_str in params[dataset_name]['X']: data = eval(data_str) data = data.toarray().astype(np.float64) h5f = h5py.File('data.h5', 'w') h5f.create_dataset('X', data=data.T) h5f.close() error_best = np.inf for _ in xrange(10): tick1 = time.time() # U, S, V, labels_pred, _, error = fnmtf(data, k, l) proc = 
subprocess.Popen(['./algos', 'fnmtf', str(k), str(l), '10000'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) (out, err) = proc.communicate() print('out: {}'.format(out)) U = np.genfromtxt('U.csv', delimiter=',') S = np.genfromtxt('S.csv', delimiter=',') V = np.genfromtxt('V.csv', delimiter=',') with open('error.csv') as f: error = float(f.read()) labels_pred = np.argmax(U, axis=1) tick2 = time.time() print(u'Took {} secs to train the {} model...'.format((tick2 - tick1), 'fnmtf')) nmi_score = normalized_mutual_info_score(labels_true, labels_pred) rand_score = adjusted_rand_score(labels_true, labels_pred) davies_score = davies_bouldin_score(data, labels_pred, calculate_centroids_doc_mean(data, labels_pred, k)) out_f.write(u'{},{},{},{},{},{}\n'.format(data_str, k, l, nmi_score, rand_score, davies_score)) print('Execution: X: {}, k: {}, l: {}'.format(data_str, k, l)) print('Algo error: {}'.format(error_best)) print('NMI score: {}'.format(nmi_score)) print('Rand score: {}'.format(rand_score)) print('Davies score: {}'.format(davies_score)) print('-----------------------------------------------\n') def run_ovnmtf(X_train, X_train_norm, X_train_tfidf, X_train_norm_tfidf, labels_true, dataset_name, kk, ll): params = { 'newsgroup': { 'k': [20], 'l': [15, 20, 25, 30], 'X': ['X_train', 'X_train_norm', 'X_train_tfidf', 'X_train_norm_tfidf'] }, 'igtoy': { 'k': [3], 'l': [2, 3, 4, 5, 6], 'X': ['X_train', 'X_train_norm', 'X_train_tfidf', 'X_train_norm_tfidf'] }, 'ig': { 'k': [13], 'l': [7, 10, 13, 16, 19], # 'X': ['X_train_norm_tfidf'] 'X': ['X_train_norm', 'X_train_tfidf'] }, 'nips': { 'k': [9], 'l': [6, 9, 12, 15, 18], 'X': ['X_train', 'X_train_norm', 'X_train_tfidf', 'X_train_norm_tfidf'] } } if kk: filename = dataset_name + '_kk=' + str(kk) + '_ll=' + str(ll) + '_ovnmtf_news_results.csv' params[dataset_name]['k'] = [int(kk)] params[dataset_name]['l'] = [int(ll)] else: filename = dataset_name + '_ovnmtf_news_results.csv' out_f = codecs.open(filename, 'w', 'utf-8') out_f.write('X,K,L,NMI,RAND,DAVIES\n') for k in params[dataset_name]['k']: for l in params[dataset_name]['l']: for data_str in params[dataset_name]['X']: data = eval(data_str) data = data.toarray().astype(np.float64) h5f = h5py.File('data.h5', 'w') h5f.create_dataset('X', data=data.T) h5f.close() error_best = np.inf for _ in xrange(10): tick1 = time.time() # U, S, V, labels_pred, _, error = fnmtf(data, k, l) proc = subprocess.Popen(['./algos_gpu', 'ovnmtf', str(k), str(l), '10000'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) (out, err) = proc.communicate() print('out: {}'.format(out)) U = np.genfromtxt('U.csv', delimiter=',') S = np.genfromtxt('S.csv', delimiter=',') # V = np.genfromtxt('V.csv', delimiter=',') with open('error.csv') as f: error = float(f.read()) labels_pred = np.argmax(U, axis=1) tick2 = time.time() print(u'Took {} secs to train the {} model...'.format((tick2 - tick1), 'ovnmtf')) nmi_score = normalized_mutual_info_score(labels_true, labels_pred) rand_score = adjusted_rand_score(labels_true, labels_pred) davies_score = davies_bouldin_score(data, labels_pred, calculate_centroids_doc_mean(data, labels_pred, k)) out_f.write(u'{},{},{},{},{},{}\n'.format(data_str, k, l, nmi_score, rand_score, davies_score)) print('Execution: X: {}, k: {}, l: {}'.format(data_str, k, l)) print('Algo error: {}'.format(error_best)) print('NMI score: {}'.format(nmi_score)) print('Rand score: {}'.format(rand_score)) print('Davies score: {}'.format(davies_score)) 
print('-----------------------------------------------\n') def run_bin_ovnmtf(X_train, X_train_norm, X_train_tfidf, X_train_norm_tfidf, labels_true, dataset_name, kk, ll): params = { 'newsgroup': { 'k': [20], 'l': [15, 20, 25, 30], 'X': ['X_train', 'X_train_norm', 'X_train_tfidf', 'X_train_norm_tfidf'] }, 'igtoy': { 'k': [3], 'l': [2, 3, 4, 5, 6], 'X': ['X_train', 'X_train_norm', 'X_train_tfidf', 'X_train_norm_tfidf'] }, 'ig': { 'k': [7, 10, 13, 16, 19], 'l': [7, 10, 13, 16, 19], 'X': ['X_train_norm_tfidf'] # 'X': ['X_train_norm', 'X_train_tfidf', 'X_train_norm_tfidf'] }, 'nips': { 'k': [9], 'l': [6, 9, 12, 15, 18], 'X': ['X_train', 'X_train_norm', 'X_train_tfidf', 'X_train_norm_tfidf'] } } if kk: filename = dataset_name + '_kk=' + str(kk) + '_ll=' + str(ll) + '_X=' + params[dataset_name]['X'][0] + '_bin_ovnmtf_news_results.csv' params[dataset_name]['k'] = [int(kk)] params[dataset_name]['l'] = [int(ll)] else: filename = dataset_name + '_bin_ovnmtf_news_results.csv' out_f = codecs.open(filename, 'w', 'utf-8') out_f.write('X,K,L,NMI,RAND,DAVIES\n') for k in params[dataset_name]['k']: for l in params[dataset_name]['l']: for data_str in params[dataset_name]['X']: data = eval(data_str) data = data.toarray().astype(np.float64) h5f = h5py.File('data.h5', 'w') h5f.create_dataset('X', data=data.T) h5f.close() error_best = np.inf for _ in xrange(10): tick1 = time.time() # U, S, V, labels_pred, _, error = fnmtf(data, k, l) proc = subprocess.Popen(['./algos', 'bin_ovnmtf', str(k), str(l), '10000'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) (out, err) = proc.communicate() print('out: {}'.format(out)) U = np.genfromtxt('U.csv', delimiter=',') S = np.genfromtxt('S.csv', delimiter=',') # V = np.genfromtxt('V.csv', delimiter=',') with open('error.csv') as f: error = float(f.read()) labels_pred = np.argmax(U, axis=1) tick2 = time.time() print(u'Took {} secs to train the {} model...'.format((tick2 - tick1), 'bin_ovnmtf')) nmi_score = normalized_mutual_info_score(labels_true, labels_pred) rand_score = adjusted_rand_score(labels_true, labels_pred) davies_score = davies_bouldin_score(data, labels_pred, calculate_centroids_doc_mean(data, labels_pred, k)) out_f.write(u'{},{},{},{},{},{}\n'.format(data_str, k, l, nmi_score, rand_score, davies_score)) print('Execution: X: {}, k: {}, l: {}'.format(data_str, k, l)) print('Algo error: {}'.format(error)) print('NMI score: {}'.format(nmi_score)) print('Rand score: {}'.format(rand_score)) print('Davies score: {}'.format(davies_score)) print('-----------------------------------------------\n') def main(): parser = ArgumentParser() parser.add_argument('-d', '--dataset', choices=('ig', 'igtoy', 'newsgroup', 'nips')) parser.add_argument('-a', '--algo', choices=('onmtf', 'kmeans', 'fnmtf', 'fkmeans', 'bin_ovnmtf', 'ovnmtf')) parser.add_argument('-k', help='number of row clusters', required=False) parser.add_argument('-l', help='number of column clusters', required=False) args = parser.parse_args() # dataset_name = sys.argv[1] # algorithm_to_run = sys.argv[2] # k = sys.argv[3] # print('Could not find algorithm to run argument and/or dataset name!!!') # raise SystemError(1) current_module = sys.modules[__name__] function_to_run_str = 'run_{}'.format(args.algo) function_to_run = getattr(current_module, function_to_run_str) dataset = get_dataset(args.dataset) X_train, X_train_norm, X_train_tfidf, X_train_norm_tfidf = preprocess(dataset) function_to_run(X_train, X_train_norm, X_train_tfidf, X_train_norm_tfidf, dataset.target, args.dataset, args.k, 
args.l) if __name__ == '__main__': main()
bsd-2-clause
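All of the run_* drivers above score predicted labels against ground truth with NMI and adjusted Rand (plus a Davies-Bouldin variant). A condensed, hedged version of that evaluation on a toy dataset, with synthetic blobs standing in for the document-term matrices used in the experiments:

from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
from sklearn.metrics.cluster import adjusted_rand_score, normalized_mutual_info_score

X, labels_true = make_blobs(n_samples=300, centers=3, random_state=0)
labels_pred = KMeans(n_clusters=3, n_init=10, random_state=0).fit_predict(X)

print('NMI :', normalized_mutual_info_score(labels_true, labels_pred))
print('Rand:', adjusted_rand_score(labels_true, labels_pred))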
gfyoung/pandas
pandas/tests/tslibs/test_liboffsets.py
3
5095
""" Tests for helper functions in the cython tslibs.offsets """ from datetime import datetime import pytest from pandas._libs.tslibs.ccalendar import get_firstbday, get_lastbday import pandas._libs.tslibs.offsets as liboffsets from pandas._libs.tslibs.offsets import roll_qtrday from pandas import Timestamp @pytest.fixture(params=["start", "end", "business_start", "business_end"]) def day_opt(request): return request.param @pytest.mark.parametrize( "dt,exp_week_day,exp_last_day", [ (datetime(2017, 11, 30), 3, 30), # Business day. (datetime(1993, 10, 31), 6, 29), # Non-business day. ], ) def test_get_last_bday(dt, exp_week_day, exp_last_day): assert dt.weekday() == exp_week_day assert get_lastbday(dt.year, dt.month) == exp_last_day @pytest.mark.parametrize( "dt,exp_week_day,exp_first_day", [ (datetime(2017, 4, 1), 5, 3), # Non-weekday. (datetime(1993, 10, 1), 4, 1), # Business day. ], ) def test_get_first_bday(dt, exp_week_day, exp_first_day): assert dt.weekday() == exp_week_day assert get_firstbday(dt.year, dt.month) == exp_first_day @pytest.mark.parametrize( "months,day_opt,expected", [ (0, 15, datetime(2017, 11, 15)), (0, None, datetime(2017, 11, 30)), (1, "start", datetime(2017, 12, 1)), (-145, "end", datetime(2005, 10, 31)), (0, "business_end", datetime(2017, 11, 30)), (0, "business_start", datetime(2017, 11, 1)), ], ) def test_shift_month_dt(months, day_opt, expected): dt = datetime(2017, 11, 30) assert liboffsets.shift_month(dt, months, day_opt=day_opt) == expected @pytest.mark.parametrize( "months,day_opt,expected", [ (1, "start", Timestamp("1929-06-01")), (-3, "end", Timestamp("1929-02-28")), (25, None, Timestamp("1931-06-5")), (-1, 31, Timestamp("1929-04-30")), ], ) def test_shift_month_ts(months, day_opt, expected): ts = Timestamp("1929-05-05") assert liboffsets.shift_month(ts, months, day_opt=day_opt) == expected def test_shift_month_error(): dt = datetime(2017, 11, 15) day_opt = "this should raise" with pytest.raises(ValueError, match=day_opt): liboffsets.shift_month(dt, 3, day_opt=day_opt) @pytest.mark.parametrize( "other,expected", [ # Before March 1. (datetime(2017, 2, 10), {2: 1, -7: -7, 0: 0}), # After March 1. (Timestamp("2014-03-15", tz="US/Eastern"), {2: 2, -7: -6, 0: 1}), ], ) @pytest.mark.parametrize("n", [2, -7, 0]) def test_roll_qtrday_year(other, expected, n): month = 3 day_opt = "start" # `other` will be compared to March 1. assert roll_qtrday(other, n, month, day_opt, modby=12) == expected[n] @pytest.mark.parametrize( "other,expected", [ # Before June 30. (datetime(1999, 6, 29), {5: 4, -7: -7, 0: 0}), # After June 30. (Timestamp(2072, 8, 24, 6, 17, 18), {5: 5, -7: -6, 0: 1}), ], ) @pytest.mark.parametrize("n", [5, -7, 0]) def test_roll_qtrday_year2(other, expected, n): month = 6 day_opt = "end" # `other` will be compared to June 30. assert roll_qtrday(other, n, month, day_opt, modby=12) == expected[n] def test_get_day_of_month_error(): # get_day_of_month is not directly exposed. # We test it via roll_qtrday. dt = datetime(2017, 11, 15) day_opt = "foo" with pytest.raises(ValueError, match=day_opt): # To hit the raising case we need month == dt.month and n > 0. roll_qtrday(dt, n=3, month=11, day_opt=day_opt, modby=12) @pytest.mark.parametrize( "month", [3, 5], # (other.month % 3) < (month % 3) # (other.month % 3) > (month % 3) ) @pytest.mark.parametrize("n", [4, -3]) def test_roll_qtr_day_not_mod_unequal(day_opt, month, n): expected = {3: {-3: -2, 4: 4}, 5: {-3: -3, 4: 3}} other = Timestamp(2072, 10, 1, 6, 17, 18) # Saturday. 
assert roll_qtrday(other, n, month, day_opt, modby=3) == expected[month][n] @pytest.mark.parametrize( "other,month,exp_dict", [ # Monday. (datetime(1999, 5, 31), 2, {-1: {"start": 0, "business_start": 0}}), # Saturday. ( Timestamp(2072, 10, 1, 6, 17, 18), 4, {2: {"end": 1, "business_end": 1, "business_start": 1}}, ), # First business day. ( Timestamp(2072, 10, 3, 6, 17, 18), 4, {2: {"end": 1, "business_end": 1}, -1: {"start": 0}}, ), ], ) @pytest.mark.parametrize("n", [2, -1]) def test_roll_qtr_day_mod_equal(other, month, exp_dict, n, day_opt): # All cases have (other.month % 3) == (month % 3). expected = exp_dict.get(n, {}).get(day_opt, n) assert roll_qtrday(other, n, month, day_opt, modby=3) == expected @pytest.mark.parametrize( "n,expected", [(42, {29: 42, 1: 42, 31: 41}), (-4, {29: -4, 1: -3, 31: -4})] ) @pytest.mark.parametrize("compare", [29, 1, 31]) def test_roll_convention(n, expected, compare): assert liboffsets.roll_convention(29, n, compare) == expected[compare]
bsd-3-clause
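The private tslibs helpers tested above (shift_month, roll_qtrday, get_lastbday) back pandas' public DateOffset behaviour. A small illustration of the equivalent public API, with a date chosen to echo the fixtures in the tests:

import pandas as pd

ts = pd.Timestamp('2017-11-15')
print(ts + pd.offsets.MonthEnd(1))     # rolls to 2017-11-30
print(ts + pd.offsets.MonthBegin(1))   # rolls to 2017-12-01
print(ts + pd.offsets.BMonthEnd(1))    # rolls to the last business day of Nov 2017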
mojoboss/scikit-learn
examples/cluster/plot_birch_vs_minibatchkmeans.py
333
3694
""" ================================= Compare BIRCH and MiniBatchKMeans ================================= This example compares the timing of Birch (with and without the global clustering step) and MiniBatchKMeans on a synthetic dataset having 100,000 samples and 2 features generated using make_blobs. If ``n_clusters`` is set to None, the data is reduced from 100,000 samples to a set of 158 clusters. This can be viewed as a preprocessing step before the final (global) clustering step that further reduces these 158 clusters to 100 clusters. """ # Authors: Manoj Kumar <manojkumarsivaraj334@gmail.com # Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr> # License: BSD 3 clause print(__doc__) from itertools import cycle from time import time import numpy as np import matplotlib.pyplot as plt import matplotlib.colors as colors from sklearn.preprocessing import StandardScaler from sklearn.cluster import Birch, MiniBatchKMeans from sklearn.datasets.samples_generator import make_blobs # Generate centers for the blobs so that it forms a 10 X 10 grid. xx = np.linspace(-22, 22, 10) yy = np.linspace(-22, 22, 10) xx, yy = np.meshgrid(xx, yy) n_centres = np.hstack((np.ravel(xx)[:, np.newaxis], np.ravel(yy)[:, np.newaxis])) # Generate blobs to do a comparison between MiniBatchKMeans and Birch. X, y = make_blobs(n_samples=100000, centers=n_centres, random_state=0) # Use all colors that matplotlib provides by default. colors_ = cycle(colors.cnames.keys()) fig = plt.figure(figsize=(12, 4)) fig.subplots_adjust(left=0.04, right=0.98, bottom=0.1, top=0.9) # Compute clustering with Birch with and without the final clustering step # and plot. birch_models = [Birch(threshold=1.7, n_clusters=None), Birch(threshold=1.7, n_clusters=100)] final_step = ['without global clustering', 'with global clustering'] for ind, (birch_model, info) in enumerate(zip(birch_models, final_step)): t = time() birch_model.fit(X) time_ = time() - t print("Birch %s as the final step took %0.2f seconds" % ( info, (time() - t))) # Plot result labels = birch_model.labels_ centroids = birch_model.subcluster_centers_ n_clusters = np.unique(labels).size print("n_clusters : %d" % n_clusters) ax = fig.add_subplot(1, 3, ind + 1) for this_centroid, k, col in zip(centroids, range(n_clusters), colors_): mask = labels == k ax.plot(X[mask, 0], X[mask, 1], 'w', markerfacecolor=col, marker='.') if birch_model.n_clusters is None: ax.plot(this_centroid[0], this_centroid[1], '+', markerfacecolor=col, markeredgecolor='k', markersize=5) ax.set_ylim([-25, 25]) ax.set_xlim([-25, 25]) ax.set_autoscaley_on(False) ax.set_title('Birch %s' % info) # Compute clustering with MiniBatchKMeans. mbk = MiniBatchKMeans(init='k-means++', n_clusters=100, batch_size=100, n_init=10, max_no_improvement=10, verbose=0, random_state=0) t0 = time() mbk.fit(X) t_mini_batch = time() - t0 print("Time taken to run MiniBatchKMeans %0.2f seconds" % t_mini_batch) mbk_means_labels_unique = np.unique(mbk.labels_) ax = fig.add_subplot(1, 3, 3) for this_centroid, k, col in zip(mbk.cluster_centers_, range(n_clusters), colors_): mask = mbk.labels_ == k ax.plot(X[mask, 0], X[mask, 1], 'w', markerfacecolor=col, marker='.') ax.plot(this_centroid[0], this_centroid[1], '+', markeredgecolor='k', markersize=5) ax.set_xlim([-25, 25]) ax.set_ylim([-25, 25]) ax.set_title("MiniBatchKMeans") ax.set_autoscaley_on(False) plt.show()
bsd-3-clause
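A condensed, text-only variant of the comparison plotted above, on a smaller synthetic blob set (the sample sizes and random seed below are arbitrary): fit Birch with and without the global clustering step, plus MiniBatchKMeans, and report how many clusters each produces.

import numpy as np
from sklearn.cluster import Birch, MiniBatchKMeans
from sklearn.datasets import make_blobs

X, _ = make_blobs(n_samples=5000, centers=25, random_state=0)

birch_no_global = Birch(threshold=1.7, n_clusters=None).fit(X)
birch_global = Birch(threshold=1.7, n_clusters=25).fit(X)
mbk = MiniBatchKMeans(n_clusters=25, n_init=10, random_state=0).fit(X)

print('Birch subclusters only :', np.unique(birch_no_global.labels_).size)
print('Birch with global step :', np.unique(birch_global.labels_).size)
print('MiniBatchKMeans        :', np.unique(mbk.labels_).size)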
KaelChen/numpy
numpy/lib/npyio.py
42
71218
from __future__ import division, absolute_import, print_function import sys import os import re import itertools import warnings import weakref from operator import itemgetter import numpy as np from . import format from ._datasource import DataSource from numpy.core.multiarray import packbits, unpackbits from ._iotools import ( LineSplitter, NameValidator, StringConverter, ConverterError, ConverterLockError, ConversionWarning, _is_string_like, has_nested_fields, flatten_dtype, easy_dtype, _bytes_to_name ) from numpy.compat import ( asbytes, asstr, asbytes_nested, bytes, basestring, unicode ) if sys.version_info[0] >= 3: import pickle else: import cPickle as pickle from future_builtins import map loads = pickle.loads __all__ = [ 'savetxt', 'loadtxt', 'genfromtxt', 'ndfromtxt', 'mafromtxt', 'recfromtxt', 'recfromcsv', 'load', 'loads', 'save', 'savez', 'savez_compressed', 'packbits', 'unpackbits', 'fromregex', 'DataSource' ] class BagObj(object): """ BagObj(obj) Convert attribute look-ups to getitems on the object passed in. Parameters ---------- obj : class instance Object on which attribute look-up is performed. Examples -------- >>> from numpy.lib.npyio import BagObj as BO >>> class BagDemo(object): ... def __getitem__(self, key): # An instance of BagObj(BagDemo) ... # will call this method when any ... # attribute look-up is required ... result = "Doesn't matter what you want, " ... return result + "you're gonna get this" ... >>> demo_obj = BagDemo() >>> bagobj = BO(demo_obj) >>> bagobj.hello_there "Doesn't matter what you want, you're gonna get this" >>> bagobj.I_can_be_anything "Doesn't matter what you want, you're gonna get this" """ def __init__(self, obj): # Use weakref to make NpzFile objects collectable by refcount self._obj = weakref.proxy(obj) def __getattribute__(self, key): try: return object.__getattribute__(self, '_obj')[key] except KeyError: raise AttributeError(key) def __dir__(self): """ Enables dir(bagobj) to list the files in an NpzFile. This also enables tab-completion in an interpreter or IPython. """ return object.__getattribute__(self, '_obj').keys() def zipfile_factory(*args, **kwargs): import zipfile kwargs['allowZip64'] = True return zipfile.ZipFile(*args, **kwargs) class NpzFile(object): """ NpzFile(fid) A dictionary-like object with lazy-loading of files in the zipped archive provided on construction. `NpzFile` is used to load files in the NumPy ``.npz`` data archive format. It assumes that files in the archive have a ``.npy`` extension, other files are ignored. The arrays and file strings are lazily loaded on either getitem access using ``obj['key']`` or attribute lookup using ``obj.f.key``. A list of all files (without ``.npy`` extensions) can be obtained with ``obj.files`` and the ZipFile object itself using ``obj.zip``. Attributes ---------- files : list of str List of all files in the archive with a ``.npy`` extension. zip : ZipFile instance The ZipFile object initialized with the zipped archive. f : BagObj instance An object on which attribute can be performed as an alternative to getitem access on the `NpzFile` instance itself. allow_pickle : bool, optional Allow loading pickled data. Default: True pickle_kwargs : dict, optional Additional keyword arguments to pass on to pickle.load. These are only useful when loading object arrays saved on Python 2 when using Python 3. Parameters ---------- fid : file or str The zipped archive to open. This is either a file-like object or a string containing the path to the archive. 
own_fid : bool, optional Whether NpzFile should close the file handle. Requires that `fid` is a file-like object. Examples -------- >>> from tempfile import TemporaryFile >>> outfile = TemporaryFile() >>> x = np.arange(10) >>> y = np.sin(x) >>> np.savez(outfile, x=x, y=y) >>> outfile.seek(0) >>> npz = np.load(outfile) >>> isinstance(npz, np.lib.io.NpzFile) True >>> npz.files ['y', 'x'] >>> npz['x'] # getitem access array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) >>> npz.f.x # attribute lookup array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) """ def __init__(self, fid, own_fid=False, allow_pickle=True, pickle_kwargs=None): # Import is postponed to here since zipfile depends on gzip, an # optional component of the so-called standard library. _zip = zipfile_factory(fid) self._files = _zip.namelist() self.files = [] self.allow_pickle = allow_pickle self.pickle_kwargs = pickle_kwargs for x in self._files: if x.endswith('.npy'): self.files.append(x[:-4]) else: self.files.append(x) self.zip = _zip self.f = BagObj(self) if own_fid: self.fid = fid else: self.fid = None def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): self.close() def close(self): """ Close the file. """ if self.zip is not None: self.zip.close() self.zip = None if self.fid is not None: self.fid.close() self.fid = None self.f = None # break reference cycle def __del__(self): self.close() def __getitem__(self, key): # FIXME: This seems like it will copy strings around # more than is strictly necessary. The zipfile # will read the string and then # the format.read_array will copy the string # to another place in memory. # It would be better if the zipfile could read # (or at least uncompress) the data # directly into the array memory. member = 0 if key in self._files: member = 1 elif key in self.files: member = 1 key += '.npy' if member: bytes = self.zip.open(key) magic = bytes.read(len(format.MAGIC_PREFIX)) bytes.close() if magic == format.MAGIC_PREFIX: bytes = self.zip.open(key) return format.read_array(bytes, allow_pickle=self.allow_pickle, pickle_kwargs=self.pickle_kwargs) else: return self.zip.read(key) else: raise KeyError("%s is not a file in the archive" % key) def __iter__(self): return iter(self.files) def items(self): """ Return a list of tuples, with each tuple (filename, array in file). """ return [(f, self[f]) for f in self.files] def iteritems(self): """Generator that returns tuples (filename, array in file).""" for f in self.files: yield (f, self[f]) def keys(self): """Return files in the archive with a ``.npy`` extension.""" return self.files def iterkeys(self): """Return an iterator over the files in the archive.""" return self.__iter__() def __contains__(self, key): return self.files.__contains__(key) def load(file, mmap_mode=None, allow_pickle=True, fix_imports=True, encoding='ASCII'): """ Load arrays or pickled objects from ``.npy``, ``.npz`` or pickled files. Parameters ---------- file : file-like object or string The file to read. File-like objects must support the ``seek()`` and ``read()`` methods. Pickled files require that the file-like object support the ``readline()`` method as well. mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional If not None, then memory-map the file, using the given mode (see `numpy.memmap` for a detailed description of the modes). A memory-mapped array is kept on disk. However, it can be accessed and sliced like any ndarray. Memory mapping is especially useful for accessing small fragments of large files without reading the entire file into memory. 
allow_pickle : bool, optional Allow loading pickled object arrays stored in npy files. Reasons for disallowing pickles include security, as loading pickled data can execute arbitrary code. If pickles are disallowed, loading object arrays will fail. Default: True fix_imports : bool, optional Only useful when loading Python 2 generated pickled files on Python 3, which includes npy/npz files containing object arrays. If `fix_imports` is True, pickle will try to map the old Python 2 names to the new names used in Python 3. encoding : str, optional What encoding to use when reading Python 2 strings. Only useful when loading Python 2 generated pickled files on Python 3, which includes npy/npz files containing object arrays. Values other than 'latin1', 'ASCII', and 'bytes' are not allowed, as they can corrupt numerical data. Default: 'ASCII' Returns ------- result : array, tuple, dict, etc. Data stored in the file. For ``.npz`` files, the returned instance of NpzFile class must be closed to avoid leaking file descriptors. Raises ------ IOError If the input file does not exist or cannot be read. ValueError The file contains an object array, but allow_pickle=False given. See Also -------- save, savez, savez_compressed, loadtxt memmap : Create a memory-map to an array stored in a file on disk. Notes ----- - If the file contains pickle data, then whatever object is stored in the pickle is returned. - If the file is a ``.npy`` file, then a single array is returned. - If the file is a ``.npz`` file, then a dictionary-like object is returned, containing ``{filename: array}`` key-value pairs, one for each file in the archive. - If the file is a ``.npz`` file, the returned value supports the context manager protocol in a similar fashion to the open function:: with load('foo.npz') as data: a = data['a'] The underlying file descriptor is closed when exiting the 'with' block. Examples -------- Store data to disk, and load it again: >>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]])) >>> np.load('/tmp/123.npy') array([[1, 2, 3], [4, 5, 6]]) Store compressed data to disk, and load it again: >>> a=np.array([[1, 2, 3], [4, 5, 6]]) >>> b=np.array([1, 2]) >>> np.savez('/tmp/123.npz', a=a, b=b) >>> data = np.load('/tmp/123.npz') >>> data['a'] array([[1, 2, 3], [4, 5, 6]]) >>> data['b'] array([1, 2]) >>> data.close() Mem-map the stored array, and then access the second row directly from disk: >>> X = np.load('/tmp/123.npy', mmap_mode='r') >>> X[1, :] memmap([4, 5, 6]) """ import gzip own_fid = False if isinstance(file, basestring): fid = open(file, "rb") own_fid = True else: fid = file if encoding not in ('ASCII', 'latin1', 'bytes'): # The 'encoding' value for pickle also affects what encoding # the serialized binary data of Numpy arrays is loaded # in. Pickle does not pass on the encoding information to # Numpy. The unpickling code in numpy.core.multiarray is # written to assume that unicode data appearing where binary # should be is in 'latin1'. 'bytes' is also safe, as is 'ASCII'. # # Other encoding values can corrupt binary data, and we # purposefully disallow them. For the same reason, the errors= # argument is not exposed, as values other than 'strict' # result can similarly silently corrupt numerical data. raise ValueError("encoding must be 'ASCII', 'latin1', or 'bytes'") if sys.version_info[0] >= 3: pickle_kwargs = dict(encoding=encoding, fix_imports=fix_imports) else: # Nothing to do on Python 2 pickle_kwargs = {} try: # Code to distinguish from NumPy binary files and pickles. 
_ZIP_PREFIX = asbytes('PK\x03\x04') N = len(format.MAGIC_PREFIX) magic = fid.read(N) fid.seek(-N, 1) # back-up if magic.startswith(_ZIP_PREFIX): # zip-file (assume .npz) # Transfer file ownership to NpzFile tmp = own_fid own_fid = False return NpzFile(fid, own_fid=tmp, allow_pickle=allow_pickle, pickle_kwargs=pickle_kwargs) elif magic == format.MAGIC_PREFIX: # .npy file if mmap_mode: return format.open_memmap(file, mode=mmap_mode) else: return format.read_array(fid, allow_pickle=allow_pickle, pickle_kwargs=pickle_kwargs) else: # Try a pickle if not allow_pickle: raise ValueError("allow_pickle=False, but file does not contain " "non-pickled data") try: return pickle.load(fid, **pickle_kwargs) except: raise IOError( "Failed to interpret file %s as a pickle" % repr(file)) finally: if own_fid: fid.close() def save(file, arr, allow_pickle=True, fix_imports=True): """ Save an array to a binary file in NumPy ``.npy`` format. Parameters ---------- file : file or str File or filename to which the data is saved. If file is a file-object, then the filename is unchanged. If file is a string, a ``.npy`` extension will be appended to the file name if it does not already have one. allow_pickle : bool, optional Allow saving object arrays using Python pickles. Reasons for disallowing pickles include security (loading pickled data can execute arbitrary code) and portability (pickled objects may not be loadable on different Python installations, for example if the stored objects require libraries that are not available, and not all pickled data is compatible between Python 2 and Python 3). Default: True fix_imports : bool, optional Only useful in forcing objects in object arrays on Python 3 to be pickled in a Python 2 compatible way. If `fix_imports` is True, pickle will try to map the new Python 3 names to the old module names used in Python 2, so that the pickle data stream is readable with Python 2. arr : array_like Array data to be saved. See Also -------- savez : Save several arrays into a ``.npz`` archive savetxt, load Notes ----- For a description of the ``.npy`` format, see the module docstring of `numpy.lib.format` or the Numpy Enhancement Proposal http://docs.scipy.org/doc/numpy/neps/npy-format.html Examples -------- >>> from tempfile import TemporaryFile >>> outfile = TemporaryFile() >>> x = np.arange(10) >>> np.save(outfile, x) >>> outfile.seek(0) # Only needed here to simulate closing & reopening file >>> np.load(outfile) array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) """ own_fid = False if isinstance(file, basestring): if not file.endswith('.npy'): file = file + '.npy' fid = open(file, "wb") own_fid = True else: fid = file if sys.version_info[0] >= 3: pickle_kwargs = dict(fix_imports=fix_imports) else: # Nothing to do on Python 2 pickle_kwargs = None try: arr = np.asanyarray(arr) format.write_array(fid, arr, allow_pickle=allow_pickle, pickle_kwargs=pickle_kwargs) finally: if own_fid: fid.close() def savez(file, *args, **kwds): """ Save several arrays into a single file in uncompressed ``.npz`` format. If arguments are passed in with no keywords, the corresponding variable names, in the ``.npz`` file, are 'arr_0', 'arr_1', etc. If keyword arguments are given, the corresponding variable names, in the ``.npz`` file will match the keyword names. Parameters ---------- file : str or file Either the file name (string) or an open file (file-like object) where the data will be saved. If file is a string, the ``.npz`` extension will be appended to the file name if it is not already there. 
args : Arguments, optional Arrays to save to the file. Since it is not possible for Python to know the names of the arrays outside `savez`, the arrays will be saved with names "arr_0", "arr_1", and so on. These arguments can be any expression. kwds : Keyword arguments, optional Arrays to save to the file. Arrays will be saved in the file with the keyword names. Returns ------- None See Also -------- save : Save a single array to a binary file in NumPy format. savetxt : Save an array to a file as plain text. savez_compressed : Save several arrays into a compressed ``.npz`` archive Notes ----- The ``.npz`` file format is a zipped archive of files named after the variables they contain. The archive is not compressed and each file in the archive contains one variable in ``.npy`` format. For a description of the ``.npy`` format, see `numpy.lib.format` or the Numpy Enhancement Proposal http://docs.scipy.org/doc/numpy/neps/npy-format.html When opening the saved ``.npz`` file with `load` a `NpzFile` object is returned. This is a dictionary-like object which can be queried for its list of arrays (with the ``.files`` attribute), and for the arrays themselves. Examples -------- >>> from tempfile import TemporaryFile >>> outfile = TemporaryFile() >>> x = np.arange(10) >>> y = np.sin(x) Using `savez` with \\*args, the arrays are saved with default names. >>> np.savez(outfile, x, y) >>> outfile.seek(0) # Only needed here to simulate closing & reopening file >>> npzfile = np.load(outfile) >>> npzfile.files ['arr_1', 'arr_0'] >>> npzfile['arr_0'] array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) Using `savez` with \\**kwds, the arrays are saved with the keyword names. >>> outfile = TemporaryFile() >>> np.savez(outfile, x=x, y=y) >>> outfile.seek(0) >>> npzfile = np.load(outfile) >>> npzfile.files ['y', 'x'] >>> npzfile['x'] array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) """ _savez(file, args, kwds, False) def savez_compressed(file, *args, **kwds): """ Save several arrays into a single file in compressed ``.npz`` format. If keyword arguments are given, then filenames are taken from the keywords. If arguments are passed in with no keywords, then stored file names are arr_0, arr_1, etc. Parameters ---------- file : str File name of ``.npz`` file. args : Arguments Function arguments. kwds : Keyword arguments Keywords. See Also -------- numpy.savez : Save several arrays into an uncompressed ``.npz`` file format numpy.load : Load the files created by savez_compressed. """ _savez(file, args, kwds, True) def _savez(file, args, kwds, compress, allow_pickle=True, pickle_kwargs=None): # Import is postponed to here since zipfile depends on gzip, an optional # component of the so-called standard library. import zipfile # Import deferred for startup time improvement import tempfile if isinstance(file, basestring): if not file.endswith('.npz'): file = file + '.npz' namedict = kwds for i, val in enumerate(args): key = 'arr_%d' % i if key in namedict.keys(): raise ValueError( "Cannot use un-named variables and keyword %s" % key) namedict[key] = val if compress: compression = zipfile.ZIP_DEFLATED else: compression = zipfile.ZIP_STORED zipf = zipfile_factory(file, mode="w", compression=compression) # Stage arrays in a temporary file on disk, before writing to zip. 
fd, tmpfile = tempfile.mkstemp(suffix='-numpy.npy') os.close(fd) try: for key, val in namedict.items(): fname = key + '.npy' fid = open(tmpfile, 'wb') try: format.write_array(fid, np.asanyarray(val), allow_pickle=allow_pickle, pickle_kwargs=pickle_kwargs) fid.close() fid = None zipf.write(tmpfile, arcname=fname) finally: if fid: fid.close() finally: os.remove(tmpfile) zipf.close() def _getconv(dtype): """ Find the correct dtype converter. Adapted from matplotlib """ def floatconv(x): x.lower() if b'0x' in x: return float.fromhex(asstr(x)) return float(x) typ = dtype.type if issubclass(typ, np.bool_): return lambda x: bool(int(x)) if issubclass(typ, np.uint64): return np.uint64 if issubclass(typ, np.int64): return np.int64 if issubclass(typ, np.integer): return lambda x: int(float(x)) elif issubclass(typ, np.floating): return floatconv elif issubclass(typ, np.complex): return lambda x: complex(asstr(x)) elif issubclass(typ, np.bytes_): return bytes else: return str def loadtxt(fname, dtype=float, comments='#', delimiter=None, converters=None, skiprows=0, usecols=None, unpack=False, ndmin=0): """ Load data from a text file. Each row in the text file must have the same number of values. Parameters ---------- fname : file or str File, filename, or generator to read. If the filename extension is ``.gz`` or ``.bz2``, the file is first decompressed. Note that generators should return byte strings for Python 3k. dtype : data-type, optional Data-type of the resulting array; default: float. If this is a structured data-type, the resulting array will be 1-dimensional, and each row will be interpreted as an element of the array. In this case, the number of columns used must match the number of fields in the data-type. comments : str or sequence, optional The characters or list of characters used to indicate the start of a comment; default: '#'. delimiter : str, optional The string used to separate values. By default, this is any whitespace. converters : dict, optional A dictionary mapping column number to a function that will convert that column to a float. E.g., if column 0 is a date string: ``converters = {0: datestr2num}``. Converters can also be used to provide a default value for missing data (but see also `genfromtxt`): ``converters = {3: lambda s: float(s.strip() or 0)}``. Default: None. skiprows : int, optional Skip the first `skiprows` lines; default: 0. usecols : sequence, optional Which columns to read, with 0 being the first. For example, ``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns. The default, None, results in all columns being read. unpack : bool, optional If True, the returned array is transposed, so that arguments may be unpacked using ``x, y, z = loadtxt(...)``. When used with a structured data-type, arrays are returned for each field. Default is False. ndmin : int, optional The returned array will have at least `ndmin` dimensions. Otherwise mono-dimensional axes will be squeezed. Legal values: 0 (default), 1 or 2. .. versionadded:: 1.6.0 Returns ------- out : ndarray Data read from the text file. See Also -------- load, fromstring, fromregex genfromtxt : Load data with missing values handled as specified. scipy.io.loadmat : reads MATLAB data files Notes ----- This function aims to be a fast reader for simply formatted files. The `genfromtxt` function provides more sophisticated handling of, e.g., lines with missing values. .. versionadded:: 1.10.0 The strings produced by the Python float.hex method can be used as input for floats. 
Examples -------- >>> from io import StringIO # StringIO behaves like a file object >>> c = StringIO("0 1\\n2 3") >>> np.loadtxt(c) array([[ 0., 1.], [ 2., 3.]]) >>> d = StringIO("M 21 72\\nF 35 58") >>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'), ... 'formats': ('S1', 'i4', 'f4')}) array([('M', 21, 72.0), ('F', 35, 58.0)], dtype=[('gender', '|S1'), ('age', '<i4'), ('weight', '<f4')]) >>> c = StringIO("1,0,2\\n3,0,4") >>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True) >>> x array([ 1., 3.]) >>> y array([ 2., 4.]) """ # Type conversions for Py3 convenience if comments is not None: if isinstance(comments, (basestring, bytes)): comments = [asbytes(comments)] else: comments = [asbytes(comment) for comment in comments] # Compile regex for comments beforehand comments = (re.escape(comment) for comment in comments) regex_comments = re.compile(asbytes('|').join(comments)) user_converters = converters if delimiter is not None: delimiter = asbytes(delimiter) if usecols is not None: usecols = list(usecols) fown = False try: if _is_string_like(fname): fown = True if fname.endswith('.gz'): import gzip fh = iter(gzip.GzipFile(fname)) elif fname.endswith('.bz2'): import bz2 fh = iter(bz2.BZ2File(fname)) elif sys.version_info[0] == 2: fh = iter(open(fname, 'U')) else: fh = iter(open(fname)) else: fh = iter(fname) except TypeError: raise ValueError('fname must be a string, file handle, or generator') X = [] def flatten_dtype(dt): """Unpack a structured data-type, and produce re-packing info.""" if dt.names is None: # If the dtype is flattened, return. # If the dtype has a shape, the dtype occurs # in the list more than once. shape = dt.shape if len(shape) == 0: return ([dt.base], None) else: packing = [(shape[-1], list)] if len(shape) > 1: for dim in dt.shape[-2::-1]: packing = [(dim*packing[0][0], packing*dim)] return ([dt.base] * int(np.prod(dt.shape)), packing) else: types = [] packing = [] for field in dt.names: tp, bytes = dt.fields[field] flat_dt, flat_packing = flatten_dtype(tp) types.extend(flat_dt) # Avoid extra nesting for subarrays if len(tp.shape) > 0: packing.extend(flat_packing) else: packing.append((len(flat_dt), flat_packing)) return (types, packing) def pack_items(items, packing): """Pack items into nested lists based on re-packing info.""" if packing is None: return items[0] elif packing is tuple: return tuple(items) elif packing is list: return list(items) else: start = 0 ret = [] for length, subpacking in packing: ret.append(pack_items(items[start:start+length], subpacking)) start += length return tuple(ret) def split_line(line): """Chop off comments, strip, and split at delimiter. Note that although the file is opened as text, this function returns bytes. """ line = asbytes(line) if comments is not None: line = regex_comments.split(asbytes(line), maxsplit=1)[0] line = line.strip(asbytes('\r\n')) if line: return line.split(delimiter) else: return [] try: # Make sure we're dealing with a proper dtype dtype = np.dtype(dtype) defconv = _getconv(dtype) # Skip the first `skiprows` lines for i in range(skiprows): next(fh) # Read until we find a line with some values, and use # it to estimate the number of columns, N. 
first_vals = None try: while not first_vals: first_line = next(fh) first_vals = split_line(first_line) except StopIteration: # End of lines reached first_line = '' first_vals = [] warnings.warn('loadtxt: Empty input file: "%s"' % fname) N = len(usecols or first_vals) dtype_types, packing = flatten_dtype(dtype) if len(dtype_types) > 1: # We're dealing with a structured array, each field of # the dtype matches a column converters = [_getconv(dt) for dt in dtype_types] else: # All fields have the same dtype converters = [defconv for i in range(N)] if N > 1: packing = [(N, tuple)] # By preference, use the converters specified by the user for i, conv in (user_converters or {}).items(): if usecols: try: i = usecols.index(i) except ValueError: # Unused converter specified continue converters[i] = conv # Parse each line, including the first for i, line in enumerate(itertools.chain([first_line], fh)): vals = split_line(line) if len(vals) == 0: continue if usecols: vals = [vals[i] for i in usecols] if len(vals) != N: line_num = i + skiprows + 1 raise ValueError("Wrong number of columns at line %d" % line_num) # Convert each value according to its column and store items = [conv(val) for (conv, val) in zip(converters, vals)] # Then pack it according to the dtype's nesting items = pack_items(items, packing) X.append(items) finally: if fown: fh.close() X = np.array(X, dtype) # Multicolumn data are returned with shape (1, N, M), i.e. # (1, 1, M) for a single row - remove the singleton dimension there if X.ndim == 3 and X.shape[:2] == (1, 1): X.shape = (1, -1) # Verify that the array has at least dimensions `ndmin`. # Check correctness of the values of `ndmin` if ndmin not in [0, 1, 2]: raise ValueError('Illegal value of ndmin keyword: %s' % ndmin) # Tweak the size and shape of the arrays - remove extraneous dimensions if X.ndim > ndmin: X = np.squeeze(X) # and ensure we have the minimum number of dimensions asked for # - has to be in this order for the odd case ndmin=1, X.squeeze().ndim=0 if X.ndim < ndmin: if ndmin == 1: X = np.atleast_1d(X) elif ndmin == 2: X = np.atleast_2d(X).T if unpack: if len(dtype_types) > 1: # For structured arrays, return an array for each field. return [X[field] for field in dtype.names] else: return X.T else: return X def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='', footer='', comments='# '): """ Save an array to a text file. Parameters ---------- fname : filename or file handle If the filename ends in ``.gz``, the file is automatically saved in compressed gzip format. `loadtxt` understands gzipped files transparently. X : array_like Data to be saved to a text file. fmt : str or sequence of strs, optional A single format (%10.5f), a sequence of formats, or a multi-format string, e.g. 'Iteration %d -- %10.5f', in which case `delimiter` is ignored. For complex `X`, the legal options for `fmt` are: a) a single specifier, `fmt='%.4e'`, resulting in numbers formatted like `' (%s+%sj)' % (fmt, fmt)` b) a full string specifying every real and imaginary part, e.g. `' %.4e %+.4j %.4e %+.4j %.4e %+.4j'` for 3 columns c) a list of specifiers, one per column - in this case, the real and imaginary part must have separate specifiers, e.g. `['%.3e + %.3ej', '(%.15e%+.15ej)']` for 2 columns delimiter : str, optional String or character separating columns. newline : str, optional String or character separating lines. .. versionadded:: 1.5.0 header : str, optional String that will be written at the beginning of the file. .. 
versionadded:: 1.7.0 footer : str, optional String that will be written at the end of the file. .. versionadded:: 1.7.0 comments : str, optional String that will be prepended to the ``header`` and ``footer`` strings, to mark them as comments. Default: '# ', as expected by e.g. ``numpy.loadtxt``. .. versionadded:: 1.7.0 See Also -------- save : Save an array to a binary file in NumPy ``.npy`` format savez : Save several arrays into an uncompressed ``.npz`` archive savez_compressed : Save several arrays into a compressed ``.npz`` archive Notes ----- Further explanation of the `fmt` parameter (``%[flag]width[.precision]specifier``): flags: ``-`` : left justify ``+`` : Forces to precede result with + or -. ``0`` : Left pad the number with zeros instead of space (see width). width: Minimum number of characters to be printed. The value is not truncated if it has more characters. precision: - For integer specifiers (eg. ``d,i,o,x``), the minimum number of digits. - For ``e, E`` and ``f`` specifiers, the number of digits to print after the decimal point. - For ``g`` and ``G``, the maximum number of significant digits. - For ``s``, the maximum number of characters. specifiers: ``c`` : character ``d`` or ``i`` : signed decimal integer ``e`` or ``E`` : scientific notation with ``e`` or ``E``. ``f`` : decimal floating point ``g,G`` : use the shorter of ``e,E`` or ``f`` ``o`` : signed octal ``s`` : string of characters ``u`` : unsigned decimal integer ``x,X`` : unsigned hexadecimal integer This explanation of ``fmt`` is not complete, for an exhaustive specification see [1]_. References ---------- .. [1] `Format Specification Mini-Language <http://docs.python.org/library/string.html# format-specification-mini-language>`_, Python Documentation. Examples -------- >>> x = y = z = np.arange(0.0,5.0,1.0) >>> np.savetxt('test.out', x, delimiter=',') # X is an array >>> np.savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays >>> np.savetxt('test.out', x, fmt='%1.4e') # use exponential notation """ # Py3 conversions first if isinstance(fmt, bytes): fmt = asstr(fmt) delimiter = asstr(delimiter) own_fh = False if _is_string_like(fname): own_fh = True if fname.endswith('.gz'): import gzip fh = gzip.open(fname, 'wb') else: if sys.version_info[0] >= 3: fh = open(fname, 'wb') else: fh = open(fname, 'w') elif hasattr(fname, 'write'): fh = fname else: raise ValueError('fname must be a string or file handle') try: X = np.asarray(X) # Handle 1-dimensional arrays if X.ndim == 1: # Common case -- 1d array of numbers if X.dtype.names is None: X = np.atleast_2d(X).T ncol = 1 # Complex dtype -- each field indicates a separate column else: ncol = len(X.dtype.descr) else: ncol = X.shape[1] iscomplex_X = np.iscomplexobj(X) # `fmt` can be a string with multiple insertion points or a # list of formats. E.g. '%10.5f\t%10d' or ('%10.5f', '$10d') if type(fmt) in (list, tuple): if len(fmt) != ncol: raise AttributeError('fmt has wrong shape. 
%s' % str(fmt)) format = asstr(delimiter).join(map(asstr, fmt)) elif isinstance(fmt, str): n_fmt_chars = fmt.count('%') error = ValueError('fmt has wrong number of %% formats: %s' % fmt) if n_fmt_chars == 1: if iscomplex_X: fmt = [' (%s+%sj)' % (fmt, fmt), ] * ncol else: fmt = [fmt, ] * ncol format = delimiter.join(fmt) elif iscomplex_X and n_fmt_chars != (2 * ncol): raise error elif ((not iscomplex_X) and n_fmt_chars != ncol): raise error else: format = fmt else: raise ValueError('invalid fmt: %r' % (fmt,)) if len(header) > 0: header = header.replace('\n', '\n' + comments) fh.write(asbytes(comments + header + newline)) if iscomplex_X: for row in X: row2 = [] for number in row: row2.append(number.real) row2.append(number.imag) fh.write(asbytes(format % tuple(row2) + newline)) else: for row in X: try: fh.write(asbytes(format % tuple(row) + newline)) except TypeError: raise TypeError("Mismatch between array dtype ('%s') and " "format specifier ('%s')" % (str(X.dtype), format)) if len(footer) > 0: footer = footer.replace('\n', '\n' + comments) fh.write(asbytes(comments + footer + newline)) finally: if own_fh: fh.close() def fromregex(file, regexp, dtype): """ Construct an array from a text file, using regular expression parsing. The returned array is always a structured array, and is constructed from all matches of the regular expression in the file. Groups in the regular expression are converted to fields of the structured array. Parameters ---------- file : str or file File name or file object to read. regexp : str or regexp Regular expression used to parse the file. Groups in the regular expression correspond to fields in the dtype. dtype : dtype or list of dtypes Dtype for the structured array. Returns ------- output : ndarray The output array, containing the part of the content of `file` that was matched by `regexp`. `output` is always a structured array. Raises ------ TypeError When `dtype` is not a valid dtype for a structured array. See Also -------- fromstring, loadtxt Notes ----- Dtypes for structured arrays can be specified in several forms, but all forms specify at least the data type and field name. For details see `doc.structured_arrays`. Examples -------- >>> f = open('test.dat', 'w') >>> f.write("1312 foo\\n1534 bar\\n444 qux") >>> f.close() >>> regexp = r"(\\d+)\\s+(...)" # match [digits, whitespace, anything] >>> output = np.fromregex('test.dat', regexp, ... [('num', np.int64), ('key', 'S3')]) >>> output array([(1312L, 'foo'), (1534L, 'bar'), (444L, 'qux')], dtype=[('num', '<i8'), ('key', '|S3')]) >>> output['num'] array([1312, 1534, 444], dtype=int64) """ own_fh = False if not hasattr(file, "read"): file = open(file, 'rb') own_fh = True try: if not hasattr(regexp, 'match'): regexp = re.compile(asbytes(regexp)) if not isinstance(dtype, np.dtype): dtype = np.dtype(dtype) seq = regexp.findall(file.read()) if seq and not isinstance(seq[0], tuple): # Only one group is in the regexp. # Create the new array as a single data-type and then # re-interpret as a single-field structured array. 
newdtype = np.dtype(dtype[dtype.names[0]]) output = np.array(seq, dtype=newdtype) output.dtype = dtype else: output = np.array(seq, dtype=dtype) return output finally: if own_fh: file.close() #####-------------------------------------------------------------------------- #---- --- ASCII functions --- #####-------------------------------------------------------------------------- def genfromtxt(fname, dtype=float, comments='#', delimiter=None, skip_header=0, skip_footer=0, converters=None, missing_values=None, filling_values=None, usecols=None, names=None, excludelist=None, deletechars=None, replace_space='_', autostrip=False, case_sensitive=True, defaultfmt="f%i", unpack=None, usemask=False, loose=True, invalid_raise=True, max_rows=None): """ Load data from a text file, with missing values handled as specified. Each line past the first `skip_header` lines is split at the `delimiter` character, and characters following the `comments` character are discarded. Parameters ---------- fname : file or str File, filename, or generator to read. If the filename extension is `.gz` or `.bz2`, the file is first decompressed. Note that generators must return byte strings in Python 3k. dtype : dtype, optional Data type of the resulting array. If None, the dtypes will be determined by the contents of each column, individually. comments : str, optional The character used to indicate the start of a comment. All the characters occurring on a line after a comment are discarded delimiter : str, int, or sequence, optional The string used to separate values. By default, any consecutive whitespaces act as delimiter. An integer or sequence of integers can also be provided as width(s) of each field. skiprows : int, optional `skiprows` was removed in numpy 1.10. Please use `skip_header` instead. skip_header : int, optional The number of lines to skip at the beginning of the file. skip_footer : int, optional The number of lines to skip at the end of the file. converters : variable, optional The set of functions that convert the data of a column to a value. The converters can also be used to provide a default value for missing data: ``converters = {3: lambda s: float(s or 0)}``. missing : variable, optional `missing` was removed in numpy 1.10. Please use `missing_values` instead. missing_values : variable, optional The set of strings corresponding to missing data. filling_values : variable, optional The set of values to be used as default when the data are missing. usecols : sequence, optional Which columns to read, with 0 being the first. For example, ``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns. names : {None, True, str, sequence}, optional If `names` is True, the field names are read from the first valid line after the first `skip_header` lines. If `names` is a sequence or a single-string of comma-separated names, the names will be used to define the field names in a structured dtype. If `names` is None, the names of the dtype fields will be used, if any. excludelist : sequence, optional A list of names to exclude. This list is appended to the default list ['return','file','print']. Excluded names are appended an underscore: for example, `file` would become `file_`. deletechars : str, optional A string combining invalid characters that must be deleted from the names. defaultfmt : str, optional A format used to define default field names, such as "f%i" or "f_%02i". autostrip : bool, optional Whether to automatically strip white spaces from the variables. 
replace_space : char, optional Character(s) used in replacement of white spaces in the variables names. By default, use a '_'. case_sensitive : {True, False, 'upper', 'lower'}, optional If True, field names are case sensitive. If False or 'upper', field names are converted to upper case. If 'lower', field names are converted to lower case. unpack : bool, optional If True, the returned array is transposed, so that arguments may be unpacked using ``x, y, z = loadtxt(...)`` usemask : bool, optional If True, return a masked array. If False, return a regular array. loose : bool, optional If True, do not raise errors for invalid values. invalid_raise : bool, optional If True, an exception is raised if an inconsistency is detected in the number of columns. If False, a warning is emitted and the offending lines are skipped. max_rows : int, optional The maximum number of rows to read. Must not be used with skip_footer at the same time. If given, the value must be at least 1. Default is to read the entire file. .. versionadded:: 1.10.0 Returns ------- out : ndarray Data read from the text file. If `usemask` is True, this is a masked array. See Also -------- numpy.loadtxt : equivalent function when no data is missing. Notes ----- * When spaces are used as delimiters, or when no delimiter has been given as input, there should not be any missing data between two fields. * When the variables are named (either by a flexible dtype or with `names`, there must not be any header in the file (else a ValueError exception is raised). * Individual values are not stripped of spaces by default. When using a custom converter, make sure the function does remove spaces. References ---------- .. [1] Numpy User Guide, section `I/O with Numpy <http://docs.scipy.org/doc/numpy/user/basics.io.genfromtxt.html>`_. Examples --------- >>> from io import StringIO >>> import numpy as np Comma delimited file with mixed dtype >>> s = StringIO("1,1.3,abcde") >>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'), ... ('mystring','S5')], delimiter=",") >>> data array((1, 1.3, 'abcde'), dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')]) Using dtype = None >>> s.seek(0) # needed for StringIO example only >>> data = np.genfromtxt(s, dtype=None, ... names = ['myint','myfloat','mystring'], delimiter=",") >>> data array((1, 1.3, 'abcde'), dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')]) Specifying dtype and names >>> s.seek(0) >>> data = np.genfromtxt(s, dtype="i8,f8,S5", ... names=['myint','myfloat','mystring'], delimiter=",") >>> data array((1, 1.3, 'abcde'), dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')]) An example with fixed-width columns >>> s = StringIO("11.3abcde") >>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'], ... 
delimiter=[1,3,5]) >>> data array((1, 1.3, 'abcde'), dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', '|S5')]) """ if max_rows is not None: if skip_footer: raise ValueError( "The keywords 'skip_footer' and 'max_rows' can not be " "specified at the same time.") if max_rows < 1: raise ValueError("'max_rows' must be at least 1.") # Py3 data conversions to bytes, for convenience if comments is not None: comments = asbytes(comments) if isinstance(delimiter, unicode): delimiter = asbytes(delimiter) if isinstance(missing_values, (unicode, list, tuple)): missing_values = asbytes_nested(missing_values) # if usemask: from numpy.ma import MaskedArray, make_mask_descr # Check the input dictionary of converters user_converters = converters or {} if not isinstance(user_converters, dict): raise TypeError( "The input argument 'converter' should be a valid dictionary " "(got '%s' instead)" % type(user_converters)) # Initialize the filehandle, the LineSplitter and the NameValidator own_fhd = False try: if isinstance(fname, basestring): if sys.version_info[0] == 2: fhd = iter(np.lib._datasource.open(fname, 'rbU')) else: fhd = iter(np.lib._datasource.open(fname, 'rb')) own_fhd = True else: fhd = iter(fname) except TypeError: raise TypeError( "fname must be a string, filehandle, or generator. " "(got %s instead)" % type(fname)) split_line = LineSplitter(delimiter=delimiter, comments=comments, autostrip=autostrip)._handyman validate_names = NameValidator(excludelist=excludelist, deletechars=deletechars, case_sensitive=case_sensitive, replace_space=replace_space) # Skip the first `skip_header` rows for i in range(skip_header): next(fhd) # Keep on until we find the first valid values first_values = None try: while not first_values: first_line = next(fhd) if names is True: if comments in first_line: first_line = ( asbytes('').join(first_line.split(comments)[1:])) first_values = split_line(first_line) except StopIteration: # return an empty array if the datafile is empty first_line = asbytes('') first_values = [] warnings.warn('genfromtxt: Empty input file: "%s"' % fname) # Should we take the first values as names ? 
if names is True: fval = first_values[0].strip() if fval in comments: del first_values[0] # Check the columns to use: make sure `usecols` is a list if usecols is not None: try: usecols = [_.strip() for _ in usecols.split(",")] except AttributeError: try: usecols = list(usecols) except TypeError: usecols = [usecols, ] nbcols = len(usecols or first_values) # Check the names and overwrite the dtype.names if needed if names is True: names = validate_names([_bytes_to_name(_.strip()) for _ in first_values]) first_line = asbytes('') elif _is_string_like(names): names = validate_names([_.strip() for _ in names.split(',')]) elif names: names = validate_names(names) # Get the dtype if dtype is not None: dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names, excludelist=excludelist, deletechars=deletechars, case_sensitive=case_sensitive, replace_space=replace_space) # Make sure the names is a list (for 2.5) if names is not None: names = list(names) if usecols: for (i, current) in enumerate(usecols): # if usecols is a list of names, convert to a list of indices if _is_string_like(current): usecols[i] = names.index(current) elif current < 0: usecols[i] = current + len(first_values) # If the dtype is not None, make sure we update it if (dtype is not None) and (len(dtype) > nbcols): descr = dtype.descr dtype = np.dtype([descr[_] for _ in usecols]) names = list(dtype.names) # If `names` is not None, update the names elif (names is not None) and (len(names) > nbcols): names = [names[_] for _ in usecols] elif (names is not None) and (dtype is not None): names = list(dtype.names) # Process the missing values ............................... # Rename missing_values for convenience user_missing_values = missing_values or () # Define the list of missing_values (one column: one list) missing_values = [list([asbytes('')]) for _ in range(nbcols)] # We have a dictionary: process it field by field if isinstance(user_missing_values, dict): # Loop on the items for (key, val) in user_missing_values.items(): # Is the key a string ? if _is_string_like(key): try: # Transform it into an integer key = names.index(key) except ValueError: # We couldn't find it: the name must have been dropped continue # Redefine the key as needed if it's a column number if usecols: try: key = usecols.index(key) except ValueError: pass # Transform the value as a list of string if isinstance(val, (list, tuple)): val = [str(_) for _ in val] else: val = [str(val), ] # Add the value(s) to the current list of missing if key is None: # None acts as default for miss in missing_values: miss.extend(val) else: missing_values[key].extend(val) # We have a sequence : each item matches a column elif isinstance(user_missing_values, (list, tuple)): for (value, entry) in zip(user_missing_values, missing_values): value = str(value) if value not in entry: entry.append(value) # We have a string : apply it to all entries elif isinstance(user_missing_values, bytes): user_value = user_missing_values.split(asbytes(",")) for entry in missing_values: entry.extend(user_value) # We have something else: apply it to all entries else: for entry in missing_values: entry.extend([str(user_missing_values)]) # Process the filling_values ............................... 
# Rename the input for convenience user_filling_values = filling_values if user_filling_values is None: user_filling_values = [] # Define the default filling_values = [None] * nbcols # We have a dictionary : update each entry individually if isinstance(user_filling_values, dict): for (key, val) in user_filling_values.items(): if _is_string_like(key): try: # Transform it into an integer key = names.index(key) except ValueError: # We couldn't find it: the name must have been dropped, continue # Redefine the key if it's a column number and usecols is defined if usecols: try: key = usecols.index(key) except ValueError: pass # Add the value to the list filling_values[key] = val # We have a sequence : update on a one-to-one basis elif isinstance(user_filling_values, (list, tuple)): n = len(user_filling_values) if (n <= nbcols): filling_values[:n] = user_filling_values else: filling_values = user_filling_values[:nbcols] # We have something else : use it for all entries else: filling_values = [user_filling_values] * nbcols # Initialize the converters ................................ if dtype is None: # Note: we can't use a [...]*nbcols, as we would have 3 times the same # ... converter, instead of 3 different converters. converters = [StringConverter(None, missing_values=miss, default=fill) for (miss, fill) in zip(missing_values, filling_values)] else: dtype_flat = flatten_dtype(dtype, flatten_base=True) # Initialize the converters if len(dtype_flat) > 1: # Flexible type : get a converter from each dtype zipit = zip(dtype_flat, missing_values, filling_values) converters = [StringConverter(dt, locked=True, missing_values=miss, default=fill) for (dt, miss, fill) in zipit] else: # Set to a default converter (but w/ different missing values) zipit = zip(missing_values, filling_values) converters = [StringConverter(dtype, locked=True, missing_values=miss, default=fill) for (miss, fill) in zipit] # Update the converters to use the user-defined ones uc_update = [] for (j, conv) in user_converters.items(): # If the converter is specified by column names, use the index instead if _is_string_like(j): try: j = names.index(j) i = j except ValueError: continue elif usecols: try: i = usecols.index(j) except ValueError: # Unused converter specified continue else: i = j # Find the value to test - first_line is not filtered by usecols: if len(first_line): testing_value = first_values[j] else: testing_value = None converters[i].update(conv, locked=True, testing_value=testing_value, default=filling_values[i], missing_values=missing_values[i],) uc_update.append((i, conv)) # Make sure we have the corrected keys in user_converters... user_converters.update(uc_update) # Fixme: possible error as following variable never used. #miss_chars = [_.missing_values for _ in converters] # Initialize the output lists ... # ... rows rows = [] append_to_rows = rows.append # ... masks if usemask: masks = [] append_to_masks = masks.append # ... 
invalid invalid = [] append_to_invalid = invalid.append # Parse each line for (i, line) in enumerate(itertools.chain([first_line, ], fhd)): values = split_line(line) nbvalues = len(values) # Skip an empty line if nbvalues == 0: continue if usecols: # Select only the columns we need try: values = [values[_] for _ in usecols] except IndexError: append_to_invalid((i + skip_header + 1, nbvalues)) continue elif nbvalues != nbcols: append_to_invalid((i + skip_header + 1, nbvalues)) continue # Store the values append_to_rows(tuple(values)) if usemask: append_to_masks(tuple([v.strip() in m for (v, m) in zip(values, missing_values)])) if len(rows) == max_rows: break if own_fhd: fhd.close() # Upgrade the converters (if needed) if dtype is None: for (i, converter) in enumerate(converters): current_column = [itemgetter(i)(_m) for _m in rows] try: converter.iterupgrade(current_column) except ConverterLockError: errmsg = "Converter #%i is locked and cannot be upgraded: " % i current_column = map(itemgetter(i), rows) for (j, value) in enumerate(current_column): try: converter.upgrade(value) except (ConverterError, ValueError): errmsg += "(occurred line #%i for value '%s')" errmsg %= (j + 1 + skip_header, value) raise ConverterError(errmsg) # Check that we don't have invalid values nbinvalid = len(invalid) if nbinvalid > 0: nbrows = len(rows) + nbinvalid - skip_footer # Construct the error message template = " Line #%%i (got %%i columns instead of %i)" % nbcols if skip_footer > 0: nbinvalid_skipped = len([_ for _ in invalid if _[0] > nbrows + skip_header]) invalid = invalid[:nbinvalid - nbinvalid_skipped] skip_footer -= nbinvalid_skipped # # nbrows -= skip_footer # errmsg = [template % (i, nb) # for (i, nb) in invalid if i < nbrows] # else: errmsg = [template % (i, nb) for (i, nb) in invalid] if len(errmsg): errmsg.insert(0, "Some errors were detected !") errmsg = "\n".join(errmsg) # Raise an exception ? if invalid_raise: raise ValueError(errmsg) # Issue a warning ? else: warnings.warn(errmsg, ConversionWarning) # Strip the last skip_footer data if skip_footer > 0: rows = rows[:-skip_footer] if usemask: masks = masks[:-skip_footer] # Convert each value according to the converter: # We want to modify the list in place to avoid creating a new one... if loose: rows = list( zip(*[[conv._loose_call(_r) for _r in map(itemgetter(i), rows)] for (i, conv) in enumerate(converters)])) else: rows = list( zip(*[[conv._strict_call(_r) for _r in map(itemgetter(i), rows)] for (i, conv) in enumerate(converters)])) # Reset the dtype data = rows if dtype is None: # Get the dtypes from the types of the converters column_types = [conv.type for conv in converters] # Find the columns with strings... strcolidx = [i for (i, v) in enumerate(column_types) if v in (type('S'), np.string_)] # ... and take the largest number of chars. 
for i in strcolidx: column_types[i] = "|S%i" % max(len(row[i]) for row in data) # if names is None: # If the dtype is uniform, don't define names, else use '' base = set([c.type for c in converters if c._checked]) if len(base) == 1: (ddtype, mdtype) = (list(base)[0], np.bool) else: ddtype = [(defaultfmt % i, dt) for (i, dt) in enumerate(column_types)] if usemask: mdtype = [(defaultfmt % i, np.bool) for (i, dt) in enumerate(column_types)] else: ddtype = list(zip(names, column_types)) mdtype = list(zip(names, [np.bool] * len(column_types))) output = np.array(data, dtype=ddtype) if usemask: outputmask = np.array(masks, dtype=mdtype) else: # Overwrite the initial dtype names if needed if names and dtype.names: dtype.names = names # Case 1. We have a structured type if len(dtype_flat) > 1: # Nested dtype, eg [('a', int), ('b', [('b0', int), ('b1', 'f4')])] # First, create the array using a flattened dtype: # [('a', int), ('b1', int), ('b2', float)] # Then, view the array using the specified dtype. if 'O' in (_.char for _ in dtype_flat): if has_nested_fields(dtype): raise NotImplementedError( "Nested fields involving objects are not supported...") else: output = np.array(data, dtype=dtype) else: rows = np.array(data, dtype=[('', _) for _ in dtype_flat]) output = rows.view(dtype) # Now, process the rowmasks the same way if usemask: rowmasks = np.array( masks, dtype=np.dtype([('', np.bool) for t in dtype_flat])) # Construct the new dtype mdtype = make_mask_descr(dtype) outputmask = rowmasks.view(mdtype) # Case #2. We have a basic dtype else: # We used some user-defined converters if user_converters: ishomogeneous = True descr = [] for i, ttype in enumerate([conv.type for conv in converters]): # Keep the dtype of the current converter if i in user_converters: ishomogeneous &= (ttype == dtype.type) if ttype == np.string_: ttype = "|S%i" % max(len(row[i]) for row in data) descr.append(('', ttype)) else: descr.append(('', dtype)) # So we changed the dtype ? if not ishomogeneous: # We have more than one field if len(descr) > 1: dtype = np.dtype(descr) # We have only one field: drop the name if not needed. else: dtype = np.dtype(ttype) # output = np.array(data, dtype) if usemask: if dtype.names: mdtype = [(_, np.bool) for _ in dtype.names] else: mdtype = np.bool outputmask = np.array(masks, dtype=mdtype) # Try to take care of the missing data we missed names = output.dtype.names if usemask and names: for (name, conv) in zip(names or (), converters): missing_values = [conv(_) for _ in conv.missing_values if _ != asbytes('')] for mval in missing_values: outputmask[name] |= (output[name] == mval) # Construct the final array if usemask: output = output.view(MaskedArray) output._mask = outputmask if unpack: return output.squeeze().T return output.squeeze() def ndfromtxt(fname, **kwargs): """ Load ASCII data stored in a file and return it as a single array. Parameters ---------- fname, kwargs : For a description of input parameters, see `genfromtxt`. See Also -------- numpy.genfromtxt : generic function. """ kwargs['usemask'] = False return genfromtxt(fname, **kwargs) def mafromtxt(fname, **kwargs): """ Load ASCII data stored in a text file and return a masked array. Parameters ---------- fname, kwargs : For a description of input parameters, see `genfromtxt`. See Also -------- numpy.genfromtxt : generic function to load ASCII data. """ kwargs['usemask'] = True return genfromtxt(fname, **kwargs) def recfromtxt(fname, **kwargs): """ Load ASCII data from a file and return it in a record array. 
If ``usemask=False`` a standard `recarray` is returned, if ``usemask=True`` a MaskedRecords array is returned. Parameters ---------- fname, kwargs : For a description of input parameters, see `genfromtxt`. See Also -------- numpy.genfromtxt : generic function Notes ----- By default, `dtype` is None, which means that the data-type of the output array will be determined from the data. """ kwargs.setdefault("dtype", None) usemask = kwargs.get('usemask', False) output = genfromtxt(fname, **kwargs) if usemask: from numpy.ma.mrecords import MaskedRecords output = output.view(MaskedRecords) else: output = output.view(np.recarray) return output def recfromcsv(fname, **kwargs): """ Load ASCII data stored in a comma-separated file. The returned array is a record array (if ``usemask=False``, see `recarray`) or a masked record array (if ``usemask=True``, see `ma.mrecords.MaskedRecords`). Parameters ---------- fname, kwargs : For a description of input parameters, see `genfromtxt`. See Also -------- numpy.genfromtxt : generic function to load ASCII data. Notes ----- By default, `dtype` is None, which means that the data-type of the output array will be determined from the data. """ # Set default kwargs for genfromtxt as relevant to csv import. kwargs.setdefault("case_sensitive", "lower") kwargs.setdefault("names", True) kwargs.setdefault("delimiter", ",") kwargs.setdefault("dtype", None) output = genfromtxt(fname, **kwargs) usemask = kwargs.get("usemask", False) if usemask: from numpy.ma.mrecords import MaskedRecords output = output.view(MaskedRecords) else: output = output.view(np.recarray) return output
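
# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the original module): a doctest-style
# walk-through of the save/load round trip implemented above. The file names
# 'arrays.npz' and 'table.csv' are hypothetical placeholders.
#
#   >>> import numpy as np
#   >>> a = np.arange(6).reshape(2, 3)
#   >>> np.savez('arrays.npz', a=a)              # writes a zipped .npz archive
#   >>> with np.load('arrays.npz') as npz:       # NpzFile supports the context
#   ...     a_restored = npz['a']                # manager protocol (see above)
#   >>> rec = np.recfromcsv('table.csv')         # genfromtxt with csv defaults
# ---------------------------------------------------------------------------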
bsd-3-clause
xwolf12/scikit-learn
examples/feature_selection/plot_feature_selection.py
249
2827
""" =============================== Univariate Feature Selection =============================== An example showing univariate feature selection. Noisy (non informative) features are added to the iris data and univariate feature selection is applied. For each feature, we plot the p-values for the univariate feature selection and the corresponding weights of an SVM. We can see that univariate feature selection selects the informative features and that these have larger SVM weights. In the total set of features, only the 4 first ones are significant. We can see that they have the highest score with univariate feature selection. The SVM assigns a large weight to one of these features, but also Selects many of the non-informative features. Applying univariate feature selection before the SVM increases the SVM weight attributed to the significant features, and will thus improve classification. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn import datasets, svm from sklearn.feature_selection import SelectPercentile, f_classif ############################################################################### # import some data to play with # The iris dataset iris = datasets.load_iris() # Some noisy data not correlated E = np.random.uniform(0, 0.1, size=(len(iris.data), 20)) # Add the noisy data to the informative features X = np.hstack((iris.data, E)) y = iris.target ############################################################################### plt.figure(1) plt.clf() X_indices = np.arange(X.shape[-1]) ############################################################################### # Univariate feature selection with F-test for feature scoring # We use the default selection function: the 10% most significant features selector = SelectPercentile(f_classif, percentile=10) selector.fit(X, y) scores = -np.log10(selector.pvalues_) scores /= scores.max() plt.bar(X_indices - .45, scores, width=.2, label=r'Univariate score ($-Log(p_{value})$)', color='g') ############################################################################### # Compare to the weights of an SVM clf = svm.SVC(kernel='linear') clf.fit(X, y) svm_weights = (clf.coef_ ** 2).sum(axis=0) svm_weights /= svm_weights.max() plt.bar(X_indices - .25, svm_weights, width=.2, label='SVM weight', color='r') clf_selected = svm.SVC(kernel='linear') clf_selected.fit(selector.transform(X), y) svm_weights_selected = (clf_selected.coef_ ** 2).sum(axis=0) svm_weights_selected /= svm_weights_selected.max() plt.bar(X_indices[selector.get_support()] - .05, svm_weights_selected, width=.2, label='SVM weights after selection', color='b') plt.title("Comparing feature selection") plt.xlabel('Feature number') plt.yticks(()) plt.axis('tight') plt.legend(loc='upper right') plt.show()
bsd-3-clause
xubenben/scikit-learn
sklearn/cluster/birch.py
207
22706
# Authors: Manoj Kumar <manojkumarsivaraj334@gmail.com> # Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr> # Joel Nothman <joel.nothman@gmail.com> # License: BSD 3 clause from __future__ import division import warnings import numpy as np from scipy import sparse from math import sqrt from ..metrics.pairwise import euclidean_distances from ..base import TransformerMixin, ClusterMixin, BaseEstimator from ..externals.six.moves import xrange from ..utils import check_array from ..utils.extmath import row_norms, safe_sparse_dot from ..utils.validation import NotFittedError, check_is_fitted from .hierarchical import AgglomerativeClustering def _iterate_sparse_X(X): """This little hack returns a densified row when iterating over a sparse matrix, insted of constructing a sparse matrix for every row that is expensive. """ n_samples = X.shape[0] X_indices = X.indices X_data = X.data X_indptr = X.indptr for i in xrange(n_samples): row = np.zeros(X.shape[1]) startptr, endptr = X_indptr[i], X_indptr[i + 1] nonzero_indices = X_indices[startptr:endptr] row[nonzero_indices] = X_data[startptr:endptr] yield row def _split_node(node, threshold, branching_factor): """The node has to be split if there is no place for a new subcluster in the node. 1. Two empty nodes and two empty subclusters are initialized. 2. The pair of distant subclusters are found. 3. The properties of the empty subclusters and nodes are updated according to the nearest distance between the subclusters to the pair of distant subclusters. 4. The two nodes are set as children to the two subclusters. """ new_subcluster1 = _CFSubcluster() new_subcluster2 = _CFSubcluster() new_node1 = _CFNode( threshold, branching_factor, is_leaf=node.is_leaf, n_features=node.n_features) new_node2 = _CFNode( threshold, branching_factor, is_leaf=node.is_leaf, n_features=node.n_features) new_subcluster1.child_ = new_node1 new_subcluster2.child_ = new_node2 if node.is_leaf: if node.prev_leaf_ is not None: node.prev_leaf_.next_leaf_ = new_node1 new_node1.prev_leaf_ = node.prev_leaf_ new_node1.next_leaf_ = new_node2 new_node2.prev_leaf_ = new_node1 new_node2.next_leaf_ = node.next_leaf_ if node.next_leaf_ is not None: node.next_leaf_.prev_leaf_ = new_node2 dist = euclidean_distances( node.centroids_, Y_norm_squared=node.squared_norm_, squared=True) n_clusters = dist.shape[0] farthest_idx = np.unravel_index( dist.argmax(), (n_clusters, n_clusters)) node1_dist, node2_dist = dist[[farthest_idx]] node1_closer = node1_dist < node2_dist for idx, subcluster in enumerate(node.subclusters_): if node1_closer[idx]: new_node1.append_subcluster(subcluster) new_subcluster1.update(subcluster) else: new_node2.append_subcluster(subcluster) new_subcluster2.update(subcluster) return new_subcluster1, new_subcluster2 class _CFNode(object): """Each node in a CFTree is called a CFNode. The CFNode can have a maximum of branching_factor number of CFSubclusters. Parameters ---------- threshold : float Threshold needed for a new subcluster to enter a CFSubcluster. branching_factor : int Maximum number of CF subclusters in each node. is_leaf : bool We need to know if the CFNode is a leaf or not, in order to retrieve the final subclusters. n_features : int The number of features. Attributes ---------- subclusters_ : array-like list of subclusters for a particular CFNode. prev_leaf_ : _CFNode prev_leaf. Useful only if is_leaf is True. next_leaf_ : _CFNode next_leaf. Useful only if is_leaf is True. the final subclusters. 
init_centroids_ : ndarray, shape (branching_factor + 1, n_features) manipulate ``init_centroids_`` throughout rather than centroids_ since the centroids are just a view of the ``init_centroids_`` . init_sq_norm_ : ndarray, shape (branching_factor + 1,) manipulate init_sq_norm_ throughout. similar to ``init_centroids_``. centroids_ : ndarray view of ``init_centroids_``. squared_norm_ : ndarray view of ``init_sq_norm_``. """ def __init__(self, threshold, branching_factor, is_leaf, n_features): self.threshold = threshold self.branching_factor = branching_factor self.is_leaf = is_leaf self.n_features = n_features # The list of subclusters, centroids and squared norms # to manipulate throughout. self.subclusters_ = [] self.init_centroids_ = np.zeros((branching_factor + 1, n_features)) self.init_sq_norm_ = np.zeros((branching_factor + 1)) self.squared_norm_ = [] self.prev_leaf_ = None self.next_leaf_ = None def append_subcluster(self, subcluster): n_samples = len(self.subclusters_) self.subclusters_.append(subcluster) self.init_centroids_[n_samples] = subcluster.centroid_ self.init_sq_norm_[n_samples] = subcluster.sq_norm_ # Keep centroids and squared norm as views. In this way # if we change init_centroids and init_sq_norm_, it is # sufficient, self.centroids_ = self.init_centroids_[:n_samples + 1, :] self.squared_norm_ = self.init_sq_norm_[:n_samples + 1] def update_split_subclusters(self, subcluster, new_subcluster1, new_subcluster2): """Remove a subcluster from a node and update it with the split subclusters. """ ind = self.subclusters_.index(subcluster) self.subclusters_[ind] = new_subcluster1 self.init_centroids_[ind] = new_subcluster1.centroid_ self.init_sq_norm_[ind] = new_subcluster1.sq_norm_ self.append_subcluster(new_subcluster2) def insert_cf_subcluster(self, subcluster): """Insert a new subcluster into the node.""" if not self.subclusters_: self.append_subcluster(subcluster) return False threshold = self.threshold branching_factor = self.branching_factor # We need to find the closest subcluster among all the # subclusters so that we can insert our new subcluster. dist_matrix = np.dot(self.centroids_, subcluster.centroid_) dist_matrix *= -2. dist_matrix += self.squared_norm_ closest_index = np.argmin(dist_matrix) closest_subcluster = self.subclusters_[closest_index] # If the subcluster has a child, we need a recursive strategy. if closest_subcluster.child_ is not None: split_child = closest_subcluster.child_.insert_cf_subcluster( subcluster) if not split_child: # If it is determined that the child need not be split, we # can just update the closest_subcluster closest_subcluster.update(subcluster) self.init_centroids_[closest_index] = \ self.subclusters_[closest_index].centroid_ self.init_sq_norm_[closest_index] = \ self.subclusters_[closest_index].sq_norm_ return False # things not too good. we need to redistribute the subclusters in # our child node, and add a new subcluster in the parent # subcluster to accomodate the new child. else: new_subcluster1, new_subcluster2 = _split_node( closest_subcluster.child_, threshold, branching_factor) self.update_split_subclusters( closest_subcluster, new_subcluster1, new_subcluster2) if len(self.subclusters_) > self.branching_factor: return True return False # good to go! 
else: merged = closest_subcluster.merge_subcluster( subcluster, self.threshold) if merged: self.init_centroids_[closest_index] = \ closest_subcluster.centroid_ self.init_sq_norm_[closest_index] = \ closest_subcluster.sq_norm_ return False # not close to any other subclusters, and we still # have space, so add. elif len(self.subclusters_) < self.branching_factor: self.append_subcluster(subcluster) return False # We do not have enough space nor is it closer to an # other subcluster. We need to split. else: self.append_subcluster(subcluster) return True class _CFSubcluster(object): """Each subcluster in a CFNode is called a CFSubcluster. A CFSubcluster can have a CFNode has its child. Parameters ---------- linear_sum : ndarray, shape (n_features,), optional Sample. This is kept optional to allow initialization of empty subclusters. Attributes ---------- n_samples_ : int Number of samples that belong to each subcluster. linear_sum_ : ndarray Linear sum of all the samples in a subcluster. Prevents holding all sample data in memory. squared_sum_ : float Sum of the squared l2 norms of all samples belonging to a subcluster. centroid_ : ndarray Centroid of the subcluster. Prevent recomputing of centroids when ``CFNode.centroids_`` is called. child_ : _CFNode Child Node of the subcluster. Once a given _CFNode is set as the child of the _CFNode, it is set to ``self.child_``. sq_norm_ : ndarray Squared norm of the subcluster. Used to prevent recomputing when pairwise minimum distances are computed. """ def __init__(self, linear_sum=None): if linear_sum is None: self.n_samples_ = 0 self.squared_sum_ = 0.0 self.linear_sum_ = 0 else: self.n_samples_ = 1 self.centroid_ = self.linear_sum_ = linear_sum self.squared_sum_ = self.sq_norm_ = np.dot( self.linear_sum_, self.linear_sum_) self.child_ = None def update(self, subcluster): self.n_samples_ += subcluster.n_samples_ self.linear_sum_ += subcluster.linear_sum_ self.squared_sum_ += subcluster.squared_sum_ self.centroid_ = self.linear_sum_ / self.n_samples_ self.sq_norm_ = np.dot(self.centroid_, self.centroid_) def merge_subcluster(self, nominee_cluster, threshold): """Check if a cluster is worthy enough to be merged. If yes then merge. """ new_ss = self.squared_sum_ + nominee_cluster.squared_sum_ new_ls = self.linear_sum_ + nominee_cluster.linear_sum_ new_n = self.n_samples_ + nominee_cluster.n_samples_ new_centroid = (1 / new_n) * new_ls new_norm = np.dot(new_centroid, new_centroid) dot_product = (-2 * new_n) * new_norm sq_radius = (new_ss + dot_product) / new_n + new_norm if sq_radius <= threshold ** 2: (self.n_samples_, self.linear_sum_, self.squared_sum_, self.centroid_, self.sq_norm_) = \ new_n, new_ls, new_ss, new_centroid, new_norm return True return False @property def radius(self): """Return radius of the subcluster""" dot_product = -2 * np.dot(self.linear_sum_, self.centroid_) return sqrt( ((self.squared_sum_ + dot_product) / self.n_samples_) + self.sq_norm_) class Birch(BaseEstimator, TransformerMixin, ClusterMixin): """Implements the Birch clustering algorithm. Every new sample is inserted into the root of the Clustering Feature Tree. It is then clubbed together with the subcluster that has the centroid closest to the new sample. This is done recursively till it ends up at the subcluster of the leaf of the tree has the closest centroid. Read more in the :ref:`User Guide <birch>`. 
Parameters ---------- threshold : float, default 0.5 The radius of the subcluster obtained by merging a new sample and the closest subcluster should be lesser than the threshold. Otherwise a new subcluster is started. branching_factor : int, default 50 Maximum number of CF subclusters in each node. If a new samples enters such that the number of subclusters exceed the branching_factor then the node has to be split. The corresponding parent also has to be split and if the number of subclusters in the parent is greater than the branching factor, then it has to be split recursively. n_clusters : int, instance of sklearn.cluster model, default None Number of clusters after the final clustering step, which treats the subclusters from the leaves as new samples. By default, this final clustering step is not performed and the subclusters are returned as they are. If a model is provided, the model is fit treating the subclusters as new samples and the initial data is mapped to the label of the closest subcluster. If an int is provided, the model fit is AgglomerativeClustering with n_clusters set to the int. compute_labels : bool, default True Whether or not to compute labels for each fit. copy : bool, default True Whether or not to make a copy of the given data. If set to False, the initial data will be overwritten. Attributes ---------- root_ : _CFNode Root of the CFTree. dummy_leaf_ : _CFNode Start pointer to all the leaves. subcluster_centers_ : ndarray, Centroids of all subclusters read directly from the leaves. subcluster_labels_ : ndarray, Labels assigned to the centroids of the subclusters after they are clustered globally. labels_ : ndarray, shape (n_samples,) Array of labels assigned to the input data. if partial_fit is used instead of fit, they are assigned to the last batch of data. Examples -------- >>> from sklearn.cluster import Birch >>> X = [[0, 1], [0.3, 1], [-0.3, 1], [0, -1], [0.3, -1], [-0.3, -1]] >>> brc = Birch(branching_factor=50, n_clusters=None, threshold=0.5, ... compute_labels=True) >>> brc.fit(X) Birch(branching_factor=50, compute_labels=True, copy=True, n_clusters=None, threshold=0.5) >>> brc.predict(X) array([0, 0, 0, 1, 1, 1]) References ---------- * Tian Zhang, Raghu Ramakrishnan, Maron Livny BIRCH: An efficient data clustering method for large databases. http://www.cs.sfu.ca/CourseCentral/459/han/papers/zhang96.pdf * Roberto Perdisci JBirch - Java implementation of BIRCH clustering algorithm https://code.google.com/p/jbirch/ """ def __init__(self, threshold=0.5, branching_factor=50, n_clusters=3, compute_labels=True, copy=True): self.threshold = threshold self.branching_factor = branching_factor self.n_clusters = n_clusters self.compute_labels = compute_labels self.copy = copy def fit(self, X, y=None): """ Build a CF Tree for the input data. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Input data. """ self.fit_, self.partial_fit_ = True, False return self._fit(X) def _fit(self, X): X = check_array(X, accept_sparse='csr', copy=self.copy) threshold = self.threshold branching_factor = self.branching_factor if branching_factor <= 1: raise ValueError("Branching_factor should be greater than one.") n_samples, n_features = X.shape # If partial_fit is called for the first time or fit is called, we # start a new tree. partial_fit = getattr(self, 'partial_fit_') has_root = getattr(self, 'root_', None) if getattr(self, 'fit_') or (partial_fit and not has_root): # The first root is the leaf. Manipulate this object throughout. 
self.root_ = _CFNode(threshold, branching_factor, is_leaf=True, n_features=n_features) # To enable getting back subclusters. self.dummy_leaf_ = _CFNode(threshold, branching_factor, is_leaf=True, n_features=n_features) self.dummy_leaf_.next_leaf_ = self.root_ self.root_.prev_leaf_ = self.dummy_leaf_ # Cannot vectorize. Enough to convince to use cython. if not sparse.issparse(X): iter_func = iter else: iter_func = _iterate_sparse_X for sample in iter_func(X): subcluster = _CFSubcluster(linear_sum=sample) split = self.root_.insert_cf_subcluster(subcluster) if split: new_subcluster1, new_subcluster2 = _split_node( self.root_, threshold, branching_factor) del self.root_ self.root_ = _CFNode(threshold, branching_factor, is_leaf=False, n_features=n_features) self.root_.append_subcluster(new_subcluster1) self.root_.append_subcluster(new_subcluster2) centroids = np.concatenate([ leaf.centroids_ for leaf in self._get_leaves()]) self.subcluster_centers_ = centroids self._global_clustering(X) return self def _get_leaves(self): """ Retrieve the leaves of the CF Node. Returns ------- leaves: array-like List of the leaf nodes. """ leaf_ptr = self.dummy_leaf_.next_leaf_ leaves = [] while leaf_ptr is not None: leaves.append(leaf_ptr) leaf_ptr = leaf_ptr.next_leaf_ return leaves def partial_fit(self, X=None, y=None): """ Online learning. Prevents rebuilding of CFTree from scratch. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features), None Input data. If X is not provided, only the global clustering step is done. """ self.partial_fit_, self.fit_ = True, False if X is None: # Perform just the final global clustering step. self._global_clustering() return self else: self._check_fit(X) return self._fit(X) def _check_fit(self, X): is_fitted = hasattr(self, 'subcluster_centers_') # Called by partial_fit, before fitting. has_partial_fit = hasattr(self, 'partial_fit_') # Should raise an error if one does not fit before predicting. if not (is_fitted or has_partial_fit): raise NotFittedError("Fit training data before predicting") if is_fitted and X.shape[1] != self.subcluster_centers_.shape[1]: raise ValueError( "Training data and predicted data do " "not have same number of features.") def predict(self, X): """ Predict data using the ``centroids_`` of subclusters. Avoid computation of the row norms of X. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Input data. Returns ------- labels: ndarray, shape(n_samples) Labelled data. """ X = check_array(X, accept_sparse='csr') self._check_fit(X) reduced_distance = safe_sparse_dot(X, self.subcluster_centers_.T) reduced_distance *= -2 reduced_distance += self._subcluster_norms return self.subcluster_labels_[np.argmin(reduced_distance, axis=1)] def transform(self, X, y=None): """ Transform X into subcluster centroids dimension. Each dimension represents the distance from the sample point to each cluster centroid. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Input data. Returns ------- X_trans : {array-like, sparse matrix}, shape (n_samples, n_clusters) Transformed data. """ check_is_fitted(self, 'subcluster_centers_') return euclidean_distances(X, self.subcluster_centers_) def _global_clustering(self, X=None): """ Global clustering for the subclusters obtained after fitting """ clusterer = self.n_clusters centroids = self.subcluster_centers_ compute_labels = (X is not None) and self.compute_labels # Preprocessing for the global clustering. 
not_enough_centroids = False if isinstance(clusterer, int): clusterer = AgglomerativeClustering( n_clusters=self.n_clusters) # There is no need to perform the global clustering step. if len(centroids) < self.n_clusters: not_enough_centroids = True elif (clusterer is not None and not hasattr(clusterer, 'fit_predict')): raise ValueError("n_clusters should be an instance of " "ClusterMixin or an int") # To use in predict to avoid recalculation. self._subcluster_norms = row_norms( self.subcluster_centers_, squared=True) if clusterer is None or not_enough_centroids: self.subcluster_labels_ = np.arange(len(centroids)) if not_enough_centroids: warnings.warn( "Number of subclusters found (%d) by Birch is less " "than (%d). Decrease the threshold." % (len(centroids), self.n_clusters)) else: # The global clustering step that clusters the subclusters of # the leaves. It assumes the centroids of the subclusters as # samples and finds the final centroids. self.subcluster_labels_ = clusterer.fit_predict( self.subcluster_centers_) if compute_labels: self.labels_ = self.predict(X)
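# --- Usage sketch (illustrative; not part of the original module) ---
# A minimal example of how the estimator defined above is typically driven,
# assuming the public scikit-learn import path (sklearn.cluster.Birch). It is
# only meant to clarify the CF-tree building / partial_fit / global-clustering
# flow described in the docstrings; the relative imports at the top of this
# module mean the block below is a standalone sketch, not something this file
# itself executes.
if __name__ == "__main__":
    import numpy as np
    from sklearn.cluster import Birch

    X = np.array([[0, 1], [0.3, 1], [-0.3, 1],
                  [0, -1], [0.3, -1], [-0.3, -1]])

    brc = Birch(threshold=0.5, branching_factor=50, n_clusters=2)
    brc.partial_fit(X[:3])   # first batch: samples are inserted into the CF tree
    brc.partial_fit(X[3:])   # second batch: same tree, no rebuild from scratch
    brc.partial_fit(None)    # X=None runs only the final global clustering step
    print(brc.predict(X))    # label of the closest subcluster for each sample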
bsd-3-clause
dsm054/pandas
pandas/tests/dtypes/test_common.py
1
23699
# -*- coding: utf-8 -*- import pytest import numpy as np import pandas as pd from pandas.core.dtypes.dtypes import (DatetimeTZDtype, PeriodDtype, CategoricalDtype, IntervalDtype) from pandas.core.sparse.api import SparseDtype import pandas.core.dtypes.common as com import pandas.util._test_decorators as td class TestPandasDtype(object): # Passing invalid dtype, both as a string or object, must raise TypeError # Per issue GH15520 @pytest.mark.parametrize('box', [pd.Timestamp, 'pd.Timestamp', list]) def test_invalid_dtype_error(self, box): with pytest.raises(TypeError, match='not understood'): com.pandas_dtype(box) @pytest.mark.parametrize('dtype', [ object, 'float64', np.object_, np.dtype('object'), 'O', np.float64, float, np.dtype('float64')]) def test_pandas_dtype_valid(self, dtype): assert com.pandas_dtype(dtype) == dtype @pytest.mark.parametrize('dtype', [ 'M8[ns]', 'm8[ns]', 'object', 'float64', 'int64']) def test_numpy_dtype(self, dtype): assert com.pandas_dtype(dtype) == np.dtype(dtype) def test_numpy_string_dtype(self): # do not parse freq-like string as period dtype assert com.pandas_dtype('U') == np.dtype('U') assert com.pandas_dtype('S') == np.dtype('S') @pytest.mark.parametrize('dtype', [ 'datetime64[ns, US/Eastern]', 'datetime64[ns, Asia/Tokyo]', 'datetime64[ns, UTC]']) def test_datetimetz_dtype(self, dtype): assert com.pandas_dtype(dtype) is DatetimeTZDtype(dtype) assert com.pandas_dtype(dtype) == DatetimeTZDtype(dtype) assert com.pandas_dtype(dtype) == dtype def test_categorical_dtype(self): assert com.pandas_dtype('category') == CategoricalDtype() @pytest.mark.parametrize('dtype', [ 'period[D]', 'period[3M]', 'period[U]', 'Period[D]', 'Period[3M]', 'Period[U]']) def test_period_dtype(self, dtype): assert com.pandas_dtype(dtype) is PeriodDtype(dtype) assert com.pandas_dtype(dtype) == PeriodDtype(dtype) assert com.pandas_dtype(dtype) == dtype dtypes = dict(datetime_tz=com.pandas_dtype('datetime64[ns, US/Eastern]'), datetime=com.pandas_dtype('datetime64[ns]'), timedelta=com.pandas_dtype('timedelta64[ns]'), period=PeriodDtype('D'), integer=np.dtype(np.int64), float=np.dtype(np.float64), object=np.dtype(np.object), category=com.pandas_dtype('category')) @pytest.mark.parametrize('name1,dtype1', list(dtypes.items()), ids=lambda x: str(x)) @pytest.mark.parametrize('name2,dtype2', list(dtypes.items()), ids=lambda x: str(x)) def test_dtype_equal(name1, dtype1, name2, dtype2): # match equal to self, but not equal to other assert com.is_dtype_equal(dtype1, dtype1) if name1 != name2: assert not com.is_dtype_equal(dtype1, dtype2) @pytest.mark.parametrize("dtype1,dtype2", [ (np.int8, np.int64), (np.int16, np.int64), (np.int32, np.int64), (np.float32, np.float64), (PeriodDtype("D"), PeriodDtype("2D")), # PeriodType (com.pandas_dtype("datetime64[ns, US/Eastern]"), com.pandas_dtype("datetime64[ns, CET]")), # Datetime (None, None) # gh-15941: no exception should be raised. ]) def test_dtype_equal_strict(dtype1, dtype2): assert not com.is_dtype_equal(dtype1, dtype2) def get_is_dtype_funcs(): """ Get all functions in pandas.core.dtypes.common that begin with 'is_' and end with 'dtype' """ fnames = [f for f in dir(com) if (f.startswith('is_') and f.endswith('dtype'))] return [getattr(com, fname) for fname in fnames] @pytest.mark.parametrize('func', get_is_dtype_funcs(), ids=lambda x: x.__name__) def test_get_dtype_error_catch(func): # see gh-15941 # # No exception should be raised. 
assert not func(None) def test_is_object(): assert com.is_object_dtype(object) assert com.is_object_dtype(np.array([], dtype=object)) assert not com.is_object_dtype(int) assert not com.is_object_dtype(np.array([], dtype=int)) assert not com.is_object_dtype([1, 2, 3]) @pytest.mark.parametrize("check_scipy", [ False, pytest.param(True, marks=td.skip_if_no_scipy) ]) def test_is_sparse(check_scipy): assert com.is_sparse(pd.SparseArray([1, 2, 3])) assert com.is_sparse(pd.SparseSeries([1, 2, 3])) assert not com.is_sparse(np.array([1, 2, 3])) if check_scipy: import scipy.sparse assert not com.is_sparse(scipy.sparse.bsr_matrix([1, 2, 3])) @td.skip_if_no_scipy def test_is_scipy_sparse(): from scipy.sparse import bsr_matrix assert com.is_scipy_sparse(bsr_matrix([1, 2, 3])) assert not com.is_scipy_sparse(pd.SparseArray([1, 2, 3])) assert not com.is_scipy_sparse(pd.SparseSeries([1, 2, 3])) def test_is_categorical(): cat = pd.Categorical([1, 2, 3]) assert com.is_categorical(cat) assert com.is_categorical(pd.Series(cat)) assert com.is_categorical(pd.CategoricalIndex([1, 2, 3])) assert not com.is_categorical([1, 2, 3]) def test_is_datetimetz(): assert not com.is_datetimetz([1, 2, 3]) assert not com.is_datetimetz(pd.DatetimeIndex([1, 2, 3])) assert com.is_datetimetz(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern")) dtype = DatetimeTZDtype("ns", tz="US/Eastern") s = pd.Series([], dtype=dtype) assert com.is_datetimetz(s) def test_is_period(): assert not com.is_period([1, 2, 3]) assert not com.is_period(pd.Index([1, 2, 3])) assert com.is_period(pd.PeriodIndex(["2017-01-01"], freq="D")) def test_is_datetime64_dtype(): assert not com.is_datetime64_dtype(object) assert not com.is_datetime64_dtype([1, 2, 3]) assert not com.is_datetime64_dtype(np.array([], dtype=int)) assert com.is_datetime64_dtype(np.datetime64) assert com.is_datetime64_dtype(np.array([], dtype=np.datetime64)) def test_is_datetime64tz_dtype(): assert not com.is_datetime64tz_dtype(object) assert not com.is_datetime64tz_dtype([1, 2, 3]) assert not com.is_datetime64tz_dtype(pd.DatetimeIndex([1, 2, 3])) assert com.is_datetime64tz_dtype(pd.DatetimeIndex( [1, 2, 3], tz="US/Eastern")) def test_is_timedelta64_dtype(): assert not com.is_timedelta64_dtype(object) assert not com.is_timedelta64_dtype(None) assert not com.is_timedelta64_dtype([1, 2, 3]) assert not com.is_timedelta64_dtype(np.array([], dtype=np.datetime64)) assert not com.is_timedelta64_dtype('0 days') assert not com.is_timedelta64_dtype("0 days 00:00:00") assert not com.is_timedelta64_dtype(["0 days 00:00:00"]) assert not com.is_timedelta64_dtype("NO DATE") assert com.is_timedelta64_dtype(np.timedelta64) assert com.is_timedelta64_dtype(pd.Series([], dtype="timedelta64[ns]")) assert com.is_timedelta64_dtype(pd.to_timedelta(['0 days', '1 days'])) def test_is_period_dtype(): assert not com.is_period_dtype(object) assert not com.is_period_dtype([1, 2, 3]) assert not com.is_period_dtype(pd.Period("2017-01-01")) assert com.is_period_dtype(PeriodDtype(freq="D")) assert com.is_period_dtype(pd.PeriodIndex([], freq="A")) def test_is_interval_dtype(): assert not com.is_interval_dtype(object) assert not com.is_interval_dtype([1, 2, 3]) assert com.is_interval_dtype(IntervalDtype()) interval = pd.Interval(1, 2, closed="right") assert not com.is_interval_dtype(interval) assert com.is_interval_dtype(pd.IntervalIndex([interval])) def test_is_categorical_dtype(): assert not com.is_categorical_dtype(object) assert not com.is_categorical_dtype([1, 2, 3]) assert com.is_categorical_dtype(CategoricalDtype()) assert 
com.is_categorical_dtype(pd.Categorical([1, 2, 3])) assert com.is_categorical_dtype(pd.CategoricalIndex([1, 2, 3])) def test_is_string_dtype(): assert not com.is_string_dtype(int) assert not com.is_string_dtype(pd.Series([1, 2])) assert com.is_string_dtype(str) assert com.is_string_dtype(object) assert com.is_string_dtype(np.array(['a', 'b'])) def test_is_period_arraylike(): assert not com.is_period_arraylike([1, 2, 3]) assert not com.is_period_arraylike(pd.Index([1, 2, 3])) assert com.is_period_arraylike(pd.PeriodIndex(["2017-01-01"], freq="D")) def test_is_datetime_arraylike(): assert not com.is_datetime_arraylike([1, 2, 3]) assert not com.is_datetime_arraylike(pd.Index([1, 2, 3])) assert com.is_datetime_arraylike(pd.DatetimeIndex([1, 2, 3])) def test_is_datetimelike(): assert not com.is_datetimelike([1, 2, 3]) assert not com.is_datetimelike(pd.Index([1, 2, 3])) assert com.is_datetimelike(pd.DatetimeIndex([1, 2, 3])) assert com.is_datetimelike(pd.PeriodIndex([], freq="A")) assert com.is_datetimelike(np.array([], dtype=np.datetime64)) assert com.is_datetimelike(pd.Series([], dtype="timedelta64[ns]")) assert com.is_datetimelike(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern")) dtype = DatetimeTZDtype("ns", tz="US/Eastern") s = pd.Series([], dtype=dtype) assert com.is_datetimelike(s) def test_is_integer_dtype(): assert not com.is_integer_dtype(str) assert not com.is_integer_dtype(float) assert not com.is_integer_dtype(np.datetime64) assert not com.is_integer_dtype(np.timedelta64) assert not com.is_integer_dtype(pd.Index([1, 2.])) assert not com.is_integer_dtype(np.array(['a', 'b'])) assert not com.is_integer_dtype(np.array([], dtype=np.timedelta64)) assert com.is_integer_dtype(int) assert com.is_integer_dtype(np.uint64) assert com.is_integer_dtype(pd.Series([1, 2])) def test_is_signed_integer_dtype(): assert not com.is_signed_integer_dtype(str) assert not com.is_signed_integer_dtype(float) assert not com.is_signed_integer_dtype(np.uint64) assert not com.is_signed_integer_dtype(np.datetime64) assert not com.is_signed_integer_dtype(np.timedelta64) assert not com.is_signed_integer_dtype(pd.Index([1, 2.])) assert not com.is_signed_integer_dtype(np.array(['a', 'b'])) assert not com.is_signed_integer_dtype(np.array([1, 2], dtype=np.uint32)) assert not com.is_signed_integer_dtype(np.array([], dtype=np.timedelta64)) assert com.is_signed_integer_dtype(int) assert com.is_signed_integer_dtype(pd.Series([1, 2])) def test_is_unsigned_integer_dtype(): assert not com.is_unsigned_integer_dtype(str) assert not com.is_unsigned_integer_dtype(int) assert not com.is_unsigned_integer_dtype(float) assert not com.is_unsigned_integer_dtype(pd.Series([1, 2])) assert not com.is_unsigned_integer_dtype(pd.Index([1, 2.])) assert not com.is_unsigned_integer_dtype(np.array(['a', 'b'])) assert com.is_unsigned_integer_dtype(np.uint64) assert com.is_unsigned_integer_dtype(np.array([1, 2], dtype=np.uint32)) def test_is_int64_dtype(): assert not com.is_int64_dtype(str) assert not com.is_int64_dtype(float) assert not com.is_int64_dtype(np.int32) assert not com.is_int64_dtype(np.uint64) assert not com.is_int64_dtype(pd.Index([1, 2.])) assert not com.is_int64_dtype(np.array(['a', 'b'])) assert not com.is_int64_dtype(np.array([1, 2], dtype=np.uint32)) assert com.is_int64_dtype(np.int64) assert com.is_int64_dtype(np.array([1, 2], dtype=np.int64)) def test_is_int_or_datetime_dtype(): assert not com.is_int_or_datetime_dtype(str) assert not com.is_int_or_datetime_dtype(float) assert not com.is_int_or_datetime_dtype(pd.Index([1, 2.])) 
assert not com.is_int_or_datetime_dtype(np.array(['a', 'b'])) assert com.is_int_or_datetime_dtype(int) assert com.is_int_or_datetime_dtype(np.uint64) assert com.is_int_or_datetime_dtype(np.datetime64) assert com.is_int_or_datetime_dtype(np.timedelta64) assert com.is_int_or_datetime_dtype(pd.Series([1, 2])) assert com.is_int_or_datetime_dtype(np.array([], dtype=np.datetime64)) assert com.is_int_or_datetime_dtype(np.array([], dtype=np.timedelta64)) def test_is_datetime64_any_dtype(): assert not com.is_datetime64_any_dtype(int) assert not com.is_datetime64_any_dtype(str) assert not com.is_datetime64_any_dtype(np.array([1, 2])) assert not com.is_datetime64_any_dtype(np.array(['a', 'b'])) assert com.is_datetime64_any_dtype(np.datetime64) assert com.is_datetime64_any_dtype(np.array([], dtype=np.datetime64)) assert com.is_datetime64_any_dtype(DatetimeTZDtype("ns", "US/Eastern")) assert com.is_datetime64_any_dtype(pd.DatetimeIndex([1, 2, 3], dtype=np.datetime64)) def test_is_datetime64_ns_dtype(): assert not com.is_datetime64_ns_dtype(int) assert not com.is_datetime64_ns_dtype(str) assert not com.is_datetime64_ns_dtype(np.datetime64) assert not com.is_datetime64_ns_dtype(np.array([1, 2])) assert not com.is_datetime64_ns_dtype(np.array(['a', 'b'])) assert not com.is_datetime64_ns_dtype(np.array([], dtype=np.datetime64)) # This datetime array has the wrong unit (ps instead of ns) assert not com.is_datetime64_ns_dtype(np.array([], dtype="datetime64[ps]")) assert com.is_datetime64_ns_dtype(DatetimeTZDtype("ns", "US/Eastern")) assert com.is_datetime64_ns_dtype(pd.DatetimeIndex([1, 2, 3], dtype=np.datetime64)) def test_is_timedelta64_ns_dtype(): assert not com.is_timedelta64_ns_dtype(np.dtype('m8[ps]')) assert not com.is_timedelta64_ns_dtype( np.array([1, 2], dtype=np.timedelta64)) assert com.is_timedelta64_ns_dtype(np.dtype('m8[ns]')) assert com.is_timedelta64_ns_dtype(np.array([1, 2], dtype='m8[ns]')) def test_is_datetime_or_timedelta_dtype(): assert not com.is_datetime_or_timedelta_dtype(int) assert not com.is_datetime_or_timedelta_dtype(str) assert not com.is_datetime_or_timedelta_dtype(pd.Series([1, 2])) assert not com.is_datetime_or_timedelta_dtype(np.array(['a', 'b'])) assert not com.is_datetime_or_timedelta_dtype( DatetimeTZDtype("ns", "US/Eastern")) assert com.is_datetime_or_timedelta_dtype(np.datetime64) assert com.is_datetime_or_timedelta_dtype(np.timedelta64) assert com.is_datetime_or_timedelta_dtype( np.array([], dtype=np.timedelta64)) assert com.is_datetime_or_timedelta_dtype( np.array([], dtype=np.datetime64)) def test_is_numeric_v_string_like(): assert not com.is_numeric_v_string_like(1, 1) assert not com.is_numeric_v_string_like(1, "foo") assert not com.is_numeric_v_string_like("foo", "foo") assert not com.is_numeric_v_string_like(np.array([1]), np.array([2])) assert not com.is_numeric_v_string_like( np.array(["foo"]), np.array(["foo"])) assert com.is_numeric_v_string_like(np.array([1]), "foo") assert com.is_numeric_v_string_like("foo", np.array([1])) assert com.is_numeric_v_string_like(np.array([1, 2]), np.array(["foo"])) assert com.is_numeric_v_string_like(np.array(["foo"]), np.array([1, 2])) def test_is_datetimelike_v_numeric(): dt = np.datetime64(pd.datetime(2017, 1, 1)) assert not com.is_datetimelike_v_numeric(1, 1) assert not com.is_datetimelike_v_numeric(dt, dt) assert not com.is_datetimelike_v_numeric(np.array([1]), np.array([2])) assert not com.is_datetimelike_v_numeric(np.array([dt]), np.array([dt])) assert com.is_datetimelike_v_numeric(1, dt) assert 
com.is_datetimelike_v_numeric(1, dt) assert com.is_datetimelike_v_numeric(np.array([dt]), 1) assert com.is_datetimelike_v_numeric(np.array([1]), dt) assert com.is_datetimelike_v_numeric(np.array([dt]), np.array([1])) def test_is_datetimelike_v_object(): obj = object() dt = np.datetime64(pd.datetime(2017, 1, 1)) assert not com.is_datetimelike_v_object(dt, dt) assert not com.is_datetimelike_v_object(obj, obj) assert not com.is_datetimelike_v_object(np.array([dt]), np.array([1])) assert not com.is_datetimelike_v_object(np.array([dt]), np.array([dt])) assert not com.is_datetimelike_v_object(np.array([obj]), np.array([obj])) assert com.is_datetimelike_v_object(dt, obj) assert com.is_datetimelike_v_object(obj, dt) assert com.is_datetimelike_v_object(np.array([dt]), obj) assert com.is_datetimelike_v_object(np.array([obj]), dt) assert com.is_datetimelike_v_object(np.array([dt]), np.array([obj])) def test_needs_i8_conversion(): assert not com.needs_i8_conversion(str) assert not com.needs_i8_conversion(np.int64) assert not com.needs_i8_conversion(pd.Series([1, 2])) assert not com.needs_i8_conversion(np.array(['a', 'b'])) assert com.needs_i8_conversion(np.datetime64) assert com.needs_i8_conversion(pd.Series([], dtype="timedelta64[ns]")) assert com.needs_i8_conversion(pd.DatetimeIndex( [1, 2, 3], tz="US/Eastern")) def test_is_numeric_dtype(): assert not com.is_numeric_dtype(str) assert not com.is_numeric_dtype(np.datetime64) assert not com.is_numeric_dtype(np.timedelta64) assert not com.is_numeric_dtype(np.array(['a', 'b'])) assert not com.is_numeric_dtype(np.array([], dtype=np.timedelta64)) assert com.is_numeric_dtype(int) assert com.is_numeric_dtype(float) assert com.is_numeric_dtype(np.uint64) assert com.is_numeric_dtype(pd.Series([1, 2])) assert com.is_numeric_dtype(pd.Index([1, 2.])) def test_is_string_like_dtype(): assert not com.is_string_like_dtype(object) assert not com.is_string_like_dtype(pd.Series([1, 2])) assert com.is_string_like_dtype(str) assert com.is_string_like_dtype(np.array(['a', 'b'])) def test_is_float_dtype(): assert not com.is_float_dtype(str) assert not com.is_float_dtype(int) assert not com.is_float_dtype(pd.Series([1, 2])) assert not com.is_float_dtype(np.array(['a', 'b'])) assert com.is_float_dtype(float) assert com.is_float_dtype(pd.Index([1, 2.])) def test_is_bool_dtype(): assert not com.is_bool_dtype(int) assert not com.is_bool_dtype(str) assert not com.is_bool_dtype(pd.Series([1, 2])) assert not com.is_bool_dtype(np.array(['a', 'b'])) assert not com.is_bool_dtype(pd.Index(['a', 'b'])) assert com.is_bool_dtype(bool) assert com.is_bool_dtype(np.bool) assert com.is_bool_dtype(np.array([True, False])) assert com.is_bool_dtype(pd.Index([True, False])) @pytest.mark.parametrize("check_scipy", [ False, pytest.param(True, marks=td.skip_if_no_scipy) ]) def test_is_extension_type(check_scipy): assert not com.is_extension_type([1, 2, 3]) assert not com.is_extension_type(np.array([1, 2, 3])) assert not com.is_extension_type(pd.DatetimeIndex([1, 2, 3])) cat = pd.Categorical([1, 2, 3]) assert com.is_extension_type(cat) assert com.is_extension_type(pd.Series(cat)) assert com.is_extension_type(pd.SparseArray([1, 2, 3])) assert com.is_extension_type(pd.SparseSeries([1, 2, 3])) assert com.is_extension_type(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern")) dtype = DatetimeTZDtype("ns", tz="US/Eastern") s = pd.Series([], dtype=dtype) assert com.is_extension_type(s) if check_scipy: import scipy.sparse assert not com.is_extension_type(scipy.sparse.bsr_matrix([1, 2, 3])) def 
test_is_complex_dtype(): assert not com.is_complex_dtype(int) assert not com.is_complex_dtype(str) assert not com.is_complex_dtype(pd.Series([1, 2])) assert not com.is_complex_dtype(np.array(['a', 'b'])) assert com.is_complex_dtype(np.complex) assert com.is_complex_dtype(np.array([1 + 1j, 5])) def test_is_offsetlike(): assert com.is_offsetlike(np.array([pd.DateOffset(month=3), pd.offsets.Nano()])) assert com.is_offsetlike(pd.offsets.MonthEnd()) assert com.is_offsetlike(pd.Index([pd.DateOffset(second=1)])) assert not com.is_offsetlike(pd.Timedelta(1)) assert not com.is_offsetlike(np.array([1 + 1j, 5])) # mixed case assert not com.is_offsetlike(np.array([pd.DateOffset(), pd.Timestamp(0)])) @pytest.mark.parametrize('input_param,result', [ (int, np.dtype(int)), ('int32', np.dtype('int32')), (float, np.dtype(float)), ('float64', np.dtype('float64')), (np.dtype('float64'), np.dtype('float64')), (str, np.dtype(str)), (pd.Series([1, 2], dtype=np.dtype('int16')), np.dtype('int16')), (pd.Series(['a', 'b']), np.dtype(object)), (pd.Index([1, 2]), np.dtype('int64')), (pd.Index(['a', 'b']), np.dtype(object)), ('category', 'category'), (pd.Categorical(['a', 'b']).dtype, CategoricalDtype(['a', 'b'])), (pd.Categorical(['a', 'b']), CategoricalDtype(['a', 'b'])), (pd.CategoricalIndex(['a', 'b']).dtype, CategoricalDtype(['a', 'b'])), (pd.CategoricalIndex(['a', 'b']), CategoricalDtype(['a', 'b'])), (CategoricalDtype(), CategoricalDtype()), (CategoricalDtype(['a', 'b']), CategoricalDtype()), (pd.DatetimeIndex([1, 2]), np.dtype('=M8[ns]')), (pd.DatetimeIndex([1, 2]).dtype, np.dtype('=M8[ns]')), ('<M8[ns]', np.dtype('<M8[ns]')), ('datetime64[ns, Europe/London]', DatetimeTZDtype('ns', 'Europe/London')), (pd.SparseSeries([1, 2], dtype='int32'), SparseDtype('int32')), (pd.SparseSeries([1, 2], dtype='int32').dtype, SparseDtype('int32')), (PeriodDtype(freq='D'), PeriodDtype(freq='D')), ('period[D]', PeriodDtype(freq='D')), (IntervalDtype(), IntervalDtype()), ]) def test__get_dtype(input_param, result): assert com._get_dtype(input_param) == result @pytest.mark.parametrize('input_param', [None, 1, 1.2, 'random string', pd.DataFrame([1, 2])]) def test__get_dtype_fails(input_param): # python objects pytest.raises(TypeError, com._get_dtype, input_param) @pytest.mark.parametrize('input_param,result', [ (int, np.dtype(int).type), ('int32', np.int32), (float, np.dtype(float).type), ('float64', np.float64), (np.dtype('float64'), np.float64), (str, np.dtype(str).type), (pd.Series([1, 2], dtype=np.dtype('int16')), np.int16), (pd.Series(['a', 'b']), np.object_), (pd.Index([1, 2], dtype='int64'), np.int64), (pd.Index(['a', 'b']), np.object_), ('category', com.CategoricalDtypeType), (pd.Categorical(['a', 'b']).dtype, com.CategoricalDtypeType), (pd.Categorical(['a', 'b']), com.CategoricalDtypeType), (pd.CategoricalIndex(['a', 'b']).dtype, com.CategoricalDtypeType), (pd.CategoricalIndex(['a', 'b']), com.CategoricalDtypeType), (pd.DatetimeIndex([1, 2]), np.datetime64), (pd.DatetimeIndex([1, 2]).dtype, np.datetime64), ('<M8[ns]', np.datetime64), (pd.DatetimeIndex([1, 2], tz='Europe/London'), pd.Timestamp), (pd.DatetimeIndex([1, 2], tz='Europe/London').dtype, pd.Timestamp), ('datetime64[ns, Europe/London]', pd.Timestamp), (pd.SparseSeries([1, 2], dtype='int32'), np.int32), (pd.SparseSeries([1, 2], dtype='int32').dtype, np.int32), (PeriodDtype(freq='D'), pd.Period), ('period[D]', pd.Period), (IntervalDtype(), pd.Interval), (None, type(None)), (1, type(None)), (1.2, type(None)), (pd.DataFrame([1, 2]), type(None)), # composite dtype ]) def 
test__get_dtype_type(input_param, result): assert com._get_dtype_type(input_param) == result
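# --- Illustrative sketch (not part of the original test module) ---
# The is_*_dtype helpers exercised above are also exposed through the public
# pandas.api.types namespace (the public import path is assumed here purely
# for illustration). A minimal example of how they are typically used to
# inspect real columns:
if __name__ == "__main__":
    from pandas.api import types as ptypes

    df = pd.DataFrame({
        "ints": [1, 2, 3],
        "stamps": pd.to_datetime(["2017-01-01", "2017-01-02", "2017-01-03"]),
        "cats": pd.Categorical(["a", "b", "a"]),
    })

    assert ptypes.is_integer_dtype(df["ints"])
    assert ptypes.is_datetime64_any_dtype(df["stamps"])
    assert ptypes.is_categorical_dtype(df["cats"])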
bsd-3-clause
brguez/TEIBA
src/python/genomic_distribution_cor.py
1
4598
#!/usr/bin/env python #coding: utf-8 #### FUNCTIONS #### def header(string): """ Display header """ timeInfo = time.strftime("%Y-%m-%d %H:%M") print '\n', timeInfo, "****", string, "****" def subHeader(string): """ Display subheader """ timeInfo = time.strftime("%Y-%m-%d %H:%M") print timeInfo, "**", string, "**" def info(string): """ Display basic information """ timeInfo = time.strftime("%Y-%m-%d %H:%M") print timeInfo, string def scatterCorr(arrayA, arrayB, threshold, outPath): """ Interpretation of strength of correlation very weak: < 0,15 weak: 0,15-0,25 moderate: 0,25-0,40 strong: 0,40-0,75 very strong: >0,75 """ corr = stats.spearmanr(arrayA, arrayB) coefficient = float(format(corr[0], '.3f')) pvalue = float(corr[1]) print "pvalue: ", pvalue ## Make scatterplot if rho >= threshold or <= -theshold if (coefficient >= threshold) or (coefficient <= -threshold): # Make scatterplot fig = plt.figure(figsize=(6,6)) ax1 = fig.add_subplot(1, 1, 1) #plot = sns.jointplot(x=arrayA, y=arrayB, kind="hex", xlim=(0,40), gridsize=50, dropna=True, cmap="Blues", stat_func=spearmanr) plot = sns.jointplot(x=arrayA, y=arrayB, kind="kde", space=0, xlim=(0,30), gridsize=50, dropna=True, cmap="Blues", stat_func=spearmanr) plt.xlabel('# L1', fontsize=12) plt.ylabel('Replication time', fontsize=12) # sns.plt.subplots_adjust(left=0.2, right=0.8, top=0.8, bottom=0.2) # shrink fig so cbar is visible # cax = plot.fig.add_axes([.85, .25, .05, .4]) # x, y, width, height # sns.plt.colorbar(cax=cax) #sns.jointplot(x=arrayA, y=arrayB, kind="kde", space=0, color="b", xlim=(0,30)) ## Save figure fileName = outPath + '_' + str(coefficient) + '_correlation.pdf' plt.savefig(fileName) return coefficient, pvalue #### MAIN #### ## Import modules ## import argparse import sys import os.path import formats import time import scipy.stats as stats import pandas as pd import numpy as np from matplotlib import pyplot as plt import seaborn as sns from scipy.stats import spearmanr ## Graphic style ## sns.set_style("white") sns.set_style("ticks") #sns.set(font="Verdana") ## Get user's input ## parser = argparse.ArgumentParser(description="Compute correlation between L1 retrotransposition rate and diverse genomic features (replication time, gene expression and gene density)") parser.add_argument('input', help='Genomic features table') parser.add_argument('-o', '--outDir', default=os.getcwd(), dest='outDir', help='output directory. Default: current working directory.' ) args = parser.parse_args() inputFile = args.input outDir = args.outDir scriptName = os.path.basename(sys.argv[0]) ## Display configuration to standard output ## print print "***** ", scriptName, " configuration *****" print "inputFile: ", inputFile print "outDir: ", outDir print print "***** Executing ", scriptName, ".... *****" print ## Start ##  #### 1. Load input tables: ########################## inputDf = pd.read_csv(inputFile, header=0, sep='\t') print "inputDf: ", inputDf #### 2. 
Number L1 insertions and L1 endonuclease motif correlation ################################################################### ## Remove bins with a number of L1 EN motifs == 0 # These will mostly correspond to telomeric, centromeric regions filteredDf = inputDf[inputDf["nbL1Motif"] > 0] ## Make plot fig = plt.figure(figsize=(6,6)) ax1 = fig.add_subplot(1, 1, 1) plot = sns.jointplot("nbL1", "nbL1Motif", data=filteredDf, xlim=(0,30), kind="kde", space=0, dropna=True, cmap="Blues", stat_func=spearmanr) ## Save figure outPath = outDir + '/nbL1_nbL1Motif_corr.pdf' plt.savefig(outPath) outPath = outDir + '/nbL1_nbL1Motif_corr.svg' plt.savefig(outPath) #### 3. Number L1 insertions and median replication time correlation ##################################################################### ## Remove bins with NA values for replication timing (one bin with NA expression) filteredDf = inputDf.dropna(subset=["medianRT"]) ## Make plot fig = plt.figure(figsize=(6,6)) ax1 = fig.add_subplot(1, 1, 1) plot = sns.jointplot("nbL1", "medianRT", data=filteredDf, xlim=(0,30), kind="kde", space=0, dropna=True, cmap="Blues", stat_func=spearmanr) ## Save figure outPath = outDir + '/nbL1_medianRT_corr.pdf' plt.savefig(outPath) outPath = outDir + '/nbL1_medianRT_corr.svg' plt.savefig(outPath) #### header("Finished")
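#### Illustrative sketch (not part of the original script) ####
# scatterCorr() is defined above but never invoked in the MAIN block; the
# jointplots are produced directly with seaborn instead. A hedged example of
# how it could be driven on the same table is sketched below (it would
# naturally sit before the final header("Finished") call; 0.25 is the
# "moderate" cut-off named in the function's docstring, and the column names
# are the ones already used in the plots above):
#
#   coefficient, pvalue = scatterCorr(filteredDf["nbL1"],
#                                     filteredDf["medianRT"],
#                                     0.25,
#                                     outDir + '/nbL1_medianRT')
#   info("Spearman rho: " + str(coefficient) + ", p-value: " + str(pvalue))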
gpl-3.0
jseabold/scipy
scipy/stats/morestats.py
6
87719
# Author: Travis Oliphant, 2002 # # Further updates and enhancements by many SciPy developers. # from __future__ import division, print_function, absolute_import import math import warnings from collections import namedtuple import numpy as np from numpy import (isscalar, r_, log, sum, around, unique, asarray, zeros, arange, sort, amin, amax, any, atleast_1d, sqrt, ceil, floor, array, poly1d, compress, pi, exp, ravel, angle, count_nonzero) from numpy.testing.decorators import setastest from scipy._lib.six import string_types from scipy import optimize from scipy import special from . import statlib from . import stats from .stats import find_repeats from .contingency import chi2_contingency from . import distributions from ._distn_infrastructure import rv_generic __all__ = ['mvsdist', 'bayes_mvs', 'kstat', 'kstatvar', 'probplot', 'ppcc_max', 'ppcc_plot', 'boxcox_llf', 'boxcox', 'boxcox_normmax', 'boxcox_normplot', 'shapiro', 'anderson', 'ansari', 'bartlett', 'levene', 'binom_test', 'fligner', 'mood', 'wilcoxon', 'median_test', 'pdf_fromgamma', 'circmean', 'circvar', 'circstd', 'anderson_ksamp' ] def bayes_mvs(data, alpha=0.90): """ Bayesian confidence intervals for the mean, var, and std. Parameters ---------- data : array_like Input data, if multi-dimensional it is flattened to 1-D by `bayes_mvs`. Requires 2 or more data points. alpha : float, optional Probability that the returned confidence interval contains the true parameter. Returns ------- mean_cntr, var_cntr, std_cntr : tuple The three results are for the mean, variance and standard deviation, respectively. Each result is a tuple of the form:: (center, (lower, upper)) with `center` the mean of the conditional pdf of the value given the data, and `(lower, upper)` a confidence interval, centered on the median, containing the estimate to a probability ``alpha``. Notes ----- Each tuple of mean, variance, and standard deviation estimates represent the (center, (lower, upper)) with center the mean of the conditional pdf of the value given the data and (lower, upper) is a confidence interval centered on the median, containing the estimate to a probability ``alpha``. Converts data to 1-D and assumes all data has the same mean and variance. Uses Jeffrey's prior for variance and std. Equivalent to ``tuple((x.mean(), x.interval(alpha)) for x in mvsdist(dat))`` References ---------- T.E. Oliphant, "A Bayesian perspective on estimating mean, variance, and standard-deviation from data", http://hdl.handle.net/1877/438, 2006. """ m, v, s = mvsdist(data) if alpha >= 1 or alpha <= 0: raise ValueError("0 < alpha < 1 is required, but alpha=%s was given." % alpha) Mean = namedtuple('Mean', ('statistic', 'minmax')) Variance = namedtuple('Variance', ('statistic', 'minmax')) Std_dev = namedtuple('Std_dev', ('statistic', 'minmax')) m_res = Mean(m.mean(), m.interval(alpha)) v_res = Variance(v.mean(), v.interval(alpha)) s_res = Std_dev(s.mean(), s.interval(alpha)) return m_res, v_res, s_res def mvsdist(data): """ 'Frozen' distributions for mean, variance, and standard deviation of data. Parameters ---------- data : array_like Input array. Converted to 1-D using ravel. Requires 2 or more data-points. 
Returns ------- mdist : "frozen" distribution object Distribution object representing the mean of the data vdist : "frozen" distribution object Distribution object representing the variance of the data sdist : "frozen" distribution object Distribution object representing the standard deviation of the data Notes ----- The return values from bayes_mvs(data) is equivalent to ``tuple((x.mean(), x.interval(0.90)) for x in mvsdist(data))``. In other words, calling ``<dist>.mean()`` and ``<dist>.interval(0.90)`` on the three distribution objects returned from this function will give the same results that are returned from `bayes_mvs`. Examples -------- >>> from scipy import stats >>> data = [6, 9, 12, 7, 8, 8, 13] >>> mean, var, std = stats.mvsdist(data) We now have frozen distribution objects "mean", "var" and "std" that we can examine: >>> mean.mean() 9.0 >>> mean.interval(0.95) (6.6120585482655692, 11.387941451734431) >>> mean.std() 1.1952286093343936 """ x = ravel(data) n = len(x) if n < 2: raise ValueError("Need at least 2 data-points.") xbar = x.mean() C = x.var() if n > 1000: # gaussian approximations for large n mdist = distributions.norm(loc=xbar, scale=math.sqrt(C / n)) sdist = distributions.norm(loc=math.sqrt(C), scale=math.sqrt(C / (2. * n))) vdist = distributions.norm(loc=C, scale=math.sqrt(2.0 / n) * C) else: nm1 = n - 1 fac = n * C / 2. val = nm1 / 2. mdist = distributions.t(nm1, loc=xbar, scale=math.sqrt(C / nm1)) sdist = distributions.gengamma(val, -2, scale=math.sqrt(fac)) vdist = distributions.invgamma(val, scale=fac) return mdist, vdist, sdist def kstat(data, n=2): """ Return the nth k-statistic (1<=n<=4 so far). The nth k-statistic is the unique symmetric unbiased estimator of the nth cumulant kappa_n. Parameters ---------- data : array_like Input array. n : int, {1, 2, 3, 4}, optional Default is equal to 2. Returns ------- kstat : float The nth k-statistic. See Also -------- kstatvar: Returns an unbiased estimator of the variance of the k-statistic. Notes ----- The cumulants are related to central moments but are specifically defined using a power series expansion of the logarithm of the characteristic function (which is the Fourier transform of the PDF). In particular let phi(t) be the characteristic function, then:: ln phi(t) = > kappa_n (it)^n / n! (sum from n=0 to inf) The first few cumulants (kappa_n) in terms of central moments (mu_n) are:: kappa_1 = mu_1 kappa_2 = mu_2 kappa_3 = mu_3 kappa_4 = mu_4 - 3*mu_2**2 kappa_5 = mu_5 - 10*mu_2 * mu_3 References ---------- http://mathworld.wolfram.com/k-Statistic.html http://mathworld.wolfram.com/Cumulant.html """ if n > 4 or n < 1: raise ValueError("k-statistics only supported for 1<=n<=4") n = int(n) S = zeros(n + 1, 'd') data = ravel(data) N = len(data) for k in range(1, n + 1): S[k] = sum(data**k, axis=0) if n == 1: return S[1] * 1.0/N elif n == 2: return (N*S[2] - S[1]**2.0) / (N*(N - 1.0)) elif n == 3: return (2*S[1]**3 - 3*N*S[1]*S[2] + N*N*S[3]) / (N*(N - 1.0)*(N - 2.0)) elif n == 4: return ((-6*S[1]**4 + 12*N*S[1]**2 * S[2] - 3*N*(N-1.0)*S[2]**2 - 4*N*(N+1)*S[1]*S[3] + N*N*(N+1)*S[4]) / (N*(N-1.0)*(N-2.0)*(N-3.0))) else: raise ValueError("Should not be here.") def kstatvar(data, n=2): """ Returns an unbiased estimator of the variance of the k-statistic. See `kstat` for more details of the k-statistic. Parameters ---------- data : array_like Input array. n : int, {1, 2}, optional Default is equal to 2. Returns ------- kstatvar : float The nth k-statistic variance. 
See Also -------- kstat """ data = ravel(data) N = len(data) if n == 1: return kstat(data, n=2) * 1.0/N elif n == 2: k2 = kstat(data, n=2) k4 = kstat(data, n=4) return (2*N*k2**2 + (N-1)*k4) / (N*(N+1)) else: raise ValueError("Only n=1 or n=2 supported.") def _calc_uniform_order_statistic_medians(x): """See Notes section of `probplot` for details.""" N = len(x) osm_uniform = np.zeros(N, dtype=np.float64) osm_uniform[-1] = 0.5**(1.0 / N) osm_uniform[0] = 1 - osm_uniform[-1] i = np.arange(2, N) osm_uniform[1:-1] = (i - 0.3175) / (N + 0.365) return osm_uniform def _parse_dist_kw(dist, enforce_subclass=True): """Parse `dist` keyword. Parameters ---------- dist : str or stats.distributions instance. Several functions take `dist` as a keyword, hence this utility function. enforce_subclass : bool, optional If True (default), `dist` needs to be a `_distn_infrastructure.rv_generic` instance. It can sometimes be useful to set this keyword to False, if a function wants to accept objects that just look somewhat like such an instance (for example, they have a ``ppf`` method). """ if isinstance(dist, rv_generic): pass elif isinstance(dist, string_types): try: dist = getattr(distributions, dist) except AttributeError: raise ValueError("%s is not a valid distribution name" % dist) elif enforce_subclass: msg = ("`dist` should be a stats.distributions instance or a string " "with the name of such a distribution.") raise ValueError(msg) return dist def _add_axis_labels_title(plot, xlabel, ylabel, title): """Helper function to add axes labels and a title to stats plots""" try: if hasattr(plot, 'set_title'): # Matplotlib Axes instance or something that looks like it plot.set_title(title) plot.set_xlabel(xlabel) plot.set_ylabel(ylabel) else: # matplotlib.pyplot module plot.title(title) plot.xlabel(xlabel) plot.ylabel(ylabel) except: # Not an MPL object or something that looks (enough) like it. # Don't crash on adding labels or title pass def probplot(x, sparams=(), dist='norm', fit=True, plot=None): """ Calculate quantiles for a probability plot, and optionally show the plot. Generates a probability plot of sample data against the quantiles of a specified theoretical distribution (the normal distribution by default). `probplot` optionally calculates a best-fit line for the data and plots the results using Matplotlib or a given plot function. Parameters ---------- x : array_like Sample/response data from which `probplot` creates the plot. sparams : tuple, optional Distribution-specific shape parameters (shape parameters plus location and scale). dist : str or stats.distributions instance, optional Distribution or distribution function name. The default is 'norm' for a normal probability plot. Objects that look enough like a stats.distributions instance (i.e. they have a ``ppf`` method) are also accepted. fit : bool, optional Fit a least-squares regression (best-fit) line to the sample data if True (default). plot : object, optional If given, plots the quantiles and least squares fit. `plot` is an object that has to have methods "plot" and "text". The `matplotlib.pyplot` module or a Matplotlib Axes object can be used, or a custom object with the same methods. Default is None, which means that no plot is created. Returns ------- (osm, osr) : tuple of ndarrays Tuple of theoretical quantiles (osm, or order statistic medians) and ordered responses (osr). `osr` is simply sorted input `x`. For details on how `osm` is calculated see the Notes section. 
(slope, intercept, r) : tuple of floats, optional Tuple containing the result of the least-squares fit, if that is performed by `probplot`. `r` is the square root of the coefficient of determination. If ``fit=False`` and ``plot=None``, this tuple is not returned. Notes ----- Even if `plot` is given, the figure is not shown or saved by `probplot`; ``plt.show()`` or ``plt.savefig('figname.png')`` should be used after calling `probplot`. `probplot` generates a probability plot, which should not be confused with a Q-Q or a P-P plot. Statsmodels has more extensive functionality of this type, see ``statsmodels.api.ProbPlot``. The formula used for the theoretical quantiles (horizontal axis of the probability plot) is Filliben's estimate:: quantiles = dist.ppf(val), for 0.5**(1/n), for i = n val = (i - 0.3175) / (n + 0.365), for i = 2, ..., n-1 1 - 0.5**(1/n), for i = 1 where ``i`` indicates the i-th ordered value and ``n`` is the total number of values. Examples -------- >>> from scipy import stats >>> import matplotlib.pyplot as plt >>> nsample = 100 >>> np.random.seed(7654321) A t distribution with small degrees of freedom: >>> ax1 = plt.subplot(221) >>> x = stats.t.rvs(3, size=nsample) >>> res = stats.probplot(x, plot=plt) A t distribution with larger degrees of freedom: >>> ax2 = plt.subplot(222) >>> x = stats.t.rvs(25, size=nsample) >>> res = stats.probplot(x, plot=plt) A mixture of two normal distributions with broadcasting: >>> ax3 = plt.subplot(223) >>> x = stats.norm.rvs(loc=[0,5], scale=[1,1.5], ... size=(nsample/2.,2)).ravel() >>> res = stats.probplot(x, plot=plt) A standard normal distribution: >>> ax4 = plt.subplot(224) >>> x = stats.norm.rvs(loc=0, scale=1, size=nsample) >>> res = stats.probplot(x, plot=plt) Produce a new figure with a loggamma distribution, using the ``dist`` and ``sparams`` keywords: >>> fig = plt.figure() >>> ax = fig.add_subplot(111) >>> x = stats.loggamma.rvs(c=2.5, size=500) >>> stats.probplot(x, dist=stats.loggamma, sparams=(2.5,), plot=ax) >>> ax.set_title("Probplot for loggamma dist with shape parameter 2.5") Show the results with Matplotlib: >>> plt.show() """ x = np.asarray(x) _perform_fit = fit or (plot is not None) if x.size == 0: if _perform_fit: return (x, x), (np.nan, np.nan, 0.0) else: return x, x osm_uniform = _calc_uniform_order_statistic_medians(x) dist = _parse_dist_kw(dist, enforce_subclass=False) if sparams is None: sparams = () if isscalar(sparams): sparams = (sparams,) if not isinstance(sparams, tuple): sparams = tuple(sparams) osm = dist.ppf(osm_uniform, *sparams) osr = sort(x) if _perform_fit: # perform a linear least squares fit. slope, intercept, r, prob, sterrest = stats.linregress(osm, osr) if plot is not None: plot.plot(osm, osr, 'bo', osm, slope*osm + intercept, 'r-') _add_axis_labels_title(plot, xlabel='Quantiles', ylabel='Ordered Values', title='Probability Plot') # Add R^2 value to the plot as text xmin = amin(osm) xmax = amax(osm) ymin = amin(x) ymax = amax(x) posx = xmin + 0.70 * (xmax - xmin) posy = ymin + 0.01 * (ymax - ymin) plot.text(posx, posy, "$R^2=%1.4f$" % r**2) if fit: return (osm, osr), (slope, intercept, r) else: return osm, osr def ppcc_max(x, brack=(0.0, 1.0), dist='tukeylambda'): """ Calculate the shape parameter that maximizes the PPCC The probability plot correlation coefficient (PPCC) plot can be used to determine the optimal shape parameter for a one-parameter family of distributions. 
ppcc_max returns the shape parameter that would maximize the probability plot correlation coefficient for the given data to a one-parameter family of distributions. Parameters ---------- x : array_like Input array. brack : tuple, optional Triple (a,b,c) where (a<b<c). If bracket consists of two numbers (a, c) then they are assumed to be a starting interval for a downhill bracket search (see `scipy.optimize.brent`). dist : str or stats.distributions instance, optional Distribution or distribution function name. Objects that look enough like a stats.distributions instance (i.e. they have a ``ppf`` method) are also accepted. The default is ``'tukeylambda'``. Returns ------- shape_value : float The shape parameter at which the probability plot correlation coefficient reaches its max value. See also -------- ppcc_plot, probplot, boxcox Notes ----- The brack keyword serves as a starting point which is useful in corner cases. One can use a plot to obtain a rough visual estimate of the location for the maximum to start the search near it. References ---------- .. [1] J.J. Filliben, "The Probability Plot Correlation Coefficient Test for Normality", Technometrics, Vol. 17, pp. 111-117, 1975. .. [2] http://www.itl.nist.gov/div898/handbook/eda/section3/ppccplot.htm Examples -------- First we generate some random data from a Tukey-Lambda distribution, with shape parameter -0.7: >>> from scipy import stats >>> x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000, ... random_state=1234567) + 1e4 Now we explore this data with a PPCC plot as well as the related probability plot and Box-Cox normplot. A red line is drawn where we expect the PPCC value to be maximal (at the shape parameter -0.7 used above): >>> import matplotlib.pyplot as plt >>> fig = plt.figure(figsize=(8, 6)) >>> ax = fig.add_subplot(111) >>> stats.ppcc_plot(x, -5, 5, plot=ax) We calculate the value where the shape should reach its maximum and a red line is drawn there. The line should coincide with the highest point in the ppcc_plot. >>> max = stats.ppcc_max(x) >>> ax.vlines(max, 0, 1, colors='r', label='Expected shape value') >>> plt.show() """ dist = _parse_dist_kw(dist) osm_uniform = _calc_uniform_order_statistic_medians(x) osr = sort(x) # this function computes the x-axis values of the probability plot # and computes a linear regression (including the correlation) # and returns 1-r so that a minimization function maximizes the # correlation def tempfunc(shape, mi, yvals, func): xvals = func(mi, shape) r, prob = stats.pearsonr(xvals, yvals) return 1 - r return optimize.brent(tempfunc, brack=brack, args=(osm_uniform, osr, dist.ppf)) def ppcc_plot(x, a, b, dist='tukeylambda', plot=None, N=80): """ Calculate and optionally plot probability plot correlation coefficient. The probability plot correlation coefficient (PPCC) plot can be used to determine the optimal shape parameter for a one-parameter family of distributions. It cannot be used for distributions without shape parameters (like the normal distribution) or with multiple shape parameters. By default a Tukey-Lambda distribution (`stats.tukeylambda`) is used. A Tukey-Lambda PPCC plot interpolates from long-tailed to short-tailed distributions via an approximately normal one, and is therefore particularly useful in practice. Parameters ---------- x : array_like Input array. a, b: scalar Lower and upper bounds of the shape parameter to use. dist : str or stats.distributions instance, optional Distribution or distribution function name. 
Objects that look enough like a stats.distributions instance (i.e. they have a ``ppf`` method) are also accepted. The default is ``'tukeylambda'``. plot : object, optional If given, plots PPCC against the shape parameter. `plot` is an object that has to have methods "plot" and "text". The `matplotlib.pyplot` module or a Matplotlib Axes object can be used, or a custom object with the same methods. Default is None, which means that no plot is created. N : int, optional Number of points on the horizontal axis (equally distributed from `a` to `b`). Returns ------- svals : ndarray The shape values for which `ppcc` was calculated. ppcc : ndarray The calculated probability plot correlation coefficient values. See also -------- ppcc_max, probplot, boxcox_normplot, tukeylambda References ---------- J.J. Filliben, "The Probability Plot Correlation Coefficient Test for Normality", Technometrics, Vol. 17, pp. 111-117, 1975. Examples -------- First we generate some random data from a Tukey-Lambda distribution, with shape parameter -0.7: >>> from scipy import stats >>> import matplotlib.pyplot as plt >>> np.random.seed(1234567) >>> x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000) + 1e4 Now we explore this data with a PPCC plot as well as the related probability plot and Box-Cox normplot. A red line is drawn where we expect the PPCC value to be maximal (at the shape parameter -0.7 used above): >>> fig = plt.figure(figsize=(12, 4)) >>> ax1 = fig.add_subplot(131) >>> ax2 = fig.add_subplot(132) >>> ax3 = fig.add_subplot(133) >>> stats.probplot(x, plot=ax1) >>> stats.boxcox_normplot(x, -5, 5, plot=ax2) >>> stats.ppcc_plot(x, -5, 5, plot=ax3) >>> ax3.vlines(-0.7, 0, 1, colors='r', label='Expected shape value') >>> plt.show() """ if b <= a: raise ValueError("`b` has to be larger than `a`.") svals = np.linspace(a, b, num=N) ppcc = np.empty_like(svals) for k, sval in enumerate(svals): _, r2 = probplot(x, sval, dist=dist, fit=True) ppcc[k] = r2[-1] if plot is not None: plot.plot(svals, ppcc, 'x') _add_axis_labels_title(plot, xlabel='Shape Values', ylabel='Prob Plot Corr. Coef.', title='(%s) PPCC Plot' % dist) return svals, ppcc def boxcox_llf(lmb, data): r"""The boxcox log-likelihood function. Parameters ---------- lmb : scalar Parameter for Box-Cox transformation. See `boxcox` for details. data : array_like Data to calculate Box-Cox log-likelihood for. If `data` is multi-dimensional, the log-likelihood is calculated along the first axis. Returns ------- llf : float or ndarray Box-Cox log-likelihood of `data` given `lmb`. A float for 1-D `data`, an array otherwise. See Also -------- boxcox, probplot, boxcox_normplot, boxcox_normmax Notes ----- The Box-Cox log-likelihood function is defined here as .. math:: llf = (\lambda - 1) \sum_i(\log(x_i)) - N/2 \log(\sum_i (y_i - \bar{y})^2 / N), where ``y`` is the Box-Cox transformed input data ``x``. Examples -------- >>> from scipy import stats >>> import matplotlib.pyplot as plt >>> from mpl_toolkits.axes_grid1.inset_locator import inset_axes >>> np.random.seed(1245) Generate some random variates and calculate Box-Cox log-likelihood values for them for a range of ``lmbda`` values: >>> x = stats.loggamma.rvs(5, loc=10, size=1000) >>> lmbdas = np.linspace(-2, 10) >>> llf = np.zeros(lmbdas.shape, dtype=np.float) >>> for ii, lmbda in enumerate(lmbdas): ... llf[ii] = stats.boxcox_llf(lmbda, x) Also find the optimal lmbda value with `boxcox`: >>> x_most_normal, lmbda_optimal = stats.boxcox(x) Plot the log-likelihood as function of lmbda. 
Add the optimal lmbda as a horizontal line to check that that's really the optimum: >>> fig = plt.figure() >>> ax = fig.add_subplot(111) >>> ax.plot(lmbdas, llf, 'b.-') >>> ax.axhline(stats.boxcox_llf(lmbda_optimal, x), color='r') >>> ax.set_xlabel('lmbda parameter') >>> ax.set_ylabel('Box-Cox log-likelihood') Now add some probability plots to show that where the log-likelihood is maximized the data transformed with `boxcox` looks closest to normal: >>> locs = [3, 10, 4] # 'lower left', 'center', 'lower right' >>> for lmbda, loc in zip([-1, lmbda_optimal, 9], locs): ... xt = stats.boxcox(x, lmbda=lmbda) ... (osm, osr), (slope, intercept, r_sq) = stats.probplot(xt) ... ax_inset = inset_axes(ax, width="20%", height="20%", loc=loc) ... ax_inset.plot(osm, osr, 'c.', osm, slope*osm + intercept, 'k-') ... ax_inset.set_xticklabels([]) ... ax_inset.set_yticklabels([]) ... ax_inset.set_title('$\lambda=%1.2f$' % lmbda) >>> plt.show() """ data = np.asarray(data) N = data.shape[0] if N == 0: return np.nan y = boxcox(data, lmb) y_mean = np.mean(y, axis=0) llf = (lmb - 1) * np.sum(np.log(data), axis=0) llf -= N / 2.0 * np.log(np.sum((y - y_mean)**2. / N, axis=0)) return llf def _boxcox_conf_interval(x, lmax, alpha): # Need to find the lambda for which # f(x,lmbda) >= f(x,lmax) - 0.5*chi^2_alpha;1 fac = 0.5 * distributions.chi2.ppf(1 - alpha, 1) target = boxcox_llf(lmax, x) - fac def rootfunc(lmbda, data, target): return boxcox_llf(lmbda, data) - target # Find positive endpoint of interval in which answer is to be found newlm = lmax + 0.5 N = 0 while (rootfunc(newlm, x, target) > 0.0) and (N < 500): newlm += 0.1 N += 1 if N == 500: raise RuntimeError("Could not find endpoint.") lmplus = optimize.brentq(rootfunc, lmax, newlm, args=(x, target)) # Now find negative interval in the same way newlm = lmax - 0.5 N = 0 while (rootfunc(newlm, x, target) > 0.0) and (N < 500): newlm -= 0.1 N += 1 if N == 500: raise RuntimeError("Could not find endpoint.") lmminus = optimize.brentq(rootfunc, newlm, lmax, args=(x, target)) return lmminus, lmplus def boxcox(x, lmbda=None, alpha=None): r""" Return a positive dataset transformed by a Box-Cox power transformation. Parameters ---------- x : ndarray Input array. Should be 1-dimensional. lmbda : {None, scalar}, optional If `lmbda` is not None, do the transformation for that value. If `lmbda` is None, find the lambda that maximizes the log-likelihood function and return it as the second output argument. alpha : {None, float}, optional If ``alpha`` is not None, return the ``100 * (1-alpha)%`` confidence interval for `lmbda` as the third output argument. Must be between 0.0 and 1.0. Returns ------- boxcox : ndarray Box-Cox power transformed array. maxlog : float, optional If the `lmbda` parameter is None, the second returned argument is the lambda that maximizes the log-likelihood function. (min_ci, max_ci) : tuple of float, optional If `lmbda` parameter is None and ``alpha`` is not None, this returned tuple of floats represents the minimum and maximum confidence limits given ``alpha``. See Also -------- probplot, boxcox_normplot, boxcox_normmax, boxcox_llf Notes ----- The Box-Cox transform is given by:: y = (x**lmbda - 1) / lmbda, for lmbda > 0 log(x), for lmbda = 0 `boxcox` requires the input data to be positive. Sometimes a Box-Cox transformation provides a shift parameter to achieve this; `boxcox` does not. Such a shift parameter is equivalent to adding a positive constant to `x` before calling `boxcox`. 
The confidence limits returned when ``alpha`` is provided give the interval where: .. math:: llf(\hat{\lambda}) - llf(\lambda) < \frac{1}{2}\chi^2(1 - \alpha, 1), with ``llf`` the log-likelihood function and :math:`\chi^2` the chi-squared function. References ---------- G.E.P. Box and D.R. Cox, "An Analysis of Transformations", Journal of the Royal Statistical Society B, 26, 211-252 (1964). Examples -------- >>> from scipy import stats >>> import matplotlib.pyplot as plt We generate some random variates from a non-normal distribution and make a probability plot for it, to show it is non-normal in the tails: >>> fig = plt.figure() >>> ax1 = fig.add_subplot(211) >>> x = stats.loggamma.rvs(5, size=500) + 5 >>> stats.probplot(x, dist=stats.norm, plot=ax1) >>> ax1.set_xlabel('') >>> ax1.set_title('Probplot against normal distribution') We now use `boxcox` to transform the data so it's closest to normal: >>> ax2 = fig.add_subplot(212) >>> xt, _ = stats.boxcox(x) >>> stats.probplot(xt, dist=stats.norm, plot=ax2) >>> ax2.set_title('Probplot after Box-Cox transformation') >>> plt.show() """ x = np.asarray(x) if x.size == 0: return x if any(x <= 0): raise ValueError("Data must be positive.") if lmbda is not None: # single transformation return special.boxcox(x, lmbda) # If lmbda=None, find the lmbda that maximizes the log-likelihood function. lmax = boxcox_normmax(x, method='mle') y = boxcox(x, lmax) if alpha is None: return y, lmax else: # Find confidence interval interval = _boxcox_conf_interval(x, lmax, alpha) return y, lmax, interval def boxcox_normmax(x, brack=(-2.0, 2.0), method='pearsonr'): """Compute optimal Box-Cox transform parameter for input data. Parameters ---------- x : array_like Input array. brack : 2-tuple, optional The starting interval for a downhill bracket search with `optimize.brent`. Note that this is in most cases not critical; the final result is allowed to be outside this bracket. method : str, optional The method to determine the optimal transform parameter (`boxcox` ``lmbda`` parameter). Options are: 'pearsonr' (default) Maximizes the Pearson correlation coefficient between ``y = boxcox(x)`` and the expected values for ``y`` if `x` would be normally-distributed. 'mle' Minimizes the log-likelihood `boxcox_llf`. This is the method used in `boxcox`. 'all' Use all optimization methods available, and return all results. Useful to compare different methods. Returns ------- maxlog : float or ndarray The optimal transform parameter found. An array instead of a scalar for ``method='all'``. See Also -------- boxcox, boxcox_llf, boxcox_normplot Examples -------- >>> from scipy import stats >>> import matplotlib.pyplot as plt >>> np.random.seed(1234) # make this example reproducible Generate some data and determine optimal ``lmbda`` in various ways: >>> x = stats.loggamma.rvs(5, size=30) + 5 >>> y, lmax_mle = stats.boxcox(x) >>> lmax_pearsonr = stats.boxcox_normmax(x) >>> lmax_mle 7.177... >>> lmax_pearsonr 7.916... 
>>> stats.boxcox_normmax(x, method='all') array([ 7.91667384, 7.17718692]) >>> fig = plt.figure() >>> ax = fig.add_subplot(111) >>> stats.boxcox_normplot(x, -10, 10, plot=ax) >>> ax.axvline(lmax_mle, color='r') >>> ax.axvline(lmax_pearsonr, color='g', ls='--') >>> plt.show() """ def _pearsonr(x, brack): osm_uniform = _calc_uniform_order_statistic_medians(x) xvals = distributions.norm.ppf(osm_uniform) def _eval_pearsonr(lmbda, xvals, samps): # This function computes the x-axis values of the probability plot # and computes a linear regression (including the correlation) and # returns ``1 - r`` so that a minimization function maximizes the # correlation. y = boxcox(samps, lmbda) yvals = np.sort(y) r, prob = stats.pearsonr(xvals, yvals) return 1 - r return optimize.brent(_eval_pearsonr, brack=brack, args=(xvals, x)) def _mle(x, brack): def _eval_mle(lmb, data): # function to minimize return -boxcox_llf(lmb, data) return optimize.brent(_eval_mle, brack=brack, args=(x,)) def _all(x, brack): maxlog = np.zeros(2, dtype=np.float) maxlog[0] = _pearsonr(x, brack) maxlog[1] = _mle(x, brack) return maxlog methods = {'pearsonr': _pearsonr, 'mle': _mle, 'all': _all} if method not in methods.keys(): raise ValueError("Method %s not recognized." % method) optimfunc = methods[method] return optimfunc(x, brack) def boxcox_normplot(x, la, lb, plot=None, N=80): """Compute parameters for a Box-Cox normality plot, optionally show it. A Box-Cox normality plot shows graphically what the best transformation parameter is to use in `boxcox` to obtain a distribution that is close to normal. Parameters ---------- x : array_like Input array. la, lb : scalar The lower and upper bounds for the ``lmbda`` values to pass to `boxcox` for Box-Cox transformations. These are also the limits of the horizontal axis of the plot if that is generated. plot : object, optional If given, plots the quantiles and least squares fit. `plot` is an object that has to have methods "plot" and "text". The `matplotlib.pyplot` module or a Matplotlib Axes object can be used, or a custom object with the same methods. Default is None, which means that no plot is created. N : int, optional Number of points on the horizontal axis (equally distributed from `la` to `lb`). Returns ------- lmbdas : ndarray The ``lmbda`` values for which a Box-Cox transform was done. ppcc : ndarray Probability Plot Correlelation Coefficient, as obtained from `probplot` when fitting the Box-Cox transformed input `x` against a normal distribution. See Also -------- probplot, boxcox, boxcox_normmax, boxcox_llf, ppcc_max Notes ----- Even if `plot` is given, the figure is not shown or saved by `boxcox_normplot`; ``plt.show()`` or ``plt.savefig('figname.png')`` should be used after calling `probplot`. 
Examples -------- >>> from scipy import stats >>> import matplotlib.pyplot as plt Generate some non-normally distributed data, and create a Box-Cox plot: >>> x = stats.loggamma.rvs(5, size=500) + 5 >>> fig = plt.figure() >>> ax = fig.add_subplot(111) >>> stats.boxcox_normplot(x, -20, 20, plot=ax) Determine and plot the optimal ``lmbda`` to transform ``x`` and plot it in the same plot: >>> _, maxlog = stats.boxcox(x) >>> ax.axvline(maxlog, color='r') >>> plt.show() """ x = np.asarray(x) if x.size == 0: return x if lb <= la: raise ValueError("`lb` has to be larger than `la`.") lmbdas = np.linspace(la, lb, num=N) ppcc = lmbdas * 0.0 for i, val in enumerate(lmbdas): # Determine for each lmbda the correlation coefficient of transformed x z = boxcox(x, lmbda=val) _, r2 = probplot(z, dist='norm', fit=True) ppcc[i] = r2[-1] if plot is not None: plot.plot(lmbdas, ppcc, 'x') _add_axis_labels_title(plot, xlabel='$\lambda$', ylabel='Prob Plot Corr. Coef.', title='Box-Cox Normality Plot') return lmbdas, ppcc def shapiro(x, a=None, reta=False): """ Perform the Shapiro-Wilk test for normality. The Shapiro-Wilk test tests the null hypothesis that the data was drawn from a normal distribution. Parameters ---------- x : array_like Array of sample data. a : array_like, optional Array of internal parameters used in the calculation. If these are not given, they will be computed internally. If x has length n, then a must have length n/2. reta : bool, optional Whether or not to return the internally computed a values. The default is False. Returns ------- W : float The test statistic. p-value : float The p-value for the hypothesis test. a : array_like, optional If `reta` is True, then these are the internally computed "a" values that may be passed into this function on future calls. See Also -------- anderson : The Anderson-Darling test for normality References ---------- .. [1] http://www.itl.nist.gov/div898/handbook/prc/section2/prc213.htm """ N = len(x) if N < 3: raise ValueError("Data must be at least length 3.") if a is None: a = zeros(N, 'f') init = 0 else: if len(a) != N // 2: raise ValueError("len(a) must equal len(x)/2") init = 1 y = sort(x) a, w, pw, ifault = statlib.swilk(y, a[:N//2], init) if ifault not in [0, 2]: warnings.warn(str(ifault)) if N > 5000: warnings.warn("p-value may not be accurate for N > 5000.") if reta: return w, pw, a else: return w, pw # Values from Stephens, M A, "EDF Statistics for Goodness of Fit and # Some Comparisons", Journal of he American Statistical # Association, Vol. 69, Issue 347, Sept. 1974, pp 730-737 _Avals_norm = array([0.576, 0.656, 0.787, 0.918, 1.092]) _Avals_expon = array([0.922, 1.078, 1.341, 1.606, 1.957]) # From Stephens, M A, "Goodness of Fit for the Extreme Value Distribution", # Biometrika, Vol. 64, Issue 3, Dec. 1977, pp 583-588. _Avals_gumbel = array([0.474, 0.637, 0.757, 0.877, 1.038]) # From Stephens, M A, "Tests of Fit for the Logistic Distribution Based # on the Empirical Distribution Function.", Biometrika, # Vol. 66, Issue 3, Dec. 1979, pp 591-595. _Avals_logistic = array([0.426, 0.563, 0.660, 0.769, 0.906, 1.010]) def anderson(x, dist='norm'): """ Anderson-Darling test for data coming from a particular distribution The Anderson-Darling test is a modification of the Kolmogorov- Smirnov test `kstest` for the null hypothesis that a sample is drawn from a population that follows a particular distribution. For the Anderson-Darling test, the critical values depend on which distribution is being tested against. 
This function works for normal, exponential, logistic, or Gumbel (Extreme Value Type I) distributions. Parameters ---------- x : array_like array of sample data dist : {'norm','expon','logistic','gumbel','extreme1'}, optional the type of distribution to test against. The default is 'norm' and 'extreme1' is a synonym for 'gumbel' Returns ------- statistic : float The Anderson-Darling test statistic critical_values : list The critical values for this distribution significance_level : list The significance levels for the corresponding critical values in percents. The function returns critical values for a differing set of significance levels depending on the distribution that is being tested against. Notes ----- Critical values provided are for the following significance levels: normal/exponenential 15%, 10%, 5%, 2.5%, 1% logistic 25%, 10%, 5%, 2.5%, 1%, 0.5% Gumbel 25%, 10%, 5%, 2.5%, 1% If A2 is larger than these critical values then for the corresponding significance level, the null hypothesis that the data come from the chosen distribution can be rejected. References ---------- .. [1] http://www.itl.nist.gov/div898/handbook/prc/section2/prc213.htm .. [2] Stephens, M. A. (1974). EDF Statistics for Goodness of Fit and Some Comparisons, Journal of the American Statistical Association, Vol. 69, pp. 730-737. .. [3] Stephens, M. A. (1976). Asymptotic Results for Goodness-of-Fit Statistics with Unknown Parameters, Annals of Statistics, Vol. 4, pp. 357-369. .. [4] Stephens, M. A. (1977). Goodness of Fit for the Extreme Value Distribution, Biometrika, Vol. 64, pp. 583-588. .. [5] Stephens, M. A. (1977). Goodness of Fit with Special Reference to Tests for Exponentiality , Technical Report No. 262, Department of Statistics, Stanford University, Stanford, CA. .. [6] Stephens, M. A. (1979). Tests of Fit for the Logistic Distribution Based on the Empirical Distribution Function, Biometrika, Vol. 66, pp. 591-595. """ if dist not in ['norm', 'expon', 'gumbel', 'extreme1', 'logistic']: raise ValueError("Invalid distribution; dist must be 'norm', " "'expon', 'gumbel', 'extreme1' or 'logistic'.") y = sort(x) xbar = np.mean(x, axis=0) N = len(y) if dist == 'norm': s = np.std(x, ddof=1, axis=0) w = (y - xbar) / s z = distributions.norm.cdf(w) sig = array([15, 10, 5, 2.5, 1]) critical = around(_Avals_norm / (1.0 + 4.0/N - 25.0/N/N), 3) elif dist == 'expon': w = y / xbar z = distributions.expon.cdf(w) sig = array([15, 10, 5, 2.5, 1]) critical = around(_Avals_expon / (1.0 + 0.6/N), 3) elif dist == 'logistic': def rootfunc(ab, xj, N): a, b = ab tmp = (xj - a) / b tmp2 = exp(tmp) val = [sum(1.0/(1+tmp2), axis=0) - 0.5*N, sum(tmp*(1.0-tmp2)/(1+tmp2), axis=0) + N] return array(val) sol0 = array([xbar, np.std(x, ddof=1, axis=0)]) sol = optimize.fsolve(rootfunc, sol0, args=(x, N), xtol=1e-5) w = (y - sol[0]) / sol[1] z = distributions.logistic.cdf(w) sig = array([25, 10, 5, 2.5, 1, 0.5]) critical = around(_Avals_logistic / (1.0 + 0.25/N), 3) else: # (dist == 'gumbel') or (dist == 'extreme1'): xbar, s = distributions.gumbel_l.fit(x) w = (y - xbar) / s z = distributions.gumbel_l.cdf(w) sig = array([25, 10, 5, 2.5, 1]) critical = around(_Avals_gumbel / (1.0 + 0.2/sqrt(N)), 3) i = arange(1, N + 1) A2 = -N - sum((2*i - 1.0) / N * (log(z) + log(1 - z[::-1])), axis=0) AndersonResult = namedtuple('AndersonResult', ('statistic', 'critical_values', 'significance_level')) return AndersonResult(A2, critical, sig) def _anderson_ksamp_midrank(samples, Z, Zstar, k, n, N): """ Compute A2akN equation 7 of Scholz and Stephens. 
Parameters ---------- samples : sequence of 1-D array_like Array of sample arrays. Z : array_like Sorted array of all observations. Zstar : array_like Sorted array of unique observations. k : int Number of samples. n : array_like Number of observations in each sample. N : int Total number of observations. Returns ------- A2aKN : float The A2aKN statistics of Scholz and Stephens 1987. """ A2akN = 0. Z_ssorted_left = Z.searchsorted(Zstar, 'left') if N == Zstar.size: lj = 1. else: lj = Z.searchsorted(Zstar, 'right') - Z_ssorted_left Bj = Z_ssorted_left + lj / 2. for i in arange(0, k): s = np.sort(samples[i]) s_ssorted_right = s.searchsorted(Zstar, side='right') Mij = s_ssorted_right.astype(np.float) fij = s_ssorted_right - s.searchsorted(Zstar, 'left') Mij -= fij / 2. inner = lj / float(N) * (N*Mij - Bj*n[i])**2 / (Bj*(N - Bj) - N*lj/4.) A2akN += inner.sum() / n[i] A2akN *= (N - 1.) / N return A2akN def _anderson_ksamp_right(samples, Z, Zstar, k, n, N): """ Compute A2akN equation 6 of Scholz & Stephens. Parameters ---------- samples : sequence of 1-D array_like Array of sample arrays. Z : array_like Sorted array of all observations. Zstar : array_like Sorted array of unique observations. k : int Number of samples. n : array_like Number of observations in each sample. N : int Total number of observations. Returns ------- A2KN : float The A2KN statistics of Scholz and Stephens 1987. """ A2kN = 0. lj = Z.searchsorted(Zstar[:-1], 'right') - Z.searchsorted(Zstar[:-1], 'left') Bj = lj.cumsum() for i in arange(0, k): s = np.sort(samples[i]) Mij = s.searchsorted(Zstar[:-1], side='right') inner = lj / float(N) * (N * Mij - Bj * n[i])**2 / (Bj * (N - Bj)) A2kN += inner.sum() / n[i] return A2kN def anderson_ksamp(samples, midrank=True): """The Anderson-Darling test for k-samples. The k-sample Anderson-Darling test is a modification of the one-sample Anderson-Darling test. It tests the null hypothesis that k-samples are drawn from the same population without having to specify the distribution function of that population. The critical values depend on the number of samples. Parameters ---------- samples : sequence of 1-D array_like Array of sample data in arrays. midrank : bool, optional Type of Anderson-Darling test which is computed. Default (True) is the midrank test applicable to continuous and discrete populations. If False, the right side empirical distribution is used. Returns ------- statistic : float Normalized k-sample Anderson-Darling test statistic. critical_values : array The critical values for significance levels 25%, 10%, 5%, 2.5%, 1%. significance_level : float An approximate significance level at which the null hypothesis for the provided samples can be rejected. Raises ------ ValueError If less than 2 samples are provided, a sample is empty, or no distinct observations are in the samples. See Also -------- ks_2samp : 2 sample Kolmogorov-Smirnov test anderson : 1 sample Anderson-Darling test Notes ----- [1]_ Defines three versions of the k-sample Anderson-Darling test: one for continuous distributions and two for discrete distributions, in which ties between samples may occur. The default of this routine is to compute the version based on the midrank empirical distribution function. This test is applicable to continuous and discrete data. If midrank is set to False, the right side empirical distribution is used for a test for discrete data. 
According to [1]_, the two discrete test statistics differ only slightly if a few collisions due to round-off errors occur in the test not adjusted for ties between samples. .. versionadded:: 0.14.0 References ---------- .. [1] Scholz, F. W and Stephens, M. A. (1987), K-Sample Anderson-Darling Tests, Journal of the American Statistical Association, Vol. 82, pp. 918-924. Examples -------- >>> from scipy import stats >>> np.random.seed(314159) The null hypothesis that the two random samples come from the same distribution can be rejected at the 5% level because the returned test value is greater than the critical value for 5% (1.961) but not at the 2.5% level. The interpolation gives an approximate significance level of 3.1%: >>> stats.anderson_ksamp([np.random.normal(size=50), ... np.random.normal(loc=0.5, size=30)]) (2.4615796189876105, array([ 0.325, 1.226, 1.961, 2.718, 3.752]), 0.03134990135800783) The null hypothesis cannot be rejected for three samples from an identical distribution. The approximate p-value (87%) has to be computed by extrapolation and may not be very accurate: >>> stats.anderson_ksamp([np.random.normal(size=50), ... np.random.normal(size=30), np.random.normal(size=20)]) (-0.73091722665244196, array([ 0.44925884, 1.3052767 , 1.9434184 , 2.57696569, 3.41634856]), 0.8789283903979661) """ k = len(samples) if (k < 2): raise ValueError("anderson_ksamp needs at least two samples") samples = list(map(np.asarray, samples)) Z = np.sort(np.hstack(samples)) N = Z.size Zstar = np.unique(Z) if Zstar.size < 2: raise ValueError("anderson_ksamp needs more than one distinct " "observation") n = np.array([sample.size for sample in samples]) if any(n == 0): raise ValueError("anderson_ksamp encountered sample without " "observations") if midrank: A2kN = _anderson_ksamp_midrank(samples, Z, Zstar, k, n, N) else: A2kN = _anderson_ksamp_right(samples, Z, Zstar, k, n, N) H = (1. / n).sum() hs_cs = (1. / arange(N - 1, 1, -1)).cumsum() h = hs_cs[-1] + 1 g = (hs_cs / arange(2, N)).sum() a = (4*g - 6) * (k - 1) + (10 - 6*g)*H b = (2*g - 4)*k**2 + 8*h*k + (2*g - 14*h - 4)*H - 8*h + 4*g - 6 c = (6*h + 2*g - 2)*k**2 + (4*h - 4*g + 6)*k + (2*h - 6)*H + 4*h d = (2*h + 6)*k**2 - 4*h*k sigmasq = (a*N**3 + b*N**2 + c*N + d) / ((N - 1.) * (N - 2.) * (N - 3.)) m = k - 1 A2 = (A2kN - m) / math.sqrt(sigmasq) # The b_i values are the interpolation coefficients from Table 2 # of Scholz and Stephens 1987 b0 = np.array([0.675, 1.281, 1.645, 1.96, 2.326]) b1 = np.array([-0.245, 0.25, 0.678, 1.149, 1.822]) b2 = np.array([-0.105, -0.305, -0.362, -0.391, -0.396]) critical = b0 + b1 / math.sqrt(m) + b2 / m pf = np.polyfit(critical, log(np.array([0.25, 0.1, 0.05, 0.025, 0.01])), 2) if A2 < critical.min() or A2 > critical.max(): warnings.warn("approximate p-value will be computed by extrapolation") p = math.exp(np.polyval(pf, A2)) Anderson_ksampResult = namedtuple('Anderson_ksampResult', ('statistic', 'critical_values', 'significance_level')) return Anderson_ksampResult(A2, critical, p) def ansari(x, y): """ Perform the Ansari-Bradley test for equal scale parameters The Ansari-Bradley test is a non-parametric test for the equality of the scale parameter of the distributions from which two samples were drawn. 
Parameters ---------- x, y : array_like arrays of sample data Returns ------- statistic : float The Ansari-Bradley test statistic pvalue : float The p-value of the hypothesis test See Also -------- fligner : A non-parametric test for the equality of k variances mood : A non-parametric test for the equality of two scale parameters Notes ----- The p-value given is exact when the sample sizes are both less than 55 and there are no ties, otherwise a normal approximation for the p-value is used. References ---------- .. [1] Sprent, Peter and N.C. Smeeton. Applied nonparametric statistical methods. 3rd ed. Chapman and Hall/CRC. 2001. Section 5.8.2. """ x, y = asarray(x), asarray(y) n = len(x) m = len(y) if m < 1: raise ValueError("Not enough other observations.") if n < 1: raise ValueError("Not enough test observations.") AnsariResult = namedtuple('AnsariResult', ('statistic', 'pvalue')) N = m + n xy = r_[x, y] # combine rank = stats.rankdata(xy) symrank = amin(array((rank, N - rank + 1)), 0) AB = sum(symrank[:n], axis=0) uxy = unique(xy) repeats = (len(uxy) != len(xy)) exact = ((m < 55) and (n < 55) and not repeats) if repeats and (m < 55 or n < 55): warnings.warn("Ties preclude use of exact statistic.") if exact: astart, a1, ifault = statlib.gscale(n, m) ind = AB - astart total = sum(a1, axis=0) if ind < len(a1)/2.0: cind = int(ceil(ind)) if ind == cind: pval = 2.0 * sum(a1[:cind+1], axis=0) / total else: pval = 2.0 * sum(a1[:cind], axis=0) / total else: find = int(floor(ind)) if ind == floor(ind): pval = 2.0 * sum(a1[find:], axis=0) / total else: pval = 2.0 * sum(a1[find+1:], axis=0) / total return AnsariResult(AB, min(1.0, pval)) # otherwise compute normal approximation if N % 2: # N odd mnAB = n * (N+1.0)**2 / 4.0 / N varAB = n * m * (N+1.0) * (3+N**2) / (48.0 * N**2) else: mnAB = n * (N+2.0) / 4.0 varAB = m * n * (N+2) * (N-2.0) / 48 / (N-1.0) if repeats: # adjust variance estimates # compute sum(tj * rj**2,axis=0) fac = sum(symrank**2, axis=0) if N % 2: # N odd varAB = m * n * (16*N*fac - (N+1)**4) / (16.0 * N**2 * (N-1)) else: # N even varAB = m * n * (16*fac - N*(N+2)**2) / (16.0 * N * (N-1)) z = (AB - mnAB) / sqrt(varAB) pval = distributions.norm.sf(abs(z)) * 2.0 return AnsariResult(AB, pval) def bartlett(*args): """ Perform Bartlett's test for equal variances Bartlett's test tests the null hypothesis that all input samples are from populations with equal variances. For samples from significantly non-normal populations, Levene's test `levene` is more robust. Parameters ---------- sample1, sample2,... : array_like arrays of sample data. May be different lengths. Returns ------- statistic : float The test statistic. pvalue : float The p-value of the test. See Also -------- fligner : A non-parametric test for the equality of k variances levene : A robust parametric test for equality of k variances Notes ----- Conover et al. (1981) examine many of the existing parametric and nonparametric tests by extensive simulations and they conclude that the tests proposed by Fligner and Killeen (1976) and Levene (1960) appear to be superior in terms of robustness of departures from normality and power [3]_. References ---------- .. [1] http://www.itl.nist.gov/div898/handbook/eda/section3/eda357.htm .. [2] Snedecor, George W. and Cochran, William G. (1989), Statistical Methods, Eighth Edition, Iowa State University Press. .. [3] Park, C. and Lindsay, B. G. (1999). Robust Scale Estimation and Hypothesis Testing based on Quadratic Inference Function. 
Technical Report #99-03, Center for Likelihood Studies, Pennsylvania State University. .. [4] Bartlett, M. S. (1937). Properties of Sufficiency and Statistical Tests. Proceedings of the Royal Society of London. Series A, Mathematical and Physical Sciences, Vol. 160, No.901, pp. 268-282. """ BartlettResult = namedtuple('BartlettResult', ('statistic', 'pvalue')) # Handle empty input for a in args: if np.asanyarray(a).size == 0: return BartlettResult(np.nan, np.nan) k = len(args) if k < 2: raise ValueError("Must enter at least two input sample vectors.") Ni = zeros(k) ssq = zeros(k, 'd') for j in range(k): Ni[j] = len(args[j]) ssq[j] = np.var(args[j], ddof=1) Ntot = sum(Ni, axis=0) spsq = sum((Ni - 1)*ssq, axis=0) / (1.0*(Ntot - k)) numer = (Ntot*1.0 - k) * log(spsq) - sum((Ni - 1.0)*log(ssq), axis=0) denom = 1.0 + 1.0/(3*(k - 1)) * ((sum(1.0/(Ni - 1.0), axis=0)) - 1.0/(Ntot - k)) T = numer / denom pval = distributions.chi2.sf(T, k - 1) # 1 - cdf return BartlettResult(T, pval) def levene(*args, **kwds): """ Perform Levene test for equal variances. The Levene test tests the null hypothesis that all input samples are from populations with equal variances. Levene's test is an alternative to Bartlett's test `bartlett` in the case where there are significant deviations from normality. Parameters ---------- sample1, sample2, ... : array_like The sample data, possibly with different lengths center : {'mean', 'median', 'trimmed'}, optional Which function of the data to use in the test. The default is 'median'. proportiontocut : float, optional When `center` is 'trimmed', this gives the proportion of data points to cut from each end. (See `scipy.stats.trim_mean`.) Default is 0.05. Returns ------- statistic : float The test statistic. pvalue : float The p-value for the test. Notes ----- Three variations of Levene's test are possible. The possibilities and their recommended usages are: * 'median' : Recommended for skewed (non-normal) distributions> * 'mean' : Recommended for symmetric, moderate-tailed distributions. * 'trimmed' : Recommended for heavy-tailed distributions. References ---------- .. [1] http://www.itl.nist.gov/div898/handbook/eda/section3/eda35a.htm .. [2] Levene, H. (1960). In Contributions to Probability and Statistics: Essays in Honor of Harold Hotelling, I. Olkin et al. eds., Stanford University Press, pp. 278-292. .. [3] Brown, M. B. and Forsythe, A. B. (1974), Journal of the American Statistical Association, 69, 364-367 """ # Handle keyword arguments. 
center = 'median' proportiontocut = 0.05 for kw, value in kwds.items(): if kw not in ['center', 'proportiontocut']: raise TypeError("levene() got an unexpected keyword " "argument '%s'" % kw) if kw == 'center': center = value else: proportiontocut = value k = len(args) if k < 2: raise ValueError("Must enter at least two input sample vectors.") Ni = zeros(k) Yci = zeros(k, 'd') if center not in ['mean', 'median', 'trimmed']: raise ValueError("Keyword argument <center> must be 'mean', 'median'" + "or 'trimmed'.") if center == 'median': func = lambda x: np.median(x, axis=0) elif center == 'mean': func = lambda x: np.mean(x, axis=0) else: # center == 'trimmed' args = tuple(stats.trimboth(np.sort(arg), proportiontocut) for arg in args) func = lambda x: np.mean(x, axis=0) for j in range(k): Ni[j] = len(args[j]) Yci[j] = func(args[j]) Ntot = sum(Ni, axis=0) # compute Zij's Zij = [None] * k for i in range(k): Zij[i] = abs(asarray(args[i]) - Yci[i]) # compute Zbari Zbari = zeros(k, 'd') Zbar = 0.0 for i in range(k): Zbari[i] = np.mean(Zij[i], axis=0) Zbar += Zbari[i] * Ni[i] Zbar /= Ntot numer = (Ntot - k) * sum(Ni * (Zbari - Zbar)**2, axis=0) # compute denom_variance dvar = 0.0 for i in range(k): dvar += sum((Zij[i] - Zbari[i])**2, axis=0) denom = (k - 1.0) * dvar W = numer / denom pval = distributions.f.sf(W, k-1, Ntot-k) # 1 - cdf LeveneResult = namedtuple('LeveneResult', ('statistic', 'pvalue')) return LeveneResult(W, pval) @setastest(False) def binom_test(x, n=None, p=0.5, alternative='two-sided'): """ Perform a test that the probability of success is p. This is an exact, two-sided test of the null hypothesis that the probability of success in a Bernoulli experiment is `p`. Parameters ---------- x : integer or array_like the number of successes, or if x has length 2, it is the number of successes and the number of failures. n : integer the number of trials. This is ignored if x gives both the number of successes and failures p : float, optional The hypothesized probability of success. 0 <= p <= 1. The default value is p = 0.5 Returns ------- p-value : float The p-value of the hypothesis test References ---------- .. [1] http://en.wikipedia.org/wiki/Binomial_test """ x = atleast_1d(x).astype(np.integer) if len(x) == 2: n = x[1] + x[0] x = x[0] elif len(x) == 1: x = x[0] if n is None or n < x: raise ValueError("n must be >= x") n = np.int_(n) else: raise ValueError("Incorrect length for x.") if (p > 1.0) or (p < 0.0): raise ValueError("p must be in range [0,1]") if alternative not in ('two-sided', 'less', 'greater'): raise ValueError("alternative not recognized\n" "should be 'two-sided', 'less' or 'greater'") if alternative == 'less': pval = distributions.binom.cdf(x, n, p) return pval if alternative == 'greater': pval = distributions.binom.sf(x-1, n, p) return pval # if alternative was neither 'less' nor 'greater', then it's 'two-sided' d = distributions.binom.pmf(x, n, p) rerr = 1 + 1e-7 if x == p * n: # special case as shortcut, would also be handled by `else` below pval = 1. 
elif x < p * n: i = np.arange(np.ceil(p * n), n+1) y = np.sum(distributions.binom.pmf(i, n, p) <= d*rerr, axis=0) pval = (distributions.binom.cdf(x, n, p) + distributions.binom.sf(n - y, n, p)) else: i = np.arange(np.floor(p*n) + 1) y = np.sum(distributions.binom.pmf(i, n, p) <= d*rerr, axis=0) pval = (distributions.binom.cdf(y-1, n, p) + distributions.binom.sf(x-1, n, p)) return min(1.0, pval) def _apply_func(x, g, func): # g is list of indices into x # separating x into different groups # func should be applied over the groups g = unique(r_[0, g, len(x)]) output = [] for k in range(len(g) - 1): output.append(func(x[g[k]:g[k+1]])) return asarray(output) def fligner(*args, **kwds): """ Perform Fligner-Killeen test for equality of variance. Fligner's test tests the null hypothesis that all input samples are from populations with equal variances. Fligner-Killeen's test is distribution free when populations are identical [2]_. Parameters ---------- sample1, sample2, ... : array_like Arrays of sample data. Need not be the same length. center : {'mean', 'median', 'trimmed'}, optional Keyword argument controlling which function of the data is used in computing the test statistic. The default is 'median'. proportiontocut : float, optional When `center` is 'trimmed', this gives the proportion of data points to cut from each end. (See `scipy.stats.trim_mean`.) Default is 0.05. Returns ------- statistic : float The test statistic. pvalue : float The p-value for the hypothesis test. See Also -------- bartlett : A parametric test for equality of k variances in normal samples levene : A robust parametric test for equality of k variances Notes ----- As with Levene's test there are three variants of Fligner's test that differ by the measure of central tendency used in the test. See `levene` for more information. Conover et al. (1981) examine many of the existing parametric and nonparametric tests by extensive simulations and they conclude that the tests proposed by Fligner and Killeen (1976) and Levene (1960) appear to be superior in terms of robustness of departures from normality and power [3]_. References ---------- .. [1] http://www.stat.psu.edu/~bgl/center/tr/TR993.ps .. [2] Fligner, M.A. and Killeen, T.J. (1976). Distribution-free two-sample tests for scale. 'Journal of the American Statistical Association.' 71(353), 210-213. .. [3] Park, C. and Lindsay, B. G. (1999). Robust Scale Estimation and Hypothesis Testing based on Quadratic Inference Function. Technical Report #99-03, Center for Likelihood Studies, Pennsylvania State University. .. [4] Conover, W. J., Johnson, M. E. and Johnson M. M. (1981). A comparative study of tests for homogeneity of variances, with applications to the outer continental shelf biding data. Technometrics, 23(4), 351-361. """ FlignerResult = namedtuple('FlignerResult', ('statistic', 'pvalue')) # Handle empty input for a in args: if np.asanyarray(a).size == 0: return FlignerResult(np.nan, np.nan) # Handle keyword arguments. 
center = 'median' proportiontocut = 0.05 for kw, value in kwds.items(): if kw not in ['center', 'proportiontocut']: raise TypeError("fligner() got an unexpected keyword " "argument '%s'" % kw) if kw == 'center': center = value else: proportiontocut = value k = len(args) if k < 2: raise ValueError("Must enter at least two input sample vectors.") if center not in ['mean', 'median', 'trimmed']: raise ValueError("Keyword argument <center> must be 'mean', 'median'" + "or 'trimmed'.") if center == 'median': func = lambda x: np.median(x, axis=0) elif center == 'mean': func = lambda x: np.mean(x, axis=0) else: # center == 'trimmed' args = tuple(stats.trimboth(arg, proportiontocut) for arg in args) func = lambda x: np.mean(x, axis=0) Ni = asarray([len(args[j]) for j in range(k)]) Yci = asarray([func(args[j]) for j in range(k)]) Ntot = sum(Ni, axis=0) # compute Zij's Zij = [abs(asarray(args[i]) - Yci[i]) for i in range(k)] allZij = [] g = [0] for i in range(k): allZij.extend(list(Zij[i])) g.append(len(allZij)) ranks = stats.rankdata(allZij) a = distributions.norm.ppf(ranks / (2*(Ntot + 1.0)) + 0.5) # compute Aibar Aibar = _apply_func(a, g, sum) / Ni anbar = np.mean(a, axis=0) varsq = np.var(a, axis=0, ddof=1) Xsq = sum(Ni * (asarray(Aibar) - anbar)**2.0, axis=0) / varsq pval = distributions.chi2.sf(Xsq, k - 1) # 1 - cdf return FlignerResult(Xsq, pval) def mood(x, y, axis=0): """ Perform Mood's test for equal scale parameters. Mood's two-sample test for scale parameters is a non-parametric test for the null hypothesis that two samples are drawn from the same distribution with the same scale parameter. Parameters ---------- x, y : array_like Arrays of sample data. axis : int, optional The axis along which the samples are tested. `x` and `y` can be of different length along `axis`. If `axis` is None, `x` and `y` are flattened and the test is done on all values in the flattened arrays. Returns ------- z : scalar or ndarray The z-score for the hypothesis test. For 1-D inputs a scalar is returned. p-value : scalar ndarray The p-value for the hypothesis test. See Also -------- fligner : A non-parametric test for the equality of k variances ansari : A non-parametric test for the equality of 2 variances bartlett : A parametric test for equality of k variances in normal samples levene : A parametric test for equality of k variances Notes ----- The data are assumed to be drawn from probability distributions ``f(x)`` and ``f(x/s) / s`` respectively, for some probability density function f. The null hypothesis is that ``s == 1``. For multi-dimensional arrays, if the inputs are of shapes ``(n0, n1, n2, n3)`` and ``(n0, m1, n2, n3)``, then if ``axis=1``, the resulting z and p values will have shape ``(n0, n2, n3)``. Note that ``n1`` and ``m1`` don't have to be equal, but the other dimensions do. 
Examples -------- >>> from scipy import stats >>> x2 = np.random.randn(2, 45, 6, 7) >>> x1 = np.random.randn(2, 30, 6, 7) >>> z, p = stats.mood(x1, x2, axis=1) >>> p.shape (2, 6, 7) Find the number of points where the difference in scale is not significant: >>> (p > 0.1).sum() 74 Perform the test with different scales: >>> x1 = np.random.randn(2, 30) >>> x2 = np.random.randn(2, 35) * 10.0 >>> stats.mood(x1, x2, axis=1) (array([-5.84332354, -5.6840814 ]), array([5.11694980e-09, 1.31517628e-08])) """ x = np.asarray(x, dtype=float) y = np.asarray(y, dtype=float) if axis is None: x = x.flatten() y = y.flatten() axis = 0 # Determine shape of the result arrays res_shape = tuple([x.shape[ax] for ax in range(len(x.shape)) if ax != axis]) if not (res_shape == tuple([y.shape[ax] for ax in range(len(y.shape)) if ax != axis])): raise ValueError("Dimensions of x and y on all axes except `axis` " "should match") n = x.shape[axis] m = y.shape[axis] N = m + n if N < 3: raise ValueError("Not enough observations.") xy = np.concatenate((x, y), axis=axis) if axis != 0: xy = np.rollaxis(xy, axis) xy = xy.reshape(xy.shape[0], -1) # Generalized to the n-dimensional case by adding the axis argument, and # using for loops, since rankdata is not vectorized. For improving # performance consider vectorizing rankdata function. all_ranks = np.zeros_like(xy) for j in range(xy.shape[1]): all_ranks[:, j] = stats.rankdata(xy[:, j]) Ri = all_ranks[:n] M = sum((Ri - (N + 1.0) / 2)**2, axis=0) # Approx stat. mnM = n * (N * N - 1.0) / 12 varM = m * n * (N + 1.0) * (N + 2) * (N - 2) / 180 z = (M - mnM) / sqrt(varM) # sf for right tail, cdf for left tail. Factor 2 for two-sidedness z_pos = z > 0 pval = np.zeros_like(z) pval[z_pos] = 2 * distributions.norm.sf(z[z_pos]) pval[~z_pos] = 2 * distributions.norm.cdf(z[~z_pos]) if res_shape == (): # Return scalars, not 0-D arrays z = z[0] pval = pval[0] else: z.shape = res_shape pval.shape = res_shape return z, pval def wilcoxon(x, y=None, zero_method="wilcox", correction=False): """ Calculate the Wilcoxon signed-rank test. The Wilcoxon signed-rank test tests the null hypothesis that two related paired samples come from the same distribution. In particular, it tests whether the distribution of the differences x - y is symmetric about zero. It is a non-parametric version of the paired T-test. Parameters ---------- x : array_like The first set of measurements. y : array_like, optional The second set of measurements. If `y` is not given, then the `x` array is considered to be the differences between the two sets of measurements. zero_method : string, {"pratt", "wilcox", "zsplit"}, optional "pratt": Pratt treatment: includes zero-differences in the ranking process (more conservative) "wilcox": Wilcox treatment: discards all zero-differences "zsplit": Zero rank split: just like Pratt, but spliting the zero rank between positive and negative ones correction : bool, optional If True, apply continuity correction by adjusting the Wilcoxon rank statistic by 0.5 towards the mean value when computing the z-statistic. Default is False. Returns ------- statistic : float The sum of the ranks of the differences above or below zero, whichever is smaller. pvalue : float The two-sided p-value for the test. Notes ----- Because the normal approximation is used for the calculations, the samples used should be large. A typical rule is to require that n > 20. References ---------- .. 
[1] http://en.wikipedia.org/wiki/Wilcoxon_signed-rank_test """ if zero_method not in ["wilcox", "pratt", "zsplit"]: raise ValueError("Zero method should be either 'wilcox' " "or 'pratt' or 'zsplit'") if y is None: d = x else: x, y = map(asarray, (x, y)) if len(x) != len(y): raise ValueError('Unequal N in wilcoxon. Aborting.') d = x - y if zero_method == "wilcox": # Keep all non-zero differences d = compress(np.not_equal(d, 0), d, axis=-1) count = len(d) if count < 10: warnings.warn("Warning: sample size too small for normal approximation.") r = stats.rankdata(abs(d)) r_plus = sum((d > 0) * r, axis=0) r_minus = sum((d < 0) * r, axis=0) if zero_method == "zsplit": r_zero = sum((d == 0) * r, axis=0) r_plus += r_zero / 2. r_minus += r_zero / 2. T = min(r_plus, r_minus) mn = count * (count + 1.) * 0.25 se = count * (count + 1.) * (2. * count + 1.) if zero_method == "pratt": r = r[d != 0] replist, repnum = find_repeats(r) if repnum.size != 0: # Correction for repeated elements. se -= 0.5 * (repnum * (repnum * repnum - 1)).sum() se = sqrt(se / 24) correction = 0.5 * int(bool(correction)) * np.sign(T - mn) z = (T - mn - correction) / se prob = 2. * distributions.norm.sf(abs(z)) WilcoxonResult = namedtuple('WilcoxonResult', ('statistic', 'pvalue')) return WilcoxonResult(T, prob) @setastest(False) def median_test(*args, **kwds): """ Mood's median test. Test that two or more samples come from populations with the same median. Let ``n = len(args)`` be the number of samples. The "grand median" of all the data is computed, and a contingency table is formed by classifying the values in each sample as being above or below the grand median. The contingency table, along with `correction` and `lambda_`, are passed to `scipy.stats.chi2_contingency` to compute the test statistic and p-value. Parameters ---------- sample1, sample2, ... : array_like The set of samples. There must be at least two samples. Each sample must be a one-dimensional sequence containing at least one value. The samples are not required to have the same length. ties : str, optional Determines how values equal to the grand median are classified in the contingency table. The string must be one of:: "below": Values equal to the grand median are counted as "below". "above": Values equal to the grand median are counted as "above". "ignore": Values equal to the grand median are not counted. The default is "below". correction : bool, optional If True, *and* there are just two samples, apply Yates' correction for continuity when computing the test statistic associated with the contingency table. Default is True. lambda_ : float or str, optional. By default, the statistic computed in this test is Pearson's chi-squared statistic. `lambda_` allows a statistic from the Cressie-Read power divergence family to be used instead. See `power_divergence` for details. Default is 1 (Pearson's chi-squared statistic). Returns ------- stat : float The test statistic. The statistic that is returned is determined by `lambda_`. The default is Pearson's chi-squared statistic. p : float The p-value of the test. m : float The grand median. table : ndarray The contingency table. The shape of the table is (2, n), where n is the number of samples. The first row holds the counts of the values above the grand median, and the second row holds the counts of the values below the grand median. The table allows further analysis with, for example, `scipy.stats.chi2_contingency`, or with `scipy.stats.fisher_exact` if there are two samples, without having to recompute the table. 
See Also -------- kruskal : Compute the Kruskal-Wallis H-test for independent samples. mannwhitneyu : Computes the Mann-Whitney rank test on samples x and y. Notes ----- .. versionadded:: 0.15.0 References ---------- .. [1] Mood, A. M., Introduction to the Theory of Statistics. McGraw-Hill (1950), pp. 394-399. .. [2] Zar, J. H., Biostatistical Analysis, 5th ed. Prentice Hall (2010). See Sections 8.12 and 10.15. Examples -------- A biologist runs an experiment in which there are three groups of plants. Group 1 has 16 plants, group 2 has 15 plants, and group 3 has 17 plants. Each plant produces a number of seeds. The seed counts for each group are:: Group 1: 10 14 14 18 20 22 24 25 31 31 32 39 43 43 48 49 Group 2: 28 30 31 33 34 35 36 40 44 55 57 61 91 92 99 Group 3: 0 3 9 22 23 25 25 33 34 34 40 45 46 48 62 67 84 The following code applies Mood's median test to these samples. >>> g1 = [10, 14, 14, 18, 20, 22, 24, 25, 31, 31, 32, 39, 43, 43, 48, 49] >>> g2 = [28, 30, 31, 33, 34, 35, 36, 40, 44, 55, 57, 61, 91, 92, 99] >>> g3 = [0, 3, 9, 22, 23, 25, 25, 33, 34, 34, 40, 45, 46, 48, 62, 67, 84] >>> from scipy.stats import median_test >>> stat, p, med, tbl = median_test(g1, g2, g3) The median is >>> med 34.0 and the contingency table is >>> tbl array([[ 5, 10, 7], [11, 5, 10]]) `p` is too large to conclude that the medians are not the same: >>> p 0.12609082774093244 The "G-test" can be performed by passing ``lambda_="log-likelihood"`` to `median_test`. >>> g, p, med, tbl = median_test(g1, g2, g3, lambda_="log-likelihood") >>> p 0.12224779737117837 The median occurs several times in the data, so we'll get a different result if, for example, ``ties="above"`` is used: >>> stat, p, med, tbl = median_test(g1, g2, g3, ties="above") >>> p 0.063873276069553273 >>> tbl array([[ 5, 11, 9], [11, 4, 8]]) This example demonstrates that if the data set is not large and there are values equal to the median, the p-value can be sensitive to the choice of `ties`. """ ties = kwds.pop('ties', 'below') correction = kwds.pop('correction', True) lambda_ = kwds.pop('lambda_', None) if len(kwds) > 0: bad_kwd = kwds.keys()[0] raise TypeError("median_test() got an unexpected keyword " "argument %r" % bad_kwd) if len(args) < 2: raise ValueError('median_test requires two or more samples.') ties_options = ['below', 'above', 'ignore'] if ties not in ties_options: raise ValueError("invalid 'ties' option '%s'; 'ties' must be one " "of: %s" % (ties, str(ties_options)[1:-1])) data = [np.asarray(arg) for arg in args] # Validate the sizes and shapes of the arguments. for k, d in enumerate(data): if d.size == 0: raise ValueError("Sample %d is empty. All samples must " "contain at least one value." % (k + 1)) if d.ndim != 1: raise ValueError("Sample %d has %d dimensions. All " "samples must be one-dimensional sequences." % (k + 1, d.ndim)) grand_median = np.median(np.concatenate(data)) # Create the contingency table. table = np.zeros((2, len(data)), dtype=np.int64) for k, sample in enumerate(data): nabove = count_nonzero(sample > grand_median) nbelow = count_nonzero(sample < grand_median) nequal = sample.size - (nabove + nbelow) table[0, k] += nabove table[1, k] += nbelow if ties == "below": table[1, k] += nequal elif ties == "above": table[0, k] += nequal # Check that no row or column of the table is all zero. # Such a table can not be given to chi2_contingency, because it would have # a zero in the table of expected frequencies. 
rowsums = table.sum(axis=1) if rowsums[0] == 0: raise ValueError("All values are below the grand median (%r)." % grand_median) if rowsums[1] == 0: raise ValueError("All values are above the grand median (%r)." % grand_median) if ties == "ignore": # We already checked that each sample has at least one value, but it # is possible that all those values equal the grand median. If `ties` # is "ignore", that would result in a column of zeros in `table`. We # check for that case here. zero_cols = np.where((table == 0).all(axis=0))[0] if len(zero_cols) > 0: msg = ("All values in sample %d are equal to the grand " "median (%r), so they are ignored, resulting in an " "empty sample." % (zero_cols[0] + 1, grand_median)) raise ValueError(msg) stat, p, dof, expected = chi2_contingency(table, lambda_=lambda_, correction=correction) return stat, p, grand_median, table def _hermnorm(N): # return the negatively normalized hermite polynomials up to order N-1 # (inclusive) # using the recursive relationship # p_n+1 = p_n(x)' - x*p_n(x) # and p_0(x) = 1 plist = [None] * N plist[0] = poly1d(1) for n in range(1, N): plist[n] = plist[n-1].deriv() - poly1d([1, 0]) * plist[n-1] return plist # Note: when removing pdf_fromgamma, also remove the _hermnorm support function @np.deprecate(message="scipy.stats.pdf_fromgamma is deprecated in scipy 0.16.0 " "in favour of statsmodels.distributions.ExpandedNormal.") def pdf_fromgamma(g1, g2, g3=0.0, g4=None): if g4 is None: g4 = 3 * g2**2 sigsq = 1.0 / g2 sig = sqrt(sigsq) mu = g1 * sig**3.0 p12 = _hermnorm(13) for k in range(13): p12[k] /= sig**k # Add all of the terms to polynomial totp = (p12[0] - g1/6.0*p12[3] + g2/24.0*p12[4] + g1**2/72.0 * p12[6] - g3/120.0*p12[5] - g1*g2/144.0*p12[7] - g1**3.0/1296.0*p12[9] + g4/720*p12[6] + (g2**2/1152.0 + g1*g3/720)*p12[8] + g1**2 * g2/1728.0*p12[10] + g1**4.0 / 31104.0*p12[12]) # Final normalization totp = totp / sqrt(2*pi) / sig def thefunc(x): xn = (x - mu) / sig return totp(xn) * exp(-xn**2 / 2.) return thefunc def _circfuncs_common(samples, high, low): samples = np.asarray(samples) if samples.size == 0: return np.nan, np.nan ang = (samples - low)*2*pi / (high - low) return samples, ang def circmean(samples, high=2*pi, low=0, axis=None): """ Compute the circular mean for samples in a range. Parameters ---------- samples : array_like Input array. high : float or int, optional High boundary for circular mean range. Default is ``2*pi``. low : float or int, optional Low boundary for circular mean range. Default is 0. axis : int, optional Axis along which means are computed. The default is to compute the mean of the flattened array. Returns ------- circmean : float Circular mean. """ samples, ang = _circfuncs_common(samples, high, low) res = angle(np.mean(exp(1j * ang), axis=axis)) mask = res < 0 if mask.ndim > 0: res[mask] += 2*pi elif mask: res += 2*pi return res*(high - low)/2.0/pi + low def circvar(samples, high=2*pi, low=0, axis=None): """ Compute the circular variance for samples assumed to be in a range Parameters ---------- samples : array_like Input array. low : float or int, optional Low boundary for circular variance range. Default is 0. high : float or int, optional High boundary for circular variance range. Default is ``2*pi``. axis : int, optional Axis along which variances are computed. The default is to compute the variance of the flattened array. Returns ------- circvar : float Circular variance. 
Notes ----- This uses a definition of circular variance that in the limit of small angles returns a number close to the 'linear' variance. """ samples, ang = _circfuncs_common(samples, high, low) res = np.mean(exp(1j * ang), axis=axis) R = abs(res) return ((high - low)/2.0/pi)**2 * 2 * log(1/R) def circstd(samples, high=2*pi, low=0, axis=None): """ Compute the circular standard deviation for samples assumed to be in the range [low to high]. Parameters ---------- samples : array_like Input array. low : float or int, optional Low boundary for circular standard deviation range. Default is 0. high : float or int, optional High boundary for circular standard deviation range. Default is ``2*pi``. axis : int, optional Axis along which standard deviations are computed. The default is to compute the standard deviation of the flattened array. Returns ------- circstd : float Circular standard deviation. Notes ----- This uses a definition of circular standard deviation that in the limit of small angles returns a number close to the 'linear' standard deviation. """ samples, ang = _circfuncs_common(samples, high, low) res = np.mean(exp(1j * ang), axis=axis) R = abs(res) return ((high - low)/2.0/pi) * sqrt(-2*log(R)) # Tests to include (from R) -- some of these already in stats. ######## # X Ansari-Bradley # X Bartlett (and Levene) # X Binomial # Y Pearson's Chi-squared (stats.chisquare) # Y Association Between Paired samples (stats.pearsonr, stats.spearmanr) # stats.kendalltau) -- these need work though # Fisher's exact test # X Fligner-Killeen Test # Y Friedman Rank Sum (stats.friedmanchisquare?) # Y Kruskal-Wallis # Y Kolmogorov-Smirnov # Cochran-Mantel-Haenszel Chi-Squared for Count # McNemar's Chi-squared for Count # X Mood Two-Sample # X Test For Equal Means in One-Way Layout (see stats.ttest also) # Pairwise Comparisons of proportions # Pairwise t tests # Tabulate p values for pairwise comparisons # Pairwise Wilcoxon rank sum tests # Power calculations two sample test of prop. # Power calculations for one and two sample t tests # Equal or Given Proportions # Trend in Proportions # Quade Test # Y Student's T Test # Y F Test to compare two variances # XY Wilcoxon Rank Sum and Signed Rank Tests
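# ----------------------------------------------------------------------
# Hedged usage sketch (editorial addition, not part of the original scipy
# source): a minimal, self-contained illustration of several of the tests
# defined in this module.  It assumes the module is importable as
# ``scipy.stats``; the sample data and seed below are invented purely for
# demonstration.
if __name__ == '__main__':
    import numpy as np
    from scipy import stats

    rng = np.random.RandomState(12345)
    x = rng.normal(loc=5.0, scale=2.0, size=100)
    y = rng.normal(loc=5.0, scale=3.0, size=100)

    # One-sample normality checks: Shapiro-Wilk and Anderson-Darling.
    w, p_shapiro = stats.shapiro(x)
    a2, crit_vals, sig_levels = stats.anderson(x, dist='norm')

    # Two-sample scale tests: Ansari-Bradley and Mood.
    ab_stat, ab_pval = stats.ansari(x, y)
    z, mood_pval = stats.mood(x, y)

    # Tests for equal variances across samples: Bartlett, Levene, Fligner.
    bart_stat, bart_pval = stats.bartlett(x, y)
    lev_stat, lev_pval = stats.levene(x, y, center='median')
    flig_stat, flig_pval = stats.fligner(x, y, center='median')

    # Paired-sample Wilcoxon signed-rank test on the differences x - y.
    wil_stat, wil_pval = stats.wilcoxon(x, y)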
bsd-3-clause
sosey/ginga
ginga/mplw/FigureCanvasQt.py
1
2460
#
# GingaCanvasQt.py -- classes for the display of FITS files in
#                     Matplotlib FigureCanvas
#
# Eric Jeschke (eric@naoj.org)
#
# Copyright (c) Eric R. Jeschke.  All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.

from __future__ import print_function

from ginga.toolkit import toolkit
if toolkit == 'qt5':
    from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as QtFigureCanvas
else:
    from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as QtFigureCanvas

from ginga.qtw.QtHelp import QtGui, QtCore


def setup_Qt(widget, viewer):

    def resizeEvent(*args):
        rect = widget.geometry()
        x1, y1, x2, y2 = rect.getCoords()
        width = x2 - x1
        height = y2 - y1

        if viewer is not None:
            viewer.configure(width, height)

    widget.setFocusPolicy(QtCore.Qt.FocusPolicy(
        QtCore.Qt.TabFocus |
        QtCore.Qt.ClickFocus |
        QtCore.Qt.StrongFocus |
        QtCore.Qt.WheelFocus))
    widget.setMouseTracking(True)
    widget.setAcceptDrops(True)

    # Matplotlib has a bug where resize events are not reported
    widget.connect(widget, QtCore.SIGNAL('resizeEvent()'),
                   resizeEvent)


class FigureCanvas(QtFigureCanvas):
    """Ultimately, this is a QWidget (as well as a FigureCanvasAgg, etc.).
    """

    def __init__(self, fig, parent=None, width=5, height=4, dpi=100):
        QtFigureCanvas.__init__(self, fig)

        self.viewer = None

        setup_Qt(self, None)

        self.setParent(parent)

        FigureCanvas.setSizePolicy(self,
                                   QtGui.QSizePolicy.Expanding,
                                   QtGui.QSizePolicy.Expanding)
        FigureCanvas.updateGeometry(self)

    def resizeEvent(self, event):
        rect = self.geometry()
        x1, y1, x2, y2 = rect.getCoords()
        width = x2 - x1
        height = y2 - y1

        if self.viewer is not None:
            self.viewer.configure(width, height)

        return super(FigureCanvas, self).resizeEvent(event)

    def sizeHint(self):
        width, height = 300, 300
        if self.viewer is not None:
            width, height = self.viewer.get_desired_size()
        return QtCore.QSize(width, height)

    def set_viewer(self, viewer):
        self.viewer = viewer

#END
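# ----------------------------------------------------------------------
# Hedged usage sketch (editorial addition, not part of the original ginga
# source): roughly how this FigureCanvas is wired up.  The canvas wraps a
# Matplotlib Figure and forwards Qt resize events to whatever "viewer"
# object is attached via set_viewer().  DummyViewer below is a hypothetical
# stand-in exposing only the two methods the canvas actually calls
# (configure and get_desired_size); a real ginga viewer would normally be
# used.  A Qt4-style binding is assumed here, where QApplication is
# available from the QtGui shim; under Qt5 it lives in QtWidgets.
if __name__ == '__main__':
    import sys
    from matplotlib.figure import Figure

    class DummyViewer(object):
        def configure(self, width, height):
            # A real viewer would resize its rendering surface here.
            print("viewer resized to %dx%d" % (width, height))

        def get_desired_size(self):
            # Preferred widget size reported through FigureCanvas.sizeHint().
            return (512, 512)

    app = QtGui.QApplication(sys.argv)

    fig = Figure()
    canvas = FigureCanvas(fig)
    canvas.set_viewer(DummyViewer())

    canvas.show()
    sys.exit(app.exec_())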
bsd-3-clause