Dataset Viewer
Auto-converted to Parquet Duplicate
repo_name
stringlengths
9
55
path
stringlengths
7
120
copies
stringlengths
1
3
size
stringlengths
4
6
content
stringlengths
1.02k
169k
license
stringclasses
12 values
joshbohde/scikit-learn
examples/plot_permutation_test_for_classification.py
2
2049
""" ================================================================= Test with permutations the significance of a classification score ================================================================= In order to test if a classification score is significative a technique in repeating the classification procedure after randomizing, permuting, the labels. The p-value is then given by the percentage of runs for which the score obtained is greater than the classification score obtained in the first place. """ # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr> # License: BSD print __doc__ import numpy as np import pylab as pl from sklearn.svm import SVC from sklearn.cross_validation import StratifiedKFold, permutation_test_score from sklearn import datasets from sklearn.metrics import zero_one_score ############################################################################## # Loading a dataset iris = datasets.load_iris() X = iris.data y = iris.target n_classes = np.unique(y).size # Some noisy data not correlated random = np.random.RandomState(seed=0) E = random.normal(size=(len(X), 2200)) # Add noisy data to the informative features for make the task harder X = np.c_[X, E] svm = SVC(kernel='linear') cv = StratifiedKFold(y, 2) score, permutation_scores, pvalue = permutation_test_score(svm, X, y, zero_one_score, cv=cv, n_permutations=100, n_jobs=1) print "Classification score %s (pvalue : %s)" % (score, pvalue) ############################################################################### # View histogram of permutation scores pl.hist(permutation_scores, label='Permutation scores') ylim = pl.ylim() pl.vlines(score, ylim[0], ylim[1], linestyle='--', color='g', linewidth=3, label='Classification Score' ' (pvalue %s)' % pvalue) pl.vlines(1.0 / n_classes, ylim[0], ylim[1], linestyle='--', color='k', linewidth=3, label='Luck') pl.ylim(ylim) pl.legend() pl.xlabel('Score') pl.show()
bsd-3-clause
CG-F16-24-Rutgers/steersuite-rutgers
steerstats/tools/plotting/plotMultiObjectiveData.py
8
1340
import csv import matplotlib.pyplot as plt import sys import numpy as np # filename = '../../data/optimization/sf/multiObjective/SteerStatsOpt2.csv' filename = sys.argv[1] xs = [] ys = [] if len(sys.argv) == 2: csvfile = open(filename, 'r') spamreader = csv.reader(csvfile, delimiter=',') xs = [] ys = [] for row in spamreader: xs.append(float(row[0])) ys.append(float(row[1])) elif len(sys.argv) == 3: for i in range(1, int(sys.argv[2])): tmp_filename = filename + str(i) + ".log" csvfile = open(tmp_filename, 'r') spamreader = csv.reader(csvfile, delimiter=',') for row in spamreader: xs.append(float(row[0])) ys.append(float(row[1])) else: print "Wrong" sys.exit() print "xs = " + str(xs) print "ys = " + str(ys) x_min = np.amin(xs) x_max = np.amax(xs) y_min = np.amin(ys) y_max = np.amax(ys) new_xs = (xs - x_min) / (x_max - x_min) new_ys = (ys - y_min) / (y_max - y_min) fig = plt.figure() ax = fig.add_subplot(111) ax.plot(new_xs, new_ys, c="b") # ax.set_xlim([np.amin(xs), np.amax(xs)]) # ax.set_ylim([np.amin(ys), np.amax(ys)]) ax.set_xlim([0.0, 1.0]) ax.set_ylim([0.0, 1.0]) ax.set_xlabel('efficency metric') ax.set_ylabel('PLE metric') ax.set_title("multi-objective optimization") # plt.axis("tight") plt.show()
gpl-3.0
maxlikely/scikit-learn
sklearn/pipeline.py
1
13051
""" The :mod:`sklearn.pipeline` module implements utilites to build a composite estimator, as a chain of transforms and estimators. """ # Author: Edouard Duchesnay # Gael Varoquaux # Virgile Fritsch # Alexandre Gramfort # Licence: BSD import numpy as np from scipy import sparse from .base import BaseEstimator, TransformerMixin from .externals.joblib import Parallel, delayed from .externals import six __all__ = ['Pipeline', 'FeatureUnion'] # One round of beers on me if someone finds out why the backslash # is needed in the Attributes section so as not to upset sphinx. class Pipeline(BaseEstimator): """Pipeline of transforms with a final estimator. Sequentially apply a list of transforms and a final estimator. Intermediate steps of the pipeline must be 'transforms', that is, they must implements fit and transform methods. The final estimator needs only implements fit. The purpose of the pipeline is to assemble several steps that can be cross-validated together while setting different parameters. For this, it enables setting parameters of the various steps using their names and the parameter name separated by a '__', as in the example below. Parameters ---------- steps: list List of (name, transform) tuples (implementing fit/transform) that are chained, in the order in which they are chained, with the last object an estimator. Examples -------- >>> from sklearn import svm >>> from sklearn.datasets import samples_generator >>> from sklearn.feature_selection import SelectKBest >>> from sklearn.feature_selection import f_regression >>> from sklearn.pipeline import Pipeline >>> # generate some data to play with >>> X, y = samples_generator.make_classification( ... 
n_informative=5, n_redundant=0, random_state=42) >>> # ANOVA SVM-C >>> anova_filter = SelectKBest(f_regression, k=5) >>> clf = svm.SVC(kernel='linear') >>> anova_svm = Pipeline([('anova', anova_filter), ('svc', clf)]) >>> # You can set the parameters using the names issued >>> # For instance, fit using a k of 10 in the SelectKBest >>> # and a parameter 'C' of the svn >>> anova_svm.set_params(anova__k=10, svc__C=.1).fit(X, y) ... # doctest: +ELLIPSIS Pipeline(steps=[...]) >>> prediction = anova_svm.predict(X) >>> anova_svm.score(X, y) 0.75 """ # BaseEstimator interface def __init__(self, steps): self.named_steps = dict(steps) names, estimators = zip(*steps) if len(self.named_steps) != len(steps): raise ValueError("Names provided are not unique: %s" % names) self.steps = zip(names, estimators) # shallow copy of steps transforms = estimators[:-1] estimator = estimators[-1] for t in transforms: if (not (hasattr(t, "fit") or hasattr(t, "fit_transform")) or not hasattr(t, "transform")): raise TypeError("All intermediate steps a the chain should " "be transforms and implement fit and transform" "'%s' (type %s) doesn't)" % (t, type(t))) if not hasattr(estimator, "fit"): raise TypeError("Last step of chain should implement fit " "'%s' (type %s) doesn't)" % (estimator, type(estimator))) def get_params(self, deep=True): if not deep: return super(Pipeline, self).get_params(deep=False) else: out = self.named_steps.copy() for name, step in six.iteritems(self.named_steps): for key, value in six.iteritems(step.get_params(deep=True)): out['%s__%s' % (name, key)] = value return out # Estimator interface def _pre_transform(self, X, y=None, **fit_params): fit_params_steps = dict((step, {}) for step, _ in self.steps) for pname, pval in six.iteritems(fit_params): step, param = pname.split('__', 1) fit_params_steps[step][param] = pval Xt = X for name, transform in self.steps[:-1]: if hasattr(transform, "fit_transform"): Xt = transform.fit_transform(Xt, y, **fit_params_steps[name]) else: 
Xt = transform.fit(Xt, y, **fit_params_steps[name]) \ .transform(Xt) return Xt, fit_params_steps[self.steps[-1][0]] def fit(self, X, y=None, **fit_params): """Fit all the transforms one after the other and transform the data, then fit the transformed data using the final estimator. """ Xt, fit_params = self._pre_transform(X, y, **fit_params) self.steps[-1][-1].fit(Xt, y, **fit_params) return self def fit_transform(self, X, y=None, **fit_params): """Fit all the transforms one after the other and transform the data, then use fit_transform on transformed data using the final estimator.""" Xt, fit_params = self._pre_transform(X, y, **fit_params) if hasattr(self.steps[-1][-1], 'fit_transform'): return self.steps[-1][-1].fit_transform(Xt, y, **fit_params) else: return self.steps[-1][-1].fit(Xt, y, **fit_params).transform(Xt) def predict(self, X): """Applies transforms to the data, and the predict method of the final estimator. Valid only if the final estimator implements predict.""" Xt = X for name, transform in self.steps[:-1]: Xt = transform.transform(Xt) return self.steps[-1][-1].predict(Xt) def predict_proba(self, X): """Applies transforms to the data, and the predict_proba method of the final estimator. Valid only if the final estimator implements predict_proba.""" Xt = X for name, transform in self.steps[:-1]: Xt = transform.transform(Xt) return self.steps[-1][-1].predict_proba(Xt) def decision_function(self, X): """Applies transforms to the data, and the decision_function method of the final estimator. Valid only if the final estimator implements decision_function.""" Xt = X for name, transform in self.steps[:-1]: Xt = transform.transform(Xt) return self.steps[-1][-1].decision_function(Xt) def predict_log_proba(self, X): Xt = X for name, transform in self.steps[:-1]: Xt = transform.transform(Xt) return self.steps[-1][-1].predict_log_proba(Xt) def transform(self, X): """Applies transforms to the data, and the transform method of the final estimator. 
Valid only if the final estimator implements transform.""" Xt = X for name, transform in self.steps: Xt = transform.transform(Xt) return Xt def inverse_transform(self, X): if X.ndim == 1: X = X[None, :] Xt = X for name, step in self.steps[::-1]: Xt = step.inverse_transform(Xt) return Xt def score(self, X, y=None): """Applies transforms to the data, and the score method of the final estimator. Valid only if the final estimator implements score.""" Xt = X for name, transform in self.steps[:-1]: Xt = transform.transform(Xt) return self.steps[-1][-1].score(Xt, y) @property def _pairwise(self): # check if first estimator expects pairwise input return getattr(self.steps[0][1], '_pairwise', False) def _fit_one_transformer(transformer, X, y): transformer.fit(X, y) def _transform_one(transformer, name, X, transformer_weights): if transformer_weights is not None and name in transformer_weights: # if we have a weight for this transformer, muliply output return transformer.transform(X) * transformer_weights[name] return transformer.transform(X) def _fit_transform_one(transformer, name, X, y, transformer_weights, **fit_params): if transformer_weights is not None and name in transformer_weights: # if we have a weight for this transformer, muliply output if hasattr(transformer, 'fit_transform'): return (transformer.fit_transform(X, y, **fit_params) * transformer_weights[name]) else: return (transformer.fit(X, y, **fit_params).transform(X) * transformer_weights[name]) if hasattr(transformer, 'fit_transform'): return transformer.fit_transform(X, y, **fit_params) else: return transformer.fit(X, y, **fit_params).transform(X) class FeatureUnion(BaseEstimator, TransformerMixin): """Concatenates results of multiple transformer objects. This estimator applies a list of transformer objects in parallel to the input data, then concatenates the results. This is useful to combine several feature extraction mechanisms into a single transformer. 
Parameters ---------- transformers: list of (name, transformer) List of transformer objects to be applied to the data. n_jobs: int, optional Number of jobs to run in parallel (default 1). transformer_weights: dict, optional Multiplicative weights for features per transformer. Keys are transformer names, values the weights. """ def __init__(self, transformer_list, n_jobs=1, transformer_weights=None): self.transformer_list = transformer_list self.n_jobs = n_jobs self.transformer_weights = transformer_weights def get_feature_names(self): """Get feature names from all transformers. Returns ------- feature_names : list of strings Names of the features produced by transform. """ feature_names = [] for name, trans in self.transformer_list: if not hasattr(trans, 'get_feature_names'): raise AttributeError("Transformer %s does not provide" " get_feature_names." % str(name)) feature_names.extend([name + "__" + f for f in trans.get_feature_names()]) return feature_names def fit(self, X, y=None): """Fit all transformers using X. Parameters ---------- X : array-like or sparse matrix, shape (n_samples, n_features) Input data, used to fit transformers. """ Parallel(n_jobs=self.n_jobs)( delayed(_fit_one_transformer)(trans, X, y) for name, trans in self.transformer_list) return self def fit_transform(self, X, y=None, **fit_params): """Fit all tranformers using X, transform the data and concatenate results. Parameters ---------- X : array-like or sparse matrix, shape (n_samples, n_features) Input data to be transformed. Returns ------- X_t : array-like or sparse matrix, shape (n_samples, sum_n_components) hstack of results of transformers. sum_n_components is the sum of n_components (output dimension) over transformers. 
""" Xs = Parallel(n_jobs=self.n_jobs)( delayed(_fit_transform_one)(trans, name, X, y, self.transformer_weights, **fit_params) for name, trans in self.transformer_list) if any(sparse.issparse(f) for f in Xs): Xs = sparse.hstack(Xs).tocsr() else: Xs = np.hstack(Xs) return Xs def transform(self, X): """Transform X separately by each transformer, concatenate results. Parameters ---------- X : array-like or sparse matrix, shape (n_samples, n_features) Input data to be transformed. Returns ------- X_t : array-like or sparse matrix, shape (n_samples, sum_n_components) hstack of results of transformers. sum_n_components is the sum of n_components (output dimension) over transformers. """ Xs = Parallel(n_jobs=self.n_jobs)( delayed(_transform_one)(trans, name, X, self.transformer_weights) for name, trans in self.transformer_list) if any(sparse.issparse(f) for f in Xs): Xs = sparse.hstack(Xs).tocsr() else: Xs = np.hstack(Xs) return Xs def get_params(self, deep=True): if not deep: return super(FeatureUnion, self).get_params(deep=False) else: out = dict(self.transformer_list) for name, trans in self.transformer_list: for key, value in trans.get_params(deep=True).iteritems(): out['%s__%s' % (name, key)] = value return out
bsd-3-clause
martinggww/lucasenlights
MachineLearning/python_tutorial/KNearestNeighborhood.py
1
1274
''' Classification algorithm Create a model that seperate a dataset proximity probability nearest neighbors What the hack is K? if K=2, find the closet 2 points We want K = odd numbers, K=3, 5, 7... ''' ''' - - +, 66.7% confidence, confidence, accuracy Euclid distance, euclid distance middle point Dataset and the relavent data features class and label id, clump_thickness, unif_cell_size, unif_cell_shape ''' import numpy as np from sklearn import preprocessing, cross_validation, neighbors import pandas as pd df = pd.read_csv('./dataset/breastcancer.csv') print df.head(2) #Most algorithm will recognize it as outliers df.replace('?',-99999, inplace=True) #drop useless column df.drop(['id'], 1, inplace=True) #Convert X to np.array X = np.array(df.drop(['class'], 1)) y = np.array(df['class'] ) X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.2) #Why we didn't set K??? clf = neighbors.KNeighborsClassifier() clf.fit(X_train, y_train) accuracy = clf.score(X_test, y_test) print accuracy #it's also a ndarry example_measures = np.array([[4,2,1,1,1,2,3,2,1],[4,2,1,1,1,2,3,2,1],[4,2,1,1,1,2,3,2,1]]) example_measures = example_measures.reshape(len(example_measures), -1) score = clf.predict(example_measures) print score
cc0-1.0
treycausey/scikit-learn
sklearn/feature_selection/__init__.py
244
1088
""" The :mod:`sklearn.feature_selection` module implements feature selection algorithms. It currently includes univariate filter selection methods and the recursive feature elimination algorithm. """ from .univariate_selection import chi2 from .univariate_selection import f_classif from .univariate_selection import f_oneway from .univariate_selection import f_regression from .univariate_selection import SelectPercentile from .univariate_selection import SelectKBest from .univariate_selection import SelectFpr from .univariate_selection import SelectFdr from .univariate_selection import SelectFwe from .univariate_selection import GenericUnivariateSelect from .variance_threshold import VarianceThreshold from .rfe import RFE from .rfe import RFECV __all__ = ['GenericUnivariateSelect', 'RFE', 'RFECV', 'SelectFdr', 'SelectFpr', 'SelectFwe', 'SelectKBest', 'SelectPercentile', 'VarianceThreshold', 'chi2', 'f_classif', 'f_oneway', 'f_regression']
bsd-3-clause
chenyyx/scikit-learn-doc-zh
examples/zh/cluster/plot_dict_face_patches.py
9
2747
""" Online learning of a dictionary of parts of faces ================================================== This example uses a large dataset of faces to learn a set of 20 x 20 images patches that constitute faces. From the programming standpoint, it is interesting because it shows how to use the online API of the scikit-learn to process a very large dataset by chunks. The way we proceed is that we load an image at a time and extract randomly 50 patches from this image. Once we have accumulated 500 of these patches (using 10 images), we run the `partial_fit` method of the online KMeans object, MiniBatchKMeans. The verbose setting on the MiniBatchKMeans enables us to see that some clusters are reassigned during the successive calls to partial-fit. This is because the number of patches that they represent has become too low, and it is better to choose a random new cluster. """ print(__doc__) import time import matplotlib.pyplot as plt import numpy as np from sklearn import datasets from sklearn.cluster import MiniBatchKMeans from sklearn.feature_extraction.image import extract_patches_2d faces = datasets.fetch_olivetti_faces() # ############################################################################# # Learn the dictionary of images print('Learning the dictionary... 
') rng = np.random.RandomState(0) kmeans = MiniBatchKMeans(n_clusters=81, random_state=rng, verbose=True) patch_size = (20, 20) buffer = [] index = 1 t0 = time.time() # The online learning part: cycle over the whole dataset 6 times index = 0 for _ in range(6): for img in faces.images: data = extract_patches_2d(img, patch_size, max_patches=50, random_state=rng) data = np.reshape(data, (len(data), -1)) buffer.append(data) index += 1 if index % 10 == 0: data = np.concatenate(buffer, axis=0) data -= np.mean(data, axis=0) data /= np.std(data, axis=0) kmeans.partial_fit(data) buffer = [] if index % 100 == 0: print('Partial fit of %4i out of %i' % (index, 6 * len(faces.images))) dt = time.time() - t0 print('done in %.2fs.' % dt) # ############################################################################# # Plot the results plt.figure(figsize=(4.2, 4)) for i, patch in enumerate(kmeans.cluster_centers_): plt.subplot(9, 9, i + 1) plt.imshow(patch.reshape(patch_size), cmap=plt.cm.gray, interpolation='nearest') plt.xticks(()) plt.yticks(()) plt.suptitle('Patches of faces\nTrain time %.1fs on %d patches' % (dt, 8 * len(faces.images)), fontsize=16) plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23) plt.show()
gpl-3.0
jrcapriles/gameSimulator
gameSimulator.py
1
6837
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 18 19:27:57 2014

@author: joser

Simple pygame/ODE projectile ("cannon") simulator: the user aims with the
arrow keys and fires with 'f'; the bullet follows ballistic motion and a
ring buffer of recent positions is drawn as a trace.
"""

import pygame, ode, random, Buttons
from math import atan2, acos, asin, sin, cos
import matplotlib.pyplot as plt
from pygame.locals import *
from numpy import *
from Point import *
from Buttons import *


class gameSimulator( object ):
    """Owns the pygame screen, the ODE world and the game state."""

    def __init__( self, *args, **kwargs):
        """Create the window, buttons and ODE world.

        Keyword options (with defaults): width=600, height=400,
        length=200, fps=50, gravity=-9.81.
        """
        # Initialize pygame
        pygame.init()
        self.width = kwargs.get('width',600)
        self.height = kwargs.get('height',400)
        self.length = kwargs.get('length',200)
        self.fps = kwargs.get('fps',50)
        self.G = kwargs.get('gravity',-9.81)
        # ODE world with gravity along -y
        self.world = ode.World()
        self.world.setGravity((0,self.G,0))
        self.createScreen()
        self.createButtons()
        # Variables of this game
        self.FIRE = False          # True while a bullet is in flight
        self.ANGLE = 0.25          # firing angle in radians
        self.POWER = 6.5           # firing speed (world units / s)
        self.dt = 1.0/self.fps     # simulation time step
        # Ring buffer of recent (x, y) screen positions for the trace
        self.buffer = []
        self.bufferHead = 0
        self.bufferTail = 0
        self.bufferIsEmpty = True
        self.bufferSize = 100
        # Half-size of the bullet sprite; set properly in run()
        self.correctX = 0
        self.correctY = 0
        self.initBuffer()

    def initBuffer(self):
        """Pre-fill the position ring buffer with (0, 0) placeholders."""
        for i in range(0,self.bufferSize):
            self.buffer.append((0,0))

    def createScreen(self):
        # Open a display
        self.srf = pygame.display.set_mode((self.width,self.height))
        pygame.display.set_caption("Game Simulator")
        # Parameters
        self.dt = 1.0/self.fps
        self.loopFlag = True

    def createButtons(self):
        """Create the four UI buttons and index them 0-3 in self.buttons."""
        # Buttons
        self.goal_button = Buttons.Button(self.srf, color = (200,0,0),
            x = 10, y = 10, length = 50, height = 25, width = 0,
            text = "Button_1", text_color = (255,255,255),
            font_size = 20, fade_on = False)
        self.switch_button = Buttons.Button(self.srf, color = (200,0,0),
            x = 60, y = 10, length = 50, height = 25, width = 0,
            text = "Button_2", text_color = (255,255,255),
            font_size = 20, fade_on = False)
        self.follow_button = Buttons.Button(self.srf, color = (200,0,0),
            x = 110, y = 10, length = 50, height = 25, width = 0,
            text = "Button_3", text_color = (255,255,255),
            font_size = 20, fade_on = False)
        self.noise_button = Buttons.Button(self.srf, color = (200,0,0),
            x = 160, y = 10, length = 50, height = 25, width = 0,
            text = "Button_4", text_color = (255,255,255),
            font_size = 20, fade_on = False)
        # Button Dictionary
        self.buttons = {0 : self.goal_button, 1 : self.switch_button,
                        2 : self.follow_button, 3 : self.noise_button}

    def loadBackground(self,filename):
        """Load the background image and cache its rect for blitting."""
        self.backgroundImage = pygame.image.load(filename).convert()
        self.backgroundRect = self.backgroundImage.get_rect()

    def loadImage(self, filename):
        """Load and return an image surface from disk."""
        image = pygame.image.load(filename)
        return image

    def world2screen(self, x, y):
        """World -> pixel coordinates (128 px per world unit, y flipped).

        NOTE(review): uses self.length/2 (not self.height/2) for the
        vertical origin — confirm this offset is intentional.
        """
        return int(self.width/2 + 128*x), int(self.length/2 - 128*y)

    def screen2world(self, x, y):
        """Pixel -> world coordinates; inverse of world2screen."""
        return (float(x - self.width/2)/128), (float(-y + self.length/2)/128)

    def checkEvents(self):
        """Poll pygame events: 'f' fires, arrows adjust angle/power."""
        events = pygame.event.get()
        for e in events:
            if e.type==QUIT:
                pygame.quit()
            elif e.type==KEYDOWN:
                if e.key == K_f:
                    # Fire: convert polar (power, angle) into velocity
                    print "FIRE!!!"
                    self.FIRE = True
                    self.Vox = self.POWER * cos(self.ANGLE)
                    self.Voy = self.POWER * sin(self.ANGLE)
                elif e.key == K_UP:
                    self.ANGLE = self.ANGLE + 0.1
                    print self.POWER, self.ANGLE
                elif e.key == K_DOWN:
                    self.ANGLE = self.ANGLE - 0.1
                    print self.POWER, self.ANGLE
                elif e.key == K_LEFT:
                    self.POWER = self.POWER - 0.1
                    print self.POWER, self.ANGLE
                elif e.key == K_RIGHT:
                    self.POWER = self.POWER + 0.1
                    print self.POWER, self.ANGLE
                else:
                    # NOTE(review): any other key quits; the original
                    # formatting made it ambiguous whether this else
                    # belonged to the key chain or the event-type chain —
                    # confirm against upstream.
                    pygame.quit()

    def updateBackground(self, color = None):
        """Redraw the background: a solid fill if color given, else image."""
        if color is not None:
            self.srf.fill(color)
        else:
            self.srf.blit(self.backgroundImage,self.backgroundRect)

    def updateImage(self,image, position):
        """Blit a sprite and record its position in the trace buffer."""
        self.srf.blit(image,position)
        self.addBuffer(position)

    def getBuffer(self):
        """Return the buffered positions as two sequences (xs, ys)."""
        return zip(*self.buffer)

    def addBuffer(self,newValue):
        """Insert a position into the fixed-size ring buffer.

        Head is the next write slot; when it wraps, tail is reset so the
        oldest entries are overwritten.
        """
        self.buffer[self.bufferHead] = newValue
        if self.bufferHead == self.bufferSize-1:
            # Wrap around to the start of the buffer
            self.bufferHead = 0
            self.bufferTail = 0
        else:
            if self.bufferHead == self.bufferTail and not self.bufferIsEmpty:
                # Buffer full: advance both so tail tracks the oldest entry
                self.bufferHead = self.bufferHead +1
                self.bufferTail = self.bufferHead
            else:
                self.bufferHead = self.bufferHead +1
        # NOTE(review): placed at method level (buffer is non-empty after
        # any insert); the collapsed original made the indentation
        # ambiguous — confirm against upstream.
        self.bufferIsEmpty = False

    def updateTrace(self,x,y,color):
        """Plot every buffered position, offset to the sprite centre."""
        for i in range(0,self.bufferSize):
            self.srf.set_at((x[i]+self.correctX,y[i]+self.correctY),color)

    def run(self):
        # Simulation loop.
        self.clk = pygame.time.Clock()
        self.loadBackground("images/screen.png")
        gun = self.loadImage("images/gun.jpg")
        gunPos = [50,320]
        bullet = self.loadImage("images/bullet.jpg")
        # Half the bullet sprite size, used to centre the trace pixels
        self.correctX, self.correctY = bullet.get_rect().size
        self.correctX = self.correctX/2
        self.correctY = self.correctY/2
        # Bullet start position (muzzle), in world coordinates
        x,y = self.screen2world(230,320)
        while True:
            # Check for events
            self.checkEvents()
            self.updateBackground()
            self.updateImage(gun,gunPos)
            if self.FIRE:
                # Ballistic integration: gravity acts on the y velocity
                self.Voy = self.Voy + self.G* self.dt
                x = x + self.Vox*self.dt
                y = y + self.Voy*self.dt + 0.5*self.G*self.dt**2
                self.updateImage(bullet, self.world2screen(x,y))
            else:
                # Between shots, draw the trace of the last flight
                if self.bufferIsEmpty is False:
                    plotx, ploty = self.getBuffer()
                    self.updateTrace(plotx, ploty,(255,255,255))
            # Stop the flight once the bullet leaves the +-2.5 world box
            if self.FIRE and (y < -2.5 or y >2.5 or x >2.5 or x<-2.5):
                self.FIRE = False
                x,y = self.screen2world(230,320)
                plotx, ploty = self.getBuffer()
            pygame.display.flip()
            # Next simulation step
            self.world.step(self.dt)
            # Try to keep the specified framerate
            self.clk.tick(self.fps)
mit
stevenzhang18/Indeed-Flask
lib/pandas/tests/test_expressions.py
9
16557
# -*- coding: utf-8 -*- from __future__ import print_function # pylint: disable-msg=W0612,E1101 import nose import re from numpy.random import randn import operator import numpy as np from pandas.core.api import DataFrame, Panel from pandas.computation import expressions as expr from pandas import compat from pandas.util.testing import (assert_almost_equal, assert_series_equal, assert_frame_equal, assert_panel_equal, assert_panel4d_equal) import pandas.util.testing as tm from numpy.testing.decorators import slow if not expr._USE_NUMEXPR: try: import numexpr except ImportError: msg = "don't have" else: msg = "not using" raise nose.SkipTest("{0} numexpr".format(msg)) _frame = DataFrame(randn(10000, 4), columns=list('ABCD'), dtype='float64') _frame2 = DataFrame(randn(100, 4), columns = list('ABCD'), dtype='float64') _mixed = DataFrame({ 'A' : _frame['A'].copy(), 'B' : _frame['B'].astype('float32'), 'C' : _frame['C'].astype('int64'), 'D' : _frame['D'].astype('int32') }) _mixed2 = DataFrame({ 'A' : _frame2['A'].copy(), 'B' : _frame2['B'].astype('float32'), 'C' : _frame2['C'].astype('int64'), 'D' : _frame2['D'].astype('int32') }) _integer = DataFrame(np.random.randint(1, 100, size=(10001, 4)), columns = list('ABCD'), dtype='int64') _integer2 = DataFrame(np.random.randint(1, 100, size=(101, 4)), columns=list('ABCD'), dtype='int64') _frame_panel = Panel(dict(ItemA=_frame.copy(), ItemB=(_frame.copy() + 3), ItemC=_frame.copy(), ItemD=_frame.copy())) _frame2_panel = Panel(dict(ItemA=_frame2.copy(), ItemB=(_frame2.copy() + 3), ItemC=_frame2.copy(), ItemD=_frame2.copy())) _integer_panel = Panel(dict(ItemA=_integer, ItemB=(_integer + 34).astype('int64'))) _integer2_panel = Panel(dict(ItemA=_integer2, ItemB=(_integer2 + 34).astype('int64'))) _mixed_panel = Panel(dict(ItemA=_mixed, ItemB=(_mixed + 3))) _mixed2_panel = Panel(dict(ItemA=_mixed2, ItemB=(_mixed2 + 3))) class TestExpressions(tm.TestCase): _multiprocess_can_split_ = False def setUp(self): self.frame = _frame.copy() 
self.frame2 = _frame2.copy() self.mixed = _mixed.copy() self.mixed2 = _mixed2.copy() self.integer = _integer.copy() self._MIN_ELEMENTS = expr._MIN_ELEMENTS def tearDown(self): expr._MIN_ELEMENTS = self._MIN_ELEMENTS @nose.tools.nottest def run_arithmetic_test(self, df, other, assert_func, check_dtype=False, test_flex=True): expr._MIN_ELEMENTS = 0 operations = ['add', 'sub', 'mul', 'mod', 'truediv', 'floordiv', 'pow'] if not compat.PY3: operations.append('div') for arith in operations: operator_name = arith if arith == 'div': operator_name = 'truediv' if test_flex: op = lambda x, y: getattr(df, arith)(y) op.__name__ = arith else: op = getattr(operator, operator_name) expr.set_use_numexpr(False) expected = op(df, other) expr.set_use_numexpr(True) result = op(df, other) try: if check_dtype: if arith == 'truediv': assert expected.dtype.kind == 'f' assert_func(expected, result) except Exception: com.pprint_thing("Failed test with operator %r" % op.__name__) raise def test_integer_arithmetic(self): self.run_arithmetic_test(self.integer, self.integer, assert_frame_equal) self.run_arithmetic_test(self.integer.iloc[:,0], self.integer.iloc[:, 0], assert_series_equal, check_dtype=True) @nose.tools.nottest def run_binary_test(self, df, other, assert_func, test_flex=False, numexpr_ops=set(['gt', 'lt', 'ge', 'le', 'eq', 'ne'])): """ tests solely that the result is the same whether or not numexpr is enabled. Need to test whether the function does the correct thing elsewhere. """ expr._MIN_ELEMENTS = 0 expr.set_test_mode(True) operations = ['gt', 'lt', 'ge', 'le', 'eq', 'ne'] for arith in operations: if test_flex: op = lambda x, y: getattr(df, arith)(y) op.__name__ = arith else: op = getattr(operator, arith) expr.set_use_numexpr(False) expected = op(df, other) expr.set_use_numexpr(True) expr.get_test_result() result = op(df, other) used_numexpr = expr.get_test_result() try: if arith in numexpr_ops: assert used_numexpr, "Did not use numexpr as expected." 
else: assert not used_numexpr, "Used numexpr unexpectedly." assert_func(expected, result) except Exception: com.pprint_thing("Failed test with operation %r" % arith) com.pprint_thing("test_flex was %r" % test_flex) raise def run_frame(self, df, other, binary_comp=None, run_binary=True, **kwargs): self.run_arithmetic_test(df, other, assert_frame_equal, test_flex=False, **kwargs) self.run_arithmetic_test(df, other, assert_frame_equal, test_flex=True, **kwargs) if run_binary: if binary_comp is None: expr.set_use_numexpr(False) binary_comp = other + 1 expr.set_use_numexpr(True) self.run_binary_test(df, binary_comp, assert_frame_equal, test_flex=False, **kwargs) self.run_binary_test(df, binary_comp, assert_frame_equal, test_flex=True, **kwargs) def run_series(self, ser, other, binary_comp=None, **kwargs): self.run_arithmetic_test(ser, other, assert_series_equal, test_flex=False, **kwargs) self.run_arithmetic_test(ser, other, assert_almost_equal, test_flex=True, **kwargs) # series doesn't uses vec_compare instead of numexpr... 
# if binary_comp is None: # binary_comp = other + 1 # self.run_binary_test(ser, binary_comp, assert_frame_equal, test_flex=False, # **kwargs) # self.run_binary_test(ser, binary_comp, assert_frame_equal, test_flex=True, # **kwargs) def run_panel(self, panel, other, binary_comp=None, run_binary=True, assert_func=assert_panel_equal, **kwargs): self.run_arithmetic_test(panel, other, assert_func, test_flex=False, **kwargs) self.run_arithmetic_test(panel, other, assert_func, test_flex=True, **kwargs) if run_binary: if binary_comp is None: binary_comp = other + 1 self.run_binary_test(panel, binary_comp, assert_func, test_flex=False, **kwargs) self.run_binary_test(panel, binary_comp, assert_func, test_flex=True, **kwargs) def test_integer_arithmetic_frame(self): self.run_frame(self.integer, self.integer) def test_integer_arithmetic_series(self): self.run_series(self.integer.iloc[:, 0], self.integer.iloc[:, 0]) @slow def test_integer_panel(self): self.run_panel(_integer2_panel, np.random.randint(1, 100)) def test_float_arithemtic_frame(self): self.run_frame(self.frame2, self.frame2) def test_float_arithmetic_series(self): self.run_series(self.frame2.iloc[:, 0], self.frame2.iloc[:, 0]) @slow def test_float_panel(self): self.run_panel(_frame2_panel, np.random.randn() + 0.1, binary_comp=0.8) @slow def test_panel4d(self): self.run_panel(tm.makePanel4D(), np.random.randn() + 0.5, assert_func=assert_panel4d_equal, binary_comp=3) def test_mixed_arithmetic_frame(self): # TODO: FIGURE OUT HOW TO GET IT TO WORK... 
# can't do arithmetic because comparison methods try to do *entire* # frame instead of by-column self.run_frame(self.mixed2, self.mixed2, run_binary=False) def test_mixed_arithmetic_series(self): for col in self.mixed2.columns: self.run_series(self.mixed2[col], self.mixed2[col], binary_comp=4) @slow def test_mixed_panel(self): self.run_panel(_mixed2_panel, np.random.randint(1, 100), binary_comp=-2) def test_float_arithemtic(self): self.run_arithmetic_test(self.frame, self.frame, assert_frame_equal) self.run_arithmetic_test(self.frame.iloc[:, 0], self.frame.iloc[:, 0], assert_series_equal, check_dtype=True) def test_mixed_arithmetic(self): self.run_arithmetic_test(self.mixed, self.mixed, assert_frame_equal) for col in self.mixed.columns: self.run_arithmetic_test(self.mixed[col], self.mixed[col], assert_series_equal) def test_integer_with_zeros(self): self.integer *= np.random.randint(0, 2, size=np.shape(self.integer)) self.run_arithmetic_test(self.integer, self.integer, assert_frame_equal) self.run_arithmetic_test(self.integer.iloc[:, 0], self.integer.iloc[:, 0], assert_series_equal) def test_invalid(self): # no op result = expr._can_use_numexpr(operator.add, None, self.frame, self.frame, 'evaluate') self.assertFalse(result) # mixed result = expr._can_use_numexpr(operator.add, '+', self.mixed, self.frame, 'evaluate') self.assertFalse(result) # min elements result = expr._can_use_numexpr(operator.add, '+', self.frame2, self.frame2, 'evaluate') self.assertFalse(result) # ok, we only check on first part of expression result = expr._can_use_numexpr(operator.add, '+', self.frame, self.frame2, 'evaluate') self.assertTrue(result) def test_binary_ops(self): def testit(): for f, f2 in [ (self.frame, self.frame2), (self.mixed, self.mixed2) ]: for op, op_str in [('add','+'),('sub','-'),('mul','*'),('div','/'),('pow','**')]: if op == 'div': op = getattr(operator, 'truediv', None) else: op = getattr(operator, op, None) if op is not None: result = expr._can_use_numexpr(op, 
op_str, f, f, 'evaluate') self.assertNotEqual(result, f._is_mixed_type) result = expr.evaluate(op, op_str, f, f, use_numexpr=True) expected = expr.evaluate(op, op_str, f, f, use_numexpr=False) tm.assert_numpy_array_equal(result,expected.values) result = expr._can_use_numexpr(op, op_str, f2, f2, 'evaluate') self.assertFalse(result) expr.set_use_numexpr(False) testit() expr.set_use_numexpr(True) expr.set_numexpr_threads(1) testit() expr.set_numexpr_threads() testit() def test_boolean_ops(self): def testit(): for f, f2 in [ (self.frame, self.frame2), (self.mixed, self.mixed2) ]: f11 = f f12 = f + 1 f21 = f2 f22 = f2 + 1 for op, op_str in [('gt','>'),('lt','<'),('ge','>='),('le','<='),('eq','=='),('ne','!=')]: op = getattr(operator,op) result = expr._can_use_numexpr(op, op_str, f11, f12, 'evaluate') self.assertNotEqual(result, f11._is_mixed_type) result = expr.evaluate(op, op_str, f11, f12, use_numexpr=True) expected = expr.evaluate(op, op_str, f11, f12, use_numexpr=False) tm.assert_numpy_array_equal(result,expected.values) result = expr._can_use_numexpr(op, op_str, f21, f22, 'evaluate') self.assertFalse(result) expr.set_use_numexpr(False) testit() expr.set_use_numexpr(True) expr.set_numexpr_threads(1) testit() expr.set_numexpr_threads() testit() def test_where(self): def testit(): for f in [ self.frame, self.frame2, self.mixed, self.mixed2 ]: for cond in [ True, False ]: c = np.empty(f.shape,dtype=np.bool_) c.fill(cond) result = expr.where(c, f.values, f.values+1) expected = np.where(c, f.values, f.values+1) tm.assert_numpy_array_equal(result,expected) expr.set_use_numexpr(False) testit() expr.set_use_numexpr(True) expr.set_numexpr_threads(1) testit() expr.set_numexpr_threads() testit() def test_bool_ops_raise_on_arithmetic(self): df = DataFrame({'a': np.random.rand(10) > 0.5, 'b': np.random.rand(10) > 0.5}) names = 'div', 'truediv', 'floordiv', 'pow' ops = '/', '/', '//', '**' msg = 'operator %r not implemented for bool dtypes' for op, name in zip(ops, names): if not 
compat.PY3 or name != 'div': f = getattr(operator, name) err_msg = re.escape(msg % op) with tm.assertRaisesRegexp(NotImplementedError, err_msg): f(df, df) with tm.assertRaisesRegexp(NotImplementedError, err_msg): f(df.a, df.b) with tm.assertRaisesRegexp(NotImplementedError, err_msg): f(df.a, True) with tm.assertRaisesRegexp(NotImplementedError, err_msg): f(False, df.a) with tm.assertRaisesRegexp(TypeError, err_msg): f(False, df) with tm.assertRaisesRegexp(TypeError, err_msg): f(df, True) def test_bool_ops_warn_on_arithmetic(self): n = 10 df = DataFrame({'a': np.random.rand(n) > 0.5, 'b': np.random.rand(n) > 0.5}) names = 'add', 'mul', 'sub' ops = '+', '*', '-' subs = {'+': '|', '*': '&', '-': '^'} sub_funcs = {'|': 'or_', '&': 'and_', '^': 'xor'} for op, name in zip(ops, names): f = getattr(operator, name) fe = getattr(operator, sub_funcs[subs[op]]) with tm.use_numexpr(True, min_elements=5): with tm.assert_produces_warning(check_stacklevel=False): r = f(df, df) e = fe(df, df) tm.assert_frame_equal(r, e) with tm.assert_produces_warning(check_stacklevel=False): r = f(df.a, df.b) e = fe(df.a, df.b) tm.assert_series_equal(r, e) with tm.assert_produces_warning(check_stacklevel=False): r = f(df.a, True) e = fe(df.a, True) tm.assert_series_equal(r, e) with tm.assert_produces_warning(check_stacklevel=False): r = f(False, df.a) e = fe(False, df.a) tm.assert_series_equal(r, e) with tm.assert_produces_warning(check_stacklevel=False): r = f(False, df) e = fe(False, df) tm.assert_frame_equal(r, e) with tm.assert_produces_warning(check_stacklevel=False): r = f(df, True) e = fe(df, True) tm.assert_frame_equal(r, e) if __name__ == '__main__': import nose nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], exit=False)
apache-2.0
drusk/pml
pml/unsupervised/clustering.py
1
11112
# Copyright (C) 2012 David Rusk
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Clustering algorithms for unsupervised learning tasks.

@author: drusk
"""

import itertools
import random

import pandas as pd

from pml.data import model
from pml.utils.errors import UnlabelledDataSetError
from pml.utils.distance_utils import euclidean
from pml.utils.pandas_util import are_dataframes_equal


class ClusteredDataSet(model.DataSet):
    """
    A collection of data which has been analysed by a clustering algorithm.

    It contains both the original DataSet and the results of the clustering.
    It provides methods for analysing these clustering results.
    """

    def __init__(self, dataset, cluster_assignments):
        """
        Creates a new ClusteredDataSet.

        Args:
          dataset: model.DataSet
            A dataset which does not have cluster assignments.
          cluster_assignments: pandas.Series
            A Series with the cluster assignment for each sample in the
            dataset.
        """
        super(ClusteredDataSet, self).__init__(dataset.get_data_frame(),
                                               dataset.get_labels())
        self.cluster_assignments = cluster_assignments

    def get_cluster_assignments(self):
        """
        Retrieves the cluster assignments produced for this dataset by a
        clustering algorithm.

        Returns:
          A pandas Series.  It contains the index of the original dataset
          with a numerical value representing the cluster it is a part of.
        """
        return self.cluster_assignments

    def calculate_purity(self):
        """
        Calculate the purity, a measurement of quality for the clustering
        results.

        Each cluster is assigned to the class which is most frequent in the
        cluster.  Using these classes, the percent accuracy is then
        calculated.

        Returns:
          A number between 0 and 1.  Poor clusterings have a purity close to
          0 while a perfect clustering has a purity of 1.

        Raises:
          UnlabelledDataSetError if the dataset is not labelled.
        """
        if not self.is_labelled():
            raise UnlabelledDataSetError()

        # The set of unique cluster ids.
        clusters = set(self.cluster_assignments.values)

        # Determine the most frequent class within each cluster.
        cluster_classes = {}
        for cluster in clusters:
            # Indices of the samples assigned to this cluster.
            indices = self.cluster_assignments[
                self.cluster_assignments == cluster].index

            # Labels restricted to the samples in this cluster; the most
            # common one becomes the cluster's class.
            cluster_labels = self.labels[indices]
            cluster_classes[cluster] = cluster_labels.value_counts().idxmax()

        def get_label(cluster):
            """Get the label for a sample based on its cluster."""
            return cluster_classes[cluster]

        # Labels as implied by the clustering (each sample takes its
        # cluster's dominant label).
        labels_by_clustering = self.cluster_assignments.map(get_label)

        # Purity is the fraction of samples whose clustering-implied label
        # agrees with the true label.
        num_agreed = sum(1 for ind in labels_by_clustering.index
                         if labels_by_clustering[ind] == self.labels[ind])

        return float(num_agreed) / labels_by_clustering.size

    def calculate_rand_index(self):
        """
        Calculate the Rand index, a measurement of quality for the
        clustering results.  It is essentially the percent accuracy of the
        clustering.

        The clustering is viewed as a series of decisions.  There are
        N*(N-1)/2 pairs of samples in the dataset to be considered.  The
        decision is considered correct if the pairs have the same label and
        are in the same cluster, or have different labels and are in
        different clusters.  The number of correct decisions divided by the
        total number of decisions gives the Rand index, or accuracy.

        Returns:
          The accuracy, a number between 0 and 1.  The closer to 1, the
          better the clustering.

        Raises:
          UnlabelledDataSetError if the dataset is not labelled.
        """
        if not self.is_labelled():
            raise UnlabelledDataSetError()

        correct = 0
        total = 0

        for index1, index2 in itertools.combinations(self.get_sample_ids(),
                                                     2):
            same_class = (self.labels[index1] == self.labels[index2])
            same_cluster = (self.cluster_assignments[index1] ==
                            self.cluster_assignments[index2])

            # A decision is correct when class agreement and cluster
            # agreement coincide.
            if same_class and same_cluster:
                correct += 1
            elif not same_class and not same_cluster:
                correct += 1

            total += 1

        return float(correct) / total


def create_random_centroids(dataset, k):
    """
    Initializes centroids at random positions.

    The random value chosen for each feature will always be limited to the
    range of values found in the dataset.  For example, if a certain feature
    has a minimum value of 0 in the dataset, and maximum value of 9, the
    value chosen will be between 0 and 9.

    Args:
      dataset: DataSet
        The DataSet to create the random centroids for.
      k: int
        The number of centroids to create.

    Returns:
      A list of centroids.  Each centroid is a pandas Series with the same
      labels as the dataset's headers.
    """
    # NOTE: zip() must be materialised here.  Under Python 3 it returns a
    # one-shot iterator, and it is consumed once per centroid in the list
    # comprehension below -- without list(), every centroid after the first
    # would be built from an empty sequence.
    min_maxs = list(zip(dataset.reduce_features(min).values,
                        dataset.reduce_features(max).values))

    def rand_range(range_tuple):
        """
        Generates a random floating point number in the range specified by
        the tuple.
        """
        return random.uniform(range_tuple[0], range_tuple[1])

    return [pd.Series([rand_range(bounds) for bounds in min_maxs],
                      index=dataset.feature_list(), name=i)
            for i in range(k)]


def kmeans(dataset, k=2, distance=euclidean, centroids=None):
    """
    K-means clustering algorithm.

    This algorithm partitions a dataset into k clusters in which each
    observation (sample) belongs to the cluster with the nearest mean.

    Args:
      dataset: model.DataSet
        The DataSet to perform the clustering on.
      k: int
        The number of clusters to partition the dataset into.
      distance: function
        The distance measure used to compare samples with centroids.
        Defaults to Euclidean distance.
      centroids: list of pandas Series
        The initial centroids for the clusters.  Defaults to None in which
        case they are selected randomly.

    Returns:
      A ClusteredDataSet which contains the cluster assignments as well as
      the original data.  In the cluster assignments, each sample index is
      assigned a numerical value representing the cluster it is part of.
    """
    # If dataset is not already a model.DataSet object, make it one.
    dataset = model.as_dataset(dataset)

    # Initialize k centroids.
    if centroids is None:
        centroids = create_random_centroids(dataset, k)

    # Iteratively compute best clusters until they stabilize.
    assignments = None
    clusters_changed = True
    while clusters_changed:
        centroids, new_assignments = _compute_iteration(dataset, centroids,
                                                        distance)
        if are_dataframes_equal(new_assignments, assignments):
            clusters_changed = False
        assignments = new_assignments

    return ClusteredDataSet(dataset, assignments)


def _get_distances_to_centroids(dataset, centroids, distance_measure):
    """
    Calculates the distance from each data point to each centroid.

    Args:
      dataset: model.DataSet
        The DataSet whose samples are being measured.
      centroids: list of pandas Series
        The centroids to compare each data point with.
      distance_measure: function
        Computes the distance between a sample and a centroid.

    Returns:
      A pandas DataFrame with a row for each sample in dataset and a column
      for the distance to each centroid.
    """
    distances = {}
    for i, centroid in enumerate(centroids):
        def calc_distance(sample):
            return distance_measure(sample, centroid)
        distances[i] = dataset.reduce_rows(calc_distance)

    # Each dictionary entry is interpreted as a column.
    return pd.DataFrame(distances)


def _compute_iteration(dataset, centroids, distance_measure):
    """
    Computes an iteration of the k-means algorithm.

    Args:
      dataset: model.DataSet
        The dataset being clustered.
      centroids: list of pandas Series
        The current centroids at the start of the iteration.
      distance_measure: function
        Computes the distance between a sample and a centroid.

    Returns:
      new_centroids: list of pandas Series
        The updated centroids.
      cluster_assignments: pandas Series
        The current cluster assignments for each sample.
    """
    # Calculate distance from each data point to each centroid.
    distances = _get_distances_to_centroids(dataset, centroids,
                                            distance_measure)

    # Find each datapoint's nearest centroid.
    cluster_assignments = distances.idxmin(axis=1)

    def nearest_centroid(sample_index):
        return cluster_assignments[sample_index]

    # Calculate mean position of datapoints in each centroid's cluster.
    new_centroids = dataset.get_data_frame().groupby(nearest_centroid).mean()

    # XXX turning each row in dataframe into a series... refactor!
    list_of_series = [new_centroids.ix[ind] for ind in new_centroids.index]
    return list_of_series, cluster_assignments
mit
r39132/airflow
tests/contrib/operators/test_hive_to_dynamodb_operator.py
7
5053
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""Tests for HiveToDynamoDBTransferOperator using a moto-mocked DynamoDB."""

import json
import unittest
import datetime

import mock
import pandas as pd

from airflow import configuration, DAG
from airflow.contrib.hooks.aws_dynamodb_hook import AwsDynamoDBHook
import airflow.contrib.operators.hive_to_dynamodb

configuration.load_test_config()

# Fixed execution date shared by the test DAG and derived date strings.
DEFAULT_DATE = datetime.datetime(2015, 1, 1)
DEFAULT_DATE_ISO = DEFAULT_DATE.isoformat()
DEFAULT_DATE_DS = DEFAULT_DATE_ISO[:10]

# moto is an optional test-only dependency; tests below are skipped when it
# is not installed (mock_dynamodb2 stays None).
try:
    from moto import mock_dynamodb2
except ImportError:
    mock_dynamodb2 = None


class HiveToDynamoDBTransferOperatorTest(unittest.TestCase):
    """Exercises the Hive -> DynamoDB transfer with HiveServer2 mocked out."""

    def setUp(self):
        """Build a minimal DAG and a DynamoDB hook for each test."""
        configuration.load_test_config()
        args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
        dag = DAG('test_dag_id', default_args=args)
        self.dag = dag
        # The SQL is never executed for real; get_pandas_df is patched.
        self.sql = 'SELECT 1'
        self.hook = AwsDynamoDBHook(
            aws_conn_id='aws_default', region_name='us-east-1')

    @staticmethod
    def process_data(data, *args, **kwargs):
        """Pre-process callback: DataFrame -> list of record dicts."""
        return json.loads(data.to_json(orient='records'))

    @unittest.skipIf(mock_dynamodb2 is None, 'mock_dynamodb2 package not present')
    @mock_dynamodb2
    def test_get_conn_returns_a_boto3_connection(self):
        """The hook should hand back a usable (mocked) boto3 resource."""
        hook = AwsDynamoDBHook(aws_conn_id='aws_default')
        self.assertIsNotNone(hook.get_conn())

    @mock.patch('airflow.hooks.hive_hooks.HiveServer2Hook.get_pandas_df',
                return_value=pd.DataFrame(data=[('1', 'sid')], columns=['id', 'name']))
    @unittest.skipIf(mock_dynamodb2 is None, 'mock_dynamodb2 package not present')
    @mock_dynamodb2
    def test_get_records_with_schema(self, get_results_mock):
        """A single Hive row should end up as one DynamoDB item."""
        # this table needs to be created in production
        self.hook.get_conn().create_table(
            TableName='test_airflow',
            KeySchema=[
                {
                    'AttributeName': 'id',
                    'KeyType': 'HASH'
                },
            ],
            AttributeDefinitions=[
                {
                    'AttributeName': 'name',
                    'AttributeType': 'S'
                }
            ],
            ProvisionedThroughput={
                'ReadCapacityUnits': 10,
                'WriteCapacityUnits': 10
            }
        )

        operator = airflow.contrib.operators.hive_to_dynamodb.HiveToDynamoDBTransferOperator(
            sql=self.sql,
            table_name="test_airflow",
            task_id='hive_to_dynamodb_check',
            table_keys=['id'],
            dag=self.dag)

        operator.execute(None)

        table = self.hook.get_conn().Table('test_airflow')
        # Wait until the mocked table reports itself as existing before
        # asserting on its contents.
        table.meta.client.get_waiter(
            'table_exists').wait(TableName='test_airflow')
        self.assertEqual(table.item_count, 1)

    @mock.patch('airflow.hooks.hive_hooks.HiveServer2Hook.get_pandas_df',
                return_value=pd.DataFrame(data=[('1', 'sid'), ('1', 'gupta')],
                                          columns=['id', 'name']))
    @unittest.skipIf(mock_dynamodb2 is None, 'mock_dynamodb2 package not present')
    @mock_dynamodb2
    def test_pre_process_records_with_schema(self, get_results_mock):
        """Two rows sharing a hash key collapse to one item after pre_process."""
        # this table needs to be created in production
        self.hook.get_conn().create_table(
            TableName='test_airflow',
            KeySchema=[
                {
                    'AttributeName': 'id',
                    'KeyType': 'HASH'
                },
            ],
            AttributeDefinitions=[
                {
                    'AttributeName': 'name',
                    'AttributeType': 'S'
                }
            ],
            ProvisionedThroughput={
                'ReadCapacityUnits': 10,
                'WriteCapacityUnits': 10
            }
        )

        operator = airflow.contrib.operators.hive_to_dynamodb.HiveToDynamoDBTransferOperator(
            sql=self.sql,
            table_name='test_airflow',
            task_id='hive_to_dynamodb_check',
            table_keys=['id'],
            pre_process=self.process_data,
            dag=self.dag)

        operator.execute(None)

        table = self.hook.get_conn().Table('test_airflow')
        table.meta.client.get_waiter('table_exists').wait(TableName='test_airflow')
        self.assertEqual(table.item_count, 1)


if __name__ == '__main__':
    unittest.main()
apache-2.0
jiangzhonglian/MachineLearning
src/py2.x/ml/6.SVM/svm-complete_Non-Kernel.py
1
13440
#!/usr/bin/python # coding:utf8 """ Created on Nov 4, 2010 Update on 2017-05-18 Chapter 5 source file for Machine Learing in Action Author: Peter/geekidentity/片刻 GitHub: https://github.com/apachecn/AiLearning """ from __future__ import print_function from numpy import * import matplotlib.pyplot as plt class optStruct: def __init__(self, dataMatIn, classLabels, C, toler): # Initialize the structure with the parameters self.X = dataMatIn self.labelMat = classLabels self.C = C self.tol = toler self.m = shape(dataMatIn)[0] self.alphas = mat(zeros((self.m, 1))) self.b = 0 self.eCache = mat(zeros((self.m, 2))) # first column is valid flag def loadDataSet(fileName): """loadDataSet(对文件进行逐行解析,从而得到第行的类标签和整个数据矩阵) Args: fileName 文件名 Returns: dataMat 数据矩阵 labelMat 类标签 """ dataMat = [] labelMat = [] fr = open(fileName) for line in fr.readlines(): lineArr = line.strip().split('\t') dataMat.append([float(lineArr[0]), float(lineArr[1])]) labelMat.append(float(lineArr[2])) return dataMat, labelMat def selectJrand(i, m): """ 随机选择一个整数 Args: i 第一个alpha的下标 m 所有alpha的数目 Returns: j 返回一个不为i的随机数,在0~m之间的整数值 """ j = i while j == i: j = int(random.uniform(0, m)) return j def clipAlpha(aj, H, L): """clipAlpha(调整aj的值,使aj处于 L<=aj<=H) Args: aj 目标值 H 最大值 L 最小值 Returns: aj 目标值 """ if aj > H: aj = H if L > aj: aj = L return aj def calcEk(oS, k): """calcEk(求 Ek误差:预测值-真实值的差) 该过程在完整版的SMO算法中陪出现次数较多,因此将其单独作为一个方法 Args: oS optStruct对象 k 具体的某一行 Returns: Ek 预测结果与真实结果比对,计算误差Ek """ fXk = float(multiply(oS.alphas, oS.labelMat).T * (oS.X * oS.X[k, :].T)) + oS.b Ek = fXk - float(oS.labelMat[k]) return Ek def selectJ(i, oS, Ei): # this is the second choice -heurstic, and calcs Ej """selectJ(返回最优的j和Ej) 内循环的启发式方法。 选择第二个(内循环)alpha的alpha值 这里的目标是选择合适的第二个alpha值以保证每次优化中采用最大步长。 该函数的误差与第一个alpha值Ei和下标i有关。 Args: i 具体的第i一行 oS optStruct对象 Ei 预测结果与真实结果比对,计算误差Ei Returns: j 随机选出的第j一行 Ej 预测结果与真实结果比对,计算误差Ej """ maxK = -1 maxDeltaE = 0 Ej = 0 # 首先将输入值Ei在缓存中设置成为有效的。这里的有效意味着它已经计算好了。 oS.eCache[i] = [1, Ei] # print 'oS.eCache[%s]=%s' % 
(i, oS.eCache[i]) # print 'oS.eCache[:, 0].A=%s' % oS.eCache[:, 0].A.T # """ # # 返回非0的:行列值 # nonzero(oS.eCache[:, 0].A)= ( # 行: array([ 0, 2, 4, 5, 8, 10, 17, 18, 20, 21, 23, 25, 26, 29, 30, 39, 46,52, 54, 55, 62, 69, 70, 76, 79, 82, 94, 97]), # 列: array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0, 0, 0]) # ) # """ # print 'nonzero(oS.eCache[:, 0].A)=', nonzero(oS.eCache[:, 0].A) # # 取行的list # print 'nonzero(oS.eCache[:, 0].A)[0]=', nonzero(oS.eCache[:, 0].A)[0] # 非零E值的行的list列表,所对应的alpha值 validEcacheList = nonzero(oS.eCache[:, 0].A)[0] if (len(validEcacheList)) > 1: for k in validEcacheList: # 在所有的值上进行循环,并选择其中使得改变最大的那个值 if k == i: continue # don't calc for i, waste of time # 求 Ek误差:预测值-真实值的差 Ek = calcEk(oS, k) deltaE = abs(Ei - Ek) if (deltaE > maxDeltaE): maxK = k maxDeltaE = deltaE Ej = Ek return maxK, Ej else: # 如果是第一次循环,则随机选择一个alpha值 j = selectJrand(i, oS.m) # 求 Ek误差:预测值-真实值的差 Ej = calcEk(oS, j) return j, Ej def updateEk(oS, k): # after any alpha has changed update the new value in the cache """updateEk(计算误差值并存入缓存中。) 在对alpha值进行优化之后会用到这个值。 Args: oS optStruct对象 k 某一列的行号 """ # 求 误差:预测值-真实值的差 Ek = calcEk(oS, k) oS.eCache[k] = [1, Ek] def innerL(i, oS): """innerL 内循环代码 Args: i 具体的某一行 oS optStruct对象 Returns: 0 找不到最优的值 1 找到了最优的值,并且oS.Cache到缓存中 """ # 求 Ek误差:预测值-真实值的差 Ei = calcEk(oS, i) # 约束条件 (KKT条件是解决最优化问题的时用到的一种方法。我们这里提到的最优化问题通常是指对于给定的某一函数,求其在指定作用域上的全局最小值) # 0<=alphas[i]<=C,但由于0和C是边界值,我们无法进行优化,因为需要增加一个alphas和降低一个alphas。 # 表示发生错误的概率:labelMat[i]*Ei 如果超出了 toler, 才需要优化。至于正负号,我们考虑绝对值就对了。 ''' # 检验训练样本(xi, yi)是否满足KKT条件 yi*f(i) >= 1 and alpha = 0 (outside the boundary) yi*f(i) == 1 and 0<alpha< C (on the boundary) yi*f(i) <= 1 and alpha = C (between the boundary) ''' if ((oS.labelMat[i] * Ei < -oS.tol) and (oS.alphas[i] < oS.C)) or ((oS.labelMat[i] * Ei > oS.tol) and (oS.alphas[i] > 0)): # 选择最大的误差对应的j进行优化。效果更明显 j, Ej = selectJ(i, oS, Ei) alphaIold = oS.alphas[i].copy() alphaJold = oS.alphas[j].copy() # 
L和H用于将alphas[j]调整到0-C之间。如果L==H,就不做任何改变,直接return 0 if (oS.labelMat[i] != oS.labelMat[j]): L = max(0, oS.alphas[j] - oS.alphas[i]) H = min(oS.C, oS.C + oS.alphas[j] - oS.alphas[i]) else: L = max(0, oS.alphas[j] + oS.alphas[i] - oS.C) H = min(oS.C, oS.alphas[j] + oS.alphas[i]) if L == H: print("L==H") return 0 # eta是alphas[j]的最优修改量,如果eta==0,需要退出for循环的当前迭代过程 # 参考《统计学习方法》李航-P125~P128<序列最小最优化算法> eta = 2.0 * oS.X[i, :] * oS.X[j, :].T - oS.X[i, :] * oS.X[i, :].T - oS.X[j, :] * oS.X[j, :].T if eta >= 0: print("eta>=0") return 0 # 计算出一个新的alphas[j]值 oS.alphas[j] -= oS.labelMat[j] * (Ei - Ej) / eta # 并使用辅助函数,以及L和H对其进行调整 oS.alphas[j] = clipAlpha(oS.alphas[j], H, L) # 更新误差缓存 updateEk(oS, j) # 检查alpha[j]是否只是轻微的改变,如果是的话,就退出for循环。 if (abs(oS.alphas[j] - alphaJold) < 0.00001): print("j not moving enough") return 0 # 然后alphas[i]和alphas[j]同样进行改变,虽然改变的大小一样,但是改变的方向正好相反 oS.alphas[i] += oS.labelMat[j] * oS.labelMat[i] * (alphaJold - oS.alphas[j]) # 更新误差缓存 updateEk(oS, i) # 在对alpha[i], alpha[j] 进行优化之后,给这两个alpha值设置一个常数b。 # w= Σ[1~n] ai*yi*xi => b = yj Σ[1~n] ai*yi(xi*xj) # 所以: b1 - b = (y1-y) - Σ[1~n] yi*(a1-a)*(xi*x1) # 为什么减2遍? 
因为是 减去Σ[1~n],正好2个变量i和j,所以减2遍 b1 = oS.b - Ei - oS.labelMat[i] * (oS.alphas[i] - alphaIold) * oS.X[i, :] * oS.X[i, :].T - oS.labelMat[j] * (oS.alphas[j] - alphaJold) * oS.X[i, :] * oS.X[j, :].T b2 = oS.b - Ej - oS.labelMat[i] * (oS.alphas[i] - alphaIold) * oS.X[i, :] * oS.X[j, :].T - oS.labelMat[j] * (oS.alphas[j] - alphaJold) * oS.X[j, :] * oS.X[j, :].T if (0 < oS.alphas[i]) and (oS.C > oS.alphas[i]): oS.b = b1 elif (0 < oS.alphas[j]) and (oS.C > oS.alphas[j]): oS.b = b2 else: oS.b = (b1 + b2) / 2.0 return 1 else: return 0 def smoP(dataMatIn, classLabels, C, toler, maxIter): """ 完整SMO算法外循环,与smoSimple有些类似,但这里的循环退出条件更多一些 Args: dataMatIn 数据集 classLabels 类别标签 C 松弛变量(常量值),允许有些数据点可以处于分隔面的错误一侧。 控制最大化间隔和保证大部分的函数间隔小于1.0这两个目标的权重。 可以通过调节该参数达到不同的结果。 toler 容错率 maxIter 退出前最大的循环次数 Returns: b 模型的常量值 alphas 拉格朗日乘子 """ # 创建一个 optStruct 对象 oS = optStruct(mat(dataMatIn), mat(classLabels).transpose(), C, toler) iter = 0 entireSet = True alphaPairsChanged = 0 # 循环遍历:循环maxIter次 并且 (alphaPairsChanged存在可以改变 or 所有行遍历一遍) # 循环迭代结束 或者 循环遍历所有alpha后,alphaPairs还是没变化 while (iter < maxIter) and ((alphaPairsChanged > 0) or (entireSet)): alphaPairsChanged = 0 # 当entireSet=true or 非边界alpha对没有了;就开始寻找 alpha对,然后决定是否要进行else。 if entireSet: # 在数据集上遍历所有可能的alpha for i in range(oS.m): # 是否存在alpha对,存在就+1 alphaPairsChanged += innerL(i, oS) print("fullSet, iter: %d i:%d, pairs changed %d" % (iter, i, alphaPairsChanged)) iter += 1 # 对已存在 alpha对,选出非边界的alpha值,进行优化。 else: # 遍历所有的非边界alpha值,也就是不在边界0或C上的值。 nonBoundIs = nonzero((oS.alphas.A > 0) * (oS.alphas.A < C))[0] for i in nonBoundIs: alphaPairsChanged += innerL(i, oS) print("non-bound, iter: %d i:%d, pairs changed %d" % (iter, i, alphaPairsChanged)) iter += 1 # 如果找到alpha对,就优化非边界alpha值,否则,就重新进行寻找,如果寻找一遍 遍历所有的行还是没找到,就退出循环。 if entireSet: entireSet = False # toggle entire set loop elif (alphaPairsChanged == 0): entireSet = True print("iteration number: %d" % iter) return oS.b, oS.alphas def calcWs(alphas, dataArr, classLabels): """ 基于alpha计算w值 Args: alphas 拉格朗日乘子 
dataArr feature数据集 classLabels 目标变量数据集 Returns: wc 回归系数 """ X = mat(dataArr) labelMat = mat(classLabels).transpose() m, n = shape(X) w = zeros((n, 1)) for i in range(m): w += multiply(alphas[i] * labelMat[i], X[i, :].T) return w def plotfig_SVM(xArr, yArr, ws, b, alphas): """ 参考地址: http://blog.csdn.net/maoersong/article/details/24315633 http://www.cnblogs.com/JustForCS/p/5283489.html http://blog.csdn.net/kkxgx/article/details/6951959 """ xMat = mat(xArr) yMat = mat(yArr) # b原来是矩阵,先转为数组类型后其数组大小为(1,1),所以后面加[0],变为(1,) b = array(b)[0] fig = plt.figure() ax = fig.add_subplot(111) # 注意flatten的用法 ax.scatter(xMat[:, 0].flatten().A[0], xMat[:, 1].flatten().A[0]) # x最大值,最小值根据原数据集dataArr[:, 0]的大小而定 x = arange(-1.0, 10.0, 0.1) # 根据x.w + b = 0 得到,其式子展开为w0.x1 + w1.x2 + b = 0, x2就是y值 y = (-b-ws[0, 0]*x)/ws[1, 0] ax.plot(x, y) for i in range(shape(yMat[0, :])[1]): if yMat[0, i] > 0: ax.plot(xMat[i, 0], xMat[i, 1], 'cx') else: ax.plot(xMat[i, 0], xMat[i, 1], 'kp') # 找到支持向量,并在图中标红 for i in range(100): if alphas[i] > 0.0: ax.plot(xMat[i, 0], xMat[i, 1], 'ro') plt.show() if __name__ == "__main__": # 获取特征和目标变量 dataArr, labelArr = loadDataSet('data/6.SVM/testSet.txt') # print labelArr # b是常量值, alphas是拉格朗日乘子 b, alphas = smoP(dataArr, labelArr, 0.6, 0.001, 40) print('/n/n/n') print('b=', b) print('alphas[alphas>0]=', alphas[alphas > 0]) print('shape(alphas[alphas > 0])=', shape(alphas[alphas > 0])) for i in range(100): if alphas[i] > 0: print(dataArr[i], labelArr[i]) # 画图 ws = calcWs(alphas, dataArr, labelArr) plotfig_SVM(dataArr, labelArr, ws, b, alphas)
gpl-3.0
Titan-C/sympy
sympy/physics/quantum/circuitplot.py
6
12937
"""Matplotlib based plotting of quantum circuits. Todo: * Optimize printing of large circuits. * Get this to work with single gates. * Do a better job checking the form of circuits to make sure it is a Mul of Gates. * Get multi-target gates plotting. * Get initial and final states to plot. * Get measurements to plot. Might need to rethink measurement as a gate issue. * Get scale and figsize to be handled in a better way. * Write some tests/examples! """ from __future__ import print_function, division from sympy import Mul from sympy.core.compatibility import u, range from sympy.external import import_module from sympy.physics.quantum.gate import Gate, OneQubitGate, CGate, CGateS from sympy.core.core import BasicMeta from sympy.core.assumptions import ManagedProperties __all__ = [ 'CircuitPlot', 'circuit_plot', 'labeller', 'Mz', 'Mx', 'CreateOneQubitGate', 'CreateCGate', ] np = import_module('numpy') matplotlib = import_module( 'matplotlib', __import__kwargs={'fromlist': ['pyplot']}, catch=(RuntimeError,)) # This is raised in environments that have no display. 
if not np or not matplotlib: class CircuitPlot(object): def __init__(*args, **kwargs): raise ImportError('numpy or matplotlib not available.') def circuit_plot(*args, **kwargs): raise ImportError('numpy or matplotlib not available.') else: pyplot = matplotlib.pyplot Line2D = matplotlib.lines.Line2D Circle = matplotlib.patches.Circle #from matplotlib import rc #rc('text',usetex=True) class CircuitPlot(object): """A class for managing a circuit plot.""" scale = 1.0 fontsize = 20.0 linewidth = 1.0 control_radius = 0.05 not_radius = 0.15 swap_delta = 0.05 labels = [] inits = {} label_buffer = 0.5 def __init__(self, c, nqubits, **kwargs): self.circuit = c self.ngates = len(self.circuit.args) self.nqubits = nqubits self.update(kwargs) self._create_grid() self._create_figure() self._plot_wires() self._plot_gates() self._finish() def update(self, kwargs): """Load the kwargs into the instance dict.""" self.__dict__.update(kwargs) def _create_grid(self): """Create the grid of wires.""" scale = self.scale wire_grid = np.arange(0.0, self.nqubits*scale, scale, dtype=float) gate_grid = np.arange(0.0, self.ngates*scale, scale, dtype=float) self._wire_grid = wire_grid self._gate_grid = gate_grid def _create_figure(self): """Create the main matplotlib figure.""" self._figure = pyplot.figure( figsize=(self.ngates*self.scale, self.nqubits*self.scale), facecolor='w', edgecolor='w' ) ax = self._figure.add_subplot( 1, 1, 1, frameon=True ) ax.set_axis_off() offset = 0.5*self.scale ax.set_xlim(self._gate_grid[0] - offset, self._gate_grid[-1] + offset) ax.set_ylim(self._wire_grid[0] - offset, self._wire_grid[-1] + offset) ax.set_aspect('equal') self._axes = ax def _plot_wires(self): """Plot the wires of the circuit diagram.""" xstart = self._gate_grid[0] xstop = self._gate_grid[-1] xdata = (xstart - self.scale, xstop + self.scale) for i in range(self.nqubits): ydata = (self._wire_grid[i], self._wire_grid[i]) line = Line2D( xdata, ydata, color='k', lw=self.linewidth ) 
self._axes.add_line(line) if self.labels: init_label_buffer = 0 if self.inits.get(self.labels[i]): init_label_buffer = 0.25 self._axes.text( xdata[0]-self.label_buffer-init_label_buffer,ydata[0], render_label(self.labels[i],self.inits), size=self.fontsize, color='k',ha='center',va='center') self._plot_measured_wires() def _plot_measured_wires(self): ismeasured = self._measurements() xstop = self._gate_grid[-1] dy = 0.04 # amount to shift wires when doubled # Plot doubled wires after they are measured for im in ismeasured: xdata = (self._gate_grid[ismeasured[im]],xstop+self.scale) ydata = (self._wire_grid[im]+dy,self._wire_grid[im]+dy) line = Line2D( xdata, ydata, color='k', lw=self.linewidth ) self._axes.add_line(line) # Also double any controlled lines off these wires for i,g in enumerate(self._gates()): if isinstance(g, CGate) or isinstance(g, CGateS): wires = g.controls + g.targets for wire in wires: if wire in ismeasured and \ self._gate_grid[i] > self._gate_grid[ismeasured[wire]]: ydata = min(wires), max(wires) xdata = self._gate_grid[i]-dy, self._gate_grid[i]-dy line = Line2D( xdata, ydata, color='k', lw=self.linewidth ) self._axes.add_line(line) def _gates(self): """Create a list of all gates in the circuit plot.""" gates = [] if isinstance(self.circuit, Mul): for g in reversed(self.circuit.args): if isinstance(g, Gate): gates.append(g) elif isinstance(self.circuit, Gate): gates.append(self.circuit) return gates def _plot_gates(self): """Iterate through the gates and plot each of them.""" for i, gate in enumerate(self._gates()): gate.plot_gate(self, i) def _measurements(self): """Return a dict {i:j} where i is the index of the wire that has been measured, and j is the gate where the wire is measured. 
""" ismeasured = {} for i,g in enumerate(self._gates()): if getattr(g,'measurement',False): for target in g.targets: if target in ismeasured: if ismeasured[target] > i: ismeasured[target] = i else: ismeasured[target] = i return ismeasured def _finish(self): # Disable clipping to make panning work well for large circuits. for o in self._figure.findobj(): o.set_clip_on(False) def one_qubit_box(self, t, gate_idx, wire_idx): """Draw a box for a single qubit gate.""" x = self._gate_grid[gate_idx] y = self._wire_grid[wire_idx] self._axes.text( x, y, t, color='k', ha='center', va='center', bbox=dict(ec='k', fc='w', fill=True, lw=self.linewidth), size=self.fontsize ) def two_qubit_box(self, t, gate_idx, wire_idx): """Draw a box for a two qubit gate. Doesn't work yet. """ x = self._gate_grid[gate_idx] y = self._wire_grid[wire_idx]+0.5 print(self._gate_grid) print(self._wire_grid) obj = self._axes.text( x, y, t, color='k', ha='center', va='center', bbox=dict(ec='k', fc='w', fill=True, lw=self.linewidth), size=self.fontsize ) def control_line(self, gate_idx, min_wire, max_wire): """Draw a vertical control line.""" xdata = (self._gate_grid[gate_idx], self._gate_grid[gate_idx]) ydata = (self._wire_grid[min_wire], self._wire_grid[max_wire]) line = Line2D( xdata, ydata, color='k', lw=self.linewidth ) self._axes.add_line(line) def control_point(self, gate_idx, wire_idx): """Draw a control point.""" x = self._gate_grid[gate_idx] y = self._wire_grid[wire_idx] radius = self.control_radius c = Circle( (x, y), radius*self.scale, ec='k', fc='k', fill=True, lw=self.linewidth ) self._axes.add_patch(c) def not_point(self, gate_idx, wire_idx): """Draw a NOT gates as the circle with plus in the middle.""" x = self._gate_grid[gate_idx] y = self._wire_grid[wire_idx] radius = self.not_radius c = Circle( (x, y), radius, ec='k', fc='w', fill=False, lw=self.linewidth ) self._axes.add_patch(c) l = Line2D( (x, x), (y - radius, y + radius), color='k', lw=self.linewidth ) self._axes.add_line(l) def 
swap_point(self, gate_idx, wire_idx): """Draw a swap point as a cross.""" x = self._gate_grid[gate_idx] y = self._wire_grid[wire_idx] d = self.swap_delta l1 = Line2D( (x - d, x + d), (y - d, y + d), color='k', lw=self.linewidth ) l2 = Line2D( (x - d, x + d), (y + d, y - d), color='k', lw=self.linewidth ) self._axes.add_line(l1) self._axes.add_line(l2) def circuit_plot(c, nqubits, **kwargs): """Draw the circuit diagram for the circuit with nqubits. Parameters ========== c : circuit The circuit to plot. Should be a product of Gate instances. nqubits : int The number of qubits to include in the circuit. Must be at least as big as the largest `min_qubits`` of the gates. """ return CircuitPlot(c, nqubits, **kwargs) def render_label(label, inits={}): """Slightly more flexible way to render labels. >>> from sympy.physics.quantum.circuitplot import render_label >>> render_label('q0') '$|q0\\\\rangle$' >>> render_label('q0', {'q0':'0'}) '$|q0\\\\rangle=|0\\\\rangle$' """ init = inits.get(label) if init: return r'$|%s\rangle=|%s\rangle$' % (label, init) return r'$|%s\rangle$' % label def labeller(n, symbol='q'): """Autogenerate labels for wires of quantum circuits. Parameters ========== n : int number of qubits in the circuit symbol : string A character string to precede all gate labels. E.g. 'q_0', 'q_1', etc. >>> from sympy.physics.quantum.circuitplot import labeller >>> labeller(2) ['q_1', 'q_0'] >>> labeller(3,'j') ['j_2', 'j_1', 'j_0'] """ return ['%s_%d' % (symbol,n-i-1) for i in range(n)] class Mz(OneQubitGate): """Mock-up of a z measurement gate. This is in circuitplot rather than gate.py because it's not a real gate, it just draws one. """ measurement = True gate_name='Mz' gate_name_latex=u'M_z' class Mx(OneQubitGate): """Mock-up of an x measurement gate. This is in circuitplot rather than gate.py because it's not a real gate, it just draws one. 
""" measurement = True gate_name='Mx' gate_name_latex=u'M_x' class CreateOneQubitGate(ManagedProperties): def __new__(mcl, name, latexname=None): if not latexname: latexname = name return BasicMeta.__new__(mcl, name + "Gate", (OneQubitGate,), {'gate_name': name, 'gate_name_latex': latexname}) def CreateCGate(name, latexname=None): """Use a lexical closure to make a controlled gate. """ if not latexname: latexname = name onequbitgate = CreateOneQubitGate(name, latexname) def ControlledGate(ctrls,target): return CGate(tuple(ctrls),onequbitgate(target)) return ControlledGate
bsd-3-clause
LiaoPan/scikit-learn
examples/svm/plot_iris.py
225
3252
"""
==================================================
Plot different SVM classifiers in the iris dataset
==================================================

Comparison of different linear SVM classifiers on a 2D projection of the iris
dataset. We only consider the first 2 features of this dataset:

- Sepal length
- Sepal width

This example shows how to plot the decision surface for four SVM classifiers
with different kernels.

The linear models ``LinearSVC()`` and ``SVC(kernel='linear')`` yield slightly
different decision boundaries. This can be a consequence of the following
differences:

- ``LinearSVC`` minimizes the squared hinge loss while ``SVC`` minimizes the
  regular hinge loss.

- ``LinearSVC`` uses the One-vs-All (also known as One-vs-Rest) multiclass
  reduction while ``SVC`` uses the One-vs-One multiclass reduction.

Both linear models have linear decision boundaries (intersecting hyperplanes)
while the non-linear kernel models (polynomial or Gaussian RBF) have more
flexible non-linear decision boundaries with shapes that depend on the kind of
kernel and its parameters.

.. NOTE:: while plotting the decision function of classifiers for toy 2D
   datasets can help get an intuitive understanding of their respective
   expressive power, be aware that those intuitions don't always generalize to
   more realistic high-dimensional problems.

"""
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets

# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2]  # we only take the first two features. We could
                      # avoid this ugly slicing by using a two-dim dataset
y = iris.target

h = .02  # step size in the mesh

# we create an instance of SVM and fit our data. We do not scale our
# data since we want to plot the support vectors
C = 1.0  # SVM regularization parameter
svc = svm.SVC(kernel='linear', C=C).fit(X, y)
rbf_svc = svm.SVC(kernel='rbf', gamma=0.7, C=C).fit(X, y)
poly_svc = svm.SVC(kernel='poly', degree=3, C=C).fit(X, y)
lin_svc = svm.LinearSVC(C=C).fit(X, y)

# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                     np.arange(y_min, y_max, h))

# title for the plots
titles = ['SVC with linear kernel',
          'LinearSVC (linear kernel)',
          'SVC with RBF kernel',
          'SVC with polynomial (degree 3) kernel']

for i, clf in enumerate((svc, lin_svc, rbf_svc, poly_svc)):
    # Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
    plt.subplot(2, 2, i + 1)
    plt.subplots_adjust(wspace=0.4, hspace=0.4)

    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])

    # Put the result into a color plot
    Z = Z.reshape(xx.shape)
    plt.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.8)

    # Plot also the training points
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
    plt.xlabel('Sepal length')
    plt.ylabel('Sepal width')
    plt.xlim(xx.min(), xx.max())
    plt.ylim(yy.min(), yy.max())
    plt.xticks(())
    plt.yticks(())
    plt.title(titles[i])

plt.show()
bsd-3-clause
joernhees/scikit-learn
sklearn/ensemble/weight_boosting.py
29
41090
"""Weight Boosting This module contains weight boosting estimators for both classification and regression. The module structure is the following: - The ``BaseWeightBoosting`` base class implements a common ``fit`` method for all the estimators in the module. Regression and classification only differ from each other in the loss function that is optimized. - ``AdaBoostClassifier`` implements adaptive boosting (AdaBoost-SAMME) for classification problems. - ``AdaBoostRegressor`` implements adaptive boosting (AdaBoost.R2) for regression problems. """ # Authors: Noel Dawe <noel@dawe.me> # Gilles Louppe <g.louppe@gmail.com> # Hamzeh Alsalhi <ha258@cornell.edu> # Arnaud Joly <arnaud.v.joly@gmail.com> # # License: BSD 3 clause from abc import ABCMeta, abstractmethod import numpy as np from numpy.core.umath_tests import inner1d from .base import BaseEnsemble from ..base import ClassifierMixin, RegressorMixin, is_regressor from ..externals import six from ..externals.six.moves import zip from ..externals.six.moves import xrange as range from .forest import BaseForest from ..tree import DecisionTreeClassifier, DecisionTreeRegressor from ..tree.tree import BaseDecisionTree from ..tree._tree import DTYPE from ..utils import check_array, check_X_y, check_random_state from ..utils.extmath import stable_cumsum from ..metrics import accuracy_score, r2_score from sklearn.utils.validation import has_fit_parameter, check_is_fitted __all__ = [ 'AdaBoostClassifier', 'AdaBoostRegressor', ] class BaseWeightBoosting(six.with_metaclass(ABCMeta, BaseEnsemble)): """Base class for AdaBoost estimators. Warning: This class should not be used directly. Use derived classes instead. 
""" @abstractmethod def __init__(self, base_estimator=None, n_estimators=50, estimator_params=tuple(), learning_rate=1., random_state=None): super(BaseWeightBoosting, self).__init__( base_estimator=base_estimator, n_estimators=n_estimators, estimator_params=estimator_params) self.learning_rate = learning_rate self.random_state = random_state def fit(self, X, y, sample_weight=None): """Build a boosted classifier/regressor from the training set (X, y). Parameters ---------- X : {array-like, sparse matrix} of shape = [n_samples, n_features] The training input samples. Sparse matrix can be CSC, CSR, COO, DOK, or LIL. COO, DOK, and LIL are converted to CSR. The dtype is forced to DTYPE from tree._tree if the base classifier of this ensemble weighted boosting classifier is a tree or forest. y : array-like of shape = [n_samples] The target values (class labels in classification, real numbers in regression). sample_weight : array-like of shape = [n_samples], optional Sample weights. If None, the sample weights are initialized to 1 / n_samples. Returns ------- self : object Returns self. """ # Check parameters if self.learning_rate <= 0: raise ValueError("learning_rate must be greater than zero") if (self.base_estimator is None or isinstance(self.base_estimator, (BaseDecisionTree, BaseForest))): dtype = DTYPE accept_sparse = 'csc' else: dtype = None accept_sparse = ['csr', 'csc'] X, y = check_X_y(X, y, accept_sparse=accept_sparse, dtype=dtype, y_numeric=is_regressor(self)) if sample_weight is None: # Initialize weights to 1 / n_samples sample_weight = np.empty(X.shape[0], dtype=np.float64) sample_weight[:] = 1. 
/ X.shape[0] else: sample_weight = check_array(sample_weight, ensure_2d=False) # Normalize existing weights sample_weight = sample_weight / sample_weight.sum(dtype=np.float64) # Check that the sample weights sum is positive if sample_weight.sum() <= 0: raise ValueError( "Attempting to fit with a non-positive " "weighted number of samples.") # Check parameters self._validate_estimator() # Clear any previous fit results self.estimators_ = [] self.estimator_weights_ = np.zeros(self.n_estimators, dtype=np.float64) self.estimator_errors_ = np.ones(self.n_estimators, dtype=np.float64) random_state = check_random_state(self.random_state) for iboost in range(self.n_estimators): # Boosting step sample_weight, estimator_weight, estimator_error = self._boost( iboost, X, y, sample_weight, random_state) # Early termination if sample_weight is None: break self.estimator_weights_[iboost] = estimator_weight self.estimator_errors_[iboost] = estimator_error # Stop if error is zero if estimator_error == 0: break sample_weight_sum = np.sum(sample_weight) # Stop if the sum of sample weights has become non-positive if sample_weight_sum <= 0: break if iboost < self.n_estimators - 1: # Normalize sample_weight /= sample_weight_sum return self @abstractmethod def _boost(self, iboost, X, y, sample_weight, random_state): """Implement a single boost. Warning: This method needs to be overridden by subclasses. Parameters ---------- iboost : int The index of the current boost iteration. X : {array-like, sparse matrix} of shape = [n_samples, n_features] The training input samples. Sparse matrix can be CSC, CSR, COO, DOK, or LIL. COO, DOK, and LIL are converted to CSR. y : array-like of shape = [n_samples] The target values (class labels). sample_weight : array-like of shape = [n_samples] The current sample weights. random_state : numpy.RandomState The current random number generator Returns ------- sample_weight : array-like of shape = [n_samples] or None The reweighted sample weights. 
If None then boosting has terminated early. estimator_weight : float The weight for the current boost. If None then boosting has terminated early. error : float The classification error for the current boost. If None then boosting has terminated early. """ pass def staged_score(self, X, y, sample_weight=None): """Return staged scores for X, y. This generator method yields the ensemble score after each iteration of boosting and therefore allows monitoring, such as to determine the score on a test set after each boost. Parameters ---------- X : {array-like, sparse matrix} of shape = [n_samples, n_features] The training input samples. Sparse matrix can be CSC, CSR, COO, DOK, or LIL. DOK and LIL are converted to CSR. y : array-like, shape = [n_samples] Labels for X. sample_weight : array-like, shape = [n_samples], optional Sample weights. Returns ------- z : float """ for y_pred in self.staged_predict(X): if isinstance(self, ClassifierMixin): yield accuracy_score(y, y_pred, sample_weight=sample_weight) else: yield r2_score(y, y_pred, sample_weight=sample_weight) @property def feature_importances_(self): """Return the feature importances (the higher, the more important the feature). 
Returns ------- feature_importances_ : array, shape = [n_features] """ if self.estimators_ is None or len(self.estimators_) == 0: raise ValueError("Estimator not fitted, " "call `fit` before `feature_importances_`.") try: norm = self.estimator_weights_.sum() return (sum(weight * clf.feature_importances_ for weight, clf in zip(self.estimator_weights_, self.estimators_)) / norm) except AttributeError: raise AttributeError( "Unable to compute feature importances " "since base_estimator does not have a " "feature_importances_ attribute") def _validate_X_predict(self, X): """Ensure that X is in the proper format""" if (self.base_estimator is None or isinstance(self.base_estimator, (BaseDecisionTree, BaseForest))): X = check_array(X, accept_sparse='csr', dtype=DTYPE) else: X = check_array(X, accept_sparse=['csr', 'csc', 'coo']) return X def _samme_proba(estimator, n_classes, X): """Calculate algorithm 4, step 2, equation c) of Zhu et al [1]. References ---------- .. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009. """ proba = estimator.predict_proba(X) # Displace zero probabilities so the log is defined. # Also fix negative elements which may occur with # negative sample weights. proba[proba < np.finfo(proba.dtype).eps] = np.finfo(proba.dtype).eps log_proba = np.log(proba) return (n_classes - 1) * (log_proba - (1. / n_classes) * log_proba.sum(axis=1)[:, np.newaxis]) class AdaBoostClassifier(BaseWeightBoosting, ClassifierMixin): """An AdaBoost classifier. An AdaBoost [1] classifier is a meta-estimator that begins by fitting a classifier on the original dataset and then fits additional copies of the classifier on the same dataset but where the weights of incorrectly classified instances are adjusted such that subsequent classifiers focus more on difficult cases. This class implements the algorithm known as AdaBoost-SAMME [2]. Read more in the :ref:`User Guide <adaboost>`. 
Parameters ---------- base_estimator : object, optional (default=DecisionTreeClassifier) The base estimator from which the boosted ensemble is built. Support for sample weighting is required, as well as proper `classes_` and `n_classes_` attributes. n_estimators : integer, optional (default=50) The maximum number of estimators at which boosting is terminated. In case of perfect fit, the learning procedure is stopped early. learning_rate : float, optional (default=1.) Learning rate shrinks the contribution of each classifier by ``learning_rate``. There is a trade-off between ``learning_rate`` and ``n_estimators``. algorithm : {'SAMME', 'SAMME.R'}, optional (default='SAMME.R') If 'SAMME.R' then use the SAMME.R real boosting algorithm. ``base_estimator`` must support calculation of class probabilities. If 'SAMME' then use the SAMME discrete boosting algorithm. The SAMME.R algorithm typically converges faster than SAMME, achieving a lower test error with fewer boosting iterations. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Attributes ---------- estimators_ : list of classifiers The collection of fitted sub-estimators. classes_ : array of shape = [n_classes] The classes labels. n_classes_ : int The number of classes. estimator_weights_ : array of floats Weights for each estimator in the boosted ensemble. estimator_errors_ : array of floats Classification error for each estimator in the boosted ensemble. feature_importances_ : array of shape = [n_features] The feature importances if supported by the ``base_estimator``. See also -------- AdaBoostRegressor, GradientBoostingClassifier, DecisionTreeClassifier References ---------- .. [1] Y. Freund, R. 
Schapire, "A Decision-Theoretic Generalization of on-Line Learning and an Application to Boosting", 1995. .. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009. """ def __init__(self, base_estimator=None, n_estimators=50, learning_rate=1., algorithm='SAMME.R', random_state=None): super(AdaBoostClassifier, self).__init__( base_estimator=base_estimator, n_estimators=n_estimators, learning_rate=learning_rate, random_state=random_state) self.algorithm = algorithm def fit(self, X, y, sample_weight=None): """Build a boosted classifier from the training set (X, y). Parameters ---------- X : {array-like, sparse matrix} of shape = [n_samples, n_features] The training input samples. Sparse matrix can be CSC, CSR, COO, DOK, or LIL. DOK and LIL are converted to CSR. y : array-like of shape = [n_samples] The target values (class labels). sample_weight : array-like of shape = [n_samples], optional Sample weights. If None, the sample weights are initialized to ``1 / n_samples``. Returns ------- self : object Returns self. """ # Check that algorithm is supported if self.algorithm not in ('SAMME', 'SAMME.R'): raise ValueError("algorithm %s is not supported" % self.algorithm) # Fit return super(AdaBoostClassifier, self).fit(X, y, sample_weight) def _validate_estimator(self): """Check the estimator and set the base_estimator_ attribute.""" super(AdaBoostClassifier, self)._validate_estimator( default=DecisionTreeClassifier(max_depth=1)) # SAMME-R requires predict_proba-enabled base estimators if self.algorithm == 'SAMME.R': if not hasattr(self.base_estimator_, 'predict_proba'): raise TypeError( "AdaBoostClassifier with algorithm='SAMME.R' requires " "that the weak learner supports the calculation of class " "probabilities with a predict_proba method.\n" "Please change the base estimator or set " "algorithm='SAMME' instead.") if not has_fit_parameter(self.base_estimator_, "sample_weight"): raise ValueError("%s doesn't support sample_weight." 
% self.base_estimator_.__class__.__name__) def _boost(self, iboost, X, y, sample_weight, random_state): """Implement a single boost. Perform a single boost according to the real multi-class SAMME.R algorithm or to the discrete SAMME algorithm and return the updated sample weights. Parameters ---------- iboost : int The index of the current boost iteration. X : {array-like, sparse matrix} of shape = [n_samples, n_features] The training input samples. Sparse matrix can be CSC, CSR, COO, DOK, or LIL. DOK and LIL are converted to CSR. y : array-like of shape = [n_samples] The target values (class labels). sample_weight : array-like of shape = [n_samples] The current sample weights. random_state : numpy.RandomState The current random number generator Returns ------- sample_weight : array-like of shape = [n_samples] or None The reweighted sample weights. If None then boosting has terminated early. estimator_weight : float The weight for the current boost. If None then boosting has terminated early. estimator_error : float The classification error for the current boost. If None then boosting has terminated early. 
""" if self.algorithm == 'SAMME.R': return self._boost_real(iboost, X, y, sample_weight, random_state) else: # elif self.algorithm == "SAMME": return self._boost_discrete(iboost, X, y, sample_weight, random_state) def _boost_real(self, iboost, X, y, sample_weight, random_state): """Implement a single boost using the SAMME.R real algorithm.""" estimator = self._make_estimator(random_state=random_state) estimator.fit(X, y, sample_weight=sample_weight) y_predict_proba = estimator.predict_proba(X) if iboost == 0: self.classes_ = getattr(estimator, 'classes_', None) self.n_classes_ = len(self.classes_) y_predict = self.classes_.take(np.argmax(y_predict_proba, axis=1), axis=0) # Instances incorrectly classified incorrect = y_predict != y # Error fraction estimator_error = np.mean( np.average(incorrect, weights=sample_weight, axis=0)) # Stop if classification is perfect if estimator_error <= 0: return sample_weight, 1., 0. # Construct y coding as described in Zhu et al [2]: # # y_k = 1 if c == k else -1 / (K - 1) # # where K == n_classes_ and c, k in [0, K) are indices along the second # axis of the y coding with c being the index corresponding to the true # class label. n_classes = self.n_classes_ classes = self.classes_ y_codes = np.array([-1. / (n_classes - 1), 1.]) y_coding = y_codes.take(classes == y[:, np.newaxis]) # Displace zero probabilities so the log is defined. # Also fix negative elements which may occur with # negative sample weights. proba = y_predict_proba # alias for readability proba[proba < np.finfo(proba.dtype).eps] = np.finfo(proba.dtype).eps # Boost weight using multi-class AdaBoost SAMME.R alg estimator_weight = (-1. * self.learning_rate * (((n_classes - 1.) 
/ n_classes) * inner1d(y_coding, np.log(y_predict_proba)))) # Only boost the weights if it will fit again if not iboost == self.n_estimators - 1: # Only boost positive weights sample_weight *= np.exp(estimator_weight * ((sample_weight > 0) | (estimator_weight < 0))) return sample_weight, 1., estimator_error def _boost_discrete(self, iboost, X, y, sample_weight, random_state): """Implement a single boost using the SAMME discrete algorithm.""" estimator = self._make_estimator(random_state=random_state) estimator.fit(X, y, sample_weight=sample_weight) y_predict = estimator.predict(X) if iboost == 0: self.classes_ = getattr(estimator, 'classes_', None) self.n_classes_ = len(self.classes_) # Instances incorrectly classified incorrect = y_predict != y # Error fraction estimator_error = np.mean( np.average(incorrect, weights=sample_weight, axis=0)) # Stop if classification is perfect if estimator_error <= 0: return sample_weight, 1., 0. n_classes = self.n_classes_ # Stop if the error is at least as bad as random guessing if estimator_error >= 1. - (1. / n_classes): self.estimators_.pop(-1) if len(self.estimators_) == 0: raise ValueError('BaseClassifier in AdaBoostClassifier ' 'ensemble is worse than random, ensemble ' 'can not be fit.') return None, None, None # Boost weight using multi-class AdaBoost SAMME alg estimator_weight = self.learning_rate * ( np.log((1. - estimator_error) / estimator_error) + np.log(n_classes - 1.)) # Only boost the weights if I will fit again if not iboost == self.n_estimators - 1: # Only boost positive weights sample_weight *= np.exp(estimator_weight * incorrect * ((sample_weight > 0) | (estimator_weight < 0))) return sample_weight, estimator_weight, estimator_error def predict(self, X): """Predict classes for X. The predicted class of an input sample is computed as the weighted mean prediction of the classifiers in the ensemble. 
Parameters ---------- X : {array-like, sparse matrix} of shape = [n_samples, n_features] The training input samples. Sparse matrix can be CSC, CSR, COO, DOK, or LIL. DOK and LIL are converted to CSR. Returns ------- y : array of shape = [n_samples] The predicted classes. """ pred = self.decision_function(X) if self.n_classes_ == 2: return self.classes_.take(pred > 0, axis=0) return self.classes_.take(np.argmax(pred, axis=1), axis=0) def staged_predict(self, X): """Return staged predictions for X. The predicted class of an input sample is computed as the weighted mean prediction of the classifiers in the ensemble. This generator method yields the ensemble prediction after each iteration of boosting and therefore allows monitoring, such as to determine the prediction on a test set after each boost. Parameters ---------- X : array-like of shape = [n_samples, n_features] The input samples. Returns ------- y : generator of array, shape = [n_samples] The predicted classes. """ n_classes = self.n_classes_ classes = self.classes_ if n_classes == 2: for pred in self.staged_decision_function(X): yield np.array(classes.take(pred > 0, axis=0)) else: for pred in self.staged_decision_function(X): yield np.array(classes.take( np.argmax(pred, axis=1), axis=0)) def decision_function(self, X): """Compute the decision function of ``X``. Parameters ---------- X : {array-like, sparse matrix} of shape = [n_samples, n_features] The training input samples. Sparse matrix can be CSC, CSR, COO, DOK, or LIL. DOK and LIL are converted to CSR. Returns ------- score : array, shape = [n_samples, k] The decision function of the input samples. The order of outputs is the same of that of the `classes_` attribute. Binary classification is a special cases with ``k == 1``, otherwise ``k==n_classes``. For binary classification, values closer to -1 or 1 mean more like the first or second class in ``classes_``, respectively. 
""" check_is_fitted(self, "n_classes_") X = self._validate_X_predict(X) n_classes = self.n_classes_ classes = self.classes_[:, np.newaxis] pred = None if self.algorithm == 'SAMME.R': # The weights are all 1. for SAMME.R pred = sum(_samme_proba(estimator, n_classes, X) for estimator in self.estimators_) else: # self.algorithm == "SAMME" pred = sum((estimator.predict(X) == classes).T * w for estimator, w in zip(self.estimators_, self.estimator_weights_)) pred /= self.estimator_weights_.sum() if n_classes == 2: pred[:, 0] *= -1 return pred.sum(axis=1) return pred def staged_decision_function(self, X): """Compute decision function of ``X`` for each boosting iteration. This method allows monitoring (i.e. determine error on testing set) after each boosting iteration. Parameters ---------- X : {array-like, sparse matrix} of shape = [n_samples, n_features] The training input samples. Sparse matrix can be CSC, CSR, COO, DOK, or LIL. DOK and LIL are converted to CSR. Returns ------- score : generator of array, shape = [n_samples, k] The decision function of the input samples. The order of outputs is the same of that of the `classes_` attribute. Binary classification is a special cases with ``k == 1``, otherwise ``k==n_classes``. For binary classification, values closer to -1 or 1 mean more like the first or second class in ``classes_``, respectively. """ check_is_fitted(self, "n_classes_") X = self._validate_X_predict(X) n_classes = self.n_classes_ classes = self.classes_[:, np.newaxis] pred = None norm = 0. for weight, estimator in zip(self.estimator_weights_, self.estimators_): norm += weight if self.algorithm == 'SAMME.R': # The weights are all 1. 
for SAMME.R current_pred = _samme_proba(estimator, n_classes, X) else: # elif self.algorithm == "SAMME": current_pred = estimator.predict(X) current_pred = (current_pred == classes).T * weight if pred is None: pred = current_pred else: pred += current_pred if n_classes == 2: tmp_pred = np.copy(pred) tmp_pred[:, 0] *= -1 yield (tmp_pred / norm).sum(axis=1) else: yield pred / norm def predict_proba(self, X): """Predict class probabilities for X. The predicted class probabilities of an input sample is computed as the weighted mean predicted class probabilities of the classifiers in the ensemble. Parameters ---------- X : {array-like, sparse matrix} of shape = [n_samples, n_features] The training input samples. Sparse matrix can be CSC, CSR, COO, DOK, or LIL. DOK and LIL are converted to CSR. Returns ------- p : array of shape = [n_samples] The class probabilities of the input samples. The order of outputs is the same of that of the `classes_` attribute. """ check_is_fitted(self, "n_classes_") n_classes = self.n_classes_ X = self._validate_X_predict(X) if n_classes == 1: return np.ones((X.shape[0], 1)) if self.algorithm == 'SAMME.R': # The weights are all 1. for SAMME.R proba = sum(_samme_proba(estimator, n_classes, X) for estimator in self.estimators_) else: # self.algorithm == "SAMME" proba = sum(estimator.predict_proba(X) * w for estimator, w in zip(self.estimators_, self.estimator_weights_)) proba /= self.estimator_weights_.sum() proba = np.exp((1. / (n_classes - 1)) * proba) normalizer = proba.sum(axis=1)[:, np.newaxis] normalizer[normalizer == 0.0] = 1.0 proba /= normalizer return proba def staged_predict_proba(self, X): """Predict class probabilities for X. The predicted class probabilities of an input sample is computed as the weighted mean predicted class probabilities of the classifiers in the ensemble. 
This generator method yields the ensemble predicted class probabilities after each iteration of boosting and therefore allows monitoring, such as to determine the predicted class probabilities on a test set after each boost. Parameters ---------- X : {array-like, sparse matrix} of shape = [n_samples, n_features] The training input samples. Sparse matrix can be CSC, CSR, COO, DOK, or LIL. DOK and LIL are converted to CSR. Returns ------- p : generator of array, shape = [n_samples] The class probabilities of the input samples. The order of outputs is the same of that of the `classes_` attribute. """ X = self._validate_X_predict(X) n_classes = self.n_classes_ proba = None norm = 0. for weight, estimator in zip(self.estimator_weights_, self.estimators_): norm += weight if self.algorithm == 'SAMME.R': # The weights are all 1. for SAMME.R current_proba = _samme_proba(estimator, n_classes, X) else: # elif self.algorithm == "SAMME": current_proba = estimator.predict_proba(X) * weight if proba is None: proba = current_proba else: proba += current_proba real_proba = np.exp((1. / (n_classes - 1)) * (proba / norm)) normalizer = real_proba.sum(axis=1)[:, np.newaxis] normalizer[normalizer == 0.0] = 1.0 real_proba /= normalizer yield real_proba def predict_log_proba(self, X): """Predict class log-probabilities for X. The predicted class log-probabilities of an input sample is computed as the weighted mean predicted class log-probabilities of the classifiers in the ensemble. Parameters ---------- X : {array-like, sparse matrix} of shape = [n_samples, n_features] The training input samples. Sparse matrix can be CSC, CSR, COO, DOK, or LIL. DOK and LIL are converted to CSR. Returns ------- p : array of shape = [n_samples] The class probabilities of the input samples. The order of outputs is the same of that of the `classes_` attribute. """ return np.log(self.predict_proba(X)) class AdaBoostRegressor(BaseWeightBoosting, RegressorMixin): """An AdaBoost regressor. 
An AdaBoost [1] regressor is a meta-estimator that begins by fitting a regressor on the original dataset and then fits additional copies of the regressor on the same dataset but where the weights of instances are adjusted according to the error of the current prediction. As such, subsequent regressors focus more on difficult cases. This class implements the algorithm known as AdaBoost.R2 [2]. Read more in the :ref:`User Guide <adaboost>`. Parameters ---------- base_estimator : object, optional (default=DecisionTreeRegressor) The base estimator from which the boosted ensemble is built. Support for sample weighting is required. n_estimators : integer, optional (default=50) The maximum number of estimators at which boosting is terminated. In case of perfect fit, the learning procedure is stopped early. learning_rate : float, optional (default=1.) Learning rate shrinks the contribution of each regressor by ``learning_rate``. There is a trade-off between ``learning_rate`` and ``n_estimators``. loss : {'linear', 'square', 'exponential'}, optional (default='linear') The loss function to use when updating the weights after each boosting iteration. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Attributes ---------- estimators_ : list of classifiers The collection of fitted sub-estimators. estimator_weights_ : array of floats Weights for each estimator in the boosted ensemble. estimator_errors_ : array of floats Regression error for each estimator in the boosted ensemble. feature_importances_ : array of shape = [n_features] The feature importances if supported by the ``base_estimator``. See also -------- AdaBoostClassifier, GradientBoostingRegressor, DecisionTreeRegressor References ---------- .. [1] Y. Freund, R. 
Schapire, "A Decision-Theoretic Generalization of on-Line Learning and an Application to Boosting", 1995. .. [2] H. Drucker, "Improving Regressors using Boosting Techniques", 1997. """ def __init__(self, base_estimator=None, n_estimators=50, learning_rate=1., loss='linear', random_state=None): super(AdaBoostRegressor, self).__init__( base_estimator=base_estimator, n_estimators=n_estimators, learning_rate=learning_rate, random_state=random_state) self.loss = loss self.random_state = random_state def fit(self, X, y, sample_weight=None): """Build a boosted regressor from the training set (X, y). Parameters ---------- X : {array-like, sparse matrix} of shape = [n_samples, n_features] The training input samples. Sparse matrix can be CSC, CSR, COO, DOK, or LIL. DOK and LIL are converted to CSR. y : array-like of shape = [n_samples] The target values (real numbers). sample_weight : array-like of shape = [n_samples], optional Sample weights. If None, the sample weights are initialized to 1 / n_samples. Returns ------- self : object Returns self. """ # Check loss if self.loss not in ('linear', 'square', 'exponential'): raise ValueError( "loss must be 'linear', 'square', or 'exponential'") # Fit return super(AdaBoostRegressor, self).fit(X, y, sample_weight) def _validate_estimator(self): """Check the estimator and set the base_estimator_ attribute.""" super(AdaBoostRegressor, self)._validate_estimator( default=DecisionTreeRegressor(max_depth=3)) def _boost(self, iboost, X, y, sample_weight, random_state): """Implement a single boost for regression Perform a single boost according to the AdaBoost.R2 algorithm and return the updated sample weights. Parameters ---------- iboost : int The index of the current boost iteration. X : {array-like, sparse matrix} of shape = [n_samples, n_features] The training input samples. Sparse matrix can be CSC, CSR, COO, DOK, or LIL. DOK and LIL are converted to CSR. 
y : array-like of shape = [n_samples] The target values (class labels in classification, real numbers in regression). sample_weight : array-like of shape = [n_samples] The current sample weights. random_state : numpy.RandomState The current random number generator Returns ------- sample_weight : array-like of shape = [n_samples] or None The reweighted sample weights. If None then boosting has terminated early. estimator_weight : float The weight for the current boost. If None then boosting has terminated early. estimator_error : float The regression error for the current boost. If None then boosting has terminated early. """ estimator = self._make_estimator(random_state=random_state) # Weighted sampling of the training set with replacement # For NumPy >= 1.7.0 use np.random.choice cdf = stable_cumsum(sample_weight) cdf /= cdf[-1] uniform_samples = random_state.random_sample(X.shape[0]) bootstrap_idx = cdf.searchsorted(uniform_samples, side='right') # searchsorted returns a scalar bootstrap_idx = np.array(bootstrap_idx, copy=False) # Fit on the bootstrapped sample and obtain a prediction # for all samples in the training set estimator.fit(X[bootstrap_idx], y[bootstrap_idx]) y_predict = estimator.predict(X) error_vect = np.abs(y_predict - y) error_max = error_vect.max() if error_max != 0.: error_vect /= error_max if self.loss == 'square': error_vect **= 2 elif self.loss == 'exponential': error_vect = 1. - np.exp(- error_vect) # Calculate the average loss estimator_error = (sample_weight * error_vect).sum() if estimator_error <= 0: # Stop if fit is perfect return sample_weight, 1., 0. elif estimator_error >= 0.5: # Discard current estimator only if it isn't the only one if len(self.estimators_) > 1: self.estimators_.pop(-1) return None, None, None beta = estimator_error / (1. - estimator_error) # Boost weight using AdaBoost.R2 alg estimator_weight = self.learning_rate * np.log(1. / beta) if not iboost == self.n_estimators - 1: sample_weight *= np.power( beta, (1. 
- error_vect) * self.learning_rate) return sample_weight, estimator_weight, estimator_error def _get_median_predict(self, X, limit): # Evaluate predictions of all estimators predictions = np.array([ est.predict(X) for est in self.estimators_[:limit]]).T # Sort the predictions sorted_idx = np.argsort(predictions, axis=1) # Find index of median prediction for each sample weight_cdf = stable_cumsum(self.estimator_weights_[sorted_idx], axis=1) median_or_above = weight_cdf >= 0.5 * weight_cdf[:, -1][:, np.newaxis] median_idx = median_or_above.argmax(axis=1) median_estimators = sorted_idx[np.arange(X.shape[0]), median_idx] # Return median predictions return predictions[np.arange(X.shape[0]), median_estimators] def predict(self, X): """Predict regression value for X. The predicted regression value of an input sample is computed as the weighted median prediction of the classifiers in the ensemble. Parameters ---------- X : {array-like, sparse matrix} of shape = [n_samples, n_features] The training input samples. Sparse matrix can be CSC, CSR, COO, DOK, or LIL. DOK and LIL are converted to CSR. Returns ------- y : array of shape = [n_samples] The predicted regression values. """ check_is_fitted(self, "estimator_weights_") X = self._validate_X_predict(X) return self._get_median_predict(X, len(self.estimators_)) def staged_predict(self, X): """Return staged predictions for X. The predicted regression value of an input sample is computed as the weighted median prediction of the classifiers in the ensemble. This generator method yields the ensemble prediction after each iteration of boosting and therefore allows monitoring, such as to determine the prediction on a test set after each boost. Parameters ---------- X : {array-like, sparse matrix} of shape = [n_samples, n_features] The training input samples. Sparse matrix can be CSC, CSR, COO, DOK, or LIL. DOK and LIL are converted to CSR. Returns ------- y : generator of array, shape = [n_samples] The predicted regression values. 
""" check_is_fitted(self, "estimator_weights_") X = self._validate_X_predict(X) for i, _ in enumerate(self.estimators_, 1): yield self._get_median_predict(X, limit=i)
bsd-3-clause
NolanBecker/aima-python
grading/neuralNet-submissions.py
4
2217
import importlib import traceback from grading.util import roster, print_table # from logic import FolKB # from utils import expr import os from sklearn.neural_network import MLPClassifier mlpc = MLPClassifier() def indent(howMuch = 1): space = ' ' for i in range(1, howMuch): space += ' ' return space def tryOne(label, fAndP): frame = fAndP['frame'] if 'mlpc' in fAndP.keys(): clf = fAndP['mlpc'] else: clf = mlpc try: fit = clf.fit(frame.data, frame.target) except: pass print('') # print_table(fit.theta_, # header=[frame.feature_names], # topLeft=[label], # leftColumn=frame.target_names, # numfmt='%6.3f', # njust='center', # tjust='rjust', # ) y_pred = fit.predict(frame.data) print("Number of mislabeled points out of a total %d points : %d" % (len(frame.data), (frame.target != y_pred).sum())) def tryExamples(examples): for label in examples: example = examples[label] main = getattr(example, 'main', None) if main != None: example.main() else: tryOne(label, example) submissions = {} scores = {} message1 = 'Submissions that compile:' root = os.getcwd() for student in roster: try: os.chdir(root + '/submissions/' + student) # http://stackoverflow.com/a/17136796/2619926 mod = importlib.import_module('submissions.' + student + '.myNN') submissions[student] = mod.Examples message1 += ' ' + student except ImportError: pass except: traceback.print_exc() os.chdir(root) print(message1) print('----------------------------------------') for student in roster: if not student in submissions.keys(): continue scores[student] = [] try: examples = submissions[student] print('Bayesian Networks from:', student) tryExamples(examples) except: traceback.print_exc() print(student + ' scores ' + str(scores[student]) + ' = ' + str(sum(scores[student]))) print('----------------------------------------')
mit
guillemborrell/gtable
tests/test_table_creation.py
1
4044
from gtable import Table import numpy as np import pandas as pd def test_empty_table(): t = Table() assert t.data == [] def test_simple_table(): t = Table({'a': [1, 2, 3], 'b': np.array([4, 5, 6])}) assert t.to_dict()['a'][2] == 3 assert np.all(t.index == np.ones((2, 3), dtype=np.uint8)) def test_hcat_table(): t = Table({'a': [1, 2, 3], 'b': np.array([4, 5, 6])}) t.add_column('c', [7, 8, 9]) assert np.all(t.index == np.ones((3, 3), dtype=np.uint8)) assert np.all(t.c.values == np.array([7, 8, 9])) t.add_column('d', [0, 1, 2, 3, 4]) assert np.all(t.index == np.array([[1, 1, 1, 0, 0], [1, 1, 1, 0, 0], [1, 1, 1, 0, 0], [1, 1, 1, 1, 1]], dtype=np.uint8)) def test_vcat_table(): t = Table({'a': [1, 2, 3], 'b': np.array([4, 5, 6])}) t1 = Table({'a': [1, 2, 3], 'd': np.array([4, 5, 6])}) t.stack(t1) assert np.all(t.index == np.array([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [0, 0, 0, 1, 1, 1]], dtype=np.uint8)) def test_compute_column(): t = Table({'a': [1, 2, 3], 'b': np.array([4, 5, 6])}) t1 = Table({'a': [1, 2, 3], 'd': np.array([4, 5, 6])}) t.stack(t1) t.c = t.a + t.a/2 records = [r for r in t.records()] assert records == [ {'a': 1, 'b': 4, 'c': 1.5}, {'a': 2, 'b': 5, 'c': 3.0}, {'a': 3, 'b': 6, 'c': 4.5}, {'a': 1, 'd': 4, 'c': 1.5}, {'a': 2, 'd': 5, 'c': 3.0}, {'a': 3, 'd': 6, 'c': 4.5}] def test_compute_wrong_size(): t = Table({'a': [1, 2, 3], 'b': np.array([4, 5, 6])}) t1 = Table({'a': [1, 2, 3], 'd': np.array([4, 5, 6])}) t.stack(t1) t.c = t.a + t.b/2 assert np.all(t.c.values == np.array([3, 4.5, 6])) assert np.all(t.c.index == np.array([1, 1, 1, 0, 0, 0])) def test_add_array(): t = Table() t.a = np.arange(10) assert t.__repr__()[:13] == "<Table[ a[10]" def test_add_one(): tb = Table({'a': pd.date_range('2000-01-01', freq='M', periods=10), 'b': np.random.randn(10)}) tb.add_column('schedule', np.array(['first'])) assert np.all(tb.index == np.array([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0]])) def 
test_vcat_heterogeneous(): tb = Table({'a': pd.date_range('2000-01-01', freq='M', periods=3), 'b': np.random.randn(3)}) tb.add_column('schedule', np.array(['first'])) tb1 = tb.copy() tb1.schedule.values[0] = 'second' tb.stack(tb1) assert np.all(tb.index == np.array([[1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1], [1, 0, 0, 1, 0, 0]], dtype=np.uint8)) assert np.all(tb.schedule.values == np.array(['first', 'secon'])) def test_from_pandas(): df = pd.DataFrame( {'a': [1, 2, 3, 4], 'b': np.arange(4, dtype=np.float64), 'c': pd.date_range('2002-01-01', periods=4, freq='M')} ) t = Table.from_pandas(df) assert np.all(t.a.values == df.a.values) assert np.all(t.b.values == df.b.values) assert np.all(t.c.values == df.c.values) assert np.all(t.idx.values == df.index.values) def test_from_pandas_sparse(): df = pd.DataFrame( {'a': [1, 2, 3, np.nan], 'b': np.arange(4, dtype=np.float64), 'c': pd.date_range('2002-01-01', periods=4, freq='M')} ) t = Table.from_pandas(df) assert np.all(t.index == np.array( [[1, 1, 1, 1], [1, 1, 1, 0], [1, 1, 1, 1], [1, 1, 1, 1]], dtype=np.uint8)) assert np.all(t.a.values == np.array([1,2,3], dtype=np.float64)) def test_simple_rename(): t = Table({'a': [1, 2, 3], 'b': np.array([4, 5, 6])}) t.rename_column('a', 'c') assert t.keys == ['c', 'b']
bsd-3-clause
sysid/nbs
ml_old/Prognose/Evaluator.py
1
3134
from twBase import * # NOQA from pandas import DataFrame from pandas import read_csv from pandas import datetime from sklearn.preprocessing import MinMaxScaler # date-time parsing function for loading the dataset def parser(x): return datetime.strptime('190'+x, '%Y-%m') def get_time(): return time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime()) class DataHarness(object): def __init__(self, fn, isSample=False): if isinstance(fn, str): self.data = self.load(fn, isSample) else: self.data = fn self.differenced = self.diff(n=1, axis=0) self.supervised = self.series_to_supervised(n_in=3, n_out=2) self.train, self.test = self.split(0.8) # fit scaler on train data self.scaler = MinMaxScaler(feature_range=(-1, 1)) self.scaler = self.scaler.fit(self.train) log.info("Rescaling data...") self.train_scaled = self.scaler.transform(self.train) self.test_scaled = self.scaler.transform(self.test) def load(self, fn, isSample=False): ''' Loads csv data from fn Return: numpy array ''' # load dataset log.info("Loading dataset", fn=fn) if isSample: log.info("Loading only sample.") nrows = 100 else: nrows = None df = read_csv(fn, sep=';', decimal=',', nrows=nrows) cols = ['Ist [MWh]'] data = df[cols] return data.values def diff(self, **kwargs): ''' The n-th differences. 
The shape of the output is the same as a except along axis where the dimension is smaller by n Paramters analog np.diff ''' log.info("Differencing...") return np.diff(self.data, **kwargs) def series_to_supervised(self, **kwargs): log.info("Transforming for supervised learning...") supervised = series_to_supervised(self.differenced, **kwargs) return supervised.values def split(self, split): log.info("Splitting data") splitIndex = int(split * len(self.data)) return self.supervised[:splitIndex], self.supervised[splitIndex:] def invert_scale(self, data): log.info("Inverting scaling..") assert self.scaler is not None, "scaler scaler instantiated" return self.scaler.inverse_transform(data) def invert_diff(self, data, start_value): data = np.insert(data, 0, start_value, axis=0) result = np.cumsum(data) return result[1:] def main(argv=None): logging.basicConfig(format="", stream=sys.stderr, level=logging.DEBUG) logcfg(sys.stderr, logging.DEBUG, RenderEnum.console) log = structlog.get_logger(__name__) twStart() DATA_FN = './data/Lastprognose/2015.csv' evaluator = DataHarness(DATA_FN) data = evaluator.invert_scale(evaluator.train) diffdata = evaluator.invert_diff(evaluator.train[0], evaluator.data[0]) assert np.array_equal(diffdata, np.squeeze(evaluator.data[1:6].reshape((1,5)))) twEnd() return 0 # success if __name__ == '__main__': sys.exit(main(sys.argv[1:]))
mit
jjo31/ATHAM-Fluidity
tests/gls-Kato_Phillips-mixed_layer_depth/mixed_layer_depth_all.py
4
4600
#!/usr/bin/env python from numpy import arange,concatenate,array,argsort import os import sys import vtktools import math from pylab import * from matplotlib.ticker import MaxNLocator import re from scipy.interpolate import UnivariateSpline import glob #### taken from http://www.codinghorror.com/blog/archives/001018.html ####### def sort_nicely( l ): """ Sort the given list in the way that humans expect. """ convert = lambda text: int(text) if text.isdigit() else text alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ] l.sort( key=alphanum_key ) ############################################################################## # compute the mixed layer depth over time def MLD(filelist): x0 = 0. tke0 = 1.0e-5 last_mld = 0 times = [] depths = [] for file in filelist: try: os.stat(file) except: print "No such file: %s" % file sys.exit(1) u=vtktools.vtu(file) time = u.GetScalarField('Time') tt = time[0] kk = u.GetScalarField('GLSTurbulentKineticEnergy') pos = u.GetLocations() if (tt < 100): continue xyzkk = [] for i in range(0,len(kk)): if( abs(pos[i,0] - x0) < 0.1 ): xyzkk.append((pos[i,0],-pos[i,1],pos[i,2],(kk[i]))) xyzkkarr = vtktools.arr(xyzkk) III = argsort(xyzkkarr[:,1]) xyzkkarrsort = xyzkkarr[III,:] # march down the column, grabbing the last value above tk0 and the first # one less than tke0. Interpolate between to get the MLD kea = 1000 keb = 0 zza = 0 zzb = 0 for values in xyzkkarrsort: if (values[3] > tke0): kea = values[3] zza = -values[1] if (values[3] < tke0): keb = values[3] zzb = -values[1] break mld = zza if (last_mld == mld): continue times.append(tt/3600) depths.append(-1.0*mld) last_mld = mld return times, depths path = sys.argv[1] x0 = 0. 
tke0 = 1.0e-5 files_to_look_through_ke = [ "Kato_Phillips-mld-k_e-CA", "Kato_Phillips-mld-k_e-CB", "Kato_Phillips-mld-k_e-GL", "Kato_Phillips-mld-k_e-KC" ] files_to_look_through_gen = [ "Kato_Phillips-mld-gen-CA", "Kato_Phillips-mld-gen-CB", "Kato_Phillips-mld-gen-GL", "Kato_Phillips-mld-gen-KC" ] files_to_look_through_kw = [ "Kato_Phillips-mld-k_w-CA", "Kato_Phillips-mld-k_w-CB", "Kato_Phillips-mld-k_w-GL", "Kato_Phillips-mld-k_w-KC" ] files_to_look_through_kkl = [ "Kato_Phillips-mld-k_kl-CA", "Kato_Phillips-mld-k_kl-KC" ] colours = ['r','g','b','#8000FF'] times2 = arange(0, 10, 0.1) Dm = 1.05*1.0e-2*(1.0/sqrt(0.01))*sqrt((times2*60*60)); figke = figure(figsize=(9.172,4.5),dpi=90) ax = figke.add_subplot(111) i = 0 for simulation in files_to_look_through_ke: filelist = glob.glob(path+simulation+"*.vtu") sort_nicely(filelist) times, depths = MLD(filelist) ax.plot(times,depths,colours[i],label=simulation) i = i+1 ax.plot(times2,Dm,'k-',label='Analytical') ax.set_ylim(ax.get_ylim()[::-1]) xlabel('Time (hours)') ylabel('ML Depth (m)') legend(loc=0) savefig(path + '/ke.png', dpi=90,format='png') figkw = figure(figsize=(9.172,4.5),dpi=90) ax = figkw.add_subplot(111) i = 0 for simulation in files_to_look_through_kw: filelist = glob.glob(path+simulation+"*.vtu") sort_nicely(filelist) times, depths = MLD(filelist) ax.plot(times,depths,colours[i],label=simulation) i = i+1 ax.plot(times2,Dm,'k-',label='Analytical') ax.set_ylim(ax.get_ylim()[::-1]) xlabel('Time (hours)') ylabel('ML Depth (m)') legend(loc=0) savefig(path + '/kw.png', dpi=90,format='png') figgen = figure(figsize=(9.172,4.5),dpi=90) ax = figgen.add_subplot(111) i = 0 for simulation in files_to_look_through_gen: filelist = glob.glob(path+simulation+"*.vtu") sort_nicely(filelist) times, depths = MLD(filelist) ax.plot(times,depths,colours[i],label=simulation) i = i+1 ax.plot(times2,Dm,'k-',label='Analytical') ax.set_ylim(ax.get_ylim()[::-1]) xlabel('Time (hours)') ylabel('ML Depth (m)') legend(loc=0) savefig(path + 
'/gen.png', dpi=90,format='png') figkkl = figure(figsize=(9.172,4.5),dpi=90) ax = figkkl.add_subplot(111) i = 0 for simulation in files_to_look_through_kkl: filelist = glob.glob(path+simulation+"*.vtu") sort_nicely(filelist) times, depths = MLD(filelist) ax.plot(times,depths,colours[i],label=simulation) i = i+1 ax.plot(times2,Dm,'k-',label='Analytical') ax.set_ylim(ax.get_ylim()[::-1]) xlabel('Time (hours)') ylabel('ML Depth (m)') legend(loc=0) savefig(path + '/kkl.png', dpi=90,format='png')
lgpl-2.1
End of preview. Expand in Data Studio
README.md exists but content is empty.
Downloads last month
3