repo_name
stringlengths
6
67
path
stringlengths
5
185
copies
stringlengths
1
3
size
stringlengths
4
6
content
stringlengths
1.02k
962k
license
stringclasses
15 values
mlyundin/scikit-learn
sklearn/datasets/mlcomp.py
289
3855
# Copyright (c) 2010 Olivier Grisel <olivier.grisel@ensta.org> # License: BSD 3 clause """Glue code to load http://mlcomp.org data as a scikit.learn dataset""" import os import numbers from sklearn.datasets.base import load_files def _load_document_classification(dataset_path, metadata, set_=None, **kwargs): if set_ is not None: dataset_path = os.path.join(dataset_path, set_) return load_files(dataset_path, metadata.get('description'), **kwargs) LOADERS = { 'DocumentClassification': _load_document_classification, # TODO: implement the remaining domain formats } def load_mlcomp(name_or_id, set_="raw", mlcomp_root=None, **kwargs): """Load a datasets as downloaded from http://mlcomp.org Parameters ---------- name_or_id : the integer id or the string name metadata of the MLComp dataset to load set_ : select the portion to load: 'train', 'test' or 'raw' mlcomp_root : the filesystem path to the root folder where MLComp datasets are stored, if mlcomp_root is None, the MLCOMP_DATASETS_HOME environment variable is looked up instead. **kwargs : domain specific kwargs to be passed to the dataset loader. Read more in the :ref:`User Guide <datasets>`. Returns ------- data : Bunch Dictionary-like object, the interesting attributes are: 'filenames', the files holding the raw to learn, 'target', the classification labels (integer index), 'target_names', the meaning of the labels, and 'DESCR', the full description of the dataset. Note on the lookup process: depending on the type of name_or_id, will choose between integer id lookup or metadata name lookup by looking at the unzipped archives and metadata file. 
TODO: implement zip dataset loading too """ if mlcomp_root is None: try: mlcomp_root = os.environ['MLCOMP_DATASETS_HOME'] except KeyError: raise ValueError("MLCOMP_DATASETS_HOME env variable is undefined") mlcomp_root = os.path.expanduser(mlcomp_root) mlcomp_root = os.path.abspath(mlcomp_root) mlcomp_root = os.path.normpath(mlcomp_root) if not os.path.exists(mlcomp_root): raise ValueError("Could not find folder: " + mlcomp_root) # dataset lookup if isinstance(name_or_id, numbers.Integral): # id lookup dataset_path = os.path.join(mlcomp_root, str(name_or_id)) else: # assume name based lookup dataset_path = None expected_name_line = "name: " + name_or_id for dataset in os.listdir(mlcomp_root): metadata_file = os.path.join(mlcomp_root, dataset, 'metadata') if not os.path.exists(metadata_file): continue with open(metadata_file) as f: for line in f: if line.strip() == expected_name_line: dataset_path = os.path.join(mlcomp_root, dataset) break if dataset_path is None: raise ValueError("Could not find dataset with metadata line: " + expected_name_line) # loading the dataset metadata metadata = dict() metadata_file = os.path.join(dataset_path, 'metadata') if not os.path.exists(metadata_file): raise ValueError(dataset_path + ' is not a valid MLComp dataset') with open(metadata_file) as f: for line in f: if ":" in line: key, value = line.split(":", 1) metadata[key.strip()] = value.strip() format = metadata.get('format', 'unknow') loader = LOADERS.get(format) if loader is None: raise ValueError("No loader implemented for format: " + format) return loader(dataset_path, metadata, set_=set_, **kwargs)
bsd-3-clause
andaag/scikit-learn
examples/feature_selection/plot_rfe_with_cross_validation.py
226
1384
""" =================================================== Recursive feature elimination with cross-validation =================================================== A recursive feature elimination example with automatic tuning of the number of features selected with cross-validation. """ print(__doc__) import matplotlib.pyplot as plt from sklearn.svm import SVC from sklearn.cross_validation import StratifiedKFold from sklearn.feature_selection import RFECV from sklearn.datasets import make_classification # Build a classification task using 3 informative features X, y = make_classification(n_samples=1000, n_features=25, n_informative=3, n_redundant=2, n_repeated=0, n_classes=8, n_clusters_per_class=1, random_state=0) # Create the RFE object and compute a cross-validated score. svc = SVC(kernel="linear") # The "accuracy" scoring is proportional to the number of correct # classifications rfecv = RFECV(estimator=svc, step=1, cv=StratifiedKFold(y, 2), scoring='accuracy') rfecv.fit(X, y) print("Optimal number of features : %d" % rfecv.n_features_) # Plot number of features VS. cross-validation scores plt.figure() plt.xlabel("Number of features selected") plt.ylabel("Cross validation score (nb of correct classifications)") plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_) plt.show()
bsd-3-clause
B3AU/waveTree
examples/bicluster/plot_spectral_biclustering.py
7
2010
""" ============================================= A demo of the Spectral Biclustering algorithm ============================================= This example demonstrates how to generate a checkerboard dataset and bicluster it using the Spectral Biclustering algorithm. The data is generated with the ``make_checkerboard`` function, then shuffled and passed to the Spectral Biclustering algorithm. The rows and columns of the shuffled matrix are rearranged to show the biclusters found by the algorithm. The outer product of the row and column label vectors shows a representation of the checkerboard structure. """ print(__doc__) # Author: Kemal Eren <kemal@kemaleren.com> # License: BSD 3 clause import numpy as np from matplotlib import pyplot as plt from sklearn.datasets import make_checkerboard from sklearn.datasets import samples_generator as sg from sklearn.cluster.bicluster import SpectralBiclustering from sklearn.metrics import consensus_score n_clusters = (4, 3) data, rows, columns = make_checkerboard( shape=(300, 300), n_clusters=n_clusters, noise=10, shuffle=False, random_state=0) plt.matshow(data, cmap=plt.cm.Blues) plt.title("Original dataset") data, row_idx, col_idx = sg._shuffle(data, random_state=0) plt.matshow(data, cmap=plt.cm.Blues) plt.title("Shuffled dataset") model = SpectralBiclustering(n_clusters=n_clusters, method='log', random_state=0) model.fit(data) score = consensus_score(model.biclusters_, (rows[:, row_idx], columns[:, col_idx])) print "consensus score: {:.1f}".format(score) fit_data = data[np.argsort(model.row_labels_)] fit_data = fit_data[:, np.argsort(model.column_labels_)] plt.matshow(fit_data, cmap=plt.cm.Blues) plt.title("After biclustering; rearranged to show biclusters") plt.matshow(np.outer(np.sort(model.row_labels_) + 1, np.sort(model.column_labels_) + 1), cmap=plt.cm.Blues) plt.title("Checkerboard structure of rearranged data") plt.show()
bsd-3-clause
liangz0707/scikit-learn
benchmarks/bench_tree.py
297
3617
""" To run this, you'll need to have installed. * scikit-learn Does two benchmarks First, we fix a training set, increase the number of samples to classify and plot number of classified samples as a function of time. In the second benchmark, we increase the number of dimensions of the training set, classify a sample and plot the time taken as a function of the number of dimensions. """ import numpy as np import pylab as pl import gc from datetime import datetime # to store the results scikit_classifier_results = [] scikit_regressor_results = [] mu_second = 0.0 + 10 ** 6 # number of microseconds in a second def bench_scikit_tree_classifier(X, Y): """Benchmark with scikit-learn decision tree classifier""" from sklearn.tree import DecisionTreeClassifier gc.collect() # start time tstart = datetime.now() clf = DecisionTreeClassifier() clf.fit(X, Y).predict(X) delta = (datetime.now() - tstart) # stop time scikit_classifier_results.append( delta.seconds + delta.microseconds / mu_second) def bench_scikit_tree_regressor(X, Y): """Benchmark with scikit-learn decision tree regressor""" from sklearn.tree import DecisionTreeRegressor gc.collect() # start time tstart = datetime.now() clf = DecisionTreeRegressor() clf.fit(X, Y).predict(X) delta = (datetime.now() - tstart) # stop time scikit_regressor_results.append( delta.seconds + delta.microseconds / mu_second) if __name__ == '__main__': print('============================================') print('Warning: this is going to take a looong time') print('============================================') n = 10 step = 10000 n_samples = 10000 dim = 10 n_classes = 10 for i in range(n): print('============================================') print('Entering iteration %s of %s' % (i, n)) print('============================================') n_samples += step X = np.random.randn(n_samples, dim) Y = np.random.randint(0, n_classes, (n_samples,)) bench_scikit_tree_classifier(X, Y) Y = np.random.randn(n_samples) bench_scikit_tree_regressor(X, Y) 
xx = range(0, n * step, step) pl.figure('scikit-learn tree benchmark results') pl.subplot(211) pl.title('Learning with varying number of samples') pl.plot(xx, scikit_classifier_results, 'g-', label='classification') pl.plot(xx, scikit_regressor_results, 'r-', label='regression') pl.legend(loc='upper left') pl.xlabel('number of samples') pl.ylabel('Time (s)') scikit_classifier_results = [] scikit_regressor_results = [] n = 10 step = 500 start_dim = 500 n_classes = 10 dim = start_dim for i in range(0, n): print('============================================') print('Entering iteration %s of %s' % (i, n)) print('============================================') dim += step X = np.random.randn(100, dim) Y = np.random.randint(0, n_classes, (100,)) bench_scikit_tree_classifier(X, Y) Y = np.random.randn(100) bench_scikit_tree_regressor(X, Y) xx = np.arange(start_dim, start_dim + n * step, step) pl.subplot(212) pl.title('Learning in high dimensional spaces') pl.plot(xx, scikit_classifier_results, 'g-', label='classification') pl.plot(xx, scikit_regressor_results, 'r-', label='regression') pl.legend(loc='upper left') pl.xlabel('number of dimensions') pl.ylabel('Time (s)') pl.axis('tight') pl.show()
bsd-3-clause
mac389/at-risk-agents
analyze-abm.py
1
8030
import os, json,re import numpy as np import Graphics as artist import matplotlib.pyplot as plt plt.switch_backend('Agg') from matplotlib import rcParams from optparse import OptionParser from scipy.stats import percentileofscore,scoreatpercentile rcParams['text.usetex'] = True parser = OptionParser(usage="usage: %prog [options] filename", version="%prog 1.0") parser.add_option("-s", "--source", action="store", dest="source", default=False, help="Folder with data to analyze") (options, args) = parser.parse_args() READ = 'rb' DELIMITER = '\t' basepath = os.path.join(os.getcwd(),options.source) hist_props={"range":[-1,1],"histtype":"stepfilled"} make_filename = lambda filename: os.path.join(os.getcwd(),basepath,filename) USERS = 0 TIME = 1 ''' Questions to ask: 1. Do those with the worst drinking behavior have a different alphas (susceptibilities) than those with the best 2. Is targeted intervention (using our method of identification) effective ''' verbose = False data = {} print 'ABM BASEPATH IS ',basepath directory = json.load(open(os.path.join(os.getcwd(),basepath,'directory.json'),READ)) for variable in directory: if verbose: print 'Analyzing %s'%variable data[variable] = np.loadtxt(directory[variable],delimiter = DELIMITER) if variable == 'complete record': shape = tuple(map(int,re.findall('\d+',directory[variable]))[-3:]) tmp = np.reshape(data[variable],shape) #-- Figure out shape from filename data[variable] = tmp #Bounds for drinking behavior upper_quartile_cutoff = scoreatpercentile(data['past month drinking'],75) lower_quartile_cutoff = scoreatpercentile(data['past month drinking'],25) light_users_idx = np.unique(np.where(data['past month drinking']<lower_quartile_cutoff)[USERS]) heavy_users_idx = np.where(data['past month drinking']>upper_quartile_cutoff)[USERS] periods_of_intense_drinking = {agent:count for agent,count in enumerate(np.bincount(heavy_users_idx)) if count>0} temporal_threshold = scoreatpercentile(periods_of_intense_drinking.values(),75) 
heavy_frequent_users_idx = [agent for agent in periods_of_intense_drinking if periods_of_intense_drinking[agent] > temporal_threshold] heavy_not_frequent_users_idx = np.array(list(set(periods_of_intense_drinking.keys()) - set(heavy_frequent_users_idx))) heavy_users_idx = np.unique(heavy_users_idx) #Identify baseline characteristics of each quartile variable_filenames = [filename for filename in os.listdir(basepath) if 'initial-distribution' in filename] demographics = {filename:np.loadtxt(make_filename(filename),delimiter=DELIMITER) for filename in variable_filenames} nrows = 2 ncols = 3 normalize = lambda data: (data-data.min())/float(data.max()-data.min()) fig,axs = plt.subplots(nrows=nrows,ncols=ncols,sharex=True,sharey=True) yvars = open('./agent-variables',READ).read().splitlines() characteristics = ['initial-distribution-%s.txt'%('-'.join(yvar.split())) for yvar in yvars] #Compare heavy users vs light users for i,col in enumerate(axs): for j,row in enumerate(col): characteristic = characteristics[i*ncols+j] uq = demographics[characteristic][heavy_not_frequent_users_idx] lq = demographics[characteristic][heavy_frequent_users_idx] _,_,patches1=row.hist(uq,color='k',label=artist.format('Heavy Users'),range=(-1,1)) plt.hold(True) _,_,patches2=row.hist(lq,color='r',alpha=0.5,label=artist.format('Heavy Frequent Users'),range=(-1,1)) fig.canvas.mpl_connect('draw_event', artist.on_draw) artist.adjust_spines(row) if 'attitude' not in yvars[i*ncols+j]: row.set_xlabel(artist.format(yvars[i*ncols+j])) elif 'psychological' in yvars[i*ncols+j]: label = '\n'.join(map(artist.format,['Attitude to','psychological','consequences'])) row.set_xlabel(label) elif 'medical' in yvars[i*ncols+j]: label = '\n'.join(map(artist.format,['Attitude','to medical','consequences'])) row.set_xlabel(label) plt.tight_layout() fig.legend((patches1[0], patches2[0]), (artist.format('Heavy Users'),artist.format('Heavy Frequent Users')), loc='lower right', frameon=False, ncol=2) #filename = 
os.path.join(os.getcwd(),basepath,'compare-quartile-demographics-no-temporal-threshold.png') filename = os.path.join(os.getcwd(),basepath,'compare-quartile-demographics-frequent-vs-not-heavy.png') plt.savefig(filename,dpi=300) del fig,axs,i,j fig,axs = plt.subplots(nrows=nrows,ncols=ncols,sharex=True,sharey=True) #Compare heavy users vs frequent users for i,col in enumerate(axs): for j,row in enumerate(col): characteristic = characteristics[i*ncols+j] uq = demographics[characteristic][heavy_users_idx] lq = demographics[characteristic][light_users_idx] _,_,patches1=row.hist(uq,color='k',label=artist.format('Heavy Users'),range=(-1,1)) plt.hold(True) _,_,patches2=row.hist(lq,color='r',alpha=0.5,label=artist.format('Light Users'),range=(-1,1)) fig.canvas.mpl_connect('draw_event', artist.on_draw) artist.adjust_spines(row) if 'attitude' not in yvars[i*ncols+j]: row.set_xlabel(artist.format(yvars[i*ncols+j])) elif 'psychological' in yvars[i*ncols+j]: label = '\n'.join(map(artist.format,['Attitude to','psychological','consequences'])) row.set_xlabel(label) elif 'medical' in yvars[i*ncols+j]: label = '\n'.join(map(artist.format,['Attitude','to medical','consequences'])) row.set_xlabel(label) plt.tight_layout() fig.legend((patches1[0], patches2[0]), (artist.format('Heavy Users'),artist.format('Light Users')), loc='lower right', frameon=False, ncol=2) #filename = os.path.join(os.getcwd(),basepath,'compare-quartile-demographics-no-temporal-threshold.png') filename = os.path.join(os.getcwd(),basepath,'compare-quartile-demographics-light-vs-heavy.png') plt.savefig(filename,dpi=300) del fig,axs data = np.loadtxt(make_filename('alpha.txt'),delimiter=DELIMITER) uq =data[heavy_users_idx] lq = data[heavy_frequent_users_idx] fig = plt.figure() ax = fig.add_subplot(111) ax.hist(uq,color='k',label=artist.format('Heavy Users'),range=(0,1),bins=20) plt.hold(True) ax.hist(lq,color='r',alpha=0.5,label=artist.format('Heavy Frequent Users'),range=(0,1),bins=20) 
fig.canvas.mpl_connect('draw_event', artist.on_draw) artist.adjust_spines(ax) ax.set_ylabel(artist.format('Prevalance')) ax.set_xlabel(artist.format('Social Susceptibility')) plt.legend(frameon=False,ncol=2,loc='upper center',bbox_to_anchor=(.5,1.05)) plt.tight_layout() #plt.savefig(make_filename('susceptibility-no-temporal-threshold.png'),dpi=300) plt.savefig(make_filename('susceptibility-frequent-vs-frequent-heavy.png'),dpi=300) del fig,ax data = np.loadtxt(make_filename('alpha.txt'),delimiter=DELIMITER) uq =data[heavy_not_frequent_users_idx] lq = data[light_users_idx] fig = plt.figure() ax = fig.add_subplot(111) ax.hist(uq,color='k',label=artist.format('Heavy Users'),range=(0,1),bins=20) plt.hold(True) ax.hist(lq,color='r',alpha=0.5,label=artist.format('Light Users'),range=(0,1),bins=20) fig.canvas.mpl_connect('draw_event', artist.on_draw) artist.adjust_spines(ax) ax.set_ylabel(artist.format('Prevalance')) ax.set_xlabel(artist.format('Social Susceptibility')) plt.legend(frameon=False,ncol=2,loc='upper center',bbox_to_anchor=(.5,1.05)) plt.tight_layout() #plt.savefig(make_filename('susceptibility-no-temporal-threshold.png'),dpi=300) plt.savefig(make_filename('susceptibility-heavy-light.png'),dpi=300) #--- Create graphical output visualization.graph_everything(basepath=basepath,moniker=options.target,verbose=False,logfilename=logfilename) #Need a measure to show their behaviors are different visualization.population_summary(moniker=options.target+'-at-risk',basepath=basepath,criterion=list(target_idx), criterionname='at risk') visualization.time_series(moniker=options.target, basepath=basepath,criterion = list(target_idx), criterionname='at risk') visualization.snapshots(drinking_behavior[:,start],drinking_behavior[:,stop-1],moniker='beta-%.02f'%beta,basepath=basepath)
mit
rseubert/scikit-learn
sklearn/neighbors/approximate.py
3
21294
"""Approximate nearest neighbor search""" # Author: Maheshakya Wijewardena <maheshakya.10@cse.mrt.ac.lk> # Joel Nothman <joel.nothman@gmail.com> import numpy as np import warnings from scipy import sparse from .base import KNeighborsMixin, RadiusNeighborsMixin from ..base import BaseEstimator from ..utils.validation import check_array from ..utils import check_random_state from ..metrics.pairwise import pairwise_distances from ..random_projection import GaussianRandomProjection __all__ = ["LSHForest"] HASH_DTYPE = '>u4' MAX_HASH_SIZE = np.dtype(HASH_DTYPE).itemsize * 8 def _find_matching_indices(tree, bin_X, left_mask, right_mask): """Finds indices in sorted array of integers. Most significant h bits in the binary representations of the integers are matched with the items' most significant h bits. """ left_index = np.searchsorted(tree, bin_X & left_mask) right_index = np.searchsorted(tree, bin_X | right_mask, side='right') return left_index, right_index def _find_longest_prefix_match(tree, bin_X, hash_size, left_masks, right_masks): """Find the longest prefix match in tree for each query in bin_X Most significant bits are considered as the prefix. 
""" hi = np.empty_like(bin_X, dtype=np.intp) hi.fill(hash_size) lo = np.zeros_like(bin_X, dtype=np.intp) res = np.empty_like(bin_X, dtype=np.intp) left_idx, right_idx = _find_matching_indices(tree, bin_X, left_masks[hi], right_masks[hi]) found = right_idx > left_idx res[found] = lo[found] = hash_size r = np.arange(bin_X.shape[0]) kept = r[lo < hi] # indices remaining in bin_X mask while kept.shape[0]: mid = (lo.take(kept) + hi.take(kept)) // 2 left_idx, right_idx = _find_matching_indices(tree, bin_X.take(kept), left_masks[mid], right_masks[mid]) found = right_idx > left_idx mid_found = mid[found] lo[kept[found]] = mid_found + 1 res[kept[found]] = mid_found hi[kept[~found]] = mid[~found] kept = r[lo < hi] return res class ProjectionToHashMixin(object): """Turn a transformed real-valued array into a hash""" @staticmethod def _to_hash(projected): if projected.shape[1] % 8 != 0: raise ValueError('Require reduced dimensionality to be a multiple ' 'of 8 for hashing') # XXX: perhaps non-copying operation better out = np.packbits((projected > 0).astype(int)).view(dtype=HASH_DTYPE) return out.reshape(projected.shape[0], -1) def fit_transform(self, X, y=None): self.fit(X) return self.transform(X) def transform(self, X, y=None): return self._to_hash(super(ProjectionToHashMixin, self).transform(X)) class GaussianRandomProjectionHash(ProjectionToHashMixin, GaussianRandomProjection): """Use GaussianRandomProjection to produce a cosine LSH fingerprint""" def __init__(self, n_components=8, random_state=None): super(GaussianRandomProjectionHash, self).__init__( n_components=n_components, random_state=random_state) def _array_of_arrays(list_of_arrays): """Creates an array of array from list of arrays.""" out = np.empty(len(list_of_arrays), dtype=object) out[:] = list_of_arrays return out class LSHForest(BaseEstimator, KNeighborsMixin, RadiusNeighborsMixin): """Performs approximate nearest neighbor search using LSH forest. 
LSH Forest: Locality Sensitive Hashing forest [1] is an alternative method for vanilla approximate nearest neighbor search methods. LSH forest data structure has been implemented using sorted arrays and binary search and 32 bit fixed-length hashes. Random projection is used as the hash family which approximates cosine distance. Parameters ---------- n_estimators : int (default = 10) Number of trees in the LSH Forest. min_hash_match : int (default = 4) lowest hash length to be searched when candidate selection is performed for nearest neighbors. n_candidates : int (default = 10) Minimum number of candidates evaluated per estimator, assuming enough items meet the `min_hash_match` constraint. n_neighbors : int (default = 5) Number of neighbors to be returned from query function when it is not provided to the :meth:`kneighbors` method. radius : float, optinal (default = 1.0) Radius from the data point to its neighbors. This is the parameter space to use by default for the :meth`radius_neighbors` queries. radius_cutoff_ratio : float, optional (default = 0.9) A value ranges from 0 to 1. Radius neighbors will be searched until the ratio between total neighbors within the radius and the total candidates becomes less than this value unless it is terminated by hash length reaching `min_hash_match`. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Attributes ---------- hash_functions_ : list of GaussianRandomProjectionHash objects Hash function g(p,x) for a tree is an array of 32 randomly generated float arrays with the same dimenstion as the data set. This array is stored in GaussianRandomProjectionHash object and can be obtained from ``components_`` attribute. 
trees_ : array, shape (n_estimators, n_samples) Each tree (corresponding to a hash function) contains an array of sorted hashed values. The array representation may change in future versions. original_indices_ : array, shape (n_estimators, n_samples) Original indices of sorted hashed values in the fitted index. References ---------- .. [1] M. Bawa, T. Condie and P. Ganesan, "LSH Forest: Self-Tuning Indexes for Similarity Search", WWW '05 Proceedings of the 14th international conference on World Wide Web, 651-660, 2005. Examples -------- >>> from sklearn.neighbors import LSHForest >>> X_train = [[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1], [6, 10, 2]] >>> X_test = [[9, 1, 6], [3, 1, 10], [7, 10, 3]] >>> lshf = LSHForest() >>> lshf.fit(X_train) # doctest: +NORMALIZE_WHITESPACE LSHForest(min_hash_match=4, n_candidates=50, n_estimators=10, n_neighbors=5, radius=1.0, radius_cutoff_ratio=0.9, random_state=None) >>> distances, indices = lshf.kneighbors(X_test, n_neighbors=2) >>> distances # doctest: +ELLIPSIS array([[ 0.069..., 0.149...], [ 0.229..., 0.481...], [ 0.004..., 0.014...]]) >>> indices array([[1, 2], [2, 0], [4, 0]]) """ def __init__(self, n_estimators=10, radius=1.0, n_candidates=50, n_neighbors=5, min_hash_match=4, radius_cutoff_ratio=.9, random_state=None): self.n_estimators = n_estimators self.radius = radius self.random_state = random_state self.n_candidates = n_candidates self.n_neighbors = n_neighbors self.min_hash_match = min_hash_match self.radius_cutoff_ratio = radius_cutoff_ratio def _compute_distances(self, query, candidates): """Computes the cosine distance. Distance is from the query to points in the candidates array. Returns argsort of distances in the candidates array and sorted distances. 
""" if candidates.shape == (0,): # needed since _fit_X[np.array([])] doesn't work if _fit_X sparse return np.empty(0, dtype=np.int), np.empty(0, dtype=float) distances = pairwise_distances(query, self._fit_X[candidates], metric='cosine')[0] distance_positions = np.argsort(distances) return distance_positions, distances[distance_positions] def _generate_masks(self): """Creates left and right masks for all hash lengths.""" tri_size = MAX_HASH_SIZE + 1 # Called once on fitting, output is independent of hashes left_mask = np.tril(np.ones((tri_size, tri_size), dtype=int))[:, 1:] right_mask = left_mask[::-1, ::-1] self._left_mask = np.packbits(left_mask).view(dtype=HASH_DTYPE) self._right_mask = np.packbits(right_mask).view(dtype=HASH_DTYPE) def _get_candidates(self, query, max_depth, bin_queries, n_neighbors): """Performs the Synchronous ascending phase. Returns an array of candidates, their distance ranks and distances. """ index_size = self._fit_X.shape[0] # Number of candidates considered including duplicates # XXX: not sure whether this is being calculated correctly wrt # duplicates from different iterations through a single tree n_candidates = 0 candidate_set = set() min_candidates = self.n_candidates * self.n_estimators while (max_depth > self.min_hash_match and (n_candidates < min_candidates or len(candidate_set) < n_neighbors)): left_mask = self._left_mask[max_depth] right_mask = self._right_mask[max_depth] for i in range(self.n_estimators): start, stop = _find_matching_indices(self.trees_[i], bin_queries[i], left_mask, right_mask) n_candidates += stop - start candidate_set.update( self.original_indices_[i][start:stop].tolist()) max_depth -= 1 candidates = np.fromiter(candidate_set, count=len(candidate_set), dtype=np.intp) # For insufficient candidates, candidates are filled. # Candidates are filled from unselected indices uniformly. 
if candidates.shape[0] < n_neighbors: warnings.warn( "Number of candidates is not sufficient to retrieve" " %i neighbors with" " min_hash_match = %i. Candidates are filled up" " uniformly from unselected" " indices." % (n_neighbors, self.min_hash_match)) remaining = np.setdiff1d(np.arange(0, index_size), candidates) to_fill = n_neighbors - candidates.shape[0] candidates = np.concatenate((candidates, remaining[:to_fill])) ranks, distances = self._compute_distances(query, candidates.astype(int)) return (candidates[ranks[:n_neighbors]], distances[:n_neighbors]) def _get_radius_neighbors(self, query, max_depth, bin_queries, radius): """Finds radius neighbors from the candidates obtained. Their distances from query are smaller than radius. Returns radius neighbors and distances. """ ratio_within_radius = 1 threshold = 1 - self.radius_cutoff_ratio total_candidates = np.array([], dtype=int) total_neighbors = np.array([], dtype=int) total_distances = np.array([], dtype=float) while (max_depth > self.min_hash_match and ratio_within_radius > threshold): left_mask = self._left_mask[max_depth] right_mask = self._right_mask[max_depth] candidates = [] for i in range(self.n_estimators): start, stop = _find_matching_indices(self.trees_[i], bin_queries[i], left_mask, right_mask) candidates.extend( self.original_indices_[i][start:stop].tolist()) candidates = np.setdiff1d(candidates, total_candidates) total_candidates = np.append(total_candidates, candidates) ranks, distances = self._compute_distances(query, candidates) m = np.searchsorted(distances, radius, side='right') positions = np.searchsorted(total_distances, distances[:m]) total_neighbors = np.insert(total_neighbors, positions, candidates[ranks[:m]]) total_distances = np.insert(total_distances, positions, distances[:m]) ratio_within_radius = (total_neighbors.shape[0] / float(total_candidates.shape[0])) max_depth = max_depth - 1 return total_neighbors, total_distances def fit(self, X, y=None): """Fit the LSH forest on the 
data. This creates binary hashes of input data points by getting the dot product of input points and hash_function then transforming the projection into a binary string array based on the sign (positive/negative) of the projection. A sorted array of binary hashes is created. Parameters ---------- X : array_like or sparse (CSR) matrix, shape (n_samples, n_features) List of n_features-dimensional data points. Each row corresponds to a single data point. Returns ------- self : object Returns self. """ self._fit_X = check_array(X, accept_sparse='csr') # Creates a g(p,x) for each tree self.hash_functions_ = [] self.trees_ = [] self.original_indices_ = [] rng = check_random_state(self.random_state) int_max = np.iinfo(np.int32).max for i in range(self.n_estimators): # This is g(p,x) for a particular tree. # Builds a single tree. Hashing is done on an array of data points. # `GaussianRandomProjection` is used for hashing. # `n_components=hash size and n_features=n_dim. hasher = GaussianRandomProjectionHash(MAX_HASH_SIZE, rng.randint(0, int_max)) hashes = hasher.fit_transform(self._fit_X)[:, 0] original_index = np.argsort(hashes) bin_hashes = hashes[original_index] self.original_indices_.append(original_index) self.trees_.append(bin_hashes) self.hash_functions_.append(hasher) self._generate_masks() return self def _query(self, X): """Performs descending phase to find maximum depth.""" # Calculate hashes of shape (n_samples, n_estimators, [hash_size]) bin_queries = np.asarray([hasher.transform(X)[:, 0] for hasher in self.hash_functions_]) bin_queries = np.rollaxis(bin_queries, 1) # descend phase depths = [_find_longest_prefix_match(tree, tree_queries, MAX_HASH_SIZE, self._left_mask, self._right_mask) for tree, tree_queries in zip(self.trees_, np.rollaxis(bin_queries, 1))] return bin_queries, np.max(depths, axis=0) def kneighbors(self, X, n_neighbors=None, return_distance=True): """ Returns the n_number of approximated nearest neighbors Parameters ---------- X : array_like or 
sparse (CSR) matrix, shape (n_samples, n_features) List of n_features-dimensional data points. Each row corresponds to a single query. n_neighbors : int, opitonal (default = None) Number of neighbors required. If not provided, this will return the number specified at the initialization. return_distance : boolean, optional (default = False) Returns the distances of neighbors if set to True. Returns ------- dist : array, shape (n_samples, n_neighbors) Array representing the cosine distances to each point, only present if return_distance=True. ind : array, shape (n_samples, n_neighbors) Indices of the approximate nearest points in the population matrix. """ if not hasattr(self, 'hash_functions_'): raise ValueError("estimator should be fitted.") if n_neighbors is None: n_neighbors = self.n_neighbors X = check_array(X, accept_sparse='csr') neighbors, distances = [], [] bin_queries, max_depth = self._query(X) for i in range(X.shape[0]): neighs, dists = self._get_candidates(X[i], max_depth[i], bin_queries[i], n_neighbors) neighbors.append(neighs) distances.append(dists) if return_distance: return np.array(distances), np.array(neighbors) else: return np.array(neighbors) def radius_neighbors(self, X, radius=None, return_distance=True): """ Returns the approximated nearest neighbors within the radius Parameters ---------- X : array_like or sparse (CSR) matrix, shape (n_samples, n_features) List of n_features-dimensional data points. Each row corresponds to a single query. radius : float Limiting distance of neighbors to return. (default is the value passed to the constructor). return_distance : boolean, optional (default = False) Returns the distances of neighbors if set to True. Returns ------- dist : array, shape (n_samples,) of arrays Array representing the cosine distances to each point, only present if return_distance=True. 
ind : array, shape (n_samples,) of arrays An array of arrays of indices of the approximated nearest points with in the `radius` to the query in the population matrix. """ if not hasattr(self, 'hash_functions_'): raise ValueError("estimator should be fitted.") if radius is None: radius = self.radius X = check_array(X, accept_sparse='csr') neighbors, distances = [], [] bin_queries, max_depth = self._query(X) for i in range(X.shape[0]): neighs, dists = self._get_radius_neighbors(X[i], max_depth[i], bin_queries[i], radius) neighbors.append(neighs) distances.append(dists) if return_distance: return _array_of_arrays(distances), _array_of_arrays(neighbors) else: return _array_of_arrays(neighbors) def partial_fit(self, X, y=None): """ Inserts new data into the already fitted LSH Forest. Cost is proportional to new total size, so additions should be batched. Parameters ---------- X : array_like or sparse (CSR) matrix, shape (n_samples, n_features) New data point to be inserted into the LSH Forest. """ X = check_array(X, accept_sparse='csr') if not hasattr(self, 'hash_functions_'): return self.fit(X) if X.shape[1] != self._fit_X.shape[1]: raise ValueError("Number of features in X and" " fitted array does not match.") n_samples = X.shape[0] n_indexed = self._fit_X.shape[0] for i in range(self.n_estimators): bin_X = self.hash_functions_[i].transform(X)[:, 0] # gets the position to be added in the tree. positions = self.trees_[i].searchsorted(bin_X) # adds the hashed value into the tree. self.trees_[i] = np.insert(self.trees_[i], positions, bin_X) # add the entry into the original_indices_. self.original_indices_[i] = np.insert(self.original_indices_[i], positions, np.arange(n_indexed, n_indexed + n_samples)) # adds the entry into the input_array. if sparse.issparse(X) or sparse.issparse(self._fit_X): self._fit_X = sparse.vstack((self._fit_X, X)) else: self._fit_X = np.row_stack((self._fit_X, X)) return self
bsd-3-clause
thientu/scikit-learn
sklearn/utils/arpack.py
265
64837
""" This contains a copy of the future version of scipy.sparse.linalg.eigen.arpack.eigsh It's an upgraded wrapper of the ARPACK library which allows the use of shift-invert mode for symmetric matrices. Find a few eigenvectors and eigenvalues of a matrix. Uses ARPACK: http://www.caam.rice.edu/software/ARPACK/ """ # Wrapper implementation notes # # ARPACK Entry Points # ------------------- # The entry points to ARPACK are # - (s,d)seupd : single and double precision symmetric matrix # - (s,d,c,z)neupd: single,double,complex,double complex general matrix # This wrapper puts the *neupd (general matrix) interfaces in eigs() # and the *seupd (symmetric matrix) in eigsh(). # There is no Hermetian complex/double complex interface. # To find eigenvalues of a Hermetian matrix you # must use eigs() and not eigsh() # It might be desirable to handle the Hermetian case differently # and, for example, return real eigenvalues. # Number of eigenvalues returned and complex eigenvalues # ------------------------------------------------------ # The ARPACK nonsymmetric real and double interface (s,d)naupd return # eigenvalues and eigenvectors in real (float,double) arrays. # Since the eigenvalues and eigenvectors are, in general, complex # ARPACK puts the real and imaginary parts in consecutive entries # in real-valued arrays. This wrapper puts the real entries # into complex data types and attempts to return the requested eigenvalues # and eigenvectors. # Solver modes # ------------ # ARPACK and handle shifted and shift-inverse computations # for eigenvalues by providing a shift (sigma) and a solver. 
__docformat__ = "restructuredtext en" __all__ = ['eigs', 'eigsh', 'svds', 'ArpackError', 'ArpackNoConvergence'] import warnings from scipy.sparse.linalg.eigen.arpack import _arpack import numpy as np from scipy.sparse.linalg.interface import aslinearoperator, LinearOperator from scipy.sparse import identity, isspmatrix, isspmatrix_csr from scipy.linalg import lu_factor, lu_solve from scipy.sparse.sputils import isdense from scipy.sparse.linalg import gmres, splu import scipy from distutils.version import LooseVersion _type_conv = {'f': 's', 'd': 'd', 'F': 'c', 'D': 'z'} _ndigits = {'f': 5, 'd': 12, 'F': 5, 'D': 12} DNAUPD_ERRORS = { 0: "Normal exit.", 1: "Maximum number of iterations taken. " "All possible eigenvalues of OP has been found. IPARAM(5) " "returns the number of wanted converged Ritz values.", 2: "No longer an informational error. Deprecated starting " "with release 2 of ARPACK.", 3: "No shifts could be applied during a cycle of the " "Implicitly restarted Arnoldi iteration. One possibility " "is to increase the size of NCV relative to NEV. ", -1: "N must be positive.", -2: "NEV must be positive.", -3: "NCV-NEV >= 2 and less than or equal to N.", -4: "The maximum number of Arnoldi update iterations allowed " "must be greater than zero.", -5: " WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'", -6: "BMAT must be one of 'I' or 'G'.", -7: "Length of private work array WORKL is not sufficient.", -8: "Error return from LAPACK eigenvalue calculation;", -9: "Starting vector is zero.", -10: "IPARAM(7) must be 1,2,3,4.", -11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.", -12: "IPARAM(1) must be equal to 0 or 1.", -13: "NEV and WHICH = 'BE' are incompatible.", -9999: "Could not build an Arnoldi factorization. " "IPARAM(5) returns the size of the current Arnoldi " "factorization. The user is advised to check that " "enough workspace and array storage has been allocated." 
} SNAUPD_ERRORS = DNAUPD_ERRORS ZNAUPD_ERRORS = DNAUPD_ERRORS.copy() ZNAUPD_ERRORS[-10] = "IPARAM(7) must be 1,2,3." CNAUPD_ERRORS = ZNAUPD_ERRORS DSAUPD_ERRORS = { 0: "Normal exit.", 1: "Maximum number of iterations taken. " "All possible eigenvalues of OP has been found.", 2: "No longer an informational error. Deprecated starting with " "release 2 of ARPACK.", 3: "No shifts could be applied during a cycle of the Implicitly " "restarted Arnoldi iteration. One possibility is to increase " "the size of NCV relative to NEV. ", -1: "N must be positive.", -2: "NEV must be positive.", -3: "NCV must be greater than NEV and less than or equal to N.", -4: "The maximum number of Arnoldi update iterations allowed " "must be greater than zero.", -5: "WHICH must be one of 'LM', 'SM', 'LA', 'SA' or 'BE'.", -6: "BMAT must be one of 'I' or 'G'.", -7: "Length of private work array WORKL is not sufficient.", -8: "Error return from trid. eigenvalue calculation; " "Informational error from LAPACK routine dsteqr .", -9: "Starting vector is zero.", -10: "IPARAM(7) must be 1,2,3,4,5.", -11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.", -12: "IPARAM(1) must be equal to 0 or 1.", -13: "NEV and WHICH = 'BE' are incompatible. ", -9999: "Could not build an Arnoldi factorization. " "IPARAM(5) returns the size of the current Arnoldi " "factorization. The user is advised to check that " "enough workspace and array storage has been allocated.", } SSAUPD_ERRORS = DSAUPD_ERRORS DNEUPD_ERRORS = { 0: "Normal exit.", 1: "The Schur form computed by LAPACK routine dlahqr " "could not be reordered by LAPACK routine dtrsen. " "Re-enter subroutine dneupd with IPARAM(5)NCV and " "increase the size of the arrays DR and DI to have " "dimension at least dimension NCV and allocate at least NCV " "columns for Z. NOTE: Not necessary if Z and V share " "the same space. 
Please notify the authors if this error " "occurs.", -1: "N must be positive.", -2: "NEV must be positive.", -3: "NCV-NEV >= 2 and less than or equal to N.", -5: "WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'", -6: "BMAT must be one of 'I' or 'G'.", -7: "Length of private work WORKL array is not sufficient.", -8: "Error return from calculation of a real Schur form. " "Informational error from LAPACK routine dlahqr .", -9: "Error return from calculation of eigenvectors. " "Informational error from LAPACK routine dtrevc.", -10: "IPARAM(7) must be 1,2,3,4.", -11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.", -12: "HOWMNY = 'S' not yet implemented", -13: "HOWMNY must be one of 'A' or 'P' if RVEC = .true.", -14: "DNAUPD did not find any eigenvalues to sufficient " "accuracy.", -15: "DNEUPD got a different count of the number of converged " "Ritz values than DNAUPD got. This indicates the user " "probably made an error in passing data from DNAUPD to " "DNEUPD or that the data was modified before entering " "DNEUPD", } SNEUPD_ERRORS = DNEUPD_ERRORS.copy() SNEUPD_ERRORS[1] = ("The Schur form computed by LAPACK routine slahqr " "could not be reordered by LAPACK routine strsen . " "Re-enter subroutine dneupd with IPARAM(5)=NCV and " "increase the size of the arrays DR and DI to have " "dimension at least dimension NCV and allocate at least " "NCV columns for Z. NOTE: Not necessary if Z and V share " "the same space. Please notify the authors if this error " "occurs.") SNEUPD_ERRORS[-14] = ("SNAUPD did not find any eigenvalues to sufficient " "accuracy.") SNEUPD_ERRORS[-15] = ("SNEUPD got a different count of the number of " "converged Ritz values than SNAUPD got. This indicates " "the user probably made an error in passing data from " "SNAUPD to SNEUPD or that the data was modified before " "entering SNEUPD") ZNEUPD_ERRORS = {0: "Normal exit.", 1: "The Schur form computed by LAPACK routine csheqr " "could not be reordered by LAPACK routine ztrsen. 
" "Re-enter subroutine zneupd with IPARAM(5)=NCV and " "increase the size of the array D to have " "dimension at least dimension NCV and allocate at least " "NCV columns for Z. NOTE: Not necessary if Z and V share " "the same space. Please notify the authors if this error " "occurs.", -1: "N must be positive.", -2: "NEV must be positive.", -3: "NCV-NEV >= 1 and less than or equal to N.", -5: "WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'", -6: "BMAT must be one of 'I' or 'G'.", -7: "Length of private work WORKL array is not sufficient.", -8: "Error return from LAPACK eigenvalue calculation. " "This should never happened.", -9: "Error return from calculation of eigenvectors. " "Informational error from LAPACK routine ztrevc.", -10: "IPARAM(7) must be 1,2,3", -11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.", -12: "HOWMNY = 'S' not yet implemented", -13: "HOWMNY must be one of 'A' or 'P' if RVEC = .true.", -14: "ZNAUPD did not find any eigenvalues to sufficient " "accuracy.", -15: "ZNEUPD got a different count of the number of " "converged Ritz values than ZNAUPD got. This " "indicates the user probably made an error in passing " "data from ZNAUPD to ZNEUPD or that the data was " "modified before entering ZNEUPD"} CNEUPD_ERRORS = ZNEUPD_ERRORS.copy() CNEUPD_ERRORS[-14] = ("CNAUPD did not find any eigenvalues to sufficient " "accuracy.") CNEUPD_ERRORS[-15] = ("CNEUPD got a different count of the number of " "converged Ritz values than CNAUPD got. This indicates " "the user probably made an error in passing data from " "CNAUPD to CNEUPD or that the data was modified before " "entering CNEUPD") DSEUPD_ERRORS = { 0: "Normal exit.", -1: "N must be positive.", -2: "NEV must be positive.", -3: "NCV must be greater than NEV and less than or equal to N.", -5: "WHICH must be one of 'LM', 'SM', 'LA', 'SA' or 'BE'.", -6: "BMAT must be one of 'I' or 'G'.", -7: "Length of private work WORKL array is not sufficient.", -8: ("Error return from trid. 
eigenvalue calculation; " "Information error from LAPACK routine dsteqr."), -9: "Starting vector is zero.", -10: "IPARAM(7) must be 1,2,3,4,5.", -11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.", -12: "NEV and WHICH = 'BE' are incompatible.", -14: "DSAUPD did not find any eigenvalues to sufficient accuracy.", -15: "HOWMNY must be one of 'A' or 'S' if RVEC = .true.", -16: "HOWMNY = 'S' not yet implemented", -17: ("DSEUPD got a different count of the number of converged " "Ritz values than DSAUPD got. This indicates the user " "probably made an error in passing data from DSAUPD to " "DSEUPD or that the data was modified before entering " "DSEUPD.") } SSEUPD_ERRORS = DSEUPD_ERRORS.copy() SSEUPD_ERRORS[-14] = ("SSAUPD did not find any eigenvalues " "to sufficient accuracy.") SSEUPD_ERRORS[-17] = ("SSEUPD got a different count of the number of " "converged " "Ritz values than SSAUPD got. This indicates the user " "probably made an error in passing data from SSAUPD to " "SSEUPD or that the data was modified before entering " "SSEUPD.") _SAUPD_ERRORS = {'d': DSAUPD_ERRORS, 's': SSAUPD_ERRORS} _NAUPD_ERRORS = {'d': DNAUPD_ERRORS, 's': SNAUPD_ERRORS, 'z': ZNAUPD_ERRORS, 'c': CNAUPD_ERRORS} _SEUPD_ERRORS = {'d': DSEUPD_ERRORS, 's': SSEUPD_ERRORS} _NEUPD_ERRORS = {'d': DNEUPD_ERRORS, 's': SNEUPD_ERRORS, 'z': ZNEUPD_ERRORS, 'c': CNEUPD_ERRORS} # accepted values of parameter WHICH in _SEUPD _SEUPD_WHICH = ['LM', 'SM', 'LA', 'SA', 'BE'] # accepted values of parameter WHICH in _NAUPD _NEUPD_WHICH = ['LM', 'SM', 'LR', 'SR', 'LI', 'SI'] class ArpackError(RuntimeError): """ ARPACK error """ def __init__(self, info, infodict=_NAUPD_ERRORS): msg = infodict.get(info, "Unknown error") RuntimeError.__init__(self, "ARPACK error %d: %s" % (info, msg)) class ArpackNoConvergence(ArpackError): """ ARPACK iteration did not converge Attributes ---------- eigenvalues : ndarray Partial result. Converged eigenvalues. eigenvectors : ndarray Partial result. Converged eigenvectors. 
""" def __init__(self, msg, eigenvalues, eigenvectors): ArpackError.__init__(self, -1, {-1: msg}) self.eigenvalues = eigenvalues self.eigenvectors = eigenvectors class _ArpackParams(object): def __init__(self, n, k, tp, mode=1, sigma=None, ncv=None, v0=None, maxiter=None, which="LM", tol=0): if k <= 0: raise ValueError("k must be positive, k=%d" % k) if maxiter is None: maxiter = n * 10 if maxiter <= 0: raise ValueError("maxiter must be positive, maxiter=%d" % maxiter) if tp not in 'fdFD': raise ValueError("matrix type must be 'f', 'd', 'F', or 'D'") if v0 is not None: # ARPACK overwrites its initial resid, make a copy self.resid = np.array(v0, copy=True) info = 1 else: self.resid = np.zeros(n, tp) info = 0 if sigma is None: #sigma not used self.sigma = 0 else: self.sigma = sigma if ncv is None: ncv = 2 * k + 1 ncv = min(ncv, n) self.v = np.zeros((n, ncv), tp) # holds Ritz vectors self.iparam = np.zeros(11, "int") # set solver mode and parameters ishfts = 1 self.mode = mode self.iparam[0] = ishfts self.iparam[2] = maxiter self.iparam[3] = 1 self.iparam[6] = mode self.n = n self.tol = tol self.k = k self.maxiter = maxiter self.ncv = ncv self.which = which self.tp = tp self.info = info self.converged = False self.ido = 0 def _raise_no_convergence(self): msg = "No convergence (%d iterations, %d/%d eigenvectors converged)" k_ok = self.iparam[4] num_iter = self.iparam[2] try: ev, vec = self.extract(True) except ArpackError as err: msg = "%s [%s]" % (msg, err) ev = np.zeros((0,)) vec = np.zeros((self.n, 0)) k_ok = 0 raise ArpackNoConvergence(msg % (num_iter, k_ok, self.k), ev, vec) class _SymmetricArpackParams(_ArpackParams): def __init__(self, n, k, tp, matvec, mode=1, M_matvec=None, Minv_matvec=None, sigma=None, ncv=None, v0=None, maxiter=None, which="LM", tol=0): # The following modes are supported: # mode = 1: # Solve the standard eigenvalue problem: # A*x = lambda*x : # A - symmetric # Arguments should be # matvec = left multiplication by A # M_matvec = None [not 
used] # Minv_matvec = None [not used] # # mode = 2: # Solve the general eigenvalue problem: # A*x = lambda*M*x # A - symmetric # M - symmetric positive definite # Arguments should be # matvec = left multiplication by A # M_matvec = left multiplication by M # Minv_matvec = left multiplication by M^-1 # # mode = 3: # Solve the general eigenvalue problem in shift-invert mode: # A*x = lambda*M*x # A - symmetric # M - symmetric positive semi-definite # Arguments should be # matvec = None [not used] # M_matvec = left multiplication by M # or None, if M is the identity # Minv_matvec = left multiplication by [A-sigma*M]^-1 # # mode = 4: # Solve the general eigenvalue problem in Buckling mode: # A*x = lambda*AG*x # A - symmetric positive semi-definite # AG - symmetric indefinite # Arguments should be # matvec = left multiplication by A # M_matvec = None [not used] # Minv_matvec = left multiplication by [A-sigma*AG]^-1 # # mode = 5: # Solve the general eigenvalue problem in Cayley-transformed mode: # A*x = lambda*M*x # A - symmetric # M - symmetric positive semi-definite # Arguments should be # matvec = left multiplication by A # M_matvec = left multiplication by M # or None, if M is the identity # Minv_matvec = left multiplication by [A-sigma*M]^-1 if mode == 1: if matvec is None: raise ValueError("matvec must be specified for mode=1") if M_matvec is not None: raise ValueError("M_matvec cannot be specified for mode=1") if Minv_matvec is not None: raise ValueError("Minv_matvec cannot be specified for mode=1") self.OP = matvec self.B = lambda x: x self.bmat = 'I' elif mode == 2: if matvec is None: raise ValueError("matvec must be specified for mode=2") if M_matvec is None: raise ValueError("M_matvec must be specified for mode=2") if Minv_matvec is None: raise ValueError("Minv_matvec must be specified for mode=2") self.OP = lambda x: Minv_matvec(matvec(x)) self.OPa = Minv_matvec self.OPb = matvec self.B = M_matvec self.bmat = 'G' elif mode == 3: if matvec is not None: raise 
ValueError("matvec must not be specified for mode=3") if Minv_matvec is None: raise ValueError("Minv_matvec must be specified for mode=3") if M_matvec is None: self.OP = Minv_matvec self.OPa = Minv_matvec self.B = lambda x: x self.bmat = 'I' else: self.OP = lambda x: Minv_matvec(M_matvec(x)) self.OPa = Minv_matvec self.B = M_matvec self.bmat = 'G' elif mode == 4: if matvec is None: raise ValueError("matvec must be specified for mode=4") if M_matvec is not None: raise ValueError("M_matvec must not be specified for mode=4") if Minv_matvec is None: raise ValueError("Minv_matvec must be specified for mode=4") self.OPa = Minv_matvec self.OP = lambda x: self.OPa(matvec(x)) self.B = matvec self.bmat = 'G' elif mode == 5: if matvec is None: raise ValueError("matvec must be specified for mode=5") if Minv_matvec is None: raise ValueError("Minv_matvec must be specified for mode=5") self.OPa = Minv_matvec self.A_matvec = matvec if M_matvec is None: self.OP = lambda x: Minv_matvec(matvec(x) + sigma * x) self.B = lambda x: x self.bmat = 'I' else: self.OP = lambda x: Minv_matvec(matvec(x) + sigma * M_matvec(x)) self.B = M_matvec self.bmat = 'G' else: raise ValueError("mode=%i not implemented" % mode) if which not in _SEUPD_WHICH: raise ValueError("which must be one of %s" % ' '.join(_SEUPD_WHICH)) if k >= n: raise ValueError("k must be less than rank(A), k=%d" % k) _ArpackParams.__init__(self, n, k, tp, mode, sigma, ncv, v0, maxiter, which, tol) if self.ncv > n or self.ncv <= k: raise ValueError("ncv must be k<ncv<=n, ncv=%s" % self.ncv) self.workd = np.zeros(3 * n, self.tp) self.workl = np.zeros(self.ncv * (self.ncv + 8), self.tp) ltr = _type_conv[self.tp] if ltr not in ["s", "d"]: raise ValueError("Input matrix is not real-valued.") self._arpack_solver = _arpack.__dict__[ltr + 'saupd'] self._arpack_extract = _arpack.__dict__[ltr + 'seupd'] self.iterate_infodict = _SAUPD_ERRORS[ltr] self.extract_infodict = _SEUPD_ERRORS[ltr] self.ipntr = np.zeros(11, "int") def iterate(self): 
self.ido, self.resid, self.v, self.iparam, self.ipntr, self.info = \ self._arpack_solver(self.ido, self.bmat, self.which, self.k, self.tol, self.resid, self.v, self.iparam, self.ipntr, self.workd, self.workl, self.info) xslice = slice(self.ipntr[0] - 1, self.ipntr[0] - 1 + self.n) yslice = slice(self.ipntr[1] - 1, self.ipntr[1] - 1 + self.n) if self.ido == -1: # initialization self.workd[yslice] = self.OP(self.workd[xslice]) elif self.ido == 1: # compute y = Op*x if self.mode == 1: self.workd[yslice] = self.OP(self.workd[xslice]) elif self.mode == 2: self.workd[xslice] = self.OPb(self.workd[xslice]) self.workd[yslice] = self.OPa(self.workd[xslice]) elif self.mode == 5: Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n) Ax = self.A_matvec(self.workd[xslice]) self.workd[yslice] = self.OPa(Ax + (self.sigma * self.workd[Bxslice])) else: Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n) self.workd[yslice] = self.OPa(self.workd[Bxslice]) elif self.ido == 2: self.workd[yslice] = self.B(self.workd[xslice]) elif self.ido == 3: raise ValueError("ARPACK requested user shifts. 
Assure ISHIFT==0") else: self.converged = True if self.info == 0: pass elif self.info == 1: self._raise_no_convergence() else: raise ArpackError(self.info, infodict=self.iterate_infodict) def extract(self, return_eigenvectors): rvec = return_eigenvectors ierr = 0 howmny = 'A' # return all eigenvectors sselect = np.zeros(self.ncv, 'int') # unused d, z, ierr = self._arpack_extract(rvec, howmny, sselect, self.sigma, self.bmat, self.which, self.k, self.tol, self.resid, self.v, self.iparam[0:7], self.ipntr, self.workd[0:2 * self.n], self.workl, ierr) if ierr != 0: raise ArpackError(ierr, infodict=self.extract_infodict) k_ok = self.iparam[4] d = d[:k_ok] z = z[:, :k_ok] if return_eigenvectors: return d, z else: return d class _UnsymmetricArpackParams(_ArpackParams): def __init__(self, n, k, tp, matvec, mode=1, M_matvec=None, Minv_matvec=None, sigma=None, ncv=None, v0=None, maxiter=None, which="LM", tol=0): # The following modes are supported: # mode = 1: # Solve the standard eigenvalue problem: # A*x = lambda*x # A - square matrix # Arguments should be # matvec = left multiplication by A # M_matvec = None [not used] # Minv_matvec = None [not used] # # mode = 2: # Solve the generalized eigenvalue problem: # A*x = lambda*M*x # A - square matrix # M - symmetric, positive semi-definite # Arguments should be # matvec = left multiplication by A # M_matvec = left multiplication by M # Minv_matvec = left multiplication by M^-1 # # mode = 3,4: # Solve the general eigenvalue problem in shift-invert mode: # A*x = lambda*M*x # A - square matrix # M - symmetric, positive semi-definite # Arguments should be # matvec = None [not used] # M_matvec = left multiplication by M # or None, if M is the identity # Minv_matvec = left multiplication by [A-sigma*M]^-1 # if A is real and mode==3, use the real part of Minv_matvec # if A is real and mode==4, use the imag part of Minv_matvec # if A is complex and mode==3, # use real and imag parts of Minv_matvec if mode == 1: if matvec is None: raise 
ValueError("matvec must be specified for mode=1") if M_matvec is not None: raise ValueError("M_matvec cannot be specified for mode=1") if Minv_matvec is not None: raise ValueError("Minv_matvec cannot be specified for mode=1") self.OP = matvec self.B = lambda x: x self.bmat = 'I' elif mode == 2: if matvec is None: raise ValueError("matvec must be specified for mode=2") if M_matvec is None: raise ValueError("M_matvec must be specified for mode=2") if Minv_matvec is None: raise ValueError("Minv_matvec must be specified for mode=2") self.OP = lambda x: Minv_matvec(matvec(x)) self.OPa = Minv_matvec self.OPb = matvec self.B = M_matvec self.bmat = 'G' elif mode in (3, 4): if matvec is None: raise ValueError("matvec must be specified " "for mode in (3,4)") if Minv_matvec is None: raise ValueError("Minv_matvec must be specified " "for mode in (3,4)") self.matvec = matvec if tp in 'DF': # complex type if mode == 3: self.OPa = Minv_matvec else: raise ValueError("mode=4 invalid for complex A") else: # real type if mode == 3: self.OPa = lambda x: np.real(Minv_matvec(x)) else: self.OPa = lambda x: np.imag(Minv_matvec(x)) if M_matvec is None: self.B = lambda x: x self.bmat = 'I' self.OP = self.OPa else: self.B = M_matvec self.bmat = 'G' self.OP = lambda x: self.OPa(M_matvec(x)) else: raise ValueError("mode=%i not implemented" % mode) if which not in _NEUPD_WHICH: raise ValueError("Parameter which must be one of %s" % ' '.join(_NEUPD_WHICH)) if k >= n - 1: raise ValueError("k must be less than rank(A)-1, k=%d" % k) _ArpackParams.__init__(self, n, k, tp, mode, sigma, ncv, v0, maxiter, which, tol) if self.ncv > n or self.ncv <= k + 1: raise ValueError("ncv must be k+1<ncv<=n, ncv=%s" % self.ncv) self.workd = np.zeros(3 * n, self.tp) self.workl = np.zeros(3 * self.ncv * (self.ncv + 2), self.tp) ltr = _type_conv[self.tp] self._arpack_solver = _arpack.__dict__[ltr + 'naupd'] self._arpack_extract = _arpack.__dict__[ltr + 'neupd'] self.iterate_infodict = _NAUPD_ERRORS[ltr] 
self.extract_infodict = _NEUPD_ERRORS[ltr] self.ipntr = np.zeros(14, "int") if self.tp in 'FD': self.rwork = np.zeros(self.ncv, self.tp.lower()) else: self.rwork = None def iterate(self): if self.tp in 'fd': self.ido, self.resid, self.v, self.iparam, self.ipntr, self.info =\ self._arpack_solver(self.ido, self.bmat, self.which, self.k, self.tol, self.resid, self.v, self.iparam, self.ipntr, self.workd, self.workl, self.info) else: self.ido, self.resid, self.v, self.iparam, self.ipntr, self.info =\ self._arpack_solver(self.ido, self.bmat, self.which, self.k, self.tol, self.resid, self.v, self.iparam, self.ipntr, self.workd, self.workl, self.rwork, self.info) xslice = slice(self.ipntr[0] - 1, self.ipntr[0] - 1 + self.n) yslice = slice(self.ipntr[1] - 1, self.ipntr[1] - 1 + self.n) if self.ido == -1: # initialization self.workd[yslice] = self.OP(self.workd[xslice]) elif self.ido == 1: # compute y = Op*x if self.mode in (1, 2): self.workd[yslice] = self.OP(self.workd[xslice]) else: Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n) self.workd[yslice] = self.OPa(self.workd[Bxslice]) elif self.ido == 2: self.workd[yslice] = self.B(self.workd[xslice]) elif self.ido == 3: raise ValueError("ARPACK requested user shifts. 
Assure ISHIFT==0") else: self.converged = True if self.info == 0: pass elif self.info == 1: self._raise_no_convergence() else: raise ArpackError(self.info, infodict=self.iterate_infodict) def extract(self, return_eigenvectors): k, n = self.k, self.n ierr = 0 howmny = 'A' # return all eigenvectors sselect = np.zeros(self.ncv, 'int') # unused sigmar = np.real(self.sigma) sigmai = np.imag(self.sigma) workev = np.zeros(3 * self.ncv, self.tp) if self.tp in 'fd': dr = np.zeros(k + 1, self.tp) di = np.zeros(k + 1, self.tp) zr = np.zeros((n, k + 1), self.tp) dr, di, zr, ierr = \ self._arpack_extract( return_eigenvectors, howmny, sselect, sigmar, sigmai, workev, self.bmat, self.which, k, self.tol, self.resid, self.v, self.iparam, self.ipntr, self.workd, self.workl, self.info) if ierr != 0: raise ArpackError(ierr, infodict=self.extract_infodict) nreturned = self.iparam[4] # number of good eigenvalues returned # Build complex eigenvalues from real and imaginary parts d = dr + 1.0j * di # Arrange the eigenvectors: complex eigenvectors are stored as # real,imaginary in consecutive columns z = zr.astype(self.tp.upper()) # The ARPACK nonsymmetric real and double interface (s,d)naupd # return eigenvalues and eigenvectors in real (float,double) # arrays. # Efficiency: this should check that return_eigenvectors == True # before going through this construction. if sigmai == 0: i = 0 while i <= k: # check if complex if abs(d[i].imag) != 0: # this is a complex conjugate pair with eigenvalues # in consecutive columns if i < k: z[:, i] = zr[:, i] + 1.0j * zr[:, i + 1] z[:, i + 1] = z[:, i].conjugate() i += 1 else: #last eigenvalue is complex: the imaginary part of # the eigenvector has not been returned #this can only happen if nreturned > k, so we'll # throw out this case. 
nreturned -= 1 i += 1 else: # real matrix, mode 3 or 4, imag(sigma) is nonzero: # see remark 3 in <s,d>neupd.f # Build complex eigenvalues from real and imaginary parts i = 0 while i <= k: if abs(d[i].imag) == 0: d[i] = np.dot(zr[:, i], self.matvec(zr[:, i])) else: if i < k: z[:, i] = zr[:, i] + 1.0j * zr[:, i + 1] z[:, i + 1] = z[:, i].conjugate() d[i] = ((np.dot(zr[:, i], self.matvec(zr[:, i])) + np.dot(zr[:, i + 1], self.matvec(zr[:, i + 1]))) + 1j * (np.dot(zr[:, i], self.matvec(zr[:, i + 1])) - np.dot(zr[:, i + 1], self.matvec(zr[:, i])))) d[i + 1] = d[i].conj() i += 1 else: #last eigenvalue is complex: the imaginary part of # the eigenvector has not been returned #this can only happen if nreturned > k, so we'll # throw out this case. nreturned -= 1 i += 1 # Now we have k+1 possible eigenvalues and eigenvectors # Return the ones specified by the keyword "which" if nreturned <= k: # we got less or equal as many eigenvalues we wanted d = d[:nreturned] z = z[:, :nreturned] else: # we got one extra eigenvalue (likely a cc pair, but which?) # cut at approx precision for sorting rd = np.round(d, decimals=_ndigits[self.tp]) if self.which in ['LR', 'SR']: ind = np.argsort(rd.real) elif self.which in ['LI', 'SI']: # for LI,SI ARPACK returns largest,smallest # abs(imaginary) why? ind = np.argsort(abs(rd.imag)) else: ind = np.argsort(abs(rd)) if self.which in ['LR', 'LM', 'LI']: d = d[ind[-k:]] z = z[:, ind[-k:]] if self.which in ['SR', 'SM', 'SI']: d = d[ind[:k]] z = z[:, ind[:k]] else: # complex is so much simpler... 
d, z, ierr =\ self._arpack_extract( return_eigenvectors, howmny, sselect, self.sigma, workev, self.bmat, self.which, k, self.tol, self.resid, self.v, self.iparam, self.ipntr, self.workd, self.workl, self.rwork, ierr) if ierr != 0: raise ArpackError(ierr, infodict=self.extract_infodict) k_ok = self.iparam[4] d = d[:k_ok] z = z[:, :k_ok] if return_eigenvectors: return d, z else: return d def _aslinearoperator_with_dtype(m): m = aslinearoperator(m) if not hasattr(m, 'dtype'): x = np.zeros(m.shape[1]) m.dtype = (m * x).dtype return m class SpLuInv(LinearOperator): """ SpLuInv: helper class to repeatedly solve M*x=b using a sparse LU-decopposition of M """ def __init__(self, M): self.M_lu = splu(M) LinearOperator.__init__(self, M.shape, self._matvec, dtype=M.dtype) self.isreal = not np.issubdtype(self.dtype, np.complexfloating) def _matvec(self, x): # careful here: splu.solve will throw away imaginary # part of x if M is real if self.isreal and np.issubdtype(x.dtype, np.complexfloating): return (self.M_lu.solve(np.real(x)) + 1j * self.M_lu.solve(np.imag(x))) else: return self.M_lu.solve(x) class LuInv(LinearOperator): """ LuInv: helper class to repeatedly solve M*x=b using an LU-decomposition of M """ def __init__(self, M): self.M_lu = lu_factor(M) LinearOperator.__init__(self, M.shape, self._matvec, dtype=M.dtype) def _matvec(self, x): return lu_solve(self.M_lu, x) class IterInv(LinearOperator): """ IterInv: helper class to repeatedly solve M*x=b using an iterative method. """ def __init__(self, M, ifunc=gmres, tol=0): if tol <= 0: # when tol=0, ARPACK uses machine tolerance as calculated # by LAPACK's _LAMCH function. 
We should match this tol = np.finfo(M.dtype).eps self.M = M self.ifunc = ifunc self.tol = tol if hasattr(M, 'dtype'): dtype = M.dtype else: x = np.zeros(M.shape[1]) dtype = (M * x).dtype LinearOperator.__init__(self, M.shape, self._matvec, dtype=dtype) def _matvec(self, x): b, info = self.ifunc(self.M, x, tol=self.tol) if info != 0: raise ValueError("Error in inverting M: function " "%s did not converge (info = %i)." % (self.ifunc.__name__, info)) return b class IterOpInv(LinearOperator): """ IterOpInv: helper class to repeatedly solve [A-sigma*M]*x = b using an iterative method """ def __init__(self, A, M, sigma, ifunc=gmres, tol=0): if tol <= 0: # when tol=0, ARPACK uses machine tolerance as calculated # by LAPACK's _LAMCH function. We should match this tol = np.finfo(A.dtype).eps self.A = A self.M = M self.sigma = sigma self.ifunc = ifunc self.tol = tol x = np.zeros(A.shape[1]) if M is None: dtype = self.mult_func_M_None(x).dtype self.OP = LinearOperator(self.A.shape, self.mult_func_M_None, dtype=dtype) else: dtype = self.mult_func(x).dtype self.OP = LinearOperator(self.A.shape, self.mult_func, dtype=dtype) LinearOperator.__init__(self, A.shape, self._matvec, dtype=dtype) def mult_func(self, x): return self.A.matvec(x) - self.sigma * self.M.matvec(x) def mult_func_M_None(self, x): return self.A.matvec(x) - self.sigma * x def _matvec(self, x): b, info = self.ifunc(self.OP, x, tol=self.tol) if info != 0: raise ValueError("Error in inverting [A-sigma*M]: function " "%s did not converge (info = %i)." 
% (self.ifunc.__name__, info)) return b def get_inv_matvec(M, symmetric=False, tol=0): if isdense(M): return LuInv(M).matvec elif isspmatrix(M): if isspmatrix_csr(M) and symmetric: M = M.T return SpLuInv(M).matvec else: return IterInv(M, tol=tol).matvec def get_OPinv_matvec(A, M, sigma, symmetric=False, tol=0): if sigma == 0: return get_inv_matvec(A, symmetric=symmetric, tol=tol) if M is None: #M is the identity matrix if isdense(A): if (np.issubdtype(A.dtype, np.complexfloating) or np.imag(sigma) == 0): A = np.copy(A) else: A = A + 0j A.flat[::A.shape[1] + 1] -= sigma return LuInv(A).matvec elif isspmatrix(A): A = A - sigma * identity(A.shape[0]) if symmetric and isspmatrix_csr(A): A = A.T return SpLuInv(A.tocsc()).matvec else: return IterOpInv(_aslinearoperator_with_dtype(A), M, sigma, tol=tol).matvec else: if ((not isdense(A) and not isspmatrix(A)) or (not isdense(M) and not isspmatrix(M))): return IterOpInv(_aslinearoperator_with_dtype(A), _aslinearoperator_with_dtype(M), sigma, tol=tol).matvec elif isdense(A) or isdense(M): return LuInv(A - sigma * M).matvec else: OP = A - sigma * M if symmetric and isspmatrix_csr(OP): OP = OP.T return SpLuInv(OP.tocsc()).matvec def _eigs(A, k=6, M=None, sigma=None, which='LM', v0=None, ncv=None, maxiter=None, tol=0, return_eigenvectors=True, Minv=None, OPinv=None, OPpart=None): """ Find k eigenvalues and eigenvectors of the square matrix A. Solves ``A * x[i] = w[i] * x[i]``, the standard eigenvalue problem for w[i] eigenvalues with corresponding eigenvectors x[i]. If M is specified, solves ``A * x[i] = w[i] * M * x[i]``, the generalized eigenvalue problem for w[i] eigenvalues with corresponding eigenvectors x[i] Parameters ---------- A : An N x N matrix, array, sparse matrix, or LinearOperator representing \ the operation A * x, where A is a real or complex square matrix. k : int, default 6 The number of eigenvalues and eigenvectors desired. `k` must be smaller than N. 
It is not possible to compute all eigenvectors of a matrix. return_eigenvectors : boolean, default True Whether to return the eigenvectors along with the eigenvalues. M : An N x N matrix, array, sparse matrix, or LinearOperator representing the operation M*x for the generalized eigenvalue problem ``A * x = w * M * x`` M must represent a real symmetric matrix. For best results, M should be of the same type as A. Additionally: * If sigma==None, M is positive definite * If sigma is specified, M is positive semi-definite If sigma==None, eigs requires an operator to compute the solution of the linear equation `M * x = b`. This is done internally via a (sparse) LU decomposition for an explicit matrix M, or via an iterative solver for a general linear operator. Alternatively, the user can supply the matrix or operator Minv, which gives x = Minv * b = M^-1 * b sigma : real or complex Find eigenvalues near sigma using shift-invert mode. This requires an operator to compute the solution of the linear system `[A - sigma * M] * x = b`, where M is the identity matrix if unspecified. This is computed internally via a (sparse) LU decomposition for explicit matrices A & M, or via an iterative solver if either A or M is a general linear operator. Alternatively, the user can supply the matrix or operator OPinv, which gives x = OPinv * b = [A - sigma * M]^-1 * b. For a real matrix A, shift-invert can either be done in imaginary mode or real mode, specified by the parameter OPpart ('r' or 'i'). Note that when sigma is specified, the keyword 'which' (below) refers to the shifted eigenvalues w'[i] where: * If A is real and OPpart == 'r' (default), w'[i] = 1/2 * [ 1/(w[i]-sigma) + 1/(w[i]-conj(sigma)) ] * If A is real and OPpart == 'i', w'[i] = 1/2i * [ 1/(w[i]-sigma) - 1/(w[i]-conj(sigma)) ] * If A is complex, w'[i] = 1/(w[i]-sigma) v0 : array Starting vector for iteration. 
ncv : integer The number of Lanczos vectors generated `ncv` must be greater than `k`; it is recommended that ``ncv > 2*k``. which : string ['LM' | 'SM' | 'LR' | 'SR' | 'LI' | 'SI'] Which `k` eigenvectors and eigenvalues to find: - 'LM' : largest magnitude - 'SM' : smallest magnitude - 'LR' : largest real part - 'SR' : smallest real part - 'LI' : largest imaginary part - 'SI' : smallest imaginary part When sigma != None, 'which' refers to the shifted eigenvalues w'[i] (see discussion in 'sigma', above). ARPACK is generally better at finding large values than small values. If small eigenvalues are desired, consider using shift-invert mode for better performance. maxiter : integer Maximum number of Arnoldi update iterations allowed tol : float Relative accuracy for eigenvalues (stopping criterion) The default value of 0 implies machine precision. return_eigenvectors : boolean Return eigenvectors (True) in addition to eigenvalues Minv : N x N matrix, array, sparse matrix, or linear operator See notes in M, above. OPinv : N x N matrix, array, sparse matrix, or linear operator See notes in sigma, above. OPpart : 'r' or 'i'. See notes in sigma, above Returns ------- w : array Array of k eigenvalues. v : array An array of `k` eigenvectors. ``v[:, i]`` is the eigenvector corresponding to the eigenvalue w[i]. Raises ------ ArpackNoConvergence When the requested convergence is not obtained. The currently converged eigenvalues and eigenvectors can be found as ``eigenvalues`` and ``eigenvectors`` attributes of the exception object. 
See Also -------- eigsh : eigenvalues and eigenvectors for symmetric matrix A svds : singular value decomposition for a matrix A Examples -------- Find 6 eigenvectors of the identity matrix: >>> from sklearn.utils.arpack import eigs >>> id = np.identity(13) >>> vals, vecs = eigs(id, k=6) >>> vals array([ 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j]) >>> vecs.shape (13, 6) Notes ----- This function is a wrapper to the ARPACK [1]_ SNEUPD, DNEUPD, CNEUPD, ZNEUPD, functions which use the Implicitly Restarted Arnoldi Method to find the eigenvalues and eigenvectors [2]_. References ---------- .. [1] ARPACK Software, http://www.caam.rice.edu/software/ARPACK/ .. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang, ARPACK USERS GUIDE: Solution of Large Scale Eigenvalue Problems by Implicitly Restarted Arnoldi Methods. SIAM, Philadelphia, PA, 1998. """ if A.shape[0] != A.shape[1]: raise ValueError('expected square matrix (shape=%s)' % (A.shape,)) if M is not None: if M.shape != A.shape: raise ValueError('wrong M dimensions %s, should be %s' % (M.shape, A.shape)) if np.dtype(M.dtype).char.lower() != np.dtype(A.dtype).char.lower(): warnings.warn('M does not have the same type precision as A. 
' 'This may adversely affect ARPACK convergence') n = A.shape[0] if k <= 0 or k >= n: raise ValueError("k must be between 1 and rank(A)-1") if sigma is None: matvec = _aslinearoperator_with_dtype(A).matvec if OPinv is not None: raise ValueError("OPinv should not be specified " "with sigma = None.") if OPpart is not None: raise ValueError("OPpart should not be specified with " "sigma = None or complex A") if M is None: #standard eigenvalue problem mode = 1 M_matvec = None Minv_matvec = None if Minv is not None: raise ValueError("Minv should not be " "specified with M = None.") else: #general eigenvalue problem mode = 2 if Minv is None: Minv_matvec = get_inv_matvec(M, symmetric=True, tol=tol) else: Minv = _aslinearoperator_with_dtype(Minv) Minv_matvec = Minv.matvec M_matvec = _aslinearoperator_with_dtype(M).matvec else: #sigma is not None: shift-invert mode if np.issubdtype(A.dtype, np.complexfloating): if OPpart is not None: raise ValueError("OPpart should not be specified " "with sigma=None or complex A") mode = 3 elif OPpart is None or OPpart.lower() == 'r': mode = 3 elif OPpart.lower() == 'i': if np.imag(sigma) == 0: raise ValueError("OPpart cannot be 'i' if sigma is real") mode = 4 else: raise ValueError("OPpart must be one of ('r','i')") matvec = _aslinearoperator_with_dtype(A).matvec if Minv is not None: raise ValueError("Minv should not be specified when sigma is") if OPinv is None: Minv_matvec = get_OPinv_matvec(A, M, sigma, symmetric=False, tol=tol) else: OPinv = _aslinearoperator_with_dtype(OPinv) Minv_matvec = OPinv.matvec if M is None: M_matvec = None else: M_matvec = _aslinearoperator_with_dtype(M).matvec params = _UnsymmetricArpackParams(n, k, A.dtype.char, matvec, mode, M_matvec, Minv_matvec, sigma, ncv, v0, maxiter, which, tol) while not params.converged: params.iterate() return params.extract(return_eigenvectors) def _eigsh(A, k=6, M=None, sigma=None, which='LM', v0=None, ncv=None, maxiter=None, tol=0, return_eigenvectors=True, Minv=None, 
OPinv=None, mode='normal'): """ Find k eigenvalues and eigenvectors of the real symmetric square matrix or complex hermitian matrix A. Solves ``A * x[i] = w[i] * x[i]``, the standard eigenvalue problem for w[i] eigenvalues with corresponding eigenvectors x[i]. If M is specified, solves ``A * x[i] = w[i] * M * x[i]``, the generalized eigenvalue problem for w[i] eigenvalues with corresponding eigenvectors x[i] Parameters ---------- A : An N x N matrix, array, sparse matrix, or LinearOperator representing the operation A * x, where A is a real symmetric matrix For buckling mode (see below) A must additionally be positive-definite k : integer The number of eigenvalues and eigenvectors desired. `k` must be smaller than N. It is not possible to compute all eigenvectors of a matrix. M : An N x N matrix, array, sparse matrix, or linear operator representing the operation M * x for the generalized eigenvalue problem ``A * x = w * M * x``. M must represent a real, symmetric matrix. For best results, M should be of the same type as A. Additionally: * If sigma == None, M is symmetric positive definite * If sigma is specified, M is symmetric positive semi-definite * In buckling mode, M is symmetric indefinite. If sigma == None, eigsh requires an operator to compute the solution of the linear equation `M * x = b`. This is done internally via a (sparse) LU decomposition for an explicit matrix M, or via an iterative solver for a general linear operator. Alternatively, the user can supply the matrix or operator Minv, which gives x = Minv * b = M^-1 * b sigma : real Find eigenvalues near sigma using shift-invert mode. This requires an operator to compute the solution of the linear system `[A - sigma * M] x = b`, where M is the identity matrix if unspecified. This is computed internally via a (sparse) LU decomposition for explicit matrices A & M, or via an iterative solver if either A or M is a general linear operator. 
Alternatively, the user can supply the matrix or operator OPinv, which gives x = OPinv * b = [A - sigma * M]^-1 * b. Note that when sigma is specified, the keyword 'which' refers to the shifted eigenvalues w'[i] where: - if mode == 'normal', w'[i] = 1 / (w[i] - sigma) - if mode == 'cayley', w'[i] = (w[i] + sigma) / (w[i] - sigma) - if mode == 'buckling', w'[i] = w[i] / (w[i] - sigma) (see further discussion in 'mode' below) v0 : array Starting vector for iteration. ncv : integer The number of Lanczos vectors generated ncv must be greater than k and smaller than n; it is recommended that ncv > 2*k which : string ['LM' | 'SM' | 'LA' | 'SA' | 'BE'] If A is a complex hermitian matrix, 'BE' is invalid. Which `k` eigenvectors and eigenvalues to find - 'LM' : Largest (in magnitude) eigenvalues - 'SM' : Smallest (in magnitude) eigenvalues - 'LA' : Largest (algebraic) eigenvalues - 'SA' : Smallest (algebraic) eigenvalues - 'BE' : Half (k/2) from each end of the spectrum When k is odd, return one more (k/2+1) from the high end When sigma != None, 'which' refers to the shifted eigenvalues w'[i] (see discussion in 'sigma', above). ARPACK is generally better at finding large values than small values. If small eigenvalues are desired, consider using shift-invert mode for better performance. maxiter : integer Maximum number of Arnoldi update iterations allowed tol : float Relative accuracy for eigenvalues (stopping criterion). The default value of 0 implies machine precision. Minv : N x N matrix, array, sparse matrix, or LinearOperator See notes in M, above OPinv : N x N matrix, array, sparse matrix, or LinearOperator See notes in sigma, above. return_eigenvectors : boolean Return eigenvectors (True) in addition to eigenvalues mode : string ['normal' | 'buckling' | 'cayley'] Specify strategy to use for shift-invert mode. This argument applies only for real-valued A and sigma != None. 
For shift-invert mode, ARPACK internally solves the eigenvalue problem ``OP * x'[i] = w'[i] * B * x'[i]`` and transforms the resulting Ritz vectors x'[i] and Ritz values w'[i] into the desired eigenvectors and eigenvalues of the problem ``A * x[i] = w[i] * M * x[i]``. The modes are as follows: - 'normal' : OP = [A - sigma * M]^-1 * M B = M w'[i] = 1 / (w[i] - sigma) - 'buckling' : OP = [A - sigma * M]^-1 * A B = A w'[i] = w[i] / (w[i] - sigma) - 'cayley' : OP = [A - sigma * M]^-1 * [A + sigma * M] B = M w'[i] = (w[i] + sigma) / (w[i] - sigma) The choice of mode will affect which eigenvalues are selected by the keyword 'which', and can also impact the stability of convergence (see [2] for a discussion) Returns ------- w : array Array of k eigenvalues v : array An array of k eigenvectors The v[i] is the eigenvector corresponding to the eigenvector w[i] Raises ------ ArpackNoConvergence When the requested convergence is not obtained. The currently converged eigenvalues and eigenvectors can be found as ``eigenvalues`` and ``eigenvectors`` attributes of the exception object. See Also -------- eigs : eigenvalues and eigenvectors for a general (nonsymmetric) matrix A svds : singular value decomposition for a matrix A Notes ----- This function is a wrapper to the ARPACK [1]_ SSEUPD and DSEUPD functions which use the Implicitly Restarted Lanczos Method to find the eigenvalues and eigenvectors [2]_. Examples -------- >>> from sklearn.utils.arpack import eigsh >>> id = np.identity(13) >>> vals, vecs = eigsh(id, k=6) >>> vals # doctest: +SKIP array([ 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j]) >>> print(vecs.shape) (13, 6) References ---------- .. [1] ARPACK Software, http://www.caam.rice.edu/software/ARPACK/ .. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang, ARPACK USERS GUIDE: Solution of Large Scale Eigenvalue Problems by Implicitly Restarted Arnoldi Methods. SIAM, Philadelphia, PA, 1998. 
""" # complex hermitian matrices should be solved with eigs if np.issubdtype(A.dtype, np.complexfloating): if mode != 'normal': raise ValueError("mode=%s cannot be used with " "complex matrix A" % mode) if which == 'BE': raise ValueError("which='BE' cannot be used with complex matrix A") elif which == 'LA': which = 'LR' elif which == 'SA': which = 'SR' ret = eigs(A, k, M=M, sigma=sigma, which=which, v0=v0, ncv=ncv, maxiter=maxiter, tol=tol, return_eigenvectors=return_eigenvectors, Minv=Minv, OPinv=OPinv) if return_eigenvectors: return ret[0].real, ret[1] else: return ret.real if A.shape[0] != A.shape[1]: raise ValueError('expected square matrix (shape=%s)' % (A.shape,)) if M is not None: if M.shape != A.shape: raise ValueError('wrong M dimensions %s, should be %s' % (M.shape, A.shape)) if np.dtype(M.dtype).char.lower() != np.dtype(A.dtype).char.lower(): warnings.warn('M does not have the same type precision as A. ' 'This may adversely affect ARPACK convergence') n = A.shape[0] if k <= 0 or k >= n: raise ValueError("k must be between 1 and rank(A)-1") if sigma is None: A = _aslinearoperator_with_dtype(A) matvec = A.matvec if OPinv is not None: raise ValueError("OPinv should not be specified " "with sigma = None.") if M is None: #standard eigenvalue problem mode = 1 M_matvec = None Minv_matvec = None if Minv is not None: raise ValueError("Minv should not be " "specified with M = None.") else: #general eigenvalue problem mode = 2 if Minv is None: Minv_matvec = get_inv_matvec(M, symmetric=True, tol=tol) else: Minv = _aslinearoperator_with_dtype(Minv) Minv_matvec = Minv.matvec M_matvec = _aslinearoperator_with_dtype(M).matvec else: # sigma is not None: shift-invert mode if Minv is not None: raise ValueError("Minv should not be specified when sigma is") # normal mode if mode == 'normal': mode = 3 matvec = None if OPinv is None: Minv_matvec = get_OPinv_matvec(A, M, sigma, symmetric=True, tol=tol) else: OPinv = _aslinearoperator_with_dtype(OPinv) Minv_matvec = OPinv.matvec 
if M is None: M_matvec = None else: M = _aslinearoperator_with_dtype(M) M_matvec = M.matvec # buckling mode elif mode == 'buckling': mode = 4 if OPinv is None: Minv_matvec = get_OPinv_matvec(A, M, sigma, symmetric=True, tol=tol) else: Minv_matvec = _aslinearoperator_with_dtype(OPinv).matvec matvec = _aslinearoperator_with_dtype(A).matvec M_matvec = None # cayley-transform mode elif mode == 'cayley': mode = 5 matvec = _aslinearoperator_with_dtype(A).matvec if OPinv is None: Minv_matvec = get_OPinv_matvec(A, M, sigma, symmetric=True, tol=tol) else: Minv_matvec = _aslinearoperator_with_dtype(OPinv).matvec if M is None: M_matvec = None else: M_matvec = _aslinearoperator_with_dtype(M).matvec # unrecognized mode else: raise ValueError("unrecognized mode '%s'" % mode) params = _SymmetricArpackParams(n, k, A.dtype.char, matvec, mode, M_matvec, Minv_matvec, sigma, ncv, v0, maxiter, which, tol) while not params.converged: params.iterate() return params.extract(return_eigenvectors) def _svds(A, k=6, ncv=None, tol=0): """Compute k singular values/vectors for a sparse matrix using ARPACK. Parameters ---------- A : sparse matrix Array to compute the SVD on k : int, optional Number of singular values and vectors to compute. ncv : integer The number of Lanczos vectors generated ncv must be greater than k+1 and smaller than n; it is recommended that ncv > 2*k tol : float, optional Tolerance for singular values. Zero (default) means machine precision. Notes ----- This is a naive implementation using an eigensolver on A.H * A or A * A.H, depending on which one is more efficient. 
""" if not (isinstance(A, np.ndarray) or isspmatrix(A)): A = np.asarray(A) n, m = A.shape if np.issubdtype(A.dtype, np.complexfloating): herm = lambda x: x.T.conjugate() eigensolver = eigs else: herm = lambda x: x.T eigensolver = eigsh if n > m: X = A XH = herm(A) else: XH = A X = herm(A) if hasattr(XH, 'dot'): def matvec_XH_X(x): return XH.dot(X.dot(x)) else: def matvec_XH_X(x): return np.dot(XH, np.dot(X, x)) XH_X = LinearOperator(matvec=matvec_XH_X, dtype=X.dtype, shape=(X.shape[1], X.shape[1])) # Ignore deprecation warnings here: dot on matrices is deprecated, # but this code is a backport anyhow with warnings.catch_warnings(): warnings.simplefilter('ignore', DeprecationWarning) eigvals, eigvec = eigensolver(XH_X, k=k, tol=tol ** 2) s = np.sqrt(eigvals) if n > m: v = eigvec if hasattr(X, 'dot'): u = X.dot(v) / s else: u = np.dot(X, v) / s vh = herm(v) else: u = eigvec if hasattr(X, 'dot'): vh = herm(X.dot(u) / s) else: vh = herm(np.dot(X, u) / s) return u, s, vh # check if backport is actually needed: if scipy.version.version >= LooseVersion('0.10'): from scipy.sparse.linalg import eigs, eigsh, svds else: eigs, eigsh, svds = _eigs, _eigsh, _svds
bsd-3-clause
scollis/EGU16
cluster/profile_mpi0/ipython_console_config.py
4
21693
# Configuration file for ipython-console. c = get_config() #------------------------------------------------------------------------------ # ZMQTerminalIPythonApp configuration #------------------------------------------------------------------------------ # ZMQTerminalIPythonApp will inherit config from: TerminalIPythonApp, # BaseIPythonApplication, Application, InteractiveShellApp, IPythonConsoleApp, # ConnectionFileMixin # Run the file referenced by the PYTHONSTARTUP environment variable at IPython # startup. # c.ZMQTerminalIPythonApp.exec_PYTHONSTARTUP = True # Whether to install the default config files into the profile dir. If a new # profile is being created, and IPython contains config files for that profile, # then they will be staged into the new directory. Otherwise, default config # files will be automatically generated. # c.ZMQTerminalIPythonApp.copy_config_files = False # Set the kernel's IP address [default localhost]. If the IP address is # something other than localhost, then Consoles on other machines will be able # to connect to the Kernel, so be careful! # c.ZMQTerminalIPythonApp.ip = u'' # Pre-load matplotlib and numpy for interactive use, selecting a particular # matplotlib backend and loop integration. # c.ZMQTerminalIPythonApp.pylab = None # Connect to an already running kernel # c.ZMQTerminalIPythonApp.existing = '' # Run the module as a script. # c.ZMQTerminalIPythonApp.module_to_run = '' # The date format used by logging formatters for %(asctime)s # c.ZMQTerminalIPythonApp.log_datefmt = '%Y-%m-%d %H:%M:%S' # set the shell (ROUTER) port [default: random] # c.ZMQTerminalIPythonApp.shell_port = 0 # Whether to overwrite existing config files when copying # c.ZMQTerminalIPythonApp.overwrite = False # Execute the given command string. # c.ZMQTerminalIPythonApp.code_to_run = '' # set the stdin (ROUTER) port [default: random] # c.ZMQTerminalIPythonApp.stdin_port = 0 # Set the log level by value or name. 
# c.ZMQTerminalIPythonApp.log_level = 30 # Path to the ssh key to use for logging in to the ssh server. # c.ZMQTerminalIPythonApp.sshkey = '' # lines of code to run at IPython startup. # c.ZMQTerminalIPythonApp.exec_lines = [] # Suppress warning messages about legacy config files # c.ZMQTerminalIPythonApp.ignore_old_config = False # Path to an extra config file to load. # # If specified, load this config file in addition to any other IPython config. # c.ZMQTerminalIPythonApp.extra_config_file = u'' # set the control (ROUTER) port [default: random] # c.ZMQTerminalIPythonApp.control_port = 0 # set the heartbeat port [default: random] # c.ZMQTerminalIPythonApp.hb_port = 0 # The SSH server to use to connect to the kernel. # c.ZMQTerminalIPythonApp.sshserver = '' # Should variables loaded at startup (by startup files, exec_lines, etc.) be # hidden from tools like %who? # c.ZMQTerminalIPythonApp.hide_initial_ns = True # dotted module name of an IPython extension to load. # c.ZMQTerminalIPythonApp.extra_extension = '' # A file to be run # c.ZMQTerminalIPythonApp.file_to_run = '' # The IPython profile to use. # c.ZMQTerminalIPythonApp.profile = u'default' # JSON file in which to store connection info [default: kernel-<pid>.json] # # This file will contain the IP, ports, and authentication key needed to connect # clients to this kernel. By default, this file will be created in the security # dir of the current profile, but can be specified by absolute path. # c.ZMQTerminalIPythonApp.connection_file = '' # If a command or file is given via the command-line, e.g. 'ipython foo.py', # start an interactive shell after executing the file or command. # c.ZMQTerminalIPythonApp.force_interact = False # If true, IPython will populate the user namespace with numpy, pylab, etc. and # an ``import *`` is done from numpy and pylab, when using pylab mode. # # When False, pylab mode should not import any names into the user namespace. 
# c.ZMQTerminalIPythonApp.pylab_import_all = True # Set to display confirmation dialog on exit. You can always use 'exit' or # 'quit', to force a direct exit without any confirmation. # c.ZMQTerminalIPythonApp.confirm_exit = True # The name of the IPython directory. This directory is used for logging # configuration (through profiles), history storage, etc. The default is usually # $HOME/.ipython. This option can also be specified through the environment # variable IPYTHONDIR. # c.ZMQTerminalIPythonApp.ipython_dir = u'' # Configure matplotlib for interactive use with the default matplotlib backend. # c.ZMQTerminalIPythonApp.matplotlib = None # Whether to display a banner upon starting IPython. # c.ZMQTerminalIPythonApp.display_banner = True # Create a massive crash report when IPython encounters what may be an internal # error. The default is to append a short message to the usual traceback # c.ZMQTerminalIPythonApp.verbose_crash = False # List of files to run at IPython startup. # c.ZMQTerminalIPythonApp.exec_files = [] # Enable GUI event loop integration with any of ('glut', 'gtk', 'gtk3', 'osx', # 'pyglet', 'qt', 'qt5', 'tk', 'wx'). # c.ZMQTerminalIPythonApp.gui = None # Reraise exceptions encountered loading IPython extensions? # c.ZMQTerminalIPythonApp.reraise_ipython_extension_failures = False # A list of dotted module names of IPython extensions to load. # c.ZMQTerminalIPythonApp.extensions = [] # Start IPython quickly by skipping the loading of config files. # c.ZMQTerminalIPythonApp.quick = False # The name of the default kernel to start. 
# c.ZMQTerminalIPythonApp.kernel_name = 'python' # The Logging format template # c.ZMQTerminalIPythonApp.log_format = '[%(name)s]%(highlevel)s %(message)s' # set the iopub (PUB) port [default: random] # c.ZMQTerminalIPythonApp.iopub_port = 0 # # c.ZMQTerminalIPythonApp.transport = 'tcp' #------------------------------------------------------------------------------ # ZMQTerminalInteractiveShell configuration #------------------------------------------------------------------------------ # A subclass of TerminalInteractiveShell that uses the 0MQ kernel # ZMQTerminalInteractiveShell will inherit config from: # TerminalInteractiveShell, InteractiveShell # auto editing of files with syntax errors. # c.ZMQTerminalInteractiveShell.autoedit_syntax = False # Timeout for giving up on a kernel (in seconds). # # On first connect and restart, the console tests whether the kernel is running # and responsive by sending kernel_info_requests. This sets the timeout in # seconds for how long the kernel can take before being presumed dead. # c.ZMQTerminalInteractiveShell.kernel_timeout = 60 # Use colors for displaying information about objects. Because this information # is passed through a pager (like 'less'), and some pagers get confused with # color codes, this capability can be turned off. # c.ZMQTerminalInteractiveShell.color_info = True # A list of ast.NodeTransformer subclass instances, which will be applied to # user input before code is run. # c.ZMQTerminalInteractiveShell.ast_transformers = [] # # c.ZMQTerminalInteractiveShell.history_length = 10000 # Don't call post-execute functions that have failed in the past. # c.ZMQTerminalInteractiveShell.disable_failing_post_execute = False # Show rewritten input, e.g. for autocall. # c.ZMQTerminalInteractiveShell.show_rewritten_input = True # Handler for image type output. This is useful, for example, when connecting # to the kernel in which pylab inline backend is activated. There are four # handlers defined. 
'PIL': Use Python Imaging Library to popup image; 'stream': # Use an external program to show the image. Image will be fed into the STDIN # of the program. You will need to configure `stream_image_handler`; # 'tempfile': Use an external program to show the image. Image will be saved in # a temporally file and the program is called with the temporally file. You # will need to configure `tempfile_image_handler`; 'callable': You can set any # Python callable which is called with the image data. You will need to # configure `callable_image_handler`. # c.ZMQTerminalInteractiveShell.image_handler = None # Set the color scheme (NoColor, Linux, or LightBG). # c.ZMQTerminalInteractiveShell.colors = 'Linux' # If True, anything that would be passed to the pager will be displayed as # regular output instead. # c.ZMQTerminalInteractiveShell.display_page = False # Autoindent IPython code entered interactively. # c.ZMQTerminalInteractiveShell.autoindent = True # # c.ZMQTerminalInteractiveShell.separate_in = '\n' # Command to invoke an image viewer program when you are using 'stream' image # handler. This option is a list of string where the first element is the # command itself and reminders are the options for the command. Raw image data # is given as STDIN to the program. # c.ZMQTerminalInteractiveShell.stream_image_handler = [] # Deprecated, use PromptManager.in2_template # c.ZMQTerminalInteractiveShell.prompt_in2 = ' .\\D.: ' # # c.ZMQTerminalInteractiveShell.separate_out = '' # Deprecated, use PromptManager.in_template # c.ZMQTerminalInteractiveShell.prompt_in1 = 'In [\\#]: ' # Make IPython automatically call any callable object even if you didn't type # explicit parentheses. For example, 'str 43' becomes 'str(43)' automatically. 
# The value can be '0' to disable the feature, '1' for 'smart' autocall, where # it is not applied if there are no more arguments on the line, and '2' for # 'full' autocall, where all callable objects are automatically called (even if # no arguments are present). # c.ZMQTerminalInteractiveShell.autocall = 0 # Number of lines of your screen, used to control printing of very long strings. # Strings longer than this number of lines will be sent through a pager instead # of directly printed. The default value for this is 0, which means IPython # will auto-detect your screen size every time it needs to print certain # potentially long strings (this doesn't change the behavior of the 'print' # keyword, it's only triggered internally). If for some reason this isn't # working well (it needs curses support), specify it yourself. Otherwise don't # change the default. # c.ZMQTerminalInteractiveShell.screen_length = 0 # Set the editor used by IPython (default to $EDITOR/vi/notepad). # c.ZMQTerminalInteractiveShell.editor = 'vi' # Deprecated, use PromptManager.justify # c.ZMQTerminalInteractiveShell.prompts_pad_left = True # The part of the banner to be printed before the profile # c.ZMQTerminalInteractiveShell.banner1 = 'Python 2.7.10 |Anaconda 1.9.1 (64-bit)| (default, May 28 2015, 17:02:03) \nType "copyright", "credits" or "license" for more information.\n\nIPython 3.1.0 -- An enhanced Interactive Python.\nAnaconda is brought to you by Continuum Analytics.\nPlease check out: http://continuum.io/thanks and https://anaconda.org\n? -> Introduction and overview of IPython\'s features.\n%quickref -> Quick reference.\nhelp -> Python\'s own help system.\nobject? 
-> Details about \'object\', use \'object??\' for extra details.\n' # # c.ZMQTerminalInteractiveShell.readline_parse_and_bind = ['tab: complete', '"\\C-l": clear-screen', 'set show-all-if-ambiguous on', '"\\C-o": tab-insert', '"\\C-r": reverse-search-history', '"\\C-s": forward-search-history', '"\\C-p": history-search-backward', '"\\C-n": history-search-forward', '"\\e[A": history-search-backward', '"\\e[B": history-search-forward', '"\\C-k": kill-line', '"\\C-u": unix-line-discard'] # The part of the banner to be printed after the profile # c.ZMQTerminalInteractiveShell.banner2 = '' # Whether to include output from clients other than this one sharing the same # kernel. # # Outputs are not displayed until enter is pressed. # c.ZMQTerminalInteractiveShell.include_other_output = False # # c.ZMQTerminalInteractiveShell.separate_out2 = '' # Command to invoke an image viewer program when you are using 'tempfile' image # handler. This option is a list of string where the first element is the # command itself and reminders are the options for the command. You can use # {file} and {format} in the string to represent the location of the generated # image file and image format. # c.ZMQTerminalInteractiveShell.tempfile_image_handler = [] # Preferred object representation MIME type in order. First matched MIME type # will be used. # c.ZMQTerminalInteractiveShell.mime_preference = ['image/png', 'image/jpeg', 'image/svg+xml'] # # c.ZMQTerminalInteractiveShell.wildcards_case_sensitive = True # Prefix to add to outputs coming from clients other than this one. # # Only relevant if include_other_output is True. # c.ZMQTerminalInteractiveShell.other_output_prefix = '[remote] ' # # c.ZMQTerminalInteractiveShell.debug = False # # c.ZMQTerminalInteractiveShell.object_info_string_level = 0 # # c.ZMQTerminalInteractiveShell.ipython_dir = '' # # c.ZMQTerminalInteractiveShell.readline_remove_delims = '-/~' # Start logging to the default log file in overwrite mode. 
Use `logappend` to # specify a log file to **append** logs to. # c.ZMQTerminalInteractiveShell.logstart = False # The name of the logfile to use. # c.ZMQTerminalInteractiveShell.logfile = '' # The shell program to be used for paging. # c.ZMQTerminalInteractiveShell.pager = 'less' # Enable magic commands to be called without the leading %. # c.ZMQTerminalInteractiveShell.automagic = True # Save multi-line entries as one entry in readline history # c.ZMQTerminalInteractiveShell.multiline_history = True # # c.ZMQTerminalInteractiveShell.readline_use = True # Callable object called via 'callable' image handler with one argument, `data`, # which is `msg["content"]["data"]` where `msg` is the message from iopub # channel. For exmaple, you can find base64 encoded PNG data as # `data['image/png']`. # c.ZMQTerminalInteractiveShell.callable_image_handler = None # Enable deep (recursive) reloading by default. IPython can use the deep_reload # module which reloads changes in modules recursively (it replaces the reload() # function, so you don't need to change anything to use it). deep_reload() # forces a full reload of modules whose code may have changed, which the default # reload() function does not. When deep_reload is off, IPython will use the # normal reload(), but deep_reload will still be available as dreload(). # c.ZMQTerminalInteractiveShell.deep_reload = False # Start logging to the given file in append mode. Use `logfile` to specify a log # file to **overwrite** logs to. # c.ZMQTerminalInteractiveShell.logappend = '' # # c.ZMQTerminalInteractiveShell.xmode = 'Context' # # c.ZMQTerminalInteractiveShell.quiet = False # Enable auto setting the terminal title. # c.ZMQTerminalInteractiveShell.term_title = False # Set to confirm when you try to exit IPython with an EOF (Control-D in Unix, # Control-Z/Enter in Windows). By typing 'exit' or 'quit', you can force a # direct exit without any confirmation. 
# c.ZMQTerminalInteractiveShell.confirm_exit = True # Deprecated, use PromptManager.out_template # c.ZMQTerminalInteractiveShell.prompt_out = 'Out[\\#]: ' # Set the size of the output cache. The default is 1000, you can change it # permanently in your config file. Setting it to 0 completely disables the # caching system, and the minimum value accepted is 20 (if you provide a value # less than 20, it is reset to 0 and a warning is issued). This limit is # defined because otherwise you'll spend more time re-flushing a too small cache # than working # c.ZMQTerminalInteractiveShell.cache_size = 1000 # 'all', 'last', 'last_expr' or 'none', specifying which nodes should be run # interactively (displaying output from expressions). # c.ZMQTerminalInteractiveShell.ast_node_interactivity = 'last_expr' # Automatically call the pdb debugger after every exception. # c.ZMQTerminalInteractiveShell.pdb = False #------------------------------------------------------------------------------ # KernelManager configuration #------------------------------------------------------------------------------ # Manages a single kernel in a subprocess on this host. # # This version starts kernels with Popen. # KernelManager will inherit config from: ConnectionFileMixin # DEPRECATED: Use kernel_name instead. # # The Popen Command to launch the kernel. Override this if you have a custom # kernel. If kernel_cmd is specified in a configuration file, IPython does not # pass any arguments to the kernel, because it cannot make any assumptions about # the arguments that the kernel understands. In particular, this means that the # kernel does not receive the option --debug if it given on the IPython command # line. # c.KernelManager.kernel_cmd = [] # Should we autorestart the kernel if it dies. # c.KernelManager.autorestart = False # set the stdin (ROUTER) port [default: random] # c.KernelManager.stdin_port = 0 # Set the kernel's IP address [default localhost]. 
If the IP address is # something other than localhost, then Consoles on other machines will be able # to connect to the Kernel, so be careful! # c.KernelManager.ip = u'' # JSON file in which to store connection info [default: kernel-<pid>.json] # # This file will contain the IP, ports, and authentication key needed to connect # clients to this kernel. By default, this file will be created in the security # dir of the current profile, but can be specified by absolute path. # c.KernelManager.connection_file = '' # set the control (ROUTER) port [default: random] # c.KernelManager.control_port = 0 # set the heartbeat port [default: random] # c.KernelManager.hb_port = 0 # set the shell (ROUTER) port [default: random] # c.KernelManager.shell_port = 0 # # c.KernelManager.transport = 'tcp' # set the iopub (PUB) port [default: random] # c.KernelManager.iopub_port = 0 #------------------------------------------------------------------------------ # ProfileDir configuration #------------------------------------------------------------------------------ # An object to manage the profile directory and its resources. # # The profile directory is used by all IPython applications, to manage # configuration, logging and security. # # This object knows how to find, create and manage these directories. This # should be used by any code that wants to handle profiles. # Set the profile location directly. This overrides the logic used by the # `profile` option. # c.ProfileDir.location = u'' #------------------------------------------------------------------------------ # Session configuration #------------------------------------------------------------------------------ # Object for handling serialization and sending of messages. # # The Session object handles building messages and sending them with ZMQ sockets # or ZMQStream objects. Objects can communicate with each other over the # network via Session objects, and only need to work with the dict-based IPython # message spec. 
The Session will handle serialization/deserialization, security, # and metadata. # # Sessions support configurable serialization via packer/unpacker traits, and # signing with HMAC digests via the key/keyfile traits. # # Parameters ---------- # # debug : bool # whether to trigger extra debugging statements # packer/unpacker : str : 'json', 'pickle' or import_string # importstrings for methods to serialize message parts. If just # 'json' or 'pickle', predefined JSON and pickle packers will be used. # Otherwise, the entire importstring must be used. # # The functions must accept at least valid JSON input, and output *bytes*. # # For example, to use msgpack: # packer = 'msgpack.packb', unpacker='msgpack.unpackb' # pack/unpack : callables # You can also set the pack/unpack callables for serialization directly. # session : bytes # the ID of this Session object. The default is to generate a new UUID. # username : unicode # username added to message headers. The default is to ask the OS. # key : bytes # The key used to initialize an HMAC signature. If unset, messages # will not be signed or checked. # keyfile : filepath # The file containing a key. If this is set, `key` will be initialized # to the contents of the file. # Username for the Session. Default is your system username. # c.Session.username = u'scollis' # The name of the unpacker for unserializing messages. Only used with custom # functions for `packer`. # c.Session.unpacker = 'json' # Threshold (in bytes) beyond which a buffer should be sent without copying. # c.Session.copy_threshold = 65536 # The name of the packer for serializing messages. Should be one of 'json', # 'pickle', or an import name for a custom callable serializer. # c.Session.packer = 'json' # The maximum number of digests to remember. # # The digest history will be culled when it exceeds this value. # c.Session.digest_history_size = 65536 # The UUID identifying this session. 
# c.Session.session = u'' # The digest scheme used to construct the message signatures. Must have the form # 'hmac-HASH'. # c.Session.signature_scheme = 'hmac-sha256' # execution key, for signing messages. # c.Session.key = '' # Debug output in the Session # c.Session.debug = False # The maximum number of items for a container to be introspected for custom # serialization. Containers larger than this are pickled outright. # c.Session.item_threshold = 64 # path to file containing execution key. # c.Session.keyfile = '' # Threshold (in bytes) beyond which an object's buffer should be extracted to # avoid pickling. # c.Session.buffer_threshold = 1024 # Metadata dictionary, which serves as the default top-level metadata dict for # each message. # c.Session.metadata = {}
bsd-3-clause
plotly/python-api
packages/python/plotly/plotly/matplotlylib/mplexporter/utils.py
1
11907
""" Utility Routines for Working with Matplotlib Objects ==================================================== """ import itertools import io import base64 import numpy as np import warnings import matplotlib from matplotlib.colors import colorConverter from matplotlib.path import Path from matplotlib.markers import MarkerStyle from matplotlib.transforms import Affine2D from matplotlib import ticker def export_color(color): """Convert matplotlib color code to hex color or RGBA color""" if color is None or colorConverter.to_rgba(color)[3] == 0: return 'none' elif colorConverter.to_rgba(color)[3] == 1: rgb = colorConverter.to_rgb(color) return '#{0:02X}{1:02X}{2:02X}'.format(*(int(255 * c) for c in rgb)) else: c = colorConverter.to_rgba(color) return "rgba(" + ", ".join(str(int(np.round(val * 255))) for val in c[:3])+', '+str(c[3])+")" def _many_to_one(input_dict): """Convert a many-to-one mapping to a one-to-one mapping""" return dict((key, val) for keys, val in input_dict.items() for key in keys) LINESTYLES = _many_to_one({('solid', '-', (None, None)): 'none', ('dashed', '--'): "6,6", ('dotted', ':'): "2,2", ('dashdot', '-.'): "4,4,2,4", ('', ' ', 'None', 'none'): None}) def get_dasharray(obj): """Get an SVG dash array for the given matplotlib linestyle Parameters ---------- obj : matplotlib object The matplotlib line or path object, which must have a get_linestyle() method which returns a valid matplotlib line code Returns ------- dasharray : string The HTML/SVG dasharray code associated with the object. 
""" if obj.__dict__.get('_dashSeq', None) is not None: return ','.join(map(str, obj._dashSeq)) else: ls = obj.get_linestyle() dasharray = LINESTYLES.get(ls, 'not found') if dasharray == 'not found': warnings.warn("line style '{0}' not understood: " "defaulting to solid line.".format(ls)) dasharray = LINESTYLES['solid'] return dasharray PATH_DICT = {Path.LINETO: 'L', Path.MOVETO: 'M', Path.CURVE3: 'S', Path.CURVE4: 'C', Path.CLOSEPOLY: 'Z'} def SVG_path(path, transform=None, simplify=False): """Construct the vertices and SVG codes for the path Parameters ---------- path : matplotlib.Path object transform : matplotlib transform (optional) if specified, the path will be transformed before computing the output. Returns ------- vertices : array The shape (M, 2) array of vertices of the Path. Note that some Path codes require multiple vertices, so the length of these vertices may be longer than the list of path codes. path_codes : list A length N list of single-character path codes, N <= M. Each code is a single character, in ['L','M','S','C','Z']. See the standard SVG path specification for a description of these. 
""" if transform is not None: path = path.transformed(transform) vc_tuples = [(vertices if path_code != Path.CLOSEPOLY else [], PATH_DICT[path_code]) for (vertices, path_code) in path.iter_segments(simplify=simplify)] if not vc_tuples: # empty path is a special case return np.zeros((0, 2)), [] else: vertices, codes = zip(*vc_tuples) vertices = np.array(list(itertools.chain(*vertices))).reshape(-1, 2) return vertices, list(codes) def get_path_style(path, fill=True): """Get the style dictionary for matplotlib path objects""" style = {} style['alpha'] = path.get_alpha() if style['alpha'] is None: style['alpha'] = 1 style['edgecolor'] = export_color(path.get_edgecolor()) if fill: style['facecolor'] = export_color(path.get_facecolor()) else: style['facecolor'] = 'none' style['edgewidth'] = path.get_linewidth() style['dasharray'] = get_dasharray(path) style['zorder'] = path.get_zorder() return style def get_line_style(line): """Get the style dictionary for matplotlib line objects""" style = {} style['alpha'] = line.get_alpha() if style['alpha'] is None: style['alpha'] = 1 style['color'] = export_color(line.get_color()) style['linewidth'] = line.get_linewidth() style['dasharray'] = get_dasharray(line) style['zorder'] = line.get_zorder() style['drawstyle'] = line.get_drawstyle() return style def get_marker_style(line): """Get the style dictionary for matplotlib marker objects""" style = {} style['alpha'] = line.get_alpha() if style['alpha'] is None: style['alpha'] = 1 style['facecolor'] = export_color(line.get_markerfacecolor()) style['edgecolor'] = export_color(line.get_markeredgecolor()) style['edgewidth'] = line.get_markeredgewidth() style['marker'] = line.get_marker() markerstyle = MarkerStyle(line.get_marker()) markersize = line.get_markersize() markertransform = (markerstyle.get_transform() + Affine2D().scale(markersize, -markersize)) style['markerpath'] = SVG_path(markerstyle.get_path(), markertransform) style['markersize'] = markersize style['zorder'] = 
line.get_zorder() return style def get_text_style(text): """Return the text style dict for a text instance""" style = {} style['alpha'] = text.get_alpha() if style['alpha'] is None: style['alpha'] = 1 style['fontsize'] = text.get_size() style['color'] = export_color(text.get_color()) style['halign'] = text.get_horizontalalignment() # left, center, right style['valign'] = text.get_verticalalignment() # baseline, center, top style['malign'] = text._multialignment # text alignment when '\n' in text style['rotation'] = text.get_rotation() style['zorder'] = text.get_zorder() return style def get_axis_properties(axis): """Return the property dictionary for a matplotlib.Axis instance""" props = {} label1On = axis._major_tick_kw.get('label1On', True) if isinstance(axis, matplotlib.axis.XAxis): if label1On: props['position'] = "bottom" else: props['position'] = "top" elif isinstance(axis, matplotlib.axis.YAxis): if label1On: props['position'] = "left" else: props['position'] = "right" else: raise ValueError("{0} should be an Axis instance".format(axis)) # Use tick values if appropriate locator = axis.get_major_locator() props['nticks'] = len(locator()) if isinstance(locator, ticker.FixedLocator): props['tickvalues'] = list(locator()) else: props['tickvalues'] = None # Find tick formats formatter = axis.get_major_formatter() if isinstance(formatter, ticker.NullFormatter): props['tickformat'] = "" elif isinstance(formatter, ticker.FixedFormatter): props['tickformat'] = list(formatter.seq) elif not any(label.get_visible() for label in axis.get_ticklabels()): props['tickformat'] = "" else: props['tickformat'] = None # Get axis scale props['scale'] = axis.get_scale() # Get major tick label size (assumes that's all we really care about!) 
labels = axis.get_ticklabels() if labels: props['fontsize'] = labels[0].get_fontsize() else: props['fontsize'] = None # Get associated grid props['grid'] = get_grid_style(axis) # get axis visibility props['visible'] = axis.get_visible() return props def get_grid_style(axis): gridlines = axis.get_gridlines() if axis._gridOnMajor and len(gridlines) > 0: color = export_color(gridlines[0].get_color()) alpha = gridlines[0].get_alpha() dasharray = get_dasharray(gridlines[0]) return dict(gridOn=True, color=color, dasharray=dasharray, alpha=alpha) else: return {"gridOn": False} def get_figure_properties(fig): return {'figwidth': fig.get_figwidth(), 'figheight': fig.get_figheight(), 'dpi': fig.dpi} def get_axes_properties(ax): props = {'axesbg': export_color(ax.patch.get_facecolor()), 'axesbgalpha': ax.patch.get_alpha(), 'bounds': ax.get_position().bounds, 'dynamic': ax.get_navigate(), 'axison': ax.axison, 'frame_on': ax.get_frame_on(), 'patch_visible':ax.patch.get_visible(), 'axes': [get_axis_properties(ax.xaxis), get_axis_properties(ax.yaxis)]} for axname in ['x', 'y']: axis = getattr(ax, axname + 'axis') domain = getattr(ax, 'get_{0}lim'.format(axname))() lim = domain if isinstance(axis.converter, matplotlib.dates.DateConverter): scale = 'date' try: import pandas as pd from pandas.tseries.converter import PeriodConverter except ImportError: pd = None if (pd is not None and isinstance(axis.converter, PeriodConverter)): _dates = [pd.Period(ordinal=int(d), freq=axis.freq) for d in domain] domain = [(d.year, d.month - 1, d.day, d.hour, d.minute, d.second, 0) for d in _dates] else: domain = [(d.year, d.month - 1, d.day, d.hour, d.minute, d.second, d.microsecond * 1E-3) for d in matplotlib.dates.num2date(domain)] else: scale = axis.get_scale() if scale not in ['date', 'linear', 'log']: raise ValueError("Unknown axis scale: " "{0}".format(axis.get_scale())) props[axname + 'scale'] = scale props[axname + 'lim'] = lim props[axname + 'domain'] = domain return props def 
iter_all_children(obj, skipContainers=False): """ Returns an iterator over all childen and nested children using obj's get_children() method if skipContainers is true, only childless objects are returned. """ if hasattr(obj, 'get_children') and len(obj.get_children()) > 0: for child in obj.get_children(): if not skipContainers: yield child # could use `yield from` in python 3... for grandchild in iter_all_children(child, skipContainers): yield grandchild else: yield obj def get_legend_properties(ax, legend): handles, labels = ax.get_legend_handles_labels() visible = legend.get_visible() return {'handles': handles, 'labels': labels, 'visible': visible} def image_to_base64(image): """ Convert a matplotlib image to a base64 png representation Parameters ---------- image : matplotlib image object The image to be converted. Returns ------- image_base64 : string The UTF8-encoded base64 string representation of the png image. """ ax = image.axes binary_buffer = io.BytesIO() # image is saved in axes coordinates: we need to temporarily # set the correct limits to get the correct image lim = ax.axis() ax.axis(image.get_extent()) image.write_png(binary_buffer) ax.axis(lim) binary_buffer.seek(0) return base64.b64encode(binary_buffer.read()).decode('utf-8')
mit
morrisonwudi/zipline
tests/test_algorithm_gen.py
18
7339
# # Copyright 2014 Quantopian, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from unittest import TestCase from nose.tools import ( timed, nottest ) from datetime import datetime import pandas as pd import pytz from zipline.finance import trading from zipline.algorithm import TradingAlgorithm from zipline.finance import slippage from zipline.utils import factory from zipline.utils.factory import create_simulation_parameters from zipline.utils.test_utils import ( setup_logger, teardown_logger ) from zipline.protocol import ( Event, DATASOURCE_TYPE ) DEFAULT_TIMEOUT = 15 # seconds EXTENDED_TIMEOUT = 90 class RecordDateSlippage(slippage.FixedSlippage): def __init__(self, spread): super(RecordDateSlippage, self).__init__(spread=spread) self.latest_date = None def simulate(self, event, open_orders): self.latest_date = event.dt result = super(RecordDateSlippage, self).simulate(event, open_orders) return result class TestAlgo(TradingAlgorithm): def __init__(self, asserter, *args, **kwargs): super(TestAlgo, self).__init__(*args, **kwargs) self.asserter = asserter def initialize(self, window_length=100): self.latest_date = None self.set_slippage(RecordDateSlippage(spread=0.05)) self.stocks = [self.sid(8229)] self.ordered = False self.num_bars = 0 def handle_data(self, data): self.num_bars += 1 self.latest_date = self.get_datetime() if not self.ordered: for stock in self.stocks: self.order(stock, 100) self.ordered = True else: self.asserter.assertGreaterEqual( self.latest_date, 
self.slippage.latest_date ) class AlgorithmGeneratorTestCase(TestCase): def setUp(self): setup_logger(self) def tearDown(self): teardown_logger(self) @nottest def test_lse_algorithm(self): lse = trading.TradingEnvironment( bm_symbol='^FTSE', exchange_tz='Europe/London' ) with lse: sim_params = factory.create_simulation_parameters( start=datetime(2012, 5, 1, tzinfo=pytz.utc), end=datetime(2012, 6, 30, tzinfo=pytz.utc) ) algo = TestAlgo(self, identifiers=[8229], sim_params=sim_params) trade_source = factory.create_daily_trade_source( [8229], 200, sim_params ) algo.set_sources([trade_source]) gen = algo.get_generator() results = list(gen) self.assertEqual(len(results), 42) # May 7, 2012 was an LSE holiday, confirm the 4th trading # day was May 8. self.assertEqual(results[4]['daily_perf']['period_open'], datetime(2012, 5, 8, 8, 31, tzinfo=pytz.utc)) @timed(DEFAULT_TIMEOUT) def test_generator_dates(self): """ Ensure the pipeline of generators are in sync, at least as far as their current dates. """ sim_params = factory.create_simulation_parameters( start=datetime(2011, 7, 30, tzinfo=pytz.utc), end=datetime(2012, 7, 30, tzinfo=pytz.utc) ) algo = TestAlgo(self, identifiers=[8229], sim_params=sim_params) trade_source = factory.create_daily_trade_source( [8229], sim_params ) algo.set_sources([trade_source]) gen = algo.get_generator() self.assertTrue(list(gen)) self.assertTrue(algo.slippage.latest_date) self.assertTrue(algo.latest_date) @timed(DEFAULT_TIMEOUT) def test_handle_data_on_market(self): """ Ensure that handle_data is only called on market minutes. i.e. events that come in at midnight should be processed at market open. 
""" from zipline.finance.trading import SimulationParameters sim_params = SimulationParameters( period_start=datetime(2012, 7, 30, tzinfo=pytz.utc), period_end=datetime(2012, 7, 30, tzinfo=pytz.utc), data_frequency='minute' ) algo = TestAlgo(self, identifiers=[8229], sim_params=sim_params) midnight_custom_source = [Event({ 'custom_field': 42.0, 'sid': 'custom_data', 'source_id': 'TestMidnightSource', 'dt': pd.Timestamp('2012-07-30', tz='UTC'), 'type': DATASOURCE_TYPE.CUSTOM })] minute_event_source = [Event({ 'volume': 100, 'price': 200.0, 'high': 210.0, 'open_price': 190.0, 'low': 180.0, 'sid': 8229, 'source_id': 'TestMinuteEventSource', 'dt': pd.Timestamp('2012-07-30 9:31 AM', tz='US/Eastern'). tz_convert('UTC'), 'type': DATASOURCE_TYPE.TRADE })] algo.set_sources([midnight_custom_source, minute_event_source]) gen = algo.get_generator() # Consume the generator list(gen) # Though the events had different time stamps, handle data should # have only been called once, at the market open. self.assertEqual(algo.num_bars, 1) @timed(DEFAULT_TIMEOUT) def test_progress(self): """ Ensure the pipeline of generators are in sync, at least as far as their current dates. """ sim_params = factory.create_simulation_parameters( start=datetime(2008, 1, 1, tzinfo=pytz.utc), end=datetime(2008, 1, 5, tzinfo=pytz.utc) ) algo = TestAlgo(self, sim_params=sim_params) trade_source = factory.create_daily_trade_source( [8229], sim_params ) algo.set_sources([trade_source]) gen = algo.get_generator() results = list(gen) self.assertEqual(results[-2]['progress'], 1.0) def test_benchmark_times_match_market_close_for_minutely_data(self): """ Benchmark dates should be adjusted so that benchmark events are emitted at the end of each trading day when working with minutely data. Verification relies on the fact that there are no trades so algo.datetime should be equal to the last benchmark time. 
See https://github.com/quantopian/zipline/issues/241 """ sim_params = create_simulation_parameters(num_days=1, data_frequency='minute') algo = TestAlgo(self, sim_params=sim_params, identifiers=[8229]) algo.run(source=[], overwrite_sim_params=False) self.assertEqual(algo.datetime, sim_params.last_close)
apache-2.0
pratapvardhan/scikit-learn
sklearn/ensemble/voting_classifier.py
11
8661
""" Soft Voting/Majority Rule classifier. This module contains a Soft Voting/Majority Rule classifier for classification estimators. """ # Authors: Sebastian Raschka <se.raschka@gmail.com>, # Gilles Louppe <g.louppe@gmail.com> # # Licence: BSD 3 clause import numpy as np from ..base import BaseEstimator from ..base import ClassifierMixin from ..base import TransformerMixin from ..base import clone from ..preprocessing import LabelEncoder from ..externals import six from ..exceptions import NotFittedError from ..utils.validation import check_is_fitted class VotingClassifier(BaseEstimator, ClassifierMixin, TransformerMixin): """Soft Voting/Majority Rule classifier for unfitted estimators. .. versionadded:: 0.17 Read more in the :ref:`User Guide <voting_classifier>`. Parameters ---------- estimators : list of (string, estimator) tuples Invoking the ``fit`` method on the ``VotingClassifier`` will fit clones of those original estimators that will be stored in the class attribute `self.estimators_`. voting : str, {'hard', 'soft'} (default='hard') If 'hard', uses predicted class labels for majority rule voting. Else if 'soft', predicts the class label based on the argmax of the sums of the predicted probabilities, which is recommended for an ensemble of well-calibrated classifiers. weights : array-like, shape = [n_classifiers], optional (default=`None`) Sequence of weights (`float` or `int`) to weight the occurrences of predicted class labels (`hard` voting) or class probabilities before averaging (`soft` voting). Uses uniform weights if `None`. Attributes ---------- estimators_ : list of classifiers The collection of fitted sub-estimators. classes_ : array-like, shape = [n_predictions] The classes labels. 
Examples -------- >>> import numpy as np >>> from sklearn.linear_model import LogisticRegression >>> from sklearn.naive_bayes import GaussianNB >>> from sklearn.ensemble import RandomForestClassifier >>> clf1 = LogisticRegression(random_state=1) >>> clf2 = RandomForestClassifier(random_state=1) >>> clf3 = GaussianNB() >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]]) >>> y = np.array([1, 1, 1, 2, 2, 2]) >>> eclf1 = VotingClassifier(estimators=[ ... ('lr', clf1), ('rf', clf2), ('gnb', clf3)], voting='hard') >>> eclf1 = eclf1.fit(X, y) >>> print(eclf1.predict(X)) [1 1 1 2 2 2] >>> eclf2 = VotingClassifier(estimators=[ ... ('lr', clf1), ('rf', clf2), ('gnb', clf3)], ... voting='soft') >>> eclf2 = eclf2.fit(X, y) >>> print(eclf2.predict(X)) [1 1 1 2 2 2] >>> eclf3 = VotingClassifier(estimators=[ ... ('lr', clf1), ('rf', clf2), ('gnb', clf3)], ... voting='soft', weights=[2,1,1]) >>> eclf3 = eclf3.fit(X, y) >>> print(eclf3.predict(X)) [1 1 1 2 2 2] >>> """ def __init__(self, estimators, voting='hard', weights=None): self.estimators = estimators self.named_estimators = dict(estimators) self.voting = voting self.weights = weights def fit(self, X, y): """ Fit the estimators. Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. 
Returns ------- self : object """ if isinstance(y, np.ndarray) and len(y.shape) > 1 and y.shape[1] > 1: raise NotImplementedError('Multilabel and multi-output' ' classification is not supported.') if self.voting not in ('soft', 'hard'): raise ValueError("Voting must be 'soft' or 'hard'; got (voting=%r)" % self.voting) if self.estimators is None or len(self.estimators) == 0: raise AttributeError('Invalid `estimators` attribute, `estimators`' ' should be a list of (string, estimator)' ' tuples') if self.weights and len(self.weights) != len(self.estimators): raise ValueError('Number of classifiers and weights must be equal' '; got %d weights, %d estimators' % (len(self.weights), len(self.estimators))) self.le_ = LabelEncoder() self.le_.fit(y) self.classes_ = self.le_.classes_ self.estimators_ = [] for name, clf in self.estimators: fitted_clf = clone(clf).fit(X, self.le_.transform(y)) self.estimators_.append(fitted_clf) return self def predict(self, X): """ Predict class labels for X. Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns ---------- maj : array-like, shape = [n_samples] Predicted class labels. """ check_is_fitted(self, 'estimators_') if self.voting == 'soft': maj = np.argmax(self.predict_proba(X), axis=1) else: # 'hard' voting predictions = self._predict(X) maj = np.apply_along_axis(lambda x: np.argmax(np.bincount(x, weights=self.weights)), axis=1, arr=predictions.astype('int')) maj = self.le_.inverse_transform(maj) return maj def _collect_probas(self, X): """Collect results from clf.predict calls. 
""" return np.asarray([clf.predict_proba(X) for clf in self.estimators_]) def _predict_proba(self, X): """Predict class probabilities for X in 'soft' voting """ if self.voting == 'hard': raise AttributeError("predict_proba is not available when" " voting=%r" % self.voting) check_is_fitted(self, 'estimators_') avg = np.average(self._collect_probas(X), axis=0, weights=self.weights) return avg @property def predict_proba(self): """Compute probabilities of possible outcomes for samples in X. Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns ---------- avg : array-like, shape = [n_samples, n_classes] Weighted average probability for each class per sample. """ return self._predict_proba def transform(self, X): """Return class labels or probabilities for X for each estimator. Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns ------- If `voting='soft'`: array-like = [n_classifiers, n_samples, n_classes] Class probabilities calculated by each classifier. If `voting='hard'`: array-like = [n_classifiers, n_samples] Class labels predicted by each classifier. """ check_is_fitted(self, 'estimators_') if self.voting == 'soft': return self._collect_probas(X) else: return self._predict(X) def get_params(self, deep=True): """Return estimator parameter names for GridSearch support""" if not deep: return super(VotingClassifier, self).get_params(deep=False) else: out = super(VotingClassifier, self).get_params(deep=False) out.update(self.named_estimators.copy()) for name, step in six.iteritems(self.named_estimators): for key, value in six.iteritems(step.get_params(deep=True)): out['%s__%s' % (name, key)] = value return out def _predict(self, X): """Collect results from clf.predict calls. 
""" return np.asarray([clf.predict(X) for clf in self.estimators_]).T
bsd-3-clause
to266/hyperspy
hyperspy/drawing/_widgets/scalebar.py
2
5335
# -*- coding: utf-8 -*-
# Copyright 2007-2016 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy.  If not, see <http://www.gnu.org/licenses/>.

from hyperspy.misc.math_tools import closest_nice_number


class ScaleBar(object):
    """A scale bar (line plus length label) drawn on a matplotlib axes."""

    def __init__(self, ax, units, pixel_size=None, color='white',
                 position=None, max_size_ratio=0.25, lw=2, length=None,
                 animated=False):
        """Add a scale bar to an image.

        Parameters
        ----------
        ax : matplotlib axes
            The axes where to draw the scale bar.
        units : string
            The units of the scale, e.g. "nm". May be fully wrapped in
            "$...$" for TeX rendering.
        pixel_size : {None, float}
            If None the axes of the image are supposed to be calibrated.
            Otherwise the pixel size must be specified.
        color : a valid matplotlib color
        position : {None, (float, float)}
            If None the position is automatically determined.
        max_size_ratio : float
            The maximum size of the scale bar in respect to the
            length of the x axis.
        lw : int
            The line width.
        length : {None, float}
            If None the length is automatically calculated using the
            max_size_ratio.
        animated : bool
            Passed through to the matplotlib artists (blitting support).

        """
        self.animated = animated
        self.ax = ax
        self.units = units
        self.pixel_size = pixel_size
        self.xmin, self.xmax = ax.get_xlim()
        self.ymin, self.ymax = ax.get_ylim()
        self.text = None
        self.line = None
        self.tex_bold = False
        if length is None:
            self.calculate_size(max_size_ratio=max_size_ratio)
        else:
            self.length = length
        if position is None:
            self.position = self.calculate_line_position()
        else:
            self.position = position
        self.calculate_text_position()
        self.plot_scale(line_width=lw)
        self.set_color(color)

    def get_units_string(self):
        """Return the label text (length plus units), honoring `tex_bold`.

        The units are embedded inside the TeX math environment only when
        they are already fully wrapped in "$...$".
        """
        if self.tex_bold is True:
            # Bug fix: the original tested ``(units[0] and units[-1]) == '$'``.
            # ``and`` returns its last truthy operand, so only the final
            # character was effectively compared with '$', and empty units
            # raised IndexError. Check both ends explicitly instead.
            if self.units.startswith('$') and self.units.endswith('$'):
                return r'$\mathbf{%g\,%s}$' % \
                    (self.length, self.units[1:-1])
            else:
                return r'$\mathbf{%g\,}$\textbf{%s}' % \
                    (self.length, self.units)
        else:
            return r'$%g\,$%s' % (self.length, self.units)

    def calculate_line_position(self, pad=0.05):
        """Return the default anchor point: a ``pad`` fraction in from the
        lower-left corner of the stored axes limits."""
        return ((1 - pad) * self.xmin + pad * self.xmax,
                (1 - pad) * self.ymin + pad * self.ymax)

    def calculate_text_position(self, pad=1 / 100.):
        """Store in `text_position` the point horizontally centered on the
        bar, slightly above it."""
        ps = self.pixel_size if self.pixel_size is not None else 1
        x1, y1 = self.position
        x2, y2 = x1 + self.length / ps, y1

        self.text_position = ((x1 + x2) / 2.,
                              y2 + (self.ymax - self.ymin) / ps * pad)

    def calculate_size(self, max_size_ratio=0.25):
        """Set `length` to a "nice" number no larger than
        ``max_size_ratio`` times the x-axis span (in calibrated units)."""
        ps = self.pixel_size if self.pixel_size is not None else 1
        size = closest_nice_number(ps * (self.xmax - self.xmin) *
                                   max_size_ratio)
        self.length = size

    def remove(self):
        """Remove the line and text artists from the axes, if present."""
        if self.line is not None:
            self.ax.lines.remove(self.line)
        if self.text is not None:
            self.ax.texts.remove(self.text)

    def plot_scale(self, line_width=1):
        """(Re)draw the scale bar line and its label on the axes."""
        self.remove()
        ps = self.pixel_size if self.pixel_size is not None else 1
        x1, y1 = self.position
        x2, y2 = x1 + self.length / ps, y1
        self.line, = self.ax.plot([x1, x2], [y1, y2],
                                  linestyle='-',
                                  lw=line_width,
                                  animated=self.animated)
        self.text = self.ax.text(*self.text_position,
                                 s=self.get_units_string(),
                                 ha='center',
                                 size='medium',
                                 animated=self.animated)
        # Plotting may autoscale the axes; restore the original limits.
        self.ax.set_xlim(self.xmin, self.xmax)
        self.ax.set_ylim(self.ymin, self.ymax)
        self.ax.figure.canvas.draw()

    def _set_position(self, x, y):
        # Move the bar anchor and redraw with the current line width.
        self.position = x, y
        self.calculate_text_position()
        self.plot_scale(line_width=self.line.get_linewidth())

    def set_color(self, c):
        """Set the color of both the line and the label."""
        self.line.set_color(c)
        self.text.set_color(c)
        self.ax.figure.canvas.draw_idle()

    def set_length(self, length):
        """Change the bar length (in calibrated units) and redraw.

        Bug fix: the original called ``self.calculate_scale_size()``, a
        method that does not exist on this class, so every call raised
        AttributeError. The similarly named ``calculate_size`` would
        overwrite the length just assigned, so the stale call is dropped.
        """
        color = self.line.get_color()
        self.length = length
        self.calculate_text_position()
        self.plot_scale(line_width=self.line.get_linewidth())
        self.set_color(color)

    def set_tex_bold(self):
        """Render the label in TeX bold face from now on."""
        self.tex_bold = True
        self.text.set_text(self.get_units_string())
        self.ax.figure.canvas.draw_idle()
gpl-3.0
bthirion/scikit-learn
sklearn/metrics/cluster/tests/test_bicluster.py
394
1770
"""Testing for bicluster metrics module""" import numpy as np from sklearn.utils.testing import assert_equal, assert_almost_equal from sklearn.metrics.cluster.bicluster import _jaccard from sklearn.metrics import consensus_score def test_jaccard(): a1 = np.array([True, True, False, False]) a2 = np.array([True, True, True, True]) a3 = np.array([False, True, True, False]) a4 = np.array([False, False, True, True]) assert_equal(_jaccard(a1, a1, a1, a1), 1) assert_equal(_jaccard(a1, a1, a2, a2), 0.25) assert_equal(_jaccard(a1, a1, a3, a3), 1.0 / 7) assert_equal(_jaccard(a1, a1, a4, a4), 0) def test_consensus_score(): a = [[True, True, False, False], [False, False, True, True]] b = a[::-1] assert_equal(consensus_score((a, a), (a, a)), 1) assert_equal(consensus_score((a, a), (b, b)), 1) assert_equal(consensus_score((a, b), (a, b)), 1) assert_equal(consensus_score((a, b), (b, a)), 1) assert_equal(consensus_score((a, a), (b, a)), 0) assert_equal(consensus_score((a, a), (a, b)), 0) assert_equal(consensus_score((b, b), (a, b)), 0) assert_equal(consensus_score((b, b), (b, a)), 0) def test_consensus_score_issue2445(): ''' Different number of biclusters in A and B''' a_rows = np.array([[True, True, False, False], [False, False, True, True], [False, False, False, True]]) a_cols = np.array([[True, True, False, False], [False, False, True, True], [False, False, False, True]]) idx = [0, 2] s = consensus_score((a_rows, a_cols), (a_rows[idx], a_cols[idx])) # B contains 2 of the 3 biclusters in A, so score should be 2/3 assert_almost_equal(s, 2.0/3.0)
bsd-3-clause
ndingwall/scikit-learn
sklearn/cluster/tests/test_k_means.py
1
44177
"""Testing for K-means""" import re import sys import numpy as np from scipy import sparse as sp from threadpoolctl import threadpool_limits import pytest from sklearn.utils._testing import assert_array_equal from sklearn.utils._testing import assert_array_almost_equal from sklearn.utils._testing import assert_allclose from sklearn.utils._testing import assert_almost_equal from sklearn.utils.fixes import _astype_copy_false from sklearn.base import clone from sklearn.exceptions import ConvergenceWarning from sklearn.utils.extmath import row_norms from sklearn.metrics import pairwise_distances from sklearn.metrics import pairwise_distances_argmin from sklearn.metrics.cluster import v_measure_score from sklearn.cluster import KMeans, k_means, kmeans_plusplus from sklearn.cluster import MiniBatchKMeans from sklearn.cluster._kmeans import _labels_inertia from sklearn.cluster._kmeans import _mini_batch_step from sklearn.cluster._k_means_fast import _relocate_empty_clusters_dense from sklearn.cluster._k_means_fast import _relocate_empty_clusters_sparse from sklearn.cluster._k_means_fast import _euclidean_dense_dense_wrapper from sklearn.cluster._k_means_fast import _euclidean_sparse_dense_wrapper from sklearn.cluster._k_means_fast import _inertia_dense from sklearn.cluster._k_means_fast import _inertia_sparse from sklearn.datasets import make_blobs from io import StringIO # non centered, sparse centers to check the centers = np.array([ [0.0, 5.0, 0.0, 0.0, 0.0], [1.0, 1.0, 4.0, 0.0, 0.0], [1.0, 0.0, 0.0, 5.0, 1.0], ]) n_samples = 100 n_clusters, n_features = centers.shape X, true_labels = make_blobs(n_samples=n_samples, centers=centers, cluster_std=1., random_state=42) X_csr = sp.csr_matrix(X) @pytest.mark.parametrize("array_constr", [np.array, sp.csr_matrix], ids=["dense", "sparse"]) @pytest.mark.parametrize("algo", ["full", "elkan"]) @pytest.mark.parametrize("dtype", [np.float32, np.float64]) def test_kmeans_results(array_constr, algo, dtype): # Checks that KMeans works 
as intended on toy dataset by comparing with # expected results computed by hand. X = array_constr([[0, 0], [0.5, 0], [0.5, 1], [1, 1]], dtype=dtype) sample_weight = [3, 1, 1, 3] init_centers = np.array([[0, 0], [1, 1]], dtype=dtype) expected_labels = [0, 0, 1, 1] expected_inertia = 0.375 expected_centers = np.array([[0.125, 0], [0.875, 1]], dtype=dtype) expected_n_iter = 2 kmeans = KMeans(n_clusters=2, n_init=1, init=init_centers, algorithm=algo) kmeans.fit(X, sample_weight=sample_weight) assert_array_equal(kmeans.labels_, expected_labels) assert_allclose(kmeans.inertia_, expected_inertia) assert_allclose(kmeans.cluster_centers_, expected_centers) assert kmeans.n_iter_ == expected_n_iter @pytest.mark.parametrize("array_constr", [np.array, sp.csr_matrix], ids=['dense', 'sparse']) @pytest.mark.parametrize("algo", ['full', 'elkan']) def test_kmeans_relocated_clusters(array_constr, algo): # check that empty clusters are relocated as expected X = array_constr([[0, 0], [0.5, 0], [0.5, 1], [1, 1]]) # second center too far from others points will be empty at first iter init_centers = np.array([[0.5, 0.5], [3, 3]]) expected_labels = [0, 0, 1, 1] expected_inertia = 0.25 expected_centers = [[0.25, 0], [0.75, 1]] expected_n_iter = 3 kmeans = KMeans(n_clusters=2, n_init=1, init=init_centers, algorithm=algo) kmeans.fit(X) assert_array_equal(kmeans.labels_, expected_labels) assert_allclose(kmeans.inertia_, expected_inertia) assert_allclose(kmeans.cluster_centers_, expected_centers) assert kmeans.n_iter_ == expected_n_iter @pytest.mark.parametrize("array_constr", [np.array, sp.csr_matrix], ids=["dense", "sparse"]) def test_relocate_empty_clusters(array_constr): # test for the _relocate_empty_clusters_(dense/sparse) helpers # Synthetic dataset with 3 obvious clusters of different sizes X = np.array( [-10., -9.5, -9, -8.5, -8, -1, 1, 9, 9.5, 10]).reshape(-1, 1) X = array_constr(X) sample_weight = np.ones(10) # centers all initialized to the first point of X centers_old = 
np.array([-10., -10, -10]).reshape(-1, 1) # With this initialization, all points will be assigned to the first center # At this point a center in centers_new is the weighted sum of the points # it contains if it's not empty, otherwise it is the same as before. centers_new = np.array([-16.5, -10, -10]).reshape(-1, 1) weight_in_clusters = np.array([10., 0, 0]) labels = np.zeros(10, dtype=np.int32) if array_constr is np.array: _relocate_empty_clusters_dense(X, sample_weight, centers_old, centers_new, weight_in_clusters, labels) else: _relocate_empty_clusters_sparse(X.data, X.indices, X.indptr, sample_weight, centers_old, centers_new, weight_in_clusters, labels) # The relocation scheme will take the 2 points farthest from the center and # assign them to the 2 empty clusters, i.e. points at 10 and at 9.9. The # first center will be updated to contain the other 8 points. assert_array_equal(weight_in_clusters, [8, 1, 1]) assert_allclose(centers_new, [[-36], [10], [9.5]]) @pytest.mark.parametrize("distribution", ["normal", "blobs"]) @pytest.mark.parametrize("array_constr", [np.array, sp.csr_matrix], ids=["dense", "sparse"]) @pytest.mark.parametrize("tol", [1e-2, 1e-8, 1e-100, 0]) def test_kmeans_elkan_results(distribution, array_constr, tol): # Check that results are identical between lloyd and elkan algorithms rnd = np.random.RandomState(0) if distribution == "normal": X = rnd.normal(size=(5000, 10)) else: X, _ = make_blobs(random_state=rnd) X[X < 0] = 0 X = array_constr(X) km_full = KMeans(algorithm="full", n_clusters=5, random_state=0, n_init=1, tol=tol) km_elkan = KMeans(algorithm="elkan", n_clusters=5, random_state=0, n_init=1, tol=tol) km_full.fit(X) km_elkan.fit(X) assert_allclose(km_elkan.cluster_centers_, km_full.cluster_centers_) assert_array_equal(km_elkan.labels_, km_full.labels_) assert km_elkan.n_iter_ == km_full.n_iter_ assert km_elkan.inertia_ == pytest.approx(km_full.inertia_, rel=1e-6) @pytest.mark.parametrize("algorithm", ["full", "elkan"]) def 
test_kmeans_convergence(algorithm): # Check that KMeans stops when convergence is reached when tol=0. (#16075) rnd = np.random.RandomState(0) X = rnd.normal(size=(5000, 10)) max_iter = 300 km = KMeans(algorithm=algorithm, n_clusters=5, random_state=0, n_init=1, tol=0, max_iter=max_iter).fit(X) assert km.n_iter_ < max_iter def test_minibatch_update_consistency(): # Check that dense and sparse minibatch update give the same results rng = np.random.RandomState(42) old_centers = centers + rng.normal(size=centers.shape) new_centers = old_centers.copy() new_centers_csr = old_centers.copy() weight_sums = np.zeros(new_centers.shape[0], dtype=np.double) weight_sums_csr = np.zeros(new_centers.shape[0], dtype=np.double) x_squared_norms = (X ** 2).sum(axis=1) x_squared_norms_csr = row_norms(X_csr, squared=True) buffer = np.zeros(centers.shape[1], dtype=np.double) buffer_csr = np.zeros(centers.shape[1], dtype=np.double) # extract a small minibatch X_mb = X[:10] X_mb_csr = X_csr[:10] x_mb_squared_norms = x_squared_norms[:10] x_mb_squared_norms_csr = x_squared_norms_csr[:10] sample_weight_mb = np.ones(X_mb.shape[0], dtype=np.double) # step 1: compute the dense minibatch update old_inertia, incremental_diff = _mini_batch_step( X_mb, sample_weight_mb, x_mb_squared_norms, new_centers, weight_sums, buffer, 1, None, random_reassign=False) assert old_inertia > 0.0 # compute the new inertia on the same batch to check that it decreased labels, new_inertia = _labels_inertia( X_mb, sample_weight_mb, x_mb_squared_norms, new_centers) assert new_inertia > 0.0 assert new_inertia < old_inertia # check that the incremental difference computation is matching the # final observed value effective_diff = np.sum((new_centers - old_centers) ** 2) assert_almost_equal(incremental_diff, effective_diff) # step 2: compute the sparse minibatch update old_inertia_csr, incremental_diff_csr = _mini_batch_step( X_mb_csr, sample_weight_mb, x_mb_squared_norms_csr, new_centers_csr, weight_sums_csr, buffer_csr, 1, 
None, random_reassign=False) assert old_inertia_csr > 0.0 # compute the new inertia on the same batch to check that it decreased labels_csr, new_inertia_csr = _labels_inertia( X_mb_csr, sample_weight_mb, x_mb_squared_norms_csr, new_centers_csr) assert new_inertia_csr > 0.0 assert new_inertia_csr < old_inertia_csr # check that the incremental difference computation is matching the # final observed value effective_diff = np.sum((new_centers_csr - old_centers) ** 2) assert_almost_equal(incremental_diff_csr, effective_diff) # step 3: check that sparse and dense updates lead to the same results assert_array_equal(labels, labels_csr) assert_array_almost_equal(new_centers, new_centers_csr) assert_almost_equal(incremental_diff, incremental_diff_csr) assert_almost_equal(old_inertia, old_inertia_csr) assert_almost_equal(new_inertia, new_inertia_csr) def _check_fitted_model(km): # check that the number of clusters centers and distinct labels match # the expectation centers = km.cluster_centers_ assert centers.shape == (n_clusters, n_features) labels = km.labels_ assert np.unique(labels).shape[0] == n_clusters # check that the labels assignment are perfect (up to a permutation) assert v_measure_score(true_labels, labels) == 1.0 assert km.inertia_ > 0.0 @pytest.mark.parametrize("data", [X, X_csr], ids=["dense", "sparse"]) @pytest.mark.parametrize("init", ["random", "k-means++", centers, lambda X, k, random_state: centers], ids=["random", "k-means++", "ndarray", "callable"]) @pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans]) def test_all_init(Estimator, data, init): # Check KMeans and MiniBatchKMeans with all possible init. 
n_init = 10 if isinstance(init, str) else 1 km = Estimator(init=init, n_clusters=n_clusters, random_state=42, n_init=n_init).fit(data) _check_fitted_model(km) @pytest.mark.parametrize("init", ["random", "k-means++", centers, lambda X, k, random_state: centers], ids=["random", "k-means++", "ndarray", "callable"]) def test_minibatch_kmeans_partial_fit_init(init): # Check MiniBatchKMeans init with partial_fit n_init = 10 if isinstance(init, str) else 1 km = MiniBatchKMeans(init=init, n_clusters=n_clusters, random_state=0, n_init=n_init) for i in range(100): # "random" init requires many batches to recover the true labels. km.partial_fit(X) _check_fitted_model(km) @pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans]) def test_fortran_aligned_data(Estimator): # Check that KMeans works with fortran-aligned data. X_fortran = np.asfortranarray(X) centers_fortran = np.asfortranarray(centers) km_c = Estimator(n_clusters=n_clusters, init=centers, n_init=1, random_state=42).fit(X) km_f = Estimator(n_clusters=n_clusters, init=centers_fortran, n_init=1, random_state=42).fit(X_fortran) assert_allclose(km_c.cluster_centers_, km_f.cluster_centers_) assert_array_equal(km_c.labels_, km_f.labels_) @pytest.mark.parametrize('algo', ['full', 'elkan']) @pytest.mark.parametrize('dtype', [np.float32, np.float64]) @pytest.mark.parametrize('constructor', [np.asarray, sp.csr_matrix]) @pytest.mark.parametrize('seed, max_iter, tol', [ (0, 2, 1e-7), # strict non-convergence (1, 2, 1e-1), # loose non-convergence (3, 300, 1e-7), # strict convergence (4, 300, 1e-1), # loose convergence ]) def test_k_means_fit_predict(algo, dtype, constructor, seed, max_iter, tol): # check that fit.predict gives same result as fit_predict # There's a very small chance of failure with elkan on unstructured dataset # because predict method uses fast euclidean distances computation which # may cause small numerical instabilities. 
# NB: This test is largely redundant with respect to test_predict and # test_predict_equal_labels. This test has the added effect of # testing idempotence of the fittng procesdure which appears to # be where it fails on some MacOS setups. if sys.platform == "darwin": pytest.xfail( "Known failures on MacOS, See " "https://github.com/scikit-learn/scikit-learn/issues/12644") rng = np.random.RandomState(seed) X = make_blobs(n_samples=1000, n_features=10, centers=10, random_state=rng)[0].astype(dtype, copy=False) X = constructor(X) kmeans = KMeans(algorithm=algo, n_clusters=10, random_state=seed, tol=tol, max_iter=max_iter) labels_1 = kmeans.fit(X).predict(X) labels_2 = kmeans.fit_predict(X) # Due to randomness in the order in which chunks of data are processed when # using more than one thread, the absolute values of the labels can be # different between the 2 strategies but they should correspond to the same # clustering. assert v_measure_score(labels_1, labels_2) == pytest.approx(1, abs=1e-15) def test_minibatch_kmeans_verbose(): # Check verbose mode of MiniBatchKMeans for better coverage. km = MiniBatchKMeans(n_clusters=n_clusters, random_state=42, verbose=1) old_stdout = sys.stdout sys.stdout = StringIO() try: km.fit(X) finally: sys.stdout = old_stdout @pytest.mark.parametrize("algorithm", ["full", "elkan"]) @pytest.mark.parametrize("tol", [1e-2, 0]) def test_kmeans_verbose(algorithm, tol, capsys): # Check verbose mode of KMeans for better coverage. 
X = np.random.RandomState(0).normal(size=(5000, 10)) KMeans(algorithm=algorithm, n_clusters=n_clusters, random_state=42, init="random", n_init=1, tol=tol, verbose=1).fit(X) captured = capsys.readouterr() assert re.search(r"Initialization complete", captured.out) assert re.search(r"Iteration [0-9]+, inertia", captured.out) if tol == 0: assert re.search(r"strict convergence", captured.out) else: assert re.search(r"center shift .* within tolerance", captured.out) def test_minibatch_kmeans_warning_init_size(): # Check that a warning is raised when init_size is smaller than n_clusters with pytest.warns(RuntimeWarning, match=r"init_size.* should be larger than n_clusters"): MiniBatchKMeans(init_size=10, n_clusters=20).fit(X) @pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans]) def test_warning_n_init_precomputed_centers(Estimator): # Check that a warning is raised when n_init > 1 and an array is passed for # the init parameter. with pytest.warns(RuntimeWarning, match="Explicit initial center position passed: " "performing only one init"): Estimator(init=centers, n_clusters=n_clusters, n_init=10).fit(X) def test_minibatch_sensible_reassign(): # check that identical initial clusters are reassigned # also a regression test for when there are more desired reassignments than # samples. 
zeroed_X, true_labels = make_blobs(n_samples=100, centers=5, random_state=42) zeroed_X[::2, :] = 0 km = MiniBatchKMeans(n_clusters=20, batch_size=10, random_state=42, init="random").fit(zeroed_X) # there should not be too many exact zero cluster centers assert km.cluster_centers_.any(axis=1).sum() > 10 # do the same with batch-size > X.shape[0] (regression test) km = MiniBatchKMeans(n_clusters=20, batch_size=200, random_state=42, init="random").fit(zeroed_X) # there should not be too many exact zero cluster centers assert km.cluster_centers_.any(axis=1).sum() > 10 # do the same with partial_fit API km = MiniBatchKMeans(n_clusters=20, random_state=42, init="random") for i in range(100): km.partial_fit(zeroed_X) # there should not be too many exact zero cluster centers assert km.cluster_centers_.any(axis=1).sum() > 10 def test_minibatch_reassign(): # Give a perfect initialization, but a large reassignment_ratio, # as a result all the centers should be reassigned and the model # should no longer be good sample_weight = np.ones(X.shape[0], dtype=X.dtype) for this_X in (X, X_csr): mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100, random_state=42) mb_k_means.fit(this_X) score_before = mb_k_means.score(this_X) try: old_stdout = sys.stdout sys.stdout = StringIO() # Turn on verbosity to smoke test the display code _mini_batch_step(this_X, sample_weight, (X ** 2).sum(axis=1), mb_k_means.cluster_centers_, mb_k_means._counts, np.zeros(X.shape[1], np.double), False, distances=np.zeros(X.shape[0]), random_reassign=True, random_state=42, reassignment_ratio=1, verbose=True) finally: sys.stdout = old_stdout assert score_before > mb_k_means.score(this_X) # Give a perfect initialization, with a small reassignment_ratio, # no center should be reassigned for this_X in (X, X_csr): mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100, init=centers.copy(), random_state=42, n_init=1) mb_k_means.fit(this_X) clusters_before = mb_k_means.cluster_centers_ # Turn 
on verbosity to smoke test the display code _mini_batch_step(this_X, sample_weight, (X ** 2).sum(axis=1), mb_k_means.cluster_centers_, mb_k_means._counts, np.zeros(X.shape[1], np.double), False, distances=np.zeros(X.shape[0]), random_reassign=True, random_state=42, reassignment_ratio=1e-15) assert_array_almost_equal(clusters_before, mb_k_means.cluster_centers_) def test_minibatch_with_many_reassignments(): # Test for the case that the number of clusters to reassign is bigger # than the batch_size n_samples = 550 rnd = np.random.RandomState(42) X = rnd.uniform(size=(n_samples, 10)) # Check that the fit works if n_clusters is bigger than the batch_size. # Run the test with 550 clusters and 550 samples, because it turned out # that this values ensure that the number of clusters to reassign # is always bigger than the batch_size n_clusters = 550 MiniBatchKMeans(n_clusters=n_clusters, batch_size=100, init_size=n_samples, random_state=42).fit(X) def test_minibatch_kmeans_init_size(): # Check the internal _init_size attribute of MiniBatchKMeans # default init size should be 3 * batch_size km = MiniBatchKMeans(n_clusters=10, batch_size=5, n_init=1).fit(X) assert km._init_size == 15 # if 3 * batch size < n_clusters, it should then be 3 * n_clusters km = MiniBatchKMeans(n_clusters=10, batch_size=1, n_init=1).fit(X) assert km._init_size == 30 # it should not be larger than n_samples km = MiniBatchKMeans(n_clusters=10, batch_size=5, n_init=1, init_size=n_samples + 1).fit(X) assert km._init_size == n_samples def test_kmeans_copyx(): # Check that copy_x=False returns nearly equal X after de-centering. 
my_X = X.copy() km = KMeans(copy_x=False, n_clusters=n_clusters, random_state=42) km.fit(my_X) _check_fitted_model(km) # check that my_X is de-centered assert_allclose(my_X, X) @pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans]) def test_score_max_iter(Estimator): # Check that fitting KMeans or MiniBatchKMeans with more iterations gives # better score X = np.random.RandomState(0).randn(100, 10) km1 = Estimator(n_init=1, random_state=42, max_iter=1) s1 = km1.fit(X).score(X) km2 = Estimator(n_init=1, random_state=42, max_iter=10) s2 = km2.fit(X).score(X) assert s2 > s1 @pytest.mark.parametrize("array_constr", [np.array, sp.csr_matrix], ids=["dense", "sparse"]) @pytest.mark.parametrize("dtype", [np.float32, np.float64]) @pytest.mark.parametrize("init", ["random", "k-means++"]) @pytest.mark.parametrize("Estimator, algorithm", [ (KMeans, "full"), (KMeans, "elkan"), (MiniBatchKMeans, None) ]) def test_predict(Estimator, algorithm, init, dtype, array_constr): # Check the predict method and the equivalence between fit.predict and # fit_predict. # There's a very small chance of failure with elkan on unstructured dataset # because predict method uses fast euclidean distances computation which # may cause small numerical instabilities. 
if sys.platform == "darwin": pytest.xfail( "Known failures on MacOS, See " "https://github.com/scikit-learn/scikit-learn/issues/12644") X, _ = make_blobs(n_samples=500, n_features=10, centers=10, random_state=0) X = array_constr(X) # With n_init = 1 km = Estimator(n_clusters=10, init=init, n_init=1, random_state=0) if algorithm is not None: km.set_params(algorithm=algorithm) km.fit(X) labels = km.labels_ # re-predict labels for training set using predict pred = km.predict(X) assert_array_equal(pred, labels) # re-predict labels for training set using fit_predict pred = km.fit_predict(X) assert_array_equal(pred, labels) # predict centroid labels pred = km.predict(km.cluster_centers_) assert_array_equal(pred, np.arange(10)) # With n_init > 1 # Due to randomness in the order in which chunks of data are processed when # using more than one thread, there might be different rounding errors for # the computation of the inertia between 2 runs. This might result in a # different ranking of 2 inits, hence a different labeling, even if they # give the same clustering. We only check the labels up to a permutation. km = Estimator(n_clusters=10, init=init, n_init=10, random_state=0) if algorithm is not None: km.set_params(algorithm=algorithm) km.fit(X) labels = km.labels_ # re-predict labels for training set using predict pred = km.predict(X) assert_allclose(v_measure_score(pred, labels), 1) # re-predict labels for training set using fit_predict pred = km.fit_predict(X) assert_allclose(v_measure_score(pred, labels), 1) # predict centroid labels pred = km.predict(km.cluster_centers_) assert_allclose(v_measure_score(pred, np.arange(10)), 1) @pytest.mark.parametrize("init", ["random", "k-means++", centers], ids=["random", "k-means++", "ndarray"]) @pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans]) def test_predict_dense_sparse(Estimator, init): # check that models trained on sparse input also works for dense input at # predict time and vice versa. 
n_init = 10 if isinstance(init, str) else 1 km = Estimator(n_clusters=n_clusters, init=init, n_init=n_init, random_state=0) km.fit(X_csr) assert_array_equal(km.predict(X), km.labels_) km.fit(X) assert_array_equal(km.predict(X_csr), km.labels_) @pytest.mark.parametrize("array_constr", [np.array, sp.csr_matrix], ids=["dense", "sparse"]) @pytest.mark.parametrize("dtype", [np.int32, np.int64]) @pytest.mark.parametrize("init", ["k-means++", "ndarray"]) @pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans]) def test_integer_input(Estimator, array_constr, dtype, init): # Check that KMeans and MiniBatchKMeans work with integer input. X_dense = np.array([[0, 0], [10, 10], [12, 9], [-1, 1], [2, 0], [8, 10]]) X = array_constr(X_dense, dtype=dtype) n_init = 1 if init == "ndarray" else 10 init = X_dense[:2] if init == "ndarray" else init km = Estimator(n_clusters=2, init=init, n_init=n_init, random_state=0) if Estimator is MiniBatchKMeans: km.set_params(batch_size=2) km.fit(X) # Internally integer input should be converted to float64 assert km.cluster_centers_.dtype == np.float64 expected_labels = [0, 1, 1, 0, 0, 1] assert_allclose(v_measure_score(km.labels_, expected_labels), 1) # Same with partial_fit (#14314) if Estimator is MiniBatchKMeans: km = clone(km).partial_fit(X) assert km.cluster_centers_.dtype == np.float64 @pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans]) def test_transform(Estimator): # Check the transform method km = Estimator(n_clusters=n_clusters).fit(X) # Transorfming cluster_centers_ should return the pairwise distances # between centers Xt = km.transform(km.cluster_centers_) assert_allclose(Xt, pairwise_distances(km.cluster_centers_)) # In particular, diagonal must be 0 assert_array_equal(Xt.diagonal(), np.zeros(n_clusters)) # Transorfming X should return the pairwise distances between X and the # centers Xt = km.transform(X) assert_allclose(Xt, pairwise_distances(X, km.cluster_centers_)) @pytest.mark.parametrize("Estimator", 
[KMeans, MiniBatchKMeans]) def test_fit_transform(Estimator): # Check equivalence between fit.transform and fit_transform X1 = Estimator(random_state=0, n_init=1).fit(X).transform(X) X2 = Estimator(random_state=0, n_init=1).fit_transform(X) assert_allclose(X1, X2) def test_n_init(): # Check that increasing the number of init increases the quality previous_inertia = np.inf for n_init in [1, 5, 10]: # set max_iter=1 to avoid finding the global minimum and get the same # inertia each time km = KMeans(n_clusters=n_clusters, init="random", n_init=n_init, random_state=0, max_iter=1).fit(X) assert km.inertia_ <= previous_inertia def test_k_means_function(): # test calling the k_means function directly cluster_centers, labels, inertia = k_means(X, n_clusters=n_clusters, sample_weight=None) assert cluster_centers.shape == (n_clusters, n_features) assert np.unique(labels).shape[0] == n_clusters # check that the labels assignment are perfect (up to a permutation) assert_allclose(v_measure_score(true_labels, labels), 1.0) assert inertia > 0.0 @pytest.mark.parametrize("data", [X, X_csr], ids=["dense", "sparse"]) @pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans]) def test_float_precision(Estimator, data): # Check that the results are the same for single and double precision. km = Estimator(n_init=1, random_state=0) inertia = {} Xt = {} centers = {} labels = {} for dtype in [np.float64, np.float32]: X = data.astype(dtype, **_astype_copy_false(data)) km.fit(X) inertia[dtype] = km.inertia_ Xt[dtype] = km.transform(X) centers[dtype] = km.cluster_centers_ labels[dtype] = km.labels_ # dtype of cluster centers has to be the dtype of the input data assert km.cluster_centers_.dtype == dtype # same with partial_fit if Estimator is MiniBatchKMeans: km.partial_fit(X[0:3]) assert km.cluster_centers_.dtype == dtype # compare arrays with low precision since the difference between 32 and # 64 bit comes from an accumulation of rounding errors. 
assert_allclose(inertia[np.float32], inertia[np.float64], rtol=1e-5) assert_allclose(Xt[np.float32], Xt[np.float64], rtol=1e-5) assert_allclose(centers[np.float32], centers[np.float64], rtol=1e-5) assert_array_equal(labels[np.float32], labels[np.float64]) @pytest.mark.parametrize("dtype", [np.int32, np.int64, np.float32, np.float64]) @pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans]) def test_centers_not_mutated(Estimator, dtype): # Check that KMeans and MiniBatchKMeans won't mutate the user provided # init centers silently even if input data and init centers have the same # type. X_new_type = X.astype(dtype, copy=False) centers_new_type = centers.astype(dtype, copy=False) km = Estimator(init=centers_new_type, n_clusters=n_clusters, n_init=1) km.fit(X_new_type) assert not np.may_share_memory(km.cluster_centers_, centers_new_type) @pytest.mark.parametrize("data", [X, X_csr], ids=["dense", "sparse"]) def test_kmeans_init_fitted_centers(data): # Check that starting fitting from a local optimum shouldn't change the # solution km1 = KMeans(n_clusters=n_clusters).fit(data) km2 = KMeans(n_clusters=n_clusters, init=km1.cluster_centers_, n_init=1).fit(data) assert_allclose(km1.cluster_centers_, km2.cluster_centers_) def test_kmeans_warns_less_centers_than_unique_points(): # Check KMeans when the number of found clusters is smaller than expected X = np.asarray([[0, 0], [0, 1], [1, 0], [1, 0]]) # last point is duplicated km = KMeans(n_clusters=4) # KMeans should warn that fewer labels than cluster centers have been used msg = (r"Number of distinct clusters \(3\) found smaller than " r"n_clusters \(4\). 
Possibly due to duplicate points in X.") with pytest.warns(ConvergenceWarning, match=msg): km.fit(X) # only three distinct points, so only three clusters # can have points assigned to them assert set(km.labels_) == set(range(3)) def _sort_centers(centers): return np.sort(centers, axis=0) def test_weighted_vs_repeated(): # Check that a sample weight of N should yield the same result as an N-fold # repetition of the sample. Valid only if init is precomputed, otherwise # rng produces different results. Not valid for MinibatchKMeans due to rng # to extract minibatches. sample_weight = np.random.RandomState(0).randint(1, 5, size=n_samples) X_repeat = np.repeat(X, sample_weight, axis=0) km = KMeans(init=centers, n_init=1, n_clusters=n_clusters, random_state=0) km_weighted = clone(km).fit(X, sample_weight=sample_weight) repeated_labels = np.repeat(km_weighted.labels_, sample_weight) km_repeated = clone(km).fit(X_repeat) assert_array_equal(km_repeated.labels_, repeated_labels) assert_allclose(km_weighted.inertia_, km_repeated.inertia_) assert_allclose(_sort_centers(km_weighted.cluster_centers_), _sort_centers(km_repeated.cluster_centers_)) @pytest.mark.parametrize("data", [X, X_csr], ids=["dense", "sparse"]) @pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans]) def test_unit_weights_vs_no_weights(Estimator, data): # Check that not passing sample weights should be equivalent to passing # sample weights all equal to one. 
sample_weight = np.ones(n_samples) km = Estimator(n_clusters=n_clusters, random_state=42, n_init=1) km_none = clone(km).fit(data, sample_weight=None) km_ones = clone(km).fit(data, sample_weight=sample_weight) assert_array_equal(km_none.labels_, km_ones.labels_) assert_allclose(km_none.cluster_centers_, km_ones.cluster_centers_) def test_scaled_weights(): # scaling all sample weights by a common factor # shouldn't change the result sample_weight = np.ones(n_samples) for estimator in [KMeans(n_clusters=n_clusters, random_state=42), MiniBatchKMeans(n_clusters=n_clusters, random_state=42)]: est_1 = clone(estimator).fit(X) est_2 = clone(estimator).fit(X, sample_weight=0.5*sample_weight) assert_almost_equal(v_measure_score(est_1.labels_, est_2.labels_), 1.0) assert_almost_equal(_sort_centers(est_1.cluster_centers_), _sort_centers(est_2.cluster_centers_)) def test_kmeans_elkan_iter_attribute(): # Regression test on bad n_iter_ value. Previous bug n_iter_ was one off # it's right value (#11340). km = KMeans(algorithm="elkan", max_iter=1).fit(X) assert km.n_iter_ == 1 @pytest.mark.parametrize("array_constr", [np.array, sp.csr_matrix], ids=["dense", "sparse"]) def test_kmeans_empty_cluster_relocated(array_constr): # check that empty clusters are correctly relocated when using sample # weights (#13486) X = array_constr([[-1], [1]]) sample_weight = [1.9, 0.1] init = np.array([[-1], [10]]) km = KMeans(n_clusters=2, init=init, n_init=1) km.fit(X, sample_weight=sample_weight) assert len(set(km.labels_)) == 2 assert_allclose(km.cluster_centers_, [[-1], [1]]) def test_result_of_kmeans_equal_in_diff_n_threads(): # Check that KMeans gives the same results in parallel mode than in # sequential mode. 
rnd = np.random.RandomState(0) X = rnd.normal(size=(50, 10)) with threadpool_limits(limits=1, user_api="openmp"): result_1 = KMeans( n_clusters=3, random_state=0).fit(X).labels_ with threadpool_limits(limits=2, user_api="openmp"): result_2 = KMeans( n_clusters=3, random_state=0).fit(X).labels_ assert_array_equal(result_1, result_2) @pytest.mark.parametrize("precompute_distances", ["auto", False, True]) def test_precompute_distance_deprecated(precompute_distances): # FIXME: remove in 0.25 depr_msg = ("'precompute_distances' was deprecated in version 0.23 and " "will be removed in 0.25.") X, _ = make_blobs(n_samples=10, n_features=2, centers=2, random_state=0) kmeans = KMeans(n_clusters=2, n_init=1, init='random', random_state=0, precompute_distances=precompute_distances) with pytest.warns(FutureWarning, match=depr_msg): kmeans.fit(X) @pytest.mark.parametrize("n_jobs", [None, 1]) def test_n_jobs_deprecated(n_jobs): # FIXME: remove in 0.25 depr_msg = ("'n_jobs' was deprecated in version 0.23 and will be removed " "in 0.25.") X, _ = make_blobs(n_samples=10, n_features=2, centers=2, random_state=0) kmeans = KMeans(n_clusters=2, n_init=1, init='random', random_state=0, n_jobs=n_jobs) with pytest.warns(FutureWarning, match=depr_msg): kmeans.fit(X) @pytest.mark.parametrize("attr", ["counts_", "init_size_", "random_state_"]) def test_minibatch_kmeans_deprecated_attributes(attr): # check that we raise a deprecation warning when accessing `init_size_` # FIXME: remove in 0.26 depr_msg = (f"The attribute '{attr}' is deprecated in 0.24 and will be " f"removed in 0.26.") km = MiniBatchKMeans(n_clusters=2, n_init=1, init='random', random_state=0) km.fit(X) with pytest.warns(FutureWarning, match=depr_msg): getattr(km, attr) def test_warning_elkan_1_cluster(): # Check warning messages specific to KMeans with pytest.warns(RuntimeWarning, match="algorithm='elkan' doesn't make sense for a single" " cluster"): KMeans(n_clusters=1, algorithm="elkan").fit(X) 
@pytest.mark.parametrize("array_constr", [np.array, sp.csr_matrix], ids=["dense", "sparse"]) @pytest.mark.parametrize("algo", ["full", "elkan"]) def test_k_means_1_iteration(array_constr, algo): # check the results after a single iteration (E-step M-step E-step) by # comparing against a pure python implementation. X = np.random.RandomState(0).uniform(size=(100, 5)) init_centers = X[:5] X = array_constr(X) def py_kmeans(X, init): new_centers = init.copy() labels = pairwise_distances_argmin(X, init) for label in range(init.shape[0]): new_centers[label] = X[labels == label].mean(axis=0) labels = pairwise_distances_argmin(X, new_centers) return labels, new_centers py_labels, py_centers = py_kmeans(X, init_centers) cy_kmeans = KMeans(n_clusters=5, n_init=1, init=init_centers, algorithm=algo, max_iter=1).fit(X) cy_labels = cy_kmeans.labels_ cy_centers = cy_kmeans.cluster_centers_ assert_array_equal(py_labels, cy_labels) assert_allclose(py_centers, cy_centers) @pytest.mark.parametrize("dtype", [np.float32, np.float64]) @pytest.mark.parametrize("squared", [True, False]) def test_euclidean_distance(dtype, squared): # Check that the _euclidean_(dense/sparse)_dense helpers produce correct # results rng = np.random.RandomState(0) a_sparse = sp.random(1, 100, density=0.5, format="csr", random_state=rng, dtype=dtype) a_dense = a_sparse.toarray().reshape(-1) b = rng.randn(100).astype(dtype, copy=False) b_squared_norm = (b**2).sum() expected = ((a_dense - b)**2).sum() expected = expected if squared else np.sqrt(expected) distance_dense_dense = _euclidean_dense_dense_wrapper(a_dense, b, squared) distance_sparse_dense = _euclidean_sparse_dense_wrapper( a_sparse.data, a_sparse.indices, b, b_squared_norm, squared) assert_allclose(distance_dense_dense, distance_sparse_dense, rtol=1e-6) assert_allclose(distance_dense_dense, expected, rtol=1e-6) assert_allclose(distance_sparse_dense, expected, rtol=1e-6) @pytest.mark.parametrize("dtype", [np.float32, np.float64]) def test_inertia(dtype): 
rng = np.random.RandomState(0) X_sparse = sp.random(100, 10, density=0.5, format="csr", random_state=rng, dtype=dtype) X_dense = X_sparse.toarray() sample_weight = rng.randn(100).astype(dtype, copy=False) centers = rng.randn(5, 10).astype(dtype, copy=False) labels = rng.randint(5, size=100, dtype=np.int32) distances = ((X_dense - centers[labels])**2).sum(axis=1) expected = np.sum(distances * sample_weight) inertia_dense = _inertia_dense(X_dense, sample_weight, centers, labels) inertia_sparse = _inertia_sparse(X_sparse, sample_weight, centers, labels) assert_allclose(inertia_dense, inertia_sparse, rtol=1e-6) assert_allclose(inertia_dense, expected, rtol=1e-6) assert_allclose(inertia_sparse, expected, rtol=1e-6) @pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans]) def test_sample_weight_unchanged(Estimator): # Check that sample_weight is not modified in place by KMeans (#17204) X = np.array([[1], [2], [4]]) sample_weight = np.array([0.5, 0.2, 0.3]) Estimator(n_clusters=2, random_state=0).fit(X, sample_weight=sample_weight) assert_array_equal(sample_weight, np.array([0.5, 0.2, 0.3])) @pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans]) @pytest.mark.parametrize("param, match", [ ({"n_init": 0}, r"n_init should be > 0"), ({"max_iter": 0}, r"max_iter should be > 0"), ({"n_clusters": n_samples + 1}, r"n_samples.* should be >= n_clusters"), ({"init": X[:2]}, r"The shape of the initial centers .* does not match " r"the number of clusters"), ({"init": lambda X_, k, random_state: X_[:2]}, r"The shape of the initial centers .* does not match " r"the number of clusters"), ({"init": X[:8, :2]}, r"The shape of the initial centers .* does not match " r"the number of features of the data"), ({"init": lambda X_, k, random_state: X_[:8, :2]}, r"The shape of the initial centers .* does not match " r"the number of features of the data"), ({"init": "wrong"}, r"init should be either 'k-means\+\+', 'random', " r"a ndarray or a callable")] ) def 
test_wrong_params(Estimator, param, match): # Check that error are raised with clear error message when wrong values # are passed for the parameters # Set n_init=1 by default to avoid warning with precomputed init km = Estimator(n_init=1) with pytest.raises(ValueError, match=match): km.set_params(**param).fit(X) @pytest.mark.parametrize("param, match", [ ({"algorithm": "wrong"}, r"Algorithm must be 'auto', 'full' or 'elkan'")] ) def test_kmeans_wrong_params(param, match): # Check that error are raised with clear error message when wrong values # are passed for the KMeans specific parameters with pytest.raises(ValueError, match=match): KMeans(**param).fit(X) @pytest.mark.parametrize("param, match", [ ({"max_no_improvement": -1}, r"max_no_improvement should be >= 0"), ({"batch_size": -1}, r"batch_size should be > 0"), ({"init_size": -1}, r"init_size should be > 0"), ({"reassignment_ratio": -1}, r"reassignment_ratio should be >= 0")] ) def test_minibatch_kmeans_wrong_params(param, match): # Check that error are raised with clear error message when wrong values # are passed for the MiniBatchKMeans specific parameters with pytest.raises(ValueError, match=match): MiniBatchKMeans(**param).fit(X) @pytest.mark.parametrize("param, match", [ ({"n_local_trials": 0}, r"n_local_trials is set to 0 but should be an " r"integer value greater than zero"), ({"x_squared_norms": X[:2]}, r"The length of x_squared_norms .* should " r"be equal to the length of n_samples")] ) def test_kmeans_plusplus_wrong_params(param, match): with pytest.raises(ValueError, match=match): kmeans_plusplus(X, n_clusters, **param) @pytest.mark.parametrize("data", [X, X_csr]) @pytest.mark.parametrize("dtype", [np.float64, np.float32]) def test_kmeans_plusplus_output(data, dtype): # Check for the correct number of seeds and all positive values data = data.astype(dtype) centers, indices = kmeans_plusplus(data, n_clusters) # Check there are the correct number of indices and that all indices are # positive and 
within the number of samples assert indices.shape[0] == n_clusters assert (indices >= 0).all() assert (indices <= data.shape[0]).all() # Check for the correct number of seeds and that they are bound by the data assert centers.shape[0] == n_clusters assert (centers.max(axis=0) <= data.max(axis=0)).all() assert (centers.min(axis=0) >= data.min(axis=0)).all() # Check that indices correspond to reported centers # Use X for comparison rather than data, test still works against centers # calculated with sparse data. assert_allclose(X[indices].astype(dtype), centers) @pytest.mark.parametrize("x_squared_norms", [row_norms(X, squared=True), None]) def test_kmeans_plusplus_norms(x_squared_norms): # Check that defining x_squared_norms returns the same as default=None. centers, indices = kmeans_plusplus(X, n_clusters, x_squared_norms=x_squared_norms) assert_allclose(X[indices], centers) def test_kmeans_plusplus_dataorder(): # Check that memory layout does not effect result centers_c, _ = kmeans_plusplus(X, n_clusters, random_state=0) X_fortran = np.asfortranarray(X) centers_fortran, _ = kmeans_plusplus(X_fortran, n_clusters, random_state=0) assert_allclose(centers_c, centers_fortran)
bsd-3-clause
miaecle/deepchem
deepchem/models/tests/test_reload.py
1
1303
""" Test reload for trained models. """ __author__ = "Bharath Ramsundar" __copyright__ = "Copyright 2016, Stanford University" __license__ = "MIT" import unittest import tempfile import numpy as np import deepchem as dc import tensorflow as tf from sklearn.ensemble import RandomForestClassifier class TestReload(unittest.TestCase): def test_sklearn_reload(self): """Test that trained model can be reloaded correctly.""" n_samples = 10 n_features = 3 n_tasks = 1 # Generate dummy dataset np.random.seed(123) ids = np.arange(n_samples) X = np.random.rand(n_samples, n_features) y = np.random.randint(2, size=(n_samples, n_tasks)) w = np.ones((n_samples, n_tasks)) dataset = dc.data.NumpyDataset(X, y, w, ids) classification_metric = dc.metrics.Metric(dc.metrics.roc_auc_score) sklearn_model = RandomForestClassifier() model_dir = tempfile.mkdtemp() model = dc.models.SklearnModel(sklearn_model, model_dir) # Fit trained model model.fit(dataset) model.save() # Load trained model reloaded_model = dc.models.SklearnModel(None, model_dir) reloaded_model.reload() # Eval model on train scores = reloaded_model.evaluate(dataset, [classification_metric]) assert scores[classification_metric.name] > .9
mit
arthurmensch/modl
modl/decomposition/dict_fact.py
1
28827
import atexit from concurrent.futures import ThreadPoolExecutor from math import log, ceil from tempfile import TemporaryFile import numpy as np import scipy import time from sklearn.base import BaseEstimator, TransformerMixin from sklearn.utils import check_array, check_random_state, gen_batches from sklearn.utils.validation import check_is_fitted from modl.utils import get_sub_slice from modl.utils.randomkit import RandomState from modl.utils.randomkit import Sampler from .dict_fact_fast import _enet_regression_multi_gram, \ _enet_regression_single_gram, _update_G_average, _batch_weight from ..utils.math.enet import enet_norm, enet_projection, enet_scale MAX_INT = np.iinfo(np.int64).max class CodingMixin(TransformerMixin): def _set_coding_params(self, n_components, code_alpha=1, code_l1_ratio=1, tol=1e-2, max_iter=100, code_pos=False, random_state=None, n_threads=1 ): self.n_components = n_components self.code_l1_ratio = code_l1_ratio self.code_alpha = code_alpha self.code_pos = code_pos self.random_state = random_state self.tol = tol self.max_iter = max_iter self.n_threads = n_threads if self.n_threads > 1: self._pool = ThreadPoolExecutor(n_threads) def transform(self, X): """ Compute the codes associated to input matrix X, decomposing it onto the dictionary Parameters ---------- X: ndarray, shape = (n_samples, n_features) Returns ------- code: ndarray, shape = (n_samples, n_components) """ check_is_fitted(self, 'components_') dtype = self.components_.dtype X = check_array(X, order='C', dtype=dtype.type) if X.flags['WRITEABLE'] is False: X = X.copy() n_samples, n_features = X.shape if not hasattr(self, 'G_agg') or self.G_agg != 'full': G = self.components_.dot(self.components_.T) else: G = self.G_ Dx = X.dot(self.components_.T) code = np.ones((n_samples, self.n_components), dtype=dtype) sample_indices = np.arange(n_samples) size_job = ceil(n_samples / self.n_threads) batches = list(gen_batches(n_samples, size_job)) par_func = lambda batch: 
_enet_regression_single_gram( G, Dx[batch], X[batch], code, get_sub_slice(sample_indices, batch), self.code_l1_ratio, self.code_alpha, self.code_pos, self.tol, self.max_iter) if self.n_threads > 1: res = self._pool.map(par_func, batches) _ = list(res) else: _enet_regression_single_gram( G, Dx, X, code, sample_indices, self.code_l1_ratio, self.code_alpha, self.code_pos, self.tol, self.max_iter) return code def score(self, X): """ Objective function value on test data X Parameters ---------- X: ndarray, shape=(n_samples, n_features) Input matrix Returns ------- score: float, positive """ check_is_fitted(self, 'components_') code = self.transform(X) loss = np.sum((X - code.dot(self.components_)) ** 2) / 2 norm1_code = np.sum(np.abs(code)) norm2_code = np.sum(code ** 2) regul = self.code_alpha * (norm1_code * self.code_l1_ratio + (1 - self.code_l1_ratio) * norm2_code / 2) return (loss + regul) / X.shape[0] def __getstate__(self): state = dict(self.__dict__) state.pop('_pool', None) return state def __setstate__(self, state): self.__dict__ = state if self.n_threads > 1: self._pool = ThreadPoolExecutor(self.n_threads) class DictFact(CodingMixin, BaseEstimator): def __init__(self, reduction=1, learning_rate=1, sample_learning_rate=0.76, Dx_agg='masked', G_agg='masked', optimizer='variational', dict_init=None, code_alpha=1, code_l1_ratio=1, comp_l1_ratio=0, step_size=1, tol=1e-2, max_iter=100, code_pos=False, comp_pos=False, random_state=None, n_epochs=1, n_components=10, batch_size=10, verbose=0, callback=None, n_threads=1, rand_size=True, replacement=True, ): """ Estimator to perform matrix factorization by streaming samples and subsampling them randomly to increase speed. Solve for argmin_{comp_l1_ratio ||D^j ||_1 + (1 - comp_l1_ratio) || D^j ||_2^2 < 1, A} 1 / 2 || X - D A ||_2 + code_alpha ((1 - code_l1_ratio) || A ||_2 / 2 + code_l1_ratio || A ||_1) References ---------- 'Massive Online Dictionary Learning' A. Mensch, J. Mairal, B. Thrion, G. 
Varoquaux, ICML '16 'Subsampled Online Matrix Factorization with Convergence Guarantees A. Mensch, J. Mairal, G. Varoquaux, B. Thrion, OPT@NIPS '16 Parameters ---------- reduction: float Ratio of reduction in accessing the features of the data stream. The larger, the _faster the algorithm will go over data. Too large reduction may lead to slower convergence. learning_rate: float in ]0.917, 1] Weights to use in learning the dictionary. 1 means no forgetting, lower means forgetting the past _faster, 0.917 is the theoretical limit for convergence. sample_learning_rate: float in ]0.75, 3 * learning_rate - 2[ Weights to use in reducing the variance due to the stochastic subsampling, when Dx_agg == 'average' or G_agg == 'average'. Lower means forgetting the past _faster Dx_agg: str in ['full', 'average', 'masked'] Estimator to use in estimating D^T x_t G_agg: str in ['full', 'average', 'masked'] Estimator to use in estimating the Gram matrix D^T D code_alpha: float, positive Penalty applied to the code in the minimization problem code_l1_ratio: float in [0, 1] Ratio of l1 penalty for the code in the minimization problem dict_init: ndarray, shape = (n_components, n_features) Initial dictionary n_epochs: int Number of epochs to perform over data n_components: int Number of pipelining in the dictionary batch_size: int Size of mini-batches to use code_pos: boolean, Learn a positive code comp_pos: boolean, Learn a positive dictionary random_state: np.random.RandomState or int Seed randomness in the learning algorithm comp_l1_ratio: float in [0, 1] Ratio of l1 in the dictionary constraint verbose: int, positive Control the verbosity of the estimator callback: callable, Function called from time to time with local variables n_threads: int Number of processors to use in the algorithm tol: float, positive Tolerance for the elastic-net solver max_iter: int, positive Maximum iteration for the elastic-net solver rand_size: boolean Whether the masks should have fixed size 
replacement: boolean Whether to compute random or cycling masks Attributes ---------- self.components_: ndarray, shape = (n_components, n_features) Current estimation of the dictionary self.code_: ndarray, shape = (n_samples, n_components) Current estimation of each sample code self.C_: ndarray, shape = (n_components, n_components) For computing D gradient self.B_: ndarray, shape = (n_components, n_features) For computing D gradient self.gradient_: ndarray, shape = (n_components, n_features) D gradient, to perform block coordinate descent self.G_: ndarray, shape = (n_components, n_components) Gram matrix self.Dx_average_: ndarray, shape = (n_samples, n_components) Current estimate of D^T X self.G_average_: ndarray, shape = (n_samples, n_components, n_components) Averaged previously seen subsampled Gram matrix. Memory-mapped self.n_iter_: int Number of seen samples self.sample_n_iter_: int Number of time each sample has been seen self.verbose_iter_: int List of verbose iteration self.feature_sampler_: Sampler Generator of masks """ self.batch_size = batch_size self.learning_rate = learning_rate self.sample_learning_rate = sample_learning_rate self.Dx_agg = Dx_agg self.G_agg = G_agg self.reduction = reduction self.dict_init = dict_init self._set_coding_params(n_components, code_l1_ratio=code_l1_ratio, code_alpha=code_alpha, code_pos=code_pos, random_state=random_state, tol=tol, max_iter=max_iter, n_threads=n_threads) self.comp_l1_ratio = comp_l1_ratio self.comp_pos = comp_pos self.optimizer = optimizer self.step_size = step_size self.n_epochs = n_epochs self.verbose = verbose self.callback = callback self.n_threads = n_threads self.rand_size = rand_size self.replacement = replacement def fit(self, X): """ Compute the factorisation X ~ code_ x components_, solving for D, code_ = argmin_{r2 ||D^j ||_1 + (1 - r2) || D^j ||_2^2 < 1} 1 / 2 || X - D A ||_2 + (1 - r) || A ||_2 / 2 + r || A ||_1 Parameters ---------- X: ndarray, shape= (n_samples, n_features) Returns ------- 
self """ X = check_array(X, order='C', dtype=[np.float32, np.float64]) if self.dict_init is None: dict_init = X else: dict_init = check_array(self.dict_init, dtype=X.dtype.type) self.prepare(n_samples=X.shape[0], X=dict_init) # Main loop for _ in range(self.n_epochs): self.partial_fit(X) permutation = self.shuffle() X = X[permutation] return self def partial_fit(self, X, sample_indices=None): """ Update the factorization using rows from X Parameters ---------- X: ndarray, shape (n_samples, n_features) Input data sample_indices: Indices for each row of X. If None, consider that row i index is i (useful when providing the whole data to the function) Returns ------- self """ X = check_array(X, dtype=[np.float32, np.float64], order='C') n_samples, n_features = X.shape batches = gen_batches(n_samples, self.batch_size) for batch in batches: this_X = X[batch] these_sample_indices = get_sub_slice(sample_indices, batch) self._single_batch_fit(this_X, these_sample_indices) return self def set_params(self, **params): """Set the parameters of this estimator. The optimizer works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form ``<component>__<parameter>`` so that it's possible to update each component of a nested object. 
Returns ------- self """ G_agg = params.pop('G_agg', None) if G_agg == 'full' and self.G_agg != 'full': if hasattr(self, 'components_'): self.G_ = self.components_.dot(self.components_.T) self.G_agg = 'full' BaseEstimator.set_params(self, **params) def shuffle(self): """ Shuffle regression statistics, code_, G_average_ and Dx_average_ and return the permutation used Returns ------- permutation: ndarray, shape = (n_samples) Permutation used in shuffling regression statistics """ random_seed = self.random_state.randint(MAX_INT) random_state = RandomState(random_seed) list = [self.code_] if self.G_agg == 'average': list.append(self.G_average_) if self.Dx_agg == 'average': list.append(self.Dx_average_) perm = random_state.shuffle_with_trace(list) self.labels_ = self.labels_[perm] return perm def prepare(self, n_samples=None, n_features=None, dtype=None, X=None): """ Init estimator attributes based on input shape and type. Parameters ---------- n_samples: int, n_features: int, dtype: dtype in np.float32, np.float64 to use in the estimator. 
Override X.dtype if provided X: ndarray, shape (> n_components, n_features) Array to use to determine shape and types, and init dictionary if provided Returns ------- self """ if X is not None: X = check_array(X, order='C', dtype=[np.float32, np.float64]) if dtype is None: dtype = X.dtype # Transpose to fit usual column streaming this_n_samples = X.shape[0] if n_samples is None: n_samples = this_n_samples if n_features is None: n_features = X.shape[1] else: if n_features != X.shape[1]: raise ValueError('n_features and X does not match') else: if n_features is None or n_samples is None: raise ValueError('Either provide' 'shape or data to function prepare.') if dtype is None: dtype = np.float64 elif dtype not in [np.float32, np.float64]: return ValueError('dtype should be float32 or float64') if self.optimizer not in ['variational', 'sgd']: return ValueError("optimizer should be 'variational' or 'sgd'") if self.optimizer == 'sgd': self.reduction = 1 self.G_agg = 'full' self.Dx_agg = 'full' # Regression statistics if self.G_agg == 'average': with TemporaryFile() as self.G_average_mmap_: self.G_average_mmap_ = TemporaryFile() self.G_average_ = np.memmap(self.G_average_mmap_, mode='w+', shape=(n_samples, self.n_components, self.n_components), dtype=dtype) atexit.register(self._exit) if self.Dx_agg == 'average': self.Dx_average_ = np.zeros((n_samples, self.n_components), dtype=dtype) # Dictionary statistics self.C_ = np.zeros((self.n_components, self.n_components), dtype=dtype) self.B_ = np.zeros((self.n_components, n_features), dtype=dtype) self.gradient_ = np.zeros((self.n_components, n_features), dtype=dtype, order='F') self.random_state = check_random_state(self.random_state) if X is None: self.components_ = np.empty((self.n_components, n_features), dtype=dtype) self.components_[:, :] = self.random_state.randn(self.n_components, n_features) else: # random_idx = self.random_state.permutation(this_n_samples)[ # :self.n_components] self.components_ = 
check_array(X[:self.n_components], dtype=dtype.type, copy=True) if self.comp_pos: self.components_[self.components_ <= 0] = \ - self.components_[self.components_ <= 0] for i in range(self.n_components): enet_scale(self.components_[i], l1_ratio=self.comp_l1_ratio, radius=1) self.code_ = np.ones((n_samples, self.n_components), dtype=dtype) self.labels_ = np.arange(n_samples) self.comp_norm_ = np.zeros(self.n_components, dtype=dtype) if self.G_agg == 'full': self.G_ = self.components_.dot(self.components_.T) self.n_iter_ = 0 self.sample_n_iter_ = np.zeros(n_samples, dtype='int') self.random_state = check_random_state(self.random_state) random_seed = self.random_state.randint(MAX_INT) self.feature_sampler_ = Sampler(n_features, self.rand_size, self.replacement, random_seed) if self.verbose: self.verbose_iter_ = np.linspace(0, n_samples * self.n_epochs, self.verbose).tolist() self.time_ = 0 return self def _callback(self): if self.callback is not None: self.callback(self) def _single_batch_fit(self, X, sample_indices): """Fit a single batch X: compute code, update statistics, update the dictionary""" if (self.verbose and self.verbose_iter_ and self.n_iter_ >= self.verbose_iter_[0]): print('Iteration %i' % self.n_iter_) self.verbose_iter_ = self.verbose_iter_[1:] self._callback() if X.flags['WRITEABLE'] is False: X = X.copy() t0 = time.perf_counter() subset = self.feature_sampler_.yield_subset(self.reduction) batch_size = X.shape[0] self.n_iter_ += batch_size self.sample_n_iter_[sample_indices] += 1 this_sample_n_iter = self.sample_n_iter_[sample_indices] w_sample = np.power(this_sample_n_iter, -self.sample_learning_rate). 
\ astype(self.components_.dtype) w = _batch_weight(self.n_iter_, batch_size, self.learning_rate, 0) self._compute_code(X, sample_indices, w_sample, subset) this_code = self.code_[sample_indices] if self.n_threads == 1: self._update_stat_and_dict(subset, X, this_code, w) else: self._update_stat_and_dict_parallel(subset, X, this_code, w) self.time_ += time.perf_counter() - t0 def _update_stat_and_dict(self, subset, X, code, w): """For multi-threading""" self._update_C(code, w) self._update_B(X, code, w) self.gradient_[:, subset] = self.B_[:, subset] self._update_dict(subset, w) def _update_stat_and_dict_parallel(self, subset, X, this_code, w): """For multi-threading""" self.gradient_[:, subset] = self.B_[:, subset] dict_thread = self._pool.submit(self._update_stat_partial_and_dict, subset, X, this_code, w) B_thread = self._pool.submit(self._update_B, X, this_code, w) dict_thread.result() B_thread.result() def _update_stat_partial_and_dict(self, subset, X, code, w): """For multi-threading""" self._update_C(code, w) # Gradient update batch_size = X.shape[0] X_subset = X[:, subset] if self.optimizer == 'variational': self.gradient_[:, subset] *= 1 - w self.gradient_[:, subset] += w * code.T.dot(X_subset) / batch_size else: self.gradient_[:, subset] = code.T.dot(X_subset) / batch_size self._update_dict(subset, w) def _update_B(self, X, code, w): """Update B statistics (for updating D)""" batch_size = X.shape[0] if self.optimizer == 'variational': self.B_ *= 1 - w self.B_ += w * code.T.dot(X) / batch_size else: self.B_ = code.T.dot(X) / batch_size def _update_C(self, this_code, w): """Update C statistics (for updating D)""" batch_size = this_code.shape[0] if self.optimizer == 'variational': self.C_ *= 1 - w self.C_ += w * this_code.T.dot(this_code) / batch_size else: self.C_ = this_code.T.dot(this_code) / batch_size def _compute_code(self, X, sample_indices, w_sample, subset): """Update regression statistics if necessary and compute code from X[:, subset]""" batch_size, 
n_features = X.shape reduction = self.reduction if self.n_threads > 1: size_job = ceil(batch_size / self.n_threads) batches = list(gen_batches(batch_size, size_job)) if self.Dx_agg != 'full' or self.G_agg != 'full': components_subset = self.components_[:, subset] if self.Dx_agg == 'full': Dx = X.dot(self.components_.T) else: X_subset = X[:, subset] Dx = X_subset.dot(components_subset.T) * reduction if self.Dx_agg == 'average': self.Dx_average_[sample_indices] \ *= 1 - w_sample[:, np.newaxis] self.Dx_average_[sample_indices] \ += Dx * w_sample[:, np.newaxis] Dx = self.Dx_average_[sample_indices] if self.G_agg != 'full': G = components_subset.dot(components_subset.T) * reduction if self.G_agg == 'average': G_average = np.array(self.G_average_[sample_indices], copy=True) if self.n_threads > 1: par_func = lambda batch: _update_G_average( G_average[batch], G, w_sample[batch], ) res = self._pool.map(par_func, batches) _ = list(res) else: _update_G_average(G_average, G, w_sample) self.G_average_[sample_indices] = G_average else: G = self.G_ if self.n_threads > 1: if self.G_agg == 'average': par_func = lambda batch: _enet_regression_multi_gram( G_average[batch], Dx[batch], X[batch], self.code_, get_sub_slice(sample_indices, batch), self.code_l1_ratio, self.code_alpha, self.code_pos, self.tol, self.max_iter) else: par_func = lambda batch: _enet_regression_single_gram( G, Dx[batch], X[batch], self.code_, get_sub_slice(sample_indices, batch), self.code_l1_ratio, self.code_alpha, self.code_pos, self.tol, self.max_iter) res = self._pool.map(par_func, batches) _ = list(res) else: if self.G_agg == 'average': _enet_regression_multi_gram( G_average, Dx, X, self.code_, sample_indices, self.code_l1_ratio, self.code_alpha, self.code_pos, self.tol, self.max_iter) else: _enet_regression_single_gram( G, Dx, X, self.code_, sample_indices, self.code_l1_ratio, self.code_alpha, self.code_pos, self.tol, self.max_iter) def _update_dict(self, subset, w): """Dictionary update part Parameters 
---------- subset: ndarray, Subset of features to update. """ ger, = scipy.linalg.get_blas_funcs(('ger',), (self.C_, self.components_)) len_subset = subset.shape[0] n_components, n_features = self.components_.shape components_subset = self.components_[:, subset] atom_temp = np.zeros(len_subset, dtype=self.components_.dtype) gradient_subset = self.gradient_[:, subset] if self.G_agg == 'full' and len_subset < n_features / 2.: self.G_ -= components_subset.dot(components_subset.T) gradient_subset -= self.C_.dot(components_subset) order = self.random_state.permutation(n_components) if self.optimizer == 'variational': for k in order: subset_norm = enet_norm(components_subset[k], self.comp_l1_ratio) self.comp_norm_[k] += subset_norm gradient_subset = ger(1.0, self.C_[k], components_subset[k], a=gradient_subset, overwrite_a=True) if self.C_[k, k] > 1e-20: components_subset[k] = gradient_subset[k] / self.C_[k, k] # Else do not update if self.comp_pos: components_subset[components_subset < 0] = 0 enet_projection(components_subset[k], atom_temp, self.comp_norm_[k], self.comp_l1_ratio) components_subset[k] = atom_temp subset_norm = enet_norm(components_subset[k], self.comp_l1_ratio) self.comp_norm_[k] -= subset_norm gradient_subset = ger(-1.0, self.C_[k], components_subset[k], a=gradient_subset, overwrite_a=True) else: for k in order: subset_norm = enet_norm(components_subset[k], self.comp_l1_ratio) self.comp_norm_[k] += subset_norm components_subset += w * self.step_size * gradient_subset for k in range(self.n_components): enet_projection(components_subset[k], atom_temp, self.comp_norm_[k], self.comp_l1_ratio) components_subset[k] = atom_temp subset_norm = enet_norm(components_subset[k], self.comp_l1_ratio) self.comp_norm_[k] -= subset_norm self.components_[:, subset] = components_subset if self.G_agg == 'full': if len_subset < n_features / 2.: self.G_ += components_subset.dot(components_subset.T) else: self.G_[:] = self.components_.dot(self.components_.T) def _exit(self): 
"""Useful to delete G_average_ memorymap when the algorithm is interrupted/completed""" if hasattr(self, 'G_average_mmap_'): self.G_average_mmap_.close() class Coder(CodingMixin, BaseEstimator): def __init__(self, dictionary, code_alpha=1, code_l1_ratio=1, tol=1e-2, max_iter=100, code_pos=False, random_state=None, n_threads=1 ): self._set_coding_params(dictionary.shape[0], code_l1_ratio=code_l1_ratio, code_alpha=code_alpha, code_pos=code_pos, random_state=random_state, tol=tol, max_iter=max_iter, n_threads=n_threads) self.components_ = dictionary def fit(self, X=None): return self
bsd-2-clause
stiebels/letor_to_pandas_converter
Converter.py
1
2061
import pandas as pd


class Letor_Converter(object):
    """Parse an original LETOR ``.txt`` ranking file into a pandas DataFrame.

    LETOR lines look like ``"2 qid:1 1:0.4 2:0.7 "`` (relevance label, query
    id, then ``feature_index:value`` pairs, with a trailing space). The
    converter yields a tabular frame with columns ``['rel', 'qid', '1', '2',
    ...]`` where each row is one sample.

    Note: after conversion, the ``qid`` and feature columns hold *strings*
    (the text after the colon), while ``rel`` keeps its parsed numeric dtype.
    """

    def __init__(self, path):
        """
        Arguments:
            path: path to the LETOR txt file
        """
        self._path = path

    @property
    def path(self):
        """Path of the LETOR file to be converted."""
        return self._path

    @path.setter
    def path(self, p):
        self._path = p

    def _load_file(self):
        """Load the raw LETOR txt file.

        Return:
            DataFrame with one column per whitespace-separated token; the
            trailing space on each line produces an extra all-NaN last column.
        """
        return pd.read_csv(str(self._path), sep=" ", header=None)

    def _drop_col(self, df):
        """Drop the last column.

        That column is an artifact of the trailing white space terminating
        each sample line in the text file.

        Arguments:
            df: pandas DataFrame
        Return:
            df with its last column dropped
        """
        return df.drop(df.columns[-1], axis=1)

    def _split_colon(self, df):
        """Strip the ``name:`` prefix from every non-label column and rename.

        Each cell such as ``"1:0.4"`` becomes ``"0.4"``; columns are renamed
        to ``['rel', 'qid', '1', '2', ...]``.

        Arguments:
            df: pandas DataFrame
        Return:
            df with the ``:`` pattern removed and columns renamed
        """
        for col in range(1, len(df.columns)):
            df.loc[:, col] = df.loc[:, col].apply(lambda x: str(x).split(':')[1])
        # First two columns are the relevance label and query id; the rest are
        # the 1-based feature indices.
        df.columns = ['rel', 'qid'] + [str(x) for x in range(1, len(df.columns) - 1)]
        return df

    def convert(self):
        """Perform the full conversion pipeline.

        Return:
            fully converted pandas DataFrame
        """
        # BUG FIX: _load_file() takes no argument (it reads self._path
        # itself); the original code called self._load_file(self._path),
        # which raised TypeError on every call.
        df_raw = self._load_file()
        df_drop = self._drop_col(df_raw)
        return self._split_colon(df_drop)
mit
paulperry/quant
vti_agg_7030.py
1
2066
'''
A Basic Markowitz portfolio of Stocks and Bonds.

Change it from 50/50 to 60/40 or 70/30.
'''
from __future__ import division
import datetime
import pytz
import pandas as pd
from zipline.api import order_target_percent

# NOTE(review): `symbols`, `set_long_only`, `set_symbol_lookup_date`,
# `schedule_function`, `date_rules`, `time_rules`, `get_datetime` and `log`
# are injected into the namespace by the Quantopian/zipline runtime.


def initialize(context):
    """Set up the 70/30 VTI/AGG target portfolio and the rebalance schedule."""
    set_long_only()
    set_symbol_lookup_date('2005-01-01')
    context.secs = symbols('VTI', 'AGG')  # Securities
    context.pcts = [0.7, 0.3]             # Target percentages
    # BUG FIX: on Python 3, zip() returns a one-shot iterator; it would be
    # exhausted by the first loop in rebalance(), leaving the second loop
    # (which places the orders) with nothing to iterate. Materialize it.
    context.ETFs = list(zip(context.secs, context.pcts))

    # Check to rebalance every month, but only do it in December.
    schedule_function(rebalance,
                      date_rules.month_end(days_offset=5),   # trade before EOY settlement dates
                      time_rules.market_open(minutes=45))    # trade after 10:15am
    return


def rebalance(context, data):
    """Rebalance to target weights each December if allocations drift too far.

    A full rebalance is triggered when uninvested cash or any position's
    weight deviates from its target by more than `threshold`.
    """
    threshold = 0.05  # trigger a rebalance if we are off by this threshold (5%)

    # Get the current exchange time, in the exchange timezone.
    exchange_time = pd.Timestamp(get_datetime()).tz_convert('US/Eastern')
    if exchange_time.month < 12:
        return  # bail if it's not December

    need_full_rebalance = False
    portfolio_value = context.portfolio.portfolio_value

    # Rebalance if we have too much cash sitting idle.
    if context.portfolio.cash / portfolio_value > threshold:
        need_full_rebalance = True

    # ... or rebalance if an ETF is off by the given threshold.
    for sid, target in context.ETFs:
        pos = context.portfolio.positions[sid]
        position_pct = (pos.amount * pos.last_sale_price) / portfolio_value
        # If any position is out of range, rebalance the whole portfolio.
        if abs(position_pct - target) > threshold:
            need_full_rebalance = True
            break  # don't bother checking the rest

    # Perform the full rebalance if we flagged the need to do so.
    if need_full_rebalance:
        for sid, target in context.ETFs:
            order_target_percent(sid, target)
        log.info("Rebalanced at %s" % str(exchange_time))
        context.rebalance_date = exchange_time


def handle_data(context, data):
    """No per-bar logic; all trading happens via the scheduled rebalance."""
    pass
mit
zaxtax/scikit-learn
sklearn/cluster/__init__.py
364
1228
""" The :mod:`sklearn.cluster` module gathers popular unsupervised clustering algorithms. """ from .spectral import spectral_clustering, SpectralClustering from .mean_shift_ import (mean_shift, MeanShift, estimate_bandwidth, get_bin_seeds) from .affinity_propagation_ import affinity_propagation, AffinityPropagation from .hierarchical import (ward_tree, AgglomerativeClustering, linkage_tree, FeatureAgglomeration) from .k_means_ import k_means, KMeans, MiniBatchKMeans from .dbscan_ import dbscan, DBSCAN from .bicluster import SpectralBiclustering, SpectralCoclustering from .birch import Birch __all__ = ['AffinityPropagation', 'AgglomerativeClustering', 'Birch', 'DBSCAN', 'KMeans', 'FeatureAgglomeration', 'MeanShift', 'MiniBatchKMeans', 'SpectralClustering', 'affinity_propagation', 'dbscan', 'estimate_bandwidth', 'get_bin_seeds', 'k_means', 'linkage_tree', 'mean_shift', 'spectral_clustering', 'ward_tree', 'SpectralBiclustering', 'SpectralCoclustering']
bsd-3-clause
jmschrei/pomegranate
tests/test_markov_network.py
1
18046
# test_bayes_net.py # Authors: Jacob Schreiber <jmschreiber91@gmail.com> ''' These are unit tests for the Markov network model of pomegranate. ''' from __future__ import division from pomegranate import JointProbabilityTable from pomegranate import MarkovNetwork from pomegranate.io import DataGenerator from pomegranate.io import DataFrameGenerator from nose.tools import with_setup from nose.tools import assert_equal from nose.tools import assert_raises from nose.tools import assert_true from nose.tools import assert_almost_equal from numpy.testing import assert_array_equal from numpy.testing import assert_array_almost_equal import pandas import random, numpy import sys def setup_markov_network_int(): global d1, d2, d3 global model1, model2, model3, model4 d1 = JointProbabilityTable([ [0, 0, 0.1], [0, 1, 0.2], [1, 0, 0.4], [1, 1, 0.3]], [0, 1]) d2 = JointProbabilityTable([ [0, 0, 0, 0.05], [0, 0, 1, 0.15], [0, 1, 0, 0.07], [0, 1, 1, 0.03], [1, 0, 0, 0.12], [1, 0, 1, 0.18], [1, 1, 0, 0.10], [1, 1, 1, 0.30]], [1, 2, 3]) d3 = JointProbabilityTable([ [0, 0, 0, 0.08], [0, 0, 1, 0.12], [0, 1, 0, 0.11], [0, 1, 1, 0.19], [1, 0, 0, 0.04], [1, 0, 1, 0.06], [1, 1, 0, 0.23], [1, 1, 1, 0.17]], [2, 3, 4]) model1 = MarkovNetwork([d1]) model1.bake() model2 = MarkovNetwork([d1, d2]) model2.bake() model3 = MarkovNetwork([d1, d2, d3]) model3.bake() model4 = MarkovNetwork([d1, d3]) model4.bake() def setup_markov_network_str(): global d1, d2, d3 global model1, model2, model3, model4 d1 = JointProbabilityTable([ ['0', '0', 0.1], ['0', '1', 0.2], ['1', '0', 0.4], ['1', '1', 0.3]], [0, 1]) d2 = JointProbabilityTable([ ['0', '0', '0', 0.05], ['0', '0', '1', 0.15], ['0', '1', '0', 0.07], ['0', '1', '1', 0.03], ['1', '0', '0', 0.12], ['1', '0', '1', 0.18], ['1', '1', '0', 0.10], ['1', '1', '1', 0.30]], [1, 2, 3]) d3 = JointProbabilityTable([ ['0', '0', '0', 0.08], ['0', '0', '1', 0.12], ['0', '1', '0', 0.11], ['0', '1', '1', 0.19], ['1', '0', '0', 0.04], ['1', '0', '1', 0.06], ['1', '1', '0', 
0.23], ['1', '1', '1', 0.17]], [2, 3, 4]) model1 = MarkovNetwork([d1]) model1.bake() model2 = MarkovNetwork([d1, d2]) model2.bake() model3 = MarkovNetwork([d1, d2, d3]) model3.bake() model4 = MarkovNetwork([d1, d3]) model4.bake() def setup_markov_network_bool(): global d1, d2, d3 global model1, model2, model3, model4 d1 = JointProbabilityTable([ [False, False, 0.1], [False, True, 0.2], [True, False, 0.4], [True, True, 0.3]], [0, 1]) d2 = JointProbabilityTable([ [False, False, False, 0.05], [False, False, True, 0.15], [False, True, False, 0.07], [False, True, True, 0.03], [True, False, False, 0.12], [True, False, True, 0.18], [True, True, False, 0.10], [True, True, True, 0.30]], [1, 2, 3]) d3 = JointProbabilityTable([ [False, False, False, 0.08], [False, False, True, 0.12], [False, True, False, 0.11], [False, True, True, 0.19], [True, False, False, 0.04], [True, False, True, 0.06], [True, True, False, 0.23], [True, True, True, 0.17]], [2, 3, 4]) model1 = MarkovNetwork([d1]) model1.bake() model2 = MarkovNetwork([d1, d2]) model2.bake() model3 = MarkovNetwork([d1, d2, d3]) model3.bake() model4 = MarkovNetwork([d1, d3]) model4.bake() def setup_markov_network_mixed(): global d1, d2, d3 global model1, model2, model3, model4 d1 = JointProbabilityTable([ [False, 'blue', 0.1], [False, 'red', 0.2], [True, 'blue', 0.4], [True, 'red', 0.3]], [0, 1]) d2 = JointProbabilityTable([ ['blue', False, 0, 0.05], ['blue', False, 1, 0.15], ['blue', True, 0, 0.07], ['blue', True, 1, 0.03], ['red', False, 0, 0.12], ['red', False, 1, 0.18], ['red', True, 0, 0.10], ['red', True, 1, 0.30]], [1, 2, 3]) d3 = JointProbabilityTable([ [False, 0, 'a', 0.08], [False, 0, 'b', 0.12], [False, 1, 'a', 0.11], [False, 1, 'b', 0.19], [True, 0, 'a', 0.04], [True, 0, 'b', 0.06], [True, 1, 'a', 0.23], [True, 1, 'b', 0.17]], [2, 3, 4]) model1 = MarkovNetwork([d1]) model1.bake() model2 = MarkovNetwork([d1, d2]) model2.bake() model3 = MarkovNetwork([d1, d2, d3]) model3.bake() model4 = MarkovNetwork([d1, d3]) 
model4.bake() def teardown(): pass def test_initialize(): assert_raises(ValueError, MarkovNetwork, []) d1 = JointProbabilityTable([ [0, 0, 0.2], [0, 1, 0.2], [1, 0, 0.4], [1, 1, 0.2]], [0, 1]) model = MarkovNetwork([d1]) @with_setup(setup_markov_network_int, teardown) def test_structure(): assert_equal(model1.structure, ((0, 1),)) assert_equal(model2.structure, ((0, 1), (1, 2, 3))) assert_equal(model3.structure, ((0, 1), (1, 2, 3), (2, 3, 4))) assert_equal(model4.structure, ((0, 1), (2, 3, 4))) @with_setup(setup_markov_network_int, teardown) def test_partition(): model3.bake() assert_true(model3.partition != float("inf")) model3.bake(calculate_partition=False) assert_true(model3.partition == float("inf")) @with_setup(setup_markov_network_int, teardown) def test_d(): assert_equal(model1.d, 2) assert_equal(model2.d, 4) assert_equal(model3.d, 5) assert_equal(model4.d, 5) @with_setup(setup_markov_network_mixed, teardown) def test_d_mixed(): assert_equal(model1.d, 2) assert_equal(model2.d, 4) assert_equal(model3.d, 5) assert_equal(model4.d, 5) @with_setup(setup_markov_network_int, teardown) def test_log_probability_int(): x = [1, 0] logp1 = model1.log_probability(x) logp2 = d1.log_probability(x) assert_almost_equal(logp1, logp2) assert_almost_equal(logp1, numpy.log(0.4)) x = [1, 0, 1, 1] logp1 = model2.log_probability(x) logp2 = d1.log_probability(x[:2]) + d2.log_probability(x[1:]) assert_raises(AssertionError, assert_almost_equal, logp1, logp2) assert_almost_equal(logp1, -3.7297014467295373) x = [1, 0, 1, 0, 1] logp1 = model3.log_probability(x) logp2 = (d1.log_probability(x[:2]) + d2.log_probability(x[1:4]) + d3.log_probability(x[2:])) assert_raises(AssertionError, assert_almost_equal, logp1, logp2) assert_almost_equal(logp1, -4.429966143312331) logp3 = model4.log_probability(x) logp4 = d1.log_probability(x[:2]) + d3.log_probability(x[2:]) assert_almost_equal(logp3, logp4) assert_almost_equal(logp3, -3.7297014486341915) assert_raises(AssertionError, 
assert_almost_equal, logp1, logp3) @with_setup(setup_markov_network_str, teardown) def test_log_probability_str(): x = ['1', '0'] logp1 = model1.log_probability(x) logp2 = d1.log_probability(x) assert_almost_equal(logp1, logp2) assert_almost_equal(logp1, numpy.log(0.4)) x = ['1', '0', '1', '1'] logp1 = model2.log_probability(x) logp2 = d1.log_probability(x[:2]) + d2.log_probability(x[1:]) assert_raises(AssertionError, assert_almost_equal, logp1, logp2) assert_almost_equal(logp1, -3.7297014467295373) x = ['1', '0', '1', '0', '1'] logp1 = model3.log_probability(x) logp2 = (d1.log_probability(x[:2]) + d2.log_probability(x[1:4]) + d3.log_probability(x[2:])) assert_raises(AssertionError, assert_almost_equal, logp1, logp2) assert_almost_equal(logp1, -4.429966143312331) logp3 = model4.log_probability(x) logp4 = d1.log_probability(x[:2]) + d3.log_probability(x[2:]) assert_almost_equal(logp3, logp4) assert_almost_equal(logp3, -3.7297014486341915) assert_raises(AssertionError, assert_almost_equal, logp1, logp3) @with_setup(setup_markov_network_bool, teardown) def test_log_probability_bool(): x = [True, False] logp1 = model1.log_probability(x) logp2 = d1.log_probability(x) assert_almost_equal(logp1, logp2) assert_almost_equal(logp1, numpy.log(0.4)) x = [True, False, True, True] logp1 = model2.log_probability(x) logp2 = d1.log_probability(x[:2]) + d2.log_probability(x[1:]) assert_raises(AssertionError, assert_almost_equal, logp1, logp2) assert_almost_equal(logp1, -3.7297014467295373) x = [True, False, True, False, True] logp1 = model3.log_probability(x) logp2 = (d1.log_probability(x[:2]) + d2.log_probability(x[1:4]) + d3.log_probability(x[2:])) assert_raises(AssertionError, assert_almost_equal, logp1, logp2) assert_almost_equal(logp1, -4.429966143312331) logp3 = model4.log_probability(x) logp4 = d1.log_probability(x[:2]) + d3.log_probability(x[2:]) assert_almost_equal(logp3, logp4) assert_almost_equal(logp3, -3.7297014486341915) assert_raises(AssertionError, 
assert_almost_equal, logp1, logp3) @with_setup(setup_markov_network_mixed, teardown) def test_log_probability_mixed(): x = [True, 'blue'] logp1 = model1.log_probability(x) logp2 = d1.log_probability(x) assert_almost_equal(logp1, logp2) assert_almost_equal(logp1, numpy.log(0.4)) x = [True, 'blue', True, 1] logp1 = model2.log_probability(x) logp2 = d1.log_probability(x[:2]) + d2.log_probability(x[1:]) assert_raises(AssertionError, assert_almost_equal, logp1, logp2) assert_almost_equal(logp1, -3.7297014467295373) x = [1, 'blue', True, 0, 'b'] logp1 = model3.log_probability(x) logp2 = (d1.log_probability(x[:2]) + d2.log_probability(x[1:4]) + d3.log_probability(x[2:])) assert_raises(AssertionError, assert_almost_equal, logp1, logp2) assert_almost_equal(logp1, -4.429966143312331) logp3 = model4.log_probability(x) logp4 = d1.log_probability(x[:2]) + d3.log_probability(x[2:]) assert_almost_equal(logp3, logp4) assert_almost_equal(logp3, -3.7297014486341915) assert_raises(AssertionError, assert_almost_equal, logp1, logp3) @with_setup(setup_markov_network_int, teardown) def test_log_probability_unnormalized_int(): x = [1, 0] logp1 = model1.log_probability(x, unnormalized=True) logp2 = d1.log_probability(x) assert_almost_equal(logp1, logp2) assert_almost_equal(logp1, numpy.log(0.4)) x = [1, 0, 1, 1] logp1 = model2.log_probability(x, unnormalized=True) logp2 = d1.log_probability(x[:2]) + d2.log_probability(x[1:]) assert_almost_equal(logp1, logp2) assert_almost_equal(logp1, numpy.log(0.4 * 0.03)) x = [1, 0, 1, 0, 1] logp1 = model3.log_probability(x, unnormalized=True) logp2 = (d1.log_probability(x[:2]) + d2.log_probability(x[1:4]) + d3.log_probability(x[2:])) assert_almost_equal(logp1, logp2) assert_almost_equal(logp1, numpy.log(0.4 * 0.07 * 0.06)) logp3 = model4.log_probability(x, unnormalized=True) logp4 = d1.log_probability(x[:2]) + d3.log_probability(x[2:]) assert_almost_equal(logp3, logp4) assert_almost_equal(logp3, numpy.log(0.4 * 0.06)) assert_raises(AssertionError, 
assert_almost_equal, logp1, logp3) @with_setup(setup_markov_network_str, teardown) def test_log_probability_unnormalized_str(): x = ['1', '0'] logp1 = model1.log_probability(x, unnormalized=True) logp2 = d1.log_probability(x) assert_almost_equal(logp1, logp2) assert_almost_equal(logp1, numpy.log(0.4)) x = ['1', '0', '1', '1'] logp1 = model2.log_probability(x, unnormalized=True) logp2 = d1.log_probability(x[:2]) + d2.log_probability(x[1:]) assert_almost_equal(logp1, logp2) assert_almost_equal(logp1, numpy.log(0.4 * 0.03)) x = ['1', '0', '1', '0', '1'] logp1 = model3.log_probability(x, unnormalized=True) logp2 = (d1.log_probability(x[:2]) + d2.log_probability(x[1:4]) + d3.log_probability(x[2:])) assert_almost_equal(logp1, logp2) assert_almost_equal(logp1, numpy.log(0.4 * 0.07 * 0.06)) logp3 = model4.log_probability(x, unnormalized=True) logp4 = d1.log_probability(x[:2]) + d3.log_probability(x[2:]) assert_almost_equal(logp3, logp4) assert_almost_equal(logp3, numpy.log(0.4 * 0.06)) assert_raises(AssertionError, assert_almost_equal, logp1, logp3) @with_setup(setup_markov_network_bool, teardown) def test_log_probability_unnormalized_bool(): x = [True, False] logp1 = model1.log_probability(x, unnormalized=True) logp2 = d1.log_probability(x) assert_almost_equal(logp1, logp2) assert_almost_equal(logp1, numpy.log(0.4)) x = [True, False, True, True] logp1 = model2.log_probability(x, unnormalized=True) logp2 = d1.log_probability(x[:2]) + d2.log_probability(x[1:]) assert_almost_equal(logp1, logp2) assert_almost_equal(logp1, numpy.log(0.4 * 0.03)) x = [True, False, True, False, True] logp1 = model3.log_probability(x, unnormalized=True) logp2 = (d1.log_probability(x[:2]) + d2.log_probability(x[1:4]) + d3.log_probability(x[2:])) assert_almost_equal(logp1, logp2) assert_almost_equal(logp1, numpy.log(0.4 * 0.07 * 0.06)) logp3 = model4.log_probability(x, unnormalized=True) logp4 = d1.log_probability(x[:2]) + d3.log_probability(x[2:]) assert_almost_equal(logp3, logp4) 
assert_almost_equal(logp3, numpy.log(0.4 * 0.06)) assert_raises(AssertionError, assert_almost_equal, logp1, logp3) @with_setup(setup_markov_network_mixed, teardown) def test_log_probability_unnormalized_mixed(): x = [True, 'blue'] logp1 = model1.log_probability(x, unnormalized=True) logp2 = d1.log_probability(x) assert_almost_equal(logp1, logp2) assert_almost_equal(logp1, numpy.log(0.4)) x = [True, 'blue', True, 1] logp1 = model2.log_probability(x, unnormalized=True) logp2 = d1.log_probability(x[:2]) + d2.log_probability(x[1:]) assert_almost_equal(logp1, logp2) assert_almost_equal(logp1, numpy.log(0.4 * 0.03)) x = [1, 'blue', True, 0, 'b'] logp1 = model3.log_probability(x, unnormalized=True) logp2 = (d1.log_probability(x[:2]) + d2.log_probability(x[1:4]) + d3.log_probability(x[2:])) assert_almost_equal(logp1, logp2) assert_almost_equal(logp1, numpy.log(0.4 * 0.07 * 0.06)) logp3 = model4.log_probability(x, unnormalized=True) logp4 = d1.log_probability(x[:2]) + d3.log_probability(x[2:]) assert_almost_equal(logp3, logp4) assert_almost_equal(logp3, numpy.log(0.4 * 0.06)) assert_raises(AssertionError, assert_almost_equal, logp1, logp3) @with_setup(setup_markov_network_int, teardown) def test_predict_int(): assert_array_equal(model1.predict([[1, None]]), [[1, 0]]) assert_array_equal(model1.predict([[None, 1]]), [[1, 1]]) assert_array_equal(model2.predict([[1, 0, None, None]]), [[1, 0, 0, 1]]) assert_array_equal(model2.predict([[0, 0, None, None]]), [[0, 0, 0, 1]]) assert_array_equal(model2.predict([[None, 1, None, None]]), [[1, 1, 1, 1]]) assert_array_equal(model2.predict([[None, 1, 1, None]]), [[1, 1, 1, 1]]) assert_array_equal(model2.predict([[None, 1, None, 0]]), [[1, 1, 0, 0]]) assert_array_equal(model3.predict([[1, 0, None, 1, None]]), [[1, 0, 0, 1, 1]]) assert_array_equal(model3.predict([[None, 0, None, 1, None]]), [[1, 0, 0, 1, 1]]) assert_array_equal(model3.predict([[1, 0, None, 0, 1]]), [[1, 0, 0, 0, 1]]) assert_array_equal(model3.predict([[None, None, None, 
None, None]]), [[1, 1, 1, 1, 1]]) assert_array_equal(model3.predict([[None, None, None, None, 1]]), [[1, 1, 0, 1, 1]]) @with_setup(setup_markov_network_str, teardown) def test_predict_str(): assert_array_equal(model1.predict([['1', None]]), [['1', '0']]) assert_array_equal(model1.predict([[None, '1']]), [['1', '1']]) assert_array_equal(model2.predict([['1', '0', None, None]]), [['1', '0', '0', '1']]) assert_array_equal(model2.predict([['0', '0', None, None]]), [['0', '0', '0', '1']]) assert_array_equal(model2.predict([[None, '1', None, None]]), [['1', '1', '1', '1']]) assert_array_equal(model2.predict([[None, '1', '1', None]]), [['1', '1', '1', '1']]) assert_array_equal(model2.predict([[None, '1', None, '0']]), [['1', '1', '0', '0']]) assert_array_equal(model3.predict([['1', '0', None, '1', None]]), [['1', '0', '0', '1', '1']]) assert_array_equal(model3.predict([[None, '0', None, '1', None]]), [['1', '0', '0', '1', '1']]) assert_array_equal(model3.predict([['1', '0', None, '0', '1']]), [['1', '0', '0', '0', '1']]) assert_array_equal(model3.predict([[None, None, None, None, None]]), [['1', '1', '1', '1', '1']]) assert_array_equal(model3.predict([[None, None, None, None, '1']]), [['1', '1', '0', '1', '1']]) @with_setup(setup_markov_network_bool, teardown) def test_predict_bool(): assert_array_equal(model1.predict([[True, None]]), [[True, False]]) assert_array_equal(model1.predict([[None, True]]), [[True, True]]) assert_array_equal(model2.predict([[True, False, None, None]]), [[True, False, False, True]]) assert_array_equal(model2.predict([[False, False, None, None]]), [[False, False, False, True]]) assert_array_equal(model2.predict([[None, True, None, None]]), [[True, True, True, True]]) assert_array_equal(model2.predict([[None, True, True, None]]), [[True, True, True, True]]) assert_array_equal(model2.predict([[None, True, None, False]]), [[True, True, False, False]]) assert_array_equal(model3.predict([[True, False, None, True, None]]), [[True, False, False, True, 
True]]) assert_array_equal(model3.predict([[None, False, None, True, None]]), [[True, False, False, True, True]]) assert_array_equal(model3.predict([[True, False, None, False, True]]), [[True, False, False, False, True]]) assert_array_equal(model3.predict([[None, None, None, None, None]]), [[True, True, True, True, True]]) assert_array_equal(model3.predict([[None, None, None, None, True]]), [[True, True, False, True, True]]) @with_setup(setup_markov_network_mixed, teardown) def test_predict_mixed(): assert_array_equal(model1.predict([[True, None]]), numpy.array([[True, 'blue']], dtype=object)) assert_array_equal(model1.predict([[None, 'red']]), numpy.array([[True, 'red']], dtype=object)) assert_array_equal(model2.predict([[True, 'blue', None, None]]), numpy.array([[True, 'blue', False, 1]], dtype=object)) assert_array_equal(model2.predict([[False, 'blue', None, None]]), numpy.array([[False, 'blue', False, 1]], dtype=object)) assert_array_equal(model2.predict([[None, 'red', None, None]]), numpy.array([[True, 'red', True, 1]], dtype=object)) assert_array_equal(model2.predict([[None, 'red', True, None]]), numpy.array([[True, 'red', True, 1]], dtype=object)) assert_array_equal(model2.predict([[None, 'red', None, 0]]), numpy.array([[True, 'red', False, 0]], dtype=object)) assert_array_equal(model3.predict([[True, 'blue', None, 1, None]]), numpy.array([[True, 'blue', False, 1, 'b']], dtype=object))
mit
breznak/nupic
external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_ps.py
69
50262
""" A PostScript backend, which can produce both PostScript .ps and .eps """ from __future__ import division import glob, math, os, shutil, sys, time def _fn_name(): return sys._getframe(1).f_code.co_name try: from hashlib import md5 except ImportError: from md5 import md5 #Deprecated in 2.5 from tempfile import gettempdir from cStringIO import StringIO from matplotlib import verbose, __version__, rcParams from matplotlib._pylab_helpers import Gcf from matplotlib.afm import AFM from matplotlib.backend_bases import RendererBase, GraphicsContextBase,\ FigureManagerBase, FigureCanvasBase from matplotlib.cbook import is_string_like, get_realpath_and_stat, \ is_writable_file_like, maxdict from matplotlib.mlab import quad2cubic from matplotlib.figure import Figure from matplotlib.font_manager import findfont, is_opentype_cff_font from matplotlib.ft2font import FT2Font, KERNING_DEFAULT, LOAD_NO_HINTING from matplotlib.ttconv import convert_ttf_to_ps from matplotlib.mathtext import MathTextParser from matplotlib._mathtext_data import uni2type1 from matplotlib.text import Text from matplotlib.path import Path from matplotlib.transforms import IdentityTransform import numpy as npy import binascii import re try: set except NameError: from sets import Set as set if sys.platform.startswith('win'): cmd_split = '&' else: cmd_split = ';' backend_version = 'Level II' debugPS = 0 papersize = {'letter': (8.5,11), 'legal': (8.5,14), 'ledger': (11,17), 'a0': (33.11,46.81), 'a1': (23.39,33.11), 'a2': (16.54,23.39), 'a3': (11.69,16.54), 'a4': (8.27,11.69), 'a5': (5.83,8.27), 'a6': (4.13,5.83), 'a7': (2.91,4.13), 'a8': (2.07,2.91), 'a9': (1.457,2.05), 'a10': (1.02,1.457), 'b0': (40.55,57.32), 'b1': (28.66,40.55), 'b2': (20.27,28.66), 'b3': (14.33,20.27), 'b4': (10.11,14.33), 'b5': (7.16,10.11), 'b6': (5.04,7.16), 'b7': (3.58,5.04), 'b8': (2.51,3.58), 'b9': (1.76,2.51), 'b10': (1.26,1.76)} def _get_papertype(w, h): keys = papersize.keys() keys.sort() keys.reverse() for key in keys: if 
key.startswith('l'): continue pw, ph = papersize[key] if (w < pw) and (h < ph): return key else: return 'a0' def _num_to_str(val): if is_string_like(val): return val ival = int(val) if val==ival: return str(ival) s = "%1.3f"%val s = s.rstrip("0") s = s.rstrip(".") return s def _nums_to_str(*args): return ' '.join(map(_num_to_str,args)) def quote_ps_string(s): "Quote dangerous characters of S for use in a PostScript string constant." s=s.replace("\\", "\\\\") s=s.replace("(", "\\(") s=s.replace(")", "\\)") s=s.replace("'", "\\251") s=s.replace("`", "\\301") s=re.sub(r"[^ -~\n]", lambda x: r"\%03o"%ord(x.group()), s) return s def seq_allequal(seq1, seq2): """ seq1 and seq2 are either None or sequences or numerix arrays Return True if both are None or both are seqs with identical elements """ if seq1 is None: return seq2 is None if seq2 is None: return False #ok, neither are None:, assuming iterable if len(seq1) != len(seq2): return False return npy.alltrue(npy.equal(seq1, seq2)) class RendererPS(RendererBase): """ The renderer handles all the drawing primitives using a graphics context instance that controls the colors/styles. 
""" fontd = maxdict(50) afmfontd = maxdict(50) def __init__(self, width, height, pswriter, imagedpi=72): """ Although postscript itself is dpi independent, we need to imform the image code about a requested dpi to generate high res images and them scale them before embeddin them """ RendererBase.__init__(self) self.width = width self.height = height self._pswriter = pswriter if rcParams['text.usetex']: self.textcnt = 0 self.psfrag = [] self.imagedpi = imagedpi if rcParams['path.simplify']: self.simplify = (width * imagedpi, height * imagedpi) else: self.simplify = None # current renderer state (None=uninitialised) self.color = None self.linewidth = None self.linejoin = None self.linecap = None self.linedash = None self.fontname = None self.fontsize = None self.hatch = None self.image_magnification = imagedpi/72.0 self._clip_paths = {} self._path_collection_id = 0 self.used_characters = {} self.mathtext_parser = MathTextParser("PS") def track_characters(self, font, s): """Keeps track of which characters are required from each font.""" realpath, stat_key = get_realpath_and_stat(font.fname) used_characters = self.used_characters.setdefault( stat_key, (realpath, set())) used_characters[1].update([ord(x) for x in s]) def merge_used_characters(self, other): for stat_key, (realpath, charset) in other.items(): used_characters = self.used_characters.setdefault( stat_key, (realpath, set())) used_characters[1].update(charset) def set_color(self, r, g, b, store=1): if (r,g,b) != self.color: if r==g and r==b: self._pswriter.write("%1.3f setgray\n"%r) else: self._pswriter.write("%1.3f %1.3f %1.3f setrgbcolor\n"%(r,g,b)) if store: self.color = (r,g,b) def set_linewidth(self, linewidth, store=1): if linewidth != self.linewidth: self._pswriter.write("%1.3f setlinewidth\n"%linewidth) if store: self.linewidth = linewidth def set_linejoin(self, linejoin, store=1): if linejoin != self.linejoin: self._pswriter.write("%d setlinejoin\n"%linejoin) if store: self.linejoin = linejoin def 
set_linecap(self, linecap, store=1): if linecap != self.linecap: self._pswriter.write("%d setlinecap\n"%linecap) if store: self.linecap = linecap def set_linedash(self, offset, seq, store=1): if self.linedash is not None: oldo, oldseq = self.linedash if seq_allequal(seq, oldseq): return if seq is not None and len(seq): s="[%s] %d setdash\n"%(_nums_to_str(*seq), offset) self._pswriter.write(s) else: self._pswriter.write("[] 0 setdash\n") if store: self.linedash = (offset,seq) def set_font(self, fontname, fontsize, store=1): if rcParams['ps.useafm']: return if (fontname,fontsize) != (self.fontname,self.fontsize): out = ("/%s findfont\n" "%1.3f scalefont\n" "setfont\n" % (fontname,fontsize)) self._pswriter.write(out) if store: self.fontname = fontname if store: self.fontsize = fontsize def set_hatch(self, hatch): """ hatch can be one of: / - diagonal hatching \ - back diagonal | - vertical - - horizontal + - crossed X - crossed diagonal letters can be combined, in which case all the specified hatchings are done if same letter repeats, it increases the density of hatching in that direction """ hatches = {'horiz':0, 'vert':0, 'diag1':0, 'diag2':0} for letter in hatch: if (letter == '/'): hatches['diag2'] += 1 elif (letter == '\\'): hatches['diag1'] += 1 elif (letter == '|'): hatches['vert'] += 1 elif (letter == '-'): hatches['horiz'] += 1 elif (letter == '+'): hatches['horiz'] += 1 hatches['vert'] += 1 elif (letter.lower() == 'x'): hatches['diag1'] += 1 hatches['diag2'] += 1 def do_hatch(angle, density): if (density == 0): return "" return """\ gsave eoclip %s rotate 0.0 0.0 0.0 0.0 setrgbcolor 0 setlinewidth /hatchgap %d def pathbbox /hatchb exch def /hatchr exch def /hatcht exch def /hatchl exch def hatchl cvi hatchgap idiv hatchgap mul hatchgap hatchr cvi hatchgap idiv hatchgap mul {hatcht m 0 hatchb hatcht sub r } for stroke grestore """ % (angle, 12/density) self._pswriter.write("gsave\n") self._pswriter.write(do_hatch(90, hatches['horiz'])) 
self._pswriter.write(do_hatch(0, hatches['vert'])) self._pswriter.write(do_hatch(45, hatches['diag1'])) self._pswriter.write(do_hatch(-45, hatches['diag2'])) self._pswriter.write("grestore\n") def get_canvas_width_height(self): 'return the canvas width and height in display coords' return self.width, self.height def get_text_width_height_descent(self, s, prop, ismath): """ get the width and height in display coords of the string s with FontPropertry prop """ if rcParams['text.usetex']: texmanager = self.get_texmanager() fontsize = prop.get_size_in_points() l,b,r,t = texmanager.get_ps_bbox(s, fontsize) w = (r-l) h = (t-b) # TODO: We need a way to get a good baseline from # text.usetex return w, h, 0 if ismath: width, height, descent, pswriter, used_characters = \ self.mathtext_parser.parse(s, 72, prop) return width, height, descent if rcParams['ps.useafm']: if ismath: s = s[1:-1] font = self._get_font_afm(prop) l,b,w,h,d = font.get_str_bbox_and_descent(s) fontsize = prop.get_size_in_points() scale = 0.001*fontsize w *= scale h *= scale d *= scale return w, h, d font = self._get_font_ttf(prop) font.set_text(s, 0.0, flags=LOAD_NO_HINTING) w, h = font.get_width_height() w /= 64.0 # convert from subpixels h /= 64.0 d = font.get_descent() d /= 64.0 #print s, w, h return w, h, d def flipy(self): 'return true if small y numbers are top for renderer' return False def _get_font_afm(self, prop): key = hash(prop) font = self.afmfontd.get(key) if font is None: fname = findfont(prop, fontext='afm') font = self.afmfontd.get(fname) if font is None: font = AFM(file(findfont(prop, fontext='afm'))) self.afmfontd[fname] = font self.afmfontd[key] = font return font def _get_font_ttf(self, prop): key = hash(prop) font = self.fontd.get(key) if font is None: fname = findfont(prop) font = self.fontd.get(fname) if font is None: font = FT2Font(str(fname)) self.fontd[fname] = font self.fontd[key] = font font.clear() size = prop.get_size_in_points() font.set_size(size, 72.0) return font def 
_rgba(self, im): return im.as_rgba_str() def _rgb(self, im): h,w,s = im.as_rgba_str() rgba = npy.fromstring(s, npy.uint8) rgba.shape = (h, w, 4) rgb = rgba[:,:,:3] return h, w, rgb.tostring() def _gray(self, im, rc=0.3, gc=0.59, bc=0.11): rgbat = im.as_rgba_str() rgba = npy.fromstring(rgbat[2], npy.uint8) rgba.shape = (rgbat[0], rgbat[1], 4) rgba_f = rgba.astype(npy.float32) r = rgba_f[:,:,0] g = rgba_f[:,:,1] b = rgba_f[:,:,2] gray = (r*rc + g*gc + b*bc).astype(npy.uint8) return rgbat[0], rgbat[1], gray.tostring() def _hex_lines(self, s, chars_per_line=128): s = binascii.b2a_hex(s) nhex = len(s) lines = [] for i in range(0,nhex,chars_per_line): limit = min(i+chars_per_line, nhex) lines.append(s[i:limit]) return lines def get_image_magnification(self): """ Get the factor by which to magnify images passed to draw_image. Allows a backend to have images at a different resolution to other artists. """ return self.image_magnification def draw_image(self, x, y, im, bbox, clippath=None, clippath_trans=None): """ Draw the Image instance into the current axes; x is the distance in pixels from the left hand side of the canvas and y is the distance from bottom bbox is a matplotlib.transforms.BBox instance for clipping, or None """ im.flipud_out() if im.is_grayscale: h, w, bits = self._gray(im) imagecmd = "image" else: h, w, bits = self._rgb(im) imagecmd = "false 3 colorimage" hexlines = '\n'.join(self._hex_lines(bits)) xscale, yscale = ( w/self.image_magnification, h/self.image_magnification) figh = self.height*72 #print 'values', origin, flipud, figh, h, y clip = [] if bbox is not None: clipx,clipy,clipw,cliph = bbox.bounds clip.append('%s clipbox' % _nums_to_str(clipw, cliph, clipx, clipy)) if clippath is not None: id = self._get_clip_path(clippath, clippath_trans) clip.append('%s' % id) clip = '\n'.join(clip) #y = figh-(y+h) ps = """gsave %(clip)s %(x)s %(y)s translate %(xscale)s %(yscale)s scale /DataString %(w)s string def %(w)s %(h)s 8 [ %(w)s 0 0 -%(h)s 0 %(h)s ] { 
currentfile DataString readhexstring pop } bind %(imagecmd)s %(hexlines)s grestore """ % locals() self._pswriter.write(ps) # unflip im.flipud_out() def _convert_path(self, path, transform, simplify=None): path = transform.transform_path(path) ps = [] last_points = None for points, code in path.iter_segments(simplify): if code == Path.MOVETO: ps.append("%g %g m" % tuple(points)) elif code == Path.LINETO: ps.append("%g %g l" % tuple(points)) elif code == Path.CURVE3: points = quad2cubic(*(list(last_points[-2:]) + list(points))) ps.append("%g %g %g %g %g %g c" % tuple(points[2:])) elif code == Path.CURVE4: ps.append("%g %g %g %g %g %g c" % tuple(points)) elif code == Path.CLOSEPOLY: ps.append("cl") last_points = points ps = "\n".join(ps) return ps def _get_clip_path(self, clippath, clippath_transform): id = self._clip_paths.get((clippath, clippath_transform)) if id is None: id = 'c%x' % len(self._clip_paths) ps_cmd = ['/%s {' % id] ps_cmd.append(self._convert_path(clippath, clippath_transform)) ps_cmd.extend(['clip', 'newpath', '} bind def\n']) self._pswriter.write('\n'.join(ps_cmd)) self._clip_paths[(clippath, clippath_transform)] = id return id def draw_path(self, gc, path, transform, rgbFace=None): """ Draws a Path instance using the given affine transform. """ ps = self._convert_path(path, transform, self.simplify) self._draw_ps(ps, gc, rgbFace) def draw_markers(self, gc, marker_path, marker_trans, path, trans, rgbFace=None): """ Draw the markers defined by path at each of the positions in x and y. 
path coordinates are points, x and y coords will be transformed by the transform """ if debugPS: self._pswriter.write('% draw_markers \n') write = self._pswriter.write if rgbFace: if rgbFace[0]==rgbFace[1] and rgbFace[0]==rgbFace[2]: ps_color = '%1.3f setgray' % rgbFace[0] else: ps_color = '%1.3f %1.3f %1.3f setrgbcolor' % rgbFace # construct the generic marker command: ps_cmd = ['/o {', 'gsave', 'newpath', 'translate'] # dont want the translate to be global ps_cmd.append(self._convert_path(marker_path, marker_trans)) if rgbFace: ps_cmd.extend(['gsave', ps_color, 'fill', 'grestore']) ps_cmd.extend(['stroke', 'grestore', '} bind def']) tpath = trans.transform_path(path) for vertices, code in tpath.iter_segments(): if len(vertices): x, y = vertices[-2:] ps_cmd.append("%g %g o" % (x, y)) ps = '\n'.join(ps_cmd) self._draw_ps(ps, gc, rgbFace, fill=False, stroke=False) def draw_path_collection(self, master_transform, cliprect, clippath, clippath_trans, paths, all_transforms, offsets, offsetTrans, facecolors, edgecolors, linewidths, linestyles, antialiaseds, urls): write = self._pswriter.write path_codes = [] for i, (path, transform) in enumerate(self._iter_collection_raw_paths( master_transform, paths, all_transforms)): name = 'p%x_%x' % (self._path_collection_id, i) ps_cmd = ['/%s {' % name, 'newpath', 'translate'] ps_cmd.append(self._convert_path(path, transform)) ps_cmd.extend(['} bind def\n']) write('\n'.join(ps_cmd)) path_codes.append(name) for xo, yo, path_id, gc, rgbFace in self._iter_collection( path_codes, cliprect, clippath, clippath_trans, offsets, offsetTrans, facecolors, edgecolors, linewidths, linestyles, antialiaseds, urls): ps = "%g %g %s" % (xo, yo, path_id) self._draw_ps(ps, gc, rgbFace) self._path_collection_id += 1 def draw_tex(self, gc, x, y, s, prop, angle, ismath='TeX!'): """ draw a Text instance """ w, h, bl = self.get_text_width_height_descent(s, prop, ismath) fontsize = prop.get_size_in_points() corr = 0#w/2*(fontsize-10)/10 pos = 
_nums_to_str(x-corr, y) thetext = 'psmarker%d' % self.textcnt color = '%1.3f,%1.3f,%1.3f'% gc.get_rgb()[:3] fontcmd = {'sans-serif' : r'{\sffamily %s}', 'monospace' : r'{\ttfamily %s}'}.get( rcParams['font.family'], r'{\rmfamily %s}') s = fontcmd % s tex = r'\color[rgb]{%s} %s' % (color, s) self.psfrag.append(r'\psfrag{%s}[bl][bl][1][%f]{\fontsize{%f}{%f}%s}'%(thetext, angle, fontsize, fontsize*1.25, tex)) ps = """\ gsave %(pos)s moveto (%(thetext)s) show grestore """ % locals() self._pswriter.write(ps) self.textcnt += 1 def draw_text(self, gc, x, y, s, prop, angle, ismath): """ draw a Text instance """ # local to avoid repeated attribute lookups write = self._pswriter.write if debugPS: write("% text\n") if ismath=='TeX': return self.tex(gc, x, y, s, prop, angle) elif ismath: return self.draw_mathtext(gc, x, y, s, prop, angle) elif isinstance(s, unicode): return self.draw_unicode(gc, x, y, s, prop, angle) elif rcParams['ps.useafm']: font = self._get_font_afm(prop) l,b,w,h = font.get_str_bbox(s) fontsize = prop.get_size_in_points() l *= 0.001*fontsize b *= 0.001*fontsize w *= 0.001*fontsize h *= 0.001*fontsize if angle==90: l,b = -b, l # todo generalize for arb rotations pos = _nums_to_str(x-l, y-b) thetext = '(%s)' % s fontname = font.get_fontname() fontsize = prop.get_size_in_points() rotate = '%1.1f rotate' % angle setcolor = '%1.3f %1.3f %1.3f setrgbcolor' % gc.get_rgb()[:3] #h = 0 ps = """\ gsave /%(fontname)s findfont %(fontsize)s scalefont setfont %(pos)s moveto %(rotate)s %(thetext)s %(setcolor)s show grestore """ % locals() self._draw_ps(ps, gc, None) else: font = self._get_font_ttf(prop) font.set_text(s, 0, flags=LOAD_NO_HINTING) self.track_characters(font, s) self.set_color(*gc.get_rgb()) self.set_font(font.get_sfnt()[(1,0,0,6)], prop.get_size_in_points()) write("%s m\n"%_nums_to_str(x,y)) if angle: write("gsave\n") write("%s rotate\n"%_num_to_str(angle)) descent = font.get_descent() / 64.0 if descent: write("0 %s rmoveto\n"%_num_to_str(descent)) 
write("(%s) show\n"%quote_ps_string(s)) if angle: write("grestore\n") def new_gc(self): return GraphicsContextPS() def draw_unicode(self, gc, x, y, s, prop, angle): """draw a unicode string. ps doesn't have unicode support, so we have to do this the hard way """ if rcParams['ps.useafm']: self.set_color(*gc.get_rgb()) font = self._get_font_afm(prop) fontname = font.get_fontname() fontsize = prop.get_size_in_points() scale = 0.001*fontsize thisx = 0 thisy = font.get_str_bbox_and_descent(s)[4] * scale last_name = None lines = [] for c in s: name = uni2type1.get(ord(c), 'question') try: width = font.get_width_from_char_name(name) except KeyError: name = 'question' width = font.get_width_char('?') if last_name is not None: kern = font.get_kern_dist_from_name(last_name, name) else: kern = 0 last_name = name thisx += kern * scale lines.append('%f %f m /%s glyphshow'%(thisx, thisy, name)) thisx += width * scale thetext = "\n".join(lines) ps = """\ gsave /%(fontname)s findfont %(fontsize)s scalefont setfont %(x)f %(y)f translate %(angle)f rotate %(thetext)s grestore """ % locals() self._pswriter.write(ps) else: font = self._get_font_ttf(prop) font.set_text(s, 0, flags=LOAD_NO_HINTING) self.track_characters(font, s) self.set_color(*gc.get_rgb()) self.set_font(font.get_sfnt()[(1,0,0,6)], prop.get_size_in_points()) cmap = font.get_charmap() lastgind = None #print 'text', s lines = [] thisx = 0 thisy = font.get_descent() / 64.0 for c in s: ccode = ord(c) gind = cmap.get(ccode) if gind is None: ccode = ord('?') name = '.notdef' gind = 0 else: name = font.get_glyph_name(gind) glyph = font.load_char(ccode, flags=LOAD_NO_HINTING) if lastgind is not None: kern = font.get_kerning(lastgind, gind, KERNING_DEFAULT) else: kern = 0 lastgind = gind thisx += kern/64.0 lines.append('%f %f m /%s glyphshow'%(thisx, thisy, name)) thisx += glyph.linearHoriAdvance/65536.0 thetext = '\n'.join(lines) ps = """gsave %(x)f %(y)f translate %(angle)f rotate %(thetext)s grestore """ % locals() 
self._pswriter.write(ps) def draw_mathtext(self, gc, x, y, s, prop, angle): """ Draw the math text using matplotlib.mathtext """ if debugPS: self._pswriter.write("% mathtext\n") width, height, descent, pswriter, used_characters = \ self.mathtext_parser.parse(s, 72, prop) self.merge_used_characters(used_characters) self.set_color(*gc.get_rgb()) thetext = pswriter.getvalue() ps = """gsave %(x)f %(y)f translate %(angle)f rotate %(thetext)s grestore """ % locals() self._pswriter.write(ps) def _draw_ps(self, ps, gc, rgbFace, fill=True, stroke=True, command=None): """ Emit the PostScript sniplet 'ps' with all the attributes from 'gc' applied. 'ps' must consist of PostScript commands to construct a path. The fill and/or stroke kwargs can be set to False if the 'ps' string already includes filling and/or stroking, in which case _draw_ps is just supplying properties and clipping. """ # local variable eliminates all repeated attribute lookups write = self._pswriter.write if debugPS and command: write("% "+command+"\n") mightstroke = (gc.get_linewidth() > 0.0 and (len(gc.get_rgb()) <= 3 or gc.get_rgb()[3] != 0.0)) stroke = stroke and mightstroke fill = (fill and rgbFace is not None and (len(rgbFace) <= 3 or rgbFace[3] != 0.0)) if mightstroke: self.set_linewidth(gc.get_linewidth()) jint = gc.get_joinstyle() self.set_linejoin(jint) cint = gc.get_capstyle() self.set_linecap(cint) self.set_linedash(*gc.get_dashes()) self.set_color(*gc.get_rgb()[:3]) write('gsave\n') cliprect = gc.get_clip_rectangle() if cliprect: x,y,w,h=cliprect.bounds write('%1.4g %1.4g %1.4g %1.4g clipbox\n' % (w,h,x,y)) clippath, clippath_trans = gc.get_clip_path() if clippath: id = self._get_clip_path(clippath, clippath_trans) write('%s\n' % id) # Jochen, is the strip necessary? 
- this could be a honking big string write(ps.strip()) write("\n") if fill: if stroke: write("gsave\n") self.set_color(store=0, *rgbFace[:3]) write("fill\ngrestore\n") else: self.set_color(store=0, *rgbFace[:3]) write("fill\n") hatch = gc.get_hatch() if hatch: self.set_hatch(hatch) if stroke: write("stroke\n") write("grestore\n") class GraphicsContextPS(GraphicsContextBase): def get_capstyle(self): return {'butt':0, 'round':1, 'projecting':2}[GraphicsContextBase.get_capstyle(self)] def get_joinstyle(self): return {'miter':0, 'round':1, 'bevel':2}[GraphicsContextBase.get_joinstyle(self)] def new_figure_manager(num, *args, **kwargs): FigureClass = kwargs.pop('FigureClass', Figure) thisFig = FigureClass(*args, **kwargs) canvas = FigureCanvasPS(thisFig) manager = FigureManagerPS(canvas, num) return manager class FigureCanvasPS(FigureCanvasBase): def draw(self): pass filetypes = {'ps' : 'Postscript', 'eps' : 'Encapsulated Postscript'} def get_default_filetype(self): return 'ps' def print_ps(self, outfile, *args, **kwargs): return self._print_ps(outfile, 'ps', *args, **kwargs) def print_eps(self, outfile, *args, **kwargs): return self._print_ps(outfile, 'eps', *args, **kwargs) def _print_ps(self, outfile, format, *args, **kwargs): papertype = kwargs.get("papertype", rcParams['ps.papersize']) papertype = papertype.lower() if papertype == 'auto': pass elif papertype not in papersize: raise RuntimeError( '%s is not a valid papertype. 
Use one \ of %s'% (papertype, ', '.join( papersize.keys() )) ) orientation = kwargs.get("orientation", "portrait").lower() if orientation == 'landscape': isLandscape = True elif orientation == 'portrait': isLandscape = False else: raise RuntimeError('Orientation must be "portrait" or "landscape"') self.figure.set_dpi(72) # Override the dpi kwarg imagedpi = kwargs.get("dpi", 72) facecolor = kwargs.get("facecolor", "w") edgecolor = kwargs.get("edgecolor", "w") if rcParams['text.usetex']: self._print_figure_tex(outfile, format, imagedpi, facecolor, edgecolor, orientation, isLandscape, papertype) else: self._print_figure(outfile, format, imagedpi, facecolor, edgecolor, orientation, isLandscape, papertype) def _print_figure(self, outfile, format, dpi=72, facecolor='w', edgecolor='w', orientation='portrait', isLandscape=False, papertype=None): """ Render the figure to hardcopy. Set the figure patch face and edge colors. This is useful because some of the GUIs have a gray figure face color background and you'll probably want to override this on hardcopy If outfile is a string, it is interpreted as a file name. If the extension matches .ep* write encapsulated postscript, otherwise write a stand-alone PostScript file. If outfile is a file object, a stand-alone PostScript file is written into this file object. 
""" isEPSF = format == 'eps' passed_in_file_object = False if is_string_like(outfile): title = outfile tmpfile = os.path.join(gettempdir(), md5(outfile).hexdigest()) elif is_writable_file_like(outfile): title = None tmpfile = os.path.join(gettempdir(), md5(str(hash(outfile))).hexdigest()) passed_in_file_object = True else: raise ValueError("outfile must be a path or a file-like object") fh = file(tmpfile, 'w') # find the appropriate papertype width, height = self.figure.get_size_inches() if papertype == 'auto': if isLandscape: papertype = _get_papertype(height, width) else: papertype = _get_papertype(width, height) if isLandscape: paperHeight, paperWidth = papersize[papertype] else: paperWidth, paperHeight = papersize[papertype] if rcParams['ps.usedistiller'] and not papertype == 'auto': # distillers will improperly clip eps files if the pagesize is # too small if width>paperWidth or height>paperHeight: if isLandscape: papertype = _get_papertype(height, width) paperHeight, paperWidth = papersize[papertype] else: papertype = _get_papertype(width, height) paperWidth, paperHeight = papersize[papertype] # center the figure on the paper xo = 72*0.5*(paperWidth - width) yo = 72*0.5*(paperHeight - height) l, b, w, h = self.figure.bbox.bounds llx = xo lly = yo urx = llx + w ury = lly + h rotation = 0 if isLandscape: llx, lly, urx, ury = lly, llx, ury, urx xo, yo = 72*paperHeight - yo, xo rotation = 90 bbox = (llx, lly, urx, ury) # generate PostScript code for the figure and store it in a string origfacecolor = self.figure.get_facecolor() origedgecolor = self.figure.get_edgecolor() self.figure.set_facecolor(facecolor) self.figure.set_edgecolor(edgecolor) self._pswriter = StringIO() renderer = RendererPS(width, height, self._pswriter, imagedpi=dpi) self.figure.draw(renderer) self.figure.set_facecolor(origfacecolor) self.figure.set_edgecolor(origedgecolor) # write the PostScript headers if isEPSF: print >>fh, "%!PS-Adobe-3.0 EPSF-3.0" else: print >>fh, "%!PS-Adobe-3.0" if 
title: print >>fh, "%%Title: "+title print >>fh, ("%%Creator: matplotlib version " +__version__+", http://matplotlib.sourceforge.net/") print >>fh, "%%CreationDate: "+time.ctime(time.time()) print >>fh, "%%Orientation: " + orientation if not isEPSF: print >>fh, "%%DocumentPaperSizes: "+papertype print >>fh, "%%%%BoundingBox: %d %d %d %d" % bbox if not isEPSF: print >>fh, "%%Pages: 1" print >>fh, "%%EndComments" Ndict = len(psDefs) print >>fh, "%%BeginProlog" if not rcParams['ps.useafm']: Ndict += len(renderer.used_characters) print >>fh, "/mpldict %d dict def"%Ndict print >>fh, "mpldict begin" for d in psDefs: d=d.strip() for l in d.split('\n'): print >>fh, l.strip() if not rcParams['ps.useafm']: for font_filename, chars in renderer.used_characters.values(): if len(chars): font = FT2Font(font_filename) cmap = font.get_charmap() glyph_ids = [] for c in chars: gind = cmap.get(c) or 0 glyph_ids.append(gind) # The ttf to ps (subsetting) support doesn't work for # OpenType fonts that are Postscript inside (like the # STIX fonts). This will simply turn that off to avoid # errors. 
if is_opentype_cff_font(font_filename): raise RuntimeError("OpenType CFF fonts can not be saved using the internal Postscript backend at this time.\nConsider using the Cairo backend.") else: fonttype = rcParams['ps.fonttype'] convert_ttf_to_ps(font_filename, fh, rcParams['ps.fonttype'], glyph_ids) print >>fh, "end" print >>fh, "%%EndProlog" if not isEPSF: print >>fh, "%%Page: 1 1" print >>fh, "mpldict begin" #print >>fh, "gsave" print >>fh, "%s translate"%_nums_to_str(xo, yo) if rotation: print >>fh, "%d rotate"%rotation print >>fh, "%s clipbox"%_nums_to_str(width*72, height*72, 0, 0) # write the figure print >>fh, self._pswriter.getvalue() # write the trailer #print >>fh, "grestore" print >>fh, "end" print >>fh, "showpage" if not isEPSF: print >>fh, "%%EOF" fh.close() if rcParams['ps.usedistiller'] == 'ghostscript': gs_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox) elif rcParams['ps.usedistiller'] == 'xpdf': xpdf_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox) if passed_in_file_object: fh = file(tmpfile) print >>outfile, fh.read() else: shutil.move(tmpfile, outfile) def _print_figure_tex(self, outfile, format, dpi, facecolor, edgecolor, orientation, isLandscape, papertype): """ If text.usetex is True in rc, a temporary pair of tex/eps files are created to allow tex to manage the text layout via the PSFrags package. These files are processed to yield the final ps or eps file. 
""" isEPSF = format == 'eps' title = outfile # write to a temp file, we'll move it to outfile when done tmpfile = os.path.join(gettempdir(), md5(outfile).hexdigest()) fh = file(tmpfile, 'w') self.figure.dpi = 72 # ignore the dpi kwarg width, height = self.figure.get_size_inches() xo = 0 yo = 0 l, b, w, h = self.figure.bbox.bounds llx = xo lly = yo urx = llx + w ury = lly + h bbox = (llx, lly, urx, ury) # generate PostScript code for the figure and store it in a string origfacecolor = self.figure.get_facecolor() origedgecolor = self.figure.get_edgecolor() self.figure.set_facecolor(facecolor) self.figure.set_edgecolor(edgecolor) self._pswriter = StringIO() renderer = RendererPS(width, height, self._pswriter, imagedpi=dpi) self.figure.draw(renderer) self.figure.set_facecolor(origfacecolor) self.figure.set_edgecolor(origedgecolor) # write the Encapsulated PostScript headers print >>fh, "%!PS-Adobe-3.0 EPSF-3.0" if title: print >>fh, "%%Title: "+title print >>fh, ("%%Creator: matplotlib version " +__version__+", http://matplotlib.sourceforge.net/") print >>fh, "%%CreationDate: "+time.ctime(time.time()) print >>fh, "%%%%BoundingBox: %d %d %d %d" % bbox print >>fh, "%%EndComments" Ndict = len(psDefs) print >>fh, "%%BeginProlog" print >>fh, "/mpldict %d dict def"%Ndict print >>fh, "mpldict begin" for d in psDefs: d=d.strip() for l in d.split('\n'): print >>fh, l.strip() print >>fh, "end" print >>fh, "%%EndProlog" print >>fh, "mpldict begin" #print >>fh, "gsave" print >>fh, "%s translate"%_nums_to_str(xo, yo) print >>fh, "%s clipbox"%_nums_to_str(width*72, height*72, 0, 0) # write the figure print >>fh, self._pswriter.getvalue() # write the trailer #print >>fh, "grestore" print >>fh, "end" print >>fh, "showpage" fh.close() if isLandscape: # now we are ready to rotate isLandscape = True width, height = height, width bbox = (lly, llx, ury, urx) temp_papertype = _get_papertype(width, height) if papertype=='auto': papertype = temp_papertype paperWidth, paperHeight = 
papersize[temp_papertype] else: paperWidth, paperHeight = papersize[papertype] if (width>paperWidth or height>paperHeight) and isEPSF: paperWidth, paperHeight = papersize[temp_papertype] verbose.report('Your figure is too big to fit on %s paper. %s \ paper will be used to prevent clipping.'%(papertype, temp_papertype), 'helpful') texmanager = renderer.get_texmanager() font_preamble = texmanager.get_font_preamble() custom_preamble = texmanager.get_custom_preamble() convert_psfrags(tmpfile, renderer.psfrag, font_preamble, custom_preamble, paperWidth, paperHeight, orientation) if rcParams['ps.usedistiller'] == 'ghostscript': gs_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox) elif rcParams['ps.usedistiller'] == 'xpdf': xpdf_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox) elif rcParams['text.usetex']: if False: pass # for debugging else: gs_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox) if isinstance(outfile, file): fh = file(tmpfile) print >>outfile, fh.read() else: shutil.move(tmpfile, outfile) def convert_psfrags(tmpfile, psfrags, font_preamble, custom_preamble, paperWidth, paperHeight, orientation): """ When we want to use the LaTeX backend with postscript, we write PSFrag tags to a temporary postscript file, each one marking a position for LaTeX to render some text. convert_psfrags generates a LaTeX document containing the commands to convert those tags to text. LaTeX/dvips produces the postscript file that includes the actual text. 
""" tmpdir = os.path.split(tmpfile)[0] epsfile = tmpfile+'.eps' shutil.move(tmpfile, epsfile) latexfile = tmpfile+'.tex' outfile = tmpfile+'.output' latexh = file(latexfile, 'w') dvifile = tmpfile+'.dvi' psfile = tmpfile+'.ps' if orientation=='landscape': angle = 90 else: angle = 0 if rcParams['text.latex.unicode']: unicode_preamble = """\usepackage{ucs} \usepackage[utf8x]{inputenc}""" else: unicode_preamble = '' s = r"""\documentclass{article} %s %s %s \usepackage[dvips, papersize={%sin,%sin}, body={%sin,%sin}, margin={0in,0in}]{geometry} \usepackage{psfrag} \usepackage[dvips]{graphicx} \usepackage{color} \pagestyle{empty} \begin{document} \begin{figure} \centering \leavevmode %s \includegraphics*[angle=%s]{%s} \end{figure} \end{document} """% (font_preamble, unicode_preamble, custom_preamble, paperWidth, paperHeight, paperWidth, paperHeight, '\n'.join(psfrags), angle, os.path.split(epsfile)[-1]) if rcParams['text.latex.unicode']: latexh.write(s.encode('utf8')) else: try: latexh.write(s) except UnicodeEncodeError, err: verbose.report("You are using unicode and latex, but have " "not enabled the matplotlib 'text.latex.unicode' " "rcParam.", 'helpful') raise latexh.close() # the split drive part of the command is necessary for windows users with # multiple if sys.platform == 'win32': precmd = '%s &&'% os.path.splitdrive(tmpdir)[0] else: precmd = '' command = '%s cd "%s" && latex -interaction=nonstopmode "%s" > "%s"'\ %(precmd, tmpdir, latexfile, outfile) verbose.report(command, 'debug') exit_status = os.system(command) fh = file(outfile) if exit_status: raise RuntimeError('LaTeX was not able to process your file:\ \nHere is the full report generated by LaTeX: \n\n%s'% fh.read()) else: verbose.report(fh.read(), 'debug') fh.close() os.remove(outfile) command = '%s cd "%s" && dvips -q -R0 -o "%s" "%s" > "%s"'%(precmd, tmpdir, os.path.split(psfile)[-1], os.path.split(dvifile)[-1], outfile) verbose.report(command, 'debug') exit_status = os.system(command) fh = 
file(outfile) if exit_status: raise RuntimeError('dvips was not able to \ process the following file:\n%s\nHere is the full report generated by dvips: \ \n\n'% dvifile + fh.read()) else: verbose.report(fh.read(), 'debug') fh.close() os.remove(outfile) os.remove(epsfile) shutil.move(psfile, tmpfile) if not debugPS: for fname in glob.glob(tmpfile+'.*'): os.remove(fname) def gs_distill(tmpfile, eps=False, ptype='letter', bbox=None): """ Use ghostscript's pswrite or epswrite device to distill a file. This yields smaller files without illegal encapsulated postscript operators. The output is low-level, converting text to outlines. """ paper = '-sPAPERSIZE=%s'% ptype psfile = tmpfile + '.ps' outfile = tmpfile + '.output' dpi = rcParams['ps.distiller.res'] if sys.platform == 'win32': gs_exe = 'gswin32c' else: gs_exe = 'gs' command = '%s -dBATCH -dNOPAUSE -r%d -sDEVICE=pswrite %s -sOutputFile="%s" \ "%s" > "%s"'% (gs_exe, dpi, paper, psfile, tmpfile, outfile) verbose.report(command, 'debug') exit_status = os.system(command) fh = file(outfile) if exit_status: raise RuntimeError('ghostscript was not able to process \ your image.\nHere is the full report generated by ghostscript:\n\n' + fh.read()) else: verbose.report(fh.read(), 'debug') fh.close() os.remove(outfile) os.remove(tmpfile) shutil.move(psfile, tmpfile) if eps: pstoeps(tmpfile, bbox) def xpdf_distill(tmpfile, eps=False, ptype='letter', bbox=None): """ Use ghostscript's ps2pdf and xpdf's/poppler's pdftops to distill a file. This yields smaller files without illegal encapsulated postscript operators. This distiller is preferred, generating high-level postscript output that treats text as text. 
""" pdffile = tmpfile + '.pdf' psfile = tmpfile + '.ps' outfile = tmpfile + '.output' command = 'ps2pdf -dAutoFilterColorImages=false \ -sColorImageFilter=FlateEncode -sPAPERSIZE=%s "%s" "%s" > "%s"'% \ (ptype, tmpfile, pdffile, outfile) if sys.platform == 'win32': command = command.replace('=', '#') verbose.report(command, 'debug') exit_status = os.system(command) fh = file(outfile) if exit_status: raise RuntimeError('ps2pdf was not able to process your \ image.\n\Here is the report generated by ghostscript:\n\n' + fh.read()) else: verbose.report(fh.read(), 'debug') fh.close() os.remove(outfile) command = 'pdftops -paper match -level2 "%s" "%s" > "%s"'% \ (pdffile, psfile, outfile) verbose.report(command, 'debug') exit_status = os.system(command) fh = file(outfile) if exit_status: raise RuntimeError('pdftops was not able to process your \ image.\nHere is the full report generated by pdftops: \n\n' + fh.read()) else: verbose.report(fh.read(), 'debug') fh.close() os.remove(outfile) os.remove(tmpfile) shutil.move(psfile, tmpfile) if eps: pstoeps(tmpfile, bbox) for fname in glob.glob(tmpfile+'.*'): os.remove(fname) def get_bbox(tmpfile, bbox): """ Use ghostscript's bbox device to find the center of the bounding box. Return an appropriately sized bbox centered around that point. A bit of a hack. 
""" outfile = tmpfile + '.output' if sys.platform == 'win32': gs_exe = 'gswin32c' else: gs_exe = 'gs' command = '%s -dBATCH -dNOPAUSE -sDEVICE=bbox "%s"' %\ (gs_exe, tmpfile) verbose.report(command, 'debug') stdin, stdout, stderr = os.popen3(command) verbose.report(stdout.read(), 'debug-annoying') bbox_info = stderr.read() verbose.report(bbox_info, 'helpful') bbox_found = re.search('%%HiResBoundingBox: .*', bbox_info) if bbox_found: bbox_info = bbox_found.group() else: raise RuntimeError('Ghostscript was not able to extract a bounding box.\ Here is the Ghostscript output:\n\n%s'% bbox_info) l, b, r, t = [float(i) for i in bbox_info.split()[-4:]] # this is a hack to deal with the fact that ghostscript does not return the # intended bbox, but a tight bbox. For now, we just center the ink in the # intended bbox. This is not ideal, users may intend the ink to not be # centered. if bbox is None: l, b, r, t = (l-1, b-1, r+1, t+1) else: x = (l+r)/2 y = (b+t)/2 dx = (bbox[2]-bbox[0])/2 dy = (bbox[3]-bbox[1])/2 l,b,r,t = (x-dx, y-dy, x+dx, y+dy) bbox_info = '%%%%BoundingBox: %d %d %d %d' % (l, b, npy.ceil(r), npy.ceil(t)) hires_bbox_info = '%%%%HiResBoundingBox: %.6f %.6f %.6f %.6f' % (l, b, r, t) return '\n'.join([bbox_info, hires_bbox_info]) def pstoeps(tmpfile, bbox): """ Convert the postscript to encapsulated postscript. 
""" bbox_info = get_bbox(tmpfile, bbox) epsfile = tmpfile + '.eps' epsh = file(epsfile, 'w') tmph = file(tmpfile) line = tmph.readline() # Modify the header: while line: if line.startswith('%!PS'): print >>epsh, "%!PS-Adobe-3.0 EPSF-3.0" print >>epsh, bbox_info elif line.startswith('%%EndComments'): epsh.write(line) print >>epsh, '%%BeginProlog' print >>epsh, 'save' print >>epsh, 'countdictstack' print >>epsh, 'mark' print >>epsh, 'newpath' print >>epsh, '/showpage {} def' print >>epsh, '/setpagedevice {pop} def' print >>epsh, '%%EndProlog' print >>epsh, '%%Page 1 1' break elif line.startswith('%%Bound') \ or line.startswith('%%HiResBound') \ or line.startswith('%%Pages'): pass else: epsh.write(line) line = tmph.readline() # Now rewrite the rest of the file, and modify the trailer. # This is done in a second loop such that the header of the embedded # eps file is not modified. line = tmph.readline() while line: if line.startswith('%%Trailer'): print >>epsh, '%%Trailer' print >>epsh, 'cleartomark' print >>epsh, 'countdictstack' print >>epsh, 'exch sub { end } repeat' print >>epsh, 'restore' if rcParams['ps.usedistiller'] == 'xpdf': # remove extraneous "end" operator: line = tmph.readline() else: epsh.write(line) line = tmph.readline() tmph.close() epsh.close() os.remove(tmpfile) shutil.move(epsfile, tmpfile) class FigureManagerPS(FigureManagerBase): pass FigureManager = FigureManagerPS # The following Python dictionary psDefs contains the entries for the # PostScript dictionary mpldict. This dictionary implements most of # the matplotlib primitives and some abbreviations. # # References: # http://www.adobe.com/products/postscript/pdfs/PLRM.pdf # http://www.mactech.com/articles/mactech/Vol.09/09.04/PostscriptTutorial/ # http://www.math.ubc.ca/people/faculty/cass/graphics/text/www/ # # The usage comments use the notation of the operator summary # in the PostScript Language reference manual. 
psDefs = [ # x y *m* - "/m { moveto } bind def", # x y *l* - "/l { lineto } bind def", # x y *r* - "/r { rlineto } bind def", # x1 y1 x2 y2 x y *c* - "/c { curveto } bind def", # *closepath* - "/cl { closepath } bind def", # w h x y *box* - """/box { m 1 index 0 r 0 exch r neg 0 r cl } bind def""", # w h x y *clipbox* - """/clipbox { box clip newpath } bind def""", ]
agpl-3.0
nbfigueroa/daft
examples/galex.py
7
2540
""" The GALEX Photon Catalog ======================== This is the Hogg \& Schiminovich model for how photons turn into counts in the GALEX satellite data stream. Note the use of relative positioning. """ from matplotlib import rc rc("font", family="serif", size=12) rc("text", usetex=True) import daft pgm = daft.PGM([5.4, 5.4], origin=[1.2, 1.2]) wide = 1.5 verywide = 1.5 * wide dy = 0.75 # electrons el_x, el_y = 2., 2. pgm.add_plate(daft.Plate([el_x - 0.6, el_y - 0.6, 2.2, 2 * dy + 0.3], label="electrons $i$")) pgm.add_node(daft.Node("xabc", r"xa$_i$,xabc$_i$,ya$_i$,\textit{etc}", el_x + 0.5, el_y + 0 * dy, aspect=2.3 * wide, observed=True)) pgm.add_node(daft.Node("xyti", r"$x_i,y_i,t_i$", el_x + 1., el_y + 1 * dy, aspect=wide)) pgm.add_edge("xyti", "xabc") # intensity fields ph_x, ph_y = el_x + 2.5, el_y + 3 * dy pgm.add_node(daft.Node("Ixyt", r"$I_{\nu}(x,y,t)$", ph_x, ph_y, aspect=verywide)) pgm.add_edge("Ixyt", "xyti") pgm.add_node(daft.Node("Ixnt", r"$I_{\nu}(\xi,\eta,t)$", ph_x, ph_y + 1 * dy, aspect=verywide)) pgm.add_edge("Ixnt", "Ixyt") pgm.add_node(daft.Node("Iadt", r"$I_{\nu}(\alpha,\delta,t)$", ph_x, ph_y + 2 * dy, aspect=verywide)) pgm.add_edge("Iadt", "Ixnt") # s/c sc_x, sc_y = ph_x + 1.5, ph_y - 1.5 * dy pgm.add_node(daft.Node("dark", r"dark", sc_x, sc_y - 1 * dy, aspect=wide)) pgm.add_edge("dark", "xyti") pgm.add_node(daft.Node("flat", r"flat", sc_x, sc_y, aspect=wide)) pgm.add_edge("flat", "xyti") pgm.add_node(daft.Node("att", r"att", sc_x, sc_y + 3 * dy)) pgm.add_edge("att", "Ixnt") pgm.add_node(daft.Node("optics", r"optics", sc_x, sc_y + 2 * dy, aspect=wide)) pgm.add_edge("optics", "Ixyt") pgm.add_node(daft.Node("psf", r"psf", sc_x, sc_y + 1 * dy)) pgm.add_edge("psf", "xyti") pgm.add_node(daft.Node("fee", r"f.e.e.", sc_x, sc_y - 2 * dy, aspect=wide)) pgm.add_edge("fee", "xabc") # sky pgm.add_node(daft.Node("sky", r"sky", sc_x, sc_y + 4 * dy)) pgm.add_edge("sky", "Iadt") # stars star_x, star_y = el_x, el_y + 4 * dy pgm.add_plate(daft.Plate([star_x 
- 0.6, star_y - 0.6, 2.2, 2 * dy + 0.3], label="stars $n$")) pgm.add_node(daft.Node("star adt", r"$I_{\nu,n}(\alpha,\delta,t)$", star_x + 0.5, star_y + 1 * dy, aspect=verywide)) pgm.add_edge("star adt", "Iadt") pgm.add_node(daft.Node("star L", r"$L_{\nu,n}(t)$", star_x + 1, star_y, aspect=wide)) pgm.add_edge("star L", "star adt") pgm.add_node(daft.Node("star pos", r"$\vec{x_n}$", star_x, star_y)) pgm.add_edge("star pos", "star adt") # done pgm.render() pgm.figure.savefig("galex.pdf") pgm.figure.savefig("galex.png", dpi=150)
mit
Wittlich/DAT210x-Python
Module2/assignment4.py
1
1354
import pandas as pd # TODO: Load up the table, and extract the dataset # out of it. If you're having issues with this, look # carefully at the sample code provided in the reading df = pd.read_html('http://www.espn.com/nhl/statistics/player/_/stat/points/sort/points/year/2015/seasontype/2')[0] # TODO: Rename the columns so that they match the # column definitions provided to you on the website df.columns = df.ix[1, :] df = df.ix[2:, :] # TODO: Get rid of any row that has at least 4 NANs in it df = df.dropna(thresh=4, axis=0).reset_index(drop=True) # TODO: At this point, look through your dataset by printing # it. There probably still are some erroneous rows in there. # What indexing command(s) can you use to select all rows # EXCEPT those rows? df = df[df['RK'] != 'RK'] # TODO: Get rid of the 'RK' column df = df.drop(labels=['RK'], axis=1) # TODO: Ensure there are no holes in your index by resetting # it. By the way, don't store the original index df.reset_index(drop=True) # TODO: Check the data type of all columns, and ensure those # that should be numeric are numeric df = df.apply(lambda x: pd.to_numeric(x, errors='ignore')) # TODO: Your dataframe is now ready! Use the appropriate # commands to answer the questions on the course lab page. print(df.shape) print(len(df.PCT.unique())) print(df.ix[16, 'GP'] + df.ix[17, 'GP'])
mit
Tong-Chen/scikit-learn
sklearn/metrics/cluster/__init__.py
312
1322
""" The :mod:`sklearn.metrics.cluster` submodule contains evaluation metrics for cluster analysis results. There are two forms of evaluation: - supervised, which uses a ground truth class values for each sample. - unsupervised, which does not and measures the 'quality' of the model itself. """ from .supervised import adjusted_mutual_info_score from .supervised import normalized_mutual_info_score from .supervised import adjusted_rand_score from .supervised import completeness_score from .supervised import contingency_matrix from .supervised import expected_mutual_information from .supervised import homogeneity_completeness_v_measure from .supervised import homogeneity_score from .supervised import mutual_info_score from .supervised import v_measure_score from .supervised import entropy from .unsupervised import silhouette_samples from .unsupervised import silhouette_score from .bicluster import consensus_score __all__ = ["adjusted_mutual_info_score", "normalized_mutual_info_score", "adjusted_rand_score", "completeness_score", "contingency_matrix", "expected_mutual_information", "homogeneity_completeness_v_measure", "homogeneity_score", "mutual_info_score", "v_measure_score", "entropy", "silhouette_samples", "silhouette_score", "consensus_score"]
bsd-3-clause
nixingyang/Kaggle-Face-Verification
Digit Recognizer/solution.py
1
3642
import numpy as np np.random.seed(666666) from keras.callbacks import EarlyStopping, ModelCheckpoint from keras.layers import Convolution2D, MaxPooling2D from keras.layers import Dense, Dropout, Activation, Flatten from keras.layers.advanced_activations import PReLU from keras.layers.normalization import BatchNormalization from keras.models import Sequential from keras.optimizers import SGD from keras.utils import np_utils from sklearn.preprocessing import LabelEncoder import os import preprocessing import time MODEL_FOLDER_PATH = "./models" OPTIMAL_MODEL_FILE_PATH = os.path.join(MODEL_FOLDER_PATH, "optimal_model.hdf5") ROW_NUM = 28 COLUMN_NUM = 28 BATCH_SIZE = 128 def preprocess_images(X): X = np.reshape(X, (X.shape[0], ROW_NUM, COLUMN_NUM)) X = np.expand_dims(X, axis=1) return X / 255 def preprocess_labels(labels, encoder=None, categorical=True): if not encoder: encoder = LabelEncoder() encoder.fit(labels) categorical_labels = encoder.transform(labels).astype(np.int32) if categorical: categorical_labels = np_utils.to_categorical(categorical_labels) return categorical_labels, encoder def init_model(class_num): model = Sequential() model.add(Convolution2D(32, 3, 3, border_mode="same", input_shape=(1, ROW_NUM, COLUMN_NUM))) model.add(PReLU()) model.add(Convolution2D(32, 3, 3)) model.add(PReLU()) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.25)) model.add(Convolution2D(64, 3, 3, border_mode="same")) model.add(PReLU()) model.add(Convolution2D(64, 3, 3)) model.add(PReLU()) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(512)) model.add(PReLU()) model.add(BatchNormalization()) model.add(Dropout(0.5)) model.add(Dense(class_num)) model.add(Activation("softmax")) optimizer = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True) model.compile(loss="categorical_crossentropy", optimizer=optimizer) return model def run(): print("Loading data ...") X_train, Y_train, X_test, submission_file_content = 
preprocessing.load_data() print("Performing conversion ...") X_train = preprocess_images(X_train) X_test = preprocess_images(X_test) categorical_Y_train, encoder = preprocess_labels(Y_train) model = init_model(np.unique(Y_train).size) if not os.path.isfile(OPTIMAL_MODEL_FILE_PATH): print("Performing the training phase ...") if not os.path.isdir(MODEL_FOLDER_PATH): os.makedirs(MODEL_FOLDER_PATH) earlystopping_callback = EarlyStopping(patience=1) modelcheckpoint_callback = ModelCheckpoint(OPTIMAL_MODEL_FILE_PATH, save_best_only=True) model.fit(X_train, categorical_Y_train, batch_size=BATCH_SIZE, nb_epoch=1, callbacks=[earlystopping_callback, modelcheckpoint_callback], validation_split=0.2, show_accuracy=True) print("Loading the optimal model ...") model.load_weights(OPTIMAL_MODEL_FILE_PATH) print("Generating prediction ...") temp_predictions = model.predict(X_test, batch_size=BATCH_SIZE) prediction = encoder.inverse_transform(temp_predictions) print("Writing prediction to disk ...") submission_file_name = "Aurora_{:.4f}_{:d}.csv".format(EarlyStopping.best, int(time.time())) submission_file_content[preprocessing.LABEL_COLUMN_NAME_IN_SUBMISSION] = prediction submission_file_content.to_csv(submission_file_name, index=False) print("All done!") if __name__ == "__main__": run()
mit
spbguru/repo1
external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_qt4.py
69
20664
from __future__ import division import math import os import sys import matplotlib from matplotlib import verbose from matplotlib.cbook import is_string_like, onetrue from matplotlib.backend_bases import RendererBase, GraphicsContextBase, \ FigureManagerBase, FigureCanvasBase, NavigationToolbar2, IdleEvent, cursors from matplotlib._pylab_helpers import Gcf from matplotlib.figure import Figure from matplotlib.mathtext import MathTextParser from matplotlib.widgets import SubplotTool try: from PyQt4 import QtCore, QtGui, Qt except ImportError: raise ImportError("Qt4 backend requires that PyQt4 is installed.") backend_version = "0.9.1" def fn_name(): return sys._getframe(1).f_code.co_name DEBUG = False cursord = { cursors.MOVE : QtCore.Qt.SizeAllCursor, cursors.HAND : QtCore.Qt.PointingHandCursor, cursors.POINTER : QtCore.Qt.ArrowCursor, cursors.SELECT_REGION : QtCore.Qt.CrossCursor, } def draw_if_interactive(): """ Is called after every pylab drawing command """ if matplotlib.is_interactive(): figManager = Gcf.get_active() if figManager != None: figManager.canvas.draw() def _create_qApp(): """ Only one qApp can exist at a time, so check before creating one. 
""" if QtGui.QApplication.startingUp(): if DEBUG: print "Starting up QApplication" global qApp qApp = QtGui.QApplication( [" "] ) QtCore.QObject.connect( qApp, QtCore.SIGNAL( "lastWindowClosed()" ), qApp, QtCore.SLOT( "quit()" ) ) #remember that matplotlib created the qApp - will be used by show() _create_qApp.qAppCreatedHere = True _create_qApp.qAppCreatedHere = False def show(): """ Show all the figures and enter the qt main loop This should be the last line of your script """ for manager in Gcf.get_all_fig_managers(): manager.window.show() if DEBUG: print 'Inside show' figManager = Gcf.get_active() if figManager != None: figManager.canvas.draw() if _create_qApp.qAppCreatedHere: QtGui.qApp.exec_() def new_figure_manager( num, *args, **kwargs ): """ Create a new figure manager instance """ thisFig = Figure( *args, **kwargs ) canvas = FigureCanvasQT( thisFig ) manager = FigureManagerQT( canvas, num ) return manager class FigureCanvasQT( QtGui.QWidget, FigureCanvasBase ): keyvald = { QtCore.Qt.Key_Control : 'control', QtCore.Qt.Key_Shift : 'shift', QtCore.Qt.Key_Alt : 'alt', } # left 1, middle 2, right 3 buttond = {1:1, 2:3, 4:2} def __init__( self, figure ): if DEBUG: print 'FigureCanvasQt: ', figure _create_qApp() QtGui.QWidget.__init__( self ) FigureCanvasBase.__init__( self, figure ) self.figure = figure self.setMouseTracking( True ) # hide until we can test and fix #self.startTimer(backend_IdleEvent.milliseconds) w,h = self.get_width_height() self.resize( w, h ) def __timerEvent(self, event): # hide until we can test and fix self.mpl_idle_event(event) def enterEvent(self, event): FigureCanvasBase.enter_notify_event(self, event) def leaveEvent(self, event): FigureCanvasBase.leave_notify_event(self, event) def mousePressEvent( self, event ): x = event.pos().x() # flipy so y=0 is bottom of canvas y = self.figure.bbox.height - event.pos().y() button = self.buttond[event.button()] FigureCanvasBase.button_press_event( self, x, y, button ) if DEBUG: print 'button 
pressed:', event.button() def mouseMoveEvent( self, event ): x = event.x() # flipy so y=0 is bottom of canvas y = self.figure.bbox.height - event.y() FigureCanvasBase.motion_notify_event( self, x, y ) #if DEBUG: print 'mouse move' def mouseReleaseEvent( self, event ): x = event.x() # flipy so y=0 is bottom of canvas y = self.figure.bbox.height - event.y() button = self.buttond[event.button()] FigureCanvasBase.button_release_event( self, x, y, button ) if DEBUG: print 'button released' def keyPressEvent( self, event ): key = self._get_key( event ) FigureCanvasBase.key_press_event( self, key ) if DEBUG: print 'key press', key def keyReleaseEvent( self, event ): key = self._get_key(event) FigureCanvasBase.key_release_event( self, key ) if DEBUG: print 'key release', key def resizeEvent( self, event ): if DEBUG: print 'resize (%d x %d)' % (event.size().width(), event.size().height()) QtGui.QWidget.resizeEvent( self, event ) w = event.size().width() h = event.size().height() if DEBUG: print "FigureCanvasQtAgg.resizeEvent(", w, ",", h, ")" dpival = self.figure.dpi winch = w/dpival hinch = h/dpival self.figure.set_size_inches( winch, hinch ) self.draw() def resize( self, w, h ): # Pass through to Qt to resize the widget. QtGui.QWidget.resize( self, w, h ) # Resize the figure by converting pixels to inches. pixelPerInch = self.figure.dpi wInch = w / pixelPerInch hInch = h / pixelPerInch self.figure.set_size_inches( wInch, hInch ) # Redraw everything. 
self.draw() def sizeHint( self ): w, h = self.get_width_height() return QtCore.QSize( w, h ) def minumumSizeHint( self ): return QtCore.QSize( 10, 10 ) def _get_key( self, event ): if event.key() < 256: key = str(event.text()) elif event.key() in self.keyvald: key = self.keyvald[ event.key() ] else: key = None return key def flush_events(self): Qt.qApp.processEvents() def start_event_loop(self,timeout): FigureCanvasBase.start_event_loop_default(self,timeout) start_event_loop.__doc__=FigureCanvasBase.start_event_loop_default.__doc__ def stop_event_loop(self): FigureCanvasBase.stop_event_loop_default(self) stop_event_loop.__doc__=FigureCanvasBase.stop_event_loop_default.__doc__ class FigureManagerQT( FigureManagerBase ): """ Public attributes canvas : The FigureCanvas instance num : The Figure number toolbar : The qt.QToolBar window : The qt.QMainWindow """ def __init__( self, canvas, num ): if DEBUG: print 'FigureManagerQT.%s' % fn_name() FigureManagerBase.__init__( self, canvas, num ) self.canvas = canvas self.window = QtGui.QMainWindow() self.window.setAttribute(QtCore.Qt.WA_DeleteOnClose) self.window.setWindowTitle("Figure %d" % num) image = os.path.join( matplotlib.rcParams['datapath'],'images','matplotlib.png' ) self.window.setWindowIcon(QtGui.QIcon( image )) # Give the keyboard focus to the figure instead of the manager self.canvas.setFocusPolicy( QtCore.Qt.ClickFocus ) self.canvas.setFocus() QtCore.QObject.connect( self.window, QtCore.SIGNAL( 'destroyed()' ), self._widgetclosed ) self.window._destroying = False self.toolbar = self._get_toolbar(self.canvas, self.window) self.window.addToolBar(self.toolbar) QtCore.QObject.connect(self.toolbar, QtCore.SIGNAL("message"), self.window.statusBar().showMessage) self.window.setCentralWidget(self.canvas) if matplotlib.is_interactive(): self.window.show() # attach a show method to the figure for pylab ease of use self.canvas.figure.show = lambda *args: self.window.show() def notify_axes_change( fig ): # This will be 
called whenever the current axes is changed if self.toolbar != None: self.toolbar.update() self.canvas.figure.add_axobserver( notify_axes_change ) def _widgetclosed( self ): if self.window._destroying: return self.window._destroying = True Gcf.destroy(self.num) def _get_toolbar(self, canvas, parent): # must be inited after the window, drawingArea and figure # attrs are set if matplotlib.rcParams['toolbar'] == 'classic': print "Classic toolbar is not supported" elif matplotlib.rcParams['toolbar'] == 'toolbar2': toolbar = NavigationToolbar2QT(canvas, parent, False) else: toolbar = None return toolbar def resize(self, width, height): 'set the canvas size in pixels' self.window.resize(width, height) def destroy( self, *args ): if self.window._destroying: return self.window._destroying = True QtCore.QObject.disconnect( self.window, QtCore.SIGNAL( 'destroyed()' ), self._widgetclosed ) if self.toolbar: self.toolbar.destroy() if DEBUG: print "destroy figure manager" self.window.close() def set_window_title(self, title): self.window.setWindowTitle(title) class NavigationToolbar2QT( NavigationToolbar2, QtGui.QToolBar ): def __init__(self, canvas, parent, coordinates=True): """ coordinates: should we show the coordinates on the right? 
""" self.canvas = canvas self.coordinates = coordinates QtGui.QToolBar.__init__( self, parent ) NavigationToolbar2.__init__( self, canvas ) def _icon(self, name): return QtGui.QIcon(os.path.join(self.basedir, name)) def _init_toolbar(self): self.basedir = os.path.join(matplotlib.rcParams[ 'datapath' ],'images') a = self.addAction(self._icon('home.svg'), 'Home', self.home) a.setToolTip('Reset original view') a = self.addAction(self._icon('back.svg'), 'Back', self.back) a.setToolTip('Back to previous view') a = self.addAction(self._icon('forward.svg'), 'Forward', self.forward) a.setToolTip('Forward to next view') self.addSeparator() a = self.addAction(self._icon('move.svg'), 'Pan', self.pan) a.setToolTip('Pan axes with left mouse, zoom with right') a = self.addAction(self._icon('zoom_to_rect.svg'), 'Zoom', self.zoom) a.setToolTip('Zoom to rectangle') self.addSeparator() a = self.addAction(self._icon('subplots.png'), 'Subplots', self.configure_subplots) a.setToolTip('Configure subplots') a = self.addAction(self._icon('filesave.svg'), 'Save', self.save_figure) a.setToolTip('Save the figure') self.buttons = {} # Add the x,y location widget at the right side of the toolbar # The stretch factor is 1 which means any resizing of the toolbar # will resize this label instead of the buttons. 
if self.coordinates: self.locLabel = QtGui.QLabel( "", self ) self.locLabel.setAlignment( QtCore.Qt.AlignRight | QtCore.Qt.AlignTop ) self.locLabel.setSizePolicy( QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Ignored)) labelAction = self.addWidget(self.locLabel) labelAction.setVisible(True) # reference holder for subplots_adjust window self.adj_window = None def dynamic_update( self ): self.canvas.draw() def set_message( self, s ): self.emit(QtCore.SIGNAL("message"), s) if self.coordinates: self.locLabel.setText(s.replace(', ', '\n')) def set_cursor( self, cursor ): if DEBUG: print 'Set cursor' , cursor QtGui.QApplication.restoreOverrideCursor() QtGui.QApplication.setOverrideCursor( QtGui.QCursor( cursord[cursor] ) ) def draw_rubberband( self, event, x0, y0, x1, y1 ): height = self.canvas.figure.bbox.height y1 = height - y1 y0 = height - y0 w = abs(x1 - x0) h = abs(y1 - y0) rect = [ int(val)for val in min(x0,x1), min(y0, y1), w, h ] self.canvas.drawRectangle( rect ) def configure_subplots(self): self.adj_window = QtGui.QMainWindow() win = self.adj_window win.setAttribute(QtCore.Qt.WA_DeleteOnClose) win.setWindowTitle("Subplot Configuration Tool") image = os.path.join( matplotlib.rcParams['datapath'],'images','matplotlib.png' ) win.setWindowIcon(QtGui.QIcon( image )) tool = SubplotToolQt(self.canvas.figure, win) win.setCentralWidget(tool) win.setSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred) win.show() def _get_canvas(self, fig): return FigureCanvasQT(fig) def save_figure( self ): filetypes = self.canvas.get_supported_filetypes_grouped() sorted_filetypes = filetypes.items() sorted_filetypes.sort() default_filetype = self.canvas.get_default_filetype() start = "image." 
+ default_filetype filters = [] selectedFilter = None for name, exts in sorted_filetypes: exts_list = " ".join(['*.%s' % ext for ext in exts]) filter = '%s (%s)' % (name, exts_list) if default_filetype in exts: selectedFilter = filter filters.append(filter) filters = ';;'.join(filters) fname = QtGui.QFileDialog.getSaveFileName( self, "Choose a filename to save to", start, filters, selectedFilter) if fname: try: self.canvas.print_figure( unicode(fname) ) except Exception, e: QtGui.QMessageBox.critical( self, "Error saving file", str(e), QtGui.QMessageBox.Ok, QtGui.QMessageBox.NoButton) class SubplotToolQt( SubplotTool, QtGui.QWidget ): def __init__(self, targetfig, parent): QtGui.QWidget.__init__(self, None) self.targetfig = targetfig self.parent = parent self.sliderleft = QtGui.QSlider(QtCore.Qt.Horizontal) self.sliderbottom = QtGui.QSlider(QtCore.Qt.Vertical) self.sliderright = QtGui.QSlider(QtCore.Qt.Horizontal) self.slidertop = QtGui.QSlider(QtCore.Qt.Vertical) self.sliderwspace = QtGui.QSlider(QtCore.Qt.Horizontal) self.sliderhspace = QtGui.QSlider(QtCore.Qt.Vertical) # constraints QtCore.QObject.connect( self.sliderleft, QtCore.SIGNAL( "valueChanged(int)" ), self.sliderright.setMinimum ) QtCore.QObject.connect( self.sliderright, QtCore.SIGNAL( "valueChanged(int)" ), self.sliderleft.setMaximum ) QtCore.QObject.connect( self.sliderbottom, QtCore.SIGNAL( "valueChanged(int)" ), self.slidertop.setMinimum ) QtCore.QObject.connect( self.slidertop, QtCore.SIGNAL( "valueChanged(int)" ), self.sliderbottom.setMaximum ) sliders = (self.sliderleft, self.sliderbottom, self.sliderright, self.slidertop, self.sliderwspace, self.sliderhspace, ) adjustments = ('left:', 'bottom:', 'right:', 'top:', 'wspace:', 'hspace:') for slider, adjustment in zip(sliders, adjustments): slider.setMinimum(0) slider.setMaximum(1000) slider.setSingleStep(5) layout = QtGui.QGridLayout() leftlabel = QtGui.QLabel('left') layout.addWidget(leftlabel, 2, 0) layout.addWidget(self.sliderleft, 2, 1) 
toplabel = QtGui.QLabel('top') layout.addWidget(toplabel, 0, 2) layout.addWidget(self.slidertop, 1, 2) layout.setAlignment(self.slidertop, QtCore.Qt.AlignHCenter) bottomlabel = QtGui.QLabel('bottom') layout.addWidget(QtGui.QLabel('bottom'), 4, 2) layout.addWidget(self.sliderbottom, 3, 2) layout.setAlignment(self.sliderbottom, QtCore.Qt.AlignHCenter) rightlabel = QtGui.QLabel('right') layout.addWidget(rightlabel, 2, 4) layout.addWidget(self.sliderright, 2, 3) hspacelabel = QtGui.QLabel('hspace') layout.addWidget(hspacelabel, 0, 6) layout.setAlignment(hspacelabel, QtCore.Qt.AlignHCenter) layout.addWidget(self.sliderhspace, 1, 6) layout.setAlignment(self.sliderhspace, QtCore.Qt.AlignHCenter) wspacelabel = QtGui.QLabel('wspace') layout.addWidget(wspacelabel, 4, 6) layout.setAlignment(wspacelabel, QtCore.Qt.AlignHCenter) layout.addWidget(self.sliderwspace, 3, 6) layout.setAlignment(self.sliderwspace, QtCore.Qt.AlignBottom) layout.setRowStretch(1,1) layout.setRowStretch(3,1) layout.setColumnStretch(1,1) layout.setColumnStretch(3,1) layout.setColumnStretch(6,1) self.setLayout(layout) self.sliderleft.setSliderPosition(int(targetfig.subplotpars.left*1000)) self.sliderbottom.setSliderPosition(\ int(targetfig.subplotpars.bottom*1000)) self.sliderright.setSliderPosition(\ int(targetfig.subplotpars.right*1000)) self.slidertop.setSliderPosition(int(targetfig.subplotpars.top*1000)) self.sliderwspace.setSliderPosition(\ int(targetfig.subplotpars.wspace*1000)) self.sliderhspace.setSliderPosition(\ int(targetfig.subplotpars.hspace*1000)) QtCore.QObject.connect( self.sliderleft, QtCore.SIGNAL( "valueChanged(int)" ), self.funcleft ) QtCore.QObject.connect( self.sliderbottom, QtCore.SIGNAL( "valueChanged(int)" ), self.funcbottom ) QtCore.QObject.connect( self.sliderright, QtCore.SIGNAL( "valueChanged(int)" ), self.funcright ) QtCore.QObject.connect( self.slidertop, QtCore.SIGNAL( "valueChanged(int)" ), self.functop ) QtCore.QObject.connect( self.sliderwspace, QtCore.SIGNAL( 
"valueChanged(int)" ), self.funcwspace ) QtCore.QObject.connect( self.sliderhspace, QtCore.SIGNAL( "valueChanged(int)" ), self.funchspace ) def funcleft(self, val): if val == self.sliderright.value(): val -= 1 self.targetfig.subplots_adjust(left=val/1000.) if self.drawon: self.targetfig.canvas.draw() def funcright(self, val): if val == self.sliderleft.value(): val += 1 self.targetfig.subplots_adjust(right=val/1000.) if self.drawon: self.targetfig.canvas.draw() def funcbottom(self, val): if val == self.slidertop.value(): val -= 1 self.targetfig.subplots_adjust(bottom=val/1000.) if self.drawon: self.targetfig.canvas.draw() def functop(self, val): if val == self.sliderbottom.value(): val += 1 self.targetfig.subplots_adjust(top=val/1000.) if self.drawon: self.targetfig.canvas.draw() def funcwspace(self, val): self.targetfig.subplots_adjust(wspace=val/1000.) if self.drawon: self.targetfig.canvas.draw() def funchspace(self, val): self.targetfig.subplots_adjust(hspace=val/1000.) if self.drawon: self.targetfig.canvas.draw() def error_msg_qt( msg, parent=None ): if not is_string_like( msg ): msg = ','.join( map( str,msg ) ) QtGui.QMessageBox.warning( None, "Matplotlib", msg, QtGui.QMessageBox.Ok ) def exception_handler( type, value, tb ): """Handle uncaught exceptions It does not catch SystemExit """ msg = '' # get the filename attribute if available (for IOError) if hasattr(value, 'filename') and value.filename != None: msg = value.filename + ': ' if hasattr(value, 'strerror') and value.strerror != None: msg += value.strerror else: msg += str(value) if len( msg ) : error_msg_qt( msg ) FigureManager = FigureManagerQT
gpl-3.0
Winand/pandas
pandas/core/dtypes/cast.py
2
36135
""" routings for casting """ from datetime import datetime, timedelta import numpy as np import warnings from pandas._libs import tslib, lib from pandas._libs.tslib import iNaT from pandas.compat import string_types, text_type, PY3 from .common import (_ensure_object, is_bool, is_integer, is_float, is_complex, is_datetimetz, is_categorical_dtype, is_datetimelike, is_extension_type, is_object_dtype, is_datetime64tz_dtype, is_datetime64_dtype, is_timedelta64_dtype, is_dtype_equal, is_float_dtype, is_complex_dtype, is_integer_dtype, is_datetime_or_timedelta_dtype, is_bool_dtype, is_scalar, _string_dtypes, pandas_dtype, _ensure_int8, _ensure_int16, _ensure_int32, _ensure_int64, _NS_DTYPE, _TD_DTYPE, _INT64_DTYPE, _POSSIBLY_CAST_DTYPES) from .dtypes import ExtensionDtype, DatetimeTZDtype, PeriodDtype from .generic import (ABCDatetimeIndex, ABCPeriodIndex, ABCSeries) from .missing import isna, notna from .inference import is_list_like _int8_max = np.iinfo(np.int8).max _int16_max = np.iinfo(np.int16).max _int32_max = np.iinfo(np.int32).max _int64_max = np.iinfo(np.int64).max def maybe_convert_platform(values): """ try to do platform conversion, allow ndarray or list here """ if isinstance(values, (list, tuple)): values = lib.list_to_object_array(list(values)) if getattr(values, 'dtype', None) == np.object_: if hasattr(values, '_values'): values = values._values values = lib.maybe_convert_objects(values) return values def is_nested_object(obj): """ return a boolean if we have a nested object, e.g. a Series with 1 or more Series elements This may not be necessarily be performant. """ if isinstance(obj, ABCSeries) and is_object_dtype(obj): if any(isinstance(v, ABCSeries) for v in obj.values): return True return False def maybe_downcast_to_dtype(result, dtype): """ try to cast to the specified dtype (e.g. 
convert back to bool/int or could be an astype of float64->float32 """ if is_scalar(result): return result def trans(x): return x if isinstance(dtype, string_types): if dtype == 'infer': inferred_type = lib.infer_dtype(_ensure_object(result.ravel())) if inferred_type == 'boolean': dtype = 'bool' elif inferred_type == 'integer': dtype = 'int64' elif inferred_type == 'datetime64': dtype = 'datetime64[ns]' elif inferred_type == 'timedelta64': dtype = 'timedelta64[ns]' # try to upcast here elif inferred_type == 'floating': dtype = 'int64' if issubclass(result.dtype.type, np.number): def trans(x): # noqa return x.round() else: dtype = 'object' if isinstance(dtype, string_types): dtype = np.dtype(dtype) try: # don't allow upcasts here (except if empty) if dtype.kind == result.dtype.kind: if (result.dtype.itemsize <= dtype.itemsize and np.prod(result.shape)): return result if is_bool_dtype(dtype) or is_integer_dtype(dtype): # if we don't have any elements, just astype it if not np.prod(result.shape): return trans(result).astype(dtype) # do a test on the first element, if it fails then we are done r = result.ravel() arr = np.array([r[0]]) # if we have any nulls, then we are done if (isna(arr).any() or not np.allclose(arr, trans(arr).astype(dtype), rtol=0)): return result # a comparable, e.g. 
a Decimal may slip in here elif not isinstance(r[0], (np.integer, np.floating, np.bool, int, float, bool)): return result if (issubclass(result.dtype.type, (np.object_, np.number)) and notna(result).all()): new_result = trans(result).astype(dtype) try: if np.allclose(new_result, result, rtol=0): return new_result except: # comparison of an object dtype with a number type could # hit here if (new_result == result).all(): return new_result elif (issubclass(dtype.type, np.floating) and not is_bool_dtype(result.dtype)): return result.astype(dtype) # a datetimelike # GH12821, iNaT is casted to float elif dtype.kind in ['M', 'm'] and result.dtype.kind in ['i', 'f']: try: result = result.astype(dtype) except: if dtype.tz: # convert to datetime and change timezone from pandas import to_datetime result = to_datetime(result).tz_localize('utc') result = result.tz_convert(dtype.tz) except: pass return result def maybe_upcast_putmask(result, mask, other): """ A safe version of putmask that potentially upcasts the result Parameters ---------- result : ndarray The destination array. This will be mutated in-place if no upcasting is necessary. 
mask : boolean ndarray other : ndarray or scalar The source array or value Returns ------- result : ndarray changed : boolean Set to true if the result array was upcasted """ if mask.any(): # Two conversions for date-like dtypes that can't be done automatically # in np.place: # NaN -> NaT # integer or integer array -> date-like array if is_datetimelike(result.dtype): if is_scalar(other): if isna(other): other = result.dtype.type('nat') elif is_integer(other): other = np.array(other, dtype=result.dtype) elif is_integer_dtype(other): other = np.array(other, dtype=result.dtype) def changeit(): # try to directly set by expanding our array to full # length of the boolean try: om = other[mask] om_at = om.astype(result.dtype) if (om == om_at).all(): new_result = result.values.copy() new_result[mask] = om_at result[:] = new_result return result, False except: pass # we are forced to change the dtype of the result as the input # isn't compatible r, _ = maybe_upcast(result, fill_value=other, copy=True) np.place(r, mask, other) return r, True # we want to decide whether place will work # if we have nans in the False portion of our mask then we need to # upcast (possibly), otherwise we DON't want to upcast (e.g. 
if we # have values, say integers, in the success portion then it's ok to not # upcast) new_dtype, _ = maybe_promote(result.dtype, other) if new_dtype != result.dtype: # we have a scalar or len 0 ndarray # and its nan and we are changing some values if (is_scalar(other) or (isinstance(other, np.ndarray) and other.ndim < 1)): if isna(other): return changeit() # we have an ndarray and the masking has nans in it else: if isna(other[mask]).any(): return changeit() try: np.place(result, mask, other) except: return changeit() return result, False def maybe_promote(dtype, fill_value=np.nan): # if we passed an array here, determine the fill value by dtype if isinstance(fill_value, np.ndarray): if issubclass(fill_value.dtype.type, (np.datetime64, np.timedelta64)): fill_value = iNaT else: # we need to change to object type as our # fill_value is of object type if fill_value.dtype == np.object_: dtype = np.dtype(np.object_) fill_value = np.nan # returns tuple of (dtype, fill_value) if issubclass(dtype.type, (np.datetime64, np.timedelta64)): # for now: refuse to upcast datetime64 # (this is because datetime64 will not implicitly upconvert # to object correctly as of numpy 1.6.1) if isna(fill_value): fill_value = iNaT else: if issubclass(dtype.type, np.datetime64): try: fill_value = tslib.Timestamp(fill_value).value except: # the proper thing to do here would probably be to upcast # to object (but numpy 1.6.1 doesn't do this properly) fill_value = iNaT elif issubclass(dtype.type, np.timedelta64): try: fill_value = lib.Timedelta(fill_value).value except: # as for datetimes, cannot upcast to object fill_value = iNaT else: fill_value = iNaT elif is_datetimetz(dtype): if isna(fill_value): fill_value = iNaT elif is_float(fill_value): if issubclass(dtype.type, np.bool_): dtype = np.object_ elif issubclass(dtype.type, np.integer): dtype = np.float64 elif is_bool(fill_value): if not issubclass(dtype.type, np.bool_): dtype = np.object_ elif is_integer(fill_value): if 
issubclass(dtype.type, np.bool_): dtype = np.object_ elif issubclass(dtype.type, np.integer): # upcast to prevent overflow arr = np.asarray(fill_value) if arr != arr.astype(dtype): dtype = arr.dtype elif is_complex(fill_value): if issubclass(dtype.type, np.bool_): dtype = np.object_ elif issubclass(dtype.type, (np.integer, np.floating)): dtype = np.complex128 elif fill_value is None: if is_float_dtype(dtype) or is_complex_dtype(dtype): fill_value = np.nan elif is_integer_dtype(dtype): dtype = np.float64 fill_value = np.nan elif is_datetime_or_timedelta_dtype(dtype): fill_value = iNaT else: dtype = np.object_ else: dtype = np.object_ # in case we have a string that looked like a number if is_categorical_dtype(dtype): pass elif is_datetimetz(dtype): pass elif issubclass(np.dtype(dtype).type, string_types): dtype = np.object_ return dtype, fill_value def infer_dtype_from(val, pandas_dtype=False): """ interpret the dtype from a scalar or array. This is a convenience routines to infer dtype from a scalar or an array Parameters ---------- pandas_dtype : bool, default False whether to infer dtype including pandas extension types. If False, scalar/array belongs to pandas extension types is inferred as object """ if is_scalar(val): return infer_dtype_from_scalar(val, pandas_dtype=pandas_dtype) return infer_dtype_from_array(val, pandas_dtype=pandas_dtype) def infer_dtype_from_scalar(val, pandas_dtype=False): """ interpret the dtype from a scalar Parameters ---------- pandas_dtype : bool, default False whether to infer dtype including pandas extension types. 
If False, scalar belongs to pandas extension types is inferred as
        object
    """

    # default: anything we cannot classify below stays object dtype
    dtype = np.object_

    # a 1-element ndarray
    if isinstance(val, np.ndarray):
        msg = "invalid ndarray passed to _infer_dtype_from_scalar"
        if val.ndim != 0:
            raise ValueError(msg)

        dtype = val.dtype
        val = val.item()

    elif isinstance(val, string_types):

        # If we create an empty array using a string to infer
        # the dtype, NumPy will only allocate one character per entry
        # so this is kind of bad. Alternately we could use np.repeat
        # instead of np.empty (but then you still don't want things
        # coming out as np.str_!

        dtype = np.object_

    elif isinstance(val, (np.datetime64, datetime)):
        val = tslib.Timestamp(val)
        # NaT and tz-naive timestamps map to plain datetime64[ns]
        if val is tslib.NaT or val.tz is None:
            dtype = np.dtype('M8[ns]')
        else:
            if pandas_dtype:
                dtype = DatetimeTZDtype(unit='ns', tz=val.tz)
            else:
                # return datetimetz as object
                return np.object_, val
        # unwrap to the underlying int64 nanosecond value
        val = val.value

    elif isinstance(val, (np.timedelta64, timedelta)):
        val = tslib.Timedelta(val).value
        dtype = np.dtype('m8[ns]')

    elif is_bool(val):
        dtype = np.bool_

    elif is_integer(val):
        # a numpy integer scalar keeps its exact width; python int -> int64
        if isinstance(val, np.integer):
            dtype = type(val)
        else:
            dtype = np.int64

    elif is_float(val):
        # a numpy float scalar keeps its exact width; python float -> float64
        if isinstance(val, np.floating):
            dtype = type(val)
        else:
            dtype = np.float64

    elif is_complex(val):
        dtype = np.complex_

    elif pandas_dtype:
        # Period scalars only get a pandas extension dtype when asked for
        if lib.is_period(val):
            dtype = PeriodDtype(freq=val.freq)
            val = val.ordinal

    return dtype, val


def infer_dtype_from_array(arr, pandas_dtype=False):
    """
    infer the dtype from a scalar or array

    Parameters
    ----------
    arr : scalar or array
    pandas_dtype : bool, default False
        whether to infer dtype including pandas extension types.
        If False, array belongs to pandas extension types
        is inferred as object

    Returns
    -------
    tuple (numpy-compat/pandas-compat dtype, array)

    Notes
    -----
    if pandas_dtype=False. these infer to numpy dtypes
    exactly with the exception that mixed / object dtypes
    are not coerced by stringifying or conversion

    if pandas_dtype=True. datetime64tz-aware/categorical
    types will retain their character.

    Examples
    --------
    >>> np.asarray([1, '1'])
    array(['1', '1'], dtype='<U21')

    >>> infer_dtype_from_array([1, '1'])
    (numpy.object_, [1, '1'])

    """

    if isinstance(arr, np.ndarray):
        return arr.dtype, arr

    if not is_list_like(arr):
        arr = [arr]

    if pandas_dtype and is_extension_type(arr):
        return arr.dtype, arr

    elif isinstance(arr, ABCSeries):
        return arr.dtype, np.asarray(arr)

    # don't force numpy coerce with nan's
    inferred = lib.infer_dtype(arr)
    # string-like / mixed input is reported as object rather than letting
    # numpy stringify the elements (see the Examples above)
    if inferred in ['string', 'bytes', 'unicode',
                    'mixed', 'mixed-integer']:
        return (np.object_, arr)

    arr = np.asarray(arr)
    return arr.dtype, arr


def maybe_upcast(values, fill_value=np.nan, dtype=None, copy=False):
    """ provide explicit type promotion and coercion

    Parameters
    ----------
    values : the ndarray that we want to maybe upcast
    fill_value : what we want to fill with
    dtype : if None, then use the dtype of the values, else coerce to this type
    copy : if True always make a copy even if no upcast is required
    """

    # extension types are never upcast here; only honor the copy request
    if is_extension_type(values):
        if copy:
            values = values.copy()
    else:
        if dtype is None:
            dtype = values.dtype
        new_dtype, fill_value = maybe_promote(dtype, fill_value)
        if new_dtype != values.dtype:
            # astype already copies, so `copy` is only checked otherwise
            values = values.astype(new_dtype)
        elif copy:
            values = values.copy()

    return values, fill_value


def maybe_cast_item(obj, item, dtype):
    # Coerce column `item` of `obj` to `dtype` in place.
    # Only object/bool targets are cast; other non-integer targets are a bug.
    chunk = obj[item]

    if chunk.values.dtype != dtype:
        if dtype in (np.object_, np.bool_):
            obj[item] = chunk.astype(np.object_)
        elif not issubclass(dtype, (np.integer, np.bool_)):  # pragma: no cover
            raise ValueError("Unexpected dtype encountered: {dtype}"
                             .format(dtype=dtype))


def invalidate_string_dtypes(dtype_set):
    """Change string like dtypes to object for
    ``DataFrame.select_dtypes()``.
    """
    # `_string_dtypes` is the module-level set of string-like numpy dtypes
    non_string_dtypes = dtype_set - _string_dtypes
    if non_string_dtypes != dtype_set:
        raise TypeError("string dtypes are not allowed, use 'object' instead")


def maybe_convert_string_to_object(values):
    """

    Convert string-like and string-like array to convert object dtype.
    This is to avoid numpy to handle the array as str dtype.
    """
    if isinstance(values, string_types):
        # scalar string -> 1-element object array
        values = np.array([values], dtype=object)
    elif (isinstance(values, np.ndarray) and
          issubclass(values.dtype.type, (np.string_, np.unicode_))):
        values = values.astype(object)
    return values


def maybe_convert_scalar(values):
    """
    Convert a python scalar to the appropriate numpy dtype if possible
    This avoids numpy directly converting according to platform preferences
    """
    if is_scalar(values):
        dtype, values = infer_dtype_from_scalar(values)
        try:
            # e.g. np.int64(5); some inferred dtypes are not callable this way
            values = dtype(values)
        except TypeError:
            pass
    return values


def coerce_indexer_dtype(indexer, categories):
    """ coerce the indexer input array to the smallest dtype possible """
    # pick the narrowest signed integer type that can hold all category codes
    l = len(categories)
    if l < _int8_max:
        return _ensure_int8(indexer)
    elif l < _int16_max:
        return _ensure_int16(indexer)
    elif l < _int32_max:
        return _ensure_int32(indexer)
    return _ensure_int64(indexer)


def coerce_to_dtypes(result, dtypes):
    """
    given a dtypes and a result set, coerce the result
    elements to the dtypes
    """
    if len(result) != len(dtypes):
        raise AssertionError("_coerce_to_dtypes requires equal len arrays")

    from pandas.core.tools.timedeltas import _coerce_scalar_to_timedelta_type

    def conv(r, dtype):
        # best-effort per-element coercion; on any failure the original
        # element is returned unchanged
        try:
            if isna(r):
                pass
            elif dtype == _NS_DTYPE:
                r = tslib.Timestamp(r)
            elif dtype == _TD_DTYPE:
                r = _coerce_scalar_to_timedelta_type(r)
            elif dtype == np.bool_:
                # messy. non 0/1 integers do not get converted.
                if is_integer(r) and r not in [0, 1]:
                    return int(r)
                r = bool(r)
            elif dtype.kind == 'f':
                r = float(r)
            elif dtype.kind == 'i':
                r = int(r)
        # NOTE(review): bare except deliberately swallows everything here
        # (including KeyboardInterrupt) to keep coercion best-effort
        except:
            pass

        return r

    return [conv(r, dtype) for r, dtype in zip(result, dtypes)]


def astype_nansafe(arr, dtype, copy=True):
    """ return a view if copy is False, but
        need to be very careful as the result shape could change!
    """
    if not isinstance(dtype, np.dtype):
        dtype = pandas_dtype(dtype)

    if issubclass(dtype.type, text_type):
        # in Py3 that's str, in Py2 that's unicode
        return lib.astype_unicode(arr.ravel()).reshape(arr.shape)
    elif issubclass(dtype.type, string_types):
        return lib.astype_str(arr.ravel()).reshape(arr.shape)
    elif is_datetime64_dtype(arr):
        if dtype == object:
            return tslib.ints_to_pydatetime(arr.view(np.int64))
        elif dtype == np.int64:
            # reinterpret the nanosecond payload, no copy
            return arr.view(dtype)
        elif dtype != _NS_DTYPE:
            raise TypeError("cannot astype a datetimelike from [{from_dtype}] "
                            "to [{to_dtype}]".format(from_dtype=arr.dtype,
                                                     to_dtype=dtype))
        return arr.astype(_NS_DTYPE)
    elif is_timedelta64_dtype(arr):
        if dtype == np.int64:
            return arr.view(dtype)
        elif dtype == object:
            return tslib.ints_to_pytimedelta(arr.view(np.int64))

        # in py3, timedelta64[ns] are int64
        elif ((PY3 and dtype not in [_INT64_DTYPE, _TD_DTYPE]) or
              (not PY3 and dtype != _TD_DTYPE)):

            # allow frequency conversions
            if dtype.kind == 'm':
                # round-trip through float so NaT positions become NaN
                mask = isna(arr)
                result = arr.astype(dtype).astype(np.float64)
                result[mask] = np.nan
                return result

            raise TypeError("cannot astype a timedelta from [{from_dtype}] "
                            "to [{to_dtype}]".format(from_dtype=arr.dtype,
                                                     to_dtype=dtype))

        return arr.astype(_TD_DTYPE)
    elif (np.issubdtype(arr.dtype, np.floating) and
          np.issubdtype(dtype, np.integer)):

        # float -> int is only valid when every value is finite
        if not np.isfinite(arr).all():
            raise ValueError('Cannot convert non-finite values (NA or inf) to '
                             'integer')

    elif arr.dtype == np.object_ and np.issubdtype(dtype.type, np.integer):
        # work around NumPy brokenness, #1987
        return lib.astype_intsafe(arr.ravel(), dtype).reshape(arr.shape)

    # unit-less datetime64/timedelta64 targets are deprecated; normalize
    # them to the nanosecond flavor with a FutureWarning
    if dtype.name in ("datetime64", "timedelta64"):
        msg = ("Passing in '{dtype}' dtype with no frequency is "
               "deprecated and will raise in a future version. "
               "Please pass in '{dtype}[ns]' instead.")
        warnings.warn(msg.format(dtype=dtype.name),
                      FutureWarning, stacklevel=5)
        dtype = np.dtype(dtype.name + "[ns]")

    if copy:
        return arr.astype(dtype)
    return arr.view(dtype)


def maybe_convert_objects(values, convert_dates=True, convert_numeric=True,
                          convert_timedeltas=True, copy=True):
    """ if we have an object dtype, try to coerce dates and/or numbers """
    # conversions are attempted in order: dates, timedeltas, then numerics;
    # each stage only runs while the array is still object dtype

    # if we have passed in a list or scalar
    if isinstance(values, (list, tuple)):
        values = np.array(values, dtype=np.object_)
    if not hasattr(values, 'dtype'):
        values = np.array([values], dtype=np.object_)

    # convert dates
    if convert_dates and values.dtype == np.object_:

        # we take an aggressive stance and convert to datetime64[ns]
        if convert_dates == 'coerce':
            new_values = maybe_cast_to_datetime(values, 'M8[ns]',
                                                errors='coerce')

            # if we are all nans then leave me alone
            if not isna(new_values).all():
                values = new_values

        else:
            values = lib.maybe_convert_objects(values,
                                               convert_datetime=convert_dates)

    # convert timedeltas
    if convert_timedeltas and values.dtype == np.object_:

        if convert_timedeltas == 'coerce':
            from pandas.core.tools.timedeltas import to_timedelta
            new_values = to_timedelta(values, errors='coerce')

            # if we are all nans then leave me alone
            if not isna(new_values).all():
                values = new_values

        else:
            values = lib.maybe_convert_objects(
                values, convert_timedelta=convert_timedeltas)

    # convert to numeric
    if values.dtype == np.object_:
        if convert_numeric:
            try:
                new_values = lib.maybe_convert_numeric(values, set(),
                                                       coerce_numeric=True)

                # if we are all nans then leave me alone
                if not isna(new_values).all():
                    values = new_values

            # NOTE(review): bare except keeps numeric coercion best-effort
            except:
                pass
        else:
            # soft-conversion
            values = lib.maybe_convert_objects(values)

    values = values.copy() if copy else values

    return values


def soft_convert_objects(values, datetime=True, numeric=True, timedelta=True,
                         coerce=False, copy=True):
    """ if we have an object dtype, try to coerce dates and/or numbers """

    conversion_count = sum((datetime, numeric, timedelta))
    if conversion_count == 0:
        raise ValueError('At least one of datetime, numeric or timedelta must '
                         'be True.')
    elif conversion_count > 1 and coerce:
        raise ValueError("Only one of 'datetime', 'numeric' or "
                         "'timedelta' can be True when when coerce=True.")

    if isinstance(values, (list, tuple)):
        # List or scalar
        values = np.array(values, dtype=np.object_)
    elif not hasattr(values, 'dtype'):
        values = np.array([values], dtype=np.object_)
    elif not is_object_dtype(values.dtype):
        # If not object, do not attempt conversion
        values = values.copy() if copy else values
        return values

    # If 1 flag is coerce, ensure 2 others are False
    if coerce:
        # Immediate return if coerce
        if datetime:
            from pandas import to_datetime
            return to_datetime(values, errors='coerce', box=False)
        elif timedelta:
            from pandas import to_timedelta
            return to_timedelta(values, errors='coerce', box=False)
        elif numeric:
            from pandas import to_numeric
            return to_numeric(values, errors='coerce')

    # Soft conversions
    if datetime:
        values = lib.maybe_convert_objects(values, convert_datetime=datetime)

    if timedelta and is_object_dtype(values.dtype):
        # Object check to ensure only run if previous did not convert
        values = lib.maybe_convert_objects(values,
                                           convert_timedelta=timedelta)

    if numeric and is_object_dtype(values.dtype):
        try:
            converted = lib.maybe_convert_numeric(values, set(),
                                                  coerce_numeric=True)
            # If all NaNs, then do not-alter
            values = converted if not isna(converted).all() else values
            values = values.copy() if copy else values
        # NOTE(review): bare except keeps numeric coercion best-effort
        except:
            pass

    return values


def maybe_castable(arr):
    # return False to force a non-fastpath

    # check datetime64[ns]/timedelta64[ns] are valid
    # otherwise try to coerce
    kind = arr.dtype.kind
    if kind == 'M' or kind == 'm':
        return is_datetime64_dtype(arr.dtype)

    return arr.dtype.name not in _POSSIBLY_CAST_DTYPES


def maybe_infer_to_datetimelike(value, convert_dates=False):
    """
    we might have an array (or single object) that is datetime like,
    and no dtype is passed don't change the value
    unless we find a datetime/timedelta set

    this is pretty strict in that a datetime/timedelta is REQUIRED
    in addition to possible nulls/string likes

    Parameters
    ----------
    value : np.array / Series / Index / list-like
    convert_dates : boolean, default False
       if True try really hard to convert dates (such as datetime.date), other
       leave inferred dtype 'date' alone

    """

    if isinstance(value, (ABCDatetimeIndex, ABCPeriodIndex)):
        return value
    elif isinstance(value, ABCSeries):
        if isinstance(value._values, ABCDatetimeIndex):
            return value._values

    v = value
    if not is_list_like(v):
        v = [v]
    v = np.array(v, copy=False)

    # we only care about object dtypes
    if not is_object_dtype(v):
        return value

    # remember the original shape so the helpers can restore it
    shape = v.shape
    if not v.ndim == 1:
        v = v.ravel()

    if not len(v):
        return value

    def try_datetime(v):
        # safe coerce to datetime64
        try:
            v = tslib.array_to_datetime(v, errors='raise')
        except ValueError:

            # we might have a sequence of the same-datetimes with tz's
            # if so coerce to a DatetimeIndex; if they are not the same,
            # then these stay as object dtype
            try:
                from pandas import to_datetime
                return to_datetime(v)
            except:
                pass

        except:
            pass

        return v.reshape(shape)

    def try_timedelta(v):
        # safe coerce to timedelta64

        # will try first with a string & object conversion
        from pandas import to_timedelta
        try:
            return to_timedelta(v)._values.reshape(shape)
        except:
            return v.reshape(shape)

    inferred_type = lib.infer_datetimelike_array(_ensure_object(v))

    if inferred_type == 'date' and convert_dates:
        value = try_datetime(v)
    elif inferred_type == 'datetime':
        value = try_datetime(v)
    elif inferred_type == 'timedelta':
        value = try_timedelta(v)
    elif inferred_type == 'nat':

        # if all NaT, return as datetime
        if isna(v).all():
            value = try_datetime(v)
        else:

            # We have at least a NaT and a string
            # try timedelta first to avoid spurious datetime conversions
            # e.g. '00:00:01' is a timedelta but
            # technically is also a datetime
            value = try_timedelta(v)
            if lib.infer_dtype(value) in ['mixed']:
                value = try_datetime(v)

    return value


def maybe_cast_to_datetime(value, dtype, errors='raise'):
    """ try to cast the array/value to a datetimelike dtype, converting float
    nan to iNaT
    """
    from pandas.core.tools.timedeltas import to_timedelta
    from pandas.core.tools.datetimes import to_datetime

    if dtype is not None:
        if isinstance(dtype, string_types):
            dtype = np.dtype(dtype)

        is_datetime64 = is_datetime64_dtype(dtype)
        is_datetime64tz = is_datetime64tz_dtype(dtype)
        is_timedelta64 = is_timedelta64_dtype(dtype)

        if is_datetime64 or is_datetime64tz or is_timedelta64:

            # force the dtype if needed
            msg = ("Passing in '{dtype}' dtype with no frequency is "
                   "deprecated and will raise in a future version. "
                   "Please pass in '{dtype}[ns]' instead.")

            if is_datetime64 and not is_dtype_equal(dtype, _NS_DTYPE):
                if dtype.name in ('datetime64', 'datetime64[ns]'):
                    # unit-less 'datetime64' is deprecated -> warn + normalize
                    if dtype.name == 'datetime64':
                        warnings.warn(msg.format(dtype=dtype.name),
                                      FutureWarning, stacklevel=5)
                    dtype = _NS_DTYPE
                else:
                    raise TypeError("cannot convert datetimelike to "
                                    "dtype [{dtype}]".format(dtype=dtype))
            elif is_datetime64tz:

                # our NaT doesn't support tz's
                # this will coerce to DatetimeIndex with
                # a matching dtype below
                if is_scalar(value) and isna(value):
                    value = [value]

            elif is_timedelta64 and not is_dtype_equal(dtype, _TD_DTYPE):
                if dtype.name in ('timedelta64', 'timedelta64[ns]'):
                    # unit-less 'timedelta64' is deprecated -> warn + normalize
                    if dtype.name == 'timedelta64':
                        warnings.warn(msg.format(dtype=dtype.name),
                                      FutureWarning, stacklevel=5)
                    dtype = _TD_DTYPE
                else:
                    raise TypeError("cannot convert timedeltalike to "
                                    "dtype [{dtype}]".format(dtype=dtype))

            if is_scalar(value):
                if value == iNaT or isna(value):
                    value = iNaT
            else:
                value = np.array(value, copy=False)

                # have a scalar array-like (e.g. NaT)
                if value.ndim == 0:
                    value = iNaT

                # we have an array of datetime or timedeltas & nulls
                elif np.prod(value.shape) or not is_dtype_equal(value.dtype,
                                                                dtype):
                    try:
                        if is_datetime64:
                            value = to_datetime(value, errors=errors)._values
                        elif is_datetime64tz:
                            # input has to be UTC at this point, so just
                            # localize
                            value = (to_datetime(value, errors=errors)
                                     .tz_localize('UTC')
                                     .tz_convert(dtype.tz)
                                     )
                        elif is_timedelta64:
                            value = to_timedelta(value, errors=errors)._values
                    except (AttributeError, ValueError, TypeError):
                        pass

        # coerce datetimelike to object
        elif is_datetime64_dtype(value) and not is_datetime64_dtype(dtype):
            if is_object_dtype(dtype):
                if value.dtype != _NS_DTYPE:
                    value = value.astype(_NS_DTYPE)
                ints = np.asarray(value).view('i8')
                return tslib.ints_to_pydatetime(ints)

            # we have a non-castable dtype that was passed
            raise TypeError('Cannot cast datetime64 to {dtype}'
                            .format(dtype=dtype))

    else:
        is_array = isinstance(value, np.ndarray)

        # catch a datetime/timedelta that is not of ns variety
        # and no coercion specified
        if is_array and value.dtype.kind in ['M', 'm']:
            dtype = value.dtype

            if dtype.kind == 'M' and dtype != _NS_DTYPE:
                value = value.astype(_NS_DTYPE)

            elif dtype.kind == 'm' and dtype != _TD_DTYPE:
                value = to_timedelta(value)

        # only do this if we have an array and the dtype of the array is not
        # setup already we are not an integer/object, so don't bother with this
        # conversion
        elif not (is_array and not (issubclass(value.dtype.type, np.integer) or
                                    value.dtype == np.object_)):
            value = maybe_infer_to_datetimelike(value)

    return value


def find_common_type(types):
    """
    Find a common data type among the given dtypes.

    Parameters
    ----------
    types : list of dtypes

    Returns
    -------
    pandas extension or numpy dtype

    See Also
    --------
    numpy.find_common_type

    """

    if len(types) == 0:
        raise ValueError('no types given')

    first = types[0]

    # workaround for find_common_type([np.dtype('datetime64[ns]')] * 2)
    # => object
    if all(is_dtype_equal(first, t) for t in types[1:]):
        return first

    # any pandas extension dtype in the mix forces object
    if any(isinstance(t, ExtensionDtype) for t in types):
        return np.object

    # take lowest unit
    if all(is_datetime64_dtype(t) for t in types):
        return np.dtype('datetime64[ns]')
    if all(is_timedelta64_dtype(t) for t in types):
        return np.dtype('timedelta64[ns]')

    # don't mix bool / int or float or complex
    # this is different from numpy, which casts bool with float/int as int
    has_bools = any(is_bool_dtype(t) for t in types)
    if has_bools:
        has_ints = any(is_integer_dtype(t) for t in types)
        has_floats = any(is_float_dtype(t) for t in types)
        has_complex = any(is_complex_dtype(t) for t in types)
        if has_ints or has_floats or has_complex:
            return np.object

    return np.find_common_type(types, [])


def cast_scalar_to_array(shape, value, dtype=None):
    """
    create np.ndarray of specified shape and dtype, filled with values

    Parameters
    ----------
    shape : tuple
    value : scalar value
    dtype : np.dtype, optional
        dtype to coerce

    Returns
    -------
    ndarray of shape, filled with value, of specified / inferred dtype

    """

    if dtype is None:
        # infer the dtype from the scalar (may also normalize the fill value)
        dtype, fill_value = infer_dtype_from_scalar(value)
    else:
        fill_value = value

    values = np.empty(shape, dtype=dtype)
    values.fill(fill_value)

    return values
bsd-3-clause
saullocastro/pyNastran
pyNastran/op2/tables/oes_stressStrain/complex/oes_rods.py
1
11149
"""Complex CROD/CTUBE/CONROD stress/strain result containers for OP2 output."""
from __future__ import (nested_scopes, generators, division, absolute_import,
                        print_function, unicode_literals)
from six import iteritems
from six.moves import range
import numpy as np
from numpy import zeros, array_equal, allclose

from pyNastran.op2.tables.oes_stressStrain.real.oes_objects import StressObject, StrainObject, OES_Object
from pyNastran.f06.f06_formatting import write_imag_floats_13e, get_key0, _eigenvalue_header
try:
    import pandas as pd
except ImportError:
    pass


class ComplexRodArray(OES_Object):
    """Base array container for complex rod (axial/torsion) results.

    Data layout: ``self.data[itime, ielement, 0:2]`` = [axial, torsion]
    as complex64; ``self.element`` holds the element ids.
    """
    def __init__(self, data_code, is_sort1, isubcase, dt):
        OES_Object.__init__(self, data_code, isubcase, apply_data_code=False)
        self.eType = {}
        #self.code = [self.format_code, self.sort_code, self.s_code]
        self.nelements = 0  # result specific

    def is_real(self):
        """Complex results only - see is_complex."""
        return False

    def is_complex(self):
        return True

    def _reset_indices(self):
        # restart fill counters before a new pass of add_sort1 calls
        self.itotal = 0
        self.ielement = 0

    def _get_msgs(self):
        raise NotImplementedError()

    def get_headers(self):
        raise NotImplementedError()

    def build(self):
        """Allocate the time/element/data arrays once sizes are known."""
        if self.is_built:
            return
        assert self.ntimes > 0, 'ntimes=%s' % self.ntimes
        assert self.nelements > 0, 'nelements=%s' % self.nelements
        assert self.ntotal > 0, 'ntotal=%s' % self.ntotal
        # nelements was accumulated over all times; normalize to per-time
        self.nelements //= self.ntimes
        self.itime = 0
        self.ielement = 0
        self.itotal = 0
        self.is_built = True

        #print("ntimes=%s nelements=%s ntotal=%s" % (self.ntimes, self.nelements, self.ntotal))
        # NOTE(review): numpy integer nonlinear_factors would not match
        # `int` here and fall back to float32 - confirm intended
        dtype = 'float32'
        if isinstance(self.nonlinear_factor, int):
            dtype = 'int32'
        self._times = zeros(self.ntimes, dtype=dtype)
        self.element = zeros(self.nelements, dtype='int32')

        #[axial, torsion]
        self.data = zeros((self.ntimes, self.nelements, 2), dtype='complex64')

    def build_dataframe(self):
        """Build a pandas DataFrame view of the data.

        NOTE(review): relies on ``pd.Panel``, which only exists in older
        pandas releases.
        """
        headers = self.get_headers()
        column_names, column_values = self._build_dataframe_transient_header()
        self.data_frame = pd.Panel(self.data, items=column_values,
                                   major_axis=self.element,
                                   minor_axis=headers).to_frame()
        self.data_frame.columns.names = column_names
        self.data_frame.index.names = ['ElementID', 'Item']

    def __eq__(self, table):
        """Elementwise comparison; raises ValueError with a diff report
        on mismatch instead of returning False."""
        assert self.is_sort1() == table.is_sort1()
        self._eq_header(table)
        if not np.array_equal(self.element, table.element):
            assert self.element.shape == table.element.shape, 'shape=%s element.shape=%s' % (self.element.shape, table.element.shape)
            msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
            msg += '%s\n' % str(self.code_information())
            for eid, eid2 in zip(self.element, table.element):
                msg += '%s, %s\n' % (eid, eid2)
            print(msg)
            raise ValueError(msg)
        if not np.array_equal(self.data, table.data):
            msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
            msg += '%s\n' % str(self.code_information())
            ntimes = self.data.shape[0]

            i = 0
            if self.is_sort1():
                for itime in range(ntimes):
                    for ieid, eid in enumerate(self.element):
                        t1 = self.data[itime, ieid, :]
                        t2 = table.data[itime, ieid, :]
                        (axial1, torsion1) = t1
                        (axial2, torsion2) = t2
                        d = t1 - t2
                        # compare real/imag parts with an absolute tolerance
                        if not allclose([axial1.real, axial1.imag, torsion1.real, torsion1.imag],
                                        [axial2.real, axial2.imag, torsion2.real, torsion2.imag], atol=0.0001):
                            #if not np.array_equal(t1, t2):
                            msg += '%-4s (%s, %sj, %s, %sj)\n (%s, %sj, %s, %sj)\n dt12=(%s, %sj, %s, %sj)\n' % (
                                eid,
                                axial1.real, axial1.imag, torsion1.real, torsion1.imag,
                                axial2.real, axial2.imag, torsion2.real, torsion2.imag,
                                d[0].real, d[0].imag, d[1].real, d[1].imag,)
                            i += 1
                            # bail out after 10 mismatches to bound the report
                            if i > 10:
                                print(msg)
                                raise ValueError(msg)
            else:
                raise NotImplementedError(self.is_sort2())
            if i > 0:
                print(msg)
                raise ValueError(msg)
        return True

    def add_sort1(self, dt, eid, axial, torsion):
        """Append one element result for time/frequency step ``dt``."""
        self._times[self.itime] = dt
        self.element[self.ielement] = eid
        self.data[self.itime, self.ielement, :] = [axial, torsion]
        self.ielement += 1

    def get_stats(self):
        """Return a list of summary strings describing the object."""
        if not self.is_built:
            return [
                '<%s>\n' % self.__class__.__name__,
                ' ntimes: %i\n' % self.ntimes,
                ' ntotal: %i\n' % self.ntotal,
            ]

        ntimes, nelements, _ = self.data.shape
        assert self.ntimes == ntimes, 'ntimes=%s expected=%s' % (self.ntimes, ntimes)
        assert self.nelements == nelements, 'nelements=%s expected=%s' % (self.nelements, nelements)

        msg = []
        if self.nonlinear_factor is not None:  # transient
            msg.append(' type=%s ntimes=%i nelements=%i\n'
                       % (self.__class__.__name__, ntimes, nelements))
            ntimes_word = 'ntimes'
        else:
            msg.append(' type=%s nelements=%i\n' % (self.__class__.__name__, nelements))
            ntimes_word = '1'
        msg.append(' eType\n')
        headers = self.get_headers()
        n = len(headers)
        msg.append(' data: [%s, nelements, %i] where %i=[%s]\n' % (ntimes_word, n, n, str(', '.join(headers))))
        msg.append(' data.shape = %s\n' % str(self.data.shape).replace('L', ''))
        msg.append(' element type: %s\n ' % self.element_name)
        msg += self.get_data_code()
        return msg

    def get_element_index(self, eids):
        """Map ``eids`` to positions in ``self.element``.

        BUGFIX: ``searchsorted`` was called unqualified, but only ``zeros``,
        ``array_equal`` and ``allclose`` are imported from numpy, so this
        raised NameError at runtime; qualify it as ``np.searchsorted``.
        """
        # NOTE(review): np.searchsorted assumes eids is sorted - confirm
        itot = np.searchsorted(eids, self.element)
        return itot

    def eid_to_element_node_index(self, eids):
        """Same mapping as get_element_index (rods have one node per result).

        BUGFIX: same unqualified ``searchsorted`` NameError as above.
        """
        ind = np.searchsorted(eids, self.element)
        return ind

    def write_f06(self, f, header=None, page_stamp='PAGE %s',
                  page_num=1, is_mag_phase=False, is_sort1=True):
        """Write the result in F06 format to file object ``f``."""
        if header is None:
            header = []
        msg_temp = self.get_f06_header(is_mag_phase)

        if self.is_sort1():
            page_num = self._write_sort1_as_sort1(header, page_stamp, page_num, f, msg_temp, is_mag_phase)
        else:
            raise NotImplementedError()
        return page_num

    def _write_sort1_as_sort1(self, header, page_stamp, page_num, f, msg_temp, is_mag_phase):
        # one F06 page per time/frequency step
        ntimes = self.data.shape[0]
        eids = self.element
        for itime in range(ntimes):
            dt = self._times[itime]
            header = _eigenvalue_header(self, header, itime, ntimes, dt)
            f.write(''.join(header + msg_temp))

            axial = self.data[itime, :, 0]
            torsion = self.data[itime, :, 1]
            for eid, iaxial, itorsion in zip(eids, axial, torsion):
                [axialr, torsionr, axiali, torsioni] = write_imag_floats_13e([iaxial, itorsion], is_mag_phase)
                f.write(' %8i %-13s / %-13s %-13s / %s\n' % (
                    eid, axialr, axiali, torsionr, torsioni))
            f.write(page_stamp % page_num)
            page_num += 1
        return page_num - 1


class ComplexRodStressArray(ComplexRodArray, StressObject):
    """Complex rod stress results (axial, torsion)."""
    def __init__(self, data_code, is_sort1, isubcase, dt):
        ComplexRodArray.__init__(self, data_code, is_sort1, isubcase, dt)
        StressObject.__init__(self, data_code, isubcase)

    def get_headers(self):
        headers = ['axial', 'torsion']
        return headers

    def get_f06_header(self, is_mag_phase=True):
        """Return the F06 page header lines for the element type."""
        if self.element_type == 1:  # CROD
            element_header = ' C O M P L E X S T R E S S E S I N R O D E L E M E N T S ( C R O D )\n'
        elif self.element_type == 3:  # CTUBE
            element_header = ' C O M P L E X S T R E S S E S I N R O D E L E M E N T S ( C T U B E )\n'
        elif self.element_type == 10:  # CONROD
            element_header = ' C O M P L E X S T R E S S E S I N R O D E L E M E N T S ( C O N R O D )\n'
        else:
            raise NotImplementedError('element_name=%r element_type=%s' % (self.element_name, self.element_type))

        if is_mag_phase:
            mag_phase = ' (MAG/PHASE)\n'  # not tested
        else:
            mag_phase = ' (REAL/IMAGINARY)\n'

        words = [
            element_header,
            mag_phase,
            ' \n',
            ' ELEMENT AXIAL TORQUE\n',
            ' ID. FORCE\n',]
            #' 1 -2.459512E+05 / 3.377728E+04 0.0 / 0.0\n',]
        return words


class ComplexRodStrainArray(ComplexRodArray, StrainObject):
    """Complex rod strain results (axial, torsion)."""
    def __init__(self, data_code, is_sort1, isubcase, dt):
        ComplexRodArray.__init__(self, data_code, is_sort1, isubcase, dt)
        StrainObject.__init__(self, data_code, isubcase)

    def get_headers(self):
        headers = ['axial', 'torsion']
        return headers

    def get_f06_header(self, is_mag_phase=True):
        """Return the F06 page header lines for the element type."""
        if self.element_type == 1:  # CROD
            element_header = ' C O M P L E X S T R A I N S I N R O D E L E M E N T S ( C R O D )\n'
        elif self.element_type == 3:  # CTUBE
            element_header = ' C O M P L E X S T R A I N S I N R O D E L E M E N T S ( C T U B E )\n'
        elif self.element_type == 10:  # CONROD
            element_header = ' C O M P L E X S T R A I N S I N R O D E L E M E N T S ( C O N R O D )\n'
        else:
            raise NotImplementedError('element_name=%r element_type=%s' % (self.element_name, self.element_type))

        if is_mag_phase:
            mag_phase = ' (MAG/PHASE)\n'  # not tested
        else:
            mag_phase = ' (REAL/IMAGINARY)\n'

        words = [
            element_header,
            mag_phase,
            ' \n',
            ' ELEMENT AXIAL TORQUE\n',
            ' ID. FORCE\n',
            #' 1 -2.459512E+05 / 3.377728E+04 0.0 / 0.0\n',
        ]
        return words
lgpl-3.0
toobaz/pandas
pandas/tests/groupby/test_nth.py
2
16800
import numpy as np import pytest import pandas as pd from pandas import DataFrame, Index, MultiIndex, Series, Timestamp, isna from pandas.util.testing import assert_frame_equal, assert_series_equal def test_first_last_nth(df): # tests for first / last / nth grouped = df.groupby("A") first = grouped.first() expected = df.loc[[1, 0], ["B", "C", "D"]] expected.index = Index(["bar", "foo"], name="A") expected = expected.sort_index() assert_frame_equal(first, expected) nth = grouped.nth(0) assert_frame_equal(nth, expected) last = grouped.last() expected = df.loc[[5, 7], ["B", "C", "D"]] expected.index = Index(["bar", "foo"], name="A") assert_frame_equal(last, expected) nth = grouped.nth(-1) assert_frame_equal(nth, expected) nth = grouped.nth(1) expected = df.loc[[2, 3], ["B", "C", "D"]].copy() expected.index = Index(["foo", "bar"], name="A") expected = expected.sort_index() assert_frame_equal(nth, expected) # it works! grouped["B"].first() grouped["B"].last() grouped["B"].nth(0) df.loc[df["A"] == "foo", "B"] = np.nan assert isna(grouped["B"].first()["foo"]) assert isna(grouped["B"].last()["foo"]) assert isna(grouped["B"].nth(0)["foo"]) # v0.14.0 whatsnew df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=["A", "B"]) g = df.groupby("A") result = g.first() expected = df.iloc[[1, 2]].set_index("A") assert_frame_equal(result, expected) expected = df.iloc[[1, 2]].set_index("A") result = g.nth(0, dropna="any") assert_frame_equal(result, expected) def test_first_last_nth_dtypes(df_mixed_floats): df = df_mixed_floats.copy() df["E"] = True df["F"] = 1 # tests for first / last / nth grouped = df.groupby("A") first = grouped.first() expected = df.loc[[1, 0], ["B", "C", "D", "E", "F"]] expected.index = Index(["bar", "foo"], name="A") expected = expected.sort_index() assert_frame_equal(first, expected) last = grouped.last() expected = df.loc[[5, 7], ["B", "C", "D", "E", "F"]] expected.index = Index(["bar", "foo"], name="A") expected = expected.sort_index() 
assert_frame_equal(last, expected) nth = grouped.nth(1) expected = df.loc[[3, 2], ["B", "C", "D", "E", "F"]] expected.index = Index(["bar", "foo"], name="A") expected = expected.sort_index() assert_frame_equal(nth, expected) # GH 2763, first/last shifting dtypes idx = list(range(10)) idx.append(9) s = Series(data=range(11), index=idx, name="IntCol") assert s.dtype == "int64" f = s.groupby(level=0).first() assert f.dtype == "int64" def test_nth(): df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=["A", "B"]) g = df.groupby("A") assert_frame_equal(g.nth(0), df.iloc[[0, 2]].set_index("A")) assert_frame_equal(g.nth(1), df.iloc[[1]].set_index("A")) assert_frame_equal(g.nth(2), df.loc[[]].set_index("A")) assert_frame_equal(g.nth(-1), df.iloc[[1, 2]].set_index("A")) assert_frame_equal(g.nth(-2), df.iloc[[0]].set_index("A")) assert_frame_equal(g.nth(-3), df.loc[[]].set_index("A")) assert_series_equal(g.B.nth(0), df.set_index("A").B.iloc[[0, 2]]) assert_series_equal(g.B.nth(1), df.set_index("A").B.iloc[[1]]) assert_frame_equal(g[["B"]].nth(0), df.loc[[0, 2], ["A", "B"]].set_index("A")) exp = df.set_index("A") assert_frame_equal(g.nth(0, dropna="any"), exp.iloc[[1, 2]]) assert_frame_equal(g.nth(-1, dropna="any"), exp.iloc[[1, 2]]) exp["B"] = np.nan assert_frame_equal(g.nth(7, dropna="any"), exp.iloc[[1, 2]]) assert_frame_equal(g.nth(2, dropna="any"), exp.iloc[[1, 2]]) # out of bounds, regression from 0.13.1 # GH 6621 df = DataFrame( { "color": {0: "green", 1: "green", 2: "red", 3: "red", 4: "red"}, "food": {0: "ham", 1: "eggs", 2: "eggs", 3: "ham", 4: "pork"}, "two": { 0: 1.5456590000000001, 1: -0.070345000000000005, 2: -2.4004539999999999, 3: 0.46206000000000003, 4: 0.52350799999999997, }, "one": { 0: 0.56573799999999996, 1: -0.9742360000000001, 2: 1.033801, 3: -0.78543499999999999, 4: 0.70422799999999997, }, } ).set_index(["color", "food"]) result = df.groupby(level=0, as_index=False).nth(2) expected = df.iloc[[-1]] assert_frame_equal(result, expected) result = 
df.groupby(level=0, as_index=False).nth(3) expected = df.loc[[]] assert_frame_equal(result, expected) # GH 7559 # from the vbench df = DataFrame(np.random.randint(1, 10, (100, 2)), dtype="int64") s = df[1] g = df[0] expected = s.groupby(g).first() expected2 = s.groupby(g).apply(lambda x: x.iloc[0]) assert_series_equal(expected2, expected, check_names=False) assert expected.name == 1 assert expected2.name == 1 # validate first v = s[g == 1].iloc[0] assert expected.iloc[0] == v assert expected2.iloc[0] == v # this is NOT the same as .first (as sorted is default!) # as it keeps the order in the series (and not the group order) # related GH 7287 expected = s.groupby(g, sort=False).first() result = s.groupby(g, sort=False).nth(0, dropna="all") assert_series_equal(result, expected) with pytest.raises(ValueError, match="For a DataFrame groupby"): s.groupby(g, sort=False).nth(0, dropna=True) # doc example df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=["A", "B"]) g = df.groupby("A") result = g.B.nth(0, dropna="all") expected = g.B.first() assert_series_equal(result, expected) # test multiple nth values df = DataFrame([[1, np.nan], [1, 3], [1, 4], [5, 6], [5, 7]], columns=["A", "B"]) g = df.groupby("A") assert_frame_equal(g.nth(0), df.iloc[[0, 3]].set_index("A")) assert_frame_equal(g.nth([0]), df.iloc[[0, 3]].set_index("A")) assert_frame_equal(g.nth([0, 1]), df.iloc[[0, 1, 3, 4]].set_index("A")) assert_frame_equal(g.nth([0, -1]), df.iloc[[0, 2, 3, 4]].set_index("A")) assert_frame_equal(g.nth([0, 1, 2]), df.iloc[[0, 1, 2, 3, 4]].set_index("A")) assert_frame_equal(g.nth([0, 1, -1]), df.iloc[[0, 1, 2, 3, 4]].set_index("A")) assert_frame_equal(g.nth([2]), df.iloc[[2]].set_index("A")) assert_frame_equal(g.nth([3, 4]), df.loc[[]].set_index("A")) business_dates = pd.date_range(start="4/1/2014", end="6/30/2014", freq="B") df = DataFrame(1, index=business_dates, columns=["a", "b"]) # get the first, fourth and last two business days for each month key = [df.index.year, 
df.index.month] result = df.groupby(key, as_index=False).nth([0, 3, -2, -1]) expected_dates = pd.to_datetime( [ "2014/4/1", "2014/4/4", "2014/4/29", "2014/4/30", "2014/5/1", "2014/5/6", "2014/5/29", "2014/5/30", "2014/6/2", "2014/6/5", "2014/6/27", "2014/6/30", ] ) expected = DataFrame(1, columns=["a", "b"], index=expected_dates) assert_frame_equal(result, expected) def test_nth_multi_index(three_group): # PR 9090, related to issue 8979 # test nth on MultiIndex, should match .first() grouped = three_group.groupby(["A", "B"]) result = grouped.nth(0) expected = grouped.first() assert_frame_equal(result, expected) @pytest.mark.parametrize( "data, expected_first, expected_last", [ ( { "id": ["A"], "time": Timestamp("2012-02-01 14:00:00", tz="US/Central"), "foo": [1], }, { "id": ["A"], "time": Timestamp("2012-02-01 14:00:00", tz="US/Central"), "foo": [1], }, { "id": ["A"], "time": Timestamp("2012-02-01 14:00:00", tz="US/Central"), "foo": [1], }, ), ( { "id": ["A", "B", "A"], "time": [ Timestamp("2012-01-01 13:00:00", tz="America/New_York"), Timestamp("2012-02-01 14:00:00", tz="US/Central"), Timestamp("2012-03-01 12:00:00", tz="Europe/London"), ], "foo": [1, 2, 3], }, { "id": ["A", "B"], "time": [ Timestamp("2012-01-01 13:00:00", tz="America/New_York"), Timestamp("2012-02-01 14:00:00", tz="US/Central"), ], "foo": [1, 2], }, { "id": ["A", "B"], "time": [ Timestamp("2012-03-01 12:00:00", tz="Europe/London"), Timestamp("2012-02-01 14:00:00", tz="US/Central"), ], "foo": [3, 2], }, ), ], ) def test_first_last_tz(data, expected_first, expected_last): # GH15884 # Test that the timezone is retained when calling first # or last on groupby with as_index=False df = DataFrame(data) result = df.groupby("id", as_index=False).first() expected = DataFrame(expected_first) cols = ["id", "time", "foo"] assert_frame_equal(result[cols], expected[cols]) result = df.groupby("id", as_index=False)["time"].first() assert_frame_equal(result, expected[["id", "time"]]) result = df.groupby("id", 
as_index=False).last() expected = DataFrame(expected_last) cols = ["id", "time", "foo"] assert_frame_equal(result[cols], expected[cols]) result = df.groupby("id", as_index=False)["time"].last() assert_frame_equal(result, expected[["id", "time"]]) @pytest.mark.parametrize( "method, ts, alpha", [ ["first", Timestamp("2013-01-01", tz="US/Eastern"), "a"], ["last", Timestamp("2013-01-02", tz="US/Eastern"), "b"], ], ) def test_first_last_tz_multi_column(method, ts, alpha): # GH 21603 category_string = pd.Series(list("abc")).astype("category") df = pd.DataFrame( { "group": [1, 1, 2], "category_string": category_string, "datetimetz": pd.date_range("20130101", periods=3, tz="US/Eastern"), } ) result = getattr(df.groupby("group"), method)() expected = pd.DataFrame( { "category_string": pd.Categorical( [alpha, "c"], dtype=category_string.dtype ), "datetimetz": [ts, Timestamp("2013-01-03", tz="US/Eastern")], }, index=pd.Index([1, 2], name="group"), ) assert_frame_equal(result, expected) def test_nth_multi_index_as_expected(): # PR 9090, related to issue 8979 # test nth on MultiIndex three_group = DataFrame( { "A": [ "foo", "foo", "foo", "foo", "bar", "bar", "bar", "bar", "foo", "foo", "foo", ], "B": [ "one", "one", "one", "two", "one", "one", "one", "two", "two", "two", "one", ], "C": [ "dull", "dull", "shiny", "dull", "dull", "shiny", "shiny", "dull", "shiny", "shiny", "shiny", ], } ) grouped = three_group.groupby(["A", "B"]) result = grouped.nth(0) expected = DataFrame( {"C": ["dull", "dull", "dull", "dull"]}, index=MultiIndex.from_arrays( [["bar", "bar", "foo", "foo"], ["one", "two", "one", "two"]], names=["A", "B"], ), ) assert_frame_equal(result, expected) def test_groupby_head_tail(): df = DataFrame([[1, 2], [1, 4], [5, 6]], columns=["A", "B"]) g_as = df.groupby("A", as_index=True) g_not_as = df.groupby("A", as_index=False) # as_index= False, much easier assert_frame_equal(df.loc[[0, 2]], g_not_as.head(1)) assert_frame_equal(df.loc[[1, 2]], g_not_as.tail(1)) empty_not_as 
= DataFrame( columns=df.columns, index=pd.Index([], dtype=df.index.dtype) ) empty_not_as["A"] = empty_not_as["A"].astype(df.A.dtype) empty_not_as["B"] = empty_not_as["B"].astype(df.B.dtype) assert_frame_equal(empty_not_as, g_not_as.head(0)) assert_frame_equal(empty_not_as, g_not_as.tail(0)) assert_frame_equal(empty_not_as, g_not_as.head(-1)) assert_frame_equal(empty_not_as, g_not_as.tail(-1)) assert_frame_equal(df, g_not_as.head(7)) # contains all assert_frame_equal(df, g_not_as.tail(7)) # as_index=True, (used to be different) df_as = df assert_frame_equal(df_as.loc[[0, 2]], g_as.head(1)) assert_frame_equal(df_as.loc[[1, 2]], g_as.tail(1)) empty_as = DataFrame(index=df_as.index[:0], columns=df.columns) empty_as["A"] = empty_not_as["A"].astype(df.A.dtype) empty_as["B"] = empty_not_as["B"].astype(df.B.dtype) assert_frame_equal(empty_as, g_as.head(0)) assert_frame_equal(empty_as, g_as.tail(0)) assert_frame_equal(empty_as, g_as.head(-1)) assert_frame_equal(empty_as, g_as.tail(-1)) assert_frame_equal(df_as, g_as.head(7)) # contains all assert_frame_equal(df_as, g_as.tail(7)) # test with selection assert_frame_equal(g_as[[]].head(1), df_as.loc[[0, 2], []]) assert_frame_equal(g_as[["A"]].head(1), df_as.loc[[0, 2], ["A"]]) assert_frame_equal(g_as[["B"]].head(1), df_as.loc[[0, 2], ["B"]]) assert_frame_equal(g_as[["A", "B"]].head(1), df_as.loc[[0, 2]]) assert_frame_equal(g_not_as[[]].head(1), df_as.loc[[0, 2], []]) assert_frame_equal(g_not_as[["A"]].head(1), df_as.loc[[0, 2], ["A"]]) assert_frame_equal(g_not_as[["B"]].head(1), df_as.loc[[0, 2], ["B"]]) assert_frame_equal(g_not_as[["A", "B"]].head(1), df_as.loc[[0, 2]]) def test_group_selection_cache(): # GH 12839 nth, head, and tail should return same result consistently df = DataFrame([[1, 2], [1, 4], [5, 6]], columns=["A", "B"]) expected = df.iloc[[0, 2]].set_index("A") g = df.groupby("A") result1 = g.head(n=2) result2 = g.nth(0) assert_frame_equal(result1, df) assert_frame_equal(result2, expected) g = df.groupby("A") 
result1 = g.tail(n=2) result2 = g.nth(0) assert_frame_equal(result1, df) assert_frame_equal(result2, expected) g = df.groupby("A") result1 = g.nth(0) result2 = g.head(n=2) assert_frame_equal(result1, expected) assert_frame_equal(result2, df) g = df.groupby("A") result1 = g.nth(0) result2 = g.tail(n=2) assert_frame_equal(result1, expected) assert_frame_equal(result2, df) def test_nth_empty(): # GH 16064 df = DataFrame(index=[0], columns=["a", "b", "c"]) result = df.groupby("a").nth(10) expected = DataFrame(index=Index([], name="a"), columns=["b", "c"]) assert_frame_equal(result, expected) result = df.groupby(["a", "b"]).nth(10) expected = DataFrame( index=MultiIndex([[], []], [[], []], names=["a", "b"]), columns=["c"] ) assert_frame_equal(result, expected) def test_nth_column_order(): # GH 20760 # Check that nth preserves column order df = DataFrame( [[1, "b", 100], [1, "a", 50], [1, "a", np.nan], [2, "c", 200], [2, "d", 150]], columns=["A", "C", "B"], ) result = df.groupby("A").nth(0) expected = DataFrame( [["b", 100.0], ["c", 200.0]], columns=["C", "B"], index=Index([1, 2], name="A") ) assert_frame_equal(result, expected) result = df.groupby("A").nth(-1, dropna="any") expected = DataFrame( [["a", 50.0], ["d", 150.0]], columns=["C", "B"], index=Index([1, 2], name="A") ) assert_frame_equal(result, expected) @pytest.mark.parametrize("dropna", [None, "any", "all"]) def test_nth_nan_in_grouper(dropna): # GH 26011 df = DataFrame( [[np.nan, 0, 1], ["abc", 2, 3], [np.nan, 4, 5], ["def", 6, 7], [np.nan, 8, 9]], columns=list("abc"), ) result = df.groupby("a").nth(0, dropna=dropna) expected = pd.DataFrame( [[2, 3], [6, 7]], columns=list("bc"), index=Index(["abc", "def"], name="a") ) assert_frame_equal(result, expected)
bsd-3-clause
abhitopia/tensorflow
tensorflow/python/estimator/inputs/queues/feeding_functions_test.py
58
9375
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests feeding functions using arrays and `DataFrames`.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import numpy as np from tensorflow.python.estimator.inputs.queues import feeding_functions as ff from tensorflow.python.platform import test try: # pylint: disable=g-import-not-at-top import pandas as pd HAS_PANDAS = True except IOError: # Pandas writes a temporary file during import. If it fails, don't use pandas. 
HAS_PANDAS = False except ImportError: HAS_PANDAS = False def vals_to_list(a): return { key: val.tolist() if isinstance(val, np.ndarray) else val for key, val in a.items() } class _FeedingFunctionsTestCase(test.TestCase): """Tests for feeding functions.""" def testArrayFeedFnBatchOne(self): array = np.arange(32).reshape([16, 2]) placeholders = ["index_placeholder", "value_placeholder"] aff = ff._ArrayFeedFn(placeholders, array, 1) # cycle around a couple times for x in range(0, 100): i = x % 16 expected = { "index_placeholder": [i], "value_placeholder": [[2 * i, 2 * i + 1]] } actual = aff() self.assertEqual(expected, vals_to_list(actual)) def testArrayFeedFnBatchFive(self): array = np.arange(32).reshape([16, 2]) placeholders = ["index_placeholder", "value_placeholder"] aff = ff._ArrayFeedFn(placeholders, array, 5) # cycle around a couple times for _ in range(0, 101, 2): aff() expected = { "index_placeholder": [15, 0, 1, 2, 3], "value_placeholder": [[30, 31], [0, 1], [2, 3], [4, 5], [6, 7]] } actual = aff() self.assertEqual(expected, vals_to_list(actual)) def testArrayFeedFnBatchTwoWithOneEpoch(self): array = np.arange(5) + 10 placeholders = ["index_placeholder", "value_placeholder"] aff = ff._ArrayFeedFn(placeholders, array, batch_size=2, num_epochs=1) expected = { "index_placeholder": [0, 1], "value_placeholder": [10, 11] } actual = aff() self.assertEqual(expected, vals_to_list(actual)) expected = { "index_placeholder": [2, 3], "value_placeholder": [12, 13] } actual = aff() self.assertEqual(expected, vals_to_list(actual)) expected = { "index_placeholder": [4], "value_placeholder": [14] } actual = aff() self.assertEqual(expected, vals_to_list(actual)) def testArrayFeedFnBatchOneHundred(self): array = np.arange(32).reshape([16, 2]) placeholders = ["index_placeholder", "value_placeholder"] aff = ff._ArrayFeedFn(placeholders, array, 100) expected = { "index_placeholder": list(range(0, 16)) * 6 + list(range(0, 4)), "value_placeholder": np.arange(32).reshape([16, 
2]).tolist() * 6 + [[0, 1], [2, 3], [4, 5], [6, 7]] } actual = aff() self.assertEqual(expected, vals_to_list(actual)) def testArrayFeedFnBatchOneHundredWithSmallerArrayAndMultipleEpochs(self): array = np.arange(2) + 10 placeholders = ["index_placeholder", "value_placeholder"] aff = ff._ArrayFeedFn(placeholders, array, batch_size=100, num_epochs=2) expected = { "index_placeholder": [0, 1, 0, 1], "value_placeholder": [10, 11, 10, 11], } actual = aff() self.assertEqual(expected, vals_to_list(actual)) def testPandasFeedFnBatchOne(self): if not HAS_PANDAS: return array1 = np.arange(32, 64) array2 = np.arange(64, 96) df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 128)) placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"] aff = ff._PandasFeedFn(placeholders, df, 1) # cycle around a couple times for x in range(0, 100): i = x % 32 expected = { "index_placeholder": [i + 96], "a_placeholder": [32 + i], "b_placeholder": [64 + i] } actual = aff() self.assertEqual(expected, vals_to_list(actual)) def testPandasFeedFnBatchFive(self): if not HAS_PANDAS: return array1 = np.arange(32, 64) array2 = np.arange(64, 96) df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 128)) placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"] aff = ff._PandasFeedFn(placeholders, df, 5) # cycle around a couple times for _ in range(0, 101, 2): aff() expected = { "index_placeholder": [127, 96, 97, 98, 99], "a_placeholder": [63, 32, 33, 34, 35], "b_placeholder": [95, 64, 65, 66, 67] } actual = aff() self.assertEqual(expected, vals_to_list(actual)) def testPandasFeedFnBatchTwoWithOneEpoch(self): if not HAS_PANDAS: return array1 = np.arange(32, 37) array2 = np.arange(64, 69) df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 101)) placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"] aff = ff._PandasFeedFn(placeholders, df, batch_size=2, num_epochs=1) expected = { "index_placeholder": [96, 97], 
"a_placeholder": [32, 33], "b_placeholder": [64, 65] } actual = aff() self.assertEqual(expected, vals_to_list(actual)) expected = { "index_placeholder": [98, 99], "a_placeholder": [34, 35], "b_placeholder": [66, 67] } actual = aff() self.assertEqual(expected, vals_to_list(actual)) expected = { "index_placeholder": [100], "a_placeholder": [36], "b_placeholder": [68] } actual = aff() self.assertEqual(expected, vals_to_list(actual)) def testPandasFeedFnBatchOneHundred(self): if not HAS_PANDAS: return array1 = np.arange(32, 64) array2 = np.arange(64, 96) df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 128)) placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"] aff = ff._PandasFeedFn(placeholders, df, 100) expected = { "index_placeholder": list(range(96, 128)) * 3 + list(range(96, 100)), "a_placeholder": list(range(32, 64)) * 3 + list(range(32, 36)), "b_placeholder": list(range(64, 96)) * 3 + list(range(64, 68)) } actual = aff() self.assertEqual(expected, vals_to_list(actual)) def testPandasFeedFnBatchOneHundredWithSmallDataArrayAndMultipleEpochs(self): if not HAS_PANDAS: return array1 = np.arange(32, 34) array2 = np.arange(64, 66) df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 98)) placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"] aff = ff._PandasFeedFn(placeholders, df, batch_size=100, num_epochs=2) expected = { "index_placeholder": [96, 97, 96, 97], "a_placeholder": [32, 33, 32, 33], "b_placeholder": [64, 65, 64, 65] } actual = aff() self.assertEqual(expected, vals_to_list(actual)) def testOrderedDictNumpyFeedFnBatchTwoWithOneEpoch(self): a = np.arange(32, 37) b = np.arange(64, 69) x = {"a": a, "b": b} ordered_dict_x = collections.OrderedDict( sorted(x.items(), key=lambda t: t[0])) placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"] aff = ff._OrderedDictNumpyFeedFn( placeholders, ordered_dict_x, batch_size=2, num_epochs=1) expected = { "index_placeholder": [0, 1], 
"a_placeholder": [32, 33], "b_placeholder": [64, 65] } actual = aff() self.assertEqual(expected, vals_to_list(actual)) expected = { "index_placeholder": [2, 3], "a_placeholder": [34, 35], "b_placeholder": [66, 67] } actual = aff() self.assertEqual(expected, vals_to_list(actual)) expected = { "index_placeholder": [4], "a_placeholder": [36], "b_placeholder": [68] } actual = aff() self.assertEqual(expected, vals_to_list(actual)) def testOrderedDictNumpyFeedFnLargeBatchWithSmallArrayAndMultipleEpochs(self): a = np.arange(32, 34) b = np.arange(64, 66) x = {"a": a, "b": b} ordered_dict_x = collections.OrderedDict( sorted(x.items(), key=lambda t: t[0])) placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"] aff = ff._OrderedDictNumpyFeedFn( placeholders, ordered_dict_x, batch_size=100, num_epochs=2) expected = { "index_placeholder": [0, 1, 0, 1], "a_placeholder": [32, 33, 32, 33], "b_placeholder": [64, 65, 64, 65] } actual = aff() self.assertEqual(expected, vals_to_list(actual)) if __name__ == "__main__": test.main()
apache-2.0
fyfcauc/android_external_chromium-org
chrome/test/nacl_test_injection/buildbot_chrome_nacl_stage.py
26
11131
#!/usr/bin/python # Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Do all the steps required to build and test against nacl.""" import optparse import os.path import re import shutil import subprocess import sys import find_chrome # Copied from buildbot/buildbot_lib.py def TryToCleanContents(path, file_name_filter=lambda fn: True): """ Remove the contents of a directory without touching the directory itself. Ignores all failures. """ if os.path.exists(path): for fn in os.listdir(path): TryToCleanPath(os.path.join(path, fn), file_name_filter) # Copied from buildbot/buildbot_lib.py def TryToCleanPath(path, file_name_filter=lambda fn: True): """ Removes a file or directory. Ignores all failures. """ if os.path.exists(path): if file_name_filter(path): print 'Trying to remove %s' % path if os.path.isdir(path): shutil.rmtree(path, ignore_errors=True) else: try: os.remove(path) except Exception: pass else: print 'Skipping %s' % path # TODO(ncbray): this is somewhat unsafe. We should fix the underlying problem. def CleanTempDir(): # Only delete files and directories like: # a) C:\temp\83C4.tmp # b) /tmp/.org.chromium.Chromium.EQrEzl file_name_re = re.compile( r'[\\/]([0-9a-fA-F]+\.tmp|\.org\.chrom\w+\.Chrom\w+\..+)$') file_name_filter = lambda fn: file_name_re.search(fn) is not None path = os.environ.get('TMP', os.environ.get('TEMP', '/tmp')) if len(path) >= 4 and os.path.isdir(path): print print "Cleaning out the temp directory." print TryToCleanContents(path, file_name_filter) else: print print "Cannot find temp directory, not cleaning it." 
print def RunCommand(cmd, cwd, env): sys.stdout.write('\nRunning %s\n\n' % ' '.join(cmd)) sys.stdout.flush() retcode = subprocess.call(cmd, cwd=cwd, env=env) if retcode != 0: sys.stdout.write('\nFailed: %s\n\n' % ' '.join(cmd)) sys.exit(retcode) def RunTests(name, cmd, nacl_dir, env): sys.stdout.write('\n\nBuilding files needed for %s testing...\n\n' % name) RunCommand(cmd + ['do_not_run_tests=1', '-j8'], nacl_dir, env) sys.stdout.write('\n\nRunning %s tests...\n\n' % name) RunCommand(cmd, nacl_dir, env) def BuildAndTest(options): # Refuse to run under cygwin. if sys.platform == 'cygwin': raise Exception('I do not work under cygwin, sorry.') # By default, use the version of Python is being used to run this script. python = sys.executable if sys.platform == 'darwin': # Mac 10.5 bots tend to use a particularlly old version of Python, look for # a newer version. macpython27 = '/Library/Frameworks/Python.framework/Versions/2.7/bin/python' if os.path.exists(macpython27): python = macpython27 script_dir = os.path.dirname(os.path.abspath(__file__)) src_dir = os.path.dirname(os.path.dirname(os.path.dirname(script_dir))) nacl_dir = os.path.join(src_dir, 'native_client') # Decide platform specifics. 
if options.browser_path: chrome_filename = options.browser_path else: chrome_filename = find_chrome.FindChrome(src_dir, [options.mode]) if chrome_filename is None: raise Exception('Cannot find a chome binary - specify one with ' '--browser_path?') env = dict(os.environ) if sys.platform in ['win32', 'cygwin']: if options.bits == 64: bits = 64 elif options.bits == 32: bits = 32 elif '64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or \ '64' in os.environ.get('PROCESSOR_ARCHITEW6432', ''): bits = 64 else: bits = 32 msvs_path = ';'.join([ r'c:\Program Files\Microsoft Visual Studio 9.0\VC', r'c:\Program Files (x86)\Microsoft Visual Studio 9.0\VC', r'c:\Program Files\Microsoft Visual Studio 9.0\Common7\Tools', r'c:\Program Files (x86)\Microsoft Visual Studio 9.0\Common7\Tools', r'c:\Program Files\Microsoft Visual Studio 8\VC', r'c:\Program Files (x86)\Microsoft Visual Studio 8\VC', r'c:\Program Files\Microsoft Visual Studio 8\Common7\Tools', r'c:\Program Files (x86)\Microsoft Visual Studio 8\Common7\Tools', ]) env['PATH'] += ';' + msvs_path scons = [python, 'scons.py'] elif sys.platform == 'darwin': if options.bits == 64: bits = 64 elif options.bits == 32: bits = 32 else: p = subprocess.Popen(['file', chrome_filename], stdout=subprocess.PIPE) (p_stdout, _) = p.communicate() assert p.returncode == 0 if p_stdout.find('executable x86_64') >= 0: bits = 64 else: bits = 32 scons = [python, 'scons.py'] else: p = subprocess.Popen( 'uname -m | ' 'sed -e "s/i.86/ia32/;s/x86_64/x64/;s/amd64/x64/;s/arm.*/arm/"', shell=True, stdout=subprocess.PIPE) (p_stdout, _) = p.communicate() assert p.returncode == 0 if options.bits == 64: bits = 64 elif options.bits == 32: bits = 32 elif p_stdout.find('64') >= 0: bits = 64 else: bits = 32 # xvfb-run has a 2-second overhead per invocation, so it is cheaper to wrap # the entire build step rather than each test (browser_headless=1). # We also need to make sure that there are at least 24 bits per pixel. 
# https://code.google.com/p/chromium/issues/detail?id=316687 scons = [ 'xvfb-run', '--auto-servernum', '--server-args', '-screen 0 1024x768x24', python, 'scons.py', ] if options.jobs > 1: scons.append('-j%d' % options.jobs) scons.append('disable_tests=%s' % options.disable_tests) if options.buildbot is not None: scons.append('buildbot=%s' % (options.buildbot,)) # Clean the output of the previous build. # Incremental builds can get wedged in weird ways, so we're trading speed # for reliability. shutil.rmtree(os.path.join(nacl_dir, 'scons-out'), True) # check that the HOST (not target) is 64bit # this is emulating what msvs_env.bat is doing if '64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or \ '64' in os.environ.get('PROCESSOR_ARCHITEW6432', ''): # 64bit HOST env['VS90COMNTOOLS'] = ('c:\\Program Files (x86)\\' 'Microsoft Visual Studio 9.0\\Common7\\Tools\\') env['VS80COMNTOOLS'] = ('c:\\Program Files (x86)\\' 'Microsoft Visual Studio 8.0\\Common7\\Tools\\') else: # 32bit HOST env['VS90COMNTOOLS'] = ('c:\\Program Files\\Microsoft Visual Studio 9.0\\' 'Common7\\Tools\\') env['VS80COMNTOOLS'] = ('c:\\Program Files\\Microsoft Visual Studio 8.0\\' 'Common7\\Tools\\') # Run nacl/chrome integration tests. # Note that we have to add nacl_irt_test to --mode in order to get # inbrowser_test_runner to run. # TODO(mseaborn): Change it so that inbrowser_test_runner is not a # special case. cmd = scons + ['--verbose', '-k', 'platform=x86-%d' % bits, '--mode=opt-host,nacl,nacl_irt_test', 'chrome_browser_path=%s' % chrome_filename, ] if not options.integration_bot and not options.morenacl_bot: cmd.append('disable_flaky_tests=1') cmd.append('chrome_browser_tests') # Propagate path to JSON output if present. # Note that RunCommand calls sys.exit on errors, so potential errors # from one command won't be overwritten by another one. Overwriting # a successful results file with either success or failure is fine. 
if options.json_build_results_output_file: cmd.append('json_build_results_output_file=%s' % options.json_build_results_output_file) # Download the toolchain(s). RunCommand([python, os.path.join(nacl_dir, 'build', 'download_toolchains.py'), '--no-arm-trusted', '--no-pnacl', 'TOOL_REVISIONS'], nacl_dir, os.environ) CleanTempDir() if options.enable_newlib: RunTests('nacl-newlib', cmd, nacl_dir, env) if options.enable_glibc: RunTests('nacl-glibc', cmd + ['--nacl_glibc'], nacl_dir, env) def MakeCommandLineParser(): parser = optparse.OptionParser() parser.add_option('-m', '--mode', dest='mode', default='Debug', help='Debug/Release mode') parser.add_option('-j', dest='jobs', default=1, type='int', help='Number of parallel jobs') parser.add_option('--enable_newlib', dest='enable_newlib', default=-1, type='int', help='Run newlib tests?') parser.add_option('--enable_glibc', dest='enable_glibc', default=-1, type='int', help='Run glibc tests?') parser.add_option('--json_build_results_output_file', help='Path to a JSON file for machine-readable output.') # Deprecated, but passed to us by a script in the Chrome repo. # Replaced by --enable_glibc=0 parser.add_option('--disable_glibc', dest='disable_glibc', action='store_true', default=False, help='Do not test using glibc.') parser.add_option('--disable_tests', dest='disable_tests', type='string', default='', help='Comma-separated list of tests to omit') builder_name = os.environ.get('BUILDBOT_BUILDERNAME', '') is_integration_bot = 'nacl-chrome' in builder_name parser.add_option('--integration_bot', dest='integration_bot', type='int', default=int(is_integration_bot), help='Is this an integration bot?') is_morenacl_bot = ( 'More NaCl' in builder_name or 'naclmore' in builder_name) parser.add_option('--morenacl_bot', dest='morenacl_bot', type='int', default=int(is_morenacl_bot), help='Is this a morenacl bot?') # Not used on the bots, but handy for running the script manually. 
parser.add_option('--bits', dest='bits', action='store', type='int', default=None, help='32/64') parser.add_option('--browser_path', dest='browser_path', action='store', type='string', default=None, help='Path to the chrome browser.') parser.add_option('--buildbot', dest='buildbot', action='store', type='string', default=None, help='Value passed to scons as buildbot= option.') return parser def Main(): parser = MakeCommandLineParser() options, args = parser.parse_args() if options.integration_bot and options.morenacl_bot: parser.error('ERROR: cannot be both an integration bot and a morenacl bot') # Set defaults for enabling newlib. if options.enable_newlib == -1: options.enable_newlib = 1 # Set defaults for enabling glibc. if options.enable_glibc == -1: if options.integration_bot or options.morenacl_bot: options.enable_glibc = 1 else: options.enable_glibc = 0 if args: parser.error('ERROR: invalid argument') BuildAndTest(options) if __name__ == '__main__': Main()
bsd-3-clause
DeveloperJose/Vision-Rat-Brain
feature_matching_v1/graph.py
2
2648
# -*- coding: utf-8 -*- import pylab as plt import numpy as np from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg from PyQt5.QtWidgets import QSizePolicy class Graph(FigureCanvasQTAgg): def __init__(self, parent=None, width=10, height=10, dpi=100): figure = plt.Figure(figsize=(width, height), dpi=dpi) figure.subplots_adjust(left = 0.0, right = 1.0, top = 1.0, bottom = 0.0, wspace = 0.0, hspace = 0.0) super(Graph, self).__init__(figure) self.parent = parent self.figure = figure # Fill the area with the graph self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding) self.axes = figure.add_subplot(111) self.axes.set_xticks(()) self.axes.set_yticks(()) self.im = None self.is_interactive = True self.corners_callback = None self.corners = None self.scatter_plots = [] figure.canvas.mpl_connect("button_press_event", self.button_press_event) def clear_corners(self): self.corners = None # Clear scatterplot overlays if they exist if len(self.scatter_plots) > 0: for plot in self.scatter_plots: plot.remove() self.scatter_plots = [] self.draw() def button_press_event(self, event): if not self.is_interactive: return # Right-click erases scatterplot overlays if event.button == 3: self.clear_corners() # Left-click adds point markers elif event.button == 1 and event.xdata and event.ydata: p = np.array([event.xdata, event.ydata]) if self.corners is None: self.corners = np.array([p]) else: temp = np.vstack((self.corners, p)) self.corners = temp draw_plot = True if self.corners_callback: draw_plot = self.corners_callback() if draw_plot and self.corners is not None: # Draw new overlay plot = self.axes.scatter(*zip(*self.corners), c="r", s=2) self.scatter_plots.append(plot) self.draw() def imshow(self, im): # Fixes the issue of plot auto-rescaling # When overlaying scatterplot points self.axes.set_xlim([0, im.shape[1]]) self.axes.set_ylim([im.shape[0], 0]) self.im = im self.axes.imshow(im) self.draw()
mit
kcyu2014/ml_project2
project2/utils/images2patches.py
1
5530
""" This module processes the dataset Massachusetts obained from https://www.cs.toronto.edu/~vmnih/data/. The original dataset contains sattelite images of Massachusetts and its surroundings. The images are ofsize 1500x1500, 3 channels, TIFF format. The dataset also contains separate labels for roads and buildings. This mdule only processes the road dataset. Processing: Adjusted for the needs of Project 2, Machine Learning course, EPFL (fall 2016). Since the original sattelite images are of different zoom level and size than the dataset provided for the project, it needs to be rescaled and cropped (both the sattelite image and its corresponding mask). From each original image the non-overlaping patches are taken and only those that contain at least `maskWhitePxRatioTh` * 100 percent of roads are kept. The resulting patches are stored in `outputPath` directory. """ from PIL import Image from matplotlib import pyplot as plt import os import numpy as np ######################################################################################################################## # INPUT PARAMETERS ######################################################################################################################## # Input dataset path. inputPath = '../../../ext_data/massachusetts/original/' outputPath = '../../../ext_data/massachusetts/patches/' mapDir = 'map' satDir = 'sat' # Threshold for all-white parts of the sattelite images - ratio of white pixels (intensity == 255). If the white/other # ratio is higher than this threshold, the image is dropped. whitePxRatioTh = 0.001 # Threshold of roads vs. background within mask patch - if the roads/background ratio is lower then this threshold, # the patch is dropped. maskWhitePxRatioTh = 0.005 # Upscale image and mask ratio. 
upscale = (2.0, 2.0) patchSize = (400, 400) ######################################################################################################################## # MAIN SCRIPT ######################################################################################################################## imagesFiles = [im for im in os.listdir(inputPath + satDir) if im.endswith('.tiff')] numFiles = len(imagesFiles) for idx, imgFile in enumerate(imagesFiles): print('Processing image {im} / {tot}'.format(im=idx + 1, tot=numFiles)) # Load satelite image. img = Image.open(inputPath + satDir + '/' + imgFile) assert(img.mode == 'RGB') # Get image size. imgSize = img.size # Convert image to grayscale. gsImg = img.convert(mode='L') hist = gsImg.histogram() whitePxRatio = float(hist[255]) / (imgSize[0] * imgSize[1]) # If the image contains no or insignificant white parts, process it further. if whitePxRatio < whitePxRatioTh: # Load ground truth road binary mask try: gtMask = Image.open(inputPath + mapDir + '/' + imgFile) except: print('Error: cannot open ground truth binary mask file {f}'.format(f=inputPath + mapDir + '/' + imgFile)) continue # Check that mask's size matches the corresponding image. assert(gtMask.size == imgSize) # Upscale the image and the mask. For upsampling, nearest neighbour (NEAREST) is used. # Another possible option is BICUBIC (only for sattelite img), which, however, blurs the image. We need to experiment # to find out which one is better. newSize = (int(imgSize[0] * upscale[0]), int(imgSize[1] * upscale[1])) imgSize = newSize # Check that at least one patch can fit in the original image. assert(newSize[0] // patchSize[0] > 0) assert(newSize[1] // patchSize[1] > 0) img = img.resize(newSize, resample=Image.NEAREST) gtMask = gtMask.resize(newSize, resample=Image.NEAREST) # Generate x,y coordinates of centers of patches. 
left = 0 right = imgSize[0] - patchSize[0] top = 0 bottom = imgSize[1] - patchSize[1] numPatchesInRow = imgSize[0] // patchSize[0] numPatchesInCol = imgSize[1] // patchSize[1] centersInRow = np.linspace(left, right, numPatchesInRow, dtype=np.int32) centersInCol = np.linspace(top, bottom, numPatchesInCol, dtype=np.int32) # Coordinates of patches (left, top, right, bottom) patchesCoords = [(l, t, l + patchSize[0], t + patchSize[1]) for t in centersInCol for l in centersInRow] # Process each patch for pc in patchesCoords: # Get a patch of img and mask. patchMask = gtMask.crop(pc) patchImg = img.crop(pc) # Check correct size of a patch. assert(patchMask.size == patchSize) # Find the ratio of white pixels (roads) to black pixels (background). patchMaskHist = patchMask.histogram() maskWhitePxRatio = float(patchMaskHist[255]) / (patchSize[0] * patchSize[1]) # Check whether there is sufficient amount of roads in this patch and if so, save the patch (img and mask). if maskWhitePxRatio > maskWhitePxRatioTh: nameSuffix = '_(' + str(pc[1] + patchSize[1] // 2) + ', ' + str(pc[0] + patchSize[0] // 2) + ')' name = imgFile[:-5] + nameSuffix + '.tiff' patchImg.save(outputPath + satDir + '/' + name) patchMask.save(outputPath + mapDir + '/' + name)
mit
shareactorIO/pipeline
source.ml/jupyterhub.ml/notebooks/zz_old/TensorFlow/SkFlow_DEPRECATED/text_classification_character_cnn.py
6
3495
# Copyright 2015-present Scikit Flow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This is an example of using convolutional networks over characters for DBpedia dataset to predict class from description of an entity. This model is similar to one described in this paper: "Character-level Convolutional Networks for Text Classification" http://arxiv.org/abs/1509.01626 and is somewhat alternative to the Lua code from here: https://github.com/zhangxiangxiao/Crepe """ import numpy as np from sklearn import metrics import pandas import tensorflow as tf import skflow ### Training data # Download dbpedia_csv.tar.gz from # https://drive.google.com/folderview?id=0Bz8a_Dbh9Qhbfll6bVpmNUtUcFdjYmF2SEpmZUZUcVNiMUw1TWN6RDV3a0JHT3kxLVhVR2M # Unpack: tar -xvf dbpedia_csv.tar.gz train = pandas.read_csv('dbpedia_csv/train.csv', header=None) X_train, y_train = train[2], train[0] test = pandas.read_csv('dbpedia_csv/test.csv', header=None) X_test, y_test = test[2], test[0] ### Process vocabulary MAX_DOCUMENT_LENGTH = 100 char_processor = skflow.preprocessing.ByteProcessor(MAX_DOCUMENT_LENGTH) X_train = np.array(list(char_processor.fit_transform(X_train))) X_test = np.array(list(char_processor.transform(X_test))) ### Models N_FILTERS = 10 FILTER_SHAPE1 = [20, 256] FILTER_SHAPE2 = [20, N_FILTERS] POOLING_WINDOW = 4 POOLING_STRIDE = 2 def char_cnn_model(X, y): """Character level convolutional neural network model to predict classes.""" byte_list = 
tf.reshape(skflow.ops.one_hot_matrix(X, 256), [-1, MAX_DOCUMENT_LENGTH, 256, 1]) with tf.variable_scope('CNN_Layer1'): # Apply Convolution filtering on input sequence. conv1 = skflow.ops.conv2d(byte_list, N_FILTERS, FILTER_SHAPE1, padding='VALID') # Add a RELU for non linearity. conv1 = tf.nn.relu(conv1) # Max pooling across output of Convlution+Relu. pool1 = tf.nn.max_pool(conv1, ksize=[1, POOLING_WINDOW, 1, 1], strides=[1, POOLING_STRIDE, 1, 1], padding='SAME') # Transpose matrix so that n_filters from convolution becomes width. pool1 = tf.transpose(pool1, [0, 1, 3, 2]) with tf.variable_scope('CNN_Layer2'): # Second level of convolution filtering. conv2 = skflow.ops.conv2d(pool1, N_FILTERS, FILTER_SHAPE2, padding='VALID') # Max across each filter to get useful features for classification. pool2 = tf.squeeze(tf.reduce_max(conv2, 1), squeeze_dims=[1]) # Apply regular WX + B and classification. return skflow.models.logistic_regression(pool2, y) classifier = skflow.TensorFlowEstimator(model_fn=char_cnn_model, n_classes=15, steps=100, optimizer='Adam', learning_rate=0.01, continue_training=True) # Continuously train for 1000 steps & predict on test set. while True: classifier.fit(X_train, y_train) score = metrics.accuracy_score(y_test, classifier.predict(X_test)) print("Accuracy: %f" % score)
apache-2.0
FRESNA/PyPSA
pypsa/stats.py
1
8034
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Post-solving statistics of network. This module contains functions to anaylize an optimized network. Basic information of network can be summarized as well as constraint gaps can be double-checked. """ from .descriptors import (expand_series, get_switchable_as_dense as get_as_dense, nominal_attrs) import pandas as pd import logging idx = pd.IndexSlice # ============================================================================= # Network summary # ============================================================================= opt_name = {"Store": "e", "Line" : "s", "Transformer" : "s"} def calculate_costs(n): raise NotImplementedError mc = {} for c in n.iterate_comonents(): if 'marginal_cost' in c.df: mc[c] = c.df @ c.pnl['p'] def calculate_curtailment(n): max_pu = n.generators_t.p_max_pu avail = (max_pu.multiply(n.generators.p_nom_opt.loc[max_pu.columns]).sum() .groupby(n.generators.carrier).sum()) used = (n.generators_t.p[max_pu.columns].sum() .groupby(n.generators.carrier).sum()) return (((avail - used)/avail)*100).round(3) # and others from pypsa-eur # ============================================================================= # gap analysis # ============================================================================= def describe_storage_unit_contraints(n): """ Checks whether all storage units are balanced over time. This function requires the network to contain the separate variables p_store and p_dispatch, since they cannot be reconstructed from p. The latter results from times tau where p_store(tau) > 0 **and** p_dispatch(tau) > 0, which is allowed (even though not economic). Therefor p_store is necessarily equal to negative entries of p, vice versa for p_dispatch. 
""" sus = n.storage_units sus_i = sus.index if sus_i.empty: return sns = n.snapshots c = 'StorageUnit' pnl = n.pnl(c) description = {} eh = expand_series(n.snapshot_weightings, sus_i) stand_eff = expand_series(1-n.df(c).standing_loss, sns).T.pow(eh) dispatch_eff = expand_series(n.df(c).efficiency_dispatch, sns).T store_eff = expand_series(n.df(c).efficiency_store, sns).T inflow = get_as_dense(n, c, 'inflow') * eh spill = eh[pnl.spill.columns] * pnl.spill description['Spillage Limit'] = pd.Series({'min': (inflow[spill.columns] - spill).min().min()}) if 'p_store' in pnl: soc = pnl.state_of_charge store = store_eff * eh * pnl.p_store#.clip(upper=0) dispatch = 1/dispatch_eff * eh * pnl.p_dispatch#(lower=0) start = soc.iloc[-1].where(sus.cyclic_state_of_charge, sus.state_of_charge_initial) previous_soc = stand_eff * soc.shift().fillna(start) reconstructed = (previous_soc.add(store, fill_value=0) .add(inflow, fill_value=0) .add(-dispatch, fill_value=0) .add(-spill, fill_value=0)) description['SOC Balance StorageUnit'] = ((reconstructed - soc) .unstack().describe()) else: logging.info('Storage Unit SOC balance not reconstructable as no ' 'p_store and p_dispatch in n.storage_units_t.') return pd.concat(description, axis=1, sort=False) def describe_nodal_balance_constraint(n): """ Helper function to double check whether network flow is balanced """ network_injection = pd.concat( [n.pnl(c)[f'p{inout}'].rename(columns=n.df(c)[f'bus{inout}']) for inout in (0, 1) for c in ('Line', 'Transformer')], axis=1)\ .groupby(level=0, axis=1).sum() return (n.buses_t.p - network_injection).unstack().describe()\ .to_frame('Nodal Balance Constr.') def describe_upper_dispatch_constraints(n): ''' Recalculates the minimum gap between operational status and nominal capacity ''' description = {} key = ' Upper Limit' for c, attr in nominal_attrs.items(): dispatch_attr = 'p0' if c in ['Line', 'Transformer', 'Link'] else attr[0] description[c + key] = pd.Series({'min': (n.df(c)[attr + '_opt'] * 
get_as_dense(n, c, attr[0] + '_max_pu') - n.pnl(c)[dispatch_attr]).min().min()}) return pd.concat(description, axis=1) def describe_lower_dispatch_constraints(n): description = {} key = ' Lower Limit' for c, attr in nominal_attrs.items(): if c in ['Line', 'Transformer', 'Link']: dispatch_attr = 'p0' description[c] = pd.Series({'min': (n.df(c)[attr + '_opt'] * get_as_dense(n, c, attr[0] + '_max_pu') + n.pnl(c)[dispatch_attr]).min().min()}) else: dispatch_attr = attr[0] description[c + key] = pd.Series({'min': (-n.df(c)[attr + '_opt'] * get_as_dense(n, c, attr[0] + '_min_pu') + n.pnl(c)[dispatch_attr]).min().min()}) return pd.concat(description, axis=1) def describe_store_contraints(n): """ Checks whether all stores are balanced over time. """ stores = n.stores stores_i = stores.index if stores_i.empty: return sns = n.snapshots c = 'Store' pnl = n.pnl(c) eh = expand_series(n.snapshot_weightings, stores_i) stand_eff = expand_series(1-n.df(c).standing_loss, sns).T.pow(eh) start = pnl.e.iloc[-1].where(stores.e_cyclic, stores.e_initial) previous_e = stand_eff * pnl.e.shift().fillna(start) return (previous_e - pnl.p - pnl.e).unstack().describe()\ .to_frame('SOC Balance Store') def describe_cycle_constraints(n): weightings = n.lines.x_pu_eff.where(n.lines.carrier == 'AC', n.lines.r_pu_eff) def cycle_flow(sub): C = pd.DataFrame(sub.C.todense(), index=sub.lines_i()) if C.empty: return None C_weighted = 1e5 * C.mul(weightings[sub.lines_i()], axis=0) return C_weighted.apply(lambda ds: ds @ n.lines_t.p0[ds.index].T) return pd.concat([cycle_flow(sub) for sub in n.sub_networks.obj], axis=0)\ .unstack().describe().to_frame('Cycle Constr.') def constraint_stats(n, round_digit=1e-30): """ Post-optimization function to recalculate gap statistics of different constraints. For inequality constraints only the minimum of lhs - rhs, with lhs >= rhs is returned. 
""" return pd.concat([describe_cycle_constraints(n), describe_store_contraints(n), describe_storage_unit_contraints(n), describe_nodal_balance_constraint(n), describe_lower_dispatch_constraints(n), describe_upper_dispatch_constraints(n)], axis=1, sort=False) def check_constraints(n, tol=1e-3): """ Post-optimization test function to double-check most of the lopf constraints. For relevant equaility constraints, it test whether the deviation between lhs and rhs is below the given tolerance. For inequality constraints, it test whether the inequality is violated with a higher value then the tolerance. Parameters ---------- n : pypsa.Network tol : float Gap tolerance Returns AssertionError if tolerance is exceeded. """ n.lines['carrier'] = n.lines.bus0.map(n.buses.carrier) stats = constraint_stats(n).rename(index=str.title) condition = stats.T[['Min', 'Max']].query('Min < -@tol | Max > @tol').T assert condition.empty, (f'The following constraint(s) are exceeding the ' f'given tolerance of {tol}: \n{condition}')
gpl-3.0
facom/Sinfin
db/test.py
1
1814
#-*-coding:utf-8-*- from sinfin import * from matplotlib import use,font_manager as fm use('Agg') from matplotlib import colors,ticker,patches,pylab as plt from matplotlib.pyplot import cm from matplotlib.font_manager import FontProperties from matplotlib.transforms import offset_copy from numpy import * # CONNECT TO DATABASE sinfin,connection=loadDatabase() db=connection.cursor() # CONSTANTS H=1100.0 W=850.0 #FP=fm.FontProperties(family='monospace',size=24) FP=fm.FontProperties(style='normal', size=24) # CREATE CANVAS size=11 fig=plt.figure(figsize=(size,size*W/H),dpi=300) ax=fig.add_axes([0,0,1,1]) ax.axis('off') ax.set_xlim((0,1)) ax.set_ylim((0,1)) from matplotlib import patches as pat from matplotlib import textpath as tp def textBox(text,c,w,h): rect=pat.Rectangle(c,w,h,ec='k',fc='w') ax.add_artist(rect) rx,ry=rect.get_xy() cx=rx+rect.get_width()/2.0 cy=ry+rect.get_height()/2.0 ax.annotate(text,(cx,cy),color='k', ha='center',va='center', fontproperties=FP) def textProps(text): p=tp.TextPath((0,0),text,prop=FP) b=p.get_extents() return b.width/0.95,b.height/0.95 largo="El Perro hace pipi en la call" textBox(largo,(0.2,0.3),0.4,0.6) w,h=textProps(largo) print w/W text=u"Astronomía\nBuena" s=len(text) fs=36 fw=0.67*fs ax.text(0.5,0.9,text, horizontalalignment="left", fontproperties=FP, bbox=dict(ec='k',fc='w',pad=30), transform=ax.transAxes) p=tp.TextPath((0,0),text,prop=FP) b=p.get_extents() """ for l in linspace(0,1,11): print l ax.axhline(l,color='k') """ ax.axhline(0.9,color='r') ax.axhline(0.9+b.height/(0.95*H),color='r') ax.axvline(0.5,color='r') ax.axvline(0.5+b.width/(0.95*W),color='r') fig.savefig("pensum.png") plt.close()
gpl-3.0
toobaz/pandas
pandas/tests/io/formats/test_to_latex.py
2
19273
import codecs from datetime import datetime import pytest import pandas as pd from pandas import DataFrame, Series from pandas.util import testing as tm class TestToLatex: def test_to_latex_filename(self, float_frame): with tm.ensure_clean("test.tex") as path: float_frame.to_latex(path) with open(path, "r") as f: assert float_frame.to_latex() == f.read() # test with utf-8 and encoding option (GH 7061) df = DataFrame([["au\xdfgangen"]]) with tm.ensure_clean("test.tex") as path: df.to_latex(path, encoding="utf-8") with codecs.open(path, "r", encoding="utf-8") as f: assert df.to_latex() == f.read() # test with utf-8 without encoding option with tm.ensure_clean("test.tex") as path: df.to_latex(path) with codecs.open(path, "r", encoding="utf-8") as f: assert df.to_latex() == f.read() def test_to_latex(self, float_frame): # it works! float_frame.to_latex() df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]}) withindex_result = df.to_latex() withindex_expected = r"""\begin{tabular}{lrl} \toprule {} & a & b \\ \midrule 0 & 1 & b1 \\ 1 & 2 & b2 \\ \bottomrule \end{tabular} """ assert withindex_result == withindex_expected withoutindex_result = df.to_latex(index=False) withoutindex_expected = r"""\begin{tabular}{rl} \toprule a & b \\ \midrule 1 & b1 \\ 2 & b2 \\ \bottomrule \end{tabular} """ assert withoutindex_result == withoutindex_expected def test_to_latex_format(self, float_frame): # GH Bug #9402 float_frame.to_latex(column_format="ccc") df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]}) withindex_result = df.to_latex(column_format="ccc") withindex_expected = r"""\begin{tabular}{ccc} \toprule {} & a & b \\ \midrule 0 & 1 & b1 \\ 1 & 2 & b2 \\ \bottomrule \end{tabular} """ assert withindex_result == withindex_expected def test_to_latex_empty(self): df = DataFrame() result = df.to_latex() expected = r"""\begin{tabular}{l} \toprule Empty DataFrame Columns: Index([], dtype='object') Index: Index([], dtype='object') \\ \bottomrule \end{tabular} """ assert result == expected result 
= df.to_latex(longtable=True) expected = r"""\begin{longtable}{l} \toprule Empty DataFrame Columns: Index([], dtype='object') Index: Index([], dtype='object') \\ \end{longtable} """ assert result == expected def test_to_latex_with_formatters(self): df = DataFrame( { "datetime64": [ datetime(2016, 1, 1), datetime(2016, 2, 5), datetime(2016, 3, 3), ], "float": [1.0, 2.0, 3.0], "int": [1, 2, 3], "object": [(1, 2), True, False], } ) formatters = { "datetime64": lambda x: x.strftime("%Y-%m"), "float": lambda x: "[{x: 4.1f}]".format(x=x), "int": lambda x: "0x{x:x}".format(x=x), "object": lambda x: "-{x!s}-".format(x=x), "__index__": lambda x: "index: {x}".format(x=x), } result = df.to_latex(formatters=dict(formatters)) expected = r"""\begin{tabular}{llrrl} \toprule {} & datetime64 & float & int & object \\ \midrule index: 0 & 2016-01 & [ 1.0] & 0x1 & -(1, 2)- \\ index: 1 & 2016-02 & [ 2.0] & 0x2 & -True- \\ index: 2 & 2016-03 & [ 3.0] & 0x3 & -False- \\ \bottomrule \end{tabular} """ assert result == expected def test_to_latex_multiindex(self): df = DataFrame({("x", "y"): ["a"]}) result = df.to_latex() expected = r"""\begin{tabular}{ll} \toprule {} & x \\ {} & y \\ \midrule 0 & a \\ \bottomrule \end{tabular} """ assert result == expected result = df.T.to_latex() expected = r"""\begin{tabular}{lll} \toprule & & 0 \\ \midrule x & y & a \\ \bottomrule \end{tabular} """ assert result == expected df = DataFrame.from_dict( { ("c1", 0): pd.Series({x: x for x in range(4)}), ("c1", 1): pd.Series({x: x + 4 for x in range(4)}), ("c2", 0): pd.Series({x: x for x in range(4)}), ("c2", 1): pd.Series({x: x + 4 for x in range(4)}), ("c3", 0): pd.Series({x: x for x in range(4)}), } ).T result = df.to_latex() expected = r"""\begin{tabular}{llrrrr} \toprule & & 0 & 1 & 2 & 3 \\ \midrule c1 & 0 & 0 & 1 & 2 & 3 \\ & 1 & 4 & 5 & 6 & 7 \\ c2 & 0 & 0 & 1 & 2 & 3 \\ & 1 & 4 & 5 & 6 & 7 \\ c3 & 0 & 0 & 1 & 2 & 3 \\ \bottomrule \end{tabular} """ assert result == expected # GH 14184 df = df.T 
df.columns.names = ["a", "b"] result = df.to_latex() expected = r"""\begin{tabular}{lrrrrr} \toprule a & \multicolumn{2}{l}{c1} & \multicolumn{2}{l}{c2} & c3 \\ b & 0 & 1 & 0 & 1 & 0 \\ \midrule 0 & 0 & 4 & 0 & 4 & 0 \\ 1 & 1 & 5 & 1 & 5 & 1 \\ 2 & 2 & 6 & 2 & 6 & 2 \\ 3 & 3 & 7 & 3 & 7 & 3 \\ \bottomrule \end{tabular} """ assert result == expected # GH 10660 df = pd.DataFrame({"a": [0, 0, 1, 1], "b": list("abab"), "c": [1, 2, 3, 4]}) result = df.set_index(["a", "b"]).to_latex() expected = r"""\begin{tabular}{llr} \toprule & & c \\ a & b & \\ \midrule 0 & a & 1 \\ & b & 2 \\ 1 & a & 3 \\ & b & 4 \\ \bottomrule \end{tabular} """ assert result == expected result = df.groupby("a").describe().to_latex() expected = r"""\begin{tabular}{lrrrrrrrr} \toprule {} & \multicolumn{8}{l}{c} \\ {} & count & mean & std & min & 25\% & 50\% & 75\% & max \\ a & & & & & & & & \\ \midrule 0 & 2.0 & 1.5 & 0.707107 & 1.0 & 1.25 & 1.5 & 1.75 & 2.0 \\ 1 & 2.0 & 3.5 & 0.707107 & 3.0 & 3.25 & 3.5 & 3.75 & 4.0 \\ \bottomrule \end{tabular} """ assert result == expected def test_to_latex_multiindex_dupe_level(self): # see gh-14484 # # If an index is repeated in subsequent rows, it should be # replaced with a blank in the created table. This should # ONLY happen if all higher order indices (to the left) are # equal too. In this test, 'c' has to be printed both times # because the higher order index 'A' != 'B'. 
df = pd.DataFrame( index=pd.MultiIndex.from_tuples([("A", "c"), ("B", "c")]), columns=["col"] ) result = df.to_latex() expected = r"""\begin{tabular}{lll} \toprule & & col \\ \midrule A & c & NaN \\ B & c & NaN \\ \bottomrule \end{tabular} """ assert result == expected def test_to_latex_multicolumnrow(self): df = pd.DataFrame( { ("c1", 0): {x: x for x in range(5)}, ("c1", 1): {x: x + 5 for x in range(5)}, ("c2", 0): {x: x for x in range(5)}, ("c2", 1): {x: x + 5 for x in range(5)}, ("c3", 0): {x: x for x in range(5)}, } ) result = df.to_latex() expected = r"""\begin{tabular}{lrrrrr} \toprule {} & \multicolumn{2}{l}{c1} & \multicolumn{2}{l}{c2} & c3 \\ {} & 0 & 1 & 0 & 1 & 0 \\ \midrule 0 & 0 & 5 & 0 & 5 & 0 \\ 1 & 1 & 6 & 1 & 6 & 1 \\ 2 & 2 & 7 & 2 & 7 & 2 \\ 3 & 3 & 8 & 3 & 8 & 3 \\ 4 & 4 & 9 & 4 & 9 & 4 \\ \bottomrule \end{tabular} """ assert result == expected result = df.to_latex(multicolumn=False) expected = r"""\begin{tabular}{lrrrrr} \toprule {} & c1 & & c2 & & c3 \\ {} & 0 & 1 & 0 & 1 & 0 \\ \midrule 0 & 0 & 5 & 0 & 5 & 0 \\ 1 & 1 & 6 & 1 & 6 & 1 \\ 2 & 2 & 7 & 2 & 7 & 2 \\ 3 & 3 & 8 & 3 & 8 & 3 \\ 4 & 4 & 9 & 4 & 9 & 4 \\ \bottomrule \end{tabular} """ assert result == expected result = df.T.to_latex(multirow=True) expected = r"""\begin{tabular}{llrrrrr} \toprule & & 0 & 1 & 2 & 3 & 4 \\ \midrule \multirow{2}{*}{c1} & 0 & 0 & 1 & 2 & 3 & 4 \\ & 1 & 5 & 6 & 7 & 8 & 9 \\ \cline{1-7} \multirow{2}{*}{c2} & 0 & 0 & 1 & 2 & 3 & 4 \\ & 1 & 5 & 6 & 7 & 8 & 9 \\ \cline{1-7} c3 & 0 & 0 & 1 & 2 & 3 & 4 \\ \bottomrule \end{tabular} """ assert result == expected df.index = df.T.index result = df.T.to_latex(multirow=True, multicolumn=True, multicolumn_format="c") expected = r"""\begin{tabular}{llrrrrr} \toprule & & \multicolumn{2}{c}{c1} & \multicolumn{2}{c}{c2} & c3 \\ & & 0 & 1 & 0 & 1 & 0 \\ \midrule \multirow{2}{*}{c1} & 0 & 0 & 1 & 2 & 3 & 4 \\ & 1 & 5 & 6 & 7 & 8 & 9 \\ \cline{1-7} \multirow{2}{*}{c2} & 0 & 0 & 1 & 2 & 3 & 4 \\ & 1 & 5 & 6 & 7 & 8 & 9 \\ 
\cline{1-7} c3 & 0 & 0 & 1 & 2 & 3 & 4 \\ \bottomrule \end{tabular} """ assert result == expected def test_to_latex_escape(self): a = "a" b = "b" test_dict = {"co$e^x$": {a: "a", b: "b"}, "co^l1": {a: "a", b: "b"}} unescaped_result = DataFrame(test_dict).to_latex(escape=False) escaped_result = DataFrame(test_dict).to_latex() # default: escape=True unescaped_expected = r"""\begin{tabular}{lll} \toprule {} & co$e^x$ & co^l1 \\ \midrule a & a & a \\ b & b & b \\ \bottomrule \end{tabular} """ escaped_expected = r"""\begin{tabular}{lll} \toprule {} & co\$e\textasciicircum x\$ & co\textasciicircum l1 \\ \midrule a & a & a \\ b & b & b \\ \bottomrule \end{tabular} """ assert unescaped_result == unescaped_expected assert escaped_result == escaped_expected def test_to_latex_special_escape(self): df = DataFrame([r"a\b\c", r"^a^b^c", r"~a~b~c"]) escaped_result = df.to_latex() escaped_expected = r"""\begin{tabular}{ll} \toprule {} & 0 \\ \midrule 0 & a\textbackslash b\textbackslash c \\ 1 & \textasciicircum a\textasciicircum b\textasciicircum c \\ 2 & \textasciitilde a\textasciitilde b\textasciitilde c \\ \bottomrule \end{tabular} """ assert escaped_result == escaped_expected def test_to_latex_longtable(self, float_frame): float_frame.to_latex(longtable=True) df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]}) withindex_result = df.to_latex(longtable=True) withindex_expected = r"""\begin{longtable}{lrl} \toprule {} & a & b \\ \midrule \endhead \midrule \multicolumn{3}{r}{{Continued on next page}} \\ \midrule \endfoot \bottomrule \endlastfoot 0 & 1 & b1 \\ 1 & 2 & b2 \\ \end{longtable} """ assert withindex_result == withindex_expected withoutindex_result = df.to_latex(index=False, longtable=True) withoutindex_expected = r"""\begin{longtable}{rl} \toprule a & b \\ \midrule \endhead \midrule \multicolumn{2}{r}{{Continued on next page}} \\ \midrule \endfoot \bottomrule \endlastfoot 1 & b1 \\ 2 & b2 \\ \end{longtable} """ assert withoutindex_result == withoutindex_expected df = 
DataFrame({"a": [1, 2]}) with1column_result = df.to_latex(index=False, longtable=True) assert r"\multicolumn{1}" in with1column_result df = DataFrame({"a": [1, 2], "b": [3, 4], "c": [5, 6]}) with3columns_result = df.to_latex(index=False, longtable=True) assert r"\multicolumn{3}" in with3columns_result def test_to_latex_escape_special_chars(self): special_characters = ["&", "%", "$", "#", "_", "{", "}", "~", "^", "\\"] df = DataFrame(data=special_characters) observed = df.to_latex() expected = r"""\begin{tabular}{ll} \toprule {} & 0 \\ \midrule 0 & \& \\ 1 & \% \\ 2 & \$ \\ 3 & \# \\ 4 & \_ \\ 5 & \{ \\ 6 & \} \\ 7 & \textasciitilde \\ 8 & \textasciicircum \\ 9 & \textbackslash \\ \bottomrule \end{tabular} """ assert observed == expected def test_to_latex_no_header(self): # GH 7124 df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]}) withindex_result = df.to_latex(header=False) withindex_expected = r"""\begin{tabular}{lrl} \toprule 0 & 1 & b1 \\ 1 & 2 & b2 \\ \bottomrule \end{tabular} """ assert withindex_result == withindex_expected withoutindex_result = df.to_latex(index=False, header=False) withoutindex_expected = r"""\begin{tabular}{rl} \toprule 1 & b1 \\ 2 & b2 \\ \bottomrule \end{tabular} """ assert withoutindex_result == withoutindex_expected def test_to_latex_specified_header(self): # GH 7124 df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]}) withindex_result = df.to_latex(header=["AA", "BB"]) withindex_expected = r"""\begin{tabular}{lrl} \toprule {} & AA & BB \\ \midrule 0 & 1 & b1 \\ 1 & 2 & b2 \\ \bottomrule \end{tabular} """ assert withindex_result == withindex_expected withoutindex_result = df.to_latex(header=["AA", "BB"], index=False) withoutindex_expected = r"""\begin{tabular}{rl} \toprule AA & BB \\ \midrule 1 & b1 \\ 2 & b2 \\ \bottomrule \end{tabular} """ assert withoutindex_result == withoutindex_expected withoutescape_result = df.to_latex(header=["$A$", "$B$"], escape=False) withoutescape_expected = r"""\begin{tabular}{lrl} \toprule {} & $A$ & $B$ \\ 
\midrule 0 & 1 & b1 \\ 1 & 2 & b2 \\ \bottomrule \end{tabular} """ assert withoutescape_result == withoutescape_expected with pytest.raises(ValueError): df.to_latex(header=["A"]) def test_to_latex_decimal(self, float_frame): # GH 12031 float_frame.to_latex() df = DataFrame({"a": [1.0, 2.1], "b": ["b1", "b2"]}) withindex_result = df.to_latex(decimal=",") withindex_expected = r"""\begin{tabular}{lrl} \toprule {} & a & b \\ \midrule 0 & 1,0 & b1 \\ 1 & 2,1 & b2 \\ \bottomrule \end{tabular} """ assert withindex_result == withindex_expected def test_to_latex_series(self): s = Series(["a", "b", "c"]) withindex_result = s.to_latex() withindex_expected = r"""\begin{tabular}{ll} \toprule {} & 0 \\ \midrule 0 & a \\ 1 & b \\ 2 & c \\ \bottomrule \end{tabular} """ assert withindex_result == withindex_expected def test_to_latex_bold_rows(self): # GH 16707 df = pd.DataFrame({"a": [1, 2], "b": ["b1", "b2"]}) observed = df.to_latex(bold_rows=True) expected = r"""\begin{tabular}{lrl} \toprule {} & a & b \\ \midrule \textbf{0} & 1 & b1 \\ \textbf{1} & 2 & b2 \\ \bottomrule \end{tabular} """ assert observed == expected def test_to_latex_no_bold_rows(self): # GH 16707 df = pd.DataFrame({"a": [1, 2], "b": ["b1", "b2"]}) observed = df.to_latex(bold_rows=False) expected = r"""\begin{tabular}{lrl} \toprule {} & a & b \\ \midrule 0 & 1 & b1 \\ 1 & 2 & b2 \\ \bottomrule \end{tabular} """ assert observed == expected @pytest.mark.parametrize("name0", [None, "named0"]) @pytest.mark.parametrize("name1", [None, "named1"]) @pytest.mark.parametrize("axes", [[0], [1], [0, 1]]) def test_to_latex_multiindex_names(self, name0, name1, axes): # GH 18667 names = [name0, name1] mi = pd.MultiIndex.from_product([[1, 2], [3, 4]]) df = pd.DataFrame(-1, index=mi.copy(), columns=mi.copy()) for idx in axes: df.axes[idx].names = names idx_names = tuple(n or "{}" for n in names) idx_names_row = ( "{idx_names[0]} & {idx_names[1]} & & & & \\\\\n".format( idx_names=idx_names ) if (0 in axes and any(names)) else "" ) 
placeholder = "{}" if any(names) and 1 in axes else " " col_names = [n if (bool(n) and 1 in axes) else placeholder for n in names] observed = df.to_latex() expected = r"""\begin{tabular}{llrrrr} \toprule & %s & \multicolumn{2}{l}{1} & \multicolumn{2}{l}{2} \\ & %s & 3 & 4 & 3 & 4 \\ %s\midrule 1 & 3 & -1 & -1 & -1 & -1 \\ & 4 & -1 & -1 & -1 & -1 \\ 2 & 3 & -1 & -1 & -1 & -1 \\ & 4 & -1 & -1 & -1 & -1 \\ \bottomrule \end{tabular} """ % tuple( list(col_names) + [idx_names_row] ) assert observed == expected @pytest.mark.parametrize("one_row", [True, False]) def test_to_latex_multiindex_nans(self, one_row): # GH 14249 df = pd.DataFrame({"a": [None, 1], "b": [2, 3], "c": [4, 5]}) if one_row: df = df.iloc[[0]] observed = df.set_index(["a", "b"]).to_latex() expected = r"""\begin{tabular}{llr} \toprule & & c \\ a & b & \\ \midrule NaN & 2 & 4 \\ """ if not one_row: expected += r"""1.0 & 3 & 5 \\ """ expected += r"""\bottomrule \end{tabular} """ assert observed == expected def test_to_latex_non_string_index(self): # GH 19981 observed = pd.DataFrame([[1, 2, 3]] * 2).set_index([0, 1]).to_latex() expected = r"""\begin{tabular}{llr} \toprule & & 2 \\ 0 & 1 & \\ \midrule 1 & 2 & 3 \\ & 2 & 3 \\ \bottomrule \end{tabular} """ assert observed == expected def test_to_latex_midrule_location(self): # GH 18326 df = pd.DataFrame({"a": [1, 2]}) df.index.name = "foo" observed = df.to_latex(index_names=False) expected = r"""\begin{tabular}{lr} \toprule {} & a \\ \midrule 0 & 1 \\ 1 & 2 \\ \bottomrule \end{tabular} """ assert observed == expected def test_to_latex_multiindex_empty_name(self): # GH 18669 mi = pd.MultiIndex.from_product([[1, 2]], names=[""]) df = pd.DataFrame(-1, index=mi, columns=range(4)) observed = df.to_latex() expected = r"""\begin{tabular}{lrrrr} \toprule & 0 & 1 & 2 & 3 \\ {} & & & & \\ \midrule 1 & -1 & -1 & -1 & -1 \\ 2 & -1 & -1 & -1 & -1 \\ \bottomrule \end{tabular} """ assert observed == expected def test_to_latex_float_format_no_fixed_width(self): # GH 21625 df = 
DataFrame({"x": [0.19999]}) expected = r"""\begin{tabular}{lr} \toprule {} & x \\ \midrule 0 & 0.200 \\ \bottomrule \end{tabular} """ assert df.to_latex(float_format="%.3f") == expected # GH 22270 df = DataFrame({"x": [100.0]}) expected = r"""\begin{tabular}{lr} \toprule {} & x \\ \midrule 0 & 100 \\ \bottomrule \end{tabular} """ assert df.to_latex(float_format="%.0f") == expected def test_to_latex_multindex_header(self): # GH 16718 df = pd.DataFrame({"a": [0], "b": [1], "c": [2], "d": [3]}).set_index( ["a", "b"] ) observed = df.to_latex(header=["r1", "r2"]) expected = r"""\begin{tabular}{llrr} \toprule & & r1 & r2 \\ a & b & & \\ \midrule 0 & 1 & 2 & 3 \\ \bottomrule \end{tabular} """ assert observed == expected
bsd-3-clause
mjirik/lisa
experiments/precise_liver_statistics.py
1
8637
#! /usr/bin/python
# -*- coding: utf-8 -*-
"""
Experiment with a "color" (intensity) model of the liver.

For usage, try the help:

python experiments/20130919_liver_statistics.py --help

Measured features are added in get_features().
To add more input data, edit the corresponding yaml file.
"""

# make sibling project directories importable
import sys
import os.path

path_to_script = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(path_to_script, "../src/"))
sys.path.append(os.path.join(path_to_script, "../extern/pyseg_base/src"))
sys.path.append(os.path.join(path_to_script, "../extern/sed3/"))
sys.path.append(os.path.join(path_to_script, "../extern/lbp/"))

from loguru import logger

import numpy as np
import itertools
import argparse

# ----------------- my scripts --------
import sed3
import misc
import datareader
import matplotlib.pyplot as plt
import experiments


def feat_hist_by_segmentation(data3d_orig, data3d_seg, visualization=True):
    """Histograms of intensities inside vs. outside the segmentation.

    data3d_orig: CT volume (presumably Hounsfield units given the
        -512..512 bin range -- TODO confirm).
    data3d_seg: nonzero where the liver is.
    visualization: if True, plot both histograms.

    Returns a dict with both histograms and the shared bin edges.
    """
    bins = range(-512, 512, 1)
    hist1, bin_edges1 = np.histogram(data3d_orig[data3d_seg > 0], bins=bins)
    hist2, bin_edges2 = np.histogram(data3d_orig[data3d_seg <= 0], bins=bins)
    if visualization:
        plt_liver = plt.step(bin_edges1[1:], hist1)
        plt_rest = plt.step(bin_edges2[1:], hist2)
        plt.legend([plt_liver, plt_rest], ['Liver', 'Other tissue'])
        plt.show()
    fv_hist = {'hist1': hist1,
               'hist2': hist2,
               'bins': bins}
    return fv_hist


def feat_hist(data3d_orig):
    """Coarse intensity histogram of the whole volume, used as a feature
    vector.

    The original code tried several binnings (step 1, 10, 64); only the
    last assignment (step 100) was effective, so only it is kept.
    """
    bins = range(-512, 512, 100)
    hist1, bin_edges1 = np.histogram(data3d_orig, bins=bins)
    return hist1


def get_features(data3d_orig, data3d_seg, feature_fcn, visualization=True):
    """
    All our measurements are collected here. A histogram measurement is
    provided as an example (feat_hist).

    data3d_orig: CT volume
    data3d_seg: nonzero where the liver is
    feature_fcn: callable mapping the volume to a feature vector
    """
    featur = feature_fcn(data3d_orig)
    return featur


def sample_input_data():
    """Write a small example input yaml describing two sliver07 cases."""
    inputdata = {
        'basedir': '/home/mjirik/data/medical/',
        'data': [
            {'sliverseg': 'data_orig/sliver07/training-part1/liver-seg001.mhd',
             'sliverorig': 'data_orig/sliver07/training-part1/liver-orig001.mhd'},
            {'sliverseg': 'data_orig/sliver07/training-part1/liver-seg002.mhd',
             'sliverorig': 'data_orig/sliver07/training-part1/liver-orig002.mhd'},
        ]
    }
    sample_data_file = os.path.join(path_to_script,
                                    "20130919_liver_statistics.yaml")
    misc.obj_to_file(inputdata, sample_data_file, filetype='yaml')


def write_csv(data, filename="20130919_liver_statistics.yaml"):
    """Write ``data`` (a mapping of label -> list) as a ';'-separated csv.

    NOTE: text mode with ``newline=''`` is the csv module's documented
    requirement on Python 3; the original opened the file in ``'wb'``,
    which raises TypeError there.
    """
    import csv
    with open(filename, 'w', newline='') as csvfile:
        spamwriter = csv.writer(csvfile,
                                delimiter=';',
                                quotechar='"',
                                quoting=csv.QUOTE_MINIMAL)
        for label in data:
            spamwriter.writerow([label] + data[label])


def read_data_orig_and_seg(inputdata, i):
    """Load the original volume and its segmentation for the i-th entry
    of the yaml-described dataset.
    """
    reader = datareader.DataReader()
    data3d_a_path = os.path.join(inputdata['basedir'],
                                 inputdata['data'][i]['sliverseg'])
    data3d_a, metadata_a = reader.Get3DData(data3d_a_path,
                                            dataplus_format=False)
    data3d_b_path = os.path.join(inputdata['basedir'],
                                 inputdata['data'][i]['sliverorig'])
    data3d_b, metadata_b = reader.Get3DData(data3d_b_path,
                                            dataplus_format=False)
    # binarize the segmentation
    data3d_seg = (data3d_a > 0).astype(np.int8)
    data3d_orig = data3d_b
    return data3d_orig, data3d_seg


def one_experiment_setting_for_whole_dataset(inputdata, tile_shape,
                                             feature_fcn, classif_fcn, train,
                                             visualization=False):
    """Run one feature/classifier setting over (a prefix of) the dataset.

    NOTE(review): ``indata_len`` is deliberately capped at 3 and ``fvall``
    is never filled -- this is work in progress; the original carried the
    same TODO. ``tile_shape``, ``classif_fcn`` and ``train`` are accepted
    for interface compatibility but not yet used.
    """
    fvall = []
    indata_len = len(inputdata['data'])
    indata_len = 3  # debug cap from the original experiment
    for i in range(0, indata_len):
        data3d_orig, data3d_seg = read_data_orig_and_seg(inputdata, i)

        feat_hist_by_segmentation(data3d_orig, data3d_seg, visualization)

        if visualization:
            pyed = sed3.sed3(data3d_orig, contour=data3d_seg)
            pyed.show()

    # @TODO return something meaningful, fvall is empty
    return fvall


def make_product_list(list_of_feature_fcn, list_of_classifiers):
    """Cartesian product of feature functions and classifier classes."""
    featrs_plus_classifs = itertools.product(list_of_feature_fcn,
                                             list_of_classifiers)
    return featrs_plus_classifs


def experiment(path_to_yaml, featrs_plus_classifs, tile_shape,
               visualization=False, train=False):
    """Evaluate every (feature, classifier) pair on the yaml-described data.

    Returns a list of {'params': ..., 'fvall': ...} dicts, one per pair.
    """
    inputdata = misc.obj_from_file(path_to_yaml, filetype='yaml')

    results = []

    for fpc in featrs_plus_classifs:
        feature_fcn = fpc[0]
        classif_fcn = fpc[1]

        fvall = one_experiment_setting_for_whole_dataset(
            inputdata, tile_shape, feature_fcn, classif_fcn, train,
            visualization)

        result = {'params': str(fpc), 'fvall': fvall}
        results.append(result)

    print(results)
    return results


def main():
    # The original called logging.setLevel()/StreamHandler() on a loguru
    # logger without importing ``logging`` (NameError at startup). Use
    # loguru's own API instead; behavior (WARNING level to stderr) is kept.
    logger.remove()
    logger.add(sys.stderr, level="WARNING")

    parser = argparse.ArgumentParser(
        description='Compute features on liver and other tissue.')
    parser.add_argument('-si', '--sampleInput', action='store_true',
                        help='generate sample intput data',
                        default=False)
    parser.add_argument('-v', '--visualization', action='store_true',
                        help='Turn on visualization',
                        default=False)
    parser.add_argument('-i', '--input', help='input yaml file',
                        default="20130919_liver_statistics.yaml")
    parser.add_argument('-o', '--output', help='output file',
                        default="20130919_liver_statistics_results.pkl")
    parser.add_argument('-t', '--train', help='Training',
                        default=False, action='store_true')
    args = parser.parse_args()

    if args.sampleInput:
        sample_input_data()

    path_to_yaml = args.input

    list_of_feature_fcn = [feat_hist]

    from sklearn import svm
    from sklearn.naive_bayes import GaussianNB
    list_of_classifiers = [svm.SVC, GaussianNB]
    tile_shape = [1, 100, 100]

    featrs_plus_classifs = make_product_list(list_of_feature_fcn,
                                             list_of_classifiers)

    result = experiment(path_to_yaml, featrs_plus_classifs,
                        tile_shape=tile_shape,
                        visualization=args.visualization,
                        train=args.train)

    # store the results
    output_file = os.path.join(path_to_script, args.output)
    misc.obj_to_file(result, output_file, filetype='pickle')


if __name__ == "__main__":
    main()
bsd-3-clause
JT5D/scikit-learn
sklearn/preprocessing/tests/test_imputation.py
2
11013
"""Tests for sklearn.preprocessing.imputation.Imputer (mean / median /
most_frequent strategies, dense and sparse inputs, both axes)."""
import numpy as np
from scipy import sparse

from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true

from sklearn.preprocessing.imputation import Imputer
from sklearn.pipeline import Pipeline
from sklearn import grid_search
from sklearn import tree
from sklearn.random_projection import sparse_random_matrix


def _check_statistics(X, X_true, strategy, statistics, missing_values):
    """Utility function for testing imputation for a given strategy.

    Test:
        - along the two axes
        - with dense and sparse arrays

    Check that:
        - the statistics (mean, median, mode) are correct
        - the missing values are imputed correctly"""

    err_msg = "Parameters: strategy = %s, missing_values = %s, " \
              "axis = {0}, sparse = {1}" % (strategy, missing_values)

    # Normal matrix, axis = 0
    imputer = Imputer(missing_values, strategy=strategy, axis=0)
    X_trans = imputer.fit(X).transform(X.copy())
    assert_array_equal(imputer.statistics_, statistics,
                       err_msg.format(0, False))
    assert_array_equal(X_trans, X_true, err_msg.format(0, False))

    # Normal matrix, axis = 1
    # (a NaN statistic means a column had no observed values: axis=1
    # transform must then raise instead of imputing)
    imputer = Imputer(missing_values, strategy=strategy, axis=1)
    imputer.fit(X.transpose())
    if np.isnan(statistics).any():
        assert_raises(ValueError, imputer.transform, X.copy().transpose())
    else:
        X_trans = imputer.transform(X.copy().transpose())
        assert_array_equal(imputer.statistics_, statistics,
                           err_msg.format(1, False))
        assert_array_equal(X_trans, X_true.transpose(),
                           err_msg.format(1, False))

    # Sparse matrix, axis = 0
    imputer = Imputer(missing_values, strategy=strategy, axis=0)
    imputer.fit(sparse.csc_matrix(X))
    X_trans = imputer.transform(sparse.csc_matrix(X.copy()))

    if sparse.issparse(X_trans):
        X_trans = X_trans.toarray()

    assert_array_equal(imputer.statistics_, statistics,
                       err_msg.format(0, True))
    assert_array_equal(X_trans, X_true, err_msg.format(0, True))

    # Sparse matrix, axis = 1
    imputer = Imputer(missing_values, strategy=strategy, axis=1)
    imputer.fit(sparse.csc_matrix(X.transpose()))
    if np.isnan(statistics).any():
        assert_raises(ValueError, imputer.transform,
                      sparse.csc_matrix(X.copy().transpose()))
    else:
        X_trans = imputer.transform(sparse.csc_matrix(X.copy().transpose()))

        if sparse.issparse(X_trans):
            X_trans = X_trans.toarray()

        assert_array_equal(imputer.statistics_, statistics,
                           err_msg.format(1, True))
        assert_array_equal(X_trans, X_true.transpose(),
                           err_msg.format(1, True))


def test_imputation_shape():
    """Verify the shapes of the imputed matrix for different strategies."""
    X = np.random.randn(10, 2)
    X[::2] = np.nan

    for strategy in ['mean', 'median', 'most_frequent']:
        imputer = Imputer(strategy=strategy)
        X_imputed = imputer.fit_transform(X)
        assert_equal(X_imputed.shape, (10, 2))
        X_imputed = imputer.fit_transform(sparse.csr_matrix(X))
        assert_equal(X_imputed.shape, (10, 2))


def test_imputation_mean_median_only_zero():
    """Test imputation using the mean and median strategies, when
       missing_values == 0."""
    X = np.array([
        [np.nan, 0, 0, 0, 5],
        [np.nan, 1, 0, np.nan, 3],
        [np.nan, 2, 0, 0, 0],
        [np.nan, 6, 0, 5, 13],
    ])

    # expected dense output: columns with no observed values are dropped
    X_imputed_mean = np.array([
        [3, 5],
        [1, 3],
        [2, 7],
        [6, 13],
    ])
    statistics_mean = [np.nan, 3, np.nan, np.nan, 7]

    X_imputed_median = np.array([
        [2, 5, 5],
        [1, np.nan, 3],
        [2, 5, 5],
        [6, 5, 13],
    ])
    statistics_median = [np.nan, 2, np.nan, 5, 5]

    _check_statistics(X, X_imputed_mean, "mean", statistics_mean, 0)
    _check_statistics(X, X_imputed_median, "median", statistics_median, 0)


def test_imputation_mean_median():
    """Test imputation using the mean and median strategies, when
       missing_values != 0."""
    rng = np.random.RandomState(0)

    dim = 10
    dec = 10
    shape = (dim * dim, dim + dec)

    zeros = np.zeros(shape[0])
    values = np.arange(1, shape[0]+1)
    values[4::2] = - values[4::2]

    tests = [("mean", "NaN", lambda z, v, p: np.mean(np.hstack((z, v)))),
             ("mean", 0, lambda z, v, p: np.mean(v)),
             ("median", "NaN", lambda z, v, p: np.median(np.hstack((z, v)))),
             ("median", 0, lambda z, v, p: np.median(v))]

    for strategy, test_missing_values, true_value_fun in tests:
        X = np.empty(shape)
        X_true = np.empty(shape)
        true_statistics = np.empty(shape[1])

        # Create a matrix X with columns
        #    - with only zeros,
        #    - with only missing values
        #    - with zeros, missing values and values
        # And a matrix X_true containing all true values
        for j in range(shape[1]):
            nb_zeros = (j - dec + 1 > 0) * (j - dec + 1) * (j - dec + 1)
            nb_missing_values = max(shape[0] + dec * dec
                                    - (j + dec) * (j + dec), 0)
            nb_values = shape[0] - nb_zeros - nb_missing_values

            z = zeros[:nb_zeros]
            p = np.repeat(test_missing_values, nb_missing_values)
            v = values[rng.permutation(len(values))[:nb_values]]

            true_statistics[j] = true_value_fun(z, v, p)

            # Create the columns
            X[:, j] = np.hstack((v, z, p))

            if 0 == test_missing_values:
                # zeros count as missing here, so they get imputed too
                X_true[:, j] = np.hstack((v,
                                          np.repeat(
                                              true_statistics[j],
                                              nb_missing_values + nb_zeros)))
            else:
                X_true[:, j] = np.hstack((v,
                                          z,
                                          np.repeat(true_statistics[j],
                                                    nb_missing_values)))

            # Shuffle them the same way
            np.random.RandomState(j).shuffle(X[:, j])
            np.random.RandomState(j).shuffle(X_true[:, j])

        # Mean doesn't support columns containing NaNs, median does
        if strategy == "median":
            cols_to_keep = ~np.isnan(X_true).any(axis=0)
        else:
            cols_to_keep = ~np.isnan(X_true).all(axis=0)

        X_true = X_true[:, cols_to_keep]

        _check_statistics(X, X_true, strategy,
                          true_statistics, test_missing_values)


def test_imputation_most_frequent():
    """Test imputation using the most-frequent strategy."""
    X = np.array([
        [-1, -1, 0, 5],
        [-1, 2, -1, 3],
        [-1, 1, 3, -1],
        [-1, 2, 3, 7],
    ])

    X_true = np.array([
        [2, 0, 5],
        [2, 3, 3],
        [1, 3, 3],
        [2, 3, 7],
    ])

    # scipy.stats.mode, used in Imputer, doesn't return the first most
    # frequent as promised in the doc but the lowest most frequent. When this
    # test will fail after an update of scipy, Imputer will need to be updated
    # to be consistent with the new (correct) behaviour
    _check_statistics(X, X_true, "most_frequent", [np.nan, 2, 3, 3], -1)


def test_imputation_pipeline_grid_search():
    """Test imputation within a pipeline + gridsearch."""
    pipeline = Pipeline([('imputer', Imputer(missing_values=0)),
                         ('tree', tree.DecisionTreeRegressor(random_state=0))])

    parameters = {
        'imputer__strategy': ["mean", "median", "most_frequent"],
        'imputer__axis': [0, 1]
    }

    l = 100
    X = sparse_random_matrix(l, l, density=0.10)
    Y = sparse_random_matrix(l, 1, density=0.10).todense()
    gs = grid_search.GridSearchCV(pipeline, parameters)
    gs.fit(X, Y)


def test_imputation_pickle():
    """Test for pickling imputers."""
    import pickle

    l = 100
    X = sparse_random_matrix(l, l, density=0.10)

    for strategy in ["mean", "median", "most_frequent"]:
        imputer = Imputer(missing_values=0, strategy=strategy)
        imputer.fit(X)

        imputer_pickled = pickle.loads(pickle.dumps(imputer))

        assert_array_equal(imputer.transform(X.copy()),
                           imputer_pickled.transform(X.copy()),
                           "Fail to transform the data after pickling "
                           "(strategy = %s)" % (strategy))


def test_imputation_copy():
    """Test imputation with copy"""
    X_orig = sparse_random_matrix(5, 5, density=0.75, random_state=0)

    # copy=True, dense => copy
    X = X_orig.copy().todense()
    imputer = Imputer(missing_values=0, strategy="mean", copy=True)
    Xt = imputer.fit(X).transform(X)
    Xt[0, 0] = -1
    assert_false(np.all(X == Xt))

    # copy=True, sparse csr => copy
    X = X_orig.copy()
    imputer = Imputer(missing_values=X.data[0], strategy="mean", copy=True)
    Xt = imputer.fit(X).transform(X)
    Xt.data[0] = -1
    assert_false(np.all(X.data == Xt.data))

    # copy=False, dense => no copy
    X = X_orig.copy().todense()
    imputer = Imputer(missing_values=0, strategy="mean", copy=False)
    Xt = imputer.fit(X).transform(X)
    Xt[0, 0] = -1
    assert_true(np.all(X == Xt))

    # copy=False, sparse csr, axis=1 => no copy
    X = X_orig.copy()
    imputer = Imputer(missing_values=X.data[0], strategy="mean",
                      copy=False, axis=1)
    Xt = imputer.fit(X).transform(X)
    Xt.data[0] = -1
    assert_true(np.all(X.data == Xt.data))

    # copy=False, sparse csc, axis=0 => no copy
    X = X_orig.copy().tocsc()
    imputer = Imputer(missing_values=X.data[0], strategy="mean",
                      copy=False, axis=0)
    Xt = imputer.fit(X).transform(X)
    Xt.data[0] = -1
    assert_true(np.all(X.data == Xt.data))

    # copy=False, sparse csr, axis=0 => copy
    X = X_orig.copy()
    imputer = Imputer(missing_values=X.data[0], strategy="mean",
                      copy=False, axis=0)
    Xt = imputer.fit(X).transform(X)
    Xt.data[0] = -1
    assert_false(np.all(X.data == Xt.data))

    # copy=False, sparse csc, axis=1 => copy
    X = X_orig.copy().tocsc()
    imputer = Imputer(missing_values=X.data[0], strategy="mean",
                      copy=False, axis=1)
    Xt = imputer.fit(X).transform(X)
    Xt.data[0] = -1
    assert_false(np.all(X.data == Xt.data))

    # copy=False, sparse csr, axis=1, missing_values=0 => copy
    X = X_orig.copy()
    imputer = Imputer(missing_values=0, strategy="mean", copy=False, axis=1)
    Xt = imputer.fit(X).transform(X)
    assert_false(sparse.issparse(Xt))

    # Note: If X is sparse and if missing_values=0, then a (dense) copy of X is
    # made, even if copy=False.
bsd-3-clause
sthenc/nc_packer
visual/mfcc_visualize.py
1
2726
#!/usr/bin/python
# Visual comparison of MFCC features computed directly from CHiME wav files
# against the ones stored in a .nc (netCDF) database: four spectrogram-style
# panels (clean/noisy x recomputed/stored) saved as a timestamped PNG.
import numpy as np
import matplotlib as ml
import matplotlib.pyplot as plt
import features
import scipy.io.wavfile as wav
from wav2mfcc import wav2mfcc

#if __name__ == "__main__":

datadir = '/mnt/data/Fer/diplomski/nc_packer/test_data/PCCdata16kHz/devel/isolated/'
cleandir = 'clean/'
lilnoise = 'm3dB/'

#fajl = 's1_bgaa9a.wav'
fajl = 's2_sgiv1p.wav'

# Input #0, wav, from 's1_bgaa9a.wav':
#   Duration: 00:00:01.25, bitrate: 512 kb/s
#     Stream #0.0: Audio: pcm_s16le, 16000 Hz, stereo, s16, 512 kb/s

# compute my mfcc features for this file, normalized with the precomputed
# per-coefficient dataset statistics (means/stddevs from mean_stddev)
from mean_stddev import means, stddevs
#means[0] = 0
#stddevs[0] = 1

mfcc_clean_not_norm = wav2mfcc(datadir + cleandir + fajl)
mfcc_clean = (mfcc_clean_not_norm - means) / stddevs

mfcc_noisy_not_norm = wav2mfcc(datadir + lilnoise + fajl)
mfcc_noisy = (mfcc_noisy_not_norm - means) / stddevs

# my .nc file: read the stored feature matrices for the first sequence
import netCDF4
# ds = netCDF4.Dataset('./train_1_speaker.nc')
ds = netCDF4.Dataset('./val_1_speaker.nc')
#print(ds)

dslen = ds.variables['seqLengths']
dsin = ds.variables['inputs']
dsout = ds.variables['targetPatterns']

print(dslen[:])

# slice out the first sequence (frames 0 .. seqLengths[0])
exstart = 0  #dslen[0]
exend = exstart + dslen[0]  #dslen[1]

ds_mfcc_clean = dsin [exstart:exend, :]
ds_mfcc_noisy = dsout[exstart:exend, :]

# plot the mfcc features from the original chime file and compare to the
# ones in the .nc database; shared vmin/vmax so colors are comparable
ml.rcParams['image.cmap'] = 'nipy_spectral'

fig, axes = plt.subplots(nrows=4, ncols=1)

vmin = min(min(np.amin(mfcc_clean), np.amin(mfcc_noisy)),
           min(np.amin(ds_mfcc_clean), np.amin(ds_mfcc_noisy)))
vmax = max(max(np.amax(mfcc_clean), np.amax(mfcc_noisy)),
           max(np.amax(ds_mfcc_clean), np.amax(ds_mfcc_noisy)))

print(min(np.amin(mfcc_clean), np.amin(mfcc_noisy)))
print(max(np.amax(mfcc_clean), np.amax(mfcc_noisy)))

axes[0].pcolormesh(mfcc_clean.transpose(), vmin=vmin, vmax=vmax)
axes[0].set_aspect('equal')

axes[1].pcolormesh(mfcc_noisy.transpose(), vmin=vmin, vmax=vmax)
axes[1].set_aspect('equal')

axes[2].pcolormesh(ds_mfcc_clean.transpose(), vmin=vmin, vmax=vmax)
axes[2].set_aspect('equal')

a= axes[3].pcolormesh(ds_mfcc_noisy.transpose(), vmin=vmin, vmax=vmax)
axes[3].set_aspect('equal')

# add colorbar on an invisible helper axes at the right edge
fig.subplots_adjust(right=0.8)
cax = fig.add_axes([0.80, 0.05, 0.1, 0.9], aspect='auto', frameon=False,
                   xticks=[], yticks=[])
cax.get_xaxis().set_visible(False)
cax.get_yaxis().set_visible(False)
cax.patch.set_alpha(0)
cax.set_frame_on(False)
plt.colorbar(a, orientation='vertical', ax=cax, aspect=60)

plt.tight_layout()
#plt.show()

# save the comparison figure with a timestamped file name
import time
import datetime
ts = time.time()
stamp = datetime.datetime.fromtimestamp(ts).strftime('%Y_%m_%d_%H_%M_%S')
plt.savefig('figures/mfcc_compare_' + stamp + '.png', bbox_inches='tight')
mit
dsm054/pandas
pandas/plotting/_style.py
2
5527
# being a bit too dynamic
# pylint: disable=E1101
from __future__ import division

import warnings
from contextlib import contextmanager

import numpy as np

from pandas.core.dtypes.common import is_list_like
from pandas.compat import lrange, lmap
import pandas.compat as compat


def _get_standard_colors(num_colors=None, colormap=None, color_type='default',
                         color=None):
    """Return a list of ``num_colors`` colors for plotting.

    Precedence: explicit ``color`` wins over ``colormap``; with neither,
    colors come from matplotlib's prop cycle (``color_type='default'``)
    or are generated deterministically per column (``color_type='random'``).
    The result is cycled/padded so its length equals ``num_colors``.
    """
    import matplotlib.pyplot as plt

    if color is None and colormap is not None:
        if isinstance(colormap, compat.string_types):
            import matplotlib.cm as cm
            cmap = colormap
            colormap = cm.get_cmap(colormap)
            if colormap is None:
                raise ValueError("Colormap {0} is not recognized".format(cmap))
        # sample the colormap uniformly
        colors = lmap(colormap, np.linspace(0, 1, num=num_colors))
    elif color is not None:
        if colormap is not None:
            warnings.warn("'color' and 'colormap' cannot be used "
                          "simultaneously. Using 'color'")
        colors = list(color) if is_list_like(color) else color
    else:
        if color_type == 'default':
            # need to call list() on the result to copy so we don't
            # modify the global rcParams below
            try:
                colors = [c['color']
                          for c in list(plt.rcParams['axes.prop_cycle'])]
            except KeyError:
                colors = list(plt.rcParams.get('axes.color_cycle',
                                               list('bgrcmyk')))
            if isinstance(colors, compat.string_types):
                colors = list(colors)
        elif color_type == 'random':
            import pandas.core.common as com

            def random_color(column):
                """ Returns a random color represented as a list of length 3"""
                # GH17525 use common._random_state to avoid resetting the seed
                rs = com.random_state(column)
                return rs.rand(3).tolist()

            colors = lmap(random_color, lrange(num_colors))
        else:
            raise ValueError("color_type must be either 'default' or 'random'")

    if isinstance(colors, compat.string_types):
        import matplotlib.colors
        conv = matplotlib.colors.ColorConverter()

        def _maybe_valid_colors(colors):
            # True if every entry parses as a matplotlib color
            try:
                [conv.to_rgba(c) for c in colors]
                return True
            except ValueError:
                return False

        # check whether the string can be convertible to single color
        maybe_single_color = _maybe_valid_colors([colors])
        # check whether each character can be convertible to colors
        maybe_color_cycle = _maybe_valid_colors(list(colors))
        if maybe_single_color and maybe_color_cycle and len(colors) > 1:
            # ambiguous strings like 'b1' resolve to a prop-cycle entry
            hex_color = [c['color']
                         for c in list(plt.rcParams['axes.prop_cycle'])]
            colors = [hex_color[int(colors[1])]]
        elif maybe_single_color:
            colors = [colors]
        else:
            # ``colors`` is regarded as color cycle.
            # mpl will raise error any of them is invalid
            pass

    if len(colors) != num_colors:
        # repeat the cycle until exactly num_colors entries are available
        try:
            multiple = num_colors // len(colors) - 1
        except ZeroDivisionError:
            raise ValueError("Invalid color argument: ''")
        mod = num_colors % len(colors)

        colors += multiple * colors
        colors += colors[:mod]

    return colors


class _Options(dict):
    """
    Stores pandas plotting options.
    Allows for parameter aliasing so you can just use parameter
    names that are the same as the plot function parameters, but is stored in a
    canonical format that makes it easy to breakdown into groups later
    """

    # alias so the names are same as plotting method parameter names
    _ALIASES = {'x_compat': 'xaxis.compat'}
    _DEFAULT_KEYS = ['xaxis.compat']

    def __init__(self, deprecated=False):
        self._deprecated = deprecated
        # self['xaxis.compat'] = False
        super(_Options, self).__setitem__('xaxis.compat', False)

    def __getitem__(self, key):
        key = self._get_canonical_key(key)
        if key not in self:
            raise ValueError(
                '{key} is not a valid pandas plotting option'.format(key=key))
        return super(_Options, self).__getitem__(key)

    def __setitem__(self, key, value):
        key = self._get_canonical_key(key)
        return super(_Options, self).__setitem__(key, value)

    def __delitem__(self, key):
        key = self._get_canonical_key(key)
        if key in self._DEFAULT_KEYS:
            raise ValueError(
                'Cannot remove default parameter {key}'.format(key=key))
        return super(_Options, self).__delitem__(key)

    def __contains__(self, key):
        key = self._get_canonical_key(key)
        return super(_Options, self).__contains__(key)

    def reset(self):
        """
        Reset the option store to its initial state

        Returns
        -------
        None
        """
        self.__init__()

    def _get_canonical_key(self, key):
        # resolve aliases like 'x_compat' -> 'xaxis.compat'
        return self._ALIASES.get(key, key)

    @contextmanager
    def use(self, key, value):
        """
        Temporarily set a parameter value using the with statement.
        Aliasing allowed.
        """
        old_value = self[key]
        try:
            self[key] = value
            yield self
        finally:
            self[key] = old_value


plot_params = _Options()
bsd-3-clause
doublsky/MLProfile
benchmark/bench_bayes.py
1
1231
""" Benchmark Naive Bayes """ from util import * import numpy as np import argparse from time import time from sklearn import naive_bayes import sys if __name__ == '__main__': parser = argparse.ArgumentParser(description="Benchmark naive bayes.") parser.add_argument('-ns', default=1000000, type=int, help="Number of samples to be generated and fit.") parser.add_argument('-nf', default=100, type=int, help="Number of features to be generated and fit.") args = parser.parse_args() print >> sys.stderr, "- loading data..." start_time = time() X_name = "dataset/clfX_ns"+str(args.ns)+"_nf"+str(args.nf)+".npy" X = np.load(X_name) y_name = "dataset/clfy_ns"+str(args.ns)+"_nf"+str(args.nf)+".npy" y = np.load(y_name) data_loading_time = time() - start_time print >> sys.stderr, "- data loading time:", data_loading_time print >> sys.stderr, "- benchmark naive bayes with", args.ns, "samples,", args.nf, "features..." clf = naive_bayes.GaussianNB() start_time = time() for _ in range(10): clf.fit(X, y) fit_time = time() - start_time print >> sys.stderr, "- benchmark finished, fitting time:", fit_time
mit
ychfan/tensorflow
tensorflow/python/estimator/canned/linear_testing_utils.py
20
67865
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utils for testing linear estimators.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import math import os import shutil import tempfile import numpy as np import six from tensorflow.core.example import example_pb2 from tensorflow.core.example import feature_pb2 from tensorflow.python.client import session as tf_session from tensorflow.python.estimator import estimator from tensorflow.python.estimator import run_config from tensorflow.python.estimator.canned import linear from tensorflow.python.estimator.canned import metric_keys from tensorflow.python.estimator.export import export from tensorflow.python.estimator.inputs import numpy_io from tensorflow.python.estimator.inputs import pandas_io from tensorflow.python.feature_column import feature_column as feature_column_lib from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor from tensorflow.python.ops import check_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import data_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import parsing_ops from tensorflow.python.ops import state_ops from tensorflow.python.ops import 
variable_scope from tensorflow.python.ops import variables from tensorflow.python.platform import gfile from tensorflow.python.platform import test from tensorflow.python.summary.writer import writer_cache from tensorflow.python.training import checkpoint_utils from tensorflow.python.training import input as input_lib from tensorflow.python.training import optimizer from tensorflow.python.training import queue_runner from tensorflow.python.training import saver from tensorflow.python.training import session_run_hook try: # pylint: disable=g-import-not-at-top import pandas as pd HAS_PANDAS = True except IOError: # Pandas writes a temporary file during import. If it fails, don't use pandas. HAS_PANDAS = False except ImportError: HAS_PANDAS = False # pylint rules which are disabled by default for test files. # pylint: disable=invalid-name,protected-access,missing-docstring # Names of variables created by model. AGE_WEIGHT_NAME = 'linear/linear_model/age/weights' HEIGHT_WEIGHT_NAME = 'linear/linear_model/height/weights' BIAS_NAME = 'linear/linear_model/bias_weights' LANGUAGE_WEIGHT_NAME = 'linear/linear_model/language/weights' def assert_close(expected, actual, rtol=1e-04, name='assert_close'): with ops.name_scope(name, 'assert_close', (expected, actual, rtol)) as scope: expected = ops.convert_to_tensor(expected, name='expected') actual = ops.convert_to_tensor(actual, name='actual') rdiff = math_ops.abs(expected - actual, 'diff') / math_ops.abs(expected) rtol = ops.convert_to_tensor(rtol, name='rtol') return check_ops.assert_less( rdiff, rtol, data=('Condition expected =~ actual did not hold element-wise:' 'expected = ', expected, 'actual = ', actual, 'rdiff = ', rdiff, 'rtol = ', rtol,), name=scope) def save_variables_to_ckpt(model_dir): init_all_op = [variables.global_variables_initializer()] with tf_session.Session() as sess: sess.run(init_all_op) saver.Saver().save(sess, os.path.join(model_dir, 'model.ckpt')) def queue_parsed_features(feature_map): 
tensors_to_enqueue = [] keys = [] for key, tensor in six.iteritems(feature_map): keys.append(key) tensors_to_enqueue.append(tensor) queue_dtypes = [x.dtype for x in tensors_to_enqueue] input_queue = data_flow_ops.FIFOQueue(capacity=100, dtypes=queue_dtypes) queue_runner.add_queue_runner( queue_runner.QueueRunner(input_queue, [input_queue.enqueue(tensors_to_enqueue)])) dequeued_tensors = input_queue.dequeue() return {keys[i]: dequeued_tensors[i] for i in range(len(dequeued_tensors))} def sorted_key_dict(unsorted_dict): return {k: unsorted_dict[k] for k in sorted(unsorted_dict)} def sigmoid(x): return 1 / (1 + np.exp(-1.0 * x)) class CheckPartitionerVarHook(session_run_hook.SessionRunHook): """A `SessionRunHook` to check a partitioned variable.""" def __init__(self, test_case, var_name, var_dim, partitions): self._test_case = test_case self._var_name = var_name self._var_dim = var_dim self._partitions = partitions def begin(self): with variable_scope.variable_scope( variable_scope.get_variable_scope()) as scope: scope.reuse_variables() partitioned_weight = variable_scope.get_variable( self._var_name, shape=(self._var_dim, 1)) self._test_case.assertTrue( isinstance(partitioned_weight, variables.PartitionedVariable)) for part in partitioned_weight: self._test_case.assertEqual(self._var_dim // self._partitions, part.get_shape()[0]) class BaseLinearRegressorPartitionerTest(object): def __init__(self, linear_regressor_fn): self._linear_regressor_fn = linear_regressor_fn def setUp(self): self._model_dir = tempfile.mkdtemp() def tearDown(self): if self._model_dir: writer_cache.FileWriterCache.clear() shutil.rmtree(self._model_dir) def testPartitioner(self): x_dim = 64 partitions = 4 def _partitioner(shape, dtype): del dtype # unused; required by Fn signature. # Only partition the embedding tensor. 
return [partitions, 1] if shape[0] == x_dim else [1] regressor = self._linear_regressor_fn( feature_columns=(feature_column_lib.categorical_column_with_hash_bucket( 'language', hash_bucket_size=x_dim),), partitioner=_partitioner, model_dir=self._model_dir) def _input_fn(): return { 'language': sparse_tensor.SparseTensor( values=['english', 'spanish'], indices=[[0, 0], [0, 1]], dense_shape=[1, 2]) }, [[10.]] hook = CheckPartitionerVarHook(self, LANGUAGE_WEIGHT_NAME, x_dim, partitions) regressor.train(input_fn=_input_fn, steps=1, hooks=[hook]) def testDefaultPartitionerWithMultiplePsReplicas(self): partitions = 2 # This results in weights larger than the default partition size of 64M, # so partitioned weights are created (each weight uses 4 bytes). x_dim = 32 << 20 class FakeRunConfig(run_config.RunConfig): @property def num_ps_replicas(self): return partitions # Mock the device setter as ps is not available on test machines. with test.mock.patch.object( estimator, '_get_replica_device_setter', return_value=lambda _: '/cpu:0'): linear_regressor = self._linear_regressor_fn( feature_columns=( feature_column_lib.categorical_column_with_hash_bucket( 'language', hash_bucket_size=x_dim),), config=FakeRunConfig(), model_dir=self._model_dir) def _input_fn(): return { 'language': sparse_tensor.SparseTensor( values=['english', 'spanish'], indices=[[0, 0], [0, 1]], dense_shape=[1, 2]) }, [[10.]] hook = CheckPartitionerVarHook(self, LANGUAGE_WEIGHT_NAME, x_dim, partitions) linear_regressor.train(input_fn=_input_fn, steps=1, hooks=[hook]) # TODO(b/36813849): Add tests with dynamic shape inputs using placeholders. 
class BaseLinearRegressorEvaluationTest(object):
  """Tests evaluate() of linear regressors restored from hand-built ckpts."""

  def __init__(self, linear_regressor_fn):
    self._linear_regressor_fn = linear_regressor_fn

  def setUp(self):
    self._model_dir = tempfile.mkdtemp()

  def tearDown(self):
    if self._model_dir:
      writer_cache.FileWriterCache.clear()
      shutil.rmtree(self._model_dir)

  def test_evaluation_for_simple_data(self):
    # Write known weights/bias into a checkpoint so the expected loss can be
    # computed by hand.
    with ops.Graph().as_default():
      variables.Variable([[11.0]], name=AGE_WEIGHT_NAME)
      variables.Variable([2.0], name=BIAS_NAME)
      variables.Variable(
          100, name=ops.GraphKeys.GLOBAL_STEP, dtype=dtypes.int64)
      save_variables_to_ckpt(self._model_dir)

    linear_regressor = self._linear_regressor_fn(
        feature_columns=(feature_column_lib.numeric_column('age'),),
        model_dir=self._model_dir)
    eval_metrics = linear_regressor.evaluate(
        input_fn=lambda: ({'age': ((1,),)}, ((10.,),)), steps=1)

    # Logit is (1. * 11.0 + 2.0) = 13, while label is 10. Loss is 3**2 = 9.
    self.assertDictEqual({
        metric_keys.MetricKeys.LOSS: 9.,
        metric_keys.MetricKeys.LOSS_MEAN: 9.,
        ops.GraphKeys.GLOBAL_STEP: 100
    }, eval_metrics)

  def test_evaluation_batch(self):
    """Tests evaluation for batch_size==2."""
    with ops.Graph().as_default():
      variables.Variable([[11.0]], name=AGE_WEIGHT_NAME)
      variables.Variable([2.0], name=BIAS_NAME)
      variables.Variable(
          100, name=ops.GraphKeys.GLOBAL_STEP, dtype=dtypes.int64)
      save_variables_to_ckpt(self._model_dir)

    linear_regressor = self._linear_regressor_fn(
        feature_columns=(feature_column_lib.numeric_column('age'),),
        model_dir=self._model_dir)
    eval_metrics = linear_regressor.evaluate(
        input_fn=lambda: ({'age': ((1,), (1,))}, ((10.,), (10.,))), steps=1)

    # Logit is (1. * 11.0 + 2.0) = 13, while label is 10.
    # Loss per example is 3**2 = 9.
    # Training loss is the sum over batch = 9 + 9 = 18
    # Average loss is the average over batch = 9
    self.assertDictEqual({
        metric_keys.MetricKeys.LOSS: 18.,
        metric_keys.MetricKeys.LOSS_MEAN: 9.,
        ops.GraphKeys.GLOBAL_STEP: 100
    }, eval_metrics)

  def test_evaluation_weights(self):
    """Tests evaluation with weights."""
    with ops.Graph().as_default():
      variables.Variable([[11.0]], name=AGE_WEIGHT_NAME)
      variables.Variable([2.0], name=BIAS_NAME)
      variables.Variable(
          100, name=ops.GraphKeys.GLOBAL_STEP, dtype=dtypes.int64)
      save_variables_to_ckpt(self._model_dir)

    def _input_fn():
      features = {'age': ((1,), (1,)), 'weights': ((1.,), (2.,))}
      labels = ((10.,), (10.,))
      return features, labels

    linear_regressor = self._linear_regressor_fn(
        feature_columns=(feature_column_lib.numeric_column('age'),),
        weight_column='weights',
        model_dir=self._model_dir)
    eval_metrics = linear_regressor.evaluate(input_fn=_input_fn, steps=1)

    # Logit is (1. * 11.0 + 2.0) = 13, while label is 10.
    # Loss per example is 3**2 = 9.
    # Training loss is the weighted sum over batch = 9 + 2*9 = 27
    # Average loss is the weighted average = (9 + 2*9) / (1 + 2) = 9
    self.assertDictEqual({
        metric_keys.MetricKeys.LOSS: 27.,
        metric_keys.MetricKeys.LOSS_MEAN: 9.,
        ops.GraphKeys.GLOBAL_STEP: 100
    }, eval_metrics)

  def test_evaluation_for_multi_dimensions(self):
    x_dim = 3
    label_dim = 2
    with ops.Graph().as_default():
      variables.Variable(
          [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], name=AGE_WEIGHT_NAME)
      variables.Variable([7.0, 8.0], name=BIAS_NAME)
      variables.Variable(100, name='global_step', dtype=dtypes.int64)
      save_variables_to_ckpt(self._model_dir)

    linear_regressor = self._linear_regressor_fn(
        feature_columns=(feature_column_lib.numeric_column(
            'age', shape=(x_dim,)),),
        label_dimension=label_dim,
        model_dir=self._model_dir)
    input_fn = numpy_io.numpy_input_fn(
        x={
            'age': np.array([[2., 4., 5.]]),
        },
        y=np.array([[46., 58.]]),
        batch_size=1,
        num_epochs=None,
        shuffle=False)
    eval_metrics = linear_regressor.evaluate(input_fn=input_fn, steps=1)

    self.assertItemsEqual(
        (metric_keys.MetricKeys.LOSS, metric_keys.MetricKeys.LOSS_MEAN,
         ops.GraphKeys.GLOBAL_STEP), eval_metrics.keys())

    # Logit is
    #   [2., 4., 5.] * [1.0, 2.0] + [7.0, 8.0] = [39, 50] + [7.0, 8.0]
    #                  [3.0, 4.0]
    #                  [5.0, 6.0]
    # which is [46, 58]
    self.assertAlmostEqual(0, eval_metrics[metric_keys.MetricKeys.LOSS])

  def test_evaluation_for_multiple_feature_columns(self):
    with ops.Graph().as_default():
      variables.Variable([[10.0]], name=AGE_WEIGHT_NAME)
      variables.Variable([[2.0]], name=HEIGHT_WEIGHT_NAME)
      variables.Variable([5.0], name=BIAS_NAME)
      variables.Variable(
          100, name=ops.GraphKeys.GLOBAL_STEP, dtype=dtypes.int64)
      save_variables_to_ckpt(self._model_dir)

    batch_size = 2
    feature_columns = [
        feature_column_lib.numeric_column('age'),
        feature_column_lib.numeric_column('height')
    ]
    input_fn = numpy_io.numpy_input_fn(
        x={'age': np.array([20, 40]),
           'height': np.array([4, 8])},
        y=np.array([[213.], [421.]]),
        batch_size=batch_size,
        num_epochs=None,
        shuffle=False)

    est = self._linear_regressor_fn(
        feature_columns=feature_columns, model_dir=self._model_dir)

    eval_metrics = est.evaluate(input_fn=input_fn, steps=1)
    self.assertItemsEqual(
        (metric_keys.MetricKeys.LOSS, metric_keys.MetricKeys.LOSS_MEAN,
         ops.GraphKeys.GLOBAL_STEP), eval_metrics.keys())

    # Logit is [(20. * 10.0 + 4 * 2.0 + 5.0), (40. * 10.0 + 8 * 2.0 + 5.0)] =
    # [213.0, 421.0], while label is [213., 421.]. Loss = 0.
    self.assertAlmostEqual(0, eval_metrics[metric_keys.MetricKeys.LOSS])


class BaseLinearRegressorPredictTest(object):
  """Tests predict() of linear regressors restored from hand-built ckpts."""

  def __init__(self, linear_regressor_fn):
    self._linear_regressor_fn = linear_regressor_fn

  def setUp(self):
    self._model_dir = tempfile.mkdtemp()

  def tearDown(self):
    if self._model_dir:
      writer_cache.FileWriterCache.clear()
      shutil.rmtree(self._model_dir)

  def test_1d(self):
    """Tests predict when all variables are one-dimensional."""
    with ops.Graph().as_default():
      variables.Variable([[10.]], name='linear/linear_model/x/weights')
      variables.Variable([.2], name=BIAS_NAME)
      variables.Variable(100, name='global_step', dtype=dtypes.int64)
      save_variables_to_ckpt(self._model_dir)

    linear_regressor = self._linear_regressor_fn(
        feature_columns=(feature_column_lib.numeric_column('x'),),
        model_dir=self._model_dir)

    predict_input_fn = numpy_io.numpy_input_fn(
        x={'x': np.array([[2.]])},
        y=None,
        batch_size=1,
        num_epochs=1,
        shuffle=False)
    predictions = linear_regressor.predict(input_fn=predict_input_fn)
    predicted_scores = list([x['predictions'] for x in predictions])
    # x * weight + bias = 2. * 10. + .2 = 20.2
    self.assertAllClose([[20.2]], predicted_scores)

  def testMultiDim(self):
    """Tests predict when all variables are multi-dimensional."""
    batch_size = 2
    label_dimension = 3
    x_dim = 4
    feature_columns = (feature_column_lib.numeric_column('x', shape=(x_dim,)),)
    with ops.Graph().as_default():
      variables.Variable(  # shape=[x_dim, label_dimension]
          [[1., 2., 3.], [2., 3., 4.], [3., 4., 5.], [4., 5., 6.]],
          name='linear/linear_model/x/weights')
      variables.Variable(  # shape=[label_dimension]
          [.2, .4, .6], name=BIAS_NAME)
      variables.Variable(100, name='global_step', dtype=dtypes.int64)
      save_variables_to_ckpt(self._model_dir)

    linear_regressor = self._linear_regressor_fn(
        feature_columns=feature_columns,
        label_dimension=label_dimension,
        model_dir=self._model_dir)

    predict_input_fn = numpy_io.numpy_input_fn(
        # x shape=[batch_size, x_dim]
        x={'x': np.array([[1., 2., 3., 4.], [5., 6., 7., 8.]])},
        y=None,
        batch_size=batch_size,
        num_epochs=1,
        shuffle=False)
    predictions = linear_regressor.predict(input_fn=predict_input_fn)
    predicted_scores = list([x['predictions'] for x in predictions])
    # score = x * weight + bias, shape=[batch_size, label_dimension]
    self.assertAllClose([[30.2, 40.4, 50.6], [70.2, 96.4, 122.6]],
                        predicted_scores)

  def testTwoFeatureColumns(self):
    """Tests predict with two feature columns."""
    with ops.Graph().as_default():
      variables.Variable([[10.]], name='linear/linear_model/x0/weights')
      variables.Variable([[20.]], name='linear/linear_model/x1/weights')
      variables.Variable([.2], name=BIAS_NAME)
      variables.Variable(100, name='global_step', dtype=dtypes.int64)
      save_variables_to_ckpt(self._model_dir)

    linear_regressor = self._linear_regressor_fn(
        feature_columns=(feature_column_lib.numeric_column('x0'),
                         feature_column_lib.numeric_column('x1')),
        model_dir=self._model_dir)

    predict_input_fn = numpy_io.numpy_input_fn(
        x={'x0': np.array([[2.]]),
           'x1': np.array([[3.]])},
        y=None,
        batch_size=1,
        num_epochs=1,
        shuffle=False)
    predictions = linear_regressor.predict(input_fn=predict_input_fn)
    predicted_scores = list([x['predictions'] for x in predictions])
    # x0 * weight0 + x1 * weight1 + bias = 2. * 10. + 3. * 20 + .2 = 80.2
    self.assertAllClose([[80.2]], predicted_scores)


class BaseLinearRegressorIntegrationTest(object):
  """End-to-end train/evaluate/predict/export tests for linear regressors."""

  def __init__(self, linear_regressor_fn):
    self._linear_regressor_fn = linear_regressor_fn

  def setUp(self):
    self._model_dir = tempfile.mkdtemp()

  def tearDown(self):
    if self._model_dir:
      writer_cache.FileWriterCache.clear()
      shutil.rmtree(self._model_dir)

  def _test_complete_flow(self, train_input_fn, eval_input_fn,
                          predict_input_fn, input_dimension, label_dimension,
                          prediction_length):
    # Runs the full estimator life-cycle once and sanity-checks each stage.
    feature_columns = [
        feature_column_lib.numeric_column('x', shape=(input_dimension,))
    ]
    est = self._linear_regressor_fn(
        feature_columns=feature_columns,
        label_dimension=label_dimension,
        model_dir=self._model_dir)

    # TRAIN
    # learn y = x
    est.train(train_input_fn, steps=200)

    # EVALUATE
    scores = est.evaluate(eval_input_fn)
    self.assertEqual(200, scores[ops.GraphKeys.GLOBAL_STEP])
    self.assertIn(metric_keys.MetricKeys.LOSS, six.iterkeys(scores))

    # PREDICT
    predictions = np.array(
        [x['predictions'] for x in est.predict(predict_input_fn)])
    self.assertAllEqual((prediction_length, label_dimension),
                        predictions.shape)

    # EXPORT
    feature_spec = feature_column_lib.make_parse_example_spec(feature_columns)
    serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
        feature_spec)
    export_dir = est.export_savedmodel(tempfile.mkdtemp(),
                                       serving_input_receiver_fn)
    self.assertTrue(gfile.Exists(export_dir))

  def test_numpy_input_fn(self):
    """Tests complete flow with numpy_input_fn."""
    label_dimension = 2
    input_dimension = label_dimension
    batch_size = 10
    prediction_length = batch_size
    data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
    data = data.reshape(batch_size, label_dimension)

    train_input_fn = numpy_io.numpy_input_fn(
        x={'x': data},
        y=data,
        batch_size=batch_size,
        num_epochs=None,
        shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn( x={'x': data}, y=data, batch_size=batch_size, num_epochs=1, shuffle=False) predict_input_fn = numpy_io.numpy_input_fn( x={'x': data}, y=None, batch_size=batch_size, num_epochs=1, shuffle=False) self._test_complete_flow( train_input_fn=train_input_fn, eval_input_fn=eval_input_fn, predict_input_fn=predict_input_fn, input_dimension=input_dimension, label_dimension=label_dimension, prediction_length=prediction_length) def test_pandas_input_fn(self): """Tests complete flow with pandas_input_fn.""" if not HAS_PANDAS: return # Pandas DataFrame natually supports 1 dim data only. label_dimension = 1 input_dimension = label_dimension batch_size = 10 data = np.array([1., 2., 3., 4.], dtype=np.float32) x = pd.DataFrame({'x': data}) y = pd.Series(data) prediction_length = 4 train_input_fn = pandas_io.pandas_input_fn( x=x, y=y, batch_size=batch_size, num_epochs=None, shuffle=True) eval_input_fn = pandas_io.pandas_input_fn( x=x, y=y, batch_size=batch_size, shuffle=False) predict_input_fn = pandas_io.pandas_input_fn( x=x, batch_size=batch_size, shuffle=False) self._test_complete_flow( train_input_fn=train_input_fn, eval_input_fn=eval_input_fn, predict_input_fn=predict_input_fn, input_dimension=input_dimension, label_dimension=label_dimension, prediction_length=prediction_length) def test_input_fn_from_parse_example(self): """Tests complete flow with input_fn constructed from parse_example.""" label_dimension = 2 input_dimension = label_dimension batch_size = 10 prediction_length = batch_size data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32) data = data.reshape(batch_size, label_dimension) serialized_examples = [] for datum in data: example = example_pb2.Example(features=feature_pb2.Features( feature={ 'x': feature_pb2.Feature(float_list=feature_pb2.FloatList( value=datum)), 'y': feature_pb2.Feature(float_list=feature_pb2.FloatList( value=datum[:label_dimension])), })) 
serialized_examples.append(example.SerializeToString()) feature_spec = { 'x': parsing_ops.FixedLenFeature([input_dimension], dtypes.float32), 'y': parsing_ops.FixedLenFeature([label_dimension], dtypes.float32), } def _train_input_fn(): feature_map = parsing_ops.parse_example(serialized_examples, feature_spec) features = queue_parsed_features(feature_map) labels = features.pop('y') return features, labels def _eval_input_fn(): feature_map = parsing_ops.parse_example( input_lib.limit_epochs(serialized_examples, num_epochs=1), feature_spec) features = queue_parsed_features(feature_map) labels = features.pop('y') return features, labels def _predict_input_fn(): feature_map = parsing_ops.parse_example( input_lib.limit_epochs(serialized_examples, num_epochs=1), feature_spec) features = queue_parsed_features(feature_map) features.pop('y') return features, None self._test_complete_flow( train_input_fn=_train_input_fn, eval_input_fn=_eval_input_fn, predict_input_fn=_predict_input_fn, input_dimension=input_dimension, label_dimension=label_dimension, prediction_length=prediction_length) class BaseLinearRegressorTrainingTest(object): def __init__(self, linear_regressor_fn): self._linear_regressor_fn = linear_regressor_fn def setUp(self): self._model_dir = tempfile.mkdtemp() def tearDown(self): if self._model_dir: writer_cache.FileWriterCache.clear() shutil.rmtree(self._model_dir) def _mock_optimizer(self, expected_loss=None): expected_var_names = [ '%s/part_0:0' % AGE_WEIGHT_NAME, '%s/part_0:0' % BIAS_NAME ] def _minimize(loss, global_step=None, var_list=None): trainable_vars = var_list or ops.get_collection( ops.GraphKeys.TRAINABLE_VARIABLES) self.assertItemsEqual(expected_var_names, [var.name for var in trainable_vars]) # Verify loss. We can't check the value directly, so we add an assert op. 
self.assertEquals(0, loss.shape.ndims) if expected_loss is None: if global_step is not None: return state_ops.assign_add(global_step, 1).op return control_flow_ops.no_op() assert_loss = assert_close( math_ops.to_float(expected_loss, name='expected'), loss, name='assert_loss') with ops.control_dependencies((assert_loss,)): if global_step is not None: return state_ops.assign_add(global_step, 1).op return control_flow_ops.no_op() mock_optimizer = test.mock.NonCallableMock( spec=optimizer.Optimizer, wraps=optimizer.Optimizer(use_locking=False, name='my_optimizer')) mock_optimizer.minimize = test.mock.MagicMock(wraps=_minimize) # NOTE: Estimator.params performs a deepcopy, which wreaks havoc with mocks. # So, return mock_optimizer itself for deepcopy. mock_optimizer.__deepcopy__ = lambda _: mock_optimizer return mock_optimizer def _assert_checkpoint(self, expected_global_step, expected_age_weight=None, expected_bias=None): shapes = { name: shape for (name, shape) in checkpoint_utils.list_variables(self._model_dir) } self.assertEqual([], shapes[ops.GraphKeys.GLOBAL_STEP]) self.assertEqual(expected_global_step, checkpoint_utils.load_variable(self._model_dir, ops.GraphKeys.GLOBAL_STEP)) self.assertEqual([1, 1], shapes[AGE_WEIGHT_NAME]) if expected_age_weight is not None: self.assertEqual(expected_age_weight, checkpoint_utils.load_variable(self._model_dir, AGE_WEIGHT_NAME)) self.assertEqual([1], shapes[BIAS_NAME]) if expected_bias is not None: self.assertEqual(expected_bias, checkpoint_utils.load_variable(self._model_dir, BIAS_NAME)) def testFromScratchWithDefaultOptimizer(self): # Create LinearRegressor. label = 5. age = 17 linear_regressor = self._linear_regressor_fn( feature_columns=(feature_column_lib.numeric_column('age'),), model_dir=self._model_dir) # Train for a few steps, and validate final checkpoint. 
num_steps = 10 linear_regressor.train( input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps) self._assert_checkpoint(num_steps) def testTrainWithOneDimLabel(self): label_dimension = 1 batch_size = 20 feature_columns = [feature_column_lib.numeric_column('age', shape=(1,))] est = self._linear_regressor_fn( feature_columns=feature_columns, label_dimension=label_dimension, model_dir=self._model_dir) data_rank_1 = np.linspace(0., 2., batch_size, dtype=np.float32) self.assertEqual((batch_size,), data_rank_1.shape) train_input_fn = numpy_io.numpy_input_fn( x={'age': data_rank_1}, y=data_rank_1, batch_size=batch_size, num_epochs=None, shuffle=True) est.train(train_input_fn, steps=200) self._assert_checkpoint(200) def testTrainWithOneDimWeight(self): label_dimension = 1 batch_size = 20 feature_columns = [feature_column_lib.numeric_column('age', shape=(1,))] est = self._linear_regressor_fn( feature_columns=feature_columns, label_dimension=label_dimension, weight_column='w', model_dir=self._model_dir) data_rank_1 = np.linspace(0., 2., batch_size, dtype=np.float32) self.assertEqual((batch_size,), data_rank_1.shape) train_input_fn = numpy_io.numpy_input_fn( x={'age': data_rank_1, 'w': data_rank_1}, y=data_rank_1, batch_size=batch_size, num_epochs=None, shuffle=True) est.train(train_input_fn, steps=200) self._assert_checkpoint(200) def testFromScratch(self): # Create LinearRegressor. label = 5. age = 17 # loss = (logits - label)^2 = (0 - 5.)^2 = 25. mock_optimizer = self._mock_optimizer(expected_loss=25.) linear_regressor = self._linear_regressor_fn( feature_columns=(feature_column_lib.numeric_column('age'),), model_dir=self._model_dir, optimizer=mock_optimizer) self.assertEqual(0, mock_optimizer.minimize.call_count) # Train for a few steps, and validate optimizer and final checkpoint. 
num_steps = 10 linear_regressor.train( input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps) self.assertEqual(1, mock_optimizer.minimize.call_count) self._assert_checkpoint( expected_global_step=num_steps, expected_age_weight=0., expected_bias=0.) def testFromCheckpoint(self): # Create initial checkpoint. age_weight = 10.0 bias = 5.0 initial_global_step = 100 with ops.Graph().as_default(): variables.Variable([[age_weight]], name=AGE_WEIGHT_NAME) variables.Variable([bias], name=BIAS_NAME) variables.Variable( initial_global_step, name=ops.GraphKeys.GLOBAL_STEP, dtype=dtypes.int64) save_variables_to_ckpt(self._model_dir) # logits = age * age_weight + bias = 17 * 10. + 5. = 175 # loss = (logits - label)^2 = (175 - 5)^2 = 28900 mock_optimizer = self._mock_optimizer(expected_loss=28900.) linear_regressor = self._linear_regressor_fn( feature_columns=(feature_column_lib.numeric_column('age'),), model_dir=self._model_dir, optimizer=mock_optimizer) self.assertEqual(0, mock_optimizer.minimize.call_count) # Train for a few steps, and validate optimizer and final checkpoint. num_steps = 10 linear_regressor.train( input_fn=lambda: ({'age': ((17,),)}, ((5.,),)), steps=num_steps) self.assertEqual(1, mock_optimizer.minimize.call_count) self._assert_checkpoint( expected_global_step=initial_global_step + num_steps, expected_age_weight=age_weight, expected_bias=bias) def testFromCheckpointMultiBatch(self): # Create initial checkpoint. age_weight = 10.0 bias = 5.0 initial_global_step = 100 with ops.Graph().as_default(): variables.Variable([[age_weight]], name=AGE_WEIGHT_NAME) variables.Variable([bias], name=BIAS_NAME) variables.Variable( initial_global_step, name=ops.GraphKeys.GLOBAL_STEP, dtype=dtypes.int64) save_variables_to_ckpt(self._model_dir) # logits = age * age_weight + bias # logits[0] = 17 * 10. + 5. = 175 # logits[1] = 15 * 10. + 5. 
= 155 # loss = sum(logits - label)^2 = (175 - 5)^2 + (155 - 3)^2 = 52004 mock_optimizer = self._mock_optimizer(expected_loss=52004.) linear_regressor = self._linear_regressor_fn( feature_columns=(feature_column_lib.numeric_column('age'),), model_dir=self._model_dir, optimizer=mock_optimizer) self.assertEqual(0, mock_optimizer.minimize.call_count) # Train for a few steps, and validate optimizer and final checkpoint. num_steps = 10 linear_regressor.train( input_fn=lambda: ({'age': ((17,), (15,))}, ((5.,), (3.,))), steps=num_steps) self.assertEqual(1, mock_optimizer.minimize.call_count) self._assert_checkpoint( expected_global_step=initial_global_step + num_steps, expected_age_weight=age_weight, expected_bias=bias) class BaseLinearClassifierTrainingTest(object): def __init__(self, linear_classifier_fn): self._linear_classifier_fn = linear_classifier_fn def setUp(self): self._model_dir = tempfile.mkdtemp() def tearDown(self): if self._model_dir: shutil.rmtree(self._model_dir) def _mock_optimizer(self, expected_loss=None): expected_var_names = [ '%s/part_0:0' % AGE_WEIGHT_NAME, '%s/part_0:0' % BIAS_NAME ] def _minimize(loss, global_step): trainable_vars = ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES) self.assertItemsEqual( expected_var_names, [var.name for var in trainable_vars]) # Verify loss. We can't check the value directly, so we add an assert op. self.assertEquals(0, loss.shape.ndims) if expected_loss is None: return state_ops.assign_add(global_step, 1).op assert_loss = assert_close( math_ops.to_float(expected_loss, name='expected'), loss, name='assert_loss') with ops.control_dependencies((assert_loss,)): return state_ops.assign_add(global_step, 1).op mock_optimizer = test.mock.NonCallableMock( spec=optimizer.Optimizer, wraps=optimizer.Optimizer(use_locking=False, name='my_optimizer')) mock_optimizer.minimize = test.mock.MagicMock(wraps=_minimize) # NOTE: Estimator.params performs a deepcopy, which wreaks havoc with mocks. 
# So, return mock_optimizer itself for deepcopy. mock_optimizer.__deepcopy__ = lambda _: mock_optimizer return mock_optimizer def _assert_checkpoint( self, n_classes, expected_global_step, expected_age_weight=None, expected_bias=None): logits_dimension = n_classes if n_classes > 2 else 1 shapes = { name: shape for (name, shape) in checkpoint_utils.list_variables(self._model_dir) } self.assertEqual([], shapes[ops.GraphKeys.GLOBAL_STEP]) self.assertEqual( expected_global_step, checkpoint_utils.load_variable( self._model_dir, ops.GraphKeys.GLOBAL_STEP)) self.assertEqual([1, logits_dimension], shapes[AGE_WEIGHT_NAME]) if expected_age_weight is not None: self.assertAllEqual(expected_age_weight, checkpoint_utils.load_variable( self._model_dir, AGE_WEIGHT_NAME)) self.assertEqual([logits_dimension], shapes[BIAS_NAME]) if expected_bias is not None: self.assertAllEqual(expected_bias, checkpoint_utils.load_variable( self._model_dir, BIAS_NAME)) def _testFromScratchWithDefaultOptimizer(self, n_classes): label = 0 age = 17 est = linear.LinearClassifier( feature_columns=(feature_column_lib.numeric_column('age'),), n_classes=n_classes, model_dir=self._model_dir) # Train for a few steps, and validate final checkpoint. 
num_steps = 10 est.train( input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps) self._assert_checkpoint(n_classes, num_steps) def testBinaryClassesFromScratchWithDefaultOptimizer(self): self._testFromScratchWithDefaultOptimizer(n_classes=2) def testMultiClassesFromScratchWithDefaultOptimizer(self): self._testFromScratchWithDefaultOptimizer(n_classes=4) def _testTrainWithTwoDimsLabel(self, n_classes): batch_size = 20 est = linear.LinearClassifier( feature_columns=(feature_column_lib.numeric_column('age'),), n_classes=n_classes, model_dir=self._model_dir) data_rank_1 = np.array([0, 1]) data_rank_2 = np.array([[0], [1]]) self.assertEqual((2,), data_rank_1.shape) self.assertEqual((2, 1), data_rank_2.shape) train_input_fn = numpy_io.numpy_input_fn( x={'age': data_rank_1}, y=data_rank_2, batch_size=batch_size, num_epochs=None, shuffle=True) est.train(train_input_fn, steps=200) self._assert_checkpoint(n_classes, 200) def testBinaryClassesTrainWithTwoDimsLabel(self): self._testTrainWithTwoDimsLabel(n_classes=2) def testMultiClassesTrainWithTwoDimsLabel(self): self._testTrainWithTwoDimsLabel(n_classes=4) def _testTrainWithOneDimLabel(self, n_classes): batch_size = 20 est = linear.LinearClassifier( feature_columns=(feature_column_lib.numeric_column('age'),), n_classes=n_classes, model_dir=self._model_dir) data_rank_1 = np.array([0, 1]) self.assertEqual((2,), data_rank_1.shape) train_input_fn = numpy_io.numpy_input_fn( x={'age': data_rank_1}, y=data_rank_1, batch_size=batch_size, num_epochs=None, shuffle=True) est.train(train_input_fn, steps=200) self._assert_checkpoint(n_classes, 200) def testBinaryClassesTrainWithOneDimLabel(self): self._testTrainWithOneDimLabel(n_classes=2) def testMultiClassesTrainWithOneDimLabel(self): self._testTrainWithOneDimLabel(n_classes=4) def _testTrainWithTwoDimsWeight(self, n_classes): batch_size = 20 est = linear.LinearClassifier( feature_columns=(feature_column_lib.numeric_column('age'),), weight_column='w', n_classes=n_classes, 
model_dir=self._model_dir) data_rank_1 = np.array([0, 1]) data_rank_2 = np.array([[0], [1]]) self.assertEqual((2,), data_rank_1.shape) self.assertEqual((2, 1), data_rank_2.shape) train_input_fn = numpy_io.numpy_input_fn( x={'age': data_rank_1, 'w': data_rank_2}, y=data_rank_1, batch_size=batch_size, num_epochs=None, shuffle=True) est.train(train_input_fn, steps=200) self._assert_checkpoint(n_classes, 200) def testBinaryClassesTrainWithTwoDimsWeight(self): self._testTrainWithTwoDimsWeight(n_classes=2) def testMultiClassesTrainWithTwoDimsWeight(self): self._testTrainWithTwoDimsWeight(n_classes=4) def _testTrainWithOneDimWeight(self, n_classes): batch_size = 20 est = linear.LinearClassifier( feature_columns=(feature_column_lib.numeric_column('age'),), weight_column='w', n_classes=n_classes, model_dir=self._model_dir) data_rank_1 = np.array([0, 1]) self.assertEqual((2,), data_rank_1.shape) train_input_fn = numpy_io.numpy_input_fn( x={'age': data_rank_1, 'w': data_rank_1}, y=data_rank_1, batch_size=batch_size, num_epochs=None, shuffle=True) est.train(train_input_fn, steps=200) self._assert_checkpoint(n_classes, 200) def testBinaryClassesTrainWithOneDimWeight(self): self._testTrainWithOneDimWeight(n_classes=2) def testMultiClassesTrainWithOneDimWeight(self): self._testTrainWithOneDimWeight(n_classes=4) def _testFromScratch(self, n_classes): label = 1 age = 17 # For binary classifier: # loss = sigmoid_cross_entropy(logits, label) where logits=0 (weights are # all zero initially) and label = 1 so, # loss = 1 * -log ( sigmoid(logits) ) = 0.69315 # For multi class classifier: # loss = cross_entropy(logits, label) where logits are all 0s (weights are # all zero initially) and label = 1 so, # loss = 1 * -log ( 1.0 / n_classes ) # For this particular test case, as logits are same, the formular # 1 * -log ( 1.0 / n_classes ) covers both binary and multi class cases. 
mock_optimizer = self._mock_optimizer( expected_loss=-1 * math.log(1.0/n_classes)) est = linear.LinearClassifier( feature_columns=(feature_column_lib.numeric_column('age'),), n_classes=n_classes, optimizer=mock_optimizer, model_dir=self._model_dir) self.assertEqual(0, mock_optimizer.minimize.call_count) # Train for a few steps, and validate optimizer and final checkpoint. num_steps = 10 est.train( input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps) self.assertEqual(1, mock_optimizer.minimize.call_count) self._assert_checkpoint( n_classes, expected_global_step=num_steps, expected_age_weight=[[0.]] if n_classes == 2 else [[0.] * n_classes], expected_bias=[0.] if n_classes == 2 else [.0] * n_classes) def testBinaryClassesFromScratch(self): self._testFromScratch(n_classes=2) def testMultiClassesFromScratch(self): self._testFromScratch(n_classes=4) def _testFromCheckpoint(self, n_classes): # Create initial checkpoint. label = 1 age = 17 # For binary case, the expected weight has shape (1,1). For multi class # case, the shape is (1, n_classes). In order to test the weights, set # weights as 2.0 * range(n_classes). age_weight = [[2.0]] if n_classes == 2 else ( np.reshape(2.0 * np.array(list(range(n_classes)), dtype=np.float32), (1, n_classes))) bias = [-35.0] if n_classes == 2 else [-35.0] * n_classes initial_global_step = 100 with ops.Graph().as_default(): variables.Variable(age_weight, name=AGE_WEIGHT_NAME) variables.Variable(bias, name=BIAS_NAME) variables.Variable( initial_global_step, name=ops.GraphKeys.GLOBAL_STEP, dtype=dtypes.int64) save_variables_to_ckpt(self._model_dir) # For binary classifier: # logits = age * age_weight + bias = 17 * 2. - 35. = -1. 
# loss = sigmoid_cross_entropy(logits, label) # so, loss = 1 * -log ( sigmoid(-1) ) = 1.3133 # For multi class classifier: # loss = cross_entropy(logits, label) # where logits = 17 * age_weight + bias and label = 1 # so, loss = 1 * -log ( soft_max(logits)[1] ) if n_classes == 2: expected_loss = 1.3133 else: logits = age_weight * age + bias logits_exp = np.exp(logits) softmax = logits_exp / logits_exp.sum() expected_loss = -1 * math.log(softmax[0, label]) mock_optimizer = self._mock_optimizer(expected_loss=expected_loss) est = linear.LinearClassifier( feature_columns=(feature_column_lib.numeric_column('age'),), n_classes=n_classes, optimizer=mock_optimizer, model_dir=self._model_dir) self.assertEqual(0, mock_optimizer.minimize.call_count) # Train for a few steps, and validate optimizer and final checkpoint. num_steps = 10 est.train( input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps) self.assertEqual(1, mock_optimizer.minimize.call_count) self._assert_checkpoint( n_classes, expected_global_step=initial_global_step + num_steps, expected_age_weight=age_weight, expected_bias=bias) def testBinaryClassesFromCheckpoint(self): self._testFromCheckpoint(n_classes=2) def testMultiClassesFromCheckpoint(self): self._testFromCheckpoint(n_classes=4) def _testFromCheckpointFloatLabels(self, n_classes): """Tests float labels for binary classification.""" # Create initial checkpoint. if n_classes > 2: return label = 0.8 age = 17 age_weight = [[2.0]] bias = [-35.0] initial_global_step = 100 with ops.Graph().as_default(): variables.Variable(age_weight, name=AGE_WEIGHT_NAME) variables.Variable(bias, name=BIAS_NAME) variables.Variable( initial_global_step, name=ops.GraphKeys.GLOBAL_STEP, dtype=dtypes.int64) save_variables_to_ckpt(self._model_dir) # logits = age * age_weight + bias = 17 * 2. - 35. = -1. 
# loss = sigmoid_cross_entropy(logits, label) # => loss = -0.8 * log(sigmoid(-1)) -0.2 * log(sigmoid(+1)) = 1.1132617 mock_optimizer = self._mock_optimizer(expected_loss=1.1132617) est = linear.LinearClassifier( feature_columns=(feature_column_lib.numeric_column('age'),), n_classes=n_classes, optimizer=mock_optimizer, model_dir=self._model_dir) self.assertEqual(0, mock_optimizer.minimize.call_count) # Train for a few steps, and validate optimizer and final checkpoint. num_steps = 10 est.train( input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps) self.assertEqual(1, mock_optimizer.minimize.call_count) def testBinaryClassesFromCheckpointFloatLabels(self): self._testFromCheckpointFloatLabels(n_classes=2) def testMultiClassesFromCheckpointFloatLabels(self): self._testFromCheckpointFloatLabels(n_classes=4) def _testFromCheckpointMultiBatch(self, n_classes): # Create initial checkpoint. label = [1, 0] age = [17, 18.5] # For binary case, the expected weight has shape (1,1). For multi class # case, the shape is (1, n_classes). In order to test the weights, set # weights as 2.0 * range(n_classes). age_weight = [[2.0]] if n_classes == 2 else ( np.reshape(2.0 * np.array(list(range(n_classes)), dtype=np.float32), (1, n_classes))) bias = [-35.0] if n_classes == 2 else [-35.0] * n_classes initial_global_step = 100 with ops.Graph().as_default(): variables.Variable(age_weight, name=AGE_WEIGHT_NAME) variables.Variable(bias, name=BIAS_NAME) variables.Variable( initial_global_step, name=ops.GraphKeys.GLOBAL_STEP, dtype=dtypes.int64) save_variables_to_ckpt(self._model_dir) # For binary classifier: # logits = age * age_weight + bias # logits[0] = 17 * 2. - 35. = -1. # logits[1] = 18.5 * 2. - 35. = 2. 
# loss = sigmoid_cross_entropy(logits, label) # so, loss[0] = 1 * -log ( sigmoid(-1) ) = 1.3133 # loss[1] = (1 - 0) * -log ( 1- sigmoid(2) ) = 2.1269 # For multi class classifier: # loss = cross_entropy(logits, label) # where logits = [17, 18.5] * age_weight + bias and label = [1, 0] # so, loss = 1 * -log ( soft_max(logits)[label] ) if n_classes == 2: expected_loss = (1.3133 + 2.1269) else: logits = age_weight * np.reshape(age, (2, 1)) + bias logits_exp = np.exp(logits) softmax_row_0 = logits_exp[0] / logits_exp[0].sum() softmax_row_1 = logits_exp[1] / logits_exp[1].sum() expected_loss_0 = -1 * math.log(softmax_row_0[label[0]]) expected_loss_1 = -1 * math.log(softmax_row_1[label[1]]) expected_loss = expected_loss_0 + expected_loss_1 mock_optimizer = self._mock_optimizer(expected_loss=expected_loss) est = linear.LinearClassifier( feature_columns=(feature_column_lib.numeric_column('age'),), n_classes=n_classes, optimizer=mock_optimizer, model_dir=self._model_dir) self.assertEqual(0, mock_optimizer.minimize.call_count) # Train for a few steps, and validate optimizer and final checkpoint. num_steps = 10 est.train( input_fn=lambda: ({'age': (age)}, (label)), steps=num_steps) self.assertEqual(1, mock_optimizer.minimize.call_count) self._assert_checkpoint( n_classes, expected_global_step=initial_global_step + num_steps, expected_age_weight=age_weight, expected_bias=bias) def testBinaryClassesFromCheckpointMultiBatch(self): self._testFromCheckpointMultiBatch(n_classes=2) def testMultiClassesFromCheckpointMultiBatch(self): self._testFromCheckpointMultiBatch(n_classes=4) class BaseLinearClassifierEvaluationTest(object): def __init__(self, linear_classifier_fn): self._linear_classifier_fn = linear_classifier_fn def setUp(self): self._model_dir = tempfile.mkdtemp() def tearDown(self): if self._model_dir: shutil.rmtree(self._model_dir) def _test_evaluation_for_simple_data(self, n_classes): label = 1 age = 1. # For binary case, the expected weight has shape (1,1). 
For multi class # case, the shape is (1, n_classes). In order to test the weights, set # weights as 2.0 * range(n_classes). age_weight = [[-11.0]] if n_classes == 2 else ( np.reshape(-11.0 * np.array(list(range(n_classes)), dtype=np.float32), (1, n_classes))) bias = [-30.0] if n_classes == 2 else [-30.0] * n_classes with ops.Graph().as_default(): variables.Variable(age_weight, name=AGE_WEIGHT_NAME) variables.Variable(bias, name=BIAS_NAME) variables.Variable( 100, name=ops.GraphKeys.GLOBAL_STEP, dtype=dtypes.int64) save_variables_to_ckpt(self._model_dir) est = self._linear_classifier_fn( feature_columns=(feature_column_lib.numeric_column('age'),), n_classes=n_classes, model_dir=self._model_dir) eval_metrics = est.evaluate( input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=1) if n_classes == 2: # Binary classes: loss = sum(corss_entropy(41)) = 41. expected_metrics = { metric_keys.MetricKeys.LOSS: 41., ops.GraphKeys.GLOBAL_STEP: 100, metric_keys.MetricKeys.LOSS_MEAN: 41., metric_keys.MetricKeys.ACCURACY: 0., metric_keys.MetricKeys.PREDICTION_MEAN: 0., metric_keys.MetricKeys.LABEL_MEAN: 1., metric_keys.MetricKeys.ACCURACY_BASELINE: 1, metric_keys.MetricKeys.AUC: 0., metric_keys.MetricKeys.AUC_PR: 1., } else: # Multi classes: loss = 1 * -log ( soft_max(logits)[label] ) logits = age_weight * age + bias logits_exp = np.exp(logits) softmax = logits_exp / logits_exp.sum() expected_loss = -1 * math.log(softmax[0, label]) expected_metrics = { metric_keys.MetricKeys.LOSS: expected_loss, ops.GraphKeys.GLOBAL_STEP: 100, metric_keys.MetricKeys.LOSS_MEAN: expected_loss, metric_keys.MetricKeys.ACCURACY: 0., } self.assertAllClose(sorted_key_dict(expected_metrics), sorted_key_dict(eval_metrics), rtol=1e-3) def test_binary_classes_evaluation_for_simple_data(self): self._test_evaluation_for_simple_data(n_classes=2) def test_multi_classes_evaluation_for_simple_data(self): self._test_evaluation_for_simple_data(n_classes=4) def _test_evaluation_batch(self, n_classes): """Tests 
evaluation for batch_size==2.""" label = [1, 0] age = [17., 18.] # For binary case, the expected weight has shape (1,1). For multi class # case, the shape is (1, n_classes). In order to test the weights, set # weights as 2.0 * range(n_classes). age_weight = [[2.0]] if n_classes == 2 else ( np.reshape(2.0 * np.array(list(range(n_classes)), dtype=np.float32), (1, n_classes))) bias = [-35.0] if n_classes == 2 else [-35.0] * n_classes initial_global_step = 100 with ops.Graph().as_default(): variables.Variable(age_weight, name=AGE_WEIGHT_NAME) variables.Variable(bias, name=BIAS_NAME) variables.Variable( initial_global_step, name=ops.GraphKeys.GLOBAL_STEP, dtype=dtypes.int64) save_variables_to_ckpt(self._model_dir) est = self._linear_classifier_fn( feature_columns=(feature_column_lib.numeric_column('age'),), n_classes=n_classes, model_dir=self._model_dir) eval_metrics = est.evaluate( input_fn=lambda: ({'age': (age)}, (label)), steps=1) if n_classes == 2: # Logits are (-1., 1.) labels are (1, 0). 
# Loss is # loss for row 1: 1 * -log(sigmoid(-1)) = 1.3133 # loss for row 2: (1 - 0) * -log(1 - sigmoid(1)) = 1.3133 expected_loss = 1.3133 * 2 expected_metrics = { metric_keys.MetricKeys.LOSS: expected_loss, ops.GraphKeys.GLOBAL_STEP: 100, metric_keys.MetricKeys.LOSS_MEAN: expected_loss / 2, metric_keys.MetricKeys.ACCURACY: 0., metric_keys.MetricKeys.PREDICTION_MEAN: 0.5, metric_keys.MetricKeys.LABEL_MEAN: 0.5, metric_keys.MetricKeys.ACCURACY_BASELINE: 0.5, metric_keys.MetricKeys.AUC: 0., metric_keys.MetricKeys.AUC_PR: 0.25, } else: # Multi classes: loss = 1 * -log ( soft_max(logits)[label] ) logits = age_weight * np.reshape(age, (2, 1)) + bias logits_exp = np.exp(logits) softmax_row_0 = logits_exp[0] / logits_exp[0].sum() softmax_row_1 = logits_exp[1] / logits_exp[1].sum() expected_loss_0 = -1 * math.log(softmax_row_0[label[0]]) expected_loss_1 = -1 * math.log(softmax_row_1[label[1]]) expected_loss = expected_loss_0 + expected_loss_1 expected_metrics = { metric_keys.MetricKeys.LOSS: expected_loss, ops.GraphKeys.GLOBAL_STEP: 100, metric_keys.MetricKeys.LOSS_MEAN: expected_loss / 2, metric_keys.MetricKeys.ACCURACY: 0., } self.assertAllClose(sorted_key_dict(expected_metrics), sorted_key_dict(eval_metrics), rtol=1e-3) def test_binary_classes_evaluation_batch(self): self._test_evaluation_batch(n_classes=2) def test_multi_classes_evaluation_batch(self): self._test_evaluation_batch(n_classes=4) def _test_evaluation_weights(self, n_classes): """Tests evaluation with weights.""" label = [1, 0] age = [17., 18.] weights = [1., 2.] # For binary case, the expected weight has shape (1,1). For multi class # case, the shape is (1, n_classes). In order to test the weights, set # weights as 2.0 * range(n_classes). 
age_weight = [[2.0]] if n_classes == 2 else ( np.reshape(2.0 * np.array(list(range(n_classes)), dtype=np.float32), (1, n_classes))) bias = [-35.0] if n_classes == 2 else [-35.0] * n_classes initial_global_step = 100 with ops.Graph().as_default(): variables.Variable(age_weight, name=AGE_WEIGHT_NAME) variables.Variable(bias, name=BIAS_NAME) variables.Variable( initial_global_step, name=ops.GraphKeys.GLOBAL_STEP, dtype=dtypes.int64) save_variables_to_ckpt(self._model_dir) est = self._linear_classifier_fn( feature_columns=(feature_column_lib.numeric_column('age'),), n_classes=n_classes, weight_column='w', model_dir=self._model_dir) eval_metrics = est.evaluate( input_fn=lambda: ({'age': (age), 'w': (weights)}, (label)), steps=1) if n_classes == 2: # Logits are (-1., 1.) labels are (1, 0). # Loss is # loss for row 1: 1 * -log(sigmoid(-1)) = 1.3133 # loss for row 2: (1 - 0) * -log(1 - sigmoid(1)) = 1.3133 # weights = [1., 2.] expected_loss = 1.3133 * (1. + 2.) loss_mean = expected_loss / (1.0 + 2.0) label_mean = np.average(label, weights=weights) logits = [-1, 1] logistics = sigmoid(np.array(logits)) predictions_mean = np.average(logistics, weights=weights) expected_metrics = { metric_keys.MetricKeys.LOSS: expected_loss, ops.GraphKeys.GLOBAL_STEP: 100, metric_keys.MetricKeys.LOSS_MEAN: loss_mean, metric_keys.MetricKeys.ACCURACY: 0., metric_keys.MetricKeys.PREDICTION_MEAN: predictions_mean, metric_keys.MetricKeys.LABEL_MEAN: label_mean, metric_keys.MetricKeys.ACCURACY_BASELINE: ( max(label_mean, 1-label_mean)), metric_keys.MetricKeys.AUC: 0., metric_keys.MetricKeys.AUC_PR: 0.1668, } else: # Multi classes: unweighted_loss = 1 * -log ( soft_max(logits)[label] ) logits = age_weight * np.reshape(age, (2, 1)) + bias logits_exp = np.exp(logits) softmax_row_0 = logits_exp[0] / logits_exp[0].sum() softmax_row_1 = logits_exp[1] / logits_exp[1].sum() expected_loss_0 = -1 * math.log(softmax_row_0[label[0]]) expected_loss_1 = -1 * math.log(softmax_row_1[label[1]]) loss_mean = 
np.average([expected_loss_0, expected_loss_1], weights=weights) expected_loss = loss_mean * np.sum(weights) expected_metrics = { metric_keys.MetricKeys.LOSS: expected_loss, ops.GraphKeys.GLOBAL_STEP: 100, metric_keys.MetricKeys.LOSS_MEAN: loss_mean, metric_keys.MetricKeys.ACCURACY: 0., } self.assertAllClose(sorted_key_dict(expected_metrics), sorted_key_dict(eval_metrics), rtol=1e-3) def test_binary_classes_evaluation_weights(self): self._test_evaluation_weights(n_classes=2) def test_multi_classes_evaluation_weights(self): self._test_evaluation_weights(n_classes=4) class BaseLinearClassifierPredictTest(object): def __init__(self, linear_classifier_fn): self._linear_classifier_fn = linear_classifier_fn def setUp(self): self._model_dir = tempfile.mkdtemp() def tearDown(self): if self._model_dir: shutil.rmtree(self._model_dir) def _testPredictions(self, n_classes, label_vocabulary, label_output_fn): """Tests predict when all variables are one-dimensional.""" age = 1. # For binary case, the expected weight has shape (1,1). For multi class # case, the shape is (1, n_classes). In order to test the weights, set # weights as 2.0 * range(n_classes). 
age_weight = [[-11.0]] if n_classes == 2 else ( np.reshape(-11.0 * np.array(list(range(n_classes)), dtype=np.float32), (1, n_classes))) bias = [10.0] if n_classes == 2 else [10.0] * n_classes with ops.Graph().as_default(): variables.Variable(age_weight, name=AGE_WEIGHT_NAME) variables.Variable(bias, name=BIAS_NAME) variables.Variable(100, name='global_step', dtype=dtypes.int64) save_variables_to_ckpt(self._model_dir) est = self._linear_classifier_fn( feature_columns=(feature_column_lib.numeric_column('age'),), label_vocabulary=label_vocabulary, n_classes=n_classes, model_dir=self._model_dir) predict_input_fn = numpy_io.numpy_input_fn( x={'age': np.array([[age]])}, y=None, batch_size=1, num_epochs=1, shuffle=False) predictions = list(est.predict(input_fn=predict_input_fn)) if n_classes == 2: scalar_logits = np.asscalar( np.reshape(np.array(age_weight) * age + bias, (1,))) two_classes_logits = [0, scalar_logits] two_classes_logits_exp = np.exp(two_classes_logits) softmax = two_classes_logits_exp / two_classes_logits_exp.sum() expected_predictions = { 'class_ids': [0], 'classes': [label_output_fn(0)], 'logistic': [sigmoid(np.array(scalar_logits))], 'logits': [scalar_logits], 'probabilities': softmax, } else: onedim_logits = np.reshape(np.array(age_weight) * age + bias, (-1,)) class_ids = onedim_logits.argmax() logits_exp = np.exp(onedim_logits) softmax = logits_exp / logits_exp.sum() expected_predictions = { 'class_ids': [class_ids], 'classes': [label_output_fn(class_ids)], 'logits': onedim_logits, 'probabilities': softmax, } self.assertEqual(1, len(predictions)) # assertAllClose cannot handle byte type. 
self.assertEqual(expected_predictions['classes'], predictions[0]['classes']) expected_predictions.pop('classes') predictions[0].pop('classes') self.assertAllClose(sorted_key_dict(expected_predictions), sorted_key_dict(predictions[0])) def testBinaryClassesWithoutLabelVocabulary(self): n_classes = 2 self._testPredictions(n_classes, label_vocabulary=None, label_output_fn=lambda x: ('%s' % x).encode()) def testBinaryClassesWithLabelVocabulary(self): n_classes = 2 self._testPredictions( n_classes, label_vocabulary=['class_vocab_{}'.format(i) for i in range(n_classes)], label_output_fn=lambda x: ('class_vocab_%s' % x).encode()) def testMultiClassesWithoutLabelVocabulary(self): n_classes = 4 self._testPredictions( n_classes, label_vocabulary=None, label_output_fn=lambda x: ('%s' % x).encode()) def testMultiClassesWithLabelVocabulary(self): n_classes = 4 self._testPredictions( n_classes, label_vocabulary=['class_vocab_{}'.format(i) for i in range(n_classes)], label_output_fn=lambda x: ('class_vocab_%s' % x).encode()) class BaseLinearClassifierIntegrationTest(object): def __init__(self, linear_classifier_fn): self._linear_classifier_fn = linear_classifier_fn def setUp(self): self._model_dir = tempfile.mkdtemp() def tearDown(self): if self._model_dir: shutil.rmtree(self._model_dir) def _test_complete_flow(self, n_classes, train_input_fn, eval_input_fn, predict_input_fn, input_dimension, prediction_length): feature_columns = [ feature_column_lib.numeric_column('x', shape=(input_dimension,)) ] est = self._linear_classifier_fn( feature_columns=feature_columns, n_classes=n_classes, model_dir=self._model_dir) # TRAIN # learn y = x est.train(train_input_fn, steps=200) # EVALUTE scores = est.evaluate(eval_input_fn) self.assertEqual(200, scores[ops.GraphKeys.GLOBAL_STEP]) self.assertIn(metric_keys.MetricKeys.LOSS, six.iterkeys(scores)) # PREDICT predictions = np.array( [x['classes'] for x in est.predict(predict_input_fn)]) self.assertAllEqual((prediction_length, 1), 
predictions.shape) # EXPORT feature_spec = feature_column_lib.make_parse_example_spec(feature_columns) serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn( feature_spec) export_dir = est.export_savedmodel(tempfile.mkdtemp(), serving_input_receiver_fn) self.assertTrue(gfile.Exists(export_dir)) def _test_numpy_input_fn(self, n_classes): """Tests complete flow with numpy_input_fn.""" input_dimension = 4 batch_size = 10 prediction_length = batch_size data = np.linspace(0., 2., batch_size * input_dimension, dtype=np.float32) data = data.reshape(batch_size, input_dimension) target = np.array([1] * batch_size) train_input_fn = numpy_io.numpy_input_fn( x={'x': data}, y=target, batch_size=batch_size, num_epochs=None, shuffle=True) eval_input_fn = numpy_io.numpy_input_fn( x={'x': data}, y=target, batch_size=batch_size, num_epochs=1, shuffle=False) predict_input_fn = numpy_io.numpy_input_fn( x={'x': data}, y=None, batch_size=batch_size, num_epochs=1, shuffle=False) self._test_complete_flow( n_classes=n_classes, train_input_fn=train_input_fn, eval_input_fn=eval_input_fn, predict_input_fn=predict_input_fn, input_dimension=input_dimension, prediction_length=prediction_length) def test_binary_classes_numpy_input_fn(self): self._test_numpy_input_fn(n_classes=2) def test_multi_classes_numpy_input_fn(self): self._test_numpy_input_fn(n_classes=4) def _test_pandas_input_fn(self, n_classes): """Tests complete flow with pandas_input_fn.""" if not HAS_PANDAS: return # Pandas DataFrame natually supports 1 dim data only. 
input_dimension = 1 batch_size = 10 data = np.array([1., 2., 3., 4.], dtype=np.float32) target = np.array([1, 0, 1, 0], dtype=np.int32) x = pd.DataFrame({'x': data}) y = pd.Series(target) prediction_length = 4 train_input_fn = pandas_io.pandas_input_fn( x=x, y=y, batch_size=batch_size, num_epochs=None, shuffle=True) eval_input_fn = pandas_io.pandas_input_fn( x=x, y=y, batch_size=batch_size, shuffle=False) predict_input_fn = pandas_io.pandas_input_fn( x=x, batch_size=batch_size, shuffle=False) self._test_complete_flow( n_classes=n_classes, train_input_fn=train_input_fn, eval_input_fn=eval_input_fn, predict_input_fn=predict_input_fn, input_dimension=input_dimension, prediction_length=prediction_length) def test_binary_classes_pandas_input_fn(self): self._test_pandas_input_fn(n_classes=2) def test_multi_classes_pandas_input_fn(self): self._test_pandas_input_fn(n_classes=4) def _test_input_fn_from_parse_example(self, n_classes): """Tests complete flow with input_fn constructed from parse_example.""" input_dimension = 2 batch_size = 10 prediction_length = batch_size data = np.linspace(0., 2., batch_size * input_dimension, dtype=np.float32) data = data.reshape(batch_size, input_dimension) target = np.array([1] * batch_size, dtype=np.int64) serialized_examples = [] for x, y in zip(data, target): example = example_pb2.Example(features=feature_pb2.Features( feature={ 'x': feature_pb2.Feature(float_list=feature_pb2.FloatList( value=x)), 'y': feature_pb2.Feature(int64_list=feature_pb2.Int64List( value=[y])), })) serialized_examples.append(example.SerializeToString()) feature_spec = { 'x': parsing_ops.FixedLenFeature([input_dimension], dtypes.float32), 'y': parsing_ops.FixedLenFeature([1], dtypes.int64), } def _train_input_fn(): feature_map = parsing_ops.parse_example(serialized_examples, feature_spec) features = queue_parsed_features(feature_map) labels = features.pop('y') return features, labels def _eval_input_fn(): feature_map = parsing_ops.parse_example( 
input_lib.limit_epochs(serialized_examples, num_epochs=1), feature_spec) features = queue_parsed_features(feature_map) labels = features.pop('y') return features, labels def _predict_input_fn(): feature_map = parsing_ops.parse_example( input_lib.limit_epochs(serialized_examples, num_epochs=1), feature_spec) features = queue_parsed_features(feature_map) features.pop('y') return features, None self._test_complete_flow( n_classes=n_classes, train_input_fn=_train_input_fn, eval_input_fn=_eval_input_fn, predict_input_fn=_predict_input_fn, input_dimension=input_dimension, prediction_length=prediction_length) def test_binary_classes_input_fn_from_parse_example(self): self._test_input_fn_from_parse_example(n_classes=2) def test_multi_classes_input_fn_from_parse_example(self): self._test_input_fn_from_parse_example(n_classes=4) class BaseLinearLogitFnTest(object): def test_basic_logit_correctness(self): """linear_logit_fn simply wraps feature_column_lib.linear_model.""" age = feature_column_lib.numeric_column('age') with ops.Graph().as_default(): logit_fn = linear._linear_logit_fn_builder(units=2, feature_columns=[age]) logits = logit_fn(features={'age': [[23.], [31.]]}) with variable_scope.variable_scope('linear_model', reuse=True): bias_var = variable_scope.get_variable('bias_weights') age_var = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES, 'linear_model/age')[0] with tf_session.Session() as sess: sess.run([variables.global_variables_initializer()]) self.assertAllClose([[0., 0.], [0., 0.]], logits.eval()) sess.run(bias_var.assign([10., 5.])) self.assertAllClose([[10., 5.], [10., 5.]], logits.eval()) sess.run(age_var.assign([[2.0, 3.0]])) # [2 * 23 + 10, 3 * 23 + 5] = [56, 74]. # [2 * 31 + 10, 3 * 31 + 5] = [72, 98] self.assertAllClose([[56., 74.], [72., 98.]], logits.eval())
apache-2.0
alvarofierroclavero/scikit-learn
examples/model_selection/plot_train_error_vs_test_error.py
349
2577
""" ========================= Train error vs Test error ========================= Illustration of how the performance of an estimator on unseen data (test data) is not the same as the performance on training data. As the regularization increases the performance on train decreases while the performance on test is optimal within a range of values of the regularization parameter. The example with an Elastic-Net regression model and the performance is measured using the explained variance a.k.a. R^2. """ print(__doc__) # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr> # License: BSD 3 clause import numpy as np from sklearn import linear_model ############################################################################### # Generate sample data n_samples_train, n_samples_test, n_features = 75, 150, 500 np.random.seed(0) coef = np.random.randn(n_features) coef[50:] = 0.0 # only the top 10 features are impacting the model X = np.random.randn(n_samples_train + n_samples_test, n_features) y = np.dot(X, coef) # Split train and test data X_train, X_test = X[:n_samples_train], X[n_samples_train:] y_train, y_test = y[:n_samples_train], y[n_samples_train:] ############################################################################### # Compute train and test errors alphas = np.logspace(-5, 1, 60) enet = linear_model.ElasticNet(l1_ratio=0.7) train_errors = list() test_errors = list() for alpha in alphas: enet.set_params(alpha=alpha) enet.fit(X_train, y_train) train_errors.append(enet.score(X_train, y_train)) test_errors.append(enet.score(X_test, y_test)) i_alpha_optim = np.argmax(test_errors) alpha_optim = alphas[i_alpha_optim] print("Optimal regularization parameter : %s" % alpha_optim) # Estimate the coef_ on full data with optimal regularization parameter enet.set_params(alpha=alpha_optim) coef_ = enet.fit(X, y).coef_ ############################################################################### # Plot results functions import matplotlib.pyplot as plt plt.subplot(2, 
1, 1) plt.semilogx(alphas, train_errors, label='Train') plt.semilogx(alphas, test_errors, label='Test') plt.vlines(alpha_optim, plt.ylim()[0], np.max(test_errors), color='k', linewidth=3, label='Optimum on test') plt.legend(loc='lower left') plt.ylim([0, 1.2]) plt.xlabel('Regularization parameter') plt.ylabel('Performance') # Show estimated coef_ vs true coef plt.subplot(2, 1, 2) plt.plot(coef, label='True coef') plt.plot(coef_, label='Estimated coef') plt.legend() plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.26) plt.show()
bsd-3-clause
iamshang1/Projects
Papers/HiSAN/tf_hisan_multigpu.py
1
14655
import os
import numpy as np
import tensorflow as tf
from tensorflow.python.client import device_lib
import sys
import time
from sklearn.metrics import f1_score
import random


class hisan_multigpu(object):
    """Hierarchical Self-Attention Network (HiSAN) document classifier,
    data-parallel across multiple GPUs (TensorFlow 1.x graph mode).

    One input placeholder is created per GPU; each replica shares variables
    via the 'hisan' variable scope with AUTO_REUSE, and per-replica logits
    are concatenated back on axis 0 for loss/prediction.
    """

    def __init__(self,embedding_matrix,num_classes,max_sents,max_words,attention_heads=8,
                 attention_size=512,dropout_keep=0.9,num_gpus=1):
        # embedding_matrix: 2-D array (vocab_size, embedding_dim); last row is
        # treated as the unknown token (see self.unk_tok below).
        # max_sents/max_words: fixed (lines, words-per-line) document shape
        # produced by _split_lines.

        self.dropout_keep = dropout_keep
        self.dropout = tf.placeholder(tf.float32)  # keep-probability fed at run time
        self.ms = max_sents
        self.mw = max_words
        self.embedding_matrix = tf.get_variable('embeddings',
                                initializer=embedding_matrix.astype(np.float32),
                                dtype=tf.float32)
        self.attention_size = attention_size
        self.attention_heads = attention_heads
        self.num_gpus = num_gpus
        self.unk_tok = embedding_matrix.shape[0] - 1
        self.vocab_size = embedding_matrix.shape[0]

        with tf.variable_scope('hisan',reuse=tf.AUTO_REUSE):

            self.logits = []
            self.predictions = []

            #inputs
            self.doc_inputs = []
            self.labels = tf.placeholder(tf.int32,shape=[None])

            for g in range(self.num_gpus):
                with tf.device("/gpu:%d" % g):

                    #doc input
                    doc_input = tf.placeholder(tf.int32,
                                shape=[None,max_sents,max_words])
                    self.doc_inputs.append(doc_input)
                    # map_fn runs the attention stack once per document in the batch
                    doc_embeds = tf.map_fn(self._attention_step,doc_input,dtype=tf.float32)

                    #classification functions
                    logit = tf.layers.dense(doc_embeds,num_classes,
                            kernel_initializer=tf.contrib.layers.xavier_initializer(),
                            name='logits')
                    self.logits.append(logit)
                    self.predictions.append(tf.nn.softmax(logit))

            #predictions and optimizers
            self.predictions = tf.concat(self.predictions,0)
            self.logits = tf.concat(self.logits,0)
            # NOTE: loss is the per-example vector (no reduce_mean); the
            # optimizer minimizes it directly.
            self.loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
                        logits=self.logits,labels=self.labels)
            self.optimizer = tf.train.AdamOptimizer(0.0001,0.9,0.99).minimize(
                             self.loss,colocate_gradients_with_ops=True)

        #init op
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        self.saver = tf.train.Saver()
        self.sess = tf.Session(config=config)
        self.sess.run(tf.global_variables_initializer())

    def _attention_step(self,doc):
        """Embed one document (max_sents, max_words of token ids) into a
        single vector via word-level then sentence-level multi-head
        self-attention plus target attention.  Zero token ids are padding;
        empty trailing lines/words are trimmed before attending.
        """

        with tf.variable_scope('hisan',reuse=tf.AUTO_REUSE):

            # trim padding: per-line word counts and number of non-empty lines
            words_per_line = tf.count_nonzero(doc,1,dtype=tf.int32)
            num_lines = tf.count_nonzero(words_per_line,dtype=tf.int32)
            max_words_ = tf.reduce_max(words_per_line)
            doc_input_reduced = doc[:num_lines,:max_words_]
            num_words = words_per_line[:num_lines]

            #word embeddings
            word_embeds = tf.gather(self.embedding_matrix,doc_input_reduced)
            word_embeds = tf.nn.dropout(word_embeds,self.dropout)

            #masking
            mask_base = tf.cast(tf.sequence_mask(num_words,max_words_),tf.float32)
            mask = tf.tile(tf.expand_dims(mask_base,2),[1,1,self.attention_size])
            mask2 = tf.tile(tf.expand_dims(mask_base,2),[self.attention_heads,1,max_words_])
            mask3 = tf.tile(tf.expand_dims(mask_base,1),[self.attention_heads,1,1])

            #word self attention
            Q = tf.layers.conv1d(word_embeds,self.attention_size,1,padding='same',activation=tf.nn.elu,
                kernel_initializer=tf.contrib.layers.xavier_initializer(),name='word_Q')
            K = tf.layers.conv1d(word_embeds,self.attention_size,1,padding='same',activation=tf.nn.elu,
                kernel_initializer=tf.contrib.layers.xavier_initializer(),name='word_K')
            V = tf.layers.conv1d(word_embeds,self.attention_size,1,padding='same',activation=tf.nn.elu,
                kernel_initializer=tf.contrib.layers.xavier_initializer(),name='word_V')

            Q = tf.multiply(Q,mask)
            K = tf.multiply(K,mask)
            V = tf.multiply(V,mask)

            # split channels into heads and stack them on the batch axis
            Q_ = tf.concat(tf.split(Q,self.attention_heads,axis=2),axis=0)
            K_ = tf.concat(tf.split(K,self.attention_heads,axis=2),axis=0)
            V_ = tf.concat(tf.split(V,self.attention_heads,axis=2),axis=0)

            # scaled dot-product attention; (mask2-1)*1e10 pushes padded
            # positions to ~-inf before the softmax
            outputs = tf.matmul(Q_,tf.transpose(K_,[0, 2, 1]))
            outputs = outputs/(K_.get_shape().as_list()[-1]**0.5)
            outputs = tf.add(outputs,(mask2-1)*1e10)
            outputs = tf.nn.dropout(tf.nn.softmax(outputs),self.dropout)
            outputs = tf.multiply(outputs,mask2)
            outputs = tf.matmul(outputs,V_)
            word_outputs = tf.concat(tf.split(outputs,self.attention_heads,axis=0),axis=2)
            word_outputs = tf.multiply(word_outputs,mask)

            #word target attention
            # learned query vector that pools each line's words into one
            # sentence embedding
            Q = tf.get_variable('word_Q',(1,1,self.attention_size),
                tf.float32,tf.orthogonal_initializer())
            Q = tf.tile(Q,[num_lines,1,1])

            Q_ = tf.concat(tf.split(Q,self.attention_heads,axis=2),axis=0)
            K_ = tf.concat(tf.split(word_outputs,self.attention_heads,axis=2),axis=0)
            V_ = tf.concat(tf.split(word_outputs,self.attention_heads,axis=2),axis=0)

            outputs = tf.matmul(Q_,tf.transpose(K_,[0, 2, 1]))
            outputs = outputs/(K_.get_shape().as_list()[-1]**0.5)
            outputs = tf.add(outputs,(mask3-1)*1e10)
            outputs = tf.nn.dropout(tf.nn.softmax(outputs),self.dropout)
            outputs = tf.matmul(outputs,V_)
            sent_embeds = tf.concat(tf.split(outputs,self.attention_heads,axis=0),axis=2)
            sent_embeds = tf.transpose(sent_embeds,[1,0,2])
            sent_embeds = tf.nn.dropout(sent_embeds,self.dropout)

            #sent self attention
            Q = tf.layers.conv1d(sent_embeds,self.attention_size,1,padding='same',activation=tf.nn.elu,
                kernel_initializer=tf.contrib.layers.xavier_initializer(),name='sent_Q')
            K = tf.layers.conv1d(sent_embeds,self.attention_size,1,padding='same',activation=tf.nn.elu,
                kernel_initializer=tf.contrib.layers.xavier_initializer(),name='sent_K')
            V = tf.layers.conv1d(sent_embeds,self.attention_size,1,padding='same',activation=tf.nn.elu,
                kernel_initializer=tf.contrib.layers.xavier_initializer(),name='sent_V')

            Q_ = tf.concat(tf.split(Q,self.attention_heads,axis=2),axis=0)
            K_ = tf.concat(tf.split(K,self.attention_heads,axis=2),axis=0)
            V_ = tf.concat(tf.split(V,self.attention_heads,axis=2),axis=0)

            # no padding mask here: all remaining lines are non-empty
            outputs = tf.matmul(Q_,tf.transpose(K_,[0, 2, 1]))
            outputs = outputs/(K_.get_shape().as_list()[-1]**0.5)
            outputs = tf.nn.dropout(tf.nn.softmax(outputs),self.dropout)
            outputs = tf.matmul(outputs,V_)
            sent_outputs = tf.concat(tf.split(outputs,self.attention_heads,axis=0),axis=2)

            #sent target attention
            # learned query that pools sentence embeddings into the document
            # embedding
            Q = tf.get_variable('sent_Q_target',(1,1,self.attention_size),
                tf.float32,tf.orthogonal_initializer())

            Q_ = tf.concat(tf.split(Q,self.attention_heads,axis=2),axis=0)
            K_ = tf.concat(tf.split(sent_outputs,self.attention_heads,axis=2),axis=0)
            V_ = tf.concat(tf.split(sent_outputs,self.attention_heads,axis=2),axis=0)

            outputs = tf.matmul(Q_,tf.transpose(K_,[0, 2, 1]))
            outputs = outputs/(K_.get_shape().as_list()[-1]**0.5)
            outputs = tf.nn.dropout(tf.nn.softmax(outputs),self.dropout)
            outputs = tf.matmul(outputs,V_)
            doc_embed = tf.concat(tf.split(outputs,self.attention_heads,axis=0),axis=2)
            doc_embed = tf.nn.dropout(tf.squeeze(doc_embed,[0]),self.dropout)

        # squeeze the remaining singleton axis -> (attention_size,) vector
        return tf.squeeze(doc_embed,[0])

    def _split_lines(self,data,noise=False):
        """Reshape flat token-id docs into (max_sents, max_words) grids and
        split the batch into per-GPU sub-batches.

        When noise=True (training), randomly pads the front of each doc with
        unk tokens and randomly replaces one token in each full line --
        simple data augmentation.  Returns a list of num_gpus arrays.
        """

        batch_size = len(data)
        retval = np.zeros((batch_size,self.ms,self.mw))
        for i,doc in enumerate(data):
            doc_ = doc
            doc_ = list(doc[doc.nonzero()])  # drop zero padding

            #randomly add padding to front
            if noise:
                pad_amt = np.random.randint(0,self.mw)
                doc_ = [int(self.unk_tok) for i in range(pad_amt)] + doc_

            tokens = len(doc_)
            # chop the flat token list into fixed-width lines
            for j,line in enumerate([doc_[i:i+self.mw] for i in range(0,tokens,self.mw)]):
                line_ = line
                l = len(line_)

                #randomly replace tokens
                if noise and np.count_nonzero(line) == self.mw:
                    r_idx = np.random.randint(0,self.mw)
                    line_[r_idx] = np.random.randint(1,self.vocab_size)

                retval[i,j,:l] = line_

        gpu_batch_size = int(np.ceil(batch_size/self.num_gpus))
        data_split = [retval[i:i+gpu_batch_size] for i in
                      range(0,batch_size,gpu_batch_size)]
        return data_split

    def train(self,data,labels,batch_size=128,epochs=1000,patience=5,validation_data=None,
              savebest=False,filepath=None):
        """Train with early stopping on validation micro-F1.

        NOTE(review): validation_data is dereferenced unconditionally in the
        per-epoch scoring below, so passing validation_data=None will raise;
        confirm whether training without a validation set was ever intended.
        """

        if savebest==True and filepath==None:
            raise Exception("Please enter a path to save the network")

        if validation_data:
            validation_size = len(validation_data[0])
        else:
            validation_size = len(data)

        print('training network on %i documents, validating on %i documents' \
              % (len(data), validation_size))

        #track best model for saving
        prevbest = 0
        pat_count = 0

        for ep in range(epochs):

            #shuffle data
            xy = list(zip(data,labels))
            random.shuffle(xy)
            data,labels = zip(*xy)
            data = np.array(data)
            labels = np.array(labels)

            preds = []

            #train
            for start in range(0,len(data),batch_size):

                #get batch index
                if start+batch_size < len(data):
                    stop = start+batch_size
                else:
                    stop = len(data)

                data_split = self._split_lines(data[start:stop],noise=True)
                feed_dict = {self.dropout:self.dropout_keep,self.labels:labels[start:stop]}
                for g in range(self.num_gpus):
                    feed_dict[self.doc_inputs[g]] = data_split[g]
                pred,loss,_ = self.sess.run([self.predictions,self.loss,self.optimizer],
                              feed_dict=feed_dict)
                preds.append(np.argmax(pred,1))

                sys.stdout.write("epoch %i, training sample %i of %i \r"\
                                 % (ep+1,stop,len(data)))
                sys.stdout.flush()

            print()

            #checkpoint after every epoch
            preds = np.concatenate(preds,0)
            micro = f1_score(labels,preds,average='micro')
            macro = f1_score(labels,preds,average='macro')
            print("epoch %i training micro/macro: %.4f, %.4f" % (ep+1,micro,macro))

            micro,macro = self.score(validation_data[0],validation_data[1],
                          batch_size=batch_size)
            print("epoch %i validation micro/macro: %.4f, %.4f" % (ep+1,micro,macro))

            #save if performance better than previous best
            if micro >= prevbest:
                prevbest = micro
                pat_count = 0
                if savebest:
                    self.save(filepath)
            else:
                pat_count += 1
                if pat_count >= patience:
                    break

            #reset timer
            # NOTE(review): start_time is assigned but never read in this
            # method -- looks like leftover instrumentation.
            start_time = time.time()

    def predict(self,data,batch_size=128):
        """Return the predicted class index (argmax of softmax) for each
        document, batching through the graph with dropout disabled."""

        preds = []
        for start in range(0,len(data),batch_size):

            #get batch index
            if start+batch_size < len(data):
                stop = start+batch_size
            else:
                stop = len(data)

            data_split = self._split_lines(data[start:stop])
            feed_dict = {self.dropout:1.0}
            for g in range(self.num_gpus):
                feed_dict[self.doc_inputs[g]] = data_split[g]
            pred = self.sess.run(self.predictions,feed_dict=feed_dict)
            preds.append(np.argmax(pred,1))
            sys.stdout.write("predicting sample %i of %i \r" % (stop,len(data)))
            sys.stdout.flush()

        print()
        preds = np.concatenate(preds,0)
        return preds

    def score(self,data,labels,batch_size=128):
        """Return (micro-F1, macro-F1) of predictions against labels."""
        preds = self.predict(data,batch_size=batch_size)
        micro = f1_score(labels,preds,average='micro')
        macro = f1_score(labels,preds,average='macro')
        return micro,macro

    def save(self,filename):
        """Checkpoint all graph variables to filename."""
        self.saver.save(self.sess,filename)

    def load(self,filename):
        """Restore graph variables from a checkpoint written by save()."""
        self.saver.restore(self.sess,filename)


if __name__ == "__main__":

    '''
    dummy test data
    '''

    n_gpus = len([x.name for x in device_lib.list_local_devices()
                  if 'GPU' in x.device_type])
    print('training on %i gpus' % n_gpus)

    #params
    batch_size = 32
    epochs = 30
    train_samples = 1000
    test_samples = 500
    vocab_size = 1000
    max_words = 200
    num_classes = 3
    embedding_size = 100
    attention_heads = 4
    attention_size = 200

    #create data
    vocab = np.random.rand(vocab_size,embedding_size)
    X_train = []
    X_test = []
    for i in range(train_samples):
        l = np.random.randint(50,max_words)
        X = np.zeros(max_words)
        X[:l] = np.random.randint(1,vocab_size,l)
        X_train.append(X)
    for i in range(test_samples):
        l = np.random.randint(50,max_words)
        X = np.zeros(max_words)
        X[:l] = np.random.randint(1,vocab_size,l)
        X_test.append(X)
    X_train = np.vstack(X_train)
    X_test = np.vstack(X_test)
    y_train = np.random.randint(0,num_classes,train_samples)
    y_test = np.random.randint(0,num_classes,test_samples)

    #make save dir
    if not os.path.exists('savedmodels'):
        os.makedirs('savedmodels')

    #train model
    # documents are split into lines of 15 tokens, hence
    # max_sents = ceil(max_words/15)+1 (the +1 leaves room for the random
    # front padding added by _split_lines)
    model = hisan_multigpu(vocab,num_classes,int(np.ceil(max_words/15)+1),15,
            attention_heads,attention_size,num_gpus=n_gpus)
    model.train(X_train,y_train,batch_size,epochs,validation_data=(X_test,y_test),
                savebest=True,filepath='savedmodels/model.ckpt')
    model.load('savedmodels/model.ckpt')
mit
ycopin/spectrogrism
spectrogrism/snifs.py
1
4440
# -*- coding: utf-8 -*-
# Time-stamp: <2016-03-23 00:19 ycopin@lyonovae03.in2p3.fr>

"""
snifs
-----

SNIFS optical configuration and utilities.
"""

from __future__ import division, print_function, absolute_import

__author__ = "Yannick Copin <y.copin@ipnl.in2p3.fr>"

import warnings

import numpy as N

if __name__ == "__main__":
    # Cannot import explicitely local spectrogrism using relative import
    # in a script ("main"):
    # from . import spectrogrism as S
    # ValueError: Attempted relative import in non-package
    import spectrogrism as S           # Import *local* spectrogrism module
else:
    from . import spectrogrism as S

#: SNIFS optical configuration, R-channel
SNIFS_R = S.OptConfig([
    ('name', "SNIFS-R"),                  # Configuration name
    ('wave_ref', 0.76e-6),                # Reference wavelength [m]
    ('wave_range', [0.5e-6, 1.02e-6]),    # Standard wavelength range [m]
    # Telescope
    ('telescope_flength', 22.5),          # Focal length [m]
    # Collimator
    ('collimator_flength', 169.549e-3),   # Focal length [m]
    ('collimator_distortion', +2.141),    # r² distortion coefficient
    ('collimator_lcolor_coeffs', [-4.39879e-6, 8.91241e-10, -1.82941e-13]),
    # Grism
    ('grism_prism_material', 'BK7'),      # Prism glass
    ('grism_prism_angle', 17.28 / S.RAD2DEG),   # Prism angle [rad]
    ('grism_grating_rho', 200.),          # Grating groove density [lines/mm]
    ('grism_dispersion', 2.86),           # Informative spectral dispersion [AA/px]
    ('grism_grating_material', 'EPR'),    # Grating resine
    ('grism_grating_blaze', 15. / S.RAD2DEG),   # Blaze angle [rad]
    # Camera
    ('camera_flength', 228.014e-3),       # Focal length [m]
    ('camera_distortion', -0.276),        # r² distortion coefficient
    ('camera_lcolor_coeffs', [+2.66486e-6, -5.52303e-10, 1.1365e-13]),
    # Detector
    ('detector_pxsize', 15e-6),           # Detector pixel size [m]
    ('detector_angle', 0. / S.RAD2DEG),   # Rotation of the detector (0=blue is up)
])

#: SNIFS simulation configuration
SNIFS_SIMU = S.SimConfig([
    ('name', u"standard"),                # Configuration name
    ('wave_npx', 10),                     # Nb of pixels per spectrum
    ('modes', (1, 0, 2, -1)),             # Dispersion orders
    # Focal plane sampling
    ('input_coords', N.linspace(-1e-2, 1e-2, 5)),  # Focal plane grid [m]
    ('input_angle', -10. / S.RAD2DEG),    # Focal plane angle [rad]
])


# Simulations ==============================

def plot_SNIFS(optcfg=SNIFS_R, simcfg=SNIFS_SIMU, test=True, verbose=False):
    """
    Test-case w/ SNIFS-like configuration.

    Builds a :class:`spectrogrism.Spectrograph` from *optcfg*, optionally
    runs the forward/backward round-trip consistency test for each
    dispersion mode in *simcfg* (warning on mismatch), then predicts and
    plots spectra positions on the detector.  Returns the matplotlib Axes.
    """

    # Optical configuration
    print(optcfg)

    # Spectrograph
    spectro = S.Spectrograph(optcfg)
    print(spectro)

    # Simulation configuration
    print(simcfg)

    if test:
        print(" Spectrograph round-trip test ".center(S.LINEWIDTH, '-'))
        for mode in simcfg['modes']:
            if not spectro.test(simcfg.get_wavelengths(optcfg),
                                mode=mode, verbose=verbose):
                warnings.warn(
                    "Order #{}: backward modeling does not match."
                    .format(mode))
            else:
                print("Order #{:+d}: OK".format(mode))

    positions = spectro.predict_positions(simcfg)
    ax = positions.plot(modes=(-1, 0, 1, 2), blaze=True)
    ax.legend(loc='upper left', fontsize='small',
              frameon=True, framealpha=0.5)
    ax.set_aspect('auto')
    # Detector viewport expressed in mm (pixel units -> metres -> mm)
    ax.axis(N.array([-2000, 2000, -4000, 4000]) *
            spectro.detector.pxsize / 1e-3)            # [mm]

    return ax

# Main ====================================================

if __name__ == '__main__':

    import matplotlib.pyplot as P
    try:
        import seaborn
        seaborn.set_style("whitegrid")
    except ImportError:
        pass

    ax = plot_SNIFS(test=True, verbose=False)

    # Optional HTML export back-end selector: 'mpld3', 'bokeh' or False
    embed_html = False

    if embed_html == 'mpld3':
        try:
            S.dump_mpld3(ax.figure, 'SNIFS-R_mpld3.html')
        except ImportError:
            warnings.warn("MPLD3 is not available, cannot export to HTML.")
    elif embed_html == 'bokeh':
        try:
            S.dump_bokeh(ax.figure, 'SNIFS-R_bokeh.html')
        except ImportError:
            warnings.warn("Bokeh is not available, cannot export to HTML.")
    elif embed_html:
        warnings.warn("Unknown HTML method '{}'".format(embed_html))

    P.show()
lgpl-3.0
ashiklom/studyGroup
lessons/RISE/demoUtilities.py
2
3183
import numpy as np import matplotlib as mp import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D def plotSetup(xmin = -3.0, xmax = 3.0, ymin = -3.0, ymax = 3.0, size=(6,6)): """ refactored version of ut.plotSetup to hide as much as possible when showing code basics of 2D plot setup defaults: xmin = -3.0, xmax = 3.0, ymin = -3.0, ymax = 3.0, size=(6,6) size is by default 6 inches by 6 inches """ fig = plt.figure(figsize=size) ax = fig.add_subplot(1, 1, 1, aspect='equal') plt.xlim([xmin, xmax]) plt.ylim([ymin, ymax]) ax.axes.set_xlim([xmin, xmax]) centerAxes(ax) return ax def AxVS(A,x): """ Takes a matrix A and a vector x and returns their product """ m,n = np.shape(A) b = np.zeros(m) for i in range(n): b = b + x[i] * A[:,i] return b def mnote(): res = np.array( [[193,47], [140,204], [123,193], [99,189], [74,196], [58,213], [49,237], [52,261], [65,279], [86,292], [113,295], [135,282], [152,258], [201,95], [212,127], [218,150], [213,168], [201,185], [192,200], [203,214], [219,205], [233,191], [242,170], [244,149], [242,131], [233,111]]) return res.T/150.0 def centerAxes (ax): ax.spines['left'].set_position('zero') ax.spines['right'].set_color('none') ax.spines['bottom'].set_position('zero') ax.spines['top'].set_color('none') ax.spines['left'].set_smart_bounds(True) ax.spines['bottom'].set_smart_bounds(True) ax.xaxis.set_ticks_position('bottom') ax.yaxis.set_ticks_position('left') bounds = np.array([ax.axes.get_xlim(), ax.axes.get_ylim()]) ax.plot(bounds[0][0],bounds[1][0],'') ax.plot(bounds[0][1],bounds[1][1],'') # ax.plot(bounds, '') def plotSquare(x,color='b'): y = np.concatenate((x,x[:,[0]]),axis=1) plt.plot(y[0],y[1],'b-') plt.plot(y[0,0],y[1,0],'ro') plt.plot(y[0,1],y[1,1],'go') plt.plot(y[0,2],y[1,2],'co') plt.plot(y[0,3],y[1,3],'yo') plt.fill(x[0],x[1],color,alpha=0.15) def plotShape(x,color='b'): y = np.concatenate((x,x[:,[0]]),axis=1) plt.plot(y[0],y[1],'{}-'.format(color)) plt.fill(x[0],x[1],color,alpha=0.15) if __name__ == "__main__": # 
circle = np.zeros((2,20)) # for i in range(20): # circle[0,i] = np.sin(2 * 3.14 * (i/20.0)) # circle[1,i] = np.cos(2 * 3.14 * (i/20.0)) # fig = plt.figure() # ax = fig.add_subplot(111, aspect='equal') # plt.plot(circle[0,:],circle[1,:],'o') square = np.array([[0.0,1,1,0],[1,1,0,0]]) fig = plt.figure() ax = plotSetup(-4,4,-4,4) centerAxes(ax) plotSquare(square) # shear matrix shear = np.array([[1.0, 1.5],[0.0,1.0]]) ssquare = shear.dot(square) plotSquare(ssquare) # rotation matrix angle = 10.0 theta = (angle/360.0) * 2.0 * np.pi rotate = np.array([[np.cos(theta), -np.sin(theta)],[np.sin(theta), np.cos(theta)]]) rsquare = rotate.dot(square) plotSquare(2*rsquare)
apache-2.0
mojoboss/scikit-learn
examples/linear_model/plot_ols_ridge_variance.py
387
2060
#!/usr/bin/python # -*- coding: utf-8 -*- """ ========================================================= Ordinary Least Squares and Ridge Regression Variance ========================================================= Due to the few points in each dimension and the straight line that linear regression uses to follow these points as well as it can, noise on the observations will cause great variance as shown in the first plot. Every line's slope can vary quite a bit for each prediction due to the noise induced in the observations. Ridge regression is basically minimizing a penalised version of the least-squared function. The penalising `shrinks` the value of the regression coefficients. Despite the few data points in each dimension, the slope of the prediction is much more stable and the variance in the line itself is greatly reduced, in comparison to that of the standard linear regression """ print(__doc__) # Code source: Gaël Varoquaux # Modified for documentation by Jaques Grobler # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt from sklearn import linear_model X_train = np.c_[.5, 1].T y_train = [.5, 1] X_test = np.c_[0, 2].T np.random.seed(0) classifiers = dict(ols=linear_model.LinearRegression(), ridge=linear_model.Ridge(alpha=.1)) fignum = 1 for name, clf in classifiers.items(): fig = plt.figure(fignum, figsize=(4, 3)) plt.clf() plt.title(name) ax = plt.axes([.12, .12, .8, .8]) for _ in range(6): this_X = .1 * np.random.normal(size=(2, 1)) + X_train clf.fit(this_X, y_train) ax.plot(X_test, clf.predict(X_test), color='.5') ax.scatter(this_X, y_train, s=3, c='.5', marker='o', zorder=10) clf.fit(X_train, y_train) ax.plot(X_test, clf.predict(X_test), linewidth=2, color='blue') ax.scatter(X_train, y_train, s=30, c='r', marker='+', zorder=10) ax.set_xticks(()) ax.set_yticks(()) ax.set_ylim((0, 1.6)) ax.set_xlabel('X') ax.set_ylabel('y') ax.set_xlim(0, 2) fignum += 1 plt.show()
bsd-3-clause
koningrobot/sparse-coding-theano
example.py
2
1895
import random import math import numpy as np import scipy.io import matplotlib.pyplot as plt import matplotlib.cm as cm import logging logging.basicConfig(level=logging.WARNING) import sparse_coding as sc def onto_unit(x): a = np.min(x) b = np.max(x) return (x - a) / (b - a) def visualize_patches(B): # assume square mpatch = int(math.floor(math.sqrt(B.shape[0]))) npatch = mpatch m = int(math.floor(math.sqrt(B.shape[1]))) n = int(math.ceil(B.shape[1] * 1.0 / m)) collage = np.zeros((m*mpatch, n*npatch)) for i in xrange(m): for j in xrange(n): try: patch = B[:, i*n + j] except IndexError: continue patch = onto_unit(patch.reshape((mpatch, npatch))) collage[i*mpatch:(i+1)*mpatch, j*npatch:(j+1)*npatch] = patch plt.imshow(collage, cmap=cm.gray) def callback(X, B, S): plt.subplot(2, 2, 1) visualize_patches(X) plt.title("originals") plt.subplot(2, 2, 2) visualize_patches(B) plt.title("bases") plt.subplot(2, 2, 3) visualize_patches(np.dot(B, S)) plt.title("reconstructions") plt.subplot(2, 2, 4) visualize_patches(X - np.dot(B, S)) plt.title("differences") plt.show() images = scipy.io.loadmat("IMAGES.mat")["IMAGES"] patch_size = 8 num_patches = 4 columns = [] for i in xrange(num_patches): j = random.randint(0, images.shape[2] - 1) y, x = [random.randint(0, images.shape[d] - patch_size) for d in (0, 1)] column = images[x:x+patch_size, y:y+patch_size, j].reshape((patch_size**2, 1)) columns.append(column) X = np.hstack(columns) # test callback function on svd #svd = np.linalg.svd(X, full_matrices=False) #print [x.shape for x in svd] #callback(X, svd[0], np.dot(np.diag(svd[1]), svd[2])) num_bases = 64 sc.sparse_coding(X, num_bases, 0.4, 100, lambda B, S: callback(X, B, S))
mit
astrolitterbox/SAMI
utils.py
1
6843
from __future__ import division import numpy as np from astropy.coordinates.distances import Distance import matplotlib.pyplot as plt import pyfits import db import pyfits from string import * from astroML.plotting import hist from geom import getIncl def simple_plot(x, y, vel, filename): fig = plt.figure(figsize=(10, 10)) ax = fig.add_subplot(111, aspect='equal',autoscale_on=False, xlim=[-25,25], ylim=[-25,25]) cb = ax.scatter(x, y, c=vel, edgecolor="none", vmin=-120, vmax=120) plt.colorbar(cb) ax.axhline(c='k') ax.axvline(c='k') plt.savefig(filename) def get_GAMA_incl(sami_id): sami_id = str(sami_id) GAMA_file = 'db/metadata/'+sami_id+"/"+sami_id+"_GAMA_metadata.fits.gz" ell = pyfits.getdata(GAMA_file, extname='SERSICCATALL', header=False)['GAL_ELLIP_R'][0] ba = 1-ell incl = getIncl(ba) return incl def plot_hist(x, filename): fig = plt.figure(figsize=(10, 10)) hist(x, bins='scott') plt.savefig(filename) plt.close() def get_SAMI_data(sami_id): r50 = db.dbUtils.getFromDB('R_e', 'db/SAMI.sqlite', 'SAMI_Master ', ' where sami_id = '+ str(sami_id))[0] W50 = db.dbUtils.getFromDB('W50', 'db/SAMI.sqlite', 'ALFALFA_Xmatch ', ' where sami_id = '+ str(sami_id))[0] W50_err = db.dbUtils.getFromDB('W50_err', 'db/SAMI.sqlite', 'ALFALFA_Xmatch ', ' where sami_id = '+ str(sami_id))[0] return r50, W50, W50_err def get_SAMI_coords(sami_id): ra = db.dbUtils.getFromDB('ra', 'db/SAMI.sqlite', 'SAMI_Master ', ' where sami_id = '+ str(sami_id))[0] dec = db.dbUtils.getFromDB('dec', 'db/SAMI.sqlite', 'SAMI_Master ', ' where sami_id = '+ str(sami_id))[0] return ra, dec def get_delta_z(sami_id): z = db.dbUtils.getFromDB('z', 'db/SAMI.sqlite', 'SAMI_Master ', ' where sami_id = '+ str(sami_id))[0] alfalfa_id = db.dbUtils.getFromDB('ALFALFA_id', 'db/SAMI.sqlite', 'ALFALFA_Xmatch ', ' where sami_id = '+ str(sami_id))[0] alfalfa_z = db.dbUtils.getFromDB('V_P', 'db/SAMI.sqlite', 'ALFALFA ', ' where object ='+"'"+lstrip(str(alfalfa_id))+"'")/300000 return round(np.abs(float(z) - float(alfalfa_z)), 
6) def get_delta_coords(sami_id): ra, dec = get_SAMI_coords(sami_id) alfalfa_id = db.dbUtils.getFromDB('ALFALFA_id', 'db/SAMI.sqlite', 'ALFALFA_Xmatch ', ' where sami_id = '+ str(sami_id))[0] print 'where object ='+"'"+lstrip(str(alfalfa_id))+"'" alfalfa_ra = 15*db.dbUtils.getFromDB('raopt', 'db/SAMI.sqlite', 'ALFALFA ', ' where object ='+"'"+lstrip(str(alfalfa_id))+"'") alfalfa_dec = db.dbUtils.getFromDB('decopt', 'db/SAMI.sqlite', 'ALFALFA ', ' where object ='+"'"+lstrip(str(alfalfa_id))+"'") return (round(np.abs(float(alfalfa_ra) - float(ra)), 6), round(np.abs(float(alfalfa_dec) - float(dec)), 6)) def get_ALFALFA_W50(ra, dec): alfalfa_ra = db.dbUtils.getFromDB('raopt', 'db/SAMI.sqlite', 'ALFALFA') alfalfa_dec = db.dbUtils.getFromDB('decopt', 'db/SAMI.sqlite', 'ALFALFA') #print np.round(alfalfa_ra*15, 1), np.round(alfalfa_dec, 1) W50 = db.dbUtils.getFromDB('W50', 'db/SAMI.sqlite', 'ALFALFA ', ' where round(15*raopt, 1) = '+ str(np.round(ra, 1))+' and round(decopt, 1) = '+str(np.round(dec, 1)))[0] return W50 def get_stellar_velfield(filename): print filename all_vel = pyfits.getdata(filename, extname='VEL', header=False) all_vel_err = pyfits.getdata(filename, extname='VEL_ERR', header=False) #good = np.where(all_vel_err) < 100 #vel = all_vel[good] #vel_err = all_vel_err[good] print all_vel all_vel = np.ma.masked_invalid(all_vel) all_vel_err = np.ma.masked_invalid(all_vel_err) mask = np.where(all_vel_err < 300) print mask #getting indices, i.e. 
y and x: ind = np.column_stack(mask) - 25 x, y = np.asarray(zip(*ind)) vel_err = all_vel_err.filled()[mask] vel = all_vel.filled()[mask] #print 'HI', HI_linewidth return x, y, vel, vel_err def get_gas_velfield(filename): print filename all_vel = pyfits.getdata(filename, extname='V', header=False)[1] all_vel_err = pyfits.getdata(filename, extname='V_ERR', header=False)[1] #good = np.where(all_vel_err) < 100 #vel = all_vel[good] #vel_err = all_vel_err[good] all_vel = np.ma.masked_invalid(all_vel) all_vel_err = np.ma.masked_invalid(all_vel_err) mask = np.where(all_vel_err < 20) #getting indices, i.e. y and x: ind = np.column_stack(mask) - 25 x, y = np.asarray(zip(*ind)) vel_err = all_vel_err.filled()[mask] vel = all_vel.filled()[mask] #print 'HI', HI_linewidth return x, y, vel, vel_err def angular2physical(arcsec, z): #return physical effective diameter of the galaxy in kpc return (np.radians(arcsec/3600) *Distance(z=z).kpc / (1 + z)**2) def sqlify(arr): strings = '' for i in arr: if type(i) == type(tuple()): i = i[0] strings = strings+","+'"'+strip(str(i))+'"' strings = '('+strings[1:]+')' return strings def convert_pc_to_meters(pc): return pc*3.0857*10e16 def decodeU(query_output): output = [] for u in query_output: u = str(u) output.append(u) return output def get_ALFALFA_data(): ra = db.dbUtils.getFromDB('ra', 'db/SAMI.sqlite', 'SAMI_Master ') dec = db.dbUtils.getFromDB('dec', 'db/SAMI.sqlite', 'SAMI_Master ') SAMI_all_ids = db.dbUtils.getFromDB('sami_id', 'db/SAMI.sqlite', 'SAMI_Master ') raopt = db.dbUtils.getFromDB('raopt', 'db/SAMI.sqlite', 'ALFALFA') decopt = db.dbUtils.getFromDB('decopt', 'db/SAMI.sqlite', 'ALFALFA') #print np.round(alfalfa_ra*15, 1), np.round(alfalfa_dec, 1) ALFALFA_ids = [] sami_ids = [] for sami_ra, sami_dec, sami_id in zip(ra, dec, SAMI_all_ids): obj = (db.dbUtils.getFromDB('Object', 'db/SAMI.sqlite', 'ALFALFA ', ' where round(15*raopt, 1) = '+ str(np.round(sami_ra, 1))+' and round(decopt, 1) = '+str(np.round(sami_dec, 1)))) if len(obj) 
== 1: obj = decodeU(obj)[0] ALFALFA_ids.append(obj) sami_ids.append(sami_id) W50 = db.dbUtils.getFromDB('W50', 'db/SAMI.sqlite', 'ALFALFA ', ' where Object in'+sqlify(ALFALFA_ids)) W50_err = db.dbUtils.getFromDB('Werr', 'db/SAMI.sqlite', 'ALFALFA ', ' where Object in'+sqlify(ALFALFA_ids)) int_flux = db.dbUtils.getFromDB('sintmap', 'db/SAMI.sqlite', 'ALFALFA ', ' where Object in'+sqlify(ALFALFA_ids)) ra = db.dbUtils.getFromDB('raopt', 'db/SAMI.sqlite', 'ALFALFA ', ' where Object in'+sqlify(ALFALFA_ids)) dec = db.dbUtils.getFromDB('decopt', 'db/SAMI.sqlite', 'ALFALFA ', ' where Object in'+sqlify(ALFALFA_ids)) SN = db.dbUtils.getFromDB('SN', 'db/SAMI.sqlite', 'ALFALFA ', ' where Object in'+sqlify(ALFALFA_ids)) rms = db.dbUtils.getFromDB('rms', 'db/SAMI.sqlite', 'ALFALFA ', ' where Object in'+sqlify(ALFALFA_ids)) f = open('db/ALFALFA_Xmatch.csv', 'a') for i, s in enumerate(W50): #not galaxies have flux measurements print i, s f.write(str(sami_ids[i])+", "+ str(ALFALFA_ids[i])+", "+str(ra[i])+", "+str(dec[i])+", "+str(W50[i])+", "+str(W50_err[i])+", "+str(int_flux[i])+", "+str(SN[i])+", "+str(rms[i])+"\n") f.close()
gpl-2.0
ishank08/scikit-learn
examples/svm/plot_rbf_parameters.py
26
8016
''' ================== RBF SVM parameters ================== This example illustrates the effect of the parameters ``gamma`` and ``C`` of the Radial Basis Function (RBF) kernel SVM. Intuitively, the ``gamma`` parameter defines how far the influence of a single training example reaches, with low values meaning 'far' and high values meaning 'close'. The ``gamma`` parameters can be seen as the inverse of the radius of influence of samples selected by the model as support vectors. The ``C`` parameter trades off misclassification of training examples against simplicity of the decision surface. A low ``C`` makes the decision surface smooth, while a high ``C`` aims at classifying all training examples correctly by giving the model freedom to select more samples as support vectors. The first plot is a visualization of the decision function for a variety of parameter values on a simplified classification problem involving only 2 input features and 2 possible target classes (binary classification). Note that this kind of plot is not possible to do for problems with more features or target classes. The second plot is a heatmap of the classifier's cross-validation accuracy as a function of ``C`` and ``gamma``. For this example we explore a relatively large grid for illustration purposes. In practice, a logarithmic grid from :math:`10^{-3}` to :math:`10^3` is usually sufficient. If the best parameters lie on the boundaries of the grid, it can be extended in that direction in a subsequent search. Note that the heat map plot has a special colorbar with a midpoint value close to the score values of the best performing models so as to make it easy to tell them appart in the blink of an eye. The behavior of the model is very sensitive to the ``gamma`` parameter. If ``gamma`` is too large, the radius of the area of influence of the support vectors only includes the support vector itself and no amount of regularization with ``C`` will be able to prevent overfitting. 
When ``gamma`` is very small, the model is too constrained and cannot capture the complexity or "shape" of the data. The region of influence of any selected support vector would include the whole training set. The resulting model will behave similarly to a linear model with a set of hyperplanes that separate the centers of high density of any pair of two classes. For intermediate values, we can see on the second plot that good models can be found on a diagonal of ``C`` and ``gamma``. Smooth models (lower ``gamma`` values) can be made more complex by selecting a larger number of support vectors (larger ``C`` values) hence the diagonal of good performing models. Finally one can also observe that for some intermediate values of ``gamma`` we get equally performing models when ``C`` becomes very large: it is not necessary to regularize by limiting the number of support vectors. The radius of the RBF kernel alone acts as a good structural regularizer. In practice though it might still be interesting to limit the number of support vectors with a lower value of ``C`` so as to favor models that use less memory and that are faster to predict. We should also note that small differences in scores results from the random splits of the cross-validation procedure. Those spurious variations can be smoothed out by increasing the number of CV iterations ``n_splits`` at the expense of compute time. Increasing the value number of ``C_range`` and ``gamma_range`` steps will increase the resolution of the hyper-parameter heat map. ''' print(__doc__) import numpy as np import matplotlib.pyplot as plt from matplotlib.colors import Normalize from sklearn.svm import SVC from sklearn.preprocessing import StandardScaler from sklearn.datasets import load_iris from sklearn.model_selection import StratifiedShuffleSplit from sklearn.model_selection import GridSearchCV # Utility function to move the midpoint of a colormap to be around # the values of interest. 
class MidpointNormalize(Normalize): def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False): self.midpoint = midpoint Normalize.__init__(self, vmin, vmax, clip) def __call__(self, value, clip=None): x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1] return np.ma.masked_array(np.interp(value, x, y)) ############################################################################## # Load and prepare data set # # dataset for grid search iris = load_iris() X = iris.data y = iris.target # Dataset for decision function visualization: we only keep the first two # features in X and sub-sample the dataset to keep only 2 classes and # make it a binary classification problem. X_2d = X[:, :2] X_2d = X_2d[y > 0] y_2d = y[y > 0] y_2d -= 1 # It is usually a good idea to scale the data for SVM training. # We are cheating a bit in this example in scaling all of the data, # instead of fitting the transformation on the training set and # just applying it on the test set. scaler = StandardScaler() X = scaler.fit_transform(X) X_2d = scaler.fit_transform(X_2d) ############################################################################## # Train classifiers # # For an initial search, a logarithmic grid with basis # 10 is often helpful. Using a basis of 2, a finer # tuning can be achieved but at a much higher cost. 
C_range = np.logspace(-2, 10, 13) gamma_range = np.logspace(-9, 3, 13) param_grid = dict(gamma=gamma_range, C=C_range) cv = StratifiedShuffleSplit(n_splits=5, test_size=0.2, random_state=42) grid = GridSearchCV(SVC(), param_grid=param_grid, cv=cv) grid.fit(X, y) print("The best parameters are %s with a score of %0.2f" % (grid.best_params_, grid.best_score_)) # Now we need to fit a classifier for all parameters in the 2d version # (we use a smaller set of parameters here because it takes a while to train) C_2d_range = [1e-2, 1, 1e2] gamma_2d_range = [1e-1, 1, 1e1] classifiers = [] for C in C_2d_range: for gamma in gamma_2d_range: clf = SVC(C=C, gamma=gamma) clf.fit(X_2d, y_2d) classifiers.append((C, gamma, clf)) ############################################################################## # visualization # # draw visualization of parameter effects plt.figure(figsize=(8, 6)) xx, yy = np.meshgrid(np.linspace(-3, 3, 200), np.linspace(-3, 3, 200)) for (k, (C, gamma, clf)) in enumerate(classifiers): # evaluate decision function in a grid Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()]) Z = Z.reshape(xx.shape) # visualize decision function for these parameters plt.subplot(len(C_2d_range), len(gamma_2d_range), k + 1) plt.title("gamma=10^%d, C=10^%d" % (np.log10(gamma), np.log10(C)), size='medium') # visualize parameter's effect on decision function plt.pcolormesh(xx, yy, -Z, cmap=plt.cm.RdBu) plt.scatter(X_2d[:, 0], X_2d[:, 1], c=y_2d, cmap=plt.cm.RdBu_r) plt.xticks(()) plt.yticks(()) plt.axis('tight') scores = grid.cv_results_['mean_test_score'].reshape(len(C_range), len(gamma_range)) # Draw heatmap of the validation accuracy as a function of gamma and C # # The score are encoded as colors with the hot colormap which varies from dark # red to bright yellow. 
As the most interesting scores are all located in the # 0.92 to 0.97 range we use a custom normalizer to set the mid-point to 0.92 so # as to make it easier to visualize the small variations of score values in the # interesting range while not brutally collapsing all the low score values to # the same color. plt.figure(figsize=(8, 6)) plt.subplots_adjust(left=.2, right=0.95, bottom=0.15, top=0.95) plt.imshow(scores, interpolation='nearest', cmap=plt.cm.hot, norm=MidpointNormalize(vmin=0.2, midpoint=0.92)) plt.xlabel('gamma') plt.ylabel('C') plt.colorbar() plt.xticks(np.arange(len(gamma_range)), gamma_range, rotation=45) plt.yticks(np.arange(len(C_range)), C_range) plt.title('Validation accuracy') plt.show()
bsd-3-clause
josh314/squinty
train-agglo.py
1
2487
################################################################ # Cluster reduction of pixel data # # Usage: python2 train-agglo.py <training data> [options] # # Builds the a data transformer (FeatureAgglomeration clusting object) which # reduces problem dimension #Parse command line options and arguments #Do this before other imports so incorrect usage message appears quickly import optparse parser = optparse.OptionParser() parser.add_option("-o", action="store", type="string", dest="o") parser.add_option("-n", action="store", type="int", dest="n") parser.add_option("-r", action="store", type="string", dest="r") parser.set_defaults(o="out.csv", n=14*14, r="reducer.p") opts, args = parser.parse_args() if(len(args) < 1): print "Usage: python2 train-agglo.py <training data> [options]" raise SystemExit(1) infile = args[0] outfile = opts.o n_clusters = opts.n reducer_fn = opts.r #Other imports import pickle import pandas as pd import numpy as np from sklearn.feature_extraction.image import grid_to_graph import sklearn.cluster as cluster #Full resolution images fullres = pd.read_csv(infile,header=0) num_images = len(fullres) image_data = fullres.iloc[ :num_images, 1:].values labels = fullres.iloc[ :num_images, 0].values #Unflatten to proper 2D shape and extract the connectivity graph images = image_data.reshape(num_images,28,28) connectivity = grid_to_graph(*images[0].shape) #Do the clustering in feature space agglo = cluster.FeatureAgglomeration(connectivity=connectivity, n_clusters=n_clusters) agglo.fit(image_data) # Save agglo for future use in workflow. 
# Must use this same agglo for test data barrel=open(reducer_fn,'wb') pickle.dump(agglo,barrel) barrel.close() #Transform the original data into reduced feature space image_data_reduced = agglo.transform(image_data) #Reduced data out to file out = pd.DataFrame(image_data_reduced) out.insert(0, 'label', labels)#Add back the target labels out.to_csv(outfile,index=False) ### Uncomment below to see some demo approx images # import matplotlib.pyplot as plt #Go back to real space from eigenpixel space # image_data_approx = agglo.inverse_transform(image_data_reduced) # num_row, num_col = 5, 5 # #Unflatten to 2D # images_approx = np.reshape(image_data_approx, images.shape) # for idx in range(0,num_row*num_col): # plt.subplot(num_row, num_col, idx + 1) # plt.axis('off') # plt.imshow(images_approx[idx], cmap=plt.cm.gray_r, interpolation='nearest') # plt.show()
mit
john5223/airflow
airflow/hooks/base_hook.py
9
1407
from builtins import object import logging import random from airflow import settings from airflow.models import Connection from airflow.utils import AirflowException class BaseHook(object): """ Abstract base class for hooks, hooks are meant as an interface to interact with external systems. MySqlHook, HiveHook, PigHook return object that can handle the connection and interaction to specific instances of these systems, and expose consistent methods to interact with them. """ def __init__(self, source): pass def get_connections(self, conn_id): session = settings.Session() db = ( session.query(Connection) .filter(Connection.conn_id == conn_id) .all() ) if not db: raise AirflowException( "The conn_id `{0}` isn't defined".format(conn_id)) session.expunge_all() session.close() return db def get_connection(self, conn_id): conn = random.choice(self.get_connections(conn_id)) if conn.host: logging.info("Using connection to: " + conn.host) return conn def get_conn(self): raise NotImplemented() def get_records(self, sql): raise NotImplemented() def get_pandas_df(self, sql): raise NotImplemented() def run(self, sql): raise NotImplemented()
apache-2.0
CloverHealth/airflow
tests/hooks/test_hive_hook.py
2
16206
# -*- coding: utf-8 -*- # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # import datetime import itertools import os import pandas as pd import random import mock import unittest from collections import OrderedDict from hmsclient import HMSClient from airflow.exceptions import AirflowException from airflow.hooks.hive_hooks import HiveCliHook, HiveMetastoreHook, HiveServer2Hook from airflow import DAG, configuration from airflow.operators.hive_operator import HiveOperator from airflow.utils import timezone from airflow.utils.tests import assertEqualIgnoreMultipleSpaces configuration.load_test_config() DEFAULT_DATE = timezone.datetime(2015, 1, 1) DEFAULT_DATE_ISO = DEFAULT_DATE.isoformat() DEFAULT_DATE_DS = DEFAULT_DATE_ISO[:10] class HiveEnvironmentTest(unittest.TestCase): def setUp(self): configuration.load_test_config() args = {'owner': 'airflow', 'start_date': DEFAULT_DATE} self.dag = DAG('test_dag_id', default_args=args) self.next_day = (DEFAULT_DATE + datetime.timedelta(days=1)).isoformat()[:10] self.database = 'airflow' self.partition_by = 'ds' self.table = 'static_babynames_partitioned' self.hql = """ CREATE DATABASE IF NOT EXISTS {{ params.database }}; USE {{ params.database }}; DROP TABLE IF EXISTS {{ params.table }}; CREATE TABLE IF NOT 
EXISTS {{ params.table }} ( state string, year string, name string, gender string, num int) PARTITIONED BY ({{ params.partition_by }} string); ALTER TABLE {{ params.table }} ADD PARTITION({{ params.partition_by }}='{{ ds }}'); """ self.hook = HiveMetastoreHook() t = HiveOperator( task_id='HiveHook_' + str(random.randint(1, 10000)), params={ 'database': self.database, 'table': self.table, 'partition_by': self.partition_by }, hive_cli_conn_id='beeline_default', hql=self.hql, dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def tearDown(self): hook = HiveMetastoreHook() with hook.get_conn() as metastore: metastore.drop_table(self.database, self.table, deleteData=True) class TestHiveCliHook(unittest.TestCase): def test_run_cli(self): hook = HiveCliHook() hook.run_cli("SHOW DATABASES") @mock.patch('airflow.hooks.hive_hooks.HiveCliHook.run_cli') def test_load_file(self, mock_run_cli): filepath = "/path/to/input/file" table = "output_table" hook = HiveCliHook() hook.load_file(filepath=filepath, table=table, create=False) query = ( "LOAD DATA LOCAL INPATH '{filepath}' " "OVERWRITE INTO TABLE {table} \n" .format(filepath=filepath, table=table) ) mock_run_cli.assert_called_with(query) @mock.patch('airflow.hooks.hive_hooks.HiveCliHook.load_file') @mock.patch('pandas.DataFrame.to_csv') def test_load_df(self, mock_to_csv, mock_load_file): df = pd.DataFrame({"c": ["foo", "bar", "baz"]}) table = "t" delimiter = "," encoding = "utf-8" hook = HiveCliHook() hook.load_df(df=df, table=table, delimiter=delimiter, encoding=encoding) mock_to_csv.assert_called_once() kwargs = mock_to_csv.call_args[1] self.assertEqual(kwargs["header"], False) self.assertEqual(kwargs["index"], False) self.assertEqual(kwargs["sep"], delimiter) mock_load_file.assert_called_once() kwargs = mock_load_file.call_args[1] self.assertEqual(kwargs["delimiter"], delimiter) self.assertEqual(kwargs["field_dict"], {"c": u"STRING"}) self.assertTrue(isinstance(kwargs["field_dict"], 
OrderedDict)) self.assertEqual(kwargs["table"], table) @mock.patch('airflow.hooks.hive_hooks.HiveCliHook.load_file') @mock.patch('pandas.DataFrame.to_csv') def test_load_df_with_optional_parameters(self, mock_to_csv, mock_load_file): hook = HiveCliHook() b = (True, False) for create, recreate in itertools.product(b, b): mock_load_file.reset_mock() hook.load_df(df=pd.DataFrame({"c": range(0, 10)}), table="t", create=create, recreate=recreate) mock_load_file.assert_called_once() kwargs = mock_load_file.call_args[1] self.assertEqual(kwargs["create"], create) self.assertEqual(kwargs["recreate"], recreate) @mock.patch('airflow.hooks.hive_hooks.HiveCliHook.run_cli') def test_load_df_with_data_types(self, mock_run_cli): d = OrderedDict() d['b'] = [True] d['i'] = [-1] d['t'] = [1] d['f'] = [0.0] d['c'] = ['c'] d['M'] = [datetime.datetime(2018, 1, 1)] d['O'] = [object()] d['S'] = ['STRING'.encode('utf-8')] d['U'] = ['STRING'] d['V'] = [None] df = pd.DataFrame(d) hook = HiveCliHook() hook.load_df(df, 't') query = """ CREATE TABLE IF NOT EXISTS t ( b BOOLEAN, i BIGINT, t BIGINT, f DOUBLE, c STRING, M TIMESTAMP, O STRING, S STRING, U STRING, V STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' STORED AS textfile ; """ assertEqualIgnoreMultipleSpaces(self, mock_run_cli.call_args_list[0][0][0], query) class TestHiveMetastoreHook(HiveEnvironmentTest): VALID_FILTER_MAP = {'key2': 'value2'} def test_get_max_partition_from_empty_part_specs(self): max_partition = \ HiveMetastoreHook._get_max_partition_from_part_specs([], 'key1', self.VALID_FILTER_MAP) self.assertIsNone(max_partition) def test_get_max_partition_from_valid_part_specs_and_invalid_filter_map(self): with self.assertRaises(AirflowException): HiveMetastoreHook._get_max_partition_from_part_specs( [{'key1': 'value1', 'key2': 'value2'}, {'key1': 'value3', 'key2': 'value4'}], 'key1', {'key3': 'value5'}) def test_get_max_partition_from_valid_part_specs_and_invalid_partition_key(self): with self.assertRaises(AirflowException): 
HiveMetastoreHook._get_max_partition_from_part_specs( [{'key1': 'value1', 'key2': 'value2'}, {'key1': 'value3', 'key2': 'value4'}], 'key3', self.VALID_FILTER_MAP) def test_get_max_partition_from_valid_part_specs_and_none_partition_key(self): with self.assertRaises(AirflowException): HiveMetastoreHook._get_max_partition_from_part_specs( [{'key1': 'value1', 'key2': 'value2'}, {'key1': 'value3', 'key2': 'value4'}], None, self.VALID_FILTER_MAP) def test_get_max_partition_from_valid_part_specs_and_none_filter_map(self): max_partition = \ HiveMetastoreHook._get_max_partition_from_part_specs( [{'key1': 'value1', 'key2': 'value2'}, {'key1': 'value3', 'key2': 'value4'}], 'key1', None) # No partition will be filtered out. self.assertEqual(max_partition, b'value3') def test_get_max_partition_from_valid_part_specs(self): max_partition = \ HiveMetastoreHook._get_max_partition_from_part_specs( [{'key1': 'value1', 'key2': 'value2'}, {'key1': 'value3', 'key2': 'value4'}], 'key1', self.VALID_FILTER_MAP) self.assertEqual(max_partition, b'value1') def test_get_metastore_client(self): self.assertIsInstance(self.hook.get_metastore_client(), HMSClient) def test_get_conn(self): self.assertIsInstance(self.hook.get_conn(), HMSClient) def test_check_for_partition(self): partition = "{p_by}='{date}'".format(date=DEFAULT_DATE_DS, p_by=self.partition_by) missing_partition = "{p_by}='{date}'".format(date=self.next_day, p_by=self.partition_by) self.assertTrue( self.hook.check_for_partition(self.database, self.table, partition) ) self.assertFalse( self.hook.check_for_partition(self.database, self.table, missing_partition) ) def test_check_for_named_partition(self): partition = "{p_by}={date}".format(date=DEFAULT_DATE_DS, p_by=self.partition_by) missing_partition = "{p_by}={date}".format(date=self.next_day, p_by=self.partition_by) self.assertTrue( self.hook.check_for_named_partition(self.database, self.table, partition) ) self.assertFalse( self.hook.check_for_named_partition(self.database, 
self.table, missing_partition) ) def test_get_table(self): table_info = self.hook.get_table(db=self.database, table_name=self.table) self.assertEqual(table_info.tableName, self.table) columns = ['state', 'year', 'name', 'gender', 'num'] self.assertEqual([col.name for col in table_info.sd.cols], columns) def test_get_tables(self): tables = self.hook.get_tables(db=self.database, pattern=self.table + "*") self.assertIn(self.table, {table.tableName for table in tables}) def test_get_databases(self): databases = self.hook.get_databases(pattern='*') self.assertIn(self.database, databases) def test_get_partitions(self): partitions = self.hook.get_partitions(schema=self.database, table_name=self.table) self.assertEqual(len(partitions), 1) self.assertEqual(partitions, [{self.partition_by: DEFAULT_DATE_DS}]) def test_max_partition(self): filter_map = {self.partition_by: DEFAULT_DATE_DS} partition = self.hook.max_partition(schema=self.database, table_name=self.table, field=self.partition_by, filter_map=filter_map) self.assertEqual(partition, DEFAULT_DATE_DS.encode('utf-8')) def test_table_exists(self): self.assertTrue(self.hook.table_exists(self.table, db=self.database)) self.assertFalse( self.hook.table_exists(str(random.randint(1, 10000))) ) class TestHiveServer2Hook(unittest.TestCase): def _upload_dataframe(self): df = pd.DataFrame({'a': [1, 2], 'b': [1, 2]}) self.local_path = '/tmp/TestHiveServer2Hook.csv' df.to_csv(self.local_path, header=False, index=False) def setUp(self): configuration.load_test_config() self._upload_dataframe() args = {'owner': 'airflow', 'start_date': DEFAULT_DATE} self.dag = DAG('test_dag_id', default_args=args) self.database = 'airflow' self.table = 'hive_server_hook' self.hql = """ CREATE DATABASE IF NOT EXISTS {{ params.database }}; USE {{ params.database }}; DROP TABLE IF EXISTS {{ params.table }}; CREATE TABLE IF NOT EXISTS {{ params.table }} ( a int, b int) ROW FORMAT DELIMITED FIELDS TERMINATED BY ','; LOAD DATA LOCAL INPATH '{{ 
params.csv_path }}' OVERWRITE INTO TABLE {{ params.table }}; """ self.columns = ['{}.a'.format(self.table), '{}.b'.format(self.table)] self.hook = HiveMetastoreHook() t = HiveOperator( task_id='HiveHook_' + str(random.randint(1, 10000)), params={ 'database': self.database, 'table': self.table, 'csv_path': self.local_path }, hive_cli_conn_id='beeline_default', hql=self.hql, dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def tearDown(self): hook = HiveMetastoreHook() with hook.get_conn() as metastore: metastore.drop_table(self.database, self.table, deleteData=True) os.remove(self.local_path) def test_get_conn(self): hook = HiveServer2Hook() hook.get_conn() def test_get_records(self): hook = HiveServer2Hook() query = "SELECT * FROM {}".format(self.table) results = hook.get_records(query, schema=self.database) self.assertListEqual(results, [(1, 1), (2, 2)]) def test_get_pandas_df(self): hook = HiveServer2Hook() query = "SELECT * FROM {}".format(self.table) df = hook.get_pandas_df(query, schema=self.database) self.assertEqual(len(df), 2) self.assertListEqual(df.columns.tolist(), self.columns) self.assertListEqual(df[self.columns[0]].values.tolist(), [1, 2]) def test_get_results_header(self): hook = HiveServer2Hook() query = "SELECT * FROM {}".format(self.table) results = hook.get_results(query, schema=self.database) self.assertListEqual([col[0] for col in results['header']], self.columns) def test_get_results_data(self): hook = HiveServer2Hook() query = "SELECT * FROM {}".format(self.table) results = hook.get_results(query, schema=self.database) self.assertListEqual(results['data'], [(1, 1), (2, 2)]) def test_to_csv(self): hook = HiveServer2Hook() query = "SELECT * FROM {}".format(self.table) csv_filepath = 'query_results.csv' hook.to_csv(query, csv_filepath, schema=self.database, delimiter=',', lineterminator='\n', output_header=True) df = pd.read_csv(csv_filepath, sep=',') self.assertListEqual(df.columns.tolist(), 
self.columns) self.assertListEqual(df[self.columns[0]].values.tolist(), [1, 2]) self.assertEqual(len(df), 2) def test_multi_statements(self): sqls = [ "CREATE TABLE IF NOT EXISTS test_multi_statements (i INT)", "SELECT * FROM {}".format(self.table), "DROP TABLE test_multi_statements", ] hook = HiveServer2Hook() results = hook.get_records(sqls, schema=self.database) self.assertListEqual(results, [(1, 1), (2, 2)])
apache-2.0
rbdavid/Distance_matrix
Test_Case1/plotting_functions.py
4
13432
#!/Library/Frameworks/Python.framework/Versions/2.7/bin/python # USAGE: # from fn_plotting.py import * # PREAMBLE: import numpy as np import sys import os import matplotlib.pyplot as plt import matplotlib.colors as mcolors import matplotlib as mpl from matplotlib.ticker import NullFormatter stdev = np.std sqrt = np.sqrt nullfmt = NullFormatter() # ---------------------------------------- # PLOTTING SUBROUTINES def make_colormap(seq): """Return a LinearSegmentedColormap seq: a sequence of floats and RGB-tuples. The floats should be increasing and in the interval (0,1). """ seq = [(None,) * 3, 0.0] + list(seq) + [1.0, (None,) * 3] cdict = {'red': [], 'green': [], 'blue': []} for i, item in enumerate(seq): if isinstance(item, float): r1, g1, b1 = seq[i - 1] r2, g2, b2 = seq[i + 1] cdict['red'].append([item, r1, r2]) cdict['green'].append([item, g1, g2]) cdict['blue'].append([item, b1, b2]) return mcolors.LinearSegmentedColormap('CustomMap', cdict) def plot_1d(xdata, ydata, color, x_axis, y_axis, system, analysis, average = False, t0 = 0, **kwargs): """ Creates a 1D scatter/line plot: Usage: plot_1d(xdata, ydata, color, x_axis, y_axis, system, analysis, average = [False|True], t0 = 0) Arguments: xdata, ydata: self-explanatory color: color to be used to plot data x_axis, y_axis: strings to be used for the axis label system: descriptor for the system that produced the data analysis: descriptor for the analysis that produced the data average: [False|True]; Default is False; if set to True, the function will calc the average, standard dev, and standard dev of mean of the y-data # THERE IS A BUG IF average=True; must read in yunits for this function to work at the moment. 
t0: index to begin averaging from; Default is 0 kwargs: xunits, yunits: string with correct math text describing the units for the x/y data x_lim, y_lim: list w/ two elements, setting the limits of the x/y ranges of plot plt_title: string to be added as the plot title draw_line: int value that determines the line style to be drawn; giving myself space to add more line styles if I decide I need them """ # INITIATING THE PLOT... plt.plot(xdata, ydata, '%s' %(color)) # READING IN KWARG DICTIONARY INTO SPECIFIC VARIABLES for name, value in kwargs.items(): if name == 'xunits': x_units = value x_axis = '%s (%s)' %(x_axis, value) elif name == 'yunits': y_units = value y_axis = '%s (%s)' %(y_axis, value) elif name == 'x_lim': plt.xlim(value) elif name == 'y_lim': plt.ylim(value) elif name == 'plt_title': plt.title(r'%s' %(value), size='14') elif name == 'draw_line': draw_line = value if draw_line == 1: plt.plot([0,max(ydata)],[0,max(ydata)],'r-',linewidth=2) else: print 'draw_line = %s has not been defined in plotting_functions script' %(line_value) plt.grid(b=True, which='major', axis='both', color='#808080', linestyle='--') plt.xlabel(r'%s' %(x_axis), size=12) plt.ylabel(r'%s' %(y_axis), size=12) # CALCULATING THE AVERAGE/SD/SDOM OF THE Y-DATA if average != False: avg = np.sum(ydata[t0:])/len(ydata[t0:]) SD = stdev(ydata[t0:]) SDOM = SD/sqrt(len(ydata[t0:])) plt.axhline(avg, xmin=0.0, xmax=1.0, c='r') plt.figtext(0.680, 0.780, '%s\n%6.4f $\\pm$ %6.4f %s \nSD = %4.3f %s' %(analysis, avg, SDOM, y_units, SD, y_units), bbox=dict(boxstyle='square', ec='r', fc='w'), fontsize=12) plt.savefig('%s.%s.plot1d.png' %(system,analysis),dpi=300) plt.close() def hist1d(data, x_axis, system, analysis, num_b = 100, norm = False, average = False, t0 = 0, **kwargs): """ Creates a 1D histogram: Usage: hist1d(data, x_axis, num_b, system, analysis, norm) Arguments: data: self-explanatory x_axis: string to be used for the axis label system: descriptor for the system analyzed analysis: 
descriptor for the analysis performed and plotted num_b: number of bins to be used when binning the data; Default is 100 norm = [False][True]; Default is False; if False, plotting a frequency of data; if True, plotting a probability density average: [False|True]; Default is False; if set to True, the function will calc the average, standard dev, and standard dev of mean of the y-data t0: index to begin averaging from; Default is 0 kwargs: xunits: string with correct math text describing the units for the x data x_lim, y_lim: list w/ two elements, setting the limits of the x/y ranges of plot plt_title: string to be added as the plot title """ # INITIATING THE PLOT... events, edges, patches = plt.hist(data, bins=num_b, histtype = 'bar', normed=norm) # READING IN KWARG DICTIONARY INTO SPECIFIC VARIABLES for name, value in kwargs.items(): if name == 'xunits': x_units = value x_axis = '%s (%s)' %(x_axis, value) elif name == 'x_lim': plt.xlim(value) elif name == 'y_lim': plt.ylim(value) elif name == 'plt_title': plt.title(r'%s' %(value), size='14') plt.grid(b=True, which='major', axis='both', color='#808080', linestyle='--') plt.xlabel(r'%s' %(x_axis), size=12) # CALCULATING THE AVERAGE/SD/SDOM OF THE Y-DATA if average != False: avg = np.sum(data[t0:])/len(data[t0:]) SD = stdev(data[t0:]) SDOM = SD/sqrt(len(data[t0:])) plt.axvline(avg, ymin=0.0, ymax=1.0, c='r') plt.figtext(0.680, 0.780, '%s\n%6.4f $\\pm$ %6.4f %s \nSD = %4.3f %s' %(analysis, avg, SDOM, x_units, SD, x_units), bbox=dict(boxstyle='square', ec='r', fc='w'), fontsize=12) if norm == True: plt.ylabel('Probability Density') plt.savefig('%s.%s.prob1d.png' %(system,analysis),dpi=300) nf = open('%s.%s.prob1d.dat' %(system,analysis),'w') else: plt.ylabel('Frequency', size=12) plt.savefig('%s.%s.hist1d.png' %(system,analysis),dpi=300) nf = open('%s.%s.hist1d.dat' %(system,analysis), 'w') for i in range(len(events)): nf.write('%10.1f %10.4f\n' %(events[i], edges[i])) plt.close() nf.close() events = [] edges = [] 
patches = [] def scat_hist(xdata, ydata, color, x_axis, y_axis, system, analysis, num_b = 100, average = False, t0 = 0, **kwargs): """ Creates 1D scatter plot w/ a 1D histogram Usage: scat_hist(xdata, ydata, color, x_axis, y_axis, system, analysis, num_b) Arguments: xdata, ydata: self-explanatory color: color to be used to plot data x_axis, y_axis: strings to be printed on the axi labels system: descriptor for the system analyzed analysis: descriptor for the analysis performed and plotted num_b: number of bins to be used when binning the data; Default is 100 average: [False|True]; Default is False; if set to True, the function will calc the average, standard dev, and standard dev of mean of the y-data # THERE IS A BUG; if average = True, need to read in xunits for this function to work... t0: index to begin averaging from; Default is 0 kwargs: xunits, yunits: string with correct math text describing the units for the x/y data x_lim, y_lim: list w/ two elements, setting the limits of the x/y ranges of plot plt_title: string to be added as the plot title """ # INITIATING THE PLOT SIZES left, width = 0.1, 0.65 bottom, height = 0.1, 0.8 bottom_h = left_h = left+width+0.01 rect_scatter = [left, bottom, width, height] rect_histy = [left_h, bottom, 0.2, height] # INITIATING THE PLOT... plt.figure(1, figsize=(10,8)) axScatter =plt.axes(rect_scatter) axScatter.plot(xdata, ydata, '%s.' 
%(color)) # READING IN KWARG DICTIONARY INTO SPECIFIC VARIABLES for name, value in kwargs.items(): if name == 'xunits': x_units = value x_axis = '%s (%s)' %(x_axis, value) elif name == 'yunits': y_units = value y_axis = '%s (%s)' %(y_axis, value) elif name == 'x_lim': plt.xlim(value) elif name == 'y_lim': plt.ylim(value) elif name == 'plt_title': plt.title(r'%s' %(value), size='14') plt.grid(b=True, which='major', axis='both', color='#808080', linestyle='--') # plt.xlim((0,500)) plt.ylabel(r'%s' %(y_axis),size=12) plt.xlabel(r'%s' %(x_axis),size=12) if average != False: avg = np.sum(ydata[t0:])/len(ydata[t0:]) SD = stdev(ydata[t0:]) SDOM = SD/sqrt(len(ydata[t0:])) plt.axhline(avg, xmin=0.0, xmax=1.0, c='r') axHisty = plt.axes(rect_histy) axHisty.yaxis.set_major_formatter(nullfmt) axHisty.xaxis.set_major_formatter(nullfmt) plt.grid(b=True, which='major', axis='both', color='#808080', linestyle='--') axHisty.hist(ydata, bins=num_b, orientation='horizontal', color = ['gray']) axHisty.set_ylim(axScatter.get_ylim()) # CALCULATING THE AVERAGE/SD/SDOM OF THE Y-DATA if average != False: plt.axhline(avg, xmin=0.0, xmax=1.0, c='r') plt.figtext(0.775, 0.810, '%s\n%6.4f $\\pm$ %6.4f %s \nSD = %4.3f %s' %(analysis, avg, SDOM, y_units, SD, y_units), bbox=dict(boxstyle='square', ec='r', fc='w'), fontsize=12) plt.savefig('%s.%s.scat_hist.png' %(system, analysis),dpi=300) plt.close() def bar(xdata, ydata, x_axis, y_axis, system, analysis, **kwargs): """ Creates a bar graph Usage: bar(xdata, ydata, x_axis, y_axis, **kwarg) Arguments: xdata, ydata: self-explanatory x_axis, y_axis: strings to be printed on the axi labels system: descriptor for the system analyzed analysis: descriptor for the analysis performed and plotted kwargs: xunits, yunits: string with correct math text describing the units for the x/y data x_lim, y_lim: list (or tuple) w/ two elements, setting the limits of the x/y ranges of plot plt_title: string to be added as the plot title """ # INITIATING THE PLOT... 
plt.bar(xdata,ydata) # READING IN KWARG DICTIONARY INTO SPECIFIC VARIABLES for name, value in kwargs.items(): if name == 'xunits': x_units = value x_axis = '%s (%s)' %(x_axis, value) elif name == 'yunits': y_units = value y_axis = '%s (%s)' %(y_axis, value) elif name == 'x_lim': plt.xlim(value) elif name == 'y_lim': plt.ylim(value) elif name == 'plt_title': plt.title(r'%s' %(value), size='16') plt.grid(b=True, which='major', axis='both', color='#808080', linestyle='--') plt.ylabel(r'%s' %(y_axis),size=12) plt.xlabel(r'%s' %(x_axis),size=12) plt.savefig('%s.%s.bar.png' %(system,analysis),dpi=300) plt.close() def hist2d(xdata, ydata, x_axis, y_axis, num_b, system, analysis, norm): """ Creates a 2D histogram (heat map) Usage: hist2d(xdata, ydata, x_axis, y_axis, num_b, system, analysis, norm) Arguments: xdata, ydata: self-explanatory x_axis, y_axis: strings to be printed on the axi labels num_b: number of bins to be used when binning the data system: descriptor for the system analyzed analysis: descriptor for the analysis performed and plotted norm = [False][True]; if False, plotting a frequency of data; if True, plotting a probability density """ my_cmap = plt.cm.get_cmap('jet') my_cmap.set_under('w') counts, xedges, yedges, image = plt.hist2d(xdata, ydata, bins=num_b, normed=norm, cmap=my_cmap, vmin=0.001)#, cmap=plt.get_cmap('jet')) # cmap: jet (blue to red), blues (white to blue), ... cb1 = plt.colorbar() if norm == True: cb1.set_label('Prob. 
Density', size=12) else: cb1.set_label('Frequency') # plt.title('Distribution of Base Pair interactions - %s-%s' %(base_a, base_b)) # plt.xlim((0,8)) # plt.ylim((0,8)) plt.xlabel(r'%s' %(x_axis), size=12) plt.ylabel(r'%s' %(y_axis), size=12) plt.savefig('%s.%s.hist2d.png' %(system, analysis),dpi=300) plt.close() counts = [] xedges = [] yedges = [] image = [] def matrix2d(matrix, x_axis, y_axis, cb_axis, system, analysis, **kwargs): """ Creates a 2D matrix image Usage: matrix2d(matrix,x_axis,y_axis,system,analysis) Arguments: matrix: the data matrix to be plotted (should have shape of MxN, but can have MxNx3 or MxNx4) x_axis, y_axis: strings to be printed on the axi labels system: descriptor for the system analyzed analysis: descriptor for the analysis performed and plotted kwargs: vmin, vmax: floats that define the limits for the color bar; if below vmin, data will be colored white; if above vmax, data will be colored red (might want to change this for aesthetics) plt_title: string to be added as the plot title cb_units: sting to be added to the color bar label to indicate the units of the color bar xlim, ylim: list (or tuple) w/ two elements, setting the limits of the x/y ranges of plot """ vmin =0.001 vmax = None # c = mcolors.ColorConverter().to_rgb # bgr = make_colormap([c('blue'),c('lime'),0.50,c('lime'),c('red'),1.00,c('red')]) # bgr = make_colormap([c('red'),c('lime'),0.50,c('lime'),c('blue'),1.00,c('blue')]) # bgr.set_under('k') # bgr.set_over('r') # bgr.set_over('w') # my_cmap = bgr my_cmap = plt.cm.get_cmap('jet') # my_cmap = plt.cm.get_cmap('gray') # READING IN KWARG DICTIONARY INTO SPECIFIC VARIABLES for name, value in kwargs.items(): if name == 'vmin': vmin = value elif name == 'vmax': vmax = value elif name == 'cb_units': cb_units = value cb_axis = '%s (%s)' %(cb_axis, value) elif name == 'plt_title': plt.title(r'%s' %(value), size='14') elif name == 'xlim': plt.xlim(value) elif name == 'ylim': plt.ylim(value) 
plt.pcolor(matrix,cmap=my_cmap,vmin=vmin,vmax=vmax) # ,interpolation='none',origin='lower' cb1 = plt.colorbar(extend='max',cmap=my_cmap) cb1.set_label(r'%s' %(cb_axis), size=14) plt.grid(b=True, which='major', axis='both', color='#808080', linestyle='--') plt.xlabel(r'%s' %(x_axis), size=14) plt.ylabel(r'%s' %(y_axis), size=14) plt.savefig('%s.%s.heatmap.png' %(system, analysis),dpi=300) plt.close()
gpl-3.0
scollis/AGU_2016
scripts/tmatrix_par.py
1
5013
import pyart import netCDF4 import numpy as np import platform from matplotlib import pyplot as plt from glob import glob import os from datetime import datetime from scipy import interpolate import fnmatch import matplotlib.dates as mdates from pytmatrix.tmatrix import TMatrix, Scatterer from pytmatrix.tmatrix_psd import TMatrixPSD, GammaPSD from pytmatrix import orientation, radar, tmatrix_aux, refractive from pytmatrix.psd import PSDIntegrator, GammaPSD from IPython.parallel import Client import pickle #index all distrometer files def get_file_tree(start_dir, pattern): """ Make a list of all files matching pattern above start_dir Parameters ---------- start_dir : string base_directory pattern : string pattern to match. Use * for wildcard Returns ------- files : list list of strings """ files = [] for dir, _, _ in os.walk(start_dir): files.extend(glob(os.path.join(dir, pattern))) return files def scatter_off_2dvd_packed(dicc): def drop_ar(D_eq): if D_eq < 0.7: return 1.0; elif D_eq < 1.5: return 1.173 - 0.5165*D_eq + 0.4698*D_eq**2 - 0.1317*D_eq**3 - \ 8.5e-3*D_eq**4 else: return 1.065 - 6.25e-2*D_eq - 3.99e-3*D_eq**2 + 7.66e-4*D_eq**3 - \ 4.095e-5*D_eq**4 d_diameters = dicc['1'] d_densities = dicc['2'] mypds = interpolate.interp1d(d_diameters,d_densities, bounds_error=False, fill_value=0.0) scatterer = Scatterer(wavelength=tmatrix_aux.wl_C, m=refractive.m_w_10C[tmatrix_aux.wl_C]) scatterer.psd_integrator = PSDIntegrator() scatterer.psd_integrator.axis_ratio_func = lambda D: 1.0/drop_ar(D) scatterer.psd_integrator.D_max = 10.0 scatterer.psd_integrator.geometries = (tmatrix_aux.geom_horiz_back, tmatrix_aux.geom_horiz_forw) scatterer.or_pdf = orientation.gaussian_pdf(20.0) scatterer.orient = orientation.orient_averaged_fixed scatterer.psd_integrator.init_scatter_table(scatterer) scatterer.psd = mypds # GammaPSD(D0=2.0, Nw=1e3, mu=4) radar.refl(scatterer) zdr=radar.Zdr(scatterer) z=radar.refl(scatterer) scatterer.set_geometry(tmatrix_aux.geom_horiz_forw) 
kdp=radar.Kdp(scatterer) A=radar.Ai(scatterer) return z,zdr,kdp,A my_system = platform.system() if my_system == 'Darwin': top = '/data/sample_sapr_data/sgpstage/sur/' s_dir = '/data/sample_sapr_data/sgpstage/interp_sonde/' d_dir = '/data/agu2016/dis/' odir_r = '/data/agu2016/radars/' odir_s = '/data/agu2016/stats/' odir_i = '/data/agu2016/images/' elif my_system == 'Linux': top = '/lcrc/group/earthscience/radar/sgpstage/sur/' s_dir = '/lcrc/group/earthscience/radar/sgpstage/interp_sonde/' odir_r = '/lcrc/group/earthscience/radar/agu2016/radars/' odir_s = '/lcrc/group/earthscience/radar/agu2016/stats/' odir_i = '/lcrc/group/earthscience/radar/agu2016/images/' d_dir = '/lcrc/group/earthscience/radar/sgpstage/dis/' all_dis_files = get_file_tree(d_dir, 'sgpvdisC1*.000000.cdf') all_dis_files.sort() for filename in all_dis_files[50::]: print(filename) sfx = filename.split('/')[-1].split('.')[2] ofile = d_dir + sfx + 'proccessed.pc' print(ofile) #filename = '/lcrc/group/earthscience/radar/sgpstage/dis/sgpvdisC1.b1.20110425.000000.cdf' distrodata = netCDF4.Dataset(filename, 'r') d_time = netCDF4.num2date(distrodata.variables['time'][:], distrodata.variables['time'].units) m_numtime = mdates.date2num(d_time) diameters = distrodata.variables['drop_diameter'][:] densities = distrodata.variables['num_density'][:] drops = distrodata.variables['num_drops'][:] rwc = distrodata.variables['liquid_water_content'][:] distrodata.close() good_returns = np.where(rwc > 1.0)[0] n_dsds = len(good_returns) #print('Getting ready to scatter off ',n_dsds,' DSDs') n_dists = len(rwc) z2dvd = np.zeros(n_dists) kdp2dvd = np.zeros(n_dists) zdr2dvd = np.zeros(n_dists) A2dvd = np.zeros(n_dists) My_Cluster = Client() My_View = My_Cluster[:] print My_View print len(My_View) good = True #Turn off blocking so all engines can work async My_View.block = False My_View.execute('from scipy import interpolate') My_View.execute('from pytmatrix.tmatrix import TMatrix, Scatterer') My_View.execute('from 
pytmatrix.tmatrix_psd import TMatrixPSD, GammaPSD, PSDIntegrator') My_View.execute('from pytmatrix import orientation, radar, tmatrix_aux, refractive') print('Making the map!') mapme = [] for i in range(len(rwc)): mapme.append({'1':diameters, '2':densities[i,:]}) result = My_View.map_async(scatter_off_2dvd_packed, mapme) #result = My_View.map_async(test_script, packing[0:100]) #Reduce the result to get a list of output qvps = result.get() print(qvps) pickle.dump( qvps, open( ofile, "wb" ) )
bsd-3-clause
geodynamics/burnman
misc/benchmarks/benchmark.py
3
29420
from __future__ import absolute_import # This file is part of BurnMan - a thermoelastic and thermodynamic toolkit for the Earth and Planetary Sciences # Copyright (C) 2012 - 2015 by the BurnMan team, released under the GNU # GPL v2 or later. import os.path import sys sys.path.insert(1, os.path.abspath('../..')) import numpy as np import matplotlib.pyplot as plt import burnman import burnman.eos.birch_murnaghan as bm import burnman.eos.birch_murnaghan_4th as bm4 import burnman.eos.mie_grueneisen_debye as mgd import burnman.eos.slb as slb import burnman.eos.vinet as vinet import matplotlib.image as mpimg def check_birch_murnaghan(): """ Recreates Stixrude and Lithgow-Bertelloni (2005) Figure 1, bulk and shear modulus without thermal corrections """ plt.close() # make a test mineral test_mineral = burnman.Mineral() test_mineral.params = {'name': 'test', 'V_0': 6.844e-6, 'K_0': 259.0e9, 'Kprime_0': 4.0, 'G_0': 175.0e9, 'Gprime_0': 1.7, 'molar_mass': .0, } test_mineral.set_method('bm3') pressure = np.linspace(0., 140.e9, 100) volume = np.empty_like(pressure) bulk_modulus = np.empty_like(pressure) shear_modulus = np.empty_like(pressure) # calculate its static properties for i in range(len(pressure)): volume[i] = bm.volume(pressure[i], test_mineral.params) bulk_modulus[i] = bm.bulk_modulus(volume[i], test_mineral.params) shear_modulus[i] = bm.shear_modulus_third_order( volume[i], test_mineral.params) # third order is used for the plot we are comparing against # compare with figure 1 plt.plot(pressure / 1.e9, bulk_modulus / 1.e9, pressure / 1.e9, shear_modulus / 1.e9) fig1 = mpimg.imread('../../burnman/data/input_figures/slb_fig1.png') plt.imshow(fig1, extent=[0, 140, 0, 800], aspect='auto') plt.plot(pressure / 1.e9, bulk_modulus / 1.e9, 'g+', pressure / 1.e9, shear_modulus / 1.e9, 'g+') plt.ylim(0, 800) plt.xlim(0, 140) plt.xlabel("Pressure (GPa)") plt.ylabel("Modulus (GPa)") plt.title( "Comparing with Figure 1 of Stixrude and Lithgow-Bertelloni (2005)") plt.show() def 
check_birch_murnaghan_4th(): """ Recreates the formulation of the 4th order Birch-Murnaghan EOS as in Ahmad and Alkammash, 2012; Figure 1. """ plt.close() # make a test mineral test_mineral = burnman.Mineral() test_mineral.params = {'name': 'test', 'V_0': 10.e-6, 'K_0': 72.7e9, 'Kprime_0': 4.14, 'Kprime_prime_0': -0.0484e-9, } test_mineral.set_method('bm4') pressure = np.linspace(0., 90.e9, 20) volume = np.empty_like(pressure) # calculate its static properties for i in range(len(pressure)): volume[i] = bm4.volume_fourth_order( pressure[i], test_mineral.params) / test_mineral.params.get('V_0') # compare with figure 1 plt.plot(pressure / 1.e9, volume) fig1 = mpimg.imread('../../burnman/data/input_figures/Ahmad.png') plt.imshow(fig1, extent=[0., 90., .65, 1.], aspect='auto') plt.plot(pressure / 1.e9, volume, marker='o', color='r', linestyle='', label='BM4') plt.legend(loc='lower left') plt.xlim(0., 90.) plt.ylim(.65, 1.) plt.xlabel("Volume/V0") plt.ylabel("Pressure (GPa)") plt.title("Comparing with Figure 1 of Ahmad et al., (2012)") plt.show() def check_vinet(): """ Recreates Dewaele et al., 2006, Figure 1, fitting a Vinet EOS to Fe data """ plt.close() # make a test mineral test_mineral = burnman.Mineral() test_mineral.params = {'name': 'test', 'V_0': 6.75e-6, 'K_0': 163.4e9, 'Kprime_0': 5.38, } test_mineral.set_method('vinet') pressure = np.linspace(17.7e9, 300.e9, 20) volume = np.empty_like(pressure) # calculate its static properties for i in range(len(pressure)): volume[i] = vinet.volume(pressure[i], test_mineral.params) # compare with figure 1 plt.plot(pressure / 1.e9, volume / 6.02e-7) fig1 = mpimg.imread('../../burnman/data/input_figures/Dewaele.png') plt.imshow(fig1, extent=[0., 300., 6.8, 11.8], aspect='auto') plt.plot(pressure / 1.e9, volume / 6.02e-7, marker='o', color='r', linestyle='', label='Vinet Fit') plt.legend(loc='lower left') plt.xlim(0., 300.) 
plt.ylim(6.8, 11.8) plt.ylabel("Volume (Angstroms^3/atom") plt.xlabel("Pressure (GPa)") plt.title("Comparing with Figure 1 of Dewaele et al., (2006)") plt.show() def check_mgd_shim_duffy_kenichi(): """ Attemmpts to recreate Shim Duffy Kenichi (2002) """ plt.close() # Create gold material from Table 1 gold = burnman.Mineral() gold.params = {'name': 'gold', 'V_0': 10.22e-6, 'K_0': 167.0e9, 'Kprime_0': 5.0, 'G_0': 0.0e9, 'Gprime_0': 0.0, 'molar_mass': .196966, 'n': 1.0, 'Debye_0': 170., 'grueneisen_0': 2.97, # this does better with gr = 2.93. Why? 'q_0': 1.0} gold.set_method('mgd3') # Total pressures, pulled from Table 2 ref_pressures = [ np.array([0., 3.55, 7.55, 12.06, 17.16, 22.91, 29.42, 36.77, 45.11, 54.56, 65.29, 77.50, 91.42, 107.32, 125.51, 146.38, 170.38, 198.07])] ref_pressures.append( np.array([4.99, 8.53, 12.53, 17.04, 22.13, 27.88, 34.38, 41.73, 50.06, 59.50, 70.22, 82.43, 96.33, 112.22, 130.40, 151.25, 175.24, 202.90])) ref_pressures.append( np.array([12.14, 15.69, 19.68, 24.19, 29.28, 35.03, 41.53, 48.88, 57.20, 66.64, 77.37, 89.57, 103.47, 119.35, 137.53, 158.38, 182.36, 210.02])) ref_pressures.append( np.array([19.30, 22.84, 26.84, 31.35, 36.44, 42.19, 48.68, 56.03, 64.35, 73.80, 84.52, 96.72, 110.62, 126.50, 144.68, 165.53, 189.51, 217.17])) eos = mgd.MGD3() pressures = np.empty_like(ref_pressures) ref_dv = np.linspace(0.0, 0.34, len(pressures[0])) ref_volumes = (1 - ref_dv) * gold.params['V_0'] T = np.array([300., 1000., 2000., 3000.]) for t in range(len(pressures)): for i in range(len(pressures[t])): pressures[t][i] = eos.pressure(T[t], ref_volumes[i], gold.params) plt.plot(ref_dv, (pressures[t] / 1.e9 - ref_pressures[t])) plt.ylim(-1, 1) plt.ylabel("Difference in pressure (GPa)") plt.xlabel("1-dV/V") plt.title("Comparing with Shim, Duffy, and Kenichi (2002)") plt.show() def check_mgd_fei_mao_shu_hu(): """ Benchmark agains Fei Mao Shu Hu (1991) """ mgfeo = burnman.Mineral() mgfeo.params = {'name': 'MgFeO', 'V_0': 11.657e-6, 'K_0': 157.0e9, 
'Kprime_0': 4.0, 'G_0': 0.0e9, 'Gprime_0': 0.0, 'molar_mass': .196966, 'n': 2.0, 'Debye_0': 500., 'grueneisen_0': 1.50, 'q_0': 1.1} mgfeo.set_method('mgd3') # pulled from table 1 temperatures = np.array( [300, 300, 483, 483, 483, 590, 593, 593, 593, 700, 600, 500, 650, 600, 600, 650, 700, 737, 727, 673, 600, 543, 565, 585, 600, 628, 654, 745, 768, 747, 726, 700, 676]) volumes = np.array( [77.418, 72.327, 74.427, 73.655, 72.595, 74.1, 73.834, 73.101, 70.845, 73.024, 72.630, 68.644, 72.969, 72.324, 71.857, 72.128, 73.283, 73.337, 72.963, 71.969, 69.894, 67.430, 67.607, 67.737, 68.204, 68.518, 68.955, 70.777, 72.921, 72.476, 72.152, 71.858, 71.473]) # change from cubic angstroms per unit cell to cubic meters per mol of # molecules. volumes = volumes / 1.e30 * 6.022141e23 / 4.0 ref_pressures = np.array( [0.0, 12.23, 7.77, 9.69, 12.54, 9.21, 9.90, 11.83, 18.35, 12.68, 13.15, 25.16, 12.53, 14.01, 15.34, 14.86, 11.99, 12.08, 13.03, 15.46, 21.44, 29.98, 29.41, 29.05, 27.36, 26.38, 24.97, 19.49, 13.39, 14.48, 15.27, 15.95, 16.94]) ref_pressures = ref_pressures pressures = np.empty_like(volumes) eos = mgd.MGD3() for i in range(len(temperatures)): pressures[i] = eos.pressure(temperatures[i], volumes[i], mgfeo.params) plt.scatter(temperatures, (pressures / 1.e9 - ref_pressures)) plt.ylim(-1, 1) plt.title("Comparing with Fei, Mao, Shu, and Hu (1991)") plt.xlabel("Temperature (K) at various volumes") plt.ylabel("Difference in total pressure (GPa)") plt.show() def check_slb_fig3(): """ Benchmark grueneisen parameter against figure 3 of Stixrude and Lithgow-Bertelloni (2005b) """ perovskite = burnman.Mineral() perovskite.params = {'name': 'perovksite', 'V_0': burnman.tools.molar_volume_from_unit_cell_volume(168.27, 4.), 'grueneisen_0': 1.63, 'q_0': 1.7} volume = np.linspace(0.6, 1.0, 100) grueneisen_slb = np.empty_like(volume) grueneisen_mgd = np.empty_like(volume) q_slb = np.empty_like(volume) q_mgd = np.empty_like(volume) slb_eos = slb.SLB2() mgd_eos = mgd.MGD2() # calculate its 
thermal properties for i in range(len(volume)): # call with dummy pressure and temperatures, they do not change it grueneisen_slb[i] = slb_eos.grueneisen_parameter( 0., 0., volume[i] * perovskite.params['V_0'], perovskite.params) grueneisen_mgd[i] = mgd_eos.grueneisen_parameter( 0., 0., volume[i] * perovskite.params['V_0'], perovskite.params) q_slb[i] = slb_eos.volume_dependent_q( 1. / volume[i], perovskite.params) q_mgd[i] = perovskite.params['q_0'] # compare with figure 7 fig1 = mpimg.imread('../../burnman/data/input_figures/slb_fig3.png') plt.imshow(fig1, extent=[0.6, 1.0, 0.35, 2.0], aspect='auto') plt.plot(volume, grueneisen_slb, 'g+', volume, grueneisen_mgd, 'b+') plt.plot(volume, q_slb, 'g+', volume, q_mgd, 'b+') plt.xlim(0.6, 1.0) plt.ylim(0.35, 2.0) plt.ylabel("Grueneisen parameter") plt.xlabel("Relative Volume V/V0") plt.title( "Comparing with Figure 3 of Stixrude and Lithgow-Bertelloni (2005)") plt.show() def check_slb_fig7_txt(): """ Calculates all values for forsterite and benchmarks with values from Stixrude and Lithgow-Bertelloni (personal communication) """ forsterite = burnman.Mineral() forsterite.params = {'name': 'forsterite', 'V_0': 43.603e-6, 'K_0': 127.955e9, 'Kprime_0': 4.232, 'G_0': 81.6e9, 'Gprime_0': 1.4, 'molar_mass': .140695, 'n': 7.0, 'Debye_0': 809.183, 'grueneisen_0': .993, 'q_0': 2.093, 'F_0': -1.1406e5, 'eta_s_0': 2.364} forsterite.set_method('slb3') data = np.loadtxt( "../../burnman/data/input_minphys/slb_fig7.txt", skiprows=2) temperature = np.array(data[:, 2]) pressure = np.array(data[:, 0]) rho = np.array(data[:, 3]) rho_comp = np.empty_like(rho) Kt = np.array(data[:, 4]) Kt_comp = np.empty_like(Kt) Ks = np.array(data[:, 5]) Ks_comp = np.empty_like(Ks) G = np.array(data[:, 6]) G_comp = np.empty_like(G) VB = np.array(data[:, 7]) VB_comp = np.empty_like(VB) VS = np.array(data[:, 8]) VS_comp = np.empty_like(VS) VP = np.array(data[:, 9]) VP_comp = np.empty_like(VP) vol = np.array(data[:, 10]) vol_comp = np.empty_like(vol) alpha = 
np.array(data[:, 11]) alpha_comp = np.empty_like(alpha) Cp = np.array(data[:, 12]) Cp_comp = np.empty_like(Cp) gr = np.array(data[:, 13]) gr_comp = np.empty_like(gr) gibbs = np.array(data[:, 14]) gibbs_comp = np.empty_like(gibbs) entropy = np.array(data[:, 15]) entropy_comp = np.empty_like(gibbs) enthalpy = np.array(data[:, 16]) enthalpy_comp = np.empty_like(gibbs) for i in range(len(temperature)): forsterite.set_state(pressure[i], temperature[i]) rho_comp[i] = 100. * (forsterite.density / 1000. - rho[i]) / rho[i] Kt_comp[i] = 100. * ( forsterite.isothermal_bulk_modulus / 1.e9 - Kt[i]) / Kt[i] Ks_comp[i] = 100. * ( forsterite.adiabatic_bulk_modulus / 1.e9 - Ks[i]) / Ks[i] G_comp[i] = 100. * (forsterite.shear_modulus / 1.e9 - G[i]) / G[i] VB_comp[i] = 100. * (forsterite.v_phi / 1000. - VB[i]) / VB[i] VS_comp[i] = 100. * (forsterite.v_s / 1000. - VS[i]) / VS[i] VP_comp[i] = 100. * (forsterite.v_p / 1000. - VP[i]) / VP[i] vol_comp[i] = 100. * (forsterite.molar_volume * 1.e6 - vol[i]) / vol[i] alpha_comp[i] = 100. * ( forsterite.thermal_expansivity / 1.e-5 - alpha[i]) / (alpha[-1]) Cp_comp[i] = 100. * (forsterite.molar_heat_capacity_p / forsterite.params['molar_mass'] / 1000. - Cp[i]) / (Cp[-1]) gr_comp[i] = (forsterite.grueneisen_parameter - gr[i]) / gr[i] gibbs_comp[i] = 100. * ( forsterite.molar_gibbs / 1.e6 - gibbs[i]) / gibbs[i] entropy_comp[i] = 100. * ( forsterite.molar_entropy - entropy[i]) / (entropy[i] if entropy[i] != 0. else 1.) enthalpy_comp[i] = 100. * ( forsterite.molar_enthalpy / 1.e6 - enthalpy[i]) / (enthalpy[i] if enthalpy[i] != 0. else 1.) 
plt.plot(temperature, rho_comp, label=r'$\rho$') plt.plot(temperature, Kt_comp, label=r'$K_S$') plt.plot(temperature, Ks_comp, label=r'$K_T$') plt.plot(temperature, G_comp, label=r'$G$') plt.plot(temperature, VS_comp, label=r'$V_S$') plt.plot(temperature, VP_comp, label=r'$V_P$') plt.plot(temperature, VB_comp, label=r'$V_\phi$') plt.plot(temperature, vol_comp, label=r'$V$') plt.plot(temperature, alpha_comp, label=r'$\alpha$') plt.plot(temperature, Cp_comp, label=r'$c_P$') plt.plot(temperature, gr_comp, label=r'$\gamma$') plt.plot(temperature, gibbs_comp, label=r'Gibbs') plt.plot(temperature, enthalpy_comp, label=r'Enthalpy') plt.plot(temperature, entropy_comp, label=r'Entropy') plt.xlim([0, 2750]) plt.ylim([-0.001, 0.001]) plt.xticks([0, 800, 1600, 2200]) plt.xlabel("Temperature (K)") plt.ylabel("Percent Difference from HeFESTo") plt.legend(loc="center right") # plt.savefig("output_figures/benchmark1.pdf") plt.show() def check_slb_fig7(): """ Calculates all values for forsterite and benchmarks with figure 7 from Stixrude and Lithgow-Bertelloni (2005) """ forsterite = burnman.Mineral() forsterite.params = {'name': 'forsterite', 'V_0': 43.60e-6, 'K_0': 128.0e9, 'Kprime_0': 4.2, 'G_0': 82.0e9, 'Gprime_0': 1.4, 'n': 7.0, 'molar_mass': .140695, 'Debye_0': 809., 'grueneisen_0': .99, 'q_0': 2.1, 'eta_s_0': 2.4} forsterite.set_method('slb3') temperature = np.linspace(0., 2000., 200) volume = np.empty_like(temperature) bulk_modulus = np.empty_like(temperature) shear_modulus = np.empty_like(temperature) heat_capacity = np.empty_like(temperature) pressure = 1.0e5 forsterite.set_state(pressure, 300.) 
Ks_0 = forsterite.adiabatic_bulk_modulus # calculate its thermal properties for i in range(len(temperature)): forsterite.set_state(pressure, temperature[i]) volume[i] = forsterite.molar_volume / forsterite.params['V_0'] bulk_modulus[i] = forsterite.adiabatic_bulk_modulus / Ks_0 shear_modulus[i] = forsterite.shear_modulus / forsterite.params['G_0'] heat_capacity[i] = forsterite.molar_heat_capacity_p / forsterite.params['n'] # compare with figure 7 fig1 = mpimg.imread('../../burnman/data/input_figures/slb_fig7_vol.png') plt.imshow(fig1, extent=[0, 2200, 0.99, 1.08], aspect='auto') plt.plot(temperature, volume, 'g+') plt.ylim(0.99, 1.08) plt.xlim(0, 2200) plt.xlabel("Temperature (K)") plt.ylabel("Relative Volume V/V0") plt.title( "Comparing with Figure 7 of Stixrude and Lithgow-Bertelloni (2005)") plt.show() fig1 = mpimg.imread('../../burnman/data/input_figures/slb_fig7_Cp.png') plt.imshow(fig1, extent=[0, 2200, 0., 70.], aspect='auto') plt.plot(temperature, heat_capacity, 'g+') plt.ylim(0, 70) plt.xlim(0, 2200) plt.xlabel("Temperature (K)") plt.ylabel("Heat Capacity Cp") plt.title( "Comparing with adiabatic_bulk_modulus7 of Stixrude and Lithgow-Bertelloni (2005)") plt.show() fig1 = mpimg.imread('../../burnman/data/input_figures/slb_fig7_K.png') plt.imshow(fig1, extent=[0, 2200, 0.6, 1.02], aspect='auto') plt.plot(temperature, bulk_modulus, 'g+') plt.ylim(0.6, 1.02) plt.xlim(0, 2200) plt.xlabel("Temperature (K)") plt.ylabel("Relative Bulk Modulus K/K0") plt.title( "Comparing with Figure 7 of Stixrude and Lithgow-Bertelloni (2005)") plt.show() fig1 = mpimg.imread('../../burnman/data/input_figures/slb_fig7_G.png') plt.imshow(fig1, extent=[0, 2200, 0.6, 1.02], aspect='auto') plt.plot(temperature, shear_modulus, 'g+') plt.ylim(0.6, 1.02) plt.xlim(0, 2200) plt.xlabel("Temperature (K)") plt.ylabel("Relative Shear Modulus G/G0") plt.title( "Comparing with Figure 7 of Stixrude and Lithgow-Bertelloni (2005)") plt.show() def check_averaging(): """ Reproduce Figure 1a from Watt 
et. al. 1976 to check the Voigt, Reuss, Voigt-Reuss-Hill, and Hashin-Shtrikman bounds for an elastic composite """ voigt = burnman.averaging_schemes.Voigt() reuss = burnman.averaging_schemes.Reuss() voigt_reuss_hill = burnman.averaging_schemes.VoigtReussHill() hashin_shtrikman_upper = burnman.averaging_schemes.HashinShtrikmanUpper() hashin_shtrikman_lower = burnman.averaging_schemes.HashinShtrikmanLower() # create arrays for sampling in volume fraction volumes = np.linspace(0.0, 1.0, 100) v_bulk_modulus = np.empty_like(volumes) v_shear_modulus = np.empty_like(volumes) r_bulk_modulus = np.empty_like(volumes) r_shear_modulus = np.empty_like(volumes) vrh_bulk_modulus = np.empty_like(volumes) vrh_shear_modulus = np.empty_like(volumes) hsu_bulk_modulus = np.empty_like(volumes) hsu_shear_modulus = np.empty_like(volumes) hsl_bulk_modulus = np.empty_like(volumes) hsl_shear_modulus = np.empty_like(volumes) # MgO bulk and shear moduli taken from Landolt-Boernstein # - Group III Condensed Matter Volume 41B, 1999, pp 1-3 K2 = 152. # Bulk modulus, GPa G2 = 155. 
# Shear modulus, GPa # AgCl bulk and shear moduli (estimated from plot) G1 = G2 * 0.07 K1 = K2 * 0.27 for i in range(len(volumes)): v_bulk_modulus[i] = voigt.average_bulk_moduli( [volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2]) v_shear_modulus[i] = voigt.average_shear_moduli( [volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2]) r_bulk_modulus[i] = reuss.average_bulk_moduli( [volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2]) r_shear_modulus[i] = reuss.average_shear_moduli( [volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2]) vrh_bulk_modulus[i] = voigt_reuss_hill.average_bulk_moduli( [volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2]) vrh_shear_modulus[i] = voigt_reuss_hill.average_shear_moduli( [volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2]) hsu_bulk_modulus[i] = hashin_shtrikman_upper.average_bulk_moduli( [volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2]) hsu_shear_modulus[i] = hashin_shtrikman_upper.average_shear_moduli( [volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2]) hsl_bulk_modulus[i] = hashin_shtrikman_lower.average_bulk_moduli( [volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2]) hsl_shear_modulus[i] = hashin_shtrikman_lower.average_shear_moduli( [volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2]) fig = mpimg.imread('../../burnman/data/input_figures/watt_1976_a1.png') plt.imshow(fig, extent=[0, 1.0, 0.25, 1.0], aspect='auto') plt.plot(volumes, v_bulk_modulus / K2, 'g-') plt.plot(volumes, r_bulk_modulus / K2, 'g-') plt.plot(volumes, vrh_bulk_modulus / K2, 'g-') plt.plot(volumes, hsu_bulk_modulus / K2, 'g-') plt.plot(volumes, hsl_bulk_modulus / K2, 'g-') plt.ylim(0.25, 1.00) plt.xlim(0, 1.0) plt.xlabel("Volume fraction") plt.ylabel("Averaged bulk modulus") plt.title("Comparing with Figure 1 of Watt et al 1976") plt.show() fig = mpimg.imread('../../burnman/data/input_figures/watt_1976_a2.png') plt.imshow(fig, extent=[0, 1.0, 0.0, 1.0], aspect='auto') plt.plot(volumes, v_shear_modulus / G2, 'g-') plt.plot(volumes, r_shear_modulus / G2, 'g-') 
plt.plot(volumes, vrh_shear_modulus / G2, 'g-') plt.plot(volumes, hsu_shear_modulus / G2, 'g-') plt.plot(volumes, hsl_shear_modulus / G2, 'g-') plt.ylim(0.0, 1.00) plt.xlim(0, 1.0) plt.xlabel("Volume fraction") plt.ylabel("Averaged shear modulus") plt.title("Comparing with Figure 1 of Watt et al 1976") plt.show() # also check against some numerical values given in Berryman (1995) for # porous glass K = 46.3 G = 30.5 # the value for porosity=0.46 in the table appears to be a typo. Remove # it here porosity = np.array( [0.0, 0.05, 0.11, 0.13, 0.25, 0.33, 0.36, 0.39, 0.44, 0.50, 0.70]) berryman_bulk_modulus = np.array( [46.3, 41.6, 36.6, 35.1, 27.0, 22.5, 21.0, 19.6, 17.3, 14.8, 7.7]) # 15.5 probably a typo? hsu_bulk_modulus_vals = np.empty_like(porosity) for i in range(len(porosity)): hsu_bulk_modulus_vals[i] = hashin_shtrikman_upper.average_bulk_moduli( [porosity[i], 1.0 - porosity[i]], [0.0, K], [0.0, G]) for i in range(len(volumes)): hsu_bulk_modulus[i] = hashin_shtrikman_upper.average_bulk_moduli( [volumes[i], 1.0 - volumes[i]], [0.0, K], [0.0, G]) fig = mpimg.imread('../../burnman/data/input_figures/berryman_fig4.png') plt.imshow(fig, extent=[0, 1.0, 0.0, 50.0], aspect='auto') plt.plot(volumes, hsu_bulk_modulus, 'g-') plt.scatter(porosity, hsu_bulk_modulus_vals, c='r') plt.scatter(porosity, berryman_bulk_modulus, c='y') plt.ylim(0.0, 50.0) plt.xlim(0, 1.0) plt.xlabel("Porosity") plt.ylabel("Averaged bulk modulus") plt.title("Comparing with Figure 4 of Berryman (1995)") plt.show() def check_averaging_2(): """ Reproduce Figure 1 from Hashin and Shtrikman (1963) to check the Hashin-Shtrikman bounds for an elastic composite """ hashin_shtrikman_upper = burnman.averaging_schemes.HashinShtrikmanUpper() hashin_shtrikman_lower = burnman.averaging_schemes.HashinShtrikmanLower() # create arrays for sampling in volume fraction volumes = np.linspace(0.0, 1.0, 100) hsu_bulk_modulus = np.empty_like(volumes) hsu_shear_modulus = np.empty_like(volumes) hsl_bulk_modulus = 
np.empty_like(volumes) hsl_shear_modulus = np.empty_like(volumes) # These values are from Hashin and Shtrikman (1963) K1 = 25.0 K2 = 60.7 G1 = 11.5 G2 = 41.8 for i in range(len(volumes)): hsu_bulk_modulus[i] = hashin_shtrikman_upper.average_bulk_moduli( [1.0 - volumes[i], volumes[i]], [K1, K2], [G1, G2]) hsu_shear_modulus[i] = hashin_shtrikman_upper.average_shear_moduli( [1.0 - volumes[i], volumes[i]], [K1, K2], [G1, G2]) hsl_bulk_modulus[i] = hashin_shtrikman_lower.average_bulk_moduli( [1.0 - volumes[i], volumes[i]], [K1, K2], [G1, G2]) hsl_shear_modulus[i] = hashin_shtrikman_lower.average_shear_moduli( [1.0 - volumes[i], volumes[i]], [K1, K2], [G1, G2]) fig = mpimg.imread( '../../burnman/data/input_figures/Hashin_Shtrikman_1963_fig1_K.png') plt.imshow(fig, extent=[0, 1.0, 1.1, K2 + 0.3], aspect='auto') plt.plot(volumes, hsu_bulk_modulus, 'g-') plt.plot(volumes, hsl_bulk_modulus, 'g-') plt.ylim(K1, K2) plt.xlim(0, 1.0) plt.xlabel("Volume fraction") plt.ylabel("Averaged bulk modulus") plt.title("Comparing with Figure 1 of Hashin and Shtrikman (1963)") plt.show() fig = mpimg.imread( '../../burnman/data/input_figures/Hashin_Shtrikman_1963_fig2_G.png') plt.imshow(fig, extent=[0, 1.0, 0.3, G2], aspect='auto') plt.plot(volumes, hsu_shear_modulus, 'g-') plt.plot(volumes, hsl_shear_modulus, 'g-') plt.ylim(G1, G2) plt.xlim(0, 1.0) plt.xlabel("Volume fraction") plt.ylabel("Averaged shear modulus") plt.title("Comparing with Figure 2 of Hashin and Shtrikman (1963)") plt.show() def check_averaging_3(): """ Reproduce Figure 3 from Avseth et al. 
(2010) to check the Voigt, Reuss, Voigt-Reuss-Hill, and Hashin-Shtrikman bounds for an elastic composite """ voigt = burnman.averaging_schemes.Voigt() reuss = burnman.averaging_schemes.Reuss() voigt_reuss_hill = burnman.averaging_schemes.VoigtReussHill() hashin_shtrikman_upper = burnman.averaging_schemes.HashinShtrikmanUpper() hashin_shtrikman_lower = burnman.averaging_schemes.HashinShtrikmanLower() # create arrays for sampling in volume fraction volumes = np.linspace(0.0, 1.0, 100) v_bulk_modulus = np.empty_like(volumes) v_shear_modulus = np.empty_like(volumes) r_bulk_modulus = np.empty_like(volumes) r_shear_modulus = np.empty_like(volumes) vrh_bulk_modulus = np.empty_like(volumes) vrh_shear_modulus = np.empty_like(volumes) hsu_bulk_modulus = np.empty_like(volumes) hsu_shear_modulus = np.empty_like(volumes) hsl_bulk_modulus = np.empty_like(volumes) hsl_shear_modulus = np.empty_like(volumes) hs_av_bulk_modulus = np.empty_like(volumes) hs_av_shear_modulus = np.empty_like(volumes) # Quartz bulk and shear moduli K2 = 37. G2 = 45. 
# Fluid bulk and shear moduli G1 = 0.00001 K1 = 2.35 for i in range(len(volumes)): v_bulk_modulus[i] = voigt.average_bulk_moduli( [volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2]) v_shear_modulus[i] = voigt.average_shear_moduli( [volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2]) r_bulk_modulus[i] = reuss.average_bulk_moduli( [volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2]) r_shear_modulus[i] = reuss.average_shear_moduli( [volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2]) vrh_bulk_modulus[i] = voigt_reuss_hill.average_bulk_moduli( [volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2]) vrh_shear_modulus[i] = voigt_reuss_hill.average_shear_moduli( [volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2]) hsu_bulk_modulus[i] = hashin_shtrikman_upper.average_bulk_moduli( [volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2]) hsu_shear_modulus[i] = hashin_shtrikman_upper.average_shear_moduli( [volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2]) hsl_bulk_modulus[i] = hashin_shtrikman_lower.average_bulk_moduli( [volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2]) hsl_shear_modulus[i] = hashin_shtrikman_lower.average_shear_moduli( [volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2]) hs_av_bulk_modulus[i] = 0.5 * hsl_bulk_modulus[ i] + 0.5 * hsu_bulk_modulus[i] hs_av_shear_modulus[i] = 0.5 * hsl_shear_modulus[ i] + 0.5 * hsu_shear_modulus[i] fig = mpimg.imread( '../../burnman/data/input_figures/Avseth_et_al_2010_fig3_K.png') plt.imshow(fig, extent=[0, 1.0, 0., 40.0], aspect='auto') plt.plot(volumes, v_bulk_modulus, 'g-') plt.plot(volumes, r_bulk_modulus, 'g-') plt.plot(volumes, vrh_bulk_modulus, 'g-') plt.plot(volumes, hsu_bulk_modulus, 'g-') plt.plot(volumes, hsl_bulk_modulus, 'g-') plt.plot(volumes, hs_av_bulk_modulus, 'g-') plt.ylim(0., 40.00) plt.xlim(0., 1.0) plt.xlabel("Volume fraction") plt.ylabel("Averaged bulk modulus") plt.title("Comparing with Figure 3 of Avseth et al., 2010") plt.show() if __name__ == "__main__": check_averaging() check_averaging_2() check_averaging_3() 
check_birch_murnaghan() check_birch_murnaghan_4th() check_vinet() check_slb_fig7() check_slb_fig3() check_mgd_shim_duffy_kenichi() check_mgd_fei_mao_shu_hu() check_slb_fig7_txt()
gpl-2.0
nikitasingh981/scikit-learn
sklearn/neighbors/tests/test_approximate.py
30
19128
""" Testing for the approximate neighbor search using Locality Sensitive Hashing Forest module (sklearn.neighbors.LSHForest). """ # Author: Maheshakya Wijewardena, Joel Nothman import numpy as np import scipy.sparse as sp from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_array_less from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_not_equal from sklearn.utils.testing import assert_warns_message from sklearn.utils.testing import ignore_warnings from sklearn.metrics.pairwise import pairwise_distances from sklearn.neighbors import LSHForest from sklearn.neighbors import NearestNeighbors def test_neighbors_accuracy_with_n_candidates(): # Checks whether accuracy increases as `n_candidates` increases. 
n_candidates_values = np.array([.1, 50, 500]) n_samples = 100 n_features = 10 n_iter = 10 n_points = 5 rng = np.random.RandomState(42) accuracies = np.zeros(n_candidates_values.shape[0], dtype=float) X = rng.rand(n_samples, n_features) for i, n_candidates in enumerate(n_candidates_values): lshf = LSHForest(n_candidates=n_candidates) ignore_warnings(lshf.fit)(X) for j in range(n_iter): query = X[rng.randint(0, n_samples)].reshape(1, -1) neighbors = lshf.kneighbors(query, n_neighbors=n_points, return_distance=False) distances = pairwise_distances(query, X, metric='cosine') ranks = np.argsort(distances)[0, :n_points] intersection = np.intersect1d(ranks, neighbors).shape[0] ratio = intersection / float(n_points) accuracies[i] = accuracies[i] + ratio accuracies[i] = accuracies[i] / float(n_iter) # Sorted accuracies should be equal to original accuracies print('accuracies:', accuracies) assert_true(np.all(np.diff(accuracies) >= 0), msg="Accuracies are not non-decreasing.") # Highest accuracy should be strictly greater than the lowest assert_true(np.ptp(accuracies) > 0, msg="Highest accuracy is not strictly greater than lowest.") def test_neighbors_accuracy_with_n_estimators(): # Checks whether accuracy increases as `n_estimators` increases. 
n_estimators = np.array([1, 10, 100]) n_samples = 100 n_features = 10 n_iter = 10 n_points = 5 rng = np.random.RandomState(42) accuracies = np.zeros(n_estimators.shape[0], dtype=float) X = rng.rand(n_samples, n_features) for i, t in enumerate(n_estimators): lshf = LSHForest(n_candidates=500, n_estimators=t) ignore_warnings(lshf.fit)(X) for j in range(n_iter): query = X[rng.randint(0, n_samples)].reshape(1, -1) neighbors = lshf.kneighbors(query, n_neighbors=n_points, return_distance=False) distances = pairwise_distances(query, X, metric='cosine') ranks = np.argsort(distances)[0, :n_points] intersection = np.intersect1d(ranks, neighbors).shape[0] ratio = intersection / float(n_points) accuracies[i] = accuracies[i] + ratio accuracies[i] = accuracies[i] / float(n_iter) # Sorted accuracies should be equal to original accuracies assert_true(np.all(np.diff(accuracies) >= 0), msg="Accuracies are not non-decreasing.") # Highest accuracy should be strictly greater than the lowest assert_true(np.ptp(accuracies) > 0, msg="Highest accuracy is not strictly greater than lowest.") @ignore_warnings def test_kneighbors(): # Checks whether desired number of neighbors are returned. # It is guaranteed to return the requested number of neighbors # if `min_hash_match` is set to 0. Returned distances should be # in ascending order. n_samples = 12 n_features = 2 n_iter = 10 rng = np.random.RandomState(42) X = rng.rand(n_samples, n_features) lshf = LSHForest(min_hash_match=0) # Test unfitted estimator assert_raises(ValueError, lshf.kneighbors, X[0]) ignore_warnings(lshf.fit)(X) for i in range(n_iter): n_neighbors = rng.randint(0, n_samples) query = X[rng.randint(0, n_samples)].reshape(1, -1) neighbors = lshf.kneighbors(query, n_neighbors=n_neighbors, return_distance=False) # Desired number of neighbors should be returned. 
assert_equal(neighbors.shape[1], n_neighbors) # Multiple points n_queries = 5 queries = X[rng.randint(0, n_samples, n_queries)] distances, neighbors = lshf.kneighbors(queries, n_neighbors=1, return_distance=True) assert_equal(neighbors.shape[0], n_queries) assert_equal(distances.shape[0], n_queries) # Test only neighbors neighbors = lshf.kneighbors(queries, n_neighbors=1, return_distance=False) assert_equal(neighbors.shape[0], n_queries) # Test random point(not in the data set) query = rng.randn(n_features).reshape(1, -1) lshf.kneighbors(query, n_neighbors=1, return_distance=False) # Test n_neighbors at initialization neighbors = lshf.kneighbors(query, return_distance=False) assert_equal(neighbors.shape[1], 5) # Test `neighbors` has an integer dtype assert_true(neighbors.dtype.kind == 'i', msg="neighbors are not in integer dtype.") def test_radius_neighbors(): # Checks whether Returned distances are less than `radius` # At least one point should be returned when the `radius` is set # to mean distance from the considering point to other points in # the database. # Moreover, this test compares the radius neighbors of LSHForest # with the `sklearn.neighbors.NearestNeighbors`. n_samples = 12 n_features = 2 n_iter = 10 rng = np.random.RandomState(42) X = rng.rand(n_samples, n_features) lshf = LSHForest() # Test unfitted estimator assert_raises(ValueError, lshf.radius_neighbors, X[0]) ignore_warnings(lshf.fit)(X) for i in range(n_iter): # Select a random point in the dataset as the query query = X[rng.randint(0, n_samples)].reshape(1, -1) # At least one neighbor should be returned when the radius is the # mean distance from the query to the points of the dataset. 
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine')) neighbors = lshf.radius_neighbors(query, radius=mean_dist, return_distance=False) assert_equal(neighbors.shape, (1,)) assert_equal(neighbors.dtype, object) assert_greater(neighbors[0].shape[0], 0) # All distances to points in the results of the radius query should # be less than mean_dist distances, neighbors = lshf.radius_neighbors(query, radius=mean_dist, return_distance=True) assert_array_less(distances[0], mean_dist) # Multiple points n_queries = 5 queries = X[rng.randint(0, n_samples, n_queries)] distances, neighbors = lshf.radius_neighbors(queries, return_distance=True) # dists and inds should not be 1D arrays or arrays of variable lengths # hence the use of the object dtype. assert_equal(distances.shape, (n_queries,)) assert_equal(distances.dtype, object) assert_equal(neighbors.shape, (n_queries,)) assert_equal(neighbors.dtype, object) # Compare with exact neighbor search query = X[rng.randint(0, n_samples)].reshape(1, -1) mean_dist = np.mean(pairwise_distances(query, X, metric='cosine')) nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X) distances_exact, _ = nbrs.radius_neighbors(query, radius=mean_dist) distances_approx, _ = lshf.radius_neighbors(query, radius=mean_dist) # Radius-based queries do not sort the result points and the order # depends on the method, the random_state and the dataset order. Therefore # we need to sort the results ourselves before performing any comparison. sorted_dists_exact = np.sort(distances_exact[0]) sorted_dists_approx = np.sort(distances_approx[0]) # Distances to exact neighbors are less than or equal to approximate # counterparts as the approximate radius query might have missed some # closer neighbors. 
assert_true(np.all(np.less_equal(sorted_dists_exact, sorted_dists_approx))) @ignore_warnings def test_radius_neighbors_boundary_handling(): X = [[0.999, 0.001], [0.5, 0.5], [0, 1.], [-1., 0.001]] n_points = len(X) # Build an exact nearest neighbors model as reference model to ensure # consistency between exact and approximate methods nnbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X) # Build a LSHForest model with hyperparameter values that always guarantee # exact results on this toy dataset. lsfh = LSHForest(min_hash_match=0, n_candidates=n_points, random_state=42).fit(X) # define a query aligned with the first axis query = [[1., 0.]] # Compute the exact cosine distances of the query to the four points of # the dataset dists = pairwise_distances(query, X, metric='cosine').ravel() # The first point is almost aligned with the query (very small angle), # the cosine distance should therefore be almost null: assert_almost_equal(dists[0], 0, decimal=5) # The second point form an angle of 45 degrees to the query vector assert_almost_equal(dists[1], 1 - np.cos(np.pi / 4)) # The third point is orthogonal from the query vector hence at a distance # exactly one: assert_almost_equal(dists[2], 1) # The last point is almost colinear but with opposite sign to the query # therefore it has a cosine 'distance' very close to the maximum possible # value of 2. assert_almost_equal(dists[3], 2, decimal=5) # If we query with a radius of one, all the samples except the last sample # should be included in the results. 
This means that the third sample # is lying on the boundary of the radius query: exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1) approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1) assert_array_equal(np.sort(exact_idx[0]), [0, 1, 2]) assert_array_equal(np.sort(approx_idx[0]), [0, 1, 2]) assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-1]) assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-1]) # If we perform the same query with a slightly lower radius, the third # point of the dataset that lay on the boundary of the previous query # is now rejected: eps = np.finfo(np.float64).eps exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1 - eps) approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1 - eps) assert_array_equal(np.sort(exact_idx[0]), [0, 1]) assert_array_equal(np.sort(approx_idx[0]), [0, 1]) assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-2]) assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-2]) def test_distances(): # Checks whether returned neighbors are from closest to farthest. n_samples = 12 n_features = 2 n_iter = 10 rng = np.random.RandomState(42) X = rng.rand(n_samples, n_features) lshf = LSHForest() ignore_warnings(lshf.fit)(X) for i in range(n_iter): n_neighbors = rng.randint(0, n_samples) query = X[rng.randint(0, n_samples)].reshape(1, -1) distances, neighbors = lshf.kneighbors(query, n_neighbors=n_neighbors, return_distance=True) # Returned neighbors should be from closest to farthest, that is # increasing distance values. assert_true(np.all(np.diff(distances[0]) >= 0)) # Note: the radius_neighbors method does not guarantee the order of # the results. def test_fit(): # Checks whether `fit` method sets all attribute values correctly. 
n_samples = 12 n_features = 2 n_estimators = 5 rng = np.random.RandomState(42) X = rng.rand(n_samples, n_features) lshf = LSHForest(n_estimators=n_estimators) ignore_warnings(lshf.fit)(X) # _input_array = X assert_array_equal(X, lshf._fit_X) # A hash function g(p) for each tree assert_equal(n_estimators, len(lshf.hash_functions_)) # Hash length = 32 assert_equal(32, lshf.hash_functions_[0].components_.shape[0]) # Number of trees_ in the forest assert_equal(n_estimators, len(lshf.trees_)) # Each tree has entries for every data point assert_equal(n_samples, len(lshf.trees_[0])) # Original indices after sorting the hashes assert_equal(n_estimators, len(lshf.original_indices_)) # Each set of original indices in a tree has entries for every data point assert_equal(n_samples, len(lshf.original_indices_[0])) def test_partial_fit(): # Checks whether inserting array is consistent with fitted data. # `partial_fit` method should set all attribute values correctly. n_samples = 12 n_samples_partial_fit = 3 n_features = 2 rng = np.random.RandomState(42) X = rng.rand(n_samples, n_features) X_partial_fit = rng.rand(n_samples_partial_fit, n_features) lshf = LSHForest() # Test unfitted estimator ignore_warnings(lshf.partial_fit)(X) assert_array_equal(X, lshf._fit_X) ignore_warnings(lshf.fit)(X) # Insert wrong dimension assert_raises(ValueError, lshf.partial_fit, np.random.randn(n_samples_partial_fit, n_features - 1)) ignore_warnings(lshf.partial_fit)(X_partial_fit) # size of _input_array = samples + 1 after insertion assert_equal(lshf._fit_X.shape[0], n_samples + n_samples_partial_fit) # size of original_indices_[1] = samples + 1 assert_equal(len(lshf.original_indices_[0]), n_samples + n_samples_partial_fit) # size of trees_[1] = samples + 1 assert_equal(len(lshf.trees_[1]), n_samples + n_samples_partial_fit) def test_hash_functions(): # Checks randomness of hash functions. 
# Variance and mean of each hash function (projection vector) # should be different from flattened array of hash functions. # If hash functions are not randomly built (seeded with # same value), variances and means of all functions are equal. n_samples = 12 n_features = 2 n_estimators = 5 rng = np.random.RandomState(42) X = rng.rand(n_samples, n_features) lshf = LSHForest(n_estimators=n_estimators, random_state=rng.randint(0, np.iinfo(np.int32).max)) ignore_warnings(lshf.fit)(X) hash_functions = [] for i in range(n_estimators): hash_functions.append(lshf.hash_functions_[i].components_) for i in range(n_estimators): assert_not_equal(np.var(hash_functions), np.var(lshf.hash_functions_[i].components_)) for i in range(n_estimators): assert_not_equal(np.mean(hash_functions), np.mean(lshf.hash_functions_[i].components_)) def test_candidates(): # Checks whether candidates are sufficient. # This should handle the cases when number of candidates is 0. # User should be warned when number of candidates is less than # requested number of neighbors. X_train = np.array([[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1], [6, 10, 2]], dtype=np.float32) X_test = np.array([7, 10, 3], dtype=np.float32).reshape(1, -1) # For zero candidates lshf = LSHForest(min_hash_match=32) ignore_warnings(lshf.fit)(X_train) message = ("Number of candidates is not sufficient to retrieve" " %i neighbors with" " min_hash_match = %i. Candidates are filled up" " uniformly from unselected" " indices." % (3, 32)) assert_warns_message(UserWarning, message, lshf.kneighbors, X_test, n_neighbors=3) distances, neighbors = lshf.kneighbors(X_test, n_neighbors=3) assert_equal(distances.shape[1], 3) # For candidates less than n_neighbors lshf = LSHForest(min_hash_match=31) ignore_warnings(lshf.fit)(X_train) message = ("Number of candidates is not sufficient to retrieve" " %i neighbors with" " min_hash_match = %i. Candidates are filled up" " uniformly from unselected" " indices." 
% (5, 31)) assert_warns_message(UserWarning, message, lshf.kneighbors, X_test, n_neighbors=5) distances, neighbors = lshf.kneighbors(X_test, n_neighbors=5) assert_equal(distances.shape[1], 5) def test_graphs(): # Smoke tests for graph methods. n_samples_sizes = [5, 10, 20] n_features = 3 rng = np.random.RandomState(42) for n_samples in n_samples_sizes: X = rng.rand(n_samples, n_features) lshf = LSHForest(min_hash_match=0) ignore_warnings(lshf.fit)(X) kneighbors_graph = lshf.kneighbors_graph(X) radius_neighbors_graph = lshf.radius_neighbors_graph(X) assert_equal(kneighbors_graph.shape[0], n_samples) assert_equal(kneighbors_graph.shape[1], n_samples) assert_equal(radius_neighbors_graph.shape[0], n_samples) assert_equal(radius_neighbors_graph.shape[1], n_samples) def test_sparse_input(): # note: Fixed random state in sp.rand is not supported in older scipy. # The test should succeed regardless. X1 = sp.rand(50, 100) X2 = sp.rand(10, 100) forest_sparse = LSHForest(radius=1, random_state=0).fit(X1) forest_dense = LSHForest(radius=1, random_state=0).fit(X1.A) d_sparse, i_sparse = forest_sparse.kneighbors(X2, return_distance=True) d_dense, i_dense = forest_dense.kneighbors(X2.A, return_distance=True) assert_almost_equal(d_sparse, d_dense) assert_almost_equal(i_sparse, i_dense) d_sparse, i_sparse = forest_sparse.radius_neighbors(X2, return_distance=True) d_dense, i_dense = forest_dense.radius_neighbors(X2.A, return_distance=True) assert_equal(d_sparse.shape, d_dense.shape) for a, b in zip(d_sparse, d_dense): assert_almost_equal(a, b) for a, b in zip(i_sparse, i_dense): assert_almost_equal(a, b)
bsd-3-clause
chrisburr/scikit-learn
examples/plot_multioutput_face_completion.py
330
3019
""" ============================================== Face completion with a multi-output estimators ============================================== This example shows the use of multi-output estimator to complete images. The goal is to predict the lower half of a face given its upper half. The first column of images shows true faces. The next columns illustrate how extremely randomized trees, k nearest neighbors, linear regression and ridge regression complete the lower half of those faces. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn.datasets import fetch_olivetti_faces from sklearn.utils.validation import check_random_state from sklearn.ensemble import ExtraTreesRegressor from sklearn.neighbors import KNeighborsRegressor from sklearn.linear_model import LinearRegression from sklearn.linear_model import RidgeCV # Load the faces datasets data = fetch_olivetti_faces() targets = data.target data = data.images.reshape((len(data.images), -1)) train = data[targets < 30] test = data[targets >= 30] # Test on independent people # Test on a subset of people n_faces = 5 rng = check_random_state(4) face_ids = rng.randint(test.shape[0], size=(n_faces, )) test = test[face_ids, :] n_pixels = data.shape[1] X_train = train[:, :np.ceil(0.5 * n_pixels)] # Upper half of the faces y_train = train[:, np.floor(0.5 * n_pixels):] # Lower half of the faces X_test = test[:, :np.ceil(0.5 * n_pixels)] y_test = test[:, np.floor(0.5 * n_pixels):] # Fit estimators ESTIMATORS = { "Extra trees": ExtraTreesRegressor(n_estimators=10, max_features=32, random_state=0), "K-nn": KNeighborsRegressor(), "Linear regression": LinearRegression(), "Ridge": RidgeCV(), } y_test_predict = dict() for name, estimator in ESTIMATORS.items(): estimator.fit(X_train, y_train) y_test_predict[name] = estimator.predict(X_test) # Plot the completed faces image_shape = (64, 64) n_cols = 1 + len(ESTIMATORS) plt.figure(figsize=(2. 
* n_cols, 2.26 * n_faces)) plt.suptitle("Face completion with multi-output estimators", size=16) for i in range(n_faces): true_face = np.hstack((X_test[i], y_test[i])) if i: sub = plt.subplot(n_faces, n_cols, i * n_cols + 1) else: sub = plt.subplot(n_faces, n_cols, i * n_cols + 1, title="true faces") sub.axis("off") sub.imshow(true_face.reshape(image_shape), cmap=plt.cm.gray, interpolation="nearest") for j, est in enumerate(sorted(ESTIMATORS)): completed_face = np.hstack((X_test[i], y_test_predict[est][i])) if i: sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j) else: sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j, title=est) sub.axis("off") sub.imshow(completed_face.reshape(image_shape), cmap=plt.cm.gray, interpolation="nearest") plt.show()
bsd-3-clause
sclc/NAEF
exp_scripts/worker_exp_160606_isc16.py
1
33697
""" Experiment Diary 2016-06-06 for ISC 2016 """ import sys import math import matplotlib.pyplot as plt from scipy import io import numpy as np from scipy.sparse.linalg import * from scipy.sparse import * sys.path.append("../src/") from worker import Worker from native_conjugate_gradient import NativeConjugateGradient from native_conjugate_gradient import NativeBlockConjugateGradient from gerschgorin_circle_theorem import GerschgorinCircleTheoremEigenvalueEstimator from chebyshev_polynomial import ChebyshevPolynomial from chebyshev_basis_cacg import CBCG from legendre_basis_cacg import LBCG from legendre_basis_cacg import BLBCG from chebyshev_basis_cacg import BCBCG from presenter import Presenter from power_iteration import PowerIteration class WorkerIterativeLinearSystemSolverCG_Exp_160606_isc16(Worker): """ Description: Experiment A Numerical Method: Naive Conjugate Gradient tol: max_iteration: matrix: Reference: 1. """ def __init__(self, mat_path): """ """ #print ("WorkerIterativeLinearSystemSolver works good") Worker.__init__(self) self._hist_list = [] if mat_path == "": """ Need to generatre matrix """ print("calling self._matrix_generation") #self._mat = self._matrix_generation() else: self._mat_coo = io.mmread(mat_path) self._mat = self._mat_coo.tocsr() self._mat_info = io.mminfo(mat_path) print("Done reading matrix {}, Row:{}, Col:{}".format( mat_path, self._mat.shape[0], self._mat.shape[1])) print("mminfo:{}".format(self._mat_info)) if self._mat.getformat() == "csr": print("Yeah, it is CSR") def _matrix_generator(self): """ generation of matrix """ print("_matrix_generator") def _setup_testbed(self, block_size): """ this can considered as a basic experiment input descripting """ #self._SB = np.random.random( ( self._mat.shape[0],1) ) #self._BB = np.random.random( ( self._mat.shape[0],block_size) ) #np.savetxt("/home/scl/tmp/rhs.csv",self._B, delimiter=",") #self._B = np.ones( ( self._mat.shape[0],6) ) #self._SX = np.ones ( (self._mat.shape[1],1) ) 
#self._BX = np.ones ( (self._mat.shape[1],block_size) ) #self._X = np.zeros ( (self._mat.shape[1],1) ) def _setup_numerical_algorithm(self,tol, maxiter, step_val): """ After a linear solver or other numerical methods loaded we need to setup the basic prarm for the algorithm """ self._tol = tol self._maxiter = maxiter self._step_val = step_val def conduct_experiments(self, block_size, tol, maxiter, step_val): """ function to condution the experiment """ print("to conduct the experient") self._setup_testbed(block_size) self._setup_numerical_algorithm(tol,maxiter,step_val) #isc16 figure 1 #self._cg_bcg_bcbcg_least_square_exp() #self._cg_bcg_blbcg_least_square_exp() #self._bcbcg_blbcg_least_square_exp() #isc16 figure 2 #self._bcbcg_blbcg_least_square_exp_b() #self._db_bcg_least_square() #self._db_bcbcg_eigen_param() #self._db_usage_scipy_eig() #self._diff_eigen_estimation_test_b() #isc16 figure 3 self._diff_eigen_estimation_test_c() #self._db_power_iteration_with_shifting_acc1() print("Experiments done") def _cg_bcg_bcbcg_least_square_exp(self): """ """ print("_cg_bcg_bcbcg_least_square_exp starting, ... 
") self._BB_1 = np.random.random( ( self._mat.shape[0],1) ) self._BX_1 = np.ones ( (self._mat.shape[1],1) ) self._BB_4 = np.random.random( ( self._mat.shape[0],4) ) self._BX_4 = np.ones ( (self._mat.shape[1],4) ) self._BB_12 = np.random.random( ( self._mat.shape[0],12) ) self._BX_12 = np.ones ( (self._mat.shape[1],12) ) #line 1 bcg_solver_obj = NativeBlockConjugateGradient(self._mat, self._BX_1, self._BB_1, self._tol, self._maxiter) self._final_X_cg, self._final_R_cg, self._residual_hist_cg = bcg_solver_obj.bcg_variant_lstsq_run(0) #line 2 bcg_solver_obj = NativeBlockConjugateGradient(self._mat, self._BX_12, self._BB_12, self._tol, self._maxiter) self._final_X_bcg_m12, self._final_R_bcg_m12, self._residual_hist_bcg_m12 = bcg_solver_obj.bcg_variant_lstsq_run(0) #line 3 bcbcg_solver_obj = BCBCG() self._final_x_bcbcg_m1s2, self._final_r_bcbcg_m1s2, self._residual_hist_bcbcg_m1s2 = \ bcbcg_solver_obj.bcbcg_solver_least_square(self._mat, self._BB_1, self._BX_1, 2, self._tol, self._maxiter, 0) #line 4 bcbcg_solver_obj = BCBCG() self._final_x_bcbcg_m1s8, self._final_r_bcbcg_m1s8, self._residual_hist_bcbcg_m1s8 = \ bcbcg_solver_obj.bcbcg_solver_least_square(self._mat, self._BB_1, self._BX_1, 8, self._tol, self._maxiter, 0) #line 5 bcbcg_solver_obj = BCBCG() self._final_x_bcbcg_m4s2, self._final_r_bcbcg_m4s2, self._residual_hist_bcbcg_m4s2 = \ bcbcg_solver_obj.bcbcg_solver_least_square(self._mat, self._BB_4, self._BX_4, 2, self._tol, self._maxiter, 0) #line 6 bcbcg_solver_obj = BCBCG() self._final_x_bcbcg_m4s8, self._final_r_bcbcg_m4s8, self._residual_hist_bcbcg_m4s8 = \ bcbcg_solver_obj.bcbcg_solver_least_square(self._mat, self._BB_4, self._BX_4, 8, self._tol, self._maxiter, 0) plot_worker = Presenter() residual_list = [self._residual_hist_cg, self._residual_hist_bcg_m12, \ self._residual_hist_bcbcg_m1s2, self._residual_hist_bcbcg_m1s8, \ self._residual_hist_bcbcg_m4s2, self._residual_hist_bcbcg_m4s8 ] legend_list = ["cg","bcg_m12", "bcbcg_m1s2", "bcbcg_m1s8", 
"bcbcg_m4s2", "bcbcg_m4s8"] color_list = ["r","k","b","y","m","g"] #plot_worker.instant_plot_y_log10(residual_list, "test", "#iteration", "$\\mathbf{log_{10}\\frac{||x_1||}{||b_1||}}$", legend_list, color_list) #plot_worker.instant_plot_y_log10(residual_list, "wathen100(dim=30,401, nnz=471,601, cond=5816.01 )", "#iteration", "$\\mathbf{log_{10}\\frac{||x_1||}{||b_1||}}$", legend_list, color_list) plot_worker.instant_plot_y_log10(residual_list, "wathen100", "#iteration", "$\\mathbf{log_{10}\\frac{||x_1||}{||b_1||}}$", legend_list, color_list) #def _cg_bcg_blbcg_least_square_exp(self): # """ """ # print("_cg_bcg_blbcg_least_square_exp starting, ... ") # self._BB_1 = np.random.random( ( self._mat.shape[0],1) ) # self._BX_1 = np.ones ( (self._mat.shape[1],1) ) # self._BB_6 = np.random.random( ( self._mat.shape[0],6) ) # self._BX_6 = np.ones ( (self._mat.shape[1],6) ) # self._BB_12 = np.random.random( ( self._mat.shape[0],12) ) # self._BX_12 = np.ones ( (self._mat.shape[1],12) ) # #line 1 # bcg_solver_obj = NativeBlockConjugateGradient(self._mat, self._BX_1, self._BB_1, self._tol, self._maxiter) # self._final_X_cg, self._final_R_cg, self._residual_hist_cg = bcg_solver_obj.bcg_variant_lstsq_run(0) # #line 2 # bcg_solver_obj = NativeBlockConjugateGradient(self._mat, self._BX_12, self._BB_12, self._tol, self._maxiter) # self._final_X_bcg_m12, self._final_R_bcg_m12, self._residual_hist_bcg_m12 = bcg_solver_obj.bcg_variant_lstsq_run(0) # #line 3 # blbcg_solver_obj = BLBCG() # self._final_x_blbcg_m1s2, self._final_r_blbcg_m1s2, self._residual_hist_blbcg_m1s2 = \ # blbcg_solver_obj.blbcg_solver_least_square(self._mat, self._BB_1, self._BX_1, 2, self._tol, self._maxiter, 0) # #line 4 # blbcg_solver_obj = BLBCG() # self._final_x_blbcg_m1s6, self._final_r_blbcg_m1s6, self._residual_hist_blbcg_m1s6 = \ # blbcg_solver_obj.blbcg_solver_least_square(self._mat, self._BB_1, self._BX_1, 6, self._tol, self._maxiter, 0) # #line 5 # blbcg_solver_obj = BLBCG() # self._final_x_blbcg_m6s2, 
self._final_r_blbcg_m6s2, self._residual_hist_blbcg_m6s2 = \ # blbcg_solver_obj.blbcg_solver_least_square(self._mat, self._BB_6, self._BX_6, 2, self._tol, self._maxiter, 0) # #line 6 # blbcg_solver_obj = BLBCG() # self._final_x_blbcg_m6s6, self._final_r_blbcg_m6s6, self._residual_hist_blbcg_m6s6 = \ # blbcg_solver_obj.blbcg_solver_least_square(self._mat, self._BB_6, self._BX_6, 6, self._tol, self._maxiter, 0) # plot_worker = Presenter() # residual_list = [self._residual_hist_cg, self._residual_hist_bcg_m12, \ # self._residual_hist_blbcg_m1s2, self._residual_hist_blbcg_m1s6, \ # self._residual_hist_blbcg_m6s2, self._residual_hist_blbcg_m6s6 ] # legend_list = ["cg","bcg_m12", "blbcg_m1s2", "blbcg_m1s6", "blbcg_m6s2", "blbcg_m6s6"] # color_list = ["r","k","b","y","m","g"] # plot_worker.instant_plot_y_log10(residual_list, "test", "#iteration", "$\\mathbf{log_{10}\\frac{||x_1||}{||b_1||}}$", legend_list, color_list) #def _bcbcg_blbcg_least_square_exp(self): # """ """ # self._BB_1 = np.random.random( ( self._mat.shape[0],1) ) # self._BX_1 = np.ones ( (self._mat.shape[1],1) ) # self._BB_6 = np.random.random( ( self._mat.shape[0],6) ) # self._BX_6 = np.ones ( (self._mat.shape[1],6) ) # #line 1 # blbcg_solver_obj = BLBCG() # self._final_x_blbcg_m1s2, self._final_r_blbcg_m1s2, self._residual_hist_blbcg_m1s2 = \ # blbcg_solver_obj.blbcg_solver_least_square(self._mat, self._BB_1, self._BX_1, 2, self._tol, self._maxiter, 0) # #line 2 # blbcg_solver_obj = BLBCG() # self._final_x_blbcg_m1s6, self._final_r_blbcg_m1s6, self._residual_hist_blbcg_m1s6 = \ # blbcg_solver_obj.blbcg_solver_least_square(self._mat, self._BB_1, self._BX_1, 6, self._tol, self._maxiter, 0) # #line 3 # blbcg_solver_obj = BLBCG() # self._final_x_blbcg_m6s2, self._final_r_blbcg_m6s2, self._residual_hist_blbcg_m6s2 = \ # blbcg_solver_obj.blbcg_solver_least_square(self._mat, self._BB_6, self._BX_6, 2, self._tol, self._maxiter, 0) # #line 4 # blbcg_solver_obj = BLBCG() # self._final_x_blbcg_m6s6, 
self._final_r_blbcg_m6s6, self._residual_hist_blbcg_m6s6 = \ # blbcg_solver_obj.blbcg_solver_least_square(self._mat, self._BB_6, self._BX_6, 6, self._tol, self._maxiter, 0) # #line 5 # bcbcg_solver_obj = BCBCG() # self._final_x_bcbcg_m1s2, self._final_r_bcbcg_m1s2, self._residual_hist_bcbcg_m1s2 = \ # bcbcg_solver_obj.bcbcg_solver_least_square(self._mat, self._BB_1, self._BX_1, 2, self._tol, self._maxiter, 0) # #line 6 # bcbcg_solver_obj = BCBCG() # self._final_x_bcbcg_m1s6, self._final_r_bcbcg_m1s6, self._residual_hist_bcbcg_m1s6 = \ # bcbcg_solver_obj.bcbcg_solver_least_square(self._mat, self._BB_1, self._BX_1, 6, self._tol, self._maxiter, 0) # #line 7 # bcbcg_solver_obj = BCBCG() # self._final_x_bcbcg_m6s2, self._final_r_bcbcg_m6s2, self._residual_hist_bcbcg_m6s2 = \ # bcbcg_solver_obj.bcbcg_solver_least_square(self._mat, self._BB_6, self._BX_6, 2, self._tol, self._maxiter, 0) # #line 8 # bcbcg_solver_obj = BCBCG() # self._final_x_bcbcg_m6s6, self._final_r_bcbcg_m6s6, self._residual_hist_bcbcg_m6s6 = \ # bcbcg_solver_obj.bcbcg_solver_least_square(self._mat, self._BB_6, self._BX_6, 6, self._tol, self._maxiter, 0) # plot_worker = Presenter() # residual_list = [self._residual_hist_blbcg_m1s2, self._residual_hist_blbcg_m1s6, \ # self._residual_hist_blbcg_m6s2, self._residual_hist_blbcg_m6s6, \ # self._residual_hist_bcbcg_m1s2, self._residual_hist_bcbcg_m1s6, \ # self._residual_hist_bcbcg_m6s2, self._residual_hist_bcbcg_m6s6 ] # legend_list = ["blbcg_m1s2", "blbcg_m1s6", "blbcg_m6s2", "blbcg_m6s6", "bcbcg_m1s2", "bcbcg_m1s6", "bcbcg_m6s2", "bcbcg_m6s6"] # color_list = ["r","k","b","y","m","g", "m", "0.5"] # #plot_worker.instant_plot_y_log10(residual_list, "Chem97ZtZ", "#iteration", "$\\frac{||x_1||}{||b_1||}$", legend_list, color_list) # plot_worker.instant_plot_y_log10(residual_list, "test", "#iteration", "$\\mathbf{log_{10}\\frac{||x_1||}{||b_1||}}$", legend_list, color_list) def _bcbcg_blbcg_least_square_exp_b(self): """ figure 2""" 
print("_bcbcg_blbcg_least_square_exp_b starting ... ") m=3 self._BB = np.random.random( ( self._mat.shape[0],m) ) self._BX = np.ones ( (self._mat.shape[1],m) ) #line 1 blbcg_solver_obj = BLBCG() self._final_x_blbcg_a, self._final_r_blbcg_a, self._residual_hist_blbcg_a = \ blbcg_solver_obj.blbcg_solver_least_square(self._mat, self._BB, self._BX, 16, self._tol, self._maxiter, 0) #line 2 blbcg_solver_obj = BLBCG() self._final_x_blbcg_b, self._final_r_blbcg_b, self._residual_hist_blbcg_b = \ blbcg_solver_obj.blbcg_solver_least_square(self._mat, self._BB, self._BX, 32, self._tol, self._maxiter, 0) #line addition #blbcg_solver_obj = BLBCG() #self._final_x_blbcg_c, self._final_r_blbcg_c, self._residual_hist_blbcg_c = \ # blbcg_solver_obj.blbcg_solver_least_square(self._mat, self._BB_4, self._BX_4, 32, self._tol, self._maxiter, 0) #line 3 bcbcg_solver_obj = BCBCG() self._final_x_bcbcg_a, self._final_r_bcbcg_a, self._residual_hist_bcbcg_a = \ bcbcg_solver_obj.bcbcg_solver_least_square(self._mat, self._BB, self._BX, 16, self._tol, self._maxiter, 0) #line 4 bcbcg_solver_obj = BCBCG() self._final_x_bcbcg_b, self._final_r_bcbcg_b, self._residual_hist_bcbcg_b = \ bcbcg_solver_obj.bcbcg_solver_least_square(self._mat, self._BB, self._BX, 32, self._tol, self._maxiter, 0) #line addition #bcbcg_solver_obj = BCBCG() #self._final_x_bcbcg_c, self._final_r_bcbcg_c, self._residual_hist_bcbcg_c = \ # bcbcg_solver_obj.bcbcg_solver_least_square(self._mat, self._BB_4, self._BX_4, 32, self._tol, self._maxiter, 0) plot_worker = Presenter() #residual_list = [self._residual_hist_blbcg_a, self._residual_hist_blbcg_b, self._residual_hist_blbcg_c, \ # self._residual_hist_bcbcg_a, self._residual_hist_bcbcg_b, self._residual_hist_bcbcg_c ] residual_list = [self._residual_hist_blbcg_a, self._residual_hist_blbcg_b, \ self._residual_hist_bcbcg_a, self._residual_hist_bcbcg_b ] #legend_list = ["blbcg_m4s4", "blbcg_m4s8", "blbcg_m4s12", "bcbcg_m4s4", "bcbcg_m4s8", "bcbcg_m4s12"] legend_list = 
["blbcg_m3s16", "blbcg_m3s32", "bcbcg_m3s16", "bcbcg_m3s32"] #color_list = ["r","k","b","y","g","m"] color_list = ["r","k","b","g"] #plot_worker.instant_plot_y_log10(residual_list, "test", "#iteration", "$\\mathbf{log_{10}\\frac{||x_1||}{||b_1||}}$", legend_list, color_list) plot_worker.instant_plot_y_log10(residual_list, "bodyy6", "#iteration", "$\\mathbf{log_{10}\\frac{||x_1||}{||b_1||}}$", legend_list, color_list) #def _db_bcbcg_eigen_param(self): # """ """ # self._BB_6 = np.random.random( ( self._mat.shape[0],6) ) # self._BX_6 = np.ones ( (self._mat.shape[1],6) ) # gerschgorin_estimator = GerschgorinCircleTheoremEigenvalueEstimator() # max_eigenvalue, min_eigenvalue = gerschgorin_estimator.csr_mat_extreme_eigenvalue_estimation(self._mat) # print("################", "max:",max_eigenvalue, " , min:", min_eigenvalue) # bcbcg_solver_obj = BCBCG() # self._final_x_bcbcg_eigenparam_m6s6, self._final_r_bcbcg_eigenparam_m6s6, self._residual_hist_bcbcg_eigenparam_m6s6 = \ # bcbcg_solver_obj.bcbcg_solver_least_square_eigen_param(self._mat, self._BB_6, self._BX_6, 6, self._tol, self._maxiter, 0, max_eigenvalue, min_eigenvalue) # bcbcg_solver_obj = BCBCG() # self._final_x_bcbcg_m6s6, self._final_r_bcbcg_m6s6, self._residual_hist_bcbcg_m6s6 = \ # bcbcg_solver_obj.bcbcg_solver_least_square(self._mat, self._BB_6, self._BX_6, 6, self._tol, self._maxiter, 0) # plot_worker = Presenter() # residual_list = [self._residual_hist_bcbcg_eigenparam_m6s6, self._residual_hist_bcbcg_m6s6] # legend_list = ["bcbcg_eigenparam_m6s6", "bcbcg_m6s6"] # color_list = ["r","k"] # plot_worker.instant_plot_y_log10(residual_list, "test", "#iteration", "$\\mathbf{log_{10}\\frac{||x_1||}{||b_1||}}$", legend_list, color_list) #def _db_usage_scipy_eig(self): # """ """ # eigs_vals, eigs_vecs = eigs(self._mat, k=6) # print(eigs_vals) # #eigs_vals, eigs_vecs = eigs(self._mat, k=6, which="LM") # #eigs_vals, eigs_vecs = eigs(self._mat, k=6, tol=1e-3,which="SR") # self._matdense = self._mat.todense() # 
#print(self._matdense) # eig_vals,eig_vecs = np.linalg.eigh(self._matdense) # #eig_vals,eig_vecs = np.linalg.eig(self._matdense) # #print(eig_vals[0], " , ", eig_vals[-1]) # print(eig_vals) # #print(eig_vecs) #def _diff_eigen_estimation_test(self): # """ Notice: change a huge into dense and use numpy.linalg.eigh is very dangerous, you computer may be freezing forever""" # gerschgorin_estimator = GerschgorinCircleTheoremEigenvalueEstimator() # gerschgorin_max_eigenvalue, gerschgorin_min_eigenvalue = gerschgorin_estimator.csr_mat_extreme_eigenvalue_estimation(self._mat) # print("################Gershchogrin theorem", "max:",gerschgorin_max_eigenvalue, " , old min:", gerschgorin_min_eigenvalue) # if gerschgorin_min_eigenvalue< 0.: # gerschgorin_min_eigenvalue = 0. # print("################Gershchogrin theorem", "max:",gerschgorin_max_eigenvalue, " , new min:", gerschgorin_min_eigenvalue) # power_method_solver = PowerIteration() # self._init_eigen_vec = np.random.random( ( self._mat.shape[0],1) ) # pm_maxiters = 1000 # pm_tol = 1e-6 # pm_max_eigen_vec, pm_max_eigen_val, pm_max_eigen_list = power_method_solver.naive_power_iteration (self._mat, self._init_eigen_vec, pm_maxiters, pm_tol) # print("################Power method max:",pm_max_eigen_val, " , iteration:", len(pm_max_eigen_list)) # self._init_eigen_vec = np.random.random( ( self._mat.shape[0],1) ) # pm_min_eigen_vec, pm_min_eigen_val, pm_min_eigen_list = power_method_solver.power_iteration_with_shifting_acc1 (self._mat, self._init_eigen_vec, pm_max_eigen_val, pm_maxiters, pm_tol) # pm_min_eigen_val = pm_min_eigen_val + pm_max_eigen_val # print("################Power method min:",pm_min_eigen_val, " , iteration:", len(pm_min_eigen_list)) # numpy_eigh_eigen_vals, numpy_eigh_eigen_vecs = np.linalg.eigh(self._mat.todense()) # assert numpy_eigh_eigen_vals[0]<numpy_eigh_eigen_vals[-1] # print("################Numpy.linalg.eigh", "max:",numpy_eigh_eigen_vals[-1], " , min:", numpy_eigh_eigen_vals[0]) # ## # eigen_repo = 
{"numpy_eigh":(numpy_eigh_eigen_vals[-1],numpy_eigh_eigen_vals[0]), \ # "gerschgorin":(gerschgorin_max_eigenvalue, gerschgorin_min_eigenvalue),\ # "power_method":(pm_max_eigen_val,pm_min_eigen_val), \ # "mix_method":(pm_max_eigen_val,gerschgorin_min_eigenvalue) \ # } # print(type(eigen_repo)) # print(eigen_repo["numpy_eigh"], "," ,eigen_repo["gerschgorin"], " , ", eigen_repo["power_method"]) # print("max: ", eigen_repo["numpy_eigh"][0], "," ,eigen_repo["gerschgorin"][0], " , ", eigen_repo["power_method"][0]) # self._BB = np.random.random( ( self._mat.shape[0],4) ) # self._BX = np.ones ( (self._mat.shape[1],4) ) # #line 1 # bcbcg_solver_obj = BCBCG() # self._final_np_x_bcbcg_m4s4, self._final_np_r_bcbcg_m4s4, self._np_residual_hist_bcbcg_m4s4 = \ # bcbcg_solver_obj.bcbcg_solver_least_square_eigen_param(self._mat, self._BB, self._BX, 16, self._tol, self._maxiter, 0, eigen_repo["numpy_eigh"][0], eigen_repo["numpy_eigh"][1]) # #line 2 # bcbcg_solver_obj = BCBCG() # self._final_gerschgorin_x_bcbcg_m4s4, self._final_gerschgorin_r_bcbcg_m4s4, self._gerschgorin_residual_hist_bcbcg_m4s4 = \ # bcbcg_solver_obj.bcbcg_solver_least_square_eigen_param(self._mat, self._BB, self._BX, 16, self._tol, self._maxiter, 0, eigen_repo["gerschgorin"][0], eigen_repo["gerschgorin"][1]) # #line 3 # bcbcg_solver_obj = BCBCG() # self._final_pm_x_bcbcg_m4s4, self._final_pm_r_bcbcg_m4s4, self._pm_residual_hist_bcbcg_m4s4 = \ # bcbcg_solver_obj.bcbcg_solver_least_square_eigen_param(self._mat, self._BB, self._BX, 16, self._tol, self._maxiter, 0, eigen_repo["power_method"][0], eigen_repo["power_method"][1]) # #line 4 # bcbcg_solver_obj = BCBCG() # self._final_mix_x_bcbcg_m4s4, self._final_mix_r_bcbcg_m4s4, self._mix_residual_hist_bcbcg_m4s4 = \ # bcbcg_solver_obj.bcbcg_solver_least_square_eigen_param(self._mat, self._BB, self._BX, 16, self._tol, self._maxiter, 0, eigen_repo["mix_method"][0], eigen_repo["mix_method"][1]) # plot_worker = Presenter() # residual_list = 
[self._np_residual_hist_bcbcg_m4s4, self._gerschgorin_residual_hist_bcbcg_m4s4, self._pm_residual_hist_bcbcg_m4s4,self._mix_residual_hist_bcbcg_m4s4 ] # legend_list = ["N_bcbcg_m4s4", "G_bcbcg_m4s4","P_bcbcg_m4s4","M_bcbcg_m4s4" ] # color_list = ["r","k","b", "y"] # plot_worker.instant_plot_y_log10(residual_list, "Chem97ZtZ", "#iteration", "$\\mathbf{log_{10}\\frac{||x_1||}{||b_1||}}$", legend_list, color_list) #def _diff_eigen_estimation_test_b(self): # """ """ # print("_diff_eigen_estimation_test_b starting ...") # gerschgorin_estimator = GerschgorinCircleTheoremEigenvalueEstimator() # gerschgorin_max_eigenvalue, gerschgorin_min_eigenvalue = gerschgorin_estimator.csr_mat_extreme_eigenvalue_estimation(self._mat) # print("################Gershchogrin theorem", "min:",gerschgorin_max_eigenvalue, " , old min:", gerschgorin_min_eigenvalue) # if gerschgorin_min_eigenvalue< 0.: # gerschgorin_min_eigenvalue = 0. # print("################Gershchogrin theorem", "min:",gerschgorin_max_eigenvalue, " , new min:", gerschgorin_min_eigenvalue) # eigs_vals, eigs_vecs = eigs(self._mat, k=6, which="LM") # print("eigs_vals max", eigs_vals) # eigs_vals, eigs_vecs = eigs(self._mat, k=6, tol=1e-3,which="SR") # print("eigs_vals min", eigs_vals) # power_method_solver = PowerIteration() # self._init_eigen_vec = np.random.random( ( self._mat.shape[0],1) ) # pm_maxiters = 300 # pm_tol = 1e-6 # pm_max_eigen_vec, pm_max_eigen_val, pm_max_eigen_list = power_method_solver.naive_power_iteration (self._mat, self._init_eigen_vec, pm_maxiters, pm_tol) # print("################Power method max:",pm_max_eigen_val, " , iteration:", len(pm_max_eigen_list)) # self._init_eigen_vec = np.random.random( ( self._mat.shape[0],1) ) # pm_min_eigen_vec, pm_min_eigen_val, pm_min_eigen_list = power_method_solver.power_iteration_with_shifting_acc1 (self._mat, self._init_eigen_vec, pm_max_eigen_val, pm_maxiters, pm_tol) # pm_min_eigen_val = pm_min_eigen_val + pm_max_eigen_val # print("################Power method 
min:",pm_min_eigen_val, " , iteration:", len(pm_min_eigen_list)) # ## # eigen_repo = {"gerschgorin":(gerschgorin_max_eigenvalue, gerschgorin_min_eigenvalue),\ # "power_method":(pm_max_eigen_val,pm_min_eigen_val), \ # "mix_method":(pm_max_eigen_val,gerschgorin_min_eigenvalue) \ # } # #eigen_repo = {"gerschgorin":(gerschgorin_max_eigenvalue, gerschgorin_min_eigenvalue),\ # # "power_method":(pm_max_eigen_val,gerschgorin_min_eigenvalue), \ # # "mix_method":(pm_max_eigen_val,gerschgorin_min_eigenvalue) \ # # } # print(eigen_repo["gerschgorin"], " , ", eigen_repo["power_method"], " , ",eigen_repo["mix_method"]) # self._BB = np.random.random( ( self._mat.shape[0],4) ) # self._BX = np.ones ( (self._mat.shape[1],4) ) # #line 2 # bcbcg_solver_obj = BCBCG() # self._final_gerschgorin_x_bcbcg_m4s4, self._final_gerschgorin_r_bcbcg_m4s4, self._gerschgorin_residual_hist_bcbcg_m4s4 = \ # bcbcg_solver_obj.bcbcg_solver_least_square_eigen_param(self._mat, self._BB, self._BX, 16, self._tol, self._maxiter, 0, eigen_repo["gerschgorin"][0], eigen_repo["gerschgorin"][1]) # #line 3 # bcbcg_solver_obj = BCBCG() # self._final_pm_x_bcbcg_m4s4, self._final_pm_r_bcbcg_m4s4, self._pm_residual_hist_bcbcg_m4s4 = \ # bcbcg_solver_obj.bcbcg_solver_least_square_eigen_param(self._mat, self._BB, self._BX, 16, self._tol, self._maxiter, 0, eigen_repo["power_method"][0], eigen_repo["power_method"][1]) # #line 4 # bcbcg_solver_obj = BCBCG() # self._final_mix_x_bcbcg_m4s4, self._final_mix_r_bcbcg_m4s4, self._mix_residual_hist_bcbcg_m4s4 = \ # bcbcg_solver_obj.bcbcg_solver_least_square_eigen_param(self._mat, self._BB, self._BX, 16, self._tol, self._maxiter, 0, eigen_repo["mix_method"][0], eigen_repo["mix_method"][1]) # plot_worker = Presenter() # residual_list = [self._gerschgorin_residual_hist_bcbcg_m4s4, self._pm_residual_hist_bcbcg_m4s4,self._mix_residual_hist_bcbcg_m4s4 ] # legend_list = ["G_bcbcg_m4s4","P_bcbcg_m4s4","M_bcbcg_m4s4" ] # color_list = ["r","k","b"] # 
plot_worker.instant_plot_y_log10(residual_list, "Chem97ZtZ", "#iteration", "$\\mathbf{log_{10}\\frac{||x_1||}{||b_1||}}$", legend_list, color_list) def _diff_eigen_estimation_test_c(self): """ """ print("_diff_eigen_estimation_test_c starting ...") gerschgorin_estimator = GerschgorinCircleTheoremEigenvalueEstimator() gerschgorin_max_eigenvalue, gerschgorin_min_eigenvalue = gerschgorin_estimator.csr_mat_extreme_eigenvalue_estimation(self._mat) print("################Gershchogrin theorem", "min:",gerschgorin_max_eigenvalue, " , old min:", gerschgorin_min_eigenvalue) if gerschgorin_min_eigenvalue< 0.: gerschgorin_min_eigenvalue = 0. print("################Gershchogrin theorem", "min:",gerschgorin_max_eigenvalue, " , new min:", gerschgorin_min_eigenvalue) eigs_vals, eigs_vecs = eigs(self._mat, k=6, which="LM") print("eigs_vals max", eigs_vals) #eigs_vals, eigs_vecs = eigs(self._mat, k=6, tol=1e-3,which="SR") #print("eigs_vals min", eigs_vals) power_method_solver = PowerIteration() self._init_eigen_vec = np.random.random( ( self._mat.shape[0],1) ) pm_maxiters = 500 pm_tol = 1e-6 pm_max_eigen_vec, pm_max_eigen_val, pm_max_eigen_list = power_method_solver.naive_power_iteration (self._mat, self._init_eigen_vec, pm_maxiters, pm_tol) print("################Power method max:",pm_max_eigen_val, " , iteration:", len(pm_max_eigen_list)) #self._init_eigen_vec = np.random.random( ( self._mat.shape[0],1) ) #pm_min_eigen_vec, pm_min_eigen_val, pm_min_eigen_list = power_method_solver.power_iteration_with_shifting_acc1 (self._mat, self._init_eigen_vec, pm_max_eigen_val, pm_maxiters, pm_tol) #pm_min_eigen_val = pm_min_eigen_val + pm_max_eigen_val #print("################Power method min:",pm_min_eigen_val, " , iteration:", len(pm_min_eigen_list)) ## eigen_repo = {"gerschgorin":(gerschgorin_max_eigenvalue, gerschgorin_min_eigenvalue),\ "mix_method":(pm_max_eigen_val,gerschgorin_min_eigenvalue) \ } print(eigen_repo["gerschgorin"], " , ", eigen_repo["mix_method"]) self._BB = 
np.random.random( ( self._mat.shape[0],3) ) self._BX = np.ones ( (self._mat.shape[1],3) ) step_val = 32 #line 2 bcbcg_solver_obj = BCBCG() self._final_gerschgorin_x_bcbcg_a, self._final_gerschgorin_r_bcbcg_a, self._gerschgorin_residual_hist_bcbcg_a = \ bcbcg_solver_obj.bcbcg_solver_least_square_eigen_param(self._mat, self._BB, self._BX, step_val, self._tol, self._maxiter, 0, eigen_repo["gerschgorin"][0], eigen_repo["gerschgorin"][1]) #line 4 bcbcg_solver_obj = BCBCG() self._final_mix_x_bcbcg_b, self._final_mix_r_bcbcg_b, self._mix_residual_hist_bcbcg_b = \ bcbcg_solver_obj.bcbcg_solver_least_square_eigen_param(self._mat, self._BB, self._BX, step_val, self._tol, self._maxiter, 0, eigen_repo["mix_method"][0], eigen_repo["mix_method"][1]) #addition blbcg_solver_obj = BLBCG() self._final_gerschgorin_x_blbcg_a, self._final_gerschgorin_r_blbcg_a, self._gerschgorin_residual_hist_blbcg_a = \ blbcg_solver_obj.blbcg_solver_least_square_eigen_param(self._mat, self._BB, self._BX, step_val, self._tol, self._maxiter, 0, eigen_repo["gerschgorin"][0], eigen_repo["gerschgorin"][1]) blbcg_solver_obj = BLBCG() self._final_mix_x_blbcg_b, self._final_mix_r_blbcg_b, self._mix_residual_hist_blbcg_b = \ blbcg_solver_obj.blbcg_solver_least_square_eigen_param(self._mat, self._BB, self._BX, step_val, self._tol, self._maxiter, 0, eigen_repo["mix_method"][0], eigen_repo["mix_method"][1]) plot_worker = Presenter() residual_list = [self._gerschgorin_residual_hist_bcbcg_a, self._mix_residual_hist_bcbcg_b , \ self._gerschgorin_residual_hist_blbcg_a, self._mix_residual_hist_blbcg_b ] legend_list = ["G_bcbcg_m3s32","M_bcbcg_m3s32" , "G_blbcg_m3s32","M_blbcg_m3s32"] color_list = ["r","k","b","g"] #plot_worker.instant_plot_y_log10(residual_list, "test", "#iteration", "$\\mathbf{log_{10}\\frac{||x_1||}{||b_1||}}$", legend_list, color_list) plot_worker.instant_plot_y_log10(residual_list, "wathen100", "#iteration", "$\\mathbf{log_{10}\\frac{||x_1||}{||b_1||}}$", legend_list, color_list) #def 
_db_power_iteration_with_shifting_acc1(self): # """ """ # print("_db_power_iteration_with_shifting_acc1 starting ....") # #print(self._mat) # #print(self._mat.diagonal()) # #shift_factor = 100. # #print(self._mat.diagonal() + shift_factor) # #self._mat.setdiag(self._mat.diagonal() + shift_factor) # #print("new dia") # #print(self._mat.diagonal()) # #print(self._mat) # pm_max_eigen_val = -100 # pm_maxiters = 10 # pm_tol = 1e-12 # power_method_solver = PowerIteration() # self._init_eigen_vec_1 = np.random.random( ( self._mat.shape[0],1) ) # self._init_eigen_vec_2 = self._init_eigen_vec_1.copy() # pm_min_eigen_vec, pm_min_eigen_val, pm_min_eigen_list = power_method_solver.power_iteration_with_shifting_acc1 (self._mat, self._init_eigen_vec_1, pm_max_eigen_val, pm_maxiters, pm_tol) # print("###########") # pm_min_eigen_vec, pm_min_eigen_val, pm_min_eigen_list = power_method_solver.power_iteration_with_shifting (self._mat, self._init_eigen_vec_2, pm_max_eigen_val, pm_maxiters, pm_tol) def main (): # main function for today's experiments #small matrix for debuging #mat_path = "/home/scl/MStore/mesh1e1/mesh1e1.mtx" #bad #mat_path = "/home/scl/MStore/vanbody/vanbody.mtx" #mat_path = "/home/scl/MStore/olafu/olafu.mtx" #mat_path = "/home/scl/MStore/raefsky4/raefsky4.mtx" #mat_path = "/home/scl/MStore/smt/smt.mtx" #mat_path = "/home/scl/MStore/bcsstk36/bcsstk36.mtx" #mat_path = "/home/scl/MStore/pdb1HYS/pdb1HYS.mtx" #mat_path = "/home/scl/MStore/ship_001/ship_001.mtx" # not so good #mat_path = "/home/scl/MStore/Dubcova1/Dubcova1.mtx" #mat_path = "/home/scl/MStore/bcsstk17/bcsstk17.mtx" #mat_path = "/home/scl/MStore/wathen100/wathen100.mtx" #mat_path = "/home/scl/MStore/nasa2146/nasa2146.mtx" #mat_path = "/home/scl/MStore/crystm01/crystm01.mtx" #mat_path = "/home/scl/MStore/ex13/ex13.mtx" #mat_path = "/home/scl/MStore/LFAT5/LFAT5.mtx" #good #mat_path = "/home/scl/MStore/bodyy6/bodyy6.mtx" #mat_path = "/home/scl/MStore/crystm02/crystm02.mtx" #mat_path = 
"/home/scl/MStore/Chem97ZtZ/Chem97ZtZ.mtx" #isc16 #mat_path = "/home/scl/MStore/bodyy6/bodyy6.mtx" mat_path = "/home/scl/MStore/wathen100/wathen100.mtx" block_size = 4 tol = 1e-10 maxiter = 1500 step_val =64 linear_system_solver_worker_test = WorkerIterativeLinearSystemSolverCG_Exp_160606_isc16(mat_path) linear_system_solver_worker_test.conduct_experiments(block_size,tol,maxiter, step_val) #linear_system_solver_worker_test.chebyshev_poly_exp_a(0,6) #linear_system_solver_worker_test.legendre_poly_exp_a(0,6) #linear_system_solver_worker_test.debug_NativeConjugateGradient() if __name__ == "__main__": """ call main funtion for testing """ main()
gpl-3.0
rvraghav93/scikit-learn
sklearn/gaussian_process/gaussian_process.py
17
34869
# -*- coding: utf-8 -*- # Author: Vincent Dubourg <vincent.dubourg@gmail.com> # (mostly translation, see implementation details) # License: BSD 3 clause from __future__ import print_function import numpy as np from scipy import linalg, optimize from ..base import BaseEstimator, RegressorMixin from ..metrics.pairwise import manhattan_distances from ..utils import check_random_state, check_array, check_X_y from ..utils.validation import check_is_fitted from . import regression_models as regression from . import correlation_models as correlation from ..utils import deprecated MACHINE_EPSILON = np.finfo(np.double).eps @deprecated("l1_cross_distances was deprecated in version 0.18 " "and will be removed in 0.20.") def l1_cross_distances(X): """ Computes the nonzero componentwise L1 cross-distances between the vectors in X. Parameters ---------- X : array_like An array with shape (n_samples, n_features) Returns ------- D : array with shape (n_samples * (n_samples - 1) / 2, n_features) The array of componentwise L1 cross-distances. ij : arrays with shape (n_samples * (n_samples - 1) / 2, 2) The indices i and j of the vectors in X associated to the cross- distances in D: D[k] = np.abs(X[ij[k, 0]] - Y[ij[k, 1]]). """ X = check_array(X) n_samples, n_features = X.shape n_nonzero_cross_dist = n_samples * (n_samples - 1) // 2 ij = np.zeros((n_nonzero_cross_dist, 2), dtype=np.int) D = np.zeros((n_nonzero_cross_dist, n_features)) ll_1 = 0 for k in range(n_samples - 1): ll_0 = ll_1 ll_1 = ll_0 + n_samples - k - 1 ij[ll_0:ll_1, 0] = k ij[ll_0:ll_1, 1] = np.arange(k + 1, n_samples) D[ll_0:ll_1] = np.abs(X[k] - X[(k + 1):n_samples]) return D, ij @deprecated("GaussianProcess was deprecated in version 0.18 and will be " "removed in 0.20. Use the GaussianProcessRegressor instead.") class GaussianProcess(BaseEstimator, RegressorMixin): """The legacy Gaussian Process model class. .. deprecated:: 0.18 This class will be removed in 0.20. Use the :class:`GaussianProcessRegressor` instead. 
Read more in the :ref:`User Guide <gaussian_process>`. Parameters ---------- regr : string or callable, optional A regression function returning an array of outputs of the linear regression functional basis. The number of observations n_samples should be greater than the size p of this basis. Default assumes a simple constant regression trend. Available built-in regression models are:: 'constant', 'linear', 'quadratic' corr : string or callable, optional A stationary autocorrelation function returning the autocorrelation between two points x and x'. Default assumes a squared-exponential autocorrelation model. Built-in correlation models are:: 'absolute_exponential', 'squared_exponential', 'generalized_exponential', 'cubic', 'linear' beta0 : double array_like, optional The regression weight vector to perform Ordinary Kriging (OK). Default assumes Universal Kriging (UK) so that the vector beta of regression weights is estimated using the maximum likelihood principle. storage_mode : string, optional A string specifying whether the Cholesky decomposition of the correlation matrix should be stored in the class (storage_mode = 'full') or not (storage_mode = 'light'). Default assumes storage_mode = 'full', so that the Cholesky decomposition of the correlation matrix is stored. This might be a useful parameter when one is not interested in the MSE and only plan to estimate the BLUP, for which the correlation matrix is not required. verbose : boolean, optional A boolean specifying the verbose level. Default is verbose = False. theta0 : double array_like, optional An array with shape (n_features, ) or (1, ). The parameters in the autocorrelation model. If thetaL and thetaU are also specified, theta0 is considered as the starting point for the maximum likelihood estimation of the best set of parameters. Default assumes isotropic autocorrelation model with theta0 = 1e-1. thetaL : double array_like, optional An array with shape matching theta0's. 
Lower bound on the autocorrelation parameters for maximum likelihood estimation. Default is None, so that it skips maximum likelihood estimation and it uses theta0. thetaU : double array_like, optional An array with shape matching theta0's. Upper bound on the autocorrelation parameters for maximum likelihood estimation. Default is None, so that it skips maximum likelihood estimation and it uses theta0. normalize : boolean, optional Input X and observations y are centered and reduced wrt means and standard deviations estimated from the n_samples observations provided. Default is normalize = True so that data is normalized to ease maximum likelihood estimation. nugget : double or ndarray, optional Introduce a nugget effect to allow smooth predictions from noisy data. If nugget is an ndarray, it must be the same length as the number of data points used for the fit. The nugget is added to the diagonal of the assumed training covariance; in this way it acts as a Tikhonov regularization in the problem. In the special case of the squared exponential correlation function, the nugget mathematically represents the variance of the input values. Default assumes a nugget close to machine precision for the sake of robustness (nugget = 10. * MACHINE_EPSILON). optimizer : string, optional A string specifying the optimization algorithm to be used. Default uses 'fmin_cobyla' algorithm from scipy.optimize. Available optimizers are:: 'fmin_cobyla', 'Welch' 'Welch' optimizer is dued to Welch et al., see reference [WBSWM1992]_. It consists in iterating over several one-dimensional optimizations instead of running one single multi-dimensional optimization. random_start : int, optional The number of times the Maximum Likelihood Estimation should be performed from a random starting point. The first MLE always uses the specified starting point (theta0), the next starting points are picked at random according to an exponential distribution (log-uniform on [thetaL, thetaU]). 
Default does not use random starting point (random_start = 1). random_state : int, RandomState instance or None, optional (default=None) The generator used to shuffle the sequence of coordinates of theta in the Welch optimizer. If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Attributes ---------- theta_ : array Specified theta OR the best set of autocorrelation parameters (the \ sought maximizer of the reduced likelihood function). reduced_likelihood_function_value_ : array The optimal reduced likelihood function value. Examples -------- >>> import numpy as np >>> from sklearn.gaussian_process import GaussianProcess >>> X = np.array([[1., 3., 5., 6., 7., 8.]]).T >>> y = (X * np.sin(X)).ravel() >>> gp = GaussianProcess(theta0=0.1, thetaL=.001, thetaU=1.) >>> gp.fit(X, y) # doctest: +ELLIPSIS GaussianProcess(beta0=None... ... Notes ----- The presentation implementation is based on a translation of the DACE Matlab toolbox, see reference [NLNS2002]_. References ---------- .. [NLNS2002] `H.B. Nielsen, S.N. Lophaven, H. B. Nielsen and J. Sondergaard. DACE - A MATLAB Kriging Toolbox.` (2002) http://imedea.uib-csic.es/master/cambioglobal/Modulo_V_cod101615/Lab/lab_maps/krigging/DACE-krigingsoft/dace/dace.pdf .. [WBSWM1992] `W.J. Welch, R.J. Buck, J. Sacks, H.P. Wynn, T.J. Mitchell, and M.D. Morris (1992). Screening, predicting, and computer experiments. 
Technometrics, 34(1) 15--25.` http://www.jstor.org/stable/1269548 """ _regression_types = { 'constant': regression.constant, 'linear': regression.linear, 'quadratic': regression.quadratic} _correlation_types = { 'absolute_exponential': correlation.absolute_exponential, 'squared_exponential': correlation.squared_exponential, 'generalized_exponential': correlation.generalized_exponential, 'cubic': correlation.cubic, 'linear': correlation.linear} _optimizer_types = [ 'fmin_cobyla', 'Welch'] def __init__(self, regr='constant', corr='squared_exponential', beta0=None, storage_mode='full', verbose=False, theta0=1e-1, thetaL=None, thetaU=None, optimizer='fmin_cobyla', random_start=1, normalize=True, nugget=10. * MACHINE_EPSILON, random_state=None): self.regr = regr self.corr = corr self.beta0 = beta0 self.storage_mode = storage_mode self.verbose = verbose self.theta0 = theta0 self.thetaL = thetaL self.thetaU = thetaU self.normalize = normalize self.nugget = nugget self.optimizer = optimizer self.random_start = random_start self.random_state = random_state def fit(self, X, y): """ The Gaussian Process model fitting method. Parameters ---------- X : double array_like An array with shape (n_samples, n_features) with the input at which observations were made. y : double array_like An array with shape (n_samples, ) or shape (n_samples, n_targets) with the observations of the output to be predicted. Returns ------- gp : self A fitted Gaussian Process model object awaiting data to perform predictions. 
""" # Run input checks self._check_params() self.random_state = check_random_state(self.random_state) # Force data to 2D numpy.array X, y = check_X_y(X, y, multi_output=True, y_numeric=True) self.y_ndim_ = y.ndim if y.ndim == 1: y = y[:, np.newaxis] # Check shapes of DOE & observations n_samples, n_features = X.shape _, n_targets = y.shape # Run input checks self._check_params(n_samples) # Normalize data or don't if self.normalize: X_mean = np.mean(X, axis=0) X_std = np.std(X, axis=0) y_mean = np.mean(y, axis=0) y_std = np.std(y, axis=0) X_std[X_std == 0.] = 1. y_std[y_std == 0.] = 1. # center and scale X if necessary X = (X - X_mean) / X_std y = (y - y_mean) / y_std else: X_mean = np.zeros(1) X_std = np.ones(1) y_mean = np.zeros(1) y_std = np.ones(1) # Calculate matrix of distances D between samples D, ij = l1_cross_distances(X) if (np.min(np.sum(D, axis=1)) == 0. and self.corr != correlation.pure_nugget): raise Exception("Multiple input features cannot have the same" " target value.") # Regression matrix and parameters F = self.regr(X) n_samples_F = F.shape[0] if F.ndim > 1: p = F.shape[1] else: p = 1 if n_samples_F != n_samples: raise Exception("Number of rows in F and X do not match. 
Most " "likely something is going wrong with the " "regression model.") if p > n_samples_F: raise Exception(("Ordinary least squares problem is undetermined " "n_samples=%d must be greater than the " "regression model size p=%d.") % (n_samples, p)) if self.beta0 is not None: if self.beta0.shape[0] != p: raise Exception("Shapes of beta0 and F do not match.") # Set attributes self.X = X self.y = y self.D = D self.ij = ij self.F = F self.X_mean, self.X_std = X_mean, X_std self.y_mean, self.y_std = y_mean, y_std # Determine Gaussian Process model parameters if self.thetaL is not None and self.thetaU is not None: # Maximum Likelihood Estimation of the parameters if self.verbose: print("Performing Maximum Likelihood Estimation of the " "autocorrelation parameters...") self.theta_, self.reduced_likelihood_function_value_, par = \ self._arg_max_reduced_likelihood_function() if np.isinf(self.reduced_likelihood_function_value_): raise Exception("Bad parameter region. " "Try increasing upper bound") else: # Given parameters if self.verbose: print("Given autocorrelation parameters. " "Computing Gaussian Process model parameters...") self.theta_ = self.theta0 self.reduced_likelihood_function_value_, par = \ self.reduced_likelihood_function() if np.isinf(self.reduced_likelihood_function_value_): raise Exception("Bad point. Try increasing theta0.") self.beta = par['beta'] self.gamma = par['gamma'] self.sigma2 = par['sigma2'] self.C = par['C'] self.Ft = par['Ft'] self.G = par['G'] if self.storage_mode == 'light': # Delete heavy data (it will be computed again if required) # (it is required only when MSE is wanted in self.predict) if self.verbose: print("Light storage mode specified. " "Flushing autocorrelation matrix...") self.D = None self.ij = None self.F = None self.C = None self.Ft = None self.G = None return self def predict(self, X, eval_MSE=False, batch_size=None): """ This function evaluates the Gaussian Process model at x. 
Parameters ---------- X : array_like An array with shape (n_eval, n_features) giving the point(s) at which the prediction(s) should be made. eval_MSE : boolean, optional A boolean specifying whether the Mean Squared Error should be evaluated or not. Default assumes evalMSE = False and evaluates only the BLUP (mean prediction). batch_size : integer, optional An integer giving the maximum number of points that can be evaluated simultaneously (depending on the available memory). Default is None so that all given points are evaluated at the same time. Returns ------- y : array_like, shape (n_samples, ) or (n_samples, n_targets) An array with shape (n_eval, ) if the Gaussian Process was trained on an array of shape (n_samples, ) or an array with shape (n_eval, n_targets) if the Gaussian Process was trained on an array of shape (n_samples, n_targets) with the Best Linear Unbiased Prediction at x. MSE : array_like, optional (if eval_MSE == True) An array with shape (n_eval, ) or (n_eval, n_targets) as with y, with the Mean Squared Error at x. 
""" check_is_fitted(self, "X") # Check input shapes X = check_array(X) n_eval, _ = X.shape n_samples, n_features = self.X.shape n_samples_y, n_targets = self.y.shape # Run input checks self._check_params(n_samples) if X.shape[1] != n_features: raise ValueError(("The number of features in X (X.shape[1] = %d) " "should match the number of features used " "for fit() " "which is %d.") % (X.shape[1], n_features)) if batch_size is None: # No memory management # (evaluates all given points in a single batch run) # Normalize input X = (X - self.X_mean) / self.X_std # Initialize output y = np.zeros(n_eval) if eval_MSE: MSE = np.zeros(n_eval) # Get pairwise componentwise L1-distances to the input training set dx = manhattan_distances(X, Y=self.X, sum_over_features=False) # Get regression function and correlation f = self.regr(X) r = self.corr(self.theta_, dx).reshape(n_eval, n_samples) # Scaled predictor y_ = np.dot(f, self.beta) + np.dot(r, self.gamma) # Predictor y = (self.y_mean + self.y_std * y_).reshape(n_eval, n_targets) if self.y_ndim_ == 1: y = y.ravel() # Mean Squared Error if eval_MSE: C = self.C if C is None: # Light storage mode (need to recompute C, F, Ft and G) if self.verbose: print("This GaussianProcess used 'light' storage mode " "at instantiation. Need to recompute " "autocorrelation matrix...") reduced_likelihood_function_value, par = \ self.reduced_likelihood_function() self.C = par['C'] self.Ft = par['Ft'] self.G = par['G'] rt = linalg.solve_triangular(self.C, r.T, lower=True) if self.beta0 is None: # Universal Kriging u = linalg.solve_triangular(self.G.T, np.dot(self.Ft.T, rt) - f.T, lower=True) else: # Ordinary Kriging u = np.zeros((n_targets, n_eval)) MSE = np.dot(self.sigma2.reshape(n_targets, 1), (1. - (rt ** 2.).sum(axis=0) + (u ** 2.).sum(axis=0))[np.newaxis, :]) MSE = np.sqrt((MSE ** 2.).sum(axis=0) / n_targets) # Mean Squared Error might be slightly negative depending on # machine precision: force to zero! MSE[MSE < 0.] = 0. 
if self.y_ndim_ == 1: MSE = MSE.ravel() return y, MSE else: return y else: # Memory management if type(batch_size) is not int or batch_size <= 0: raise Exception("batch_size must be a positive integer") if eval_MSE: y, MSE = np.zeros(n_eval), np.zeros(n_eval) for k in range(max(1, int(n_eval / batch_size))): batch_from = k * batch_size batch_to = min([(k + 1) * batch_size + 1, n_eval + 1]) y[batch_from:batch_to], MSE[batch_from:batch_to] = \ self.predict(X[batch_from:batch_to], eval_MSE=eval_MSE, batch_size=None) return y, MSE else: y = np.zeros(n_eval) for k in range(max(1, int(n_eval / batch_size))): batch_from = k * batch_size batch_to = min([(k + 1) * batch_size + 1, n_eval + 1]) y[batch_from:batch_to] = \ self.predict(X[batch_from:batch_to], eval_MSE=eval_MSE, batch_size=None) return y def reduced_likelihood_function(self, theta=None): """ This function determines the BLUP parameters and evaluates the reduced likelihood function for the given autocorrelation parameters theta. Maximizing this function wrt the autocorrelation parameters theta is equivalent to maximizing the likelihood of the assumed joint Gaussian distribution of the observations y evaluated onto the design of experiments X. Parameters ---------- theta : array_like, optional An array containing the autocorrelation parameters at which the Gaussian Process model parameters should be determined. Default uses the built-in autocorrelation parameters (ie ``theta = self.theta_``). Returns ------- reduced_likelihood_function_value : double The value of the reduced likelihood function associated to the given autocorrelation parameters theta. par : dict A dictionary containing the requested Gaussian Process model parameters: sigma2 Gaussian Process variance. beta Generalized least-squares regression weights for Universal Kriging or given beta0 for Ordinary Kriging. gamma Gaussian Process weights. C Cholesky decomposition of the correlation matrix [R]. 
Ft Solution of the linear equation system : [R] x Ft = F G QR decomposition of the matrix Ft. """ check_is_fitted(self, "X") if theta is None: # Use built-in autocorrelation parameters theta = self.theta_ # Initialize output reduced_likelihood_function_value = - np.inf par = {} # Retrieve data n_samples = self.X.shape[0] D = self.D ij = self.ij F = self.F if D is None: # Light storage mode (need to recompute D, ij and F) D, ij = l1_cross_distances(self.X) if (np.min(np.sum(D, axis=1)) == 0. and self.corr != correlation.pure_nugget): raise Exception("Multiple X are not allowed") F = self.regr(self.X) # Set up R r = self.corr(theta, D) R = np.eye(n_samples) * (1. + self.nugget) R[ij[:, 0], ij[:, 1]] = r R[ij[:, 1], ij[:, 0]] = r # Cholesky decomposition of R try: C = linalg.cholesky(R, lower=True) except linalg.LinAlgError: return reduced_likelihood_function_value, par # Get generalized least squares solution Ft = linalg.solve_triangular(C, F, lower=True) Q, G = linalg.qr(Ft, mode='economic') sv = linalg.svd(G, compute_uv=False) rcondG = sv[-1] / sv[0] if rcondG < 1e-10: # Check F sv = linalg.svd(F, compute_uv=False) condF = sv[0] / sv[-1] if condF > 1e15: raise Exception("F is too ill conditioned. Poor combination " "of regression model and observations.") else: # Ft is too ill conditioned, get out (try different theta) return reduced_likelihood_function_value, par Yt = linalg.solve_triangular(C, self.y, lower=True) if self.beta0 is None: # Universal Kriging beta = linalg.solve_triangular(G, np.dot(Q.T, Yt)) else: # Ordinary Kriging beta = np.array(self.beta0) rho = Yt - np.dot(Ft, beta) sigma2 = (rho ** 2.).sum(axis=0) / n_samples # The determinant of R is equal to the squared product of the diagonal # elements of its Cholesky decomposition C detR = (np.diag(C) ** (2. / n_samples)).prod() # Compute/Organize output reduced_likelihood_function_value = - sigma2.sum() * detR par['sigma2'] = sigma2 * self.y_std ** 2. 
par['beta'] = beta par['gamma'] = linalg.solve_triangular(C.T, rho) par['C'] = C par['Ft'] = Ft par['G'] = G return reduced_likelihood_function_value, par def _arg_max_reduced_likelihood_function(self): """ This function estimates the autocorrelation parameters theta as the maximizer of the reduced likelihood function. (Minimization of the opposite reduced likelihood function is used for convenience) Parameters ---------- self : All parameters are stored in the Gaussian Process model object. Returns ------- optimal_theta : array_like The best set of autocorrelation parameters (the sought maximizer of the reduced likelihood function). optimal_reduced_likelihood_function_value : double The optimal reduced likelihood function value. optimal_par : dict The BLUP parameters associated to thetaOpt. """ # Initialize output best_optimal_theta = [] best_optimal_rlf_value = [] best_optimal_par = [] if self.verbose: print("The chosen optimizer is: " + str(self.optimizer)) if self.random_start > 1: print(str(self.random_start) + " random starts are required.") percent_completed = 0. # Force optimizer to fmin_cobyla if the model is meant to be isotropic if self.optimizer == 'Welch' and self.theta0.size == 1: self.optimizer = 'fmin_cobyla' if self.optimizer == 'fmin_cobyla': def minus_reduced_likelihood_function(log10t): return - self.reduced_likelihood_function( theta=10. ** log10t)[0] constraints = [] for i in range(self.theta0.size): constraints.append(lambda log10t, i=i: log10t[i] - np.log10(self.thetaL[0, i])) constraints.append(lambda log10t, i=i: np.log10(self.thetaU[0, i]) - log10t[i]) for k in range(self.random_start): if k == 0: # Use specified starting point as first guess theta0 = self.theta0 else: # Generate a random starting point log10-uniformly # distributed between bounds log10theta0 = (np.log10(self.thetaL) + self.random_state.rand(*self.theta0.shape) * np.log10(self.thetaU / self.thetaL)) theta0 = 10. 
** log10theta0 # Run Cobyla try: log10_optimal_theta = \ optimize.fmin_cobyla(minus_reduced_likelihood_function, np.log10(theta0).ravel(), constraints, iprint=0) except ValueError as ve: print("Optimization failed. Try increasing the ``nugget``") raise ve optimal_theta = 10. ** log10_optimal_theta optimal_rlf_value, optimal_par = \ self.reduced_likelihood_function(theta=optimal_theta) # Compare the new optimizer to the best previous one if k > 0: if optimal_rlf_value > best_optimal_rlf_value: best_optimal_rlf_value = optimal_rlf_value best_optimal_par = optimal_par best_optimal_theta = optimal_theta else: best_optimal_rlf_value = optimal_rlf_value best_optimal_par = optimal_par best_optimal_theta = optimal_theta if self.verbose and self.random_start > 1: if (20 * k) / self.random_start > percent_completed: percent_completed = (20 * k) / self.random_start print("%s completed" % (5 * percent_completed)) optimal_rlf_value = best_optimal_rlf_value optimal_par = best_optimal_par optimal_theta = best_optimal_theta elif self.optimizer == 'Welch': # Backup of the given attributes theta0, thetaL, thetaU = self.theta0, self.thetaL, self.thetaU corr = self.corr verbose = self.verbose # This will iterate over fmin_cobyla optimizer self.optimizer = 'fmin_cobyla' self.verbose = False # Initialize under isotropy assumption if verbose: print("Initialize under isotropy assumption...") self.theta0 = check_array(self.theta0.min()) self.thetaL = check_array(self.thetaL.min()) self.thetaU = check_array(self.thetaU.max()) theta_iso, optimal_rlf_value_iso, par_iso = \ self._arg_max_reduced_likelihood_function() optimal_theta = theta_iso + np.zeros(theta0.shape) # Iterate over all dimensions of theta allowing for anisotropy if verbose: print("Now improving allowing for anisotropy...") for i in self.random_state.permutation(theta0.size): if verbose: print("Proceeding along dimension %d..." 
% (i + 1)) self.theta0 = check_array(theta_iso) self.thetaL = check_array(thetaL[0, i]) self.thetaU = check_array(thetaU[0, i]) def corr_cut(t, d): return corr(check_array(np.hstack([optimal_theta[0][0:i], t[0], optimal_theta[0][(i + 1)::]])), d) self.corr = corr_cut optimal_theta[0, i], optimal_rlf_value, optimal_par = \ self._arg_max_reduced_likelihood_function() # Restore the given attributes self.theta0, self.thetaL, self.thetaU = theta0, thetaL, thetaU self.corr = corr self.optimizer = 'Welch' self.verbose = verbose else: raise NotImplementedError("This optimizer ('%s') is not " "implemented yet. Please contribute!" % self.optimizer) return optimal_theta, optimal_rlf_value, optimal_par def _check_params(self, n_samples=None): # Check regression model if not callable(self.regr): if self.regr in self._regression_types: self.regr = self._regression_types[self.regr] else: raise ValueError("regr should be one of %s or callable, " "%s was given." % (self._regression_types.keys(), self.regr)) # Check regression weights if given (Ordinary Kriging) if self.beta0 is not None: self.beta0 = np.atleast_2d(self.beta0) if self.beta0.shape[1] != 1: # Force to column vector self.beta0 = self.beta0.T # Check correlation model if not callable(self.corr): if self.corr in self._correlation_types: self.corr = self._correlation_types[self.corr] else: raise ValueError("corr should be one of %s or callable, " "%s was given." % (self._correlation_types.keys(), self.corr)) # Check storage mode if self.storage_mode != 'full' and self.storage_mode != 'light': raise ValueError("Storage mode should either be 'full' or " "'light', %s was given." 
% self.storage_mode) # Check correlation parameters self.theta0 = np.atleast_2d(self.theta0) lth = self.theta0.size if self.thetaL is not None and self.thetaU is not None: self.thetaL = np.atleast_2d(self.thetaL) self.thetaU = np.atleast_2d(self.thetaU) if self.thetaL.size != lth or self.thetaU.size != lth: raise ValueError("theta0, thetaL and thetaU must have the " "same length.") if np.any(self.thetaL <= 0) or np.any(self.thetaU < self.thetaL): raise ValueError("The bounds must satisfy O < thetaL <= " "thetaU.") elif self.thetaL is None and self.thetaU is None: if np.any(self.theta0 <= 0): raise ValueError("theta0 must be strictly positive.") elif self.thetaL is None or self.thetaU is None: raise ValueError("thetaL and thetaU should either be both or " "neither specified.") # Force verbose type to bool self.verbose = bool(self.verbose) # Force normalize type to bool self.normalize = bool(self.normalize) # Check nugget value self.nugget = np.asarray(self.nugget) if np.any(self.nugget) < 0.: raise ValueError("nugget must be positive or zero.") if (n_samples is not None and self.nugget.shape not in [(), (n_samples,)]): raise ValueError("nugget must be either a scalar " "or array of length n_samples.") # Check optimizer if self.optimizer not in self._optimizer_types: raise ValueError("optimizer should be one of %s" % self._optimizer_types) # Force random_start type to int self.random_start = int(self.random_start)
bsd-3-clause
loretoparisi/nupic
external/linux32/lib/python2.6/site-packages/matplotlib/delaunay/interpolate.py
73
7068
"""Interpolators built on a Delaunay triangulation.

Provides :class:`LinearInterpolator` and :class:`NNInterpolator`, which
evaluate a function known only at the nodes of a triangulation, on regular
rectangular grids (and, for NNInterpolator, at arbitrary scattered points).
The numerical work is delegated to the C extension ``matplotlib._delaunay``.
"""
import numpy as np

from matplotlib._delaunay import compute_planes, linear_interpolate_grid, nn_interpolate_grid
from matplotlib._delaunay import nn_interpolate_unstructured

__all__ = ['LinearInterpolator', 'NNInterpolator']


def slice2gridspec(key):
    """Convert a 2-tuple of slices to start,stop,steps for x and y.

    key -- (slice(ystart, ystop, ystep), slice(xstart, xstop, xstep))

    For now, the only accepted step values are imaginary integers (interpreted
    in the same way numpy.mgrid, etc. do).
    """
    # Accept exactly a pair of slice objects (the 2-D __getitem__ key form).
    if ((len(key) != 2) or
        (not isinstance(key[0], slice)) or
        (not isinstance(key[1], slice))):
        raise ValueError("only 2-D slices, please")

    x0 = key[1].start
    x1 = key[1].stop
    xstep = key[1].step
    # Only the mgrid-style [start:stop:numsteps*1j] form is accepted: the step
    # must be an imaginary number whose real part is integral (normally 0).
    if not isinstance(xstep, complex) or int(xstep.real) != xstep.real:
        raise ValueError("only the [start:stop:numsteps*1j] form supported")
    xstep = int(xstep.imag)

    y0 = key[0].start
    y1 = key[0].stop
    ystep = key[0].step
    if not isinstance(ystep, complex) or int(ystep.real) != ystep.real:
        raise ValueError("only the [start:stop:numsteps*1j] form supported")
    ystep = int(ystep.imag)

    return x0, x1, xstep, y0, y1, ystep


class LinearInterpolator(object):
    """Interpolate a function defined on the nodes of a triangulation by
    using the planes defined by the three function values at each corner
    of the triangles.

    LinearInterpolator(triangulation, z, default_value=numpy.nan)

    triangulation -- Triangulation instance
    z -- the function values at each node of the triangulation
    default_value -- a float giving the default value should the
      interpolating point happen to fall outside of the convex hull of the
      triangulation

    At the moment, only regular rectangular grids are supported for
    interpolation.

        vals = interp[ystart:ystop:ysteps*1j, xstart:xstop:xsteps*1j]

    vals would then be a (ysteps, xsteps) array containing the interpolated
    values. These arguments are interpreted the same way as numpy.mgrid.

    Attributes:
      planes -- (ntriangles, 3) array of floats specifying the plane for each
        triangle.

    Linear Interpolation
    --------------------
    Given the Delaunay triangulation (or indeed *any* complete triangulation)
    we can interpolate values inside the convex hull by locating the enclosing
    triangle of the interpolation point and returning the value at that point
    of the plane defined by the three node values.

        f = planes[tri,0]*x + planes[tri,1]*y + planes[tri,2]

    The interpolated function is C0 continuous across the convex hull of the
    input points. It is C1 continuous across the convex hull except for the
    nodes and the edges of the triangulation.
    """
    def __init__(self, triangulation, z, default_value=np.nan):
        self.triangulation = triangulation
        self.z = np.asarray(z, dtype=np.float64)
        self.default_value = default_value

        # Precompute one plane (a, b, c) per triangle so that evaluation is
        # just f = a*x + b*y + c inside the enclosing triangle.
        self.planes = compute_planes(triangulation.x, triangulation.y, self.z,
            triangulation.triangle_nodes)

    def __getitem__(self, key):
        # key is a pair of mgrid-style slices; see slice2gridspec.
        x0, x1, xstep, y0, y1, ystep = slice2gridspec(key)
        grid = linear_interpolate_grid(x0, x1, xstep, y0, y1, ystep,
            self.default_value, self.planes, self.triangulation.x,
            self.triangulation.y, self.triangulation.triangle_nodes,
            self.triangulation.triangle_neighbors)
        return grid


class NNInterpolator(object):
    """Interpolate a function defined on the nodes of a triangulation by
    the natural neighbors method.

    NNInterpolator(triangulation, z, default_value=numpy.nan)

    triangulation -- Triangulation instance
    z -- the function values at each node of the triangulation
    default_value -- a float giving the default value should the
      interpolating point happen to fall outside of the convex hull of the
      triangulation

    At the moment, only regular rectangular grids are supported for
    interpolation.

        vals = interp[ystart:ystop:ysteps*1j, xstart:xstop:xsteps*1j]

    vals would then be a (ysteps, xsteps) array containing the interpolated
    values. These arguments are interpreted the same way as numpy.mgrid.

    Natural Neighbors Interpolation
    -------------------------------
    One feature of the Delaunay triangulation is that for each triangle, its
    circumcircle contains no other point (although in degenerate cases, like
    squares, other points may be *on* the circumcircle). One can also
    construct what is called the Voronoi diagram from a Delaunay triangulation
    by connecting the circumcenters of the triangles to those of their
    neighbors to form a tessellation of irregular polygons covering the plane
    and containing only one node from the triangulation. Each point in one
    node's Voronoi polygon is closer to that node than any other node.

    To compute the Natural Neighbors interpolant, we consider adding the
    interpolation point to the triangulation. We define the natural neighbors
    of this point as the set of nodes participating in Delaunay triangles
    whose circumcircles contain the point. To restore the Delaunay-ness of
    the triangulation, one would only have to alter those triangles and
    Voronoi polygons. The new Voronoi diagram would have a polygon around the
    inserted point. This polygon would "steal" area from the original Voronoi
    polygons. For each node i in the natural neighbors set, we compute the
    area stolen from its original Voronoi polygon, stolen[i]. We define the
    natural neighbors coordinates

        phi[i] = stolen[i] / sum(stolen,axis=0)

    We then use these phi[i] to weight the corresponding function values from
    the input data z to compute the interpolated value.

    The interpolated surface is C1-continuous except at the nodes themselves
    across the convex hull of the input points. One can find the set of points
    that a given node will affect by computing the union of the areas covered
    by the circumcircles of each Delaunay triangle that node participates in.
    """
    def __init__(self, triangulation, z, default_value=np.nan):
        self.triangulation = triangulation
        self.z = np.asarray(z, dtype=np.float64)
        self.default_value = default_value

    def __getitem__(self, key):
        # Grid evaluation; key is a pair of mgrid-style slices.
        x0, x1, xstep, y0, y1, ystep = slice2gridspec(key)
        grid = nn_interpolate_grid(x0, x1, xstep, y0, y1, ystep,
            self.default_value, self.triangulation.x, self.triangulation.y,
            self.z, self.triangulation.circumcenters,
            self.triangulation.triangle_nodes,
            self.triangulation.triangle_neighbors)
        return grid

    def __call__(self, intx, inty):
        # Unstructured (scattered-point) evaluation at coordinates
        # (intx, inty); points outside the convex hull get default_value.
        intz = nn_interpolate_unstructured(intx, inty, self.default_value,
            self.triangulation.x, self.triangulation.y, self.z,
            self.triangulation.circumcenters,
            self.triangulation.triangle_nodes,
            self.triangulation.triangle_neighbors)
        return intz
agpl-3.0
xyguo/scikit-learn
examples/model_selection/plot_train_error_vs_test_error.py
349
2577
""" ========================= Train error vs Test error ========================= Illustration of how the performance of an estimator on unseen data (test data) is not the same as the performance on training data. As the regularization increases the performance on train decreases while the performance on test is optimal within a range of values of the regularization parameter. The example with an Elastic-Net regression model and the performance is measured using the explained variance a.k.a. R^2. """ print(__doc__) # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr> # License: BSD 3 clause import numpy as np from sklearn import linear_model ############################################################################### # Generate sample data n_samples_train, n_samples_test, n_features = 75, 150, 500 np.random.seed(0) coef = np.random.randn(n_features) coef[50:] = 0.0 # only the top 10 features are impacting the model X = np.random.randn(n_samples_train + n_samples_test, n_features) y = np.dot(X, coef) # Split train and test data X_train, X_test = X[:n_samples_train], X[n_samples_train:] y_train, y_test = y[:n_samples_train], y[n_samples_train:] ############################################################################### # Compute train and test errors alphas = np.logspace(-5, 1, 60) enet = linear_model.ElasticNet(l1_ratio=0.7) train_errors = list() test_errors = list() for alpha in alphas: enet.set_params(alpha=alpha) enet.fit(X_train, y_train) train_errors.append(enet.score(X_train, y_train)) test_errors.append(enet.score(X_test, y_test)) i_alpha_optim = np.argmax(test_errors) alpha_optim = alphas[i_alpha_optim] print("Optimal regularization parameter : %s" % alpha_optim) # Estimate the coef_ on full data with optimal regularization parameter enet.set_params(alpha=alpha_optim) coef_ = enet.fit(X, y).coef_ ############################################################################### # Plot results functions import matplotlib.pyplot as plt plt.subplot(2, 
1, 1) plt.semilogx(alphas, train_errors, label='Train') plt.semilogx(alphas, test_errors, label='Test') plt.vlines(alpha_optim, plt.ylim()[0], np.max(test_errors), color='k', linewidth=3, label='Optimum on test') plt.legend(loc='lower left') plt.ylim([0, 1.2]) plt.xlabel('Regularization parameter') plt.ylabel('Performance') # Show estimated coef_ vs true coef plt.subplot(2, 1, 2) plt.plot(coef, label='True coef') plt.plot(coef_, label='Estimated coef') plt.legend() plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.26) plt.show()
bsd-3-clause
duncanwp/iris
lib/iris/tests/test_quickplot.py
1
7759
# (C) British Crown Copyright 2010 - 2018, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris.  If not, see <http://www.gnu.org/licenses/>.
"""
Tests the high-level plotting interface.

"""

from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip)  # noqa

# import iris tests first so that some things can be initialised before
# importing anything else
import iris.tests as tests

import iris.tests.test_plot as test_plot

import iris

# Run tests in no graphics mode if matplotlib is not available.
if tests.MPL_AVAILABLE:
    import matplotlib.pyplot as plt
    import iris.plot as iplt
    import iris.quickplot as qplt


# Caches _load_theta so subsequent calls are faster.
# NOTE: the shared default `cache` dict holds a single entry under the fixed
# key "result", so the wrapped function is evaluated exactly once and every
# later call returns that first result, regardless of the arguments passed.
def cache(fn, cache={}):
    def inner(*args, **kwargs):
        key = "result"
        if not cache:
            cache[key] = fn(*args, **kwargs)
        return cache[key]
    return inner


@cache
def _load_theta():
    # Load the COLPEX potential-temperature cube used by several test
    # classes below; cached so the (slow) file load happens only once.
    path = tests.get_data_path(('PP', 'COLPEX', 'theta_and_orog_subset.pp'))
    theta = iris.load_cube(path, 'air_potential_temperature')

    # Improve the unit
    theta.units = 'K'

    return theta


@tests.skip_data
@tests.skip_plot
class TestQuickplotCoordinatesGiven(test_plot.TestPlotCoordinatesGiven):
    # Re-runs the iris.plot coordinate-combination tests, but driving
    # iris.quickplot instead (via self.draw_module / self.results).
    def setUp(self):
        tests.GraphicsTest.setUp(self)
        filename = tests.get_data_path(('PP', 'COLPEX',
                                        'theta_and_orog_subset.pp'))
        self.cube = test_plot.load_cube_once(filename,
                                             'air_potential_temperature')

        self.draw_module = iris.quickplot
        # LambdaStr pairs a printable name with the callable, so failures
        # report which quickplot function was being exercised.
        self.contourf = test_plot.LambdaStr('iris.quickplot.contourf',
                                            lambda cube, *args, **kwargs:
                                            iris.quickplot.contourf(cube,
                                                                    *args,
                                                                    **kwargs))
        self.contour = test_plot.LambdaStr('iris.quickplot.contour',
                                           lambda cube, *args, **kwargs:
                                           iris.quickplot.contour(cube,
                                                                  *args,
                                                                  **kwargs))
        self.points = test_plot.LambdaStr('iris.quickplot.points',
                                          lambda cube, *args, **kwargs:
                                          iris.quickplot.points(cube,
                                                                c=cube.data,
                                                                *args,
                                                                **kwargs))
        self.plot = test_plot.LambdaStr('iris.quickplot.plot',
                                        lambda cube, *args, **kwargs:
                                        iris.quickplot.plot(cube,
                                                            *args,
                                                            **kwargs))

        # Map of slice kind -> (draw function, coordinate ordering) cases to
        # exercise; each coordinate list is tried in both axis orders.
        self.results = {'yx': (
                        [self.contourf, ['grid_latitude', 'grid_longitude']],
                        [self.contourf, ['grid_longitude', 'grid_latitude']],
                        [self.contour, ['grid_latitude', 'grid_longitude']],
                        [self.contour, ['grid_longitude', 'grid_latitude']],
                        [self.points, ['grid_latitude', 'grid_longitude']],
                        [self.points, ['grid_longitude', 'grid_latitude']],
                        ),
                        'zx': (
                        [self.contourf, ['model_level_number',
                                         'grid_longitude']],
                        [self.contourf, ['grid_longitude',
                                         'model_level_number']],
                        [self.contour, ['model_level_number',
                                        'grid_longitude']],
                        [self.contour, ['grid_longitude',
                                        'model_level_number']],
                        [self.points, ['model_level_number',
                                       'grid_longitude']],
                        [self.points, ['grid_longitude',
                                       'model_level_number']],
                        ),
                        'tx': (
                        [self.contourf, ['time', 'grid_longitude']],
                        [self.contourf, ['grid_longitude', 'time']],
                        [self.contour, ['time', 'grid_longitude']],
                        [self.contour, ['grid_longitude', 'time']],
                        [self.points, ['time', 'grid_longitude']],
                        [self.points, ['grid_longitude', 'time']],
                        ),
                        'x': (
                        [self.plot, ['grid_longitude']],
                        ),
                        'y': (
                        [self.plot, ['grid_latitude']],
                        ),
                        }


@tests.skip_data
@tests.skip_plot
class TestLabels(tests.GraphicsTest):
    # Checks that quickplot attaches sensible titles/axis labels by comparing
    # rendered figures against reference images (check_graphic).
    def setUp(self):
        super(TestLabels, self).setUp()
        self.theta = _load_theta()

    def _slice(self, coords):
        """Returns the first cube containing the requested coordinates."""
        # Take just the first slice yielded by Cube.slices.
        for cube in self.theta.slices(coords):
            break
        return cube

    def _small(self):
        # Use a restricted size so we can make out the detail
        cube = self._slice(['model_level_number', 'grid_longitude'])
        return cube[:5, :5]

    def test_contour(self):
        qplt.contour(self._small())
        self.check_graphic()

        qplt.contourf(self._small(), coords=['model_level_number',
                                             'grid_longitude'])
        self.check_graphic()

    def test_contourf(self):
        qplt.contourf(self._small())

        cube = self._small()
        iplt.orography_at_points(cube)

        self.check_graphic()

        qplt.contourf(self._small(), coords=['model_level_number',
                                             'grid_longitude'])
        self.check_graphic()

        qplt.contourf(self._small(), coords=['grid_longitude',
                                             'model_level_number'])
        self.check_graphic()

    def test_contourf_nameless(self):
        # A cube with no usable name should still plot without error.
        cube = self._small()
        cube.standard_name = None
        cube.attributes['STASH'] = ''
        qplt.contourf(cube, coords=['grid_longitude', 'model_level_number'])
        self.check_graphic()

    def test_pcolor(self):
        qplt.pcolor(self._small())
        self.check_graphic()

    def test_pcolormesh(self):
        qplt.pcolormesh(self._small())

        #cube = self._small()
        #iplt.orography_at_bounds(cube)

        self.check_graphic()

    def test_map(self):
        cube = self._slice(['grid_latitude', 'grid_longitude'])
        qplt.contour(cube)
        self.check_graphic()

        # Check that shifting the data by 360 degrees of longitude produces
        # an *almost* identical plot.
        lon = cube.coord('grid_longitude')
        lon.points = lon.points + 360
        qplt.contour(cube)
        self.check_graphic()

    def test_alignment(self):
        cube = self._small()
        qplt.contourf(cube)
        #qplt.outline(cube)
        qplt.points(cube)
        self.check_graphic()


@tests.skip_data
@tests.skip_plot
class TestTimeReferenceUnitsLabels(tests.GraphicsTest):

    def setUp(self):
        super(TestTimeReferenceUnitsLabels, self).setUp()
        path = tests.get_data_path(('PP', 'aPProt1', 'rotatedMHtimecube.pp'))
        # Take a single time series from the cube.
        self.cube = iris.load_cube(path)[:, 0, 0]

    def test_reference_time_units(self):
        # units should not be displayed for a reference time
        qplt.plot(self.cube.coord('time'), self.cube)
        plt.gcf().autofmt_xdate()
        self.check_graphic()

    def test_not_reference_time_units(self):
        # units should be displayed for other time coordinates
        qplt.plot(self.cube.coord('forecast_period'), self.cube)
        self.check_graphic()


if __name__ == "__main__":
    tests.main()
lgpl-3.0
doubaoatthu/UWRoutingSystem
dt.py
1
1669
# Fits one decision tree per path feature, predicting that feature from the
# survey preference answers, and renders each tree to a PDF via pydot.
from sklearn import tree
from sklearn.externals.six import StringIO
from IPython.display import Image
import os
import pydot
import numpy

preference = []  # one row of integer survey answers per respondent
label = []       # one row of 0/1 path-feature flags per recorded path

# Parse the survey file: each line is a quoted, comma-separated record whose
# first field is an identifier that we drop.
with open("../data/survey.txt") as f:
    contents = f.readlines()
    for content in contents:
        content = content[1:-2]  # strip the surrounding quote and trailing newline
        content = content.replace("\"", "")
        labelarr = content.split(",")
        labelarr = labelarr[1:]  # drop the leading identifier field
        # List comprehension instead of map(): under Python 3, map() returns a
        # lazy iterator, so appending it would store map objects rather than
        # rows of ints and break numpy.array/clf.fit downstream. Behaviour is
        # unchanged under Python 2.
        intlabelarr = [int(v) for v in labelarr]
        preference.append(intlabelarr)

# Parse the path file: keep only lines containing "feature", extract the
# bracketed feature list, and normalise true/false to 1/0.
with open("../data/path.txt") as f:
    contents = f.readlines()
    for content in contents:
        fstring = content.split("feature")
        if(len(fstring) > 1):
            features = fstring[1]
            features = features[3:-4]  # strip the surrounding markup
            featarr = features.split(",")
            for i in range(0, len(featarr)):
                if(featarr[i] == "true"):
                    featarr[i] = "1"
                if(featarr[i] == "false"):
                    featarr[i] = "0"
            # Same Python 3 map() fix as above; float() first because some
            # values are written as decimals (e.g. "1.0").
            label.append([int(float(v)) for v in featarr])

# print(preference)
# print(label)
# print(len(label))

x = numpy.array(label)
print(x)
y = x.T  # transpose: y[classIndex] is the column of one feature across paths
print("============")
print(y)
print(len(y))
print(len(preference))

fname = ["sunny","cloudy","rainy/snowy","tired","coffee","bathroom","avoid crowd","curiousity","printer","campus event","hurry","fresh air","meet friend"]


def drawDecisionTree(classIndex):
    """Fit a decision tree predicting path feature `classIndex` from the
    survey preferences and write it to decisionTree_<classIndex>.pdf."""
    clf = tree.DecisionTreeClassifier()
    clf = clf.fit(preference, y[classIndex])
    dot_data = StringIO()
    # change it: class_names = cnames[classIndex]
    tree.export_graphviz(clf, out_file=dot_data, feature_names=fname,
                         filled=True, rounded=True, special_characters=True)
    # NOTE(review): pydot >= 1.2 returns a *list* of graphs from
    # graph_from_dot_data; if the installed version does, use [...][0] here.
    graph = pydot.graph_from_dot_data(dot_data.getvalue())
    filename = "decisionTree_" + str(classIndex) + ".pdf"
    graph.write_pdf(filename)


for i in range(6, 9):
    drawDecisionTree(i)
mit
lbishal/scikit-learn
sklearn/linear_model/coordinate_descent.py
8
76416
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr> # Fabian Pedregosa <fabian.pedregosa@inria.fr> # Olivier Grisel <olivier.grisel@ensta.org> # Gael Varoquaux <gael.varoquaux@inria.fr> # # License: BSD 3 clause import sys import warnings from abc import ABCMeta, abstractmethod import numpy as np from scipy import sparse from .base import LinearModel, _pre_fit from ..base import RegressorMixin from .base import center_data, sparse_center_data from ..utils import check_array, check_X_y, deprecated from ..utils.validation import check_random_state from ..model_selection import check_cv from ..externals.joblib import Parallel, delayed from ..externals import six from ..externals.six.moves import xrange from ..utils.extmath import safe_sparse_dot from ..utils.validation import check_is_fitted from ..utils.validation import column_or_1d from ..exceptions import ConvergenceWarning from . import cd_fast ############################################################################### # Paths functions def _alpha_grid(X, y, Xy=None, l1_ratio=1.0, fit_intercept=True, eps=1e-3, n_alphas=100, normalize=False, copy_X=True): """ Compute the grid of alpha values for elastic net parameter search Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data. Pass directly as Fortran-contiguous data to avoid unnecessary memory duplication y : ndarray, shape (n_samples,) Target values Xy : array-like, optional Xy = np.dot(X.T, y) that can be precomputed. l1_ratio : float The elastic net mixing parameter, with ``0 <= l1_ratio <= 1``. For ``l1_ratio = 0`` the penalty is an L2 penalty. ``For l1_ratio = 1`` it is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a combination of L1 and L2. eps : float, optional Length of the path. 
``eps=1e-3`` means that ``alpha_min / alpha_max = 1e-3`` n_alphas : int, optional Number of alphas along the regularization path fit_intercept : boolean, default True Whether to fit an intercept or not normalize : boolean, optional, default False If ``True``, the regressors X will be normalized before regression. copy_X : boolean, optional, default True If ``True``, X will be copied; else, it may be overwritten. """ n_samples = len(y) sparse_center = False if Xy is None: X_sparse = sparse.isspmatrix(X) sparse_center = X_sparse and (fit_intercept or normalize) X = check_array(X, 'csc', copy=(copy_X and fit_intercept and not X_sparse)) if not X_sparse: # X can be touched inplace thanks to the above line X, y, _, _, _ = center_data(X, y, fit_intercept, normalize, copy=False) Xy = safe_sparse_dot(X.T, y, dense_output=True) if sparse_center: # Workaround to find alpha_max for sparse matrices. # since we should not destroy the sparsity of such matrices. _, _, X_mean, _, X_std = sparse_center_data(X, y, fit_intercept, normalize) mean_dot = X_mean * np.sum(y) if Xy.ndim == 1: Xy = Xy[:, np.newaxis] if sparse_center: if fit_intercept: Xy -= mean_dot[:, np.newaxis] if normalize: Xy /= X_std[:, np.newaxis] alpha_max = (np.sqrt(np.sum(Xy ** 2, axis=1)).max() / (n_samples * l1_ratio)) if alpha_max <= np.finfo(float).resolution: alphas = np.empty(n_alphas) alphas.fill(np.finfo(float).resolution) return alphas return np.logspace(np.log10(alpha_max * eps), np.log10(alpha_max), num=n_alphas)[::-1] def lasso_path(X, y, eps=1e-3, n_alphas=100, alphas=None, precompute='auto', Xy=None, copy_X=True, coef_init=None, verbose=False, return_n_iter=False, positive=False, **params): """Compute Lasso path with coordinate descent The Lasso optimization function varies for mono and multi-outputs. 
For mono-output tasks it is:: (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1 For multi-output tasks it is:: (1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21 Where:: ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2} i.e. the sum of norm of each row. Read more in the :ref:`User Guide <lasso>`. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data. Pass directly as Fortran-contiguous data to avoid unnecessary memory duplication. If ``y`` is mono-output then ``X`` can be sparse. y : ndarray, shape (n_samples,), or (n_samples, n_outputs) Target values eps : float, optional Length of the path. ``eps=1e-3`` means that ``alpha_min / alpha_max = 1e-3`` n_alphas : int, optional Number of alphas along the regularization path alphas : ndarray, optional List of alphas where to compute the models. If ``None`` alphas are set automatically precompute : True | False | 'auto' | array-like Whether to use a precomputed Gram matrix to speed up calculations. If set to ``'auto'`` let us decide. The Gram matrix can also be passed as argument. Xy : array-like, optional Xy = np.dot(X.T, y) that can be precomputed. It is useful only when the Gram matrix is precomputed. copy_X : boolean, optional, default True If ``True``, X will be copied; else, it may be overwritten. coef_init : array, shape (n_features, ) | None The initial values of the coefficients. verbose : bool or integer Amount of verbosity. params : kwargs keyword arguments passed to the coordinate descent solver. positive : bool, default False If set to True, forces coefficients to be positive. return_n_iter : bool whether to return the number of iterations or not. Returns ------- alphas : array, shape (n_alphas,) The alphas along the path where models are computed. coefs : array, shape (n_features, n_alphas) or \ (n_outputs, n_features, n_alphas) Coefficients along the path. dual_gaps : array, shape (n_alphas,) The dual gaps at the end of the optimization for each alpha. 
    n_iters : array-like, shape (n_alphas,)
        The number of iterations taken by the coordinate descent optimizer to
        reach the specified tolerance for each alpha.

    Notes
    -----
    See examples/linear_model/plot_lasso_coordinate_descent_path.py
    for an example.

    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.

    Note that in certain cases, the Lars solver may be significantly
    faster at computing this path. In particular, linear
    interpolation can be used to retrieve model coefficients between the
    values output by lars_path.

    Examples
    --------

    Comparing lasso_path and lars_path with interpolation:

    >>> X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
    >>> y = np.array([1, 2, 3.1])
    >>> # Use lasso_path to compute a coefficient path
    >>> _, coef_path, _ = lasso_path(X, y, alphas=[5., 1., .5])
    >>> print(coef_path)
    [[ 0.          0.          0.46874778]
     [ 0.2159048   0.4425765   0.23689075]]

    >>> # Now use lars_path and 1D linear interpolation to compute the
    >>> # same path
    >>> from sklearn.linear_model import lars_path
    >>> alphas, active, coef_path_lars = lars_path(X, y, method='lasso')
    >>> from scipy import interpolate
    >>> coef_path_continuous = interpolate.interp1d(alphas[::-1],
    ...                                             coef_path_lars[:, ::-1])
    >>> print(coef_path_continuous([5., 1., .5]))
    [[ 0.          0.          0.46915237]
     [ 0.2159048   0.4425765   0.23668876]]

    See also
    --------
    lars_path
    Lasso
    LassoLars
    LassoCV
    LassoLarsCV
    sklearn.decomposition.sparse_encode
    """
    # lasso_path is the l1_ratio=1 special case of enet_path; all the actual
    # work (alpha grid, solver dispatch) happens there.
    return enet_path(X, y, l1_ratio=1., eps=eps, n_alphas=n_alphas,
                     alphas=alphas, precompute=precompute, Xy=Xy,
                     copy_X=copy_X, coef_init=coef_init, verbose=verbose,
                     positive=positive, **params)


def enet_path(X, y, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
              precompute='auto', Xy=None, copy_X=True, coef_init=None,
              verbose=False, return_n_iter=False, positive=False,
              check_input=True, **params):
    """Compute elastic net path with coordinate descent

    The elastic net optimization function varies for mono and multi-outputs.

    For mono-output tasks it is::

        1 / (2 * n_samples) * ||y - Xw||^2_2 +
        + alpha * l1_ratio * ||w||_1
        + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2

    For multi-output tasks it is::

        (1 / (2 * n_samples)) * ||Y - XW||^Fro_2
        + alpha * l1_ratio * ||W||_21
        + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2

    Where::

        ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}

    i.e. the sum of norm of each row.

    Read more in the :ref:`User Guide <elastic_net>`.

    Parameters
    ----------
    X : {array-like}, shape (n_samples, n_features)
        Training data. Pass directly as Fortran-contiguous data to avoid
        unnecessary memory duplication. If ``y`` is mono-output then ``X``
        can be sparse.

    y : ndarray, shape (n_samples,) or (n_samples, n_outputs)
        Target values

    l1_ratio : float, optional
        float between 0 and 1 passed to elastic net (scaling between
        l1 and l2 penalties). ``l1_ratio=1`` corresponds to the Lasso

    eps : float
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``

    n_alphas : int, optional
        Number of alphas along the regularization path

    alphas : ndarray, optional
        List of alphas where to compute the models.
        If None alphas are set automatically

    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide.
    See also
    --------
    MultiTaskElasticNet
    MultiTaskElasticNetCV
    ElasticNet
    ElasticNetCV
    """
    # We expect X and y to be already float64 Fortran ordered when bypassing
    # checks
    if check_input:
        X = check_array(X, 'csc', dtype=np.float64, order='F', copy=copy_X)
        y = check_array(y, 'csc', dtype=np.float64, order='F', copy=False,
                        ensure_2d=False)
        if Xy is not None:
            # Xy should be a 1d contiguous array or a 2D C ordered array
            Xy = check_array(Xy, dtype=np.float64, order='C', copy=False,
                             ensure_2d=False)
    n_samples, n_features = X.shape

    # 2D y means a separate coefficient path per output (multi-task case).
    multi_output = False
    if y.ndim != 1:
        multi_output = True
        _, n_outputs = y.shape

    # MultiTaskElasticNet does not support sparse matrices
    if not multi_output and sparse.isspmatrix(X):
        if 'X_mean' in params:
            # As sparse matrices are not actually centered we need this
            # to be passed to the CD solver.
            X_sparse_scaling = params['X_mean'] / params['X_std']
        else:
            X_sparse_scaling = np.zeros(n_features)

    # X should be normalized and fit already if function is called
    # from ElasticNet.fit
    if check_input:
        X, y, X_mean, y_mean, X_std, precompute, Xy = \
            _pre_fit(X, y, Xy, precompute, normalize=False,
                     fit_intercept=False, copy=False)
    if alphas is None:
        # No need to normalize or fit_intercept: it has been done
        # above
        alphas = _alpha_grid(X, y, Xy=Xy, l1_ratio=l1_ratio,
                             fit_intercept=False, eps=eps, n_alphas=n_alphas,
                             normalize=False, copy_X=False)
    else:
        alphas = np.sort(alphas)[::-1]  # make sure alphas are properly ordered

    n_alphas = len(alphas)
    # Solver parameters arrive through **params with defaults applied here.
    tol = params.get('tol', 1e-4)
    max_iter = params.get('max_iter', 1000)
    dual_gaps = np.empty(n_alphas)
    n_iters = []

    rng = check_random_state(params.get('random_state', None))
    selection = params.get('selection', 'cyclic')
    if selection not in ['random', 'cyclic']:
        raise ValueError("selection should be either random or cyclic.")
    random = (selection == 'random')

    if not multi_output:
        coefs = np.empty((n_features, n_alphas), dtype=np.float64)
    else:
        coefs = np.empty((n_outputs, n_features, n_alphas),
                         dtype=np.float64)

    if coef_init is None:
        coef_ = np.asfortranarray(np.zeros(coefs.shape[:-1]))
    else:
        coef_ = np.asfortranarray(coef_init)

    # Walk alphas from largest to smallest, warm-starting each solve with the
    # previous coef_ (coef_ is reused across iterations).
    for i, alpha in enumerate(alphas):
        l1_reg = alpha * l1_ratio * n_samples
        l2_reg = alpha * (1.0 - l1_ratio) * n_samples
        # Dispatch to the matching Cython coordinate-descent kernel.
        if not multi_output and sparse.isspmatrix(X):
            model = cd_fast.sparse_enet_coordinate_descent(
                coef_, l1_reg, l2_reg, X.data, X.indices,
                X.indptr, y, X_sparse_scaling,
                max_iter, tol, rng, random, positive)
        elif multi_output:
            model = cd_fast.enet_coordinate_descent_multi_task(
                coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random)
        elif isinstance(precompute, np.ndarray):
            # We expect precompute to be already Fortran ordered when bypassing
            # checks
            # NOTE(review): the check below enforces order='C', which seems to
            # contradict the comment above — confirm the expected layout of
            # the Gram matrix against cd_fast.enet_coordinate_descent_gram.
            if check_input:
                precompute = check_array(precompute, dtype=np.float64,
                                         order='C')
            model = cd_fast.enet_coordinate_descent_gram(
                coef_, l1_reg, l2_reg, precompute, Xy, y,
                max_iter, tol, rng, random, positive)
        elif precompute is False:
            model = cd_fast.enet_coordinate_descent(
                coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random,
                positive)
        else:
            raise ValueError("Precompute should be one of True, False, "
                             "'auto' or array-like")
        coef_, dual_gap_, eps_, n_iter_ = model
        coefs[..., i] = coef_
        dual_gaps[i] = dual_gap_
        n_iters.append(n_iter_)
        # A dual gap above the solver's scaled tolerance means max_iter was
        # exhausted before convergence for this alpha.
        if dual_gap_ > eps_:
            warnings.warn('Objective did not converge.' +
                          ' You might want' +
                          ' to increase the number of iterations',
                          ConvergenceWarning)
        if verbose:
            if verbose > 2:
                print(model)
            elif verbose > 1:
                print('Path: %03i out of %03i' % (i, n_alphas))
            else:
                sys.stderr.write('.')

    if return_n_iter:
        return alphas, coefs, dual_gaps, n_iters
    return alphas, coefs, dual_gaps


###############################################################################
# ElasticNet model


class ElasticNet(LinearModel, RegressorMixin):
    """Linear regression with combined L1 and L2 priors as regularizer.
    Minimizes the objective function::

            1 / (2 * n_samples) * ||y - Xw||^2_2 +
            + alpha * l1_ratio * ||w||_1
            + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2

    If you are interested in controlling the L1 and L2 penalty
    separately, keep in mind that this is equivalent to::

            a * L1 + b * L2

    where::

            alpha = a + b and l1_ratio = a / (a + b)

    The parameter l1_ratio corresponds to alpha in the glmnet R package while
    alpha corresponds to the lambda parameter in glmnet. Specifically, l1_ratio
    = 1 is the lasso penalty. Currently, l1_ratio <= 0.01 is not reliable,
    unless you supply your own sequence of alpha.

    Read more in the :ref:`User Guide <elastic_net>`.

    Parameters
    ----------
    alpha : float
        Constant that multiplies the penalty terms. Defaults to 1.0
        See the notes for the exact mathematical meaning of this
        parameter.
        ``alpha = 0`` is equivalent to an ordinary least square, solved
        by the :class:`LinearRegression` object. For numerical
        reasons, using ``alpha = 0`` with the Lasso object is not advised
        and you should prefer the LinearRegression object.

    l1_ratio : float
        The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``. For
        ``l1_ratio = 0`` the penalty is an L2 penalty. For ``l1_ratio = 1``
        it is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a
        combination of L1 and L2.

    fit_intercept : bool
        Whether the intercept should be estimated or not. If ``False``, the
        data is assumed to be already centered.

    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.

    precompute : True | False | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument. For sparse input
        this option is always ``True`` to preserve sparsity.

    max_iter : int, optional
        The maximum number of iterations

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.

    warm_start : bool, optional
        When set to ``True``, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.

    positive : bool, optional
        When set to ``True``, forces the coefficients to be positive.

    selection : str, default 'cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4.

    random_state : int, RandomState instance, or None (default)
        The seed of the pseudo random number generator that selects
        a random feature to update. Useful only when selection is set to
        'random'.

    Attributes
    ----------
    coef_ : array, shape (n_features,) | (n_targets, n_features)
        parameter vector (w in the cost function formula)

    sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
            (n_targets, n_features)
        ``sparse_coef_`` is a readonly property derived from ``coef_``

    intercept_ : float | array, shape (n_targets,)
        independent term in decision function.

    n_iter_ : array-like, shape (n_targets,)
        number of iterations run by the coordinate descent solver to reach
        the specified tolerance.

    Notes
    -----
    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.

    See also
    --------
    SGDRegressor: implements elastic net regression with incremental training.
    SGDClassifier: implements logistic regression with elastic net penalty
        (``SGDClassifier(loss="log", penalty="elasticnet")``).
""" path = staticmethod(enet_path) def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True, normalize=False, precompute=False, max_iter=1000, copy_X=True, tol=1e-4, warm_start=False, positive=False, random_state=None, selection='cyclic'): self.alpha = alpha self.l1_ratio = l1_ratio self.coef_ = None self.fit_intercept = fit_intercept self.normalize = normalize self.precompute = precompute self.max_iter = max_iter self.copy_X = copy_X self.tol = tol self.warm_start = warm_start self.positive = positive self.intercept_ = 0.0 self.random_state = random_state self.selection = selection def fit(self, X, y, check_input=True): """Fit model with coordinate descent. Parameters ----------- X : ndarray or scipy.sparse matrix, (n_samples, n_features) Data y : ndarray, shape (n_samples,) or (n_samples, n_targets) Target Notes ----- Coordinate descent is an algorithm that considers each column of data at a time hence it will automatically convert the X input as a Fortran-contiguous numpy array if necessary. To avoid memory re-allocation it is advised to allocate the initial data in memory directly using that format. """ if self.alpha == 0: warnings.warn("With alpha=0, this algorithm does not converge " "well. 
You are advised to use the LinearRegression " "estimator", stacklevel=2) # We expect X and y to be already float64 Fortran ordered arrays # when bypassing checks if check_input: y = np.asarray(y, dtype=np.float64) X, y = check_X_y(X, y, accept_sparse='csc', dtype=np.float64, order='F', copy=self.copy_X and self.fit_intercept, multi_output=True, y_numeric=True) y = check_array(y, dtype=np.float64, order='F', copy=False, ensure_2d=False) X, y, X_mean, y_mean, X_std, precompute, Xy = \ _pre_fit(X, y, None, self.precompute, self.normalize, self.fit_intercept, copy=False) if y.ndim == 1: y = y[:, np.newaxis] if Xy is not None and Xy.ndim == 1: Xy = Xy[:, np.newaxis] n_samples, n_features = X.shape n_targets = y.shape[1] if self.selection not in ['cyclic', 'random']: raise ValueError("selection should be either random or cyclic.") if not self.warm_start or self.coef_ is None: coef_ = np.zeros((n_targets, n_features), dtype=np.float64, order='F') else: coef_ = self.coef_ if coef_.ndim == 1: coef_ = coef_[np.newaxis, :] dual_gaps_ = np.zeros(n_targets, dtype=np.float64) self.n_iter_ = [] for k in xrange(n_targets): if Xy is not None: this_Xy = Xy[:, k] else: this_Xy = None _, this_coef, this_dual_gap, this_iter = \ self.path(X, y[:, k], l1_ratio=self.l1_ratio, eps=None, n_alphas=None, alphas=[self.alpha], precompute=precompute, Xy=this_Xy, fit_intercept=False, normalize=False, copy_X=True, verbose=False, tol=self.tol, positive=self.positive, X_mean=X_mean, X_std=X_std, return_n_iter=True, coef_init=coef_[k], max_iter=self.max_iter, random_state=self.random_state, selection=self.selection, check_input=False) coef_[k] = this_coef[:, 0] dual_gaps_[k] = this_dual_gap[0] self.n_iter_.append(this_iter[0]) if n_targets == 1: self.n_iter_ = self.n_iter_[0] self.coef_, self.dual_gap_ = map(np.squeeze, [coef_, dual_gaps_]) self._set_intercept(X_mean, y_mean, X_std) # return self for chaining fit and predict calls return self @property def sparse_coef_(self): """ sparse 
representation of the fitted coef """ return sparse.csr_matrix(self.coef_) @deprecated(" and will be removed in 0.19") def decision_function(self, X): """Decision function of the linear model Parameters ---------- X : numpy array or scipy.sparse matrix of shape (n_samples, n_features) Returns ------- T : array, shape (n_samples,) The predicted decision function """ return self._decision_function(X) def _decision_function(self, X): """Decision function of the linear model Parameters ---------- X : numpy array or scipy.sparse matrix of shape (n_samples, n_features) Returns ------- T : array, shape (n_samples,) The predicted decision function """ check_is_fitted(self, 'n_iter_') if sparse.isspmatrix(X): return np.ravel(safe_sparse_dot(self.coef_, X.T, dense_output=True) + self.intercept_) else: return super(ElasticNet, self)._decision_function(X) ############################################################################### # Lasso model class Lasso(ElasticNet): """Linear Model trained with L1 prior as regularizer (aka the Lasso) The optimization objective for Lasso is:: (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1 Technically the Lasso model is optimizing the same objective function as the Elastic Net with ``l1_ratio=1.0`` (no L2 penalty). Read more in the :ref:`User Guide <lasso>`. Parameters ---------- alpha : float, optional Constant that multiplies the L1 term. Defaults to 1.0. ``alpha = 0`` is equivalent to an ordinary least square, solved by the :class:`LinearRegression` object. For numerical reasons, using ``alpha = 0`` is with the Lasso object is not advised and you should prefer the LinearRegression object. fit_intercept : boolean whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). normalize : boolean, optional, default False If ``True``, the regressors X will be normalized before regression. 
    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    precompute : True | False | array-like, default=False
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument. For sparse input
        this option is always ``True`` to preserve sparsity.

    max_iter : int, optional
        The maximum number of iterations

    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.

    warm_start : bool, optional
        When set to True, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.

    positive : bool, optional
        When set to ``True``, forces the coefficients to be positive.

    selection : str, default 'cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4.

    random_state : int, RandomState instance, or None (default)
        The seed of the pseudo random number generator that selects a random
        feature to update. Useful only when selection is set to 'random'.

    Attributes
    ----------
    coef_ : array, shape (n_features,) | (n_targets, n_features)
        parameter vector (w in the cost function formula)

    sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
            (n_targets, n_features)
        ``sparse_coef_`` is a readonly property derived from ``coef_``

    intercept_ : float | array, shape (n_targets,)
        independent term in decision function.

    n_iter_ : int | array-like, shape (n_targets,)
        number of iterations run by the coordinate descent solver to reach
        the specified tolerance.

    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.Lasso(alpha=0.1)
    >>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
    Lasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
       normalize=False, positive=False, precompute=False, random_state=None,
       selection='cyclic', tol=0.0001, warm_start=False)
    >>> print(clf.coef_)
    [ 0.85  0.  ]
    >>> print(clf.intercept_)
    0.15

    See also
    --------
    lars_path
    lasso_path
    LassoLars
    LassoCV
    LassoLarsCV
    sklearn.decomposition.sparse_encode

    Notes
    -----
    The algorithm used to fit the model is coordinate descent.

    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.
    """
    path = staticmethod(enet_path)

    def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
                 precompute=False, copy_X=True, max_iter=1000,
                 tol=1e-4, warm_start=False, positive=False,
                 random_state=None, selection='cyclic'):
        # Lasso is ElasticNet pinned at l1_ratio=1.0 (pure L1 penalty).
        super(Lasso, self).__init__(
            alpha=alpha, l1_ratio=1.0, fit_intercept=fit_intercept,
            normalize=normalize, precompute=precompute, copy_X=copy_X,
            max_iter=max_iter, tol=tol, warm_start=warm_start,
            positive=positive, random_state=random_state,
            selection=selection)


###############################################################################
# Functions for CV with paths functions


def _path_residuals(X, y, train, test, path, path_params, alphas=None,
                    l1_ratio=1, X_order=None, dtype=None):
    """Returns the MSE for the models computed by 'path'

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data.

    y : array-like, shape (n_samples,) or (n_samples, n_targets)
        Target values

    train : list of indices
        The indices of the train set

    test : list of indices
        The indices of the test set

    path : callable
        function returning a list of models on the path.
        See enet_path for an example of signature

    path_params : dictionary
        Parameters passed to the path function

    alphas : array-like, optional
        Array of float that is used for cross-validation. If not
        provided, computed using 'path'

    l1_ratio : float, optional
        float between 0 and 1 passed to ElasticNet (scaling between l1 and l2
        penalties). For ``l1_ratio = 0`` the penalty is an L2 penalty. For
        ``l1_ratio = 1`` it is an L1 penalty. For ``0 < l1_ratio < 1``, the
        penalty is a combination of L1 and L2

    X_order : {'F', 'C', or None}, optional
        The order of the arrays expected by the path function to
        avoid memory copies

    dtype : a numpy dtype or None
        The dtype of the arrays expected by the path function to
        avoid memory copies
    """
    X_train = X[train]
    y_train = y[train]
    X_test = X[test]
    y_test = y[test]
    fit_intercept = path_params['fit_intercept']
    normalize = path_params['normalize']

    if y.ndim == 1:
        precompute = path_params['precompute']
    else:
        # No Gram variant of multi-task exists right now.
        # Fall back to default enet_multitask
        precompute = False

    X_train, y_train, X_mean, y_mean, X_std, precompute, Xy = \
        _pre_fit(X_train, y_train, None, precompute, normalize, fit_intercept,
                 copy=False)

    path_params = path_params.copy()
    path_params['Xy'] = Xy
    path_params['X_mean'] = X_mean
    path_params['X_std'] = X_std
    path_params['precompute'] = precompute
    path_params['copy_X'] = False
    path_params['alphas'] = alphas

    if 'l1_ratio' in path_params:
        path_params['l1_ratio'] = l1_ratio

    # Do the ordering and type casting here, as if it is done in the path,
    # X is copied and a reference is kept here
    X_train = check_array(X_train, 'csc', dtype=dtype, order=X_order)
    alphas, coefs, _ = path(X_train, y_train, **path_params)
    del X_train, y_train

    if y.ndim == 1:
        # Doing this so that it becomes coherent with multioutput.
        coefs = coefs[np.newaxis, :, :]
        y_mean = np.atleast_1d(y_mean)
        y_test = y_test[:, np.newaxis]

    if normalize:
        # Undo the training-fold normalization so coefs apply to raw X_test.
        nonzeros = np.flatnonzero(X_std)
        coefs[:, nonzeros] /= X_std[nonzeros][:, np.newaxis]

    intercepts = y_mean[:, np.newaxis] - np.dot(X_mean, coefs)
    if sparse.issparse(X_test):
        n_order, n_features, n_alphas = coefs.shape
        # Work around for sparse matrices since coefs is a 3-D numpy array.
        coefs_feature_major = np.rollaxis(coefs, 1)
        feature_2d = np.reshape(coefs_feature_major, (n_features, -1))
        X_test_coefs = safe_sparse_dot(X_test, feature_2d)
        X_test_coefs = X_test_coefs.reshape(X_test.shape[0], n_order, -1)
    else:
        X_test_coefs = safe_sparse_dot(X_test, coefs)
    residues = X_test_coefs - y_test[:, :, np.newaxis]
    residues += intercepts
    # Average squared residuals over samples, then over targets: one MSE
    # per alpha on the held-out fold.
    this_mses = ((residues ** 2).mean(axis=0)).mean(axis=0)

    return this_mses


class LinearModelCV(six.with_metaclass(ABCMeta, LinearModel)):
    """Base class for iterative model fitting along a regularization path"""

    @abstractmethod
    def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
                 normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
                 copy_X=True, cv=None, verbose=False, n_jobs=1,
                 positive=False, random_state=None, selection='cyclic'):
        self.eps = eps
        self.n_alphas = n_alphas
        self.alphas = alphas
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.precompute = precompute
        self.max_iter = max_iter
        self.tol = tol
        self.copy_X = copy_X
        self.cv = cv
        self.verbose = verbose
        self.n_jobs = n_jobs
        self.positive = positive
        self.random_state = random_state
        self.selection = selection

    def fit(self, X, y):
        """Fit linear model with coordinate descent

        Fit is on grid of alphas and best alpha estimated by cross-validation.

        Parameters
        ----------
        X : {array-like}, shape (n_samples, n_features)
            Training data. Pass directly as float64, Fortran-contiguous data
            to avoid unnecessary memory duplication. If y is mono-output,
            X can be sparse.
y : array-like, shape (n_samples,) or (n_samples, n_targets) Target values """ y = np.asarray(y, dtype=np.float64) if y.shape[0] == 0: raise ValueError("y has 0 samples: %r" % y) if hasattr(self, 'l1_ratio'): model_str = 'ElasticNet' else: model_str = 'Lasso' if isinstance(self, ElasticNetCV) or isinstance(self, LassoCV): if model_str == 'ElasticNet': model = ElasticNet() else: model = Lasso() if y.ndim > 1 and y.shape[1] > 1: raise ValueError("For multi-task outputs, use " "MultiTask%sCV" % (model_str)) y = column_or_1d(y, warn=True) else: if sparse.isspmatrix(X): raise TypeError("X should be dense but a sparse matrix was" "passed") elif y.ndim == 1: raise ValueError("For mono-task outputs, use " "%sCV" % (model_str)) if model_str == 'ElasticNet': model = MultiTaskElasticNet() else: model = MultiTaskLasso() if self.selection not in ["random", "cyclic"]: raise ValueError("selection should be either random or cyclic.") # This makes sure that there is no duplication in memory. # Dealing right with copy_X is important in the following: # Multiple functions touch X and subsamples of X and can induce a # lot of duplication of memory copy_X = self.copy_X and self.fit_intercept if isinstance(X, np.ndarray) or sparse.isspmatrix(X): # Keep a reference to X reference_to_old_X = X # Let us not impose fortran ordering or float64 so far: it is # not useful for the cross-validation loop and will be done # by the model fitting itself X = check_array(X, 'csc', copy=False) if sparse.isspmatrix(X): if (hasattr(reference_to_old_X, "data") and not np.may_share_memory(reference_to_old_X.data, X.data)): # X is a sparse matrix and has been copied copy_X = False elif not np.may_share_memory(reference_to_old_X, X): # X has been copied copy_X = False del reference_to_old_X else: X = check_array(X, 'csc', dtype=np.float64, order='F', copy=copy_X) copy_X = False if X.shape[0] != y.shape[0]: raise ValueError("X and y have inconsistent dimensions (%d != %d)" % (X.shape[0], y.shape[0])) # All 
LinearModelCV parameters except 'cv' are acceptable path_params = self.get_params() if 'l1_ratio' in path_params: l1_ratios = np.atleast_1d(path_params['l1_ratio']) # For the first path, we need to set l1_ratio path_params['l1_ratio'] = l1_ratios[0] else: l1_ratios = [1, ] path_params.pop('cv', None) path_params.pop('n_jobs', None) alphas = self.alphas n_l1_ratio = len(l1_ratios) if alphas is None: alphas = [] for l1_ratio in l1_ratios: alphas.append(_alpha_grid( X, y, l1_ratio=l1_ratio, fit_intercept=self.fit_intercept, eps=self.eps, n_alphas=self.n_alphas, normalize=self.normalize, copy_X=self.copy_X)) else: # Making sure alphas is properly ordered. alphas = np.tile(np.sort(alphas)[::-1], (n_l1_ratio, 1)) # We want n_alphas to be the number of alphas used for each l1_ratio. n_alphas = len(alphas[0]) path_params.update({'n_alphas': n_alphas}) path_params['copy_X'] = copy_X # We are not computing in parallel, we can modify X # inplace in the folds if not (self.n_jobs == 1 or self.n_jobs is None): path_params['copy_X'] = False # init cross-validation generator cv = check_cv(self.cv) # Compute path for all folds and compute MSE to get the best alpha folds = list(cv.split(X)) best_mse = np.inf # We do a double for loop folded in one, in order to be able to # iterate in parallel on l1_ratio and folds jobs = (delayed(_path_residuals)(X, y, train, test, self.path, path_params, alphas=this_alphas, l1_ratio=this_l1_ratio, X_order='F', dtype=np.float64) for this_l1_ratio, this_alphas in zip(l1_ratios, alphas) for train, test in folds) mse_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose, backend="threading")(jobs) mse_paths = np.reshape(mse_paths, (n_l1_ratio, len(folds), -1)) mean_mse = np.mean(mse_paths, axis=1) self.mse_path_ = np.squeeze(np.rollaxis(mse_paths, 2, 1)) for l1_ratio, l1_alphas, mse_alphas in zip(l1_ratios, alphas, mean_mse): i_best_alpha = np.argmin(mse_alphas) this_best_mse = mse_alphas[i_best_alpha] if this_best_mse < best_mse: best_alpha = 
l1_alphas[i_best_alpha] best_l1_ratio = l1_ratio best_mse = this_best_mse self.l1_ratio_ = best_l1_ratio self.alpha_ = best_alpha if self.alphas is None: self.alphas_ = np.asarray(alphas) if n_l1_ratio == 1: self.alphas_ = self.alphas_[0] # Remove duplicate alphas in case alphas is provided. else: self.alphas_ = np.asarray(alphas[0]) # Refit the model with the parameters selected common_params = dict((name, value) for name, value in self.get_params().items() if name in model.get_params()) model.set_params(**common_params) model.alpha = best_alpha model.l1_ratio = best_l1_ratio model.copy_X = copy_X model.precompute = False model.fit(X, y) if not hasattr(self, 'l1_ratio'): del self.l1_ratio_ self.coef_ = model.coef_ self.intercept_ = model.intercept_ self.dual_gap_ = model.dual_gap_ self.n_iter_ = model.n_iter_ return self class LassoCV(LinearModelCV, RegressorMixin): """Lasso linear model with iterative fitting along a regularization path The best model is selected by cross-validation. The optimization objective for Lasso is:: (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1 Read more in the :ref:`User Guide <lasso>`. Parameters ---------- eps : float, optional Length of the path. ``eps=1e-3`` means that ``alpha_min / alpha_max = 1e-3``. n_alphas : int, optional Number of alphas along the regularization path alphas : numpy array, optional List of alphas where to compute the models. If ``None`` alphas are set automatically precompute : True | False | 'auto' | array-like Whether to use a precomputed Gram matrix to speed up calculations. If set to ``'auto'`` let us decide. The Gram matrix can also be passed as argument. max_iter : int, optional The maximum number of iterations tol : float, optional The tolerance for the optimization: if the updates are smaller than ``tol``, the optimization code checks the dual gap for optimality and continues until it is smaller than ``tol``. 
    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:

        - None, to use the default 3-fold cross-validation,
        - integer, to specify the number of folds.
        - An object to be used as a cross-validation generator.
        - An iterable yielding train/test splits.

        For integer/None inputs, :class:`KFold` is used.

        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.

    verbose : bool or integer
        Amount of verbosity.

    n_jobs : integer, optional
        Number of CPUs to use during the cross validation. If ``-1``, use
        all the CPUs.

    positive : bool, optional
        If positive, restrict regression coefficients to be positive

    selection : str, default 'cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4.

    random_state : int, RandomState instance, or None (default)
        The seed of the pseudo random number generator that selects a random
        feature to update. Useful only when selection is set to 'random'.

    fit_intercept : boolean, default True
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    Attributes
    ----------
    alpha_ : float
        The amount of penalization chosen by cross validation

    coef_ : array, shape (n_features,) | (n_targets, n_features)
        parameter vector (w in the cost function formula)

    intercept_ : float | array, shape (n_targets,)
        independent term in decision function.

    mse_path_ : array, shape (n_alphas, n_folds)
        mean square error for the test set on each fold, varying alpha

    alphas_ : numpy array, shape (n_alphas,)
        The grid of alphas used for fitting

    dual_gap_ : ndarray, shape ()
        The dual gap at the end of the optimization for the optimal alpha
        (``alpha_``).

    n_iter_ : int
        number of iterations run by the coordinate descent solver to reach
        the specified tolerance for the optimal alpha.

    Notes
    -----
    See examples/linear_model/lasso_path_with_crossvalidation.py
    for an example.

    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.

    See also
    --------
    lars_path
    lasso_path
    LassoLars
    Lasso
    LassoLarsCV
    """
    # LassoCV cross-validates along the lasso (l1_ratio=1) path.
    path = staticmethod(lasso_path)

    def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
                 normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
                 copy_X=True, cv=None, verbose=False, n_jobs=1,
                 positive=False, random_state=None, selection='cyclic'):
        super(LassoCV, self).__init__(
            eps=eps, n_alphas=n_alphas, alphas=alphas,
            fit_intercept=fit_intercept, normalize=normalize,
            precompute=precompute, max_iter=max_iter, tol=tol, copy_X=copy_X,
            cv=cv, verbose=verbose, n_jobs=n_jobs, positive=positive,
            random_state=random_state, selection=selection)


class ElasticNetCV(LinearModelCV, RegressorMixin):
    """Elastic Net model with iterative fitting along a regularization path

    The best model is selected by cross-validation.

    Read more in the :ref:`User Guide <elastic_net>`.

    Parameters
    ----------
    l1_ratio : float or array of floats, optional
        float between 0 and 1 passed to ElasticNet (scaling between
        l1 and l2 penalties). For ``l1_ratio = 0``
        the penalty is an L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty.
        For ``0 < l1_ratio < 1``, the penalty is a combination of L1 and L2
        This parameter can be a list, in which case the different
        values are tested by cross-validation and the one giving the best
        prediction score is used.
        Note that a good choice of list of
        values for l1_ratio is often to put more values close to 1
        (i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7,
        .9, .95, .99, 1]``

    eps : float, optional
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``.

    n_alphas : int, optional
        Number of alphas along the regularization path, used for each
        l1_ratio.

    alphas : numpy array, optional
        List of alphas where to compute the models.
        If None alphas are set automatically

    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.

    max_iter : int, optional
        The maximum number of iterations

    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.

    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:

        - None, to use the default 3-fold cross-validation,
        - integer, to specify the number of folds.
        - An object to be used as a cross-validation generator.
        - An iterable yielding train/test splits.

        For integer/None inputs, :class:`KFold` is used.

        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.

    verbose : bool or integer
        Amount of verbosity.

    n_jobs : integer, optional
        Number of CPUs to use during the cross validation. If ``-1``, use
        all the CPUs.

    positive : bool, optional
        When set to ``True``, forces the coefficients to be positive.

    selection : str, default 'cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4.

    random_state : int, RandomState instance, or None (default)
        The seed of the pseudo random number generator that selects a random
        feature to update. Useful only when selection is set to 'random'.

    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    Attributes
    ----------
    alpha_ : float
        The amount of penalization chosen by cross validation

    l1_ratio_ : float
        The compromise between l1 and l2 penalization chosen by
        cross validation

    coef_ : array, shape (n_features,) | (n_targets, n_features)
        Parameter vector (w in the cost function formula),

    intercept_ : float | array, shape (n_targets, n_features)
        Independent term in the decision function.

    mse_path_ : array, shape (n_l1_ratio, n_alpha, n_folds)
        Mean square error for the test set on each fold, varying l1_ratio and
        alpha.

    alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
        The grid of alphas used for fitting, for each l1_ratio.

    n_iter_ : int
        number of iterations run by the coordinate descent solver to reach
        the specified tolerance for the optimal alpha.

    Notes
    -----
    See examples/linear_model/lasso_path_with_crossvalidation.py
    for an example.

    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.

    The parameter l1_ratio corresponds to alpha in the glmnet R package
    while alpha corresponds to the lambda parameter in glmnet.
    More specifically, the optimization objective is::

        1 / (2 * n_samples) * ||y - Xw||^2_2 +
        + alpha * l1_ratio * ||w||_1
        + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2

    If you are interested in controlling the L1 and L2 penalty
    separately, keep in mind that this is equivalent to::

        a * L1 + b * L2

    for::

        alpha = a + b and l1_ratio = a / (a + b).

    See also
    --------
    enet_path
    ElasticNet

    """
    path = staticmethod(enet_path)

    def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
                 fit_intercept=True, normalize=False, precompute='auto',
                 max_iter=1000, tol=1e-4, cv=None, copy_X=True,
                 verbose=0, n_jobs=1, positive=False, random_state=None,
                 selection='cyclic'):
        self.l1_ratio = l1_ratio
        self.eps = eps
        self.n_alphas = n_alphas
        self.alphas = alphas
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.precompute = precompute
        self.max_iter = max_iter
        self.tol = tol
        self.cv = cv
        self.copy_X = copy_X
        self.verbose = verbose
        self.n_jobs = n_jobs
        self.positive = positive
        self.random_state = random_state
        self.selection = selection


###############################################################################
# Multi Task ElasticNet and Lasso models (with joint feature selection)

class MultiTaskElasticNet(Lasso):
    """Multi-task ElasticNet model trained with L1/L2 mixed-norm as regularizer

    The optimization objective for MultiTaskElasticNet is::

        (1 / (2 * n_samples)) * ||Y - XW||^Fro_2
        + alpha * l1_ratio * ||W||_21
        + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2

    Where::

        ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}

    i.e. the sum of norm of each row.

    Read more in the :ref:`User Guide <multi_task_lasso>`.

    Parameters
    ----------
    alpha : float, optional
        Constant that multiplies the L1/L2 term. Defaults to 1.0

    l1_ratio : float
        The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
        For ``l1_ratio = 1`` the penalty is an L1/L2 penalty. For
        ``l1_ratio = 0`` it is an L2 penalty (see the objective above, where
        ``l1_ratio`` scales the ``||W||_21`` term).
        For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
fit_intercept : boolean whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). normalize : boolean, optional, default False If ``True``, the regressors X will be normalized before regression. copy_X : boolean, optional, default True If ``True``, X will be copied; else, it may be overwritten. max_iter : int, optional The maximum number of iterations tol : float, optional The tolerance for the optimization: if the updates are smaller than ``tol``, the optimization code checks the dual gap for optimality and continues until it is smaller than ``tol``. warm_start : bool, optional When set to ``True``, reuse the solution of the previous call to fit as initialization, otherwise, just erase the previous solution. selection : str, default 'cyclic' If set to 'random', a random coefficient is updated every iteration rather than looping over features sequentially by default. This (setting to 'random') often leads to significantly faster convergence especially when tol is higher than 1e-4. random_state : int, RandomState instance, or None (default) The seed of the pseudo random number generator that selects a random feature to update. Useful only when selection is set to 'random'. Attributes ---------- intercept_ : array, shape (n_tasks,) Independent term in decision function. coef_ : array, shape (n_tasks, n_features) Parameter vector (W in the cost function formula). If a 1D y is \ passed in at fit (non multi-task usage), ``coef_`` is then a 1D array n_iter_ : int number of iterations run by the coordinate descent solver to reach the specified tolerance. Examples -------- >>> from sklearn import linear_model >>> clf = linear_model.MultiTaskElasticNet(alpha=0.1) >>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]]) ... 
#doctest: +NORMALIZE_WHITESPACE MultiTaskElasticNet(alpha=0.1, copy_X=True, fit_intercept=True, l1_ratio=0.5, max_iter=1000, normalize=False, random_state=None, selection='cyclic', tol=0.0001, warm_start=False) >>> print(clf.coef_) [[ 0.45663524 0.45612256] [ 0.45663524 0.45612256]] >>> print(clf.intercept_) [ 0.0872422 0.0872422] See also -------- ElasticNet, MultiTaskLasso Notes ----- The algorithm used to fit the model is coordinate descent. To avoid unnecessary memory duplication the X argument of the fit method should be directly passed as a Fortran-contiguous numpy array. """ def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True, normalize=False, copy_X=True, max_iter=1000, tol=1e-4, warm_start=False, random_state=None, selection='cyclic'): self.l1_ratio = l1_ratio self.alpha = alpha self.coef_ = None self.fit_intercept = fit_intercept self.normalize = normalize self.max_iter = max_iter self.copy_X = copy_X self.tol = tol self.warm_start = warm_start self.random_state = random_state self.selection = selection def fit(self, X, y): """Fit MultiTaskLasso model with coordinate descent Parameters ----------- X : ndarray, shape (n_samples, n_features) Data y : ndarray, shape (n_samples, n_tasks) Target Notes ----- Coordinate descent is an algorithm that considers each column of data at a time hence it will automatically convert the X input as a Fortran-contiguous numpy array if necessary. To avoid memory re-allocation it is advised to allocate the initial data in memory directly using that format. 
""" # X and y must be of type float64 X = check_array(X, dtype=np.float64, order='F', copy=self.copy_X and self.fit_intercept) y = check_array(y, dtype=np.float64, ensure_2d=False) if hasattr(self, 'l1_ratio'): model_str = 'ElasticNet' else: model_str = 'Lasso' if y.ndim == 1: raise ValueError("For mono-task outputs, use %s" % model_str) n_samples, n_features = X.shape _, n_tasks = y.shape if n_samples != y.shape[0]: raise ValueError("X and y have inconsistent dimensions (%d != %d)" % (n_samples, y.shape[0])) X, y, X_mean, y_mean, X_std = center_data( X, y, self.fit_intercept, self.normalize, copy=False) if not self.warm_start or self.coef_ is None: self.coef_ = np.zeros((n_tasks, n_features), dtype=np.float64, order='F') l1_reg = self.alpha * self.l1_ratio * n_samples l2_reg = self.alpha * (1.0 - self.l1_ratio) * n_samples self.coef_ = np.asfortranarray(self.coef_) # coef contiguous in memory if self.selection not in ['random', 'cyclic']: raise ValueError("selection should be either random or cyclic.") random = (self.selection == 'random') self.coef_, self.dual_gap_, self.eps_, self.n_iter_ = \ cd_fast.enet_coordinate_descent_multi_task( self.coef_, l1_reg, l2_reg, X, y, self.max_iter, self.tol, check_random_state(self.random_state), random) self._set_intercept(X_mean, y_mean, X_std) if self.dual_gap_ > self.eps_: warnings.warn('Objective did not converge, you might want' ' to increase the number of iterations') # return self for chaining fit and predict calls return self class MultiTaskLasso(MultiTaskElasticNet): """Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer The optimization objective for Lasso is:: (1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21 Where:: ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2} i.e. the sum of norm of earch row. Read more in the :ref:`User Guide <multi_task_lasso>`. Parameters ---------- alpha : float, optional Constant that multiplies the L1/L2 term. 
Defaults to 1.0 fit_intercept : boolean whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). normalize : boolean, optional, default False If ``True``, the regressors X will be normalized before regression. copy_X : boolean, optional, default True If ``True``, X will be copied; else, it may be overwritten. max_iter : int, optional The maximum number of iterations tol : float, optional The tolerance for the optimization: if the updates are smaller than ``tol``, the optimization code checks the dual gap for optimality and continues until it is smaller than ``tol``. warm_start : bool, optional When set to ``True``, reuse the solution of the previous call to fit as initialization, otherwise, just erase the previous solution. selection : str, default 'cyclic' If set to 'random', a random coefficient is updated every iteration rather than looping over features sequentially by default. This (setting to 'random') often leads to significantly faster convergence especially when tol is higher than 1e-4 random_state : int, RandomState instance, or None (default) The seed of the pseudo random number generator that selects a random feature to update. Useful only when selection is set to 'random'. Attributes ---------- coef_ : array, shape (n_tasks, n_features) parameter vector (W in the cost function formula) intercept_ : array, shape (n_tasks,) independent term in decision function. n_iter_ : int number of iterations run by the coordinate descent solver to reach the specified tolerance. Examples -------- >>> from sklearn import linear_model >>> clf = linear_model.MultiTaskLasso(alpha=0.1) >>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]]) MultiTaskLasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000, normalize=False, random_state=None, selection='cyclic', tol=0.0001, warm_start=False) >>> print(clf.coef_) [[ 0.89393398 0. ] [ 0.89393398 0. 
]] >>> print(clf.intercept_) [ 0.10606602 0.10606602] See also -------- Lasso, MultiTaskElasticNet Notes ----- The algorithm used to fit the model is coordinate descent. To avoid unnecessary memory duplication the X argument of the fit method should be directly passed as a Fortran-contiguous numpy array. """ def __init__(self, alpha=1.0, fit_intercept=True, normalize=False, copy_X=True, max_iter=1000, tol=1e-4, warm_start=False, random_state=None, selection='cyclic'): self.alpha = alpha self.coef_ = None self.fit_intercept = fit_intercept self.normalize = normalize self.max_iter = max_iter self.copy_X = copy_X self.tol = tol self.warm_start = warm_start self.l1_ratio = 1.0 self.random_state = random_state self.selection = selection class MultiTaskElasticNetCV(LinearModelCV, RegressorMixin): """Multi-task L1/L2 ElasticNet with built-in cross-validation. The optimization objective for MultiTaskElasticNet is:: (1 / (2 * n_samples)) * ||Y - XW||^Fro_2 + alpha * l1_ratio * ||W||_21 + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2 Where:: ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2} i.e. the sum of norm of each row. Read more in the :ref:`User Guide <multi_task_lasso>`. Parameters ---------- eps : float, optional Length of the path. ``eps=1e-3`` means that ``alpha_min / alpha_max = 1e-3``. alphas : array-like, optional List of alphas where to compute the models. If not provided, set automatically. n_alphas : int, optional Number of alphas along the regularization path l1_ratio : float or array of floats The ElasticNet mixing parameter, with 0 < l1_ratio <= 1. For l1_ratio = 0 the penalty is an L1/L2 penalty. For l1_ratio = 1 it is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2. This parameter can be a list, in which case the different values are tested by cross-validation and the one giving the best prediction score is used. Note that a good choice of list of values for l1_ratio is often to put more values close to 1 (i.e. 
Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7, .9, .95, .99, 1]`` fit_intercept : boolean whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). normalize : boolean, optional, default False If ``True``, the regressors X will be normalized before regression. copy_X : boolean, optional, default True If ``True``, X will be copied; else, it may be overwritten. max_iter : int, optional The maximum number of iterations tol : float, optional The tolerance for the optimization: if the updates are smaller than ``tol``, the optimization code checks the dual gap for optimality and continues until it is smaller than ``tol``. cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 3-fold cross-validation, - integer, to specify the number of folds. - An object to be used as a cross-validation generator. - An iterable yielding train/test splits. For integer/None inputs, :class:`KFold` is used. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. verbose : bool or integer Amount of verbosity. n_jobs : integer, optional Number of CPUs to use during the cross validation. If ``-1``, use all the CPUs. Note that this is used only if multiple values for l1_ratio are given. selection : str, default 'cyclic' If set to 'random', a random coefficient is updated every iteration rather than looping over features sequentially by default. This (setting to 'random') often leads to significantly faster convergence especially when tol is higher than 1e-4. random_state : int, RandomState instance, or None (default) The seed of the pseudo random number generator that selects a random feature to update. Useful only when selection is set to 'random'. 
Attributes ---------- intercept_ : array, shape (n_tasks,) Independent term in decision function. coef_ : array, shape (n_tasks, n_features) Parameter vector (W in the cost function formula). alpha_ : float The amount of penalization chosen by cross validation mse_path_ : array, shape (n_alphas, n_folds) or \ (n_l1_ratio, n_alphas, n_folds) mean square error for the test set on each fold, varying alpha alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas) The grid of alphas used for fitting, for each l1_ratio l1_ratio_ : float best l1_ratio obtained by cross-validation. n_iter_ : int number of iterations run by the coordinate descent solver to reach the specified tolerance for the optimal alpha. Examples -------- >>> from sklearn import linear_model >>> clf = linear_model.MultiTaskElasticNetCV() >>> clf.fit([[0,0], [1, 1], [2, 2]], ... [[0, 0], [1, 1], [2, 2]]) ... #doctest: +NORMALIZE_WHITESPACE MultiTaskElasticNetCV(alphas=None, copy_X=True, cv=None, eps=0.001, fit_intercept=True, l1_ratio=0.5, max_iter=1000, n_alphas=100, n_jobs=1, normalize=False, random_state=None, selection='cyclic', tol=0.0001, verbose=0) >>> print(clf.coef_) [[ 0.52875032 0.46958558] [ 0.52875032 0.46958558]] >>> print(clf.intercept_) [ 0.00166409 0.00166409] See also -------- MultiTaskElasticNet ElasticNetCV MultiTaskLassoCV Notes ----- The algorithm used to fit the model is coordinate descent. To avoid unnecessary memory duplication the X argument of the fit method should be directly passed as a Fortran-contiguous numpy array. 
""" path = staticmethod(enet_path) def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True, normalize=False, max_iter=1000, tol=1e-4, cv=None, copy_X=True, verbose=0, n_jobs=1, random_state=None, selection='cyclic'): self.l1_ratio = l1_ratio self.eps = eps self.n_alphas = n_alphas self.alphas = alphas self.fit_intercept = fit_intercept self.normalize = normalize self.max_iter = max_iter self.tol = tol self.cv = cv self.copy_X = copy_X self.verbose = verbose self.n_jobs = n_jobs self.random_state = random_state self.selection = selection class MultiTaskLassoCV(LinearModelCV, RegressorMixin): """Multi-task L1/L2 Lasso with built-in cross-validation. The optimization objective for MultiTaskLasso is:: (1 / (2 * n_samples)) * ||Y - XW||^Fro_2 + alpha * ||W||_21 Where:: ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2} i.e. the sum of norm of each row. Read more in the :ref:`User Guide <multi_task_lasso>`. Parameters ---------- eps : float, optional Length of the path. ``eps=1e-3`` means that ``alpha_min / alpha_max = 1e-3``. alphas : array-like, optional List of alphas where to compute the models. If not provided, set automaticlly. n_alphas : int, optional Number of alphas along the regularization path fit_intercept : boolean whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). normalize : boolean, optional, default False If ``True``, the regressors X will be normalized before regression. copy_X : boolean, optional, default True If ``True``, X will be copied; else, it may be overwritten. max_iter : int, optional The maximum number of iterations. tol : float, optional The tolerance for the optimization: if the updates are smaller than ``tol``, the optimization code checks the dual gap for optimality and continues until it is smaller than ``tol``. 
cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 3-fold cross-validation, - integer, to specify the number of folds. - An object to be used as a cross-validation generator. - An iterable yielding train/test splits. For integer/None inputs, :class:`KFold` is used. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. verbose : bool or integer Amount of verbosity. n_jobs : integer, optional Number of CPUs to use during the cross validation. If ``-1``, use all the CPUs. Note that this is used only if multiple values for l1_ratio are given. selection : str, default 'cyclic' If set to 'random', a random coefficient is updated every iteration rather than looping over features sequentially by default. This (setting to 'random') often leads to significantly faster convergence especially when tol is higher than 1e-4. random_state : int, RandomState instance, or None (default) The seed of the pseudo random number generator that selects a random feature to update. Useful only when selection is set to 'random'. Attributes ---------- intercept_ : array, shape (n_tasks,) Independent term in decision function. coef_ : array, shape (n_tasks, n_features) Parameter vector (W in the cost function formula). alpha_ : float The amount of penalization chosen by cross validation mse_path_ : array, shape (n_alphas, n_folds) mean square error for the test set on each fold, varying alpha alphas_ : numpy array, shape (n_alphas,) The grid of alphas used for fitting. n_iter_ : int number of iterations run by the coordinate descent solver to reach the specified tolerance for the optimal alpha. See also -------- MultiTaskElasticNet ElasticNetCV MultiTaskElasticNetCV Notes ----- The algorithm used to fit the model is coordinate descent. 
To avoid unnecessary memory duplication the X argument of the fit method should be directly passed as a Fortran-contiguous numpy array. """ path = staticmethod(lasso_path) def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True, normalize=False, max_iter=1000, tol=1e-4, copy_X=True, cv=None, verbose=False, n_jobs=1, random_state=None, selection='cyclic'): super(MultiTaskLassoCV, self).__init__( eps=eps, n_alphas=n_alphas, alphas=alphas, fit_intercept=fit_intercept, normalize=normalize, max_iter=max_iter, tol=tol, copy_X=copy_X, cv=cv, verbose=verbose, n_jobs=n_jobs, random_state=random_state, selection=selection)
bsd-3-clause
pulkitag/caffe-python-layers
streetview_data_group_rots.py
1
13622
import caffe import numpy as np import argparse, pprint from multiprocessing import Pool import scipy.misc as scm from os import path as osp import my_pycaffe_io as mpio import my_pycaffe as mp from easydict import EasyDict as edict from transforms3d.transforms3d import euler as t3eu import street_label_utils as slu import time import glog import pdb import pickle import math import copy try: import cv2 except: print('OPEN CV not found, resorting to scipy.misc') MODULE_PATH = osp.dirname(osp.realpath(__file__)) def get_jitter(coords=None, jitAmt=0, jitPct=0): dx, dy = 0, 0 if jitAmt > 0: assert (jitPct == 0) rx, ry = np.random.random(), np.random.random() dx, dy = rx * jitAmt, ry * jitAmt if np.random.random() > 0.5: dx = - dx if np.random.random() > 0.5: dy = -dy if jitPct > 0: h, w = [], [] for n in range(len(coords)): x1, y1, x2, y2 = coords[n] h.append(y2 - y1) w.append(x2 - x1) mnH, mnW = min(h), min(w) rx, ry = np.random.random(), np.random.random() dx, dy = rx * mnW * jitPct, ry * mnH * jitPct if np.random.random() > 0.5: dx = - dx if np.random.random() > 0.5: dy = -dy return int(dx), int(dy) def rotate_image(im, theta): ''' theta: in degrees ''' rows, cols, _ = im.shape M = cv2.getRotationMatrix2D((cols/2,rows/2), theta, 1) dst = cv2.warpAffine(im, M, (cols, rows)) return dst def read_double_images(imName1, imName2, imPrms, rollJitter=None): imSz, cropSz = imPrms['imSz'], imPrms['cropSz'] jitPct = imPrms['jitter_pct'] jitAmt = imPrms['jitter_amt'] im = [] #Read the images: try: if rollJitter is None: im.append(cv2.imread(imName1)) else: r1 = rollJitter[0] img = cv2.imread(imName1) im.append(rotate_image(img, r1)) except: raise Exception('Image %s read incorrectly' % imName1) try: if rollJitter is None: im.append(cv2.imread(imName2)) else: r2 = rollJitter[1] img = cv2.imread(imName2) im.append(rotate_image(img, r2)) except: raise Exception('Image %s read incorrectly' % imName2) try: h1, w1, ch1 = im[0].shape h2, w2, ch2 = im[1].shape assert ch1==ch2 except: 
print (im[0].shape) print (im[1].shape) raise Exception('Something is wrong in read image') ims = np.concatenate(im, axis=2) #Crop the images h, w, ch = ims.shape x1 = int(max(0, w/2.0 - cropSz/2.0)) y1 = int(max(0, h/2.0 - cropSz/2.0)) dx, dy = get_jitter(jitAmt=jitAmt, jitPct=jitPct) x1, y1 = max(0,x1 + dx), max(0,y1 + dy ) x2, y2 = min(w, x1 + cropSz), min(h, y1 + cropSz) ims = ims[y1:y2, x1:x2,:] #Resize and transpose ims = cv2.resize(ims,(imSz, imSz)) ims = ims.transpose((2,0,1)) return ims def get_rots(gp, imPrms, lbPrms, idx): ''' gp : group lbPrms: parameter for computing the labels idx : tuple (n1,n2) indicating which grp elements to extract ''' rollMax = imPrms['random_roll_max'] if rollMax == 0: rollJitter = None else: rollJitter = slu.get_roll_jitter(rollMax) n1, n2 = idx if rollJitter is None: r1, r2 = gp.data[n1].rots, gp.data[n2].rots else: r1 = copy.deepcopy(gp.data[n1].rots) r2 = copy.deepcopy(gp.data[n2].rots) r1[2] = r1[2] + rollJitter[0] r2[2] = r2[2] + rollJitter[1] lb = slu.get_normalized_pose_delta(lbPrms, r1, r2, pt1=gp.data[n1].pts.camera, pt2=gp.data[n2].pts.camera, debugMode=lbPrms['debugMode']) lb = np.array(lb) imFolder = imPrms['imRootFolder'] % gp.folderId imName1 = osp.join(imFolder, gp.crpImNames[n1]) imName2 = osp.join(imFolder, gp.crpImNames[n2]) im = read_double_images(imName1, imName2, imPrms, rollJitter=rollJitter) return im, lb #Sample which image pair to chose from the group def sample_within_group(gp, lbPrms): if gp.num == 1: print ('WARNING: Only 1 element in the group') l1 = np.random.permutation(gp.num) l2 = np.random.permutation(gp.num) done = False for n1 in l1: for n2 in l2: #Sample the same image rarely if n1 == n2: rnd = np.random.random() if rnd < 0.85: continue lb = slu.get_pose_delta_clip(lbPrms, gp.data[n1].rots, gp.data[n2].rots, pt1=gp.data[n1].pts.camera, pt2=gp.data[n2].pts.camera, debugMode=lbPrms['debugMode']) if lb is None: done = False continue else: done = True break if done: break #If valid label is 
found if done: return n1, n2 else: return None, None def read_groups(args): grp, fetchPrms, lbPrms, idx = args if lbPrms['type'] == 'pose': im, lb = get_rots(grp, fetchPrms, lbPrms, idx) else: raise Exception('Label type %s not recognized' % lbPrms['type']) return (im, lb) ## #Read data directly from groups class PythonGroupDataRotsLayer(caffe.Layer): @classmethod def parse_args(cls, argsStr): parser = argparse.ArgumentParser(description='PythonGroupDataRots Layer') parser.add_argument('--im_root_folder', default='', type=str) #The file which contains the name of groups parser.add_argument('--grplist_file', default='', type=str) #File containing information what kind of labels #should be extractee etc. parser.add_argument('--lbinfo_file', default='', type=str) parser.add_argument('--mean_file', default='', type=str) parser.add_argument('--batch_size', default=128, type=int) parser.add_argument('--crop_size', default=192, type=int) parser.add_argument('--im_size', default=101, type=int) parser.add_argument('--is_gray', dest='is_gray', action='store_true') parser.add_argument('--no-is_gray', dest='is_gray', action='store_false') parser.add_argument('--random_roll_max', default=0, type=float) parser.add_argument('--is_mirror', dest='is_mirror', action='store_true', default=False) parser.add_argument('--resume_iter', default=0, type=int) parser.add_argument('--jitter_pct', default=0, type=float) parser.add_argument('--jitter_amt', default=0, type=int) parser.add_argument('--nrmlz_file', default='None', type=str) parser.add_argument('--ncpu', default=2, type=int) #For debugging - load a single group parser.add_argument('--is_single_grp', dest='is_single_grp', action='store_true', default=False ) parser.add_argument('--no-is_single_grp', dest='is_single_grp', action='store_false') args = parser.parse_args(argsStr.split()) print('Using Config:') pprint.pprint(args) return args def __del__(self): self.pool_.terminate() del self.jobs_ def load_mean(self): self.mu_ = None if 
self.param_.mean_file == 'None': self.mu_ = 128 * np.ones((6, self.param_.im_size, self.param_.im_size), np.float32) else: #Mean is assumbed to be in BGR format self.mu_ = mp.read_mean(self.param_.mean_file) self.mu_ = self.mu_.astype(np.float32) ch, h, w = self.mu_.shape assert (h >= self.param_.crop_size and w >= self.param_.crop_size) y1 = int(h/2 - (self.param_.crop_size/2)) x1 = int(w/2 - (self.param_.crop_size/2)) y2 = int(y1 + self.param_.crop_size) x2 = int(x1 + self.param_.crop_size) self.mu_ = self.mu_[:,y1:y2,x1:x2] def setup(self, bottom, top): #Initialize the parameters self.param_ = PythonGroupDataRotsLayer.parse_args(self.param_str) if self.param_.is_gray: self.ch_ = 1 else: self.ch_ = 3 #debug mode self.debugMode_ = False #Read the groups print ('Loading Group Data') grpNameDat = pickle.load(open(self.param_.grplist_file, 'r')) grpFiles = grpNameDat['grpFiles'] self.grpDat_ = [] self.grpCount_ = [] numGrp = 0 if self.param_.is_single_grp: grpFiles = [grpFiles[0]] for i,g in enumerate(grpFiles): self.grpDat_.append(pickle.load(open(g, 'r'))['groups']) self.grpCount_.append(len(self.grpDat_[i])) print ('Groups in %s: %d' % (g, self.grpCount_[i])) numGrp += self.grpCount_[i] print ('Total number of groups: %d' % numGrp) self.grpSampleProb_ = [float(i)/float(numGrp) for i in self.grpCount_] print (self.grpSampleProb_) print (np.sum(np.array(self.grpSampleProb_))) #Define the parameters required to read data self.fetchPrms_ = {} self.fetchPrms_['isMirror'] = self.param_.is_mirror self.fetchPrms_['isGray'] = self.param_.is_gray self.fetchPrms_['cropSz'] = self.param_.crop_size self.fetchPrms_['imSz'] = self.param_.im_size self.fetchPrms_['imRootFolder'] = self.param_.im_root_folder self.fetchPrms_['jitter_pct'] = self.param_.jitter_pct self.fetchPrms_['jitter_amt'] = self.param_.jitter_amt self.fetchPrms_['random_roll_max'] = self.param_.random_roll_max self.fetchPrms_['debugMode'] = self.debugMode_ #Parameters that define how labels should be computed 
lbDat = pickle.load(open(self.param_.lbinfo_file)) self.lbPrms_ = lbDat['lbInfo'] self.lbPrms_['debugMode'] = self.debugMode_ self.lblSz_ = self.lbPrms_['lbSz'] if self.lbPrms_['nrmlz'] is not None: nrmlzDat = pickle.load(open(self.lbPrms_['statsFile'], 'r')) self.lbPrms_['nrmlzDat'] = edict() self.lbPrms_['nrmlzDat']['mu'] = nrmlzDat['mu'] self.lbPrms_['nrmlzDat']['sd'] = nrmlzDat['sd'] print (self.lbPrms_) if self.debugMode_: self.lblSz_ += 3 top[0].reshape(self.param_.batch_size, 2 * self.ch_, self.param_.im_size, self.param_.im_size) top[1].reshape(self.param_.batch_size, self.lblSz_ + 1, 1, 1) #Load the mean self.load_mean() #Create pool if self.param_.ncpu > 0: self.pool_ = Pool(processes=self.param_.ncpu) self.jobs_ = None #placeholders for data self.imData_ = np.zeros((self.param_.batch_size, 2 * self.ch_, self.param_.im_size, self.param_.im_size), np.float32) self.labels_ = np.ones((self.param_.batch_size, self.lblSz_ + 1,1,1),np.float32) #Which functions to use for reading images if 'cv2' in globals(): print('OPEN CV FOUND') self.readfn_ = read_groups else: print('OPEN CV NOT FOUND, USING SCM') self.readfn_ = read_groups #Launch the prefetching self.launch_jobs() self.t_ = time.time() #Launch jobs def launch_jobs(self): self.argList = [] #Form the list of groups that should be used for b in range(self.param_.batch_size): count = 0 while True: count += 1 rand = np.random.multinomial(1, self.grpSampleProb_) grpIdx = np.where(rand==1)[0][0] ng = np.random.randint(low=0, high=self.grpCount_[grpIdx]) n1, n2 = sample_within_group(self.grpDat_[grpIdx][ng], self.lbPrms_) if n1 is not None: break if np.mod(count,100) == 1 and count > 1: print ('TRIED %d times, cannot find a sample' % count) self.argList.append([self.grpDat_[grpIdx][ng], self.fetchPrms_, self.lbPrms_, (n1,n2)]) if self.param_.ncpu > 0: #Launch the jobs try: self.jobs_ = self.pool_.map_async(self.readfn_, self.argList) except KeyboardInterrupt: self.pool_.terminate() print 'Keyboard Interrupt 
received - terminating in launch jobs' raise Exception('Error/Interrupt Encountered') def normalize_labels(self): pass def get_prefetch_data(self): t1 = time.time() if self.param_.ncpu > 0: try: imRes = self.jobs_.get(20) except: self.pool_.terminate() raise Exception('Error/Interrupt Encountered') else: #print (self.argList[0]) imRes = [] for b in range(self.param_.batch_size): imRes.append(self.readfn_(self.argList[b])) #pdb.set_trace() t2= time.time() tFetch = t2 - t1 bCount = 0 for b in range(self.param_.batch_size): if imRes[b][1] is not None: if self.mu_ is not None: self.imData_[b,:,:,:] = imRes[b][0] - self.mu_ else: self.imData_[b,:,:,:] = imRes[b][0] #print (imRes[b][1].shape) self.labels_[b,0:self.lblSz_,:,:] = imRes[b][1].reshape(1,self.lblSz_,1,1).astype(np.float32) bCount += 1 else: #Donot use the label, image pair self.imData_[b,:,:,:] = 0. self.labels_[b,:,:,:] = 0. print ('Number of valid images in batch: %d' % bCount) self.normalize_labels() #print ('%d, Fetching: %f, Copying: %f' % (n, tFetch, time.time()-t2)) #glog.info('%d, Fetching: %f, Copying: %f' % (n, tFetch, time.time()-t2)) def forward(self, bottom, top): t1 = time.time() tDiff = t1 - self.t_ #Load the images self.get_prefetch_data() top[0].data[...] = self.imData_ t2 = time.time() tFetch = t2-t1 #Read the labels top[1].data[...] 
= self.labels_ self.launch_jobs() t2 = time.time() #print ('Forward took %fs in PythonWindowDataParallelLayer' % (t2-t1)) glog.info('Prev: %f, fetch: %f forward: %f' % (tDiff,tFetch, t2-t1)) self.t_ = time.time() def backward(self, top, propagate_down, bottom): """ This layer has no backward """ pass def reshape(self, bottom, top): """ This layer has no reshape """ pass def test_group_rots(isPlot=True, debugMode=True): import vis_utils as vu import matplotlib.pyplot as plt fig = plt.figure() defFile = osp.join(MODULE_PATH, 'test/test_group_rots.prototxt') net = caffe.Net(defFile, caffe.TEST) while True: data = net.forward(blobs=['pair_data', 'pose_label']) im, pk = data['pair_data'], data['pose_label'] im += 128 im = im.astype(np.uint8) if isPlot: for b in range(10): if debugMode: rots = tuple(pk[b].squeeze())[0:6] else: rots = tuple(pk[b].squeeze())[0:3] rots = [(r * 180.)/np.pi for r in rots] figTitle = 'yaw: %f, pitch: %f, roll: %f \n yaw: %f, pitch:%f, roll: %f'\ % (rots[0], rots[1], rots[2], rots[3], rots[4], rots[5]) ax = vu.plot_pairs(im[b,0:3], im[b,3:6], isBlobFormat=True, chSwap=(2,1,0), fig=fig, figTitle=figTitle) plt.draw() plt.show() ip = raw_input() if ip == 'q': return
bsd-3-clause
zblz/naima
src/naima/model_fitter.py
1
10519
# Licensed under a 3-clause BSD style license - see LICENSE.rst import astropy.units as u import numpy as np from .core import _prefit, lnprobmodel from .extern.validator import validate_array from .plot import _plot_data_to_ax, color_cycle from .utils import sed_conversion, validate_data_table __all__ = ["InteractiveModelFitter"] def _process_model(model): if ( isinstance(model, tuple) or isinstance(model, list) ) and not isinstance(model, np.ndarray): return model[0] else: return model class InteractiveModelFitter: """ Interactive model fitter using matplotlib widgets Parameters ---------- modelfn : function A function that takes a vector in the parameter space and the data table, and returns the expected fluxes at the energies in the spectrum. p0 : array Initial position vector. data : `~astropy.table.Table` or list of `~astropy.table.Table` Table or tables with observed spectra. Must follow the format described in `naima.run_sampler`. e_range : list of `~astropy.units.Quantity`, length 2, optional Limits in energy for the computation of the model. Note that setting this parameter will mean that the model output is computed twice when `data` is provided: once for display using `e_range` and once for computation of the log-probability using the energy values of the spectra. e_npoints : int, optional How many points to compute for the model if `e_range` is set. Default is 100. labels : iterable of strings, optional Labels for the parameters included in the position vector ``p0``. If not provided ``['par1','par2', ... ,'parN']`` will be used. sed : bool, optional Whether to plot SED or differential spectrum. auto_update : bool, optional Whether to update the model plot when parameter sliders are changed. Default is True and can also be changed through the GUI. 
""" def __init__( self, modelfn, p0, data=None, e_range=None, e_npoints=100, labels=None, sed=True, auto_update=True, ): import matplotlib.pyplot as plt from matplotlib.widgets import Button, Slider, CheckButtons self.pars = p0 self.P0_IS_ML = False npars = len(p0) if labels is None: labels = ["par{0}".format(i) for i in range(npars)] elif len(labels) < npars: labels += ["par{0}".format(i) for i in range(len(labels), npars)] self.hasdata = data is not None self.data = None if self.hasdata: self.data = validate_data_table(data, sed=sed) self.modelfn = modelfn self.fig = plt.figure() modelax = plt.subplot2grid((10, 4), (0, 0), rowspan=4, colspan=4) if e_range is not None: e_range = validate_array( "e_range", u.Quantity(e_range), physical_type="energy" ) energy = ( np.logspace( np.log10(e_range[0].value), np.log10(e_range[1].value), e_npoints, ) * e_range.unit ) if self.hasdata: energy = energy.to(self.data["energy"].unit) else: e_unit = e_range.unit else: energy = np.logspace(-4, 2, e_npoints) * u.TeV e_unit = u.TeV # Bogus flux array to send to model if not using data if sed: flux = np.zeros(e_npoints) * u.Unit("erg/(cm2 s)") else: flux = np.zeros(e_npoints) * u.Unit("1/(TeV cm2 s)") if self.hasdata: e_unit = self.data["energy"].unit _plot_data_to_ax(self.data, modelax, sed=sed, e_unit=e_unit) if e_range is None: # use data for model energy = self.data["energy"] flux = self.data["flux"] self.data_for_model = {"energy": energy, "flux": flux} model = _process_model(self.modelfn(p0, self.data_for_model)) if self.hasdata: if not np.array_equal( self.data_for_model["energy"].to(u.eV).value, self.data["energy"].to(u.eV).value, ): # this will be slow, maybe interpolate already computed model? 
model_for_lnprob = _process_model( self.modelfn(self.pars, self.data) ) else: model_for_lnprob = model lnprob = lnprobmodel(model_for_lnprob, self.data) if isinstance(lnprob, u.Quantity): lnprob = lnprob.decompose().value self.lnprobtxt = modelax.text( 0.05, 0.05, r"", ha="left", va="bottom", transform=modelax.transAxes, size=20, ) self.lnprobtxt.set_text( r"$\ln\mathcal{{L}} = {0:.1f}$".format(lnprob) ) self.f_unit, self.sedf = sed_conversion(energy, model.unit, sed) if self.hasdata: datamin = ( self.data["energy"][0] - self.data["energy_error_lo"][0] ).to(e_unit).value / 3 xmin = min(datamin, energy[0].to(e_unit).value) datamax = ( self.data["energy"][-1] + self.data["energy_error_hi"][-1] ).to(e_unit).value * 3 xmax = max(datamax, energy[-1].to(e_unit).value) modelax.set_xlim(xmin, xmax) else: # plot_data_to_ax has not set ylabel unit = self.f_unit.to_string("latex_inline") if sed: modelax.set_ylabel(r"$E^2 dN/dE$ [{0}]".format(unit)) else: modelax.set_ylabel(r"$dN/dE$ [{0}]".format(unit)) modelax.set_xlim(energy[0].value, energy[-1].value) self.line, = modelax.loglog( energy.to(e_unit), (model * self.sedf).to(self.f_unit), lw=2, c="k", zorder=10, ) modelax.set_xlabel( "Energy [{0}]".format(energy.unit.to_string("latex_inline")) ) paraxes = [] for n in range(npars): paraxes.append( plt.subplot2grid((2 * npars, 10), (npars + n, 0), colspan=7) ) self.parsliders = [] slider_props = {"facecolor": color_cycle[-1], "alpha": 0.5} for label, parax, valinit in zip(labels, paraxes, p0): # Attempt to estimate reasonable parameter ranges from label pmin, pmax = valinit / 10, valinit * 3 if "log" in label: span = 2 if "norm" in label or "amplitude" in label: # give more range for normalization parameters span *= 2 pmin, pmax = valinit - span, valinit + span elif ("index" in label) or ("alpha" in label): if valinit > 0.0: pmin, pmax = 0, 5 else: pmin, pmax = -5, 0 elif "norm" in label or "amplitude" in label: # norm without log, it will not be pretty because sliders are # 
only linear pmin, pmax = valinit / 100, valinit * 100 slider = Slider( parax, label, pmin, pmax, valinit=valinit, valfmt="%.4g", **slider_props ) slider.on_changed(self.update_if_auto) self.parsliders.append(slider) autoupdateax = plt.subplot2grid((8, 4), (4, 3), colspan=1, rowspan=1) auto_update_check = CheckButtons( autoupdateax, ("Auto update",), (auto_update,) ) auto_update_check.on_clicked(self.update_autoupdate) self.autoupdate = auto_update updateax = plt.subplot2grid((8, 4), (5, 3), colspan=1, rowspan=1) update_button = Button(updateax, "Update model") update_button.on_clicked(self.update) if self.hasdata: fitax = plt.subplot2grid((8, 4), (6, 3), colspan=1, rowspan=1) fit_button = Button(fitax, "Do Nelder-Mead fit") fit_button.on_clicked(self.do_fit) closeax = plt.subplot2grid((8, 4), (7, 3), colspan=1, rowspan=1) close_button = Button(closeax, "Close window") close_button.on_clicked(self.close_fig) self.fig.subplots_adjust(top=0.98, right=0.98, bottom=0.02, hspace=0.2) plt.show() def update_autoupdate(self, label): self.autoupdate = not self.autoupdate def update_if_auto(self, val): if self.autoupdate: self.update(val) def update(self, event): # If we update, values have changed and P0 is not ML anymore self.P0_IS_ML = False self.pars = [slider.val for slider in self.parsliders] model = _process_model(self.modelfn(self.pars, self.data_for_model)) self.line.set_ydata((model * self.sedf).to(self.f_unit)) if self.hasdata: if not np.array_equal( self.data_for_model["energy"].to(u.eV).value, self.data["energy"].to(u.eV).value, ): # this will be slow, maybe interpolate already computed model? 
model = _process_model(self.modelfn(self.pars, self.data)) lnprob = lnprobmodel(model, self.data) if isinstance(lnprob, u.Quantity): lnprob = lnprob.decompose().value self.lnprobtxt.set_text( r"$\ln\mathcal{{L}} = {0:.1f}$".format(lnprob) ) self.fig.canvas.draw_idle() def do_fit(self, event): self.pars = [slider.val for slider in self.parsliders] self.pars, P0_IS_ML = _prefit(self.pars, self.data, self.modelfn, None) autoupdate = self.autoupdate self.autoupdate = False if P0_IS_ML: for slider, val in zip(self.parsliders, self.pars): slider.set_val(val) self.update("after_fit") self.autoupdate = autoupdate self.P0_IS_ML = P0_IS_ML def close_fig(self, event): import matplotlib.pyplot as plt plt.close(self.fig)
bsd-3-clause
ssorgatem/qiime
scripts/plot_taxa_summary.py
15
12355
#!/usr/bin/env python # File created on 19 Jan 2011 from __future__ import division __author__ = "Jesse Stombaugh" __copyright__ = "Copyright 2011, The QIIME project" __credits__ = ["Jesse Stombaugh", "Julia Goodrich", "Justin Kuczynski", "John Chase", "Jose Antonio Navas Molina"] __license__ = "GPL" __version__ = "1.9.1-dev" __maintainer__ = "Jesse Stombaugh" __email__ = "jesse.stombaugh@colorado.edu" """ This script generates taxonomy charts """ from qiime.util import parse_command_line_parameters, get_qiime_project_dir from qiime.util import make_option from qiime.util import create_dir from qiime.plot_taxa_summary import make_all_charts from tempfile import mkdtemp from qiime.colors import taxonomy_color_prefs_and_map_data_from_options import re import matplotlib import os import shutil plot_filetype_choices = ['pdf', 'svg', 'png'] script_info = {} script_info['brief_description'] = """Make taxaonomy summary charts based on\ taxonomy assignment""" script_info['script_description'] = """This script automates the construction\ of pie, bar and area charts showing the breakdown of taxonomy by given levels.\ The script creates an html file for each chart type for easy visualization. It\ uses the taxonomy or category counts from summarize_taxa.py for combined\ samples by level (-i) and user specified labels for each file passed in (-l).\ Output will be written to the user specified folder (-o) the, where the\ default is the current working directory. The user can also specify the number\ of categories displayed for within a single pie chart, where the rest are\ grouped together as the 'other category' using the (-n) option, default is 20. 
""" script_info['script_usage'] = [] script_info['script_usage'].append(("""Examples:""", """If you wish to run the code using default parameters, you must supply a\ counts file (phylum.txt) along with the taxon level label (Phylum), the\ type(s) of charts to produce, and an output directory, by using the following\ command:""", """%prog -i phylum.txt -l phylum -c pie,bar,area -o phylum_charts/""")) script_info['script_usage'].append(("""""", """If you want to make charts for multiple levels at a time\ (phylum.txt,class.txt,genus.txt) use the following command:""", """%prog -i phylum.txt,class.txt,genus.txt -l Phylum,Class,Genus\ -c pie,bar,area -o phylum_class_genus_charts/""")) script_info['script_usage'].append(("""""", """Additionally, if you would like to display on a set number of taxa ("-n 10")\ in the pie charts, you can use the following command:""", """%prog -i class.txt -l Class -c pie -n 10 -o class_pie_n10_charts/""")) script_info['script_usage'].append(("""""", """If you would like to display generate pie charts for specific samples, i.e.\ sample 'PC.636' and sample 'PC.635' that are in the counts file header, you\ can use the following command:""", """%prog -i class.txt -l Class -b PC.636,PC.635 -o sample_charts/""")) script_info['output_description'] = """The script generates an output folder,\ which contains several files. For each pie chart there is a png and a pdf\ file. The best way to view all of the pie charts is by opening up the file\ taxonomy_summary_pie_chart.html.""" script_info['required_options'] = [ # dest should equal long-form parameter names! Can you clean this up? 
# Also note that you don't need to pass type='string' - that's the default make_option('-i', '--counts_fname', help='Input comma-separated list of summarized taxa filepaths' + ' (i.e results from summarize_taxa.py) [REQUIRED]', type='existing_filepaths'), ] script_info['optional_options'] = [ # changed this from type='string' (default) to type='int' make_option('-l', '--labels', help='Comma-separated list of taxonomic levels (e.g.' + ' Phylum,Class,Order) [default=%default]', default=None), make_option('-n', '--num_categories', dest='num_categories', help='The maximum number of taxonomies to show in each pie chart.' + ' All additional taxonomies are grouped into an "other" category.' + ' NOTE: this functionality only applies to the pie charts.' + ' [default: %default]', default=20, type='int'), make_option('-o', '--dir_path', help='Output directory', type='new_dirpath'), make_option('-b', '--colorby', dest='colorby', type='string', help='This is the categories to color by in the plots from the' + ' metadata mapping file. The categories must match the name of a ' + ' column header in the mapping file exactly and multiple categories' + ' can be list by comma separating them without spaces.' + ' [default=%default]'), make_option('-p', '--prefs_path', help='Input user-generated preferences filepath. NOTE: This is a' + ' file with a dictionary containing preferences for the analysis.' + ' The key taxonomy_coloring is used for the coloring.' + ' [default: %default]', type='existing_filepath'), make_option('-k', '--background_color', help='This is the background color to use in the plots' + ' (black or white) [default: %default]', default='white', type='choice', choices=['black', 'white'],), make_option('-d', '--dpi', help='This is the resolution of the plot. [default: %default]', type='int', default=80), make_option('-x', '--x_width', help='This is the width of the x-axis to use in the plots.' 
+ ' [default: %default]', default=12, type='int'), make_option('-y', '--y_height', help='This is the height of the y-axis to use in the plots.' + ' [default: %default]', default=6, type='int'), make_option('-w', '--bar_width', help='This the width of the bars in the bar graph and should be a' + ' number between 0 and 1. NOTE: this only applies to the bar charts.' + ' [default: %default]', default=0.75, type='float'), make_option('-t', '--type_of_file', type='choice', help='This is the type of image to produce (i.e. ' + ','.join(plot_filetype_choices) + '). [default: %default]', choices=plot_filetype_choices, default='pdf'), make_option('-c', '--chart_type', type='multiple_choice', mchoices=['pie', 'bar', 'area'], help='This is the type of chart to plot (i.e. pie, bar or area).' + ' The user has the ability to plot multiple types, by using a' + ' comma-separated list (e.g. area,pie) [default: %default]', default='area,bar'), make_option('-r', '--resize_nth_label', type='int', help='Make every nth label larger than the other lables.' + ' This is for large area and bar charts where the font on the x-axis' + ' is small. This requires an integer value greater than 0.' + ' [default: %default]', default=0), make_option('-s', '--include_html_legend', action='store_true', dest='include_html_legend', default=False, help='Include HTML legend. If present, the writing of the legend' + ' in the html page is included. [default: %default]'), make_option('-a', '--label_type', type='choice', help='Label type ("numeric" or "categorical"). ' + ' If the label type is defined as numeric, the x-axis will be' + ' scaled accordingly. 
Otherwise the x-values will treated' + ' categorically and be evenly spaced [default: %default].', choices=['categorical', 'numeric'], default='categorical'), ] script_info['version'] = __version__ def main(): option_parser, opts, args = parse_command_line_parameters(**script_info) # get QIIME directory qiime_dir = get_qiime_project_dir() if not opts.counts_fname: option_parser.error("A list of input files must be specified") # get color preferences color_prefs, color_data, background_color, label_color = \ taxonomy_color_prefs_and_map_data_from_options(opts) colorby = opts.colorby if colorby is None: colorby = [] for c in color_data['counts'].values(): colorby.extend(c[0]) else: colorby = colorby.strip().strip("'").split(',') counts_fname = opts.counts_fname # Define labels to use labels = opts.labels if not opts.labels: new_labels = [] # create an empty list since the user didn't specify labels for i in counts_fname: new_labels.append("") labels = ','.join(new_labels) data = [(label, f.strip()) for f, label in zip(counts_fname, labels.split(","))] filepath = data[0][1] filename = filepath.strip().rpartition('/')[0] num_categories = int(opts.num_categories) if num_categories <= 0: raise ValueError('The number of categories has to be greater than 0!') # create directory path dir_path = os.getcwd() if opts.dir_path: dir_path = opts.dir_path try: create_dir(opts.dir_path) except OSError: pass # make javascript output directory javascript_path = os.path.join(dir_path, 'js') try: create_dir(javascript_path) except OSError: # raised if dir exists pass # make raw_data output directory raw_data_path = os.path.join(dir_path, 'raw_data') try: create_dir(raw_data_path) except OSError: # raised if dir exists pass # move javascript file to javascript output directory shutil.copyfile(os.path.join(qiime_dir, 'qiime', 'support_files', 'js/overlib.js'), os.path.join(javascript_path, 'overlib.js')) # make css output directory css_path = os.path.join(dir_path, 'css') try: 
create_dir(css_path) except OSError: # raised if dir exists pass # move css file to css output directory shutil.copyfile(os.path.join(qiime_dir, 'qiime', 'support_files', 'css/qiime_style.css'), os.path.join(css_path, 'qiime_style.css')) # verify all parameters are valid plot_width = float(opts.x_width) if plot_width <= 0: raise ValueError('The width of the plot has to be greater than 0!') plot_height = float(opts.y_height) if plot_height <= 0: raise ValueError('The height of the plot has to be greater than 0!') bar_width = float(opts.bar_width) if bar_width <= 0 or bar_width > 1: raise ValueError( 'The bar width of the plot has to be between 0 and 1!') dpi = float(opts.dpi) if dpi <= 0: raise ValueError('The dpi of the plot has to be greater than 0!') resize_nth_label = int(opts.resize_nth_label) if resize_nth_label < 0: raise ValueError('The resize_nth_label of the plot has to be greater\ than 0!') generate_image_type = opts.type_of_file label_type = opts.label_type include_html_legend = opts.include_html_legend plots_to_make = opts.chart_type for chart_type in plots_to_make: # make pie chart output path charts_path = os.path.join(dir_path, 'charts') try: create_dir(charts_path) except OSError: # raised if dir exists pass make_all_charts(data, dir_path, filename, num_categories, colorby, args, color_data, color_prefs, background_color, label_color, chart_type, generate_image_type, plot_width, plot_height, bar_width, dpi, resize_nth_label, label_type, include_html_legend) if __name__ == "__main__": main()
gpl-2.0
pivotalsoftware/pymadlib
setup.py
1
4378
from setuptools import setup, find_packages from distutils.util import convert_path import os,sys from fnmatch import fnmatchcase # Provided as an attribute, so you can append to these instead # of replicating them: standard_exclude = ('*.py', '*.pyc', '*$py.class', '*~', '.*', '*.bak') standard_exclude_directories = ('.*', 'CVS', '_darcs', './build', './dist', 'EGG-INFO', '*.egg-info') # (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org) # Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php # Note: you may want to copy this into your setup.py file verbatim, as # you can't import this from another package, when you don't know if # that package is installed yet. def find_package_data( where='.', package='', exclude=standard_exclude, exclude_directories=standard_exclude_directories, only_in_packages=True, show_ignored=False): """ Return a dictionary suitable for use in ``package_data`` in a distutils ``setup.py`` file. The dictionary looks like:: {'package': [files]} Where ``files`` is a list of all the files in that package that don't match anything in ``exclude``. If ``only_in_packages`` is true, then top-level directories that are not packages won't be included (but directories under packages will). Directories matching any pattern in ``exclude_directories`` will be ignored; by default directories with leading ``.``, ``CVS``, and ``_darcs`` will be ignored. If ``show_ignored`` is true, then all the files that aren't included in package data are shown on stderr (for debugging purposes). Note patterns use wildcards, or can be exact paths (including leading ``./``), and all searching is case-insensitive. 
""" out = {} stack = [(convert_path(where), '', package, only_in_packages)] while stack: where, prefix, package, only_in_packages = stack.pop(0) for name in os.listdir(where): fn = os.path.join(where, name) if os.path.isdir(fn): bad_name = False for pattern in exclude_directories: if (fnmatchcase(name, pattern) or fn.lower() == pattern.lower()): bad_name = True if show_ignored: print >> sys.stderr, ( "Directory %s ignored by pattern %s" % (fn, pattern)) break if bad_name: continue if (os.path.isfile(os.path.join(fn, '__init__.py')) and not prefix): if not package: new_package = name else: new_package = package + '.' + name stack.append((fn, '', new_package, False)) else: stack.append((fn, prefix + name + '/', package, only_in_packages)) elif package or not only_in_packages: # is a file bad_name = False for pattern in exclude: if (fnmatchcase(name, pattern) or fn.lower() == pattern.lower()): bad_name = True if show_ignored: print >> sys.stderr, ( "File %s ignored by pattern %s" % (fn, pattern)) break if bad_name: continue out.setdefault(package, []).append(prefix+name) return out setup( name='pymadlib', version='1.0', author='Srivatsan Ramanujam', author_email='vatsan.cs@utexas.edu', url='http://vatsan.github.com/pymadlib', packages=find_packages(), package_data=find_package_data(only_in_packages=False,show_ignored=True), include_package_data=True, license='LICENSE.txt', description='A Python wrapper for MADlib (http://madlib.net) - an open source library for scalable in-database machine learning algorithms', long_description=open('README.txt').read(), install_requires=[ "psycopg2 >= 2.4.5", "pandas >= 0.13.0" ], )
bsd-2-clause
FNCS/ns-3.22
src/flow-monitor/examples/wifi-olsr-flowmon.py
108
7439
# -*- Mode: Python; -*- # Copyright (c) 2009 INESC Porto # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 as # published by the Free Software Foundation; # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # # Authors: Gustavo Carneiro <gjc@inescporto.pt> import sys import ns.applications import ns.core import ns.flow_monitor import ns.internet import ns.mobility import ns.network import ns.olsr import ns.wifi try: import ns.visualizer except ImportError: pass DISTANCE = 100 # (m) NUM_NODES_SIDE = 3 def main(argv): cmd = ns.core.CommandLine() cmd.NumNodesSide = None cmd.AddValue("NumNodesSide", "Grid side number of nodes (total number of nodes will be this number squared)") cmd.Results = None cmd.AddValue("Results", "Write XML results to file") cmd.Plot = None cmd.AddValue("Plot", "Plot the results using the matplotlib python module") cmd.Parse(argv) wifi = ns.wifi.WifiHelper.Default() wifiMac = ns.wifi.NqosWifiMacHelper.Default() wifiPhy = ns.wifi.YansWifiPhyHelper.Default() wifiChannel = ns.wifi.YansWifiChannelHelper.Default() wifiPhy.SetChannel(wifiChannel.Create()) ssid = ns.wifi.Ssid("wifi-default") wifi.SetRemoteStationManager("ns3::ArfWifiManager") wifiMac.SetType ("ns3::AdhocWifiMac", "Ssid", ns.wifi.SsidValue(ssid)) internet = ns.internet.InternetStackHelper() list_routing = ns.internet.Ipv4ListRoutingHelper() olsr_routing = ns.olsr.OlsrHelper() static_routing = ns.internet.Ipv4StaticRoutingHelper() list_routing.Add(static_routing, 0) list_routing.Add(olsr_routing, 100) 
internet.SetRoutingHelper(list_routing) ipv4Addresses = ns.internet.Ipv4AddressHelper() ipv4Addresses.SetBase(ns.network.Ipv4Address("10.0.0.0"), ns.network.Ipv4Mask("255.255.255.0")) port = 9 # Discard port(RFC 863) onOffHelper = ns.applications.OnOffHelper("ns3::UdpSocketFactory", ns.network.Address(ns.network.InetSocketAddress(ns.network.Ipv4Address("10.0.0.1"), port))) onOffHelper.SetAttribute("DataRate", ns.network.DataRateValue(ns.network.DataRate("100kbps"))) onOffHelper.SetAttribute("OnTime", ns.core.StringValue ("ns3::ConstantRandomVariable[Constant=1]")) onOffHelper.SetAttribute("OffTime", ns.core.StringValue ("ns3::ConstantRandomVariable[Constant=0]")) addresses = [] nodes = [] if cmd.NumNodesSide is None: num_nodes_side = NUM_NODES_SIDE else: num_nodes_side = int(cmd.NumNodesSide) for xi in range(num_nodes_side): for yi in range(num_nodes_side): node = ns.network.Node() nodes.append(node) internet.Install(ns.network.NodeContainer(node)) mobility = ns.mobility.ConstantPositionMobilityModel() mobility.SetPosition(ns.core.Vector(xi*DISTANCE, yi*DISTANCE, 0)) node.AggregateObject(mobility) devices = wifi.Install(wifiPhy, wifiMac, node) ipv4_interfaces = ipv4Addresses.Assign(devices) addresses.append(ipv4_interfaces.GetAddress(0)) for i, node in enumerate(nodes): destaddr = addresses[(len(addresses) - 1 - i) % len(addresses)] #print i, destaddr onOffHelper.SetAttribute("Remote", ns.network.AddressValue(ns.network.InetSocketAddress(destaddr, port))) app = onOffHelper.Install(ns.network.NodeContainer(node)) urv = ns.core.UniformRandomVariable() app.Start(ns.core.Seconds(urv.GetValue(20, 30))) #internet.EnablePcapAll("wifi-olsr") flowmon_helper = ns.flow_monitor.FlowMonitorHelper() #flowmon_helper.SetMonitorAttribute("StartTime", ns.core.TimeValue(ns.core.Seconds(31))) monitor = flowmon_helper.InstallAll() monitor = flowmon_helper.GetMonitor() monitor.SetAttribute("DelayBinWidth", ns.core.DoubleValue(0.001)) monitor.SetAttribute("JitterBinWidth", 
ns.core.DoubleValue(0.001)) monitor.SetAttribute("PacketSizeBinWidth", ns.core.DoubleValue(20)) ns.core.Simulator.Stop(ns.core.Seconds(44.0)) ns.core.Simulator.Run() def print_stats(os, st): print >> os, " Tx Bytes: ", st.txBytes print >> os, " Rx Bytes: ", st.rxBytes print >> os, " Tx Packets: ", st.txPackets print >> os, " Rx Packets: ", st.rxPackets print >> os, " Lost Packets: ", st.lostPackets if st.rxPackets > 0: print >> os, " Mean{Delay}: ", (st.delaySum.GetSeconds() / st.rxPackets) print >> os, " Mean{Jitter}: ", (st.jitterSum.GetSeconds() / (st.rxPackets-1)) print >> os, " Mean{Hop Count}: ", float(st.timesForwarded) / st.rxPackets + 1 if 0: print >> os, "Delay Histogram" for i in range(st.delayHistogram.GetNBins () ): print >> os, " ",i,"(", st.delayHistogram.GetBinStart (i), "-", \ st.delayHistogram.GetBinEnd (i), "): ", st.delayHistogram.GetBinCount (i) print >> os, "Jitter Histogram" for i in range(st.jitterHistogram.GetNBins () ): print >> os, " ",i,"(", st.jitterHistogram.GetBinStart (i), "-", \ st.jitterHistogram.GetBinEnd (i), "): ", st.jitterHistogram.GetBinCount (i) print >> os, "PacketSize Histogram" for i in range(st.packetSizeHistogram.GetNBins () ): print >> os, " ",i,"(", st.packetSizeHistogram.GetBinStart (i), "-", \ st.packetSizeHistogram.GetBinEnd (i), "): ", st.packetSizeHistogram.GetBinCount (i) for reason, drops in enumerate(st.packetsDropped): print " Packets dropped by reason %i: %i" % (reason, drops) #for reason, drops in enumerate(st.bytesDropped): # print "Bytes dropped by reason %i: %i" % (reason, drops) monitor.CheckForLostPackets() classifier = flowmon_helper.GetClassifier() if cmd.Results is None: for flow_id, flow_stats in monitor.GetFlowStats(): t = classifier.FindFlow(flow_id) proto = {6: 'TCP', 17: 'UDP'} [t.protocol] print "FlowID: %i (%s %s/%s --> %s/%i)" % \ (flow_id, proto, t.sourceAddress, t.sourcePort, t.destinationAddress, t.destinationPort) print_stats(sys.stdout, flow_stats) else: print 
monitor.SerializeToXmlFile(cmd.Results, True, True) if cmd.Plot is not None: import pylab delays = [] for flow_id, flow_stats in monitor.GetFlowStats(): tupl = classifier.FindFlow(flow_id) if tupl.protocol == 17 and tupl.sourcePort == 698: continue delays.append(flow_stats.delaySum.GetSeconds() / flow_stats.rxPackets) pylab.hist(delays, 20) pylab.xlabel("Delay (s)") pylab.ylabel("Number of Flows") pylab.show() return 0 if __name__ == '__main__': sys.exit(main(sys.argv))
gpl-2.0
Xevaquor/the-art-of-ai
ann/ml_plot.py
1
1759
# -*- coding: utf-8 -*- """ The art of an Artificial Intelligence http://art-of-ai.com https://github.com/Xevaquor/the-art-of-ai Module in this version does not handle w = [0,0,0] or w = [_, _, 0] properly """ __author__ = 'xevaquor' __license__ = 'MIT' import numpy as np import matplotlib.pyplot as plt def init_common(plot_size=[-5,5,-5,5]): plt.grid() ax = plt.axes() ax.set_aspect('equal') plt.axis(plot_size) def plot_boundary(w, fill=True, linecolor='black', pos_color='green', neg_color='purple'): X = np.linspace(-100,100,50) Y = (-w[1] * X - w[0]) / w[2] plt.plot(X, Y, color=linecolor) if not fill: return ax = plt.axes() angle = np.arctan2(w[2], w[1]) if angle < 0: pos_color, neg_color = neg_color, pos_color ax.fill_between(X, Y, y2=-100, interpolate=True, color=neg_color, alpha=0.2) ax.fill_between(X, Y, y2=100, interpolate=True, color=pos_color, alpha=0.2) def plot_normal_vector(w, head_len=0.5, head_width=0.3, color='black'): d = -w[0] / np.linalg.norm(w[1:]) angle = np.arctan2(w[2], w[1]) xoffset = np.cos(angle) * d yoffset = np.sin(angle) * d ax = plt.axes() if np.abs(head_len) > np.max(np.abs(w[1:])): print('[ml_plot] Arrow too $short, omitting') # yes, I know it is possible to # handle it return dx = w[1] - head_len * np.cos(angle) dy = w[2] - head_len * np.sin(angle) ax.arrow(xoffset, yoffset, dx, dy,ec=color, fc=color, head_width=head_width, head_length = head_len, antialiased=True) if __name__ == '__main__': w = [-2,2,2] init_common() plot_boundary(w, neg_color='orange') plot_normal_vector(w)
gpl-3.0
Nyker510/scikit-learn
examples/cross_decomposition/plot_compare_cross_decomposition.py
142
4761
""" =================================== Compare cross decomposition methods =================================== Simple usage of various cross decomposition algorithms: - PLSCanonical - PLSRegression, with multivariate response, a.k.a. PLS2 - PLSRegression, with univariate response, a.k.a. PLS1 - CCA Given 2 multivariate covarying two-dimensional datasets, X, and Y, PLS extracts the 'directions of covariance', i.e. the components of each datasets that explain the most shared variance between both datasets. This is apparent on the **scatterplot matrix** display: components 1 in dataset X and dataset Y are maximally correlated (points lie around the first diagonal). This is also true for components 2 in both dataset, however, the correlation across datasets for different components is weak: the point cloud is very spherical. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn.cross_decomposition import PLSCanonical, PLSRegression, CCA ############################################################################### # Dataset based latent variables model n = 500 # 2 latents vars: l1 = np.random.normal(size=n) l2 = np.random.normal(size=n) latents = np.array([l1, l1, l2, l2]).T X = latents + np.random.normal(size=4 * n).reshape((n, 4)) Y = latents + np.random.normal(size=4 * n).reshape((n, 4)) X_train = X[:n / 2] Y_train = Y[:n / 2] X_test = X[n / 2:] Y_test = Y[n / 2:] print("Corr(X)") print(np.round(np.corrcoef(X.T), 2)) print("Corr(Y)") print(np.round(np.corrcoef(Y.T), 2)) ############################################################################### # Canonical (symmetric) PLS # Transform data # ~~~~~~~~~~~~~~ plsca = PLSCanonical(n_components=2) plsca.fit(X_train, Y_train) X_train_r, Y_train_r = plsca.transform(X_train, Y_train) X_test_r, Y_test_r = plsca.transform(X_test, Y_test) # Scatter plot of scores # ~~~~~~~~~~~~~~~~~~~~~~ # 1) On diagonal plot X vs Y scores on each components plt.figure(figsize=(12, 8)) plt.subplot(221) 
plt.plot(X_train_r[:, 0], Y_train_r[:, 0], "ob", label="train") plt.plot(X_test_r[:, 0], Y_test_r[:, 0], "or", label="test") plt.xlabel("x scores") plt.ylabel("y scores") plt.title('Comp. 1: X vs Y (test corr = %.2f)' % np.corrcoef(X_test_r[:, 0], Y_test_r[:, 0])[0, 1]) plt.xticks(()) plt.yticks(()) plt.legend(loc="best") plt.subplot(224) plt.plot(X_train_r[:, 1], Y_train_r[:, 1], "ob", label="train") plt.plot(X_test_r[:, 1], Y_test_r[:, 1], "or", label="test") plt.xlabel("x scores") plt.ylabel("y scores") plt.title('Comp. 2: X vs Y (test corr = %.2f)' % np.corrcoef(X_test_r[:, 1], Y_test_r[:, 1])[0, 1]) plt.xticks(()) plt.yticks(()) plt.legend(loc="best") # 2) Off diagonal plot components 1 vs 2 for X and Y plt.subplot(222) plt.plot(X_train_r[:, 0], X_train_r[:, 1], "*b", label="train") plt.plot(X_test_r[:, 0], X_test_r[:, 1], "*r", label="test") plt.xlabel("X comp. 1") plt.ylabel("X comp. 2") plt.title('X comp. 1 vs X comp. 2 (test corr = %.2f)' % np.corrcoef(X_test_r[:, 0], X_test_r[:, 1])[0, 1]) plt.legend(loc="best") plt.xticks(()) plt.yticks(()) plt.subplot(223) plt.plot(Y_train_r[:, 0], Y_train_r[:, 1], "*b", label="train") plt.plot(Y_test_r[:, 0], Y_test_r[:, 1], "*r", label="test") plt.xlabel("Y comp. 1") plt.ylabel("Y comp. 2") plt.title('Y comp. 1 vs Y comp. 2 , (test corr = %.2f)' % np.corrcoef(Y_test_r[:, 0], Y_test_r[:, 1])[0, 1]) plt.legend(loc="best") plt.xticks(()) plt.yticks(()) plt.show() ############################################################################### # PLS regression, with multivariate response, a.k.a. 
PLS2 n = 1000 q = 3 p = 10 X = np.random.normal(size=n * p).reshape((n, p)) B = np.array([[1, 2] + [0] * (p - 2)] * q).T # each Yj = 1*X1 + 2*X2 + noize Y = np.dot(X, B) + np.random.normal(size=n * q).reshape((n, q)) + 5 pls2 = PLSRegression(n_components=3) pls2.fit(X, Y) print("True B (such that: Y = XB + Err)") print(B) # compare pls2.coefs with B print("Estimated B") print(np.round(pls2.coefs, 1)) pls2.predict(X) ############################################################################### # PLS regression, with univariate response, a.k.a. PLS1 n = 1000 p = 10 X = np.random.normal(size=n * p).reshape((n, p)) y = X[:, 0] + 2 * X[:, 1] + np.random.normal(size=n * 1) + 5 pls1 = PLSRegression(n_components=3) pls1.fit(X, y) # note that the number of compements exceeds 1 (the dimension of y) print("Estimated betas") print(np.round(pls1.coefs, 1)) ############################################################################### # CCA (PLS mode B with symmetric deflation) cca = CCA(n_components=2) cca.fit(X_train, Y_train) X_train_r, Y_train_r = plsca.transform(X_train, Y_train) X_test_r, Y_test_r = plsca.transform(X_test, Y_test)
bsd-3-clause
SKA-ScienceDataProcessor/algorithm-reference-library
deprecated_code/workflows/mpi/plot-results.py
1
2132
#!/usr/bim/python # Script to plot the performance/scaling results import os import sys sys.path.append(os.path.join('..', '..')) results_dir = './results/timing-csd3/dask' #from matplotlib import pylab #pylab.rcParams['figure.figsize'] = (12.0, 12.0) #pylab.rcParams['image.cmap'] = 'rainbow' import numpy from matplotlib import pyplot as plt def read_results_file(filename): """ Read the results from a file and returns them as structured numpy array The order is (number_nodes, number_procs, numfreqw, time in sec) :param filename: filename :return: List of tuples as above """ d=numpy.loadtxt('%s/%s' %(results_dir,filename), dtype={'names': ('numnodes','numprocs','nfreqw','time'), 'formats': ('i','i','i','f')}, delimiter='\t') print(d) return d def plot_freqwin(data,fignum,figtitle): """ data is a 1d numpy array with the structure: names': ('numnodes','numprocs','nfreqw','time'), formats': ('i','i','i','f')}, """ nfreqwin_list=[41,71,101,203] plot_shapes=['ro','bs','g^','y*'] fig=plt.figure(fignum) fig.suptitle(figtitle) for i,nfreqwin in enumerate(nfreqwin_list): procs= [x['numprocs'] for x in data if x['nfreqw']==nfreqwin] nodes= [x['numnodes'] for x in data if x['nfreqw']==nfreqwin] times= [x['time'] for x in data if x['nfreqw']==nfreqwin] #plt.plot(procs,times,plot_shapes[i],linestyle='--',label="nfreqwin %d" % # (nfreqwin)) plt.plot(nodes,times,plot_shapes[i],linestyle='--',label="nfreqwin %d" % (nfreqwin)) plt.ylabel('time in seconds') plt.xlabel('number of processes') plt.yscale('log') plt.xscale('log') plt.legend(prop={'size':10}) #fig.savefig('%s-fig.jpg'%(figtitle)) return def main(): file_list=['predict','invert','contimg','ical'] for i,f in enumerate(file_list): d=read_results_file("%s-results.txt"%(f)) plot_freqwin(d,i,f) plt.show() return if __name__ == '__main__': main()
apache-2.0
hunter-cameron/Bioinformatics
job_scripts/plate_scrapes/checkm/compare_checkm.py
1
1571
import argparse import sys import pandas as pd def read_data_table(data_f): """ Converts raw CheckM data into a data frame. Should have an option to accept a .csv file from CheckM """ # return a df with the first column as an index and the first row as headers return pd.read_csv(data_f, sep="\t", header=0, index_col=0) def make_fraction_matrix(df1, df2, columns): """ Returns a matrix that contains the fraction the second data frame is of the first for each specified column """ return df2[columns].divide(df1[columns], axis=0, fill_value=0) def main(ref_f, queries_f): ref = read_data_table(ref_f) queries = [] for query_f in queries_f: queries.append(read_data_table(query_f)) #print(ref[['completeness', 'contamination']]) #print(queries[0][['completeness', 'contamination']]) #sys.exit() for query, file in zip(queries, queries_f): frac_matr = make_fraction_matrix(ref, query, ['completeness', 'contamination', 'genome_size']) print(frac_matr) print("{} Summary:".format(file)) print(frac_matr.sum(0, skipna=True)) if __name__ == "__main__": parser = argparse.ArgumentParser(description="Compares CheckM outputs") parser.add_argument("-ref", help="reference CheckM dataset, this should include the same isolates as all other sets", required=True) parser.add_argument("-query", help="one or more CheckM datasets to compare to the ref", nargs="+", required=True) args = parser.parse_args() main(args.ref, args.query)
mit
joernhees/scikit-learn
sklearn/tests/test_multioutput.py
23
12429
from __future__ import division import numpy as np import scipy.sparse as sp from sklearn.utils import shuffle from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_false from sklearn.utils.testing import assert_raises_regex from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_not_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.exceptions import NotFittedError from sklearn import datasets from sklearn.base import clone from sklearn.ensemble import GradientBoostingRegressor, RandomForestClassifier from sklearn.linear_model import Lasso from sklearn.linear_model import SGDClassifier from sklearn.linear_model import SGDRegressor from sklearn.linear_model import LogisticRegression from sklearn.svm import LinearSVC from sklearn.multiclass import OneVsRestClassifier from sklearn.multioutput import MultiOutputRegressor, MultiOutputClassifier def test_multi_target_regression(): X, y = datasets.make_regression(n_targets=3) X_train, y_train = X[:50], y[:50] X_test, y_test = X[50:], y[50:] references = np.zeros_like(y_test) for n in range(3): rgr = GradientBoostingRegressor(random_state=0) rgr.fit(X_train, y_train[:, n]) references[:, n] = rgr.predict(X_test) rgr = MultiOutputRegressor(GradientBoostingRegressor(random_state=0)) rgr.fit(X_train, y_train) y_pred = rgr.predict(X_test) assert_almost_equal(references, y_pred) def test_multi_target_regression_partial_fit(): X, y = datasets.make_regression(n_targets=3) X_train, y_train = X[:50], y[:50] X_test, y_test = X[50:], y[50:] references = np.zeros_like(y_test) half_index = 25 for n in range(3): sgr = SGDRegressor(random_state=0) sgr.partial_fit(X_train[:half_index], y_train[:half_index, n]) sgr.partial_fit(X_train[half_index:], y_train[half_index:, n]) references[:, n] = sgr.predict(X_test) sgr = 
MultiOutputRegressor(SGDRegressor(random_state=0)) sgr.partial_fit(X_train[:half_index], y_train[:half_index]) sgr.partial_fit(X_train[half_index:], y_train[half_index:]) y_pred = sgr.predict(X_test) assert_almost_equal(references, y_pred) def test_multi_target_regression_one_target(): # Test multi target regression raises X, y = datasets.make_regression(n_targets=1) rgr = MultiOutputRegressor(GradientBoostingRegressor(random_state=0)) assert_raises(ValueError, rgr.fit, X, y) def test_multi_target_sparse_regression(): X, y = datasets.make_regression(n_targets=3) X_train, y_train = X[:50], y[:50] X_test = X[50:] for sparse in [sp.csr_matrix, sp.csc_matrix, sp.coo_matrix, sp.dok_matrix, sp.lil_matrix]: rgr = MultiOutputRegressor(Lasso(random_state=0)) rgr_sparse = MultiOutputRegressor(Lasso(random_state=0)) rgr.fit(X_train, y_train) rgr_sparse.fit(sparse(X_train), y_train) assert_almost_equal(rgr.predict(X_test), rgr_sparse.predict(sparse(X_test))) def test_multi_target_sample_weights_api(): X = [[1, 2, 3], [4, 5, 6]] y = [[3.141, 2.718], [2.718, 3.141]] w = [0.8, 0.6] rgr = MultiOutputRegressor(Lasso()) assert_raises_regex(ValueError, "does not support sample weights", rgr.fit, X, y, w) # no exception should be raised if the base estimator supports weights rgr = MultiOutputRegressor(GradientBoostingRegressor(random_state=0)) rgr.fit(X, y, w) def test_multi_target_sample_weight_partial_fit(): # weighted regressor X = [[1, 2, 3], [4, 5, 6]] y = [[3.141, 2.718], [2.718, 3.141]] w = [2., 1.] rgr_w = MultiOutputRegressor(SGDRegressor(random_state=0)) rgr_w.partial_fit(X, y, w) # weighted with different weights w = [2., 2.] rgr = MultiOutputRegressor(SGDRegressor(random_state=0)) rgr.partial_fit(X, y, w) assert_not_equal(rgr.predict(X)[0][0], rgr_w.predict(X)[0][0]) def test_multi_target_sample_weights(): # weighted regressor Xw = [[1, 2, 3], [4, 5, 6]] yw = [[3.141, 2.718], [2.718, 3.141]] w = [2., 1.] 
rgr_w = MultiOutputRegressor(GradientBoostingRegressor(random_state=0)) rgr_w.fit(Xw, yw, w) # unweighted, but with repeated samples X = [[1, 2, 3], [1, 2, 3], [4, 5, 6]] y = [[3.141, 2.718], [3.141, 2.718], [2.718, 3.141]] rgr = MultiOutputRegressor(GradientBoostingRegressor(random_state=0)) rgr.fit(X, y) X_test = [[1.5, 2.5, 3.5], [3.5, 4.5, 5.5]] assert_almost_equal(rgr.predict(X_test), rgr_w.predict(X_test)) # Import the data iris = datasets.load_iris() # create a multiple targets by randomized shuffling and concatenating y. X = iris.data y1 = iris.target y2 = shuffle(y1, random_state=1) y3 = shuffle(y1, random_state=2) y = np.column_stack((y1, y2, y3)) n_samples, n_features = X.shape n_outputs = y.shape[1] n_classes = len(np.unique(y1)) classes = list(map(np.unique, (y1, y2, y3))) def test_multi_output_classification_partial_fit_parallelism(): sgd_linear_clf = SGDClassifier(loss='log', random_state=1) mor = MultiOutputClassifier(sgd_linear_clf, n_jobs=-1) mor.partial_fit(X, y, classes) est1 = mor.estimators_[0] mor.partial_fit(X, y) est2 = mor.estimators_[0] # parallelism requires this to be the case for a sane implementation assert_false(est1 is est2) def test_multi_output_classification_partial_fit(): # test if multi_target initializes correctly with base estimator and fit # assert predictions work as expected for predict sgd_linear_clf = SGDClassifier(loss='log', random_state=1) multi_target_linear = MultiOutputClassifier(sgd_linear_clf) # train the multi_target_linear and also get the predictions. 
half_index = X.shape[0] // 2 multi_target_linear.partial_fit( X[:half_index], y[:half_index], classes=classes) first_predictions = multi_target_linear.predict(X) assert_equal((n_samples, n_outputs), first_predictions.shape) multi_target_linear.partial_fit(X[half_index:], y[half_index:]) second_predictions = multi_target_linear.predict(X) assert_equal((n_samples, n_outputs), second_predictions.shape) # train the linear classification with each column and assert that # predictions are equal after first partial_fit and second partial_fit for i in range(3): # create a clone with the same state sgd_linear_clf = clone(sgd_linear_clf) sgd_linear_clf.partial_fit( X[:half_index], y[:half_index, i], classes=classes[i]) assert_array_equal(sgd_linear_clf.predict(X), first_predictions[:, i]) sgd_linear_clf.partial_fit(X[half_index:], y[half_index:, i]) assert_array_equal(sgd_linear_clf.predict(X), second_predictions[:, i]) def test_mutli_output_classifiation_partial_fit_no_first_classes_exception(): sgd_linear_clf = SGDClassifier(loss='log', random_state=1) multi_target_linear = MultiOutputClassifier(sgd_linear_clf) assert_raises_regex(ValueError, "classes must be passed on the first call " "to partial_fit.", multi_target_linear.partial_fit, X, y) def test_multi_output_classification(): # test if multi_target initializes correctly with base estimator and fit # assert predictions work as expected for predict, prodict_proba and score forest = RandomForestClassifier(n_estimators=10, random_state=1) multi_target_forest = MultiOutputClassifier(forest) # train the multi_target_forest and also get the predictions. 
multi_target_forest.fit(X, y) predictions = multi_target_forest.predict(X) assert_equal((n_samples, n_outputs), predictions.shape) predict_proba = multi_target_forest.predict_proba(X) assert len(predict_proba) == n_outputs for class_probabilities in predict_proba: assert_equal((n_samples, n_classes), class_probabilities.shape) assert_array_equal(np.argmax(np.dstack(predict_proba), axis=1), predictions) # train the forest with each column and assert that predictions are equal for i in range(3): forest_ = clone(forest) # create a clone with the same state forest_.fit(X, y[:, i]) assert_equal(list(forest_.predict(X)), list(predictions[:, i])) assert_array_equal(list(forest_.predict_proba(X)), list(predict_proba[i])) def test_multiclass_multioutput_estimator(): # test to check meta of meta estimators svc = LinearSVC(random_state=0) multi_class_svc = OneVsRestClassifier(svc) multi_target_svc = MultiOutputClassifier(multi_class_svc) multi_target_svc.fit(X, y) predictions = multi_target_svc.predict(X) assert_equal((n_samples, n_outputs), predictions.shape) # train the forest with each column and assert that predictions are equal for i in range(3): multi_class_svc_ = clone(multi_class_svc) # create a clone multi_class_svc_.fit(X, y[:, i]) assert_equal(list(multi_class_svc_.predict(X)), list(predictions[:, i])) def test_multiclass_multioutput_estimator_predict_proba(): seed = 542 # make test deterministic rng = np.random.RandomState(seed) # random features X = rng.normal(size=(5, 5)) # random labels y1 = np.array(['b', 'a', 'a', 'b', 'a']).reshape(5, 1) # 2 classes y2 = np.array(['d', 'e', 'f', 'e', 'd']).reshape(5, 1) # 3 classes Y = np.concatenate([y1, y2], axis=1) clf = MultiOutputClassifier(LogisticRegression(random_state=seed)) clf.fit(X, Y) y_result = clf.predict_proba(X) y_actual = [np.array([[0.23481764, 0.76518236], [0.67196072, 0.32803928], [0.54681448, 0.45318552], [0.34883923, 0.65116077], [0.73687069, 0.26312931]]), np.array([[0.5171785, 0.23878628, 
0.24403522], [0.22141451, 0.64102704, 0.13755846], [0.16751315, 0.18256843, 0.64991843], [0.27357372, 0.55201592, 0.17441036], [0.65745193, 0.26062899, 0.08191907]])] for i in range(len(y_actual)): assert_almost_equal(y_result[i], y_actual[i]) def test_multi_output_classification_sample_weights(): # weighted classifier Xw = [[1, 2, 3], [4, 5, 6]] yw = [[3, 2], [2, 3]] w = np.asarray([2., 1.]) forest = RandomForestClassifier(n_estimators=10, random_state=1) clf_w = MultiOutputClassifier(forest) clf_w.fit(Xw, yw, w) # unweighted, but with repeated samples X = [[1, 2, 3], [1, 2, 3], [4, 5, 6]] y = [[3, 2], [3, 2], [2, 3]] forest = RandomForestClassifier(n_estimators=10, random_state=1) clf = MultiOutputClassifier(forest) clf.fit(X, y) X_test = [[1.5, 2.5, 3.5], [3.5, 4.5, 5.5]] assert_almost_equal(clf.predict(X_test), clf_w.predict(X_test)) def test_multi_output_classification_partial_fit_sample_weights(): # weighted classifier Xw = [[1, 2, 3], [4, 5, 6], [1.5, 2.5, 3.5]] yw = [[3, 2], [2, 3], [3, 2]] w = np.asarray([2., 1., 1.]) sgd_linear_clf = SGDClassifier(random_state=1) clf_w = MultiOutputClassifier(sgd_linear_clf) clf_w.fit(Xw, yw, w) # unweighted, but with repeated samples X = [[1, 2, 3], [1, 2, 3], [4, 5, 6], [1.5, 2.5, 3.5]] y = [[3, 2], [3, 2], [2, 3], [3, 2]] sgd_linear_clf = SGDClassifier(random_state=1) clf = MultiOutputClassifier(sgd_linear_clf) clf.fit(X, y) X_test = [[1.5, 2.5, 3.5]] assert_array_almost_equal(clf.predict(X_test), clf_w.predict(X_test)) def test_multi_output_exceptions(): # NotFittedError when fit is not done but score, predict and # and predict_proba are called moc = MultiOutputClassifier(LinearSVC(random_state=0)) assert_raises(NotFittedError, moc.predict, y) assert_raises(NotFittedError, moc.predict_proba, y) assert_raises(NotFittedError, moc.score, X, y) # ValueError when number of outputs is different # for fit and score y_new = np.column_stack((y1, y2)) moc.fit(X, y) assert_raises(ValueError, moc.score, X, y_new)
bsd-3-clause
pompiduskus/scikit-learn
doc/tutorial/text_analytics/skeletons/exercise_02_sentiment.py
256
2406
"""Build a sentiment analysis / polarity model Sentiment analysis can be casted as a binary text classification problem, that is fitting a linear classifier on features extracted from the text of the user messages so as to guess wether the opinion of the author is positive or negative. In this examples we will use a movie review dataset. """ # Author: Olivier Grisel <olivier.grisel@ensta.org> # License: Simplified BSD import sys from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.svm import LinearSVC from sklearn.pipeline import Pipeline from sklearn.grid_search import GridSearchCV from sklearn.datasets import load_files from sklearn.cross_validation import train_test_split from sklearn import metrics if __name__ == "__main__": # NOTE: we put the following in a 'if __name__ == "__main__"' protected # block to be able to use a multi-core grid search that also works under # Windows, see: http://docs.python.org/library/multiprocessing.html#windows # The multiprocessing module is used as the backend of joblib.Parallel # that is used when n_jobs != 1 in GridSearchCV # the training data folder must be passed as first argument movie_reviews_data_folder = sys.argv[1] dataset = load_files(movie_reviews_data_folder, shuffle=False) print("n_samples: %d" % len(dataset.data)) # split the dataset in training and test set: docs_train, docs_test, y_train, y_test = train_test_split( dataset.data, dataset.target, test_size=0.25, random_state=None) # TASK: Build a vectorizer / classifier pipeline that filters out tokens # that are too rare or too frequent # TASK: Build a grid search to find out whether unigrams or bigrams are # more useful. 
# Fit the pipeline on the training set using grid search for the parameters # TASK: print the cross-validated scores for the each parameters set # explored by the grid search # TASK: Predict the outcome on the testing set and store it in a variable # named y_predicted # Print the classification report print(metrics.classification_report(y_test, y_predicted, target_names=dataset.target_names)) # Print and plot the confusion matrix cm = metrics.confusion_matrix(y_test, y_predicted) print(cm) # import matplotlib.pyplot as plt # plt.matshow(cm) # plt.show()
bsd-3-clause
judaba13/GenrePredictor
dtCV.py
1
4618
''' CS4780 Final Project Post Pruning and Cross Validation @author: Kelsey Duncan ked83 ''' from matplotlib import pyplot as plt from dt import * import random #get data trainData, testData = getData() depths = range(2, 33) testAccuracies = [] testPrecisions = [] trainAccuracies = [] trainPrecisions = [] #train a tree on 'trData' and prune it to depth 'i' then test on tsData #return accuracy and precision def prune(i, trData, tsData): tree = trainDT(trData) tree.postPrune(tree.root, i) a, p = tree.test(tree, tsData) return a, p #get test and train accuracies and precisions for each depth def fillArrays(): for i in range(2, 33): a, p = prune(i, trainData, testData) testAccuracies.append(a) testPrecisions.append(p) atr, ptr = prune(i, trainData, trainData) trainAccuracies.append(atr) trainPrecisions.append(ptr) #print str(testAccuracies) #print str(testPrecisions) #print str(trainAccuracies) #print str(trainPrecisions) #plot depth vs 'yVals' def pltG(title, yVals): plt.figure() plt.title(title + " vs Depth") plt.xlabel("Max Depth") plt.ylabel(title) plt.plot(depths, yVals, ".") plt.savefig("DT "+ title + ".png") def plotGraphs(): pltG("Test Accuracy", testAccuracies) pltG("Test Precision", testPrecisions) pltG("Training Accuracy", trainAccuracies) pltG("Training Precision", trainPrecisions) plt.figure() plt.title("Training and Test Accuracy vs Depth") plt.xlabel("Max Depth") plt.ylabel("Accuracy") plt.plot(depths, trainAccuracies, "x", label="on training data") plt.plot(depths, testAccuracies, ".", label="on test data") plt.legend() plt.savefig("DT Train & Test Accuracy.png") plt.figure() plt.title("Training and Test Precision vs Depth") plt.xlabel("Max Depth") plt.ylabel("Precision") plt.plot(depths, trainPrecisions, "x", label="on training data") plt.plot(depths, testPrecisions, ".", label="on test data") plt.legend() plt.savefig("DT Train & Test Precision.png") #plot cross validation accuracies and precisions def plotCV(results, f, title): x = [results[r][2] for r 
in results if results[r][0] != None] y = [results[r][0] for r in results if results[r][0] != None] plt.figure() plt.title(title+ ": " + str(f) + "-fold CV Accuracy vs d") plt.xlabel("d") plt.ylabel("average accuracy") plt.plot(x, y, ".") plt.savefig(title + "CV Accuracy.png") x = [results[r][2] for r in results if results[r][1] != None] y = [results[r][1] for r in results if results[r][1] != None] plt.figure() plt.title(title+ ": " + str(f) + "-fold CV Precision vs d") plt.xlabel("d") plt.ylabel("average precision") plt.plot(x, y, ".") plt.savefig(title + "CV Precision.png") #get accuracy and precision for depth 'd' training on trainData and testing on testData def test(trainData, testData, d): a, p = prune(d, trainData, testData) result = [] result.append(a) result.append(p) return result #cross validate with 'f' folds def crossValidate(trainD, testD, depths, f, title): random.shuffle(trainD) size = float(len(trainD))/f results = {d:[0., 0.] for d in depths} counts = {d:[0, 0] for d in depths} for i in range(f): for d in depths: result = test(trainD[0:int(i*size)]+trainD[int((i+1)*size):], trainD[int(i*size):int((i+1)*size)], d) for j in range(2): if result[j] != None: results[d][j] += result[j] counts[d][j] += 1 for d in depths: for j in range(2): if counts[d][j] > 0: results[d][j] /= f else: results[d][j] = None results[d].append(d) d = max(results, key=lambda r: results[r][0]) bestAccuracy = results[d] print "Best Accuracy: ", bestAccuracy d = max(results, key=lambda r: results[r][1]) bestPrecision = results[d] print "Best Precision:", bestPrecision plotCV(results, f, title) return (test(trainD, testD, bestAccuracy[2]), test(trainD, testD, bestPrecision[2])) def main(): print "filling arrays" fillArrays() print "plotting graphs" plotGraphs() print "Cross Validating.." bestA, bestP = crossValidate(trainData, testData, depths, 7, "DT ") print "Testing on best depth for accuracy.. 
accuracy = " + str(bestA[0]) + " precision = " + str(bestA[1]) print "Testing on best depth for precision.. accuracy = " + str(bestP[0]) + " precision = " + str(bestP[1]) main()
apache-2.0
smrjan/seldon-server
python/seldon/pipeline/tests/test_util.py
2
2599
import unittest from .. import util as sutils from .. import basic_transforms as bt import pandas as pd import json import os.path from sklearn.pipeline import Pipeline from sklearn.externals import joblib import logging class Test_PipelineWrapper(unittest.TestCase): def _create_test_json(self,fname,as_list=False): f = open(fname,"w") if as_list: f.write("[") j = {} j["a"] = 1 j["b"] = 2 f.write(json.dumps(j,sort_keys=True)) if as_list: f.write(",\n") else: f.write("\n") j = {} j["a"] = 3 j["b"] = 4 f.write(json.dumps(j,sort_keys=True)) if as_list: f.write("]\n") else: f.write("\n") f.close() def test_load_json_folders(self): w = sutils.PipelineWrapper() data_folder = w.get_work_folder()+"/events" if not os.path.exists(data_folder): os.makedirs(data_folder) fname = data_folder+"/"+"test.json" self._create_test_json(fname) df = w.create_dataframe([data_folder]) print df def test_load_json_file(self): w = sutils.PipelineWrapper() fname = w.get_work_folder()+"/"+"test.json" self._create_test_json(fname,as_list=True) df = w.create_dataframe(fname) print df def test_save_dataframe(self): w = sutils.PipelineWrapper() df = pd.DataFrame.from_dict([{"a":"a b","b":"c d","c":3},{"a":"word1","b":"word2"}]) fname = w.get_work_folder()+"/"+"saved.json" w.save_dataframe(df,fname,"csv",csv_index=False) df2 = w.create_dataframe(fname,df_format="csv") from pandas.util.testing import assert_frame_equal assert_frame_equal(df.sort(axis=1), df2.sort(axis=1), check_names=True) def test_save_pipeline(self): w = sutils.PipelineWrapper() t = bt.IncludeFeaturesTransform(included=["a","b"]) transformers = [("include_features",t)] p = Pipeline(transformers) df = pd.DataFrame.from_dict([{"a":1,"b":2,"c":3}]) df2 = p.fit_transform(df) self.assertTrue(sorted(df2.columns) == sorted(["a","b"])) dest_folder = w.get_work_folder()+"/dest_pipeline" w.save_pipeline(p,dest_folder) p2 = w.load_pipeline(dest_folder) df3 = p2.transform(df) self.assertTrue(sorted(df3.columns) == sorted(["a","b"])) if 
__name__ == '__main__': logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO) unittest.main()
apache-2.0
davidgbe/scikit-learn
sklearn/linear_model/stochastic_gradient.py
130
50966
# Authors: Peter Prettenhofer <peter.prettenhofer@gmail.com> (main author) # Mathieu Blondel (partial_fit support) # # License: BSD 3 clause """Classification and regression using Stochastic Gradient Descent (SGD).""" import numpy as np import scipy.sparse as sp from abc import ABCMeta, abstractmethod from ..externals.joblib import Parallel, delayed from .base import LinearClassifierMixin, SparseCoefMixin from ..base import BaseEstimator, RegressorMixin from ..feature_selection.from_model import _LearntSelectorMixin from ..utils import (check_array, check_random_state, check_X_y, deprecated) from ..utils.extmath import safe_sparse_dot from ..utils.multiclass import _check_partial_fit_first_call from ..utils.validation import check_is_fitted from ..externals import six from .sgd_fast import plain_sgd, average_sgd from ..utils.fixes import astype from ..utils.seq_dataset import ArrayDataset, CSRDataset from ..utils import compute_class_weight from .sgd_fast import Hinge from .sgd_fast import SquaredHinge from .sgd_fast import Log from .sgd_fast import ModifiedHuber from .sgd_fast import SquaredLoss from .sgd_fast import Huber from .sgd_fast import EpsilonInsensitive from .sgd_fast import SquaredEpsilonInsensitive LEARNING_RATE_TYPES = {"constant": 1, "optimal": 2, "invscaling": 3, "pa1": 4, "pa2": 5} PENALTY_TYPES = {"none": 0, "l2": 2, "l1": 1, "elasticnet": 3} SPARSE_INTERCEPT_DECAY = 0.01 """For sparse data intercept updates are scaled by this decay factor to avoid intercept oscillation.""" DEFAULT_EPSILON = 0.1 """Default value of ``epsilon`` parameter. 
""" class BaseSGD(six.with_metaclass(ABCMeta, BaseEstimator, SparseCoefMixin)): """Base class for SGD classification and regression.""" def __init__(self, loss, penalty='l2', alpha=0.0001, C=1.0, l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True, verbose=0, epsilon=0.1, random_state=None, learning_rate="optimal", eta0=0.0, power_t=0.5, warm_start=False, average=False): self.loss = loss self.penalty = penalty self.learning_rate = learning_rate self.epsilon = epsilon self.alpha = alpha self.C = C self.l1_ratio = l1_ratio self.fit_intercept = fit_intercept self.n_iter = n_iter self.shuffle = shuffle self.random_state = random_state self.verbose = verbose self.eta0 = eta0 self.power_t = power_t self.warm_start = warm_start self.average = average self._validate_params() self.coef_ = None if self.average > 0: self.standard_coef_ = None self.average_coef_ = None # iteration count for learning rate schedule # must not be int (e.g. if ``learning_rate=='optimal'``) self.t_ = None def set_params(self, *args, **kwargs): super(BaseSGD, self).set_params(*args, **kwargs) self._validate_params() return self @abstractmethod def fit(self, X, y): """Fit model.""" def _validate_params(self): """Validate input params. """ if not isinstance(self.shuffle, bool): raise ValueError("shuffle must be either True or False") if self.n_iter <= 0: raise ValueError("n_iter must be > zero") if not (0.0 <= self.l1_ratio <= 1.0): raise ValueError("l1_ratio must be in [0, 1]") if self.alpha < 0.0: raise ValueError("alpha must be >= 0") if self.learning_rate in ("constant", "invscaling"): if self.eta0 <= 0.0: raise ValueError("eta0 must be > 0") # raises ValueError if not registered self._get_penalty_type(self.penalty) self._get_learning_rate_type(self.learning_rate) if self.loss not in self.loss_functions: raise ValueError("The loss %s is not supported. " % self.loss) def _get_loss_function(self, loss): """Get concrete ``LossFunction`` object for str ``loss``. 
""" try: loss_ = self.loss_functions[loss] loss_class, args = loss_[0], loss_[1:] if loss in ('huber', 'epsilon_insensitive', 'squared_epsilon_insensitive'): args = (self.epsilon, ) return loss_class(*args) except KeyError: raise ValueError("The loss %s is not supported. " % loss) def _get_learning_rate_type(self, learning_rate): try: return LEARNING_RATE_TYPES[learning_rate] except KeyError: raise ValueError("learning rate %s " "is not supported. " % learning_rate) def _get_penalty_type(self, penalty): penalty = str(penalty).lower() try: return PENALTY_TYPES[penalty] except KeyError: raise ValueError("Penalty %s is not supported. " % penalty) def _validate_sample_weight(self, sample_weight, n_samples): """Set the sample weight array.""" if sample_weight is None: # uniform sample weights sample_weight = np.ones(n_samples, dtype=np.float64, order='C') else: # user-provided array sample_weight = np.asarray(sample_weight, dtype=np.float64, order="C") if sample_weight.shape[0] != n_samples: raise ValueError("Shapes of X and sample_weight do not match.") return sample_weight def _allocate_parameter_mem(self, n_classes, n_features, coef_init=None, intercept_init=None): """Allocate mem for parameters; initialize if provided.""" if n_classes > 2: # allocate coef_ for multi-class if coef_init is not None: coef_init = np.asarray(coef_init, order="C") if coef_init.shape != (n_classes, n_features): raise ValueError("Provided ``coef_`` does not match dataset. 
") self.coef_ = coef_init else: self.coef_ = np.zeros((n_classes, n_features), dtype=np.float64, order="C") # allocate intercept_ for multi-class if intercept_init is not None: intercept_init = np.asarray(intercept_init, order="C") if intercept_init.shape != (n_classes, ): raise ValueError("Provided intercept_init " "does not match dataset.") self.intercept_ = intercept_init else: self.intercept_ = np.zeros(n_classes, dtype=np.float64, order="C") else: # allocate coef_ for binary problem if coef_init is not None: coef_init = np.asarray(coef_init, dtype=np.float64, order="C") coef_init = coef_init.ravel() if coef_init.shape != (n_features,): raise ValueError("Provided coef_init does not " "match dataset.") self.coef_ = coef_init else: self.coef_ = np.zeros(n_features, dtype=np.float64, order="C") # allocate intercept_ for binary problem if intercept_init is not None: intercept_init = np.asarray(intercept_init, dtype=np.float64) if intercept_init.shape != (1,) and intercept_init.shape != (): raise ValueError("Provided intercept_init " "does not match dataset.") self.intercept_ = intercept_init.reshape(1,) else: self.intercept_ = np.zeros(1, dtype=np.float64, order="C") # initialize average parameters if self.average > 0: self.standard_coef_ = self.coef_ self.standard_intercept_ = self.intercept_ self.average_coef_ = np.zeros(self.coef_.shape, dtype=np.float64, order="C") self.average_intercept_ = np.zeros(self.standard_intercept_.shape, dtype=np.float64, order="C") def _make_dataset(X, y_i, sample_weight): """Create ``Dataset`` abstraction for sparse and dense inputs. This also returns the ``intercept_decay`` which is different for sparse datasets. """ if sp.issparse(X): dataset = CSRDataset(X.data, X.indptr, X.indices, y_i, sample_weight) intercept_decay = SPARSE_INTERCEPT_DECAY else: dataset = ArrayDataset(X, y_i, sample_weight) intercept_decay = 1.0 return dataset, intercept_decay def _prepare_fit_binary(est, y, i): """Initialization for fit_binary. 
Returns y, coef, intercept. """ y_i = np.ones(y.shape, dtype=np.float64, order="C") y_i[y != est.classes_[i]] = -1.0 average_intercept = 0 average_coef = None if len(est.classes_) == 2: if not est.average: coef = est.coef_.ravel() intercept = est.intercept_[0] else: coef = est.standard_coef_.ravel() intercept = est.standard_intercept_[0] average_coef = est.average_coef_.ravel() average_intercept = est.average_intercept_[0] else: if not est.average: coef = est.coef_[i] intercept = est.intercept_[i] else: coef = est.standard_coef_[i] intercept = est.standard_intercept_[i] average_coef = est.average_coef_[i] average_intercept = est.average_intercept_[i] return y_i, coef, intercept, average_coef, average_intercept def fit_binary(est, i, X, y, alpha, C, learning_rate, n_iter, pos_weight, neg_weight, sample_weight): """Fit a single binary classifier. The i'th class is considered the "positive" class. """ # if average is not true, average_coef, and average_intercept will be # unused y_i, coef, intercept, average_coef, average_intercept = \ _prepare_fit_binary(est, y, i) assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0] dataset, intercept_decay = _make_dataset(X, y_i, sample_weight) penalty_type = est._get_penalty_type(est.penalty) learning_rate_type = est._get_learning_rate_type(learning_rate) # XXX should have random_state_! 
random_state = check_random_state(est.random_state) # numpy mtrand expects a C long which is a signed 32 bit integer under # Windows seed = random_state.randint(0, np.iinfo(np.int32).max) if not est.average: return plain_sgd(coef, intercept, est.loss_function, penalty_type, alpha, C, est.l1_ratio, dataset, n_iter, int(est.fit_intercept), int(est.verbose), int(est.shuffle), seed, pos_weight, neg_weight, learning_rate_type, est.eta0, est.power_t, est.t_, intercept_decay) else: standard_coef, standard_intercept, average_coef, \ average_intercept = average_sgd(coef, intercept, average_coef, average_intercept, est.loss_function, penalty_type, alpha, C, est.l1_ratio, dataset, n_iter, int(est.fit_intercept), int(est.verbose), int(est.shuffle), seed, pos_weight, neg_weight, learning_rate_type, est.eta0, est.power_t, est.t_, intercept_decay, est.average) if len(est.classes_) == 2: est.average_intercept_[0] = average_intercept else: est.average_intercept_[i] = average_intercept return standard_coef, standard_intercept class BaseSGDClassifier(six.with_metaclass(ABCMeta, BaseSGD, LinearClassifierMixin)): loss_functions = { "hinge": (Hinge, 1.0), "squared_hinge": (SquaredHinge, 1.0), "perceptron": (Hinge, 0.0), "log": (Log, ), "modified_huber": (ModifiedHuber, ), "squared_loss": (SquaredLoss, ), "huber": (Huber, DEFAULT_EPSILON), "epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON), "squared_epsilon_insensitive": (SquaredEpsilonInsensitive, DEFAULT_EPSILON), } @abstractmethod def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True, verbose=0, epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None, learning_rate="optimal", eta0=0.0, power_t=0.5, class_weight=None, warm_start=False, average=False): super(BaseSGDClassifier, self).__init__(loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio, fit_intercept=fit_intercept, n_iter=n_iter, shuffle=shuffle, verbose=verbose, epsilon=epsilon, 
random_state=random_state, learning_rate=learning_rate, eta0=eta0, power_t=power_t, warm_start=warm_start, average=average) self.class_weight = class_weight self.classes_ = None self.n_jobs = int(n_jobs) def _partial_fit(self, X, y, alpha, C, loss, learning_rate, n_iter, classes, sample_weight, coef_init, intercept_init): X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C") n_samples, n_features = X.shape self._validate_params() _check_partial_fit_first_call(self, classes) n_classes = self.classes_.shape[0] # Allocate datastructures from input arguments self._expanded_class_weight = compute_class_weight(self.class_weight, self.classes_, y) sample_weight = self._validate_sample_weight(sample_weight, n_samples) if self.coef_ is None or coef_init is not None: self._allocate_parameter_mem(n_classes, n_features, coef_init, intercept_init) elif n_features != self.coef_.shape[-1]: raise ValueError("Number of features %d does not match previous data %d." % (n_features, self.coef_.shape[-1])) self.loss_function = self._get_loss_function(loss) if self.t_ is None: self.t_ = 1.0 # delegate to concrete training procedure if n_classes > 2: self._fit_multiclass(X, y, alpha=alpha, C=C, learning_rate=learning_rate, sample_weight=sample_weight, n_iter=n_iter) elif n_classes == 2: self._fit_binary(X, y, alpha=alpha, C=C, learning_rate=learning_rate, sample_weight=sample_weight, n_iter=n_iter) else: raise ValueError("The number of class labels must be " "greater than one.") return self def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None, intercept_init=None, sample_weight=None): if hasattr(self, "classes_"): self.classes_ = None X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C") n_samples, n_features = X.shape # labels can be encoded as float, int, or string literals # np.unique sorts in asc order; largest class id is positive class classes = np.unique(y) if self.warm_start and self.coef_ is not None: if coef_init is None: coef_init = self.coef_ if 
intercept_init is None: intercept_init = self.intercept_ else: self.coef_ = None self.intercept_ = None if self.average > 0: self.standard_coef_ = self.coef_ self.standard_intercept_ = self.intercept_ self.average_coef_ = None self.average_intercept_ = None # Clear iteration count for multiple call to fit. self.t_ = None self._partial_fit(X, y, alpha, C, loss, learning_rate, self.n_iter, classes, sample_weight, coef_init, intercept_init) return self def _fit_binary(self, X, y, alpha, C, sample_weight, learning_rate, n_iter): """Fit a binary classifier on X and y. """ coef, intercept = fit_binary(self, 1, X, y, alpha, C, learning_rate, n_iter, self._expanded_class_weight[1], self._expanded_class_weight[0], sample_weight) self.t_ += n_iter * X.shape[0] # need to be 2d if self.average > 0: if self.average <= self.t_ - 1: self.coef_ = self.average_coef_.reshape(1, -1) self.intercept_ = self.average_intercept_ else: self.coef_ = self.standard_coef_.reshape(1, -1) self.standard_intercept_ = np.atleast_1d(intercept) self.intercept_ = self.standard_intercept_ else: self.coef_ = coef.reshape(1, -1) # intercept is a float, need to convert it to an array of length 1 self.intercept_ = np.atleast_1d(intercept) def _fit_multiclass(self, X, y, alpha, C, learning_rate, sample_weight, n_iter): """Fit a multi-class classifier by combining binary classifiers Each binary classifier predicts one class versus all others. This strategy is called OVA: One Versus All. """ # Use joblib to fit OvA in parallel. 
result = Parallel(n_jobs=self.n_jobs, backend="threading", verbose=self.verbose)( delayed(fit_binary)(self, i, X, y, alpha, C, learning_rate, n_iter, self._expanded_class_weight[i], 1., sample_weight) for i in range(len(self.classes_))) for i, (_, intercept) in enumerate(result): self.intercept_[i] = intercept self.t_ += n_iter * X.shape[0] if self.average > 0: if self.average <= self.t_ - 1.0: self.coef_ = self.average_coef_ self.intercept_ = self.average_intercept_ else: self.coef_ = self.standard_coef_ self.standard_intercept_ = np.atleast_1d(intercept) self.intercept_ = self.standard_intercept_ def partial_fit(self, X, y, classes=None, sample_weight=None): """Fit linear model with Stochastic Gradient Descent. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Subset of the training data y : numpy array, shape (n_samples,) Subset of the target values classes : array, shape (n_classes,) Classes across all calls to partial_fit. Can be obtained by via `np.unique(y_all)`, where y_all is the target vector of the entire dataset. This argument is required for the first call to partial_fit and can be omitted in the subsequent calls. Note that y doesn't need to contain all labels in `classes`. sample_weight : array-like, shape (n_samples,), optional Weights applied to individual samples. If not provided, uniform weights are assumed. Returns ------- self : returns an instance of self. """ if self.class_weight in ['balanced', 'auto']: raise ValueError("class_weight '{0}' is not supported for " "partial_fit. In order to use 'balanced' weights, " "use compute_class_weight('{0}', classes, y). " "In place of y you can us a large enough sample " "of the full training set target to properly " "estimate the class frequency distributions. 
" "Pass the resulting weights as the class_weight " "parameter.".format(self.class_weight)) return self._partial_fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss, learning_rate=self.learning_rate, n_iter=1, classes=classes, sample_weight=sample_weight, coef_init=None, intercept_init=None) def fit(self, X, y, coef_init=None, intercept_init=None, sample_weight=None): """Fit linear model with Stochastic Gradient Descent. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data y : numpy array, shape (n_samples,) Target values coef_init : array, shape (n_classes, n_features) The initial coefficients to warm-start the optimization. intercept_init : array, shape (n_classes,) The initial intercept to warm-start the optimization. sample_weight : array-like, shape (n_samples,), optional Weights applied to individual samples. If not provided, uniform weights are assumed. These weights will be multiplied with class_weight (passed through the contructor) if class_weight is specified Returns ------- self : returns an instance of self. """ return self._fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss, learning_rate=self.learning_rate, coef_init=coef_init, intercept_init=intercept_init, sample_weight=sample_weight) class SGDClassifier(BaseSGDClassifier, _LearntSelectorMixin): """Linear classifiers (SVM, logistic regression, a.o.) with SGD training. This estimator implements regularized linear models with stochastic gradient descent (SGD) learning: the gradient of the loss is estimated each sample at a time and the model is updated along the way with a decreasing strength schedule (aka learning rate). SGD allows minibatch (online/out-of-core) learning, see the partial_fit method. For best results using the default learning rate schedule, the data should have zero mean and unit variance. This implementation works with data represented as dense or sparse arrays of floating point values for the features. 
The model it fits can be controlled with the loss parameter; by default, it fits a linear support vector machine (SVM). The regularizer is a penalty added to the loss function that shrinks model parameters towards the zero vector using either the squared euclidean norm L2 or the absolute norm L1 or a combination of both (Elastic Net). If the parameter update crosses the 0.0 value because of the regularizer, the update is truncated to 0.0 to allow for learning sparse models and achieve online feature selection. Read more in the :ref:`User Guide <sgd>`. Parameters ---------- loss : str, 'hinge', 'log', 'modified_huber', 'squared_hinge',\ 'perceptron', or a regression loss: 'squared_loss', 'huber',\ 'epsilon_insensitive', or 'squared_epsilon_insensitive' The loss function to be used. Defaults to 'hinge', which gives a linear SVM. The 'log' loss gives logistic regression, a probabilistic classifier. 'modified_huber' is another smooth loss that brings tolerance to outliers as well as probability estimates. 'squared_hinge' is like hinge but is quadratically penalized. 'perceptron' is the linear loss used by the perceptron algorithm. The other losses are designed for regression but can be useful in classification as well; see SGDRegressor for a description. penalty : str, 'none', 'l2', 'l1', or 'elasticnet' The penalty (aka regularization term) to be used. Defaults to 'l2' which is the standard regularizer for linear SVM models. 'l1' and 'elasticnet' might bring sparsity to the model (feature selection) not achievable with 'l2'. alpha : float Constant that multiplies the regularization term. Defaults to 0.0001 l1_ratio : float The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1. l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1. Defaults to 0.15. fit_intercept : bool Whether the intercept should be estimated or not. If False, the data is assumed to be already centered. Defaults to True. 
n_iter : int, optional The number of passes over the training data (aka epochs). The number of iterations is set to 1 if using partial_fit. Defaults to 5. shuffle : bool, optional Whether or not the training data should be shuffled after each epoch. Defaults to True. random_state : int seed, RandomState instance, or None (default) The seed of the pseudo random number generator to use when shuffling the data. verbose : integer, optional The verbosity level epsilon : float Epsilon in the epsilon-insensitive loss functions; only if `loss` is 'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'. For 'huber', determines the threshold at which it becomes less important to get the prediction exactly right. For epsilon-insensitive, any differences between the current prediction and the correct label are ignored if they are less than this threshold. n_jobs : integer, optional The number of CPUs to use to do the OVA (One Versus All, for multi-class problems) computation. -1 means 'all CPUs'. Defaults to 1. learning_rate : string, optional The learning rate schedule: constant: eta = eta0 optimal: eta = 1.0 / (t + t0) [default] invscaling: eta = eta0 / pow(t, power_t) where t0 is chosen by a heuristic proposed by Leon Bottou. eta0 : double The initial learning rate for the 'constant' or 'invscaling' schedules. The default value is 0.0 as eta0 is not used by the default schedule 'optimal'. power_t : double The exponent for inverse scaling learning rate [default 0.5]. class_weight : dict, {class_label: weight} or "balanced" or None, optional Preset for the class_weight fit parameter. Weights associated with classes. If not given, all classes are supposed to have weight one. 
The "balanced" mode uses the values of y to automatically adjust weights inversely proportional to class frequencies in the input data as ``n_samples / (n_classes * np.bincount(y))`` warm_start : bool, optional When set to True, reuse the solution of the previous call to fit as initialization, otherwise, just erase the previous solution. average : bool or int, optional When set to True, computes the averaged SGD weights and stores the result in the ``coef_`` attribute. If set to an int greater than 1, averaging will begin once the total number of samples seen reaches average. So average=10 will begin averaging after seeing 10 samples. Attributes ---------- coef_ : array, shape (1, n_features) if n_classes == 2 else (n_classes,\ n_features) Weights assigned to the features. intercept_ : array, shape (1,) if n_classes == 2 else (n_classes,) Constants in decision function. Examples -------- >>> import numpy as np >>> from sklearn import linear_model >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]]) >>> Y = np.array([1, 1, 2, 2]) >>> clf = linear_model.SGDClassifier() >>> clf.fit(X, Y) ... 
#doctest: +NORMALIZE_WHITESPACE SGDClassifier(alpha=0.0001, average=False, class_weight=None, epsilon=0.1, eta0=0.0, fit_intercept=True, l1_ratio=0.15, learning_rate='optimal', loss='hinge', n_iter=5, n_jobs=1, penalty='l2', power_t=0.5, random_state=None, shuffle=True, verbose=0, warm_start=False) >>> print(clf.predict([[-0.8, -1]])) [1] See also -------- LinearSVC, LogisticRegression, Perceptron """ def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True, verbose=0, epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None, learning_rate="optimal", eta0=0.0, power_t=0.5, class_weight=None, warm_start=False, average=False): super(SGDClassifier, self).__init__( loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio, fit_intercept=fit_intercept, n_iter=n_iter, shuffle=shuffle, verbose=verbose, epsilon=epsilon, n_jobs=n_jobs, random_state=random_state, learning_rate=learning_rate, eta0=eta0, power_t=power_t, class_weight=class_weight, warm_start=warm_start, average=average) def _check_proba(self): check_is_fitted(self, "t_") if self.loss not in ("log", "modified_huber"): raise AttributeError("probability estimates are not available for" " loss=%r" % self.loss) @property def predict_proba(self): """Probability estimates. This method is only available for log loss and modified Huber loss. Multiclass probability estimates are derived from binary (one-vs.-rest) estimates by simple normalization, as recommended by Zadrozny and Elkan. Binary probability estimates for loss="modified_huber" are given by (clip(decision_function(X), -1, 1) + 1) / 2. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Returns ------- array, shape (n_samples, n_classes) Returns the probability of the sample for each class in the model, where classes are ordered as they are in `self.classes_`. 
References ---------- Zadrozny and Elkan, "Transforming classifier scores into multiclass probability estimates", SIGKDD'02, http://www.research.ibm.com/people/z/zadrozny/kdd2002-Transf.pdf The justification for the formula in the loss="modified_huber" case is in the appendix B in: http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf """ self._check_proba() return self._predict_proba def _predict_proba(self, X): if self.loss == "log": return self._predict_proba_lr(X) elif self.loss == "modified_huber": binary = (len(self.classes_) == 2) scores = self.decision_function(X) if binary: prob2 = np.ones((scores.shape[0], 2)) prob = prob2[:, 1] else: prob = scores np.clip(scores, -1, 1, prob) prob += 1. prob /= 2. if binary: prob2[:, 0] -= prob prob = prob2 else: # the above might assign zero to all classes, which doesn't # normalize neatly; work around this to produce uniform # probabilities prob_sum = prob.sum(axis=1) all_zero = (prob_sum == 0) if np.any(all_zero): prob[all_zero, :] = 1 prob_sum[all_zero] = len(self.classes_) # normalize prob /= prob_sum.reshape((prob.shape[0], -1)) return prob else: raise NotImplementedError("predict_(log_)proba only supported when" " loss='log' or loss='modified_huber' " "(%r given)" % self.loss) @property def predict_log_proba(self): """Log of probability estimates. This method is only available for log loss and modified Huber loss. When loss="modified_huber", probability estimates may be hard zeros and ones, so taking the logarithm is not possible. See ``predict_proba`` for details. Parameters ---------- X : array-like, shape (n_samples, n_features) Returns ------- T : array-like, shape (n_samples, n_classes) Returns the log-probability of the sample for each class in the model, where classes are ordered as they are in `self.classes_`. 
""" self._check_proba() return self._predict_log_proba def _predict_log_proba(self, X): return np.log(self.predict_proba(X)) class BaseSGDRegressor(BaseSGD, RegressorMixin): loss_functions = { "squared_loss": (SquaredLoss, ), "huber": (Huber, DEFAULT_EPSILON), "epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON), "squared_epsilon_insensitive": (SquaredEpsilonInsensitive, DEFAULT_EPSILON), } @abstractmethod def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001, l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True, verbose=0, epsilon=DEFAULT_EPSILON, random_state=None, learning_rate="invscaling", eta0=0.01, power_t=0.25, warm_start=False, average=False): super(BaseSGDRegressor, self).__init__(loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio, fit_intercept=fit_intercept, n_iter=n_iter, shuffle=shuffle, verbose=verbose, epsilon=epsilon, random_state=random_state, learning_rate=learning_rate, eta0=eta0, power_t=power_t, warm_start=warm_start, average=average) def _partial_fit(self, X, y, alpha, C, loss, learning_rate, n_iter, sample_weight, coef_init, intercept_init): X, y = check_X_y(X, y, "csr", copy=False, order='C', dtype=np.float64) y = astype(y, np.float64, copy=False) n_samples, n_features = X.shape self._validate_params() # Allocate datastructures from input arguments sample_weight = self._validate_sample_weight(sample_weight, n_samples) if self.coef_ is None: self._allocate_parameter_mem(1, n_features, coef_init, intercept_init) elif n_features != self.coef_.shape[-1]: raise ValueError("Number of features %d does not match previous data %d." 
% (n_features, self.coef_.shape[-1])) if self.average > 0 and self.average_coef_ is None: self.average_coef_ = np.zeros(n_features, dtype=np.float64, order="C") self.average_intercept_ = np.zeros(1, dtype=np.float64, order="C") self._fit_regressor(X, y, alpha, C, loss, learning_rate, sample_weight, n_iter) return self def partial_fit(self, X, y, sample_weight=None): """Fit linear model with Stochastic Gradient Descent. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Subset of training data y : numpy array of shape (n_samples,) Subset of target values sample_weight : array-like, shape (n_samples,), optional Weights applied to individual samples. If not provided, uniform weights are assumed. Returns ------- self : returns an instance of self. """ return self._partial_fit(X, y, self.alpha, C=1.0, loss=self.loss, learning_rate=self.learning_rate, n_iter=1, sample_weight=sample_weight, coef_init=None, intercept_init=None) def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None, intercept_init=None, sample_weight=None): if self.warm_start and self.coef_ is not None: if coef_init is None: coef_init = self.coef_ if intercept_init is None: intercept_init = self.intercept_ else: self.coef_ = None self.intercept_ = None if self.average > 0: self.standard_intercept_ = self.intercept_ self.standard_coef_ = self.coef_ self.average_coef_ = None self.average_intercept_ = None # Clear iteration count for multiple call to fit. self.t_ = None return self._partial_fit(X, y, alpha, C, loss, learning_rate, self.n_iter, sample_weight, coef_init, intercept_init) def fit(self, X, y, coef_init=None, intercept_init=None, sample_weight=None): """Fit linear model with Stochastic Gradient Descent. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data y : numpy array, shape (n_samples,) Target values coef_init : array, shape (n_features,) The initial coefficients to warm-start the optimization. 
intercept_init : array, shape (1,) The initial intercept to warm-start the optimization. sample_weight : array-like, shape (n_samples,), optional Weights applied to individual samples (1. for unweighted). Returns ------- self : returns an instance of self. """ return self._fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss, learning_rate=self.learning_rate, coef_init=coef_init, intercept_init=intercept_init, sample_weight=sample_weight) @deprecated(" and will be removed in 0.19.") def decision_function(self, X): """Predict using the linear model Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Returns ------- array, shape (n_samples,) Predicted target values per element in X. """ return self._decision_function(X) def _decision_function(self, X): """Predict using the linear model Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Returns ------- array, shape (n_samples,) Predicted target values per element in X. """ check_is_fitted(self, ["t_", "coef_", "intercept_"], all_or_any=all) X = check_array(X, accept_sparse='csr') scores = safe_sparse_dot(X, self.coef_.T, dense_output=True) + self.intercept_ return scores.ravel() def predict(self, X): """Predict using the linear model Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Returns ------- array, shape (n_samples,) Predicted target values per element in X. 
""" return self._decision_function(X) def _fit_regressor(self, X, y, alpha, C, loss, learning_rate, sample_weight, n_iter): dataset, intercept_decay = _make_dataset(X, y, sample_weight) loss_function = self._get_loss_function(loss) penalty_type = self._get_penalty_type(self.penalty) learning_rate_type = self._get_learning_rate_type(learning_rate) if self.t_ is None: self.t_ = 1.0 random_state = check_random_state(self.random_state) # numpy mtrand expects a C long which is a signed 32 bit integer under # Windows seed = random_state.randint(0, np.iinfo(np.int32).max) if self.average > 0: self.standard_coef_, self.standard_intercept_, \ self.average_coef_, self.average_intercept_ =\ average_sgd(self.standard_coef_, self.standard_intercept_[0], self.average_coef_, self.average_intercept_[0], loss_function, penalty_type, alpha, C, self.l1_ratio, dataset, n_iter, int(self.fit_intercept), int(self.verbose), int(self.shuffle), seed, 1.0, 1.0, learning_rate_type, self.eta0, self.power_t, self.t_, intercept_decay, self.average) self.average_intercept_ = np.atleast_1d(self.average_intercept_) self.standard_intercept_ = np.atleast_1d(self.standard_intercept_) self.t_ += n_iter * X.shape[0] if self.average <= self.t_ - 1.0: self.coef_ = self.average_coef_ self.intercept_ = self.average_intercept_ else: self.coef_ = self.standard_coef_ self.intercept_ = self.standard_intercept_ else: self.coef_, self.intercept_ = \ plain_sgd(self.coef_, self.intercept_[0], loss_function, penalty_type, alpha, C, self.l1_ratio, dataset, n_iter, int(self.fit_intercept), int(self.verbose), int(self.shuffle), seed, 1.0, 1.0, learning_rate_type, self.eta0, self.power_t, self.t_, intercept_decay) self.t_ += n_iter * X.shape[0] self.intercept_ = np.atleast_1d(self.intercept_) class SGDRegressor(BaseSGDRegressor, _LearntSelectorMixin): """Linear model fitted by minimizing a regularized empirical loss with SGD SGD stands for Stochastic Gradient Descent: the gradient of the loss is estimated each sample at 
a time and the model is updated along the way with a decreasing strength schedule (aka learning rate). The regularizer is a penalty added to the loss function that shrinks model parameters towards the zero vector using either the squared euclidean norm L2 or the absolute norm L1 or a combination of both (Elastic Net). If the parameter update crosses the 0.0 value because of the regularizer, the update is truncated to 0.0 to allow for learning sparse models and achieve online feature selection. This implementation works with data represented as dense numpy arrays of floating point values for the features. Read more in the :ref:`User Guide <sgd>`. Parameters ---------- loss : str, 'squared_loss', 'huber', 'epsilon_insensitive', \ or 'squared_epsilon_insensitive' The loss function to be used. Defaults to 'squared_loss' which refers to the ordinary least squares fit. 'huber' modifies 'squared_loss' to focus less on getting outliers correct by switching from squared to linear loss past a distance of epsilon. 'epsilon_insensitive' ignores errors less than epsilon and is linear past that; this is the loss function used in SVR. 'squared_epsilon_insensitive' is the same but becomes squared loss past a tolerance of epsilon. penalty : str, 'none', 'l2', 'l1', or 'elasticnet' The penalty (aka regularization term) to be used. Defaults to 'l2' which is the standard regularizer for linear SVM models. 'l1' and 'elasticnet' might bring sparsity to the model (feature selection) not achievable with 'l2'. alpha : float Constant that multiplies the regularization term. Defaults to 0.0001 l1_ratio : float The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1. l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1. Defaults to 0.15. fit_intercept : bool Whether the intercept should be estimated or not. If False, the data is assumed to be already centered. Defaults to True. n_iter : int, optional The number of passes over the training data (aka epochs). 
The number of iterations is set to 1 if using partial_fit.
    Defaults to 5.

shuffle : bool, optional
    Whether or not the training data should be shuffled after each epoch.
    Defaults to True.

random_state : int seed, RandomState instance, or None (default)
    The seed of the pseudo random number generator to use when
    shuffling the data.

verbose : integer, optional
    The verbosity level.

epsilon : float
    Epsilon in the epsilon-insensitive loss functions; only if `loss` is
    'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
    For 'huber', determines the threshold at which it becomes less
    important to get the prediction exactly right.
    For epsilon-insensitive, any differences between the current prediction
    and the correct label are ignored if they are less than this threshold.

learning_rate : string, optional
    The learning rate:

    constant: eta = eta0
    optimal: eta = 1.0/(alpha * t)
    invscaling: eta = eta0 / pow(t, power_t) [default]

eta0 : double, optional
    The initial learning rate [default 0.01].

power_t : double, optional
    The exponent for inverse scaling learning rate [default 0.25].

warm_start : bool, optional
    When set to True, reuse the solution of the previous call to fit as
    initialization, otherwise, just erase the previous solution.

average : bool or int, optional
    When set to True, computes the averaged SGD weights and stores the
    result in the ``coef_`` attribute. If set to an int greater than 1,
    averaging will begin once the total number of samples seen reaches
    average. So ``average=10`` will begin averaging after seeing 10
    samples.

Attributes
----------
coef_ : array, shape (n_features,)
    Weights assigned to the features.

intercept_ : array, shape (1,)
    The intercept term.

average_coef_ : array, shape (n_features,)
    Averaged weights assigned to the features.

average_intercept_ : array, shape (1,)
    The averaged intercept term.
Examples -------- >>> import numpy as np >>> from sklearn import linear_model >>> n_samples, n_features = 10, 5 >>> np.random.seed(0) >>> y = np.random.randn(n_samples) >>> X = np.random.randn(n_samples, n_features) >>> clf = linear_model.SGDRegressor() >>> clf.fit(X, y) ... #doctest: +NORMALIZE_WHITESPACE SGDRegressor(alpha=0.0001, average=False, epsilon=0.1, eta0=0.01, fit_intercept=True, l1_ratio=0.15, learning_rate='invscaling', loss='squared_loss', n_iter=5, penalty='l2', power_t=0.25, random_state=None, shuffle=True, verbose=0, warm_start=False) See also -------- Ridge, ElasticNet, Lasso, SVR """ def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001, l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True, verbose=0, epsilon=DEFAULT_EPSILON, random_state=None, learning_rate="invscaling", eta0=0.01, power_t=0.25, warm_start=False, average=False): super(SGDRegressor, self).__init__(loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio, fit_intercept=fit_intercept, n_iter=n_iter, shuffle=shuffle, verbose=verbose, epsilon=epsilon, random_state=random_state, learning_rate=learning_rate, eta0=eta0, power_t=power_t, warm_start=warm_start, average=average)
bsd-3-clause
danche354/Sequence-Labeling
ner/senna-hash-2-pos-chunk-gazetteer-128-64.py
1
7761
# Train a bidirectional-LSTM NER tagger on CoNLL eng.train, validating each
# epoch on eng.testa.  Per-token inputs: senna word embedding, hash
# (auto-encoder) embedding, one-hot POS, one-hot chunk, and gazetteer
# features.  The best epoch (by dev token accuracy) is tracked and every
# epoch's model is saved under model/<script-name>/.
#
# Fixes vs. the previous revision:
#   * the log file handle was opened with 'w' but never closed (leak, and
#     the tail of the log could stay unflushed) -- log.close() added;
#   * local variable `epcoh_accuracy` renamed to `epoch_accuracy` (typo).
from keras.models import Model
from keras.layers import Input, Masking, Dense, LSTM
from keras.layers import Dropout, TimeDistributed, Bidirectional, merge
from keras.layers.embeddings import Embedding
from keras.utils import np_utils

import numpy as np
import pandas as pd

import sys
import math
import os
from datetime import datetime

# add path so the shared project helpers in ../tools can be imported
sys.path.append('../')
sys.path.append('../tools')

from tools import conf
from tools import load_data
from tools import prepare
from tools import plot

np.random.seed(0)  # reproducible shuffling / embedding padding rows

# train hyperparameters (all centralised in tools.conf)
step_length = conf.ner_step_length
pos_length = conf.ner_pos_length
chunk_length = conf.ner_chunk_length
gazetteer_length = conf.gazetteer_length

emb_vocab = conf.senna_vocab
emb_length = conf.senna_length

hash_vocab = conf.ner_hash_vocab
hash_length = conf.ner_hash_length

output_length = conf.ner_IOB_length

batch_size = conf.batch_size
nb_epoch = conf.nb_epoch

# one output folder per script, named after this file (strip '.py')
model_name = os.path.basename(__file__)[:-3]
folder_path = 'model/%s'%model_name
if not os.path.isdir(folder_path):
    os.makedirs(folder_path)

# the data, shuffled and split between train and dev sets
train_data = load_data.load_ner(dataset='eng.train')
dev_data = load_data.load_ner(dataset='eng.testa')

train_samples = len(train_data)
dev_samples = len(dev_data)
print('train shape:', train_samples)
print('dev shape:', dev_samples)
print()

# word embeddings: prepend an all-zero row (mask/padding index 0) and append
# a random row for out-of-vocabulary tokens -- hence vocab+2 below
word_embedding = pd.read_csv('../preprocessing/senna/embeddings.txt', delimiter=' ', header=None)
word_embedding = word_embedding.values
word_embedding = np.concatenate([np.zeros((1,emb_length)),word_embedding, np.random.uniform(-1,1,(1,emb_length))])

# hash embeddings from the auto-encoder, same padding/OOV convention
hash_embedding = pd.read_csv('../preprocessing/ner-auto-encoder-2/auto-encoder-embeddings.txt', delimiter=' ', header=None)
hash_embedding = hash_embedding.values
hash_embedding = np.concatenate([np.zeros((1,hash_length)),hash_embedding, np.random.rand(1,hash_length)])

# ---- model graph -------------------------------------------------------
embed_index_input = Input(shape=(step_length,))
embedding = Embedding(emb_vocab+2, emb_length, weights=[word_embedding],
                      mask_zero=True, input_length=step_length)(embed_index_input)

hash_index_input = Input(shape=(step_length,))
encoder_embedding = Embedding(hash_vocab+2, hash_length, weights=[hash_embedding],
                              mask_zero=True, input_length=step_length)(hash_index_input)

pos_input = Input(shape=(step_length, pos_length))
chunk_input = Input(shape=(step_length, chunk_length))
gazetteer_input = Input(shape=(step_length, gazetteer_length))

# concatenate all five feature streams per token
senna_hash_pos_chunk_gazetteer_merge = merge([embedding, encoder_embedding, pos_input, chunk_input, gazetteer_input], mode='concat')
input_mask = Masking(mask_value=0)(senna_hash_pos_chunk_gazetteer_merge)
dp_1 = Dropout(0.5)(input_mask)
hidden_1 = Bidirectional(LSTM(128, return_sequences=True))(dp_1)
hidden_2 = Bidirectional(LSTM(64, return_sequences=True))(hidden_1)
dp_2 = Dropout(0.5)(hidden_2)
output = TimeDistributed(Dense(output_length, activation='softmax'))(dp_2)
model = Model(input=[embed_index_input,hash_index_input,pos_input,chunk_input, gazetteer_input], output=output)

model.compile(loss='categorical_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])

print(model.summary())

number_of_train_batches = int(math.ceil(float(train_samples)/batch_size))
number_of_dev_batches = int(math.ceil(float(dev_samples)/batch_size))

# ---- training loop -----------------------------------------------------
print('start train %s ...\n'%model_name)

best_accuracy = 0
best_epoch = 0
all_train_loss = []
all_dev_loss = []
all_dev_accuracy = []

log = open('%s/model_log.txt'%folder_path, 'w')

start_time = datetime.now()
print('train start at %s\n'%str(start_time))
log.write('train start at %s\n\n'%str(start_time))
for epoch in range(nb_epoch):
    start = datetime.now()
    print('-'*60)
    print('epoch %d start at %s'%(epoch, str(start)))
    log.write('-'*60+'\n')
    log.write('epoch %d start at %s\n'%(epoch, str(start)))

    train_loss = 0
    dev_loss = 0

    np.random.shuffle(train_data)

    for i in range(number_of_train_batches):
        train_batch = train_data[i*batch_size: (i+1)*batch_size]
        embed_index, hash_index, pos, chunk, label, length, sentence = prepare.prepare_ner(batch=train_batch, gram='bi')
        # pad the one-hot POS/chunk matrices out to step_length rows
        pos = np.array([(np.concatenate([np_utils.to_categorical(p, pos_length), np.zeros((step_length-length[l], pos_length))])) for l,p in enumerate(pos)])
        chunk = np.array([(np.concatenate([np_utils.to_categorical(c, chunk_length), np.zeros((step_length-length[l], chunk_length))])) for l,c in enumerate(chunk)])

        gazetteer, length_2 = prepare.prepare_gazetteer(batch=train_batch)
        gazetteer = np.array([(np.concatenate([a, np.zeros((step_length-length_2[l], gazetteer_length))])) for l,a in enumerate(gazetteer)])

        y = np.array([np_utils.to_categorical(each, output_length) for each in label])

        train_metrics = model.train_on_batch([embed_index, hash_index, pos, chunk, gazetteer], y)
        train_loss += train_metrics[0]
    all_train_loss.append(train_loss)

    correct_predict = 0
    all_predict = 0

    for j in range(number_of_dev_batches):
        dev_batch = dev_data[j*batch_size: (j+1)*batch_size]
        embed_index, hash_index, pos, chunk, label, length, sentence = prepare.prepare_ner(batch=dev_batch, gram='bi')
        pos = np.array([(np.concatenate([np_utils.to_categorical(p, pos_length), np.zeros((step_length-length[l], pos_length))])) for l,p in enumerate(pos)])
        chunk = np.array([(np.concatenate([np_utils.to_categorical(c, chunk_length), np.zeros((step_length-length[l], chunk_length))])) for l,c in enumerate(chunk)])

        gazetteer, length_2 = prepare.prepare_gazetteer(batch=dev_batch)
        gazetteer = np.array([(np.concatenate([a, np.zeros((step_length-length_2[l], gazetteer_length))])) for l,a in enumerate(gazetteer)])

        y = np.array([np_utils.to_categorical(each, output_length) for each in label])

        # for loss
        dev_metrics = model.test_on_batch([embed_index, hash_index, pos, chunk, gazetteer], y)
        dev_loss += dev_metrics[0]

        # for accuracy: count correct tags only over the real (unpadded)
        # tokens of each sentence
        prob = model.predict_on_batch([embed_index, hash_index, pos, chunk, gazetteer])
        for i, l in enumerate(length):
            predict_label = np_utils.categorical_probas_to_classes(prob[i])
            correct_predict += np.sum(predict_label[:l]==label[i][:l])
        all_predict += np.sum(length)

    epoch_accuracy = float(correct_predict)/all_predict
    all_dev_accuracy.append(epoch_accuracy)
    all_dev_loss.append(dev_loss)

    if epoch_accuracy>=best_accuracy:
        best_accuracy = epoch_accuracy
        best_epoch = epoch

    end = datetime.now()

    # every epoch's weights are kept; best_epoch records which to prefer
    model.save('%s/model_epoch_%d.h5'%(folder_path, epoch), overwrite=True)

    print('epoch %d end at %s'%(epoch, str(end)))
    print('epoch %d train loss: %f'%(epoch, train_loss))
    print('epoch %d dev loss: %f'%(epoch, dev_loss))
    print('epoch %d dev accuracy: %f'%(epoch, epoch_accuracy))
    print('best epoch now: %d\n'%best_epoch)

    log.write('epoch %d end at %s\n'%(epoch, str(end)))
    log.write('epoch %d train loss: %f\n'%(epoch, train_loss))
    log.write('epoch %d dev loss: %f\n'%(epoch, dev_loss))
    log.write('epoch %d dev accuracy: %f\n'%(epoch, epoch_accuracy))
    log.write('best epoch now: %d\n\n'%best_epoch)

end_time = datetime.now()
print('train end at %s\n'%str(end_time))
log.write('train end at %s\n\n'%str(end_time))

timedelta = end_time - start_time
print('train cost time: %s\n'%str(timedelta))
print('best epoch last: %d\n'%best_epoch)
log.write('train cost time: %s\n\n'%str(timedelta))
log.write('best epoch last: %d\n\n'%best_epoch)
log.close()  # fix: handle was previously leaked (log possibly unflushed)

plot.plot_loss(all_train_loss, all_dev_loss, folder_path=folder_path, title='%s'%model_name)
plot.plot_accuracy(all_dev_accuracy, folder_path=folder_path, title='%s'%model_name)
mit
sahg/PyTOPKAPI
pytopkapi/results_analysis/plot_Qsim_Qobs_Rain.py
2
3658
import datetime as dt
# SafeConfigParser is a deprecated alias of ConfigParser in Python 3
from configparser import ConfigParser

import h5py
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.dates import date2num

import pytopkapi.utils as ut

def run(ini_file='plot_Qsim_Qobs_Rain.ini'):
    """Plot simulated vs. observed flow, optionally with mean rainfall.

    Reads file locations and plotting flags from `ini_file`, draws the
    simulated hydrograph (and the observed one if the ``Qobs`` flag is
    set), optionally overlays the mean catchment rainfall on an inverted
    secondary axis (``Pobs`` flag) and annotates the Nash efficiency
    (``nash`` flag).  The figure is written to the configured image file
    and shown on screen.

    Bug fix: rainfall is now read and drawn only when ``Pobs`` is True.
    Previously the secondary axis was built unconditionally, so a config
    with ``Pobs = False`` crashed with a NameError on ``ar_rain``.
    """
    config = ConfigParser()
    config.read(ini_file)
    print('Read the file ', ini_file)

    file_Qsim = config.get('files', 'file_Qsim')
    file_Qobs = config.get('files', 'file_Qobs')
    file_rain = config.get('files', 'file_rain')
    image_out = config.get('files', 'image_out')

    group_name = config.get('groups', 'group_name')

    Qobs = config.getboolean('flags', 'Qobs')
    Pobs = config.getboolean('flags', 'Pobs')
    nash = config.getboolean('flags', 'nash')

    # line colours/styles: index 0 = model, index -1 = observation
    tab_col = ['k', 'r']
    tab_style = ['-', '-']
    tab_width = ['1', '1']
    color_P = 'b'
    transparency_P = 0.5  # (0 for invisible)

    # create path_out if it doesn't exist
    ut.check_file_exist(image_out)

    # Read the observed flow
    ar_date, ar_Qobs = read_observed_flow(file_Qobs)

    # bar width for the rainfall plot = one observation interval
    delta = date2num(ar_date[1]) - date2num(ar_date[0])

    # Rainfall: only read when it will actually be plotted
    ar_rain = None
    if Pobs:
        # explicit read-only mode; the context manager guarantees the file
        # is closed even if the dataset lookup fails
        with h5py.File(file_rain, 'r') as h5file:
            dset_string = '/%s/rainfall' % group_name
            ndar_rain = h5file[dset_string][...]
        # Compute the mean catchment rainfall
        ar_rain = np.average(ndar_rain, axis=1)

    # Read the simulated data Q (first channel cell, skip initial value)
    file_h5 = file_Qsim
    ndar_Qc_out = ut.read_one_array_hdf(file_h5, 'Channel', 'Qc_out')
    ar_Qsim = ndar_Qc_out[1:, 0]

    ## Graph
    fig, ax = plt.subplots()

    lines = []
    tab_leg = []
    if Qobs:
        lines += ax.plot(ar_date, ar_Qobs,
                         color=tab_col[-1],
                         linestyle=tab_style[-1], linewidth=tab_width[-1])
        tab_leg.append(('Observation'))
        tab_leg = tab_leg[::-1]

    lines += ax.plot(ar_date, ar_Qsim,
                     color=tab_col[0],
                     linestyle=tab_style[0], linewidth=tab_width[0])
    tab_leg.append('Model')

    if nash:
        nash_value = ut.Nash(ar_Qsim, ar_Qobs)
        # invisible line so the efficiency value appears in the legend
        lines += ax.plot(ar_date[0:1], ar_Qsim[0:1], 'w:')
        tab_leg.append(('Eff = ' + str(nash_value)[0:5]))

    ax.set_xlim(ar_date[0], ar_date[-1])
    ytitle = r'$Q \ (m^3/s)$'
    ax.set_ylabel(ytitle, fontsize=18)
    ax.set_title(group_name)

    # Rainfall on an inverted twin axis (bars hang from the top).
    legend_ax = ax
    if Pobs:
        ax2 = ax.twinx()
        ax2.set_ylabel(r'$Rainfall \ (mm)$', fontsize=18, color=color_P)
        ax2.bar(ar_date, ar_rain, width=delta,
                facecolor='blue', edgecolor='blue', alpha=transparency_P)
        ax2.set_ylim(max(ar_rain) * 2, min(ar_rain))
        legend_ax = ax2  # keep the legend on the topmost axes

    legend_ax.legend(lines, tab_leg, loc='upper right', fancybox=True)
    leg = legend_ax.get_legend()
    leg.get_frame().set_alpha(0.75)

    # rotate and align the tick labels so they look better,
    # unfortunately autofmt_xdate doesn't work with twinx due to a bug
    # in matplotlib <= 1.0.0 so we do it manually
    bottom = 0.2
    rotation = 30
    ha = 'right'
    for axis in fig.get_axes():
        # NOTE(review): Axes.is_last_row was removed in recent matplotlib;
        # the hasattr guard keeps old behaviour where available
        if hasattr(axis, 'is_last_row') and axis.is_last_row():
            for label in axis.get_xticklabels():
                label.set_ha(ha)
                label.set_rotation(rotation)
        else:
            for label in axis.get_xticklabels():
                label.set_visible(False)
            axis.set_xlabel('')

    fig.subplots_adjust(bottom=bottom)

    fig.savefig(image_out)
    plt.show()

def read_observed_flow(file_name):
    """Read the observed flow from a data file.

    Columns 0-4 are year, month, day, hour, minute; column 5 is the flow.
    Returns ``(dates, Q)`` where `dates` is a list of datetime objects.
    """
    # `dtype=int` replaces the removed NumPy alias `np.int` (NumPy >= 1.24)
    date = np.loadtxt(file_name, dtype=int, usecols=(0, 1, 2, 3, 4))
    dates = [dt.datetime(yr, mon, dy, hr, mn) for yr, mon, dy, hr, mn in date]

    Q = np.loadtxt(file_name, usecols=(5,))

    return dates, Q
bsd-3-clause
neilhan/python_cv_learning
05-segment_grabcut/run_me.py
1
7745
#!/usr/bin/env python # -*- coding: utf-8 -*- """ This code testing the segmenting images using GrabCut method. This code testing the segmenting images using GrabCut method. Author: Neil Han ! This sample code is a copy from https://github.com/Itseez/opencv/blob/master/samples/python2/grabcut.py @ 2013-11-29 The original code was created by abidrahmank, and SpecLad """ from __future__ import absolute_import, division, \ print_function, unicode_literals # import ipdb; ipdb.set_trace() ; # debugging------- import sys import logging import os import numpy as np import scipy.sparse as sp import cv2 cv2.namedWindow('GetArroundASegmentationFailure', 0) cv2.destroyWindow('GetArroundASegmentationFailure') import matplotlib.pyplot as plt import ava.utl import ava.cv.utl # this is the model object, carring the state, image etc model = None class GrabcutModel(object): RED = [0, 0, 255] GREEN = [0, 255, 0] BLUE = [255, 0, 0] WHITE = [255, 255, 255] BLACK = [0, 0, 0] DRAW_BG = {'color': BLACK, 'val': 0} DRAW_FG = {'color': WHITE, 'val': 1} DRAW_PR_BG = {'color': RED, 'val': 2} DRAW_PR_FG = {'color': GREEN, 'val': 3} def reset(self): self.rectangle = False self.rect = (0, 0, 1, 1) self.drawing = False self.rect_over = False self.rect_or_mask = 100 self.value = GrabcutModel.DRAW_FG # drawing brush init to FG self.thickness = 3 self.img = self.img_backup.copy() self.mask = np.zeros(self.img.shape[:2], dtype=np.uint8) self.output = np.zeros(self.img.shape, dtype=np.uint8) self.ix = 0 self.iy = 0 def __init__(self, img): super(GrabcutModel, self).__init__() self.img_backup = img # self.reset() self.rectangle = False self.rect = (0, 0, 1, 1) self.drawing = False self.rect_over = False self.rect_or_mask = 100 self.value = GrabcutModel.DRAW_FG # drawing brush init to FG self.thickness = 3 self.img = self.img_backup.copy() self.mask = np.zeros(self.img.shape[:2], dtype=np.uint8) self.output = np.zeros(self.img.shape, dtype=np.uint8) self.ix = 0 self.iy = 0 def get_output_img(self): bar 
= np.zeros((self.img.shape[0], 5, 3), np.uint8) result = np.hstack((self.img_backup, bar, self.img, bar, self.img_result)) return result def onmouse(event, x, y, flags, param): global model # Draw rectangle if event == cv2.EVENT_RBUTTONDOWN: model.rectangle = True model.ix, model.iy = x, y elif event == cv2.EVENT_MOUSEMOVE: # draw a new rectangle if model.rectangle == True: model.img = model.img_backup.copy() cv2.rectangle(model.img, (model.ix, model.iy), (x, y), model.BLUE, 2) model.rect_or_mask = 0 elif event == cv2.EVENT_RBUTTONUP: model.rectangle = False model.rect_over = True cv2.rectangle(model.img, (model.ix, model.iy), (x, y), model.BLUE, 2) model.rect = (model.ix, model.iy, abs(model.ix - x), abs(model.iy - y)) model.rect_or_mask = 0 print('Press "n" a few times until no further change.') # draw touchup curves if event == cv2.EVENT_LBUTTONDOWN: if model.rect_over == False: print('Please use your mouse right button to draw a rectangle.') else: model.drawing = True # draw a dot cv2.circle(model.img, (x, y), model.thickness, model.value['color'], -1) cv2.circle(model.mask, (x, y), model.thickness, model.value['val'], -1) elif event == cv2.EVENT_MOUSEMOVE: if model.drawing == True: # draw a dot cv2.circle(model.img, (x, y), model.thickness, model.value['color'], -1) cv2.circle(model.mask, (x, y), model.thickness, model.value['val'], -1) elif event == cv2.EVENT_LBUTTONUP: if model.drawing == True: model.drawing = False # draw a dot cv2.circle(model.img, (x, y), model.thickness, model.value['color'], -1) cv2.circle(model.mask, (x, y), model.thickness, model.value['val'], -1) # /// end onmouse @ava.utl.time_this def main(argv=None): global model if argv is None: argv = sys.argv # logger ava.utl.setup_logging() logger = logging.getLogger(__name__).getChild('main') logger.debug('starting main.') print('''Keys: 'ESC' to exit 0 to mark sure background with mouse button 1 to mark sure backgroud 2 to do PR_BG drawing, probable background 3 to do PR_FG drawing, probable 
foreground n to update the segmentation r to reset the setup s to save the results to ./test_output.png''') img_root_path = '../images' if len(argv) >= 2: img_file = argv[1] # use provided file else: print('Use default image.') img_file = os.path.join(img_root_path, 'dog2.jpg') # 'tiger.jpg') img = cv2.imread(img_file, cv2.CV_LOAD_IMAGE_COLOR) cv2.namedWindow('output') cv2.namedWindow('input') # cv2.namedWindow('mask') # cv2.namedWindow('mask2') cv2.setMouseCallback('input', onmouse) cv2.moveWindow('input', img.shape[1] + 10, 90) model = GrabcutModel(img) # init the model # start the main loop while(1): cv2.imshow('output', model.output) cv2.imshow('input', model.img) k = 0xFF & cv2.waitKey(10) if k == 27: # esc exit break elif k == ord('0'): # draw with BG print('Mark background regions with left mouse button.') model.value = GrabcutModel.DRAW_BG elif k == ord('1'): # draw with FG print('Mark foreground regions with left mouse button.') model.value = GrabcutModel.DRAW_FG elif k == ord('2'): # draw with PR_BG print('Mark probable background regions with left mouse button.') model.value = GrabcutModel.DRAW_PR_BG elif k == ord('3'): # draw with PR_FG print('Mark probable foreground regions with left mouse button.') model.value = GrabcutModel.DRAW_PR_FG elif k == ord('r'): # reset model print('Rest segments.') model.reset() elif k == ord('s'): # save result = model.get_output_img() cv2.imwrite('grabcut_output.png', result) elif k == ord('n'): # segment the image print('For finer touchups, mark foreground and background \ after pressing keys 0-3 and press "n"') if model.rect_or_mask == 0: # grabcut with rect bgdmodel = np.zeros((1, 65), np.float64) fgdmodel = np.zeros((1, 65), np.float64) cv2.grabCut(model.img_backup, model.mask, model.rect, bgdmodel, fgdmodel, 1, cv2.GC_INIT_WITH_RECT) model.rect_or_mask = 1 elif model.rect_or_mask == 1: # grabcut with mask bgdmodel = np.zeros((1, 65), np.float64) fgdmodel = np.zeros((1, 65), np.float64) cv2.grabCut(model.img_backup, 
model.mask, model.rect, bgdmodel, fgdmodel, 1, cv2.GC_INIT_WITH_MASK) model.mask2 = np.where((model.mask == 1) + (model.mask == 3), 255, 0).astype('uint8') # cv2.imshow('mask2', model.mask2) # cv2.imshow('mask', model.mask) model.output = cv2.bitwise_and(model.img_backup, model.img_backup, mask=model.mask2) # /// while loop ends cv2.destroyAllWindows() exit() # =================== if __name__ == "__main__": main()
bsd-3-clause
pratapvardhan/scikit-learn
sklearn/gaussian_process/gpc.py
42
31571
"""Gaussian processes classification.""" # Authors: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de> # # License: BSD 3 clause import warnings from operator import itemgetter import numpy as np from scipy.linalg import cholesky, cho_solve, solve from scipy.optimize import fmin_l_bfgs_b from scipy.special import erf from sklearn.base import BaseEstimator, ClassifierMixin, clone from sklearn.gaussian_process.kernels \ import RBF, CompoundKernel, ConstantKernel as C from sklearn.utils.validation import check_X_y, check_is_fitted, check_array from sklearn.utils import check_random_state from sklearn.preprocessing import LabelEncoder from sklearn.multiclass import OneVsRestClassifier, OneVsOneClassifier # Values required for approximating the logistic sigmoid by # error functions. coefs are obtained via: # x = np.array([0, 0.6, 2, 3.5, 4.5, np.inf]) # b = logistic(x) # A = (erf(np.dot(x, self.lambdas)) + 1) / 2 # coefs = lstsq(A, b)[0] LAMBDAS = np.array([0.41, 0.4, 0.37, 0.44, 0.39])[:, np.newaxis] COEFS = np.array([-1854.8214151, 3516.89893646, 221.29346712, 128.12323805, -2010.49422654])[:, np.newaxis] class _BinaryGaussianProcessClassifierLaplace(BaseEstimator): """Binary Gaussian process classification based on Laplace approximation. The implementation is based on Algorithm 3.1, 3.2, and 5.1 of ``Gaussian Processes for Machine Learning'' (GPML) by Rasmussen and Williams. Internally, the Laplace approximation is used for approximating the non-Gaussian posterior by a Gaussian. Currently, the implementation is restricted to using the logistic link function. Parameters ---------- kernel : kernel object The kernel specifying the covariance function of the GP. If None is passed, the kernel "1.0 * RBF(1.0)" is used as default. Note that the kernel's hyperparameters are optimized during fitting. 
optimizer : string or callable, optional (default: "fmin_l_bfgs_b") Can either be one of the internally supported optimizers for optimizing the kernel's parameters, specified by a string, or an externally defined optimizer passed as a callable. If a callable is passed, it must have the signature:: def optimizer(obj_func, initial_theta, bounds): # * 'obj_func' is the objective function to be maximized, which # takes the hyperparameters theta as parameter and an # optional flag eval_gradient, which determines if the # gradient is returned additionally to the function value # * 'initial_theta': the initial value for theta, which can be # used by local optimizers # * 'bounds': the bounds on the values of theta .... # Returned are the best found hyperparameters theta and # the corresponding value of the target function. return theta_opt, func_min Per default, the 'fmin_l_bfgs_b' algorithm from scipy.optimize is used. If None is passed, the kernel's parameters are kept fixed. Available internal optimizers are:: 'fmin_l_bfgs_b' n_restarts_optimizer: int, optional (default: 0) The number of restarts of the optimizer for finding the kernel's parameters which maximize the log-marginal likelihood. The first run of the optimizer is performed from the kernel's initial parameters, the remaining ones (if any) from thetas sampled log-uniform randomly from the space of allowed theta-values. If greater than 0, all bounds must be finite. Note that n_restarts_optimizer=0 implies that one run is performed. max_iter_predict: int, optional (default: 100) The maximum number of iterations in Newton's method for approximating the posterior during predict. Smaller values will reduce computation time at the cost of worse results. warm_start : bool, optional (default: False) If warm-starts are enabled, the solution of the last Newton iteration on the Laplace approximation of the posterior mode is used as initialization for the next call of _posterior_mode(). 
This can speed up convergence when _posterior_mode is called several times on similar problems as in hyperparameter optimization. copy_X_train : bool, optional (default: True) If True, a persistent copy of the training data is stored in the object. Otherwise, just a reference to the training data is stored, which might cause predictions to change if the data is modified externally. random_state : integer or numpy.RandomState, optional The generator used to initialize the centers. If an integer is given, it fixes the seed. Defaults to the global numpy random number generator. Attributes ---------- X_train_ : array-like, shape = (n_samples, n_features) Feature values in training data (also required for prediction) y_train_: array-like, shape = (n_samples,) Target values in training data (also required for prediction) classes_ : array-like, shape = (n_classes,) Unique class labels. kernel_: kernel object The kernel used for prediction. The structure of the kernel is the same as the one passed as parameter but with optimized hyperparameters L_: array-like, shape = (n_samples, n_samples) Lower-triangular Cholesky decomposition of the kernel in X_train_ pi_: array-like, shape = (n_samples,) The probabilities of the positive class for the training points X_train_ W_sr_: array-like, shape = (n_samples,) Square root of W, the Hessian of log-likelihood of the latent function values for the observed labels. Since W is diagonal, only the diagonal of sqrt(W) is stored. 
log_marginal_likelihood_value_: float The log-marginal-likelihood of ``self.kernel_.theta`` """ def __init__(self, kernel=None, optimizer="fmin_l_bfgs_b", n_restarts_optimizer=0, max_iter_predict=100, warm_start=False, copy_X_train=True, random_state=None): self.kernel = kernel self.optimizer = optimizer self.n_restarts_optimizer = n_restarts_optimizer self.max_iter_predict = max_iter_predict self.warm_start = warm_start self.copy_X_train = copy_X_train self.random_state = random_state def fit(self, X, y): """Fit Gaussian process classification model Parameters ---------- X : array-like, shape = (n_samples, n_features) Training data y : array-like, shape = (n_samples,) Target values, must be binary Returns ------- self : returns an instance of self. """ if self.kernel is None: # Use an RBF kernel as default self.kernel_ = C(1.0, constant_value_bounds="fixed") \ * RBF(1.0, length_scale_bounds="fixed") else: self.kernel_ = clone(self.kernel) self.rng = check_random_state(self.random_state) self.X_train_ = np.copy(X) if self.copy_X_train else X # Encode class labels and check that it is a binary classification # problem label_encoder = LabelEncoder() self.y_train_ = label_encoder.fit_transform(y) self.classes_ = label_encoder.classes_ if self.classes_.size > 2: raise ValueError("%s supports only binary classification. 
" "y contains classes %s" % (self.__class__.__name__, self.classes_)) elif self.classes_.size == 1: raise ValueError("{0:s} requires 2 classes.".format( self.__class__.__name__)) if self.optimizer is not None and self.kernel_.n_dims > 0: # Choose hyperparameters based on maximizing the log-marginal # likelihood (potentially starting from several initial values) def obj_func(theta, eval_gradient=True): if eval_gradient: lml, grad = self.log_marginal_likelihood( theta, eval_gradient=True) return -lml, -grad else: return -self.log_marginal_likelihood(theta) # First optimize starting from theta specified in kernel optima = [self._constrained_optimization(obj_func, self.kernel_.theta, self.kernel_.bounds)] # Additional runs are performed from log-uniform chosen initial # theta if self.n_restarts_optimizer > 0: if not np.isfinite(self.kernel_.bounds).all(): raise ValueError( "Multiple optimizer restarts (n_restarts_optimizer>0) " "requires that all bounds are finite.") bounds = self.kernel_.bounds for iteration in range(self.n_restarts_optimizer): theta_initial = np.exp(self.rng.uniform(bounds[:, 0], bounds[:, 1])) optima.append( self._constrained_optimization(obj_func, theta_initial, bounds)) # Select result from run with minimal (negative) log-marginal # likelihood lml_values = list(map(itemgetter(1), optima)) self.kernel_.theta = optima[np.argmin(lml_values)][0] self.log_marginal_likelihood_value_ = -np.min(lml_values) else: self.log_marginal_likelihood_value_ = \ self.log_marginal_likelihood(self.kernel_.theta) # Precompute quantities required for predictions which are independent # of actual query points K = self.kernel_(self.X_train_) _, (self.pi_, self.W_sr_, self.L_, _, _) = \ self._posterior_mode(K, return_temporaries=True) return self def predict(self, X): """Perform classification on an array of test vectors X. 
Parameters ---------- X : array-like, shape = (n_samples, n_features) Returns ------- C : array, shape = (n_samples,) Predicted target values for X, values are from ``classes_`` """ check_is_fitted(self, ["X_train_", "y_train_", "pi_", "W_sr_", "L_"]) # As discussed on Section 3.4.2 of GPML, for making hard binary # decisions, it is enough to compute the MAP of the posterior and # pass it through the link function K_star = self.kernel_(self.X_train_, X) # K_star =k(x_star) f_star = K_star.T.dot(self.y_train_ - self.pi_) # Algorithm 3.2,Line 4 return np.where(f_star > 0, self.classes_[1], self.classes_[0]) def predict_proba(self, X): """Return probability estimates for the test vector X. Parameters ---------- X : array-like, shape = (n_samples, n_features) Returns ------- C : array-like, shape = (n_samples, n_classes) Returns the probability of the samples for each class in the model. The columns correspond to the classes in sorted order, as they appear in the attribute ``classes_``. """ check_is_fitted(self, ["X_train_", "y_train_", "pi_", "W_sr_", "L_"]) # Based on Algorithm 3.2 of GPML K_star = self.kernel_(self.X_train_, X) # K_star =k(x_star) f_star = K_star.T.dot(self.y_train_ - self.pi_) # Line 4 v = solve(self.L_, self.W_sr_[:, np.newaxis] * K_star) # Line 5 # Line 6 (compute np.diag(v.T.dot(v)) via einsum) var_f_star = self.kernel_.diag(X) - np.einsum("ij,ij->j", v, v) # Line 7: # Approximate \int log(z) * N(z | f_star, var_f_star) # Approximation is due to Williams & Barber, "Bayesian Classification # with Gaussian Processes", Appendix A: Approximate the logistic # sigmoid by a linear combination of 5 error functions. 
# For information on how this integral can be computed see # blitiri.blogspot.de/2012/11/gaussian-integral-of-error-function.html alpha = 1 / (2 * var_f_star) gamma = LAMBDAS * f_star integrals = np.sqrt(np.pi / alpha) \ * erf(gamma * np.sqrt(alpha / (alpha + LAMBDAS**2))) \ / (2 * np.sqrt(var_f_star * 2 * np.pi)) pi_star = (COEFS * integrals).sum(axis=0) + .5 * COEFS.sum() return np.vstack((1 - pi_star, pi_star)).T def log_marginal_likelihood(self, theta=None, eval_gradient=False): """Returns log-marginal likelihood of theta for training data. Parameters ---------- theta : array-like, shape = (n_kernel_params,) or None Kernel hyperparameters for which the log-marginal likelihood is evaluated. If None, the precomputed log_marginal_likelihood of ``self.kernel_.theta`` is returned. eval_gradient : bool, default: False If True, the gradient of the log-marginal likelihood with respect to the kernel hyperparameters at position theta is returned additionally. If True, theta must not be None. Returns ------- log_likelihood : float Log-marginal likelihood of theta for training data. log_likelihood_gradient : array, shape = (n_kernel_params,), optional Gradient of the log-marginal likelihood with respect to the kernel hyperparameters at position theta. Only returned when eval_gradient is True. 
""" if theta is None: if eval_gradient: raise ValueError( "Gradient can only be evaluated for theta!=None") return self.log_marginal_likelihood_value_ kernel = self.kernel_.clone_with_theta(theta) if eval_gradient: K, K_gradient = kernel(self.X_train_, eval_gradient=True) else: K = kernel(self.X_train_) # Compute log-marginal-likelihood Z and also store some temporaries # which can be reused for computing Z's gradient Z, (pi, W_sr, L, b, a) = \ self._posterior_mode(K, return_temporaries=True) if not eval_gradient: return Z # Compute gradient based on Algorithm 5.1 of GPML d_Z = np.empty(theta.shape[0]) # XXX: Get rid of the np.diag() in the next line R = W_sr[:, np.newaxis] * cho_solve((L, True), np.diag(W_sr)) # Line 7 C = solve(L, W_sr[:, np.newaxis] * K) # Line 8 # Line 9: (use einsum to compute np.diag(C.T.dot(C)))) s_2 = -0.5 * (np.diag(K) - np.einsum('ij, ij -> j', C, C)) \ * (pi * (1 - pi) * (1 - 2 * pi)) # third derivative for j in range(d_Z.shape[0]): C = K_gradient[:, :, j] # Line 11 # Line 12: (R.T.ravel().dot(C.ravel()) = np.trace(R.dot(C))) s_1 = .5 * a.T.dot(C).dot(a) - .5 * R.T.ravel().dot(C.ravel()) b = C.dot(self.y_train_ - pi) # Line 13 s_3 = b - K.dot(R.dot(b)) # Line 14 d_Z[j] = s_1 + s_2.T.dot(s_3) # Line 15 return Z, d_Z def _posterior_mode(self, K, return_temporaries=False): """Mode-finding for binary Laplace GPC and fixed kernel. This approximates the posterior of the latent function values for given inputs and target observations with a Gaussian approximation and uses Newton's iteration to find the mode of this approximation. 
""" # Based on Algorithm 3.1 of GPML # If warm_start are enabled, we reuse the last solution for the # posterior mode as initialization; otherwise, we initialize with 0 if self.warm_start and hasattr(self, "f_cached") \ and self.f_cached.shape == self.y_train_.shape: f = self.f_cached else: f = np.zeros_like(self.y_train_, dtype=np.float64) # Use Newton's iteration method to find mode of Laplace approximation log_marginal_likelihood = -np.inf for _ in range(self.max_iter_predict): # Line 4 pi = 1 / (1 + np.exp(-f)) W = pi * (1 - pi) # Line 5 W_sr = np.sqrt(W) W_sr_K = W_sr[:, np.newaxis] * K B = np.eye(W.shape[0]) + W_sr_K * W_sr L = cholesky(B, lower=True) # Line 6 b = W * f + (self.y_train_ - pi) # Line 7 a = b - W_sr * cho_solve((L, True), W_sr_K.dot(b)) # Line 8 f = K.dot(a) # Line 10: Compute log marginal likelihood in loop and use as # convergence criterion lml = -0.5 * a.T.dot(f) \ - np.log(1 + np.exp(-(self.y_train_ * 2 - 1) * f)).sum() \ - np.log(np.diag(L)).sum() # Check if we have converged (log marginal likelihood does # not decrease) # XXX: more complex convergence criterion if lml - log_marginal_likelihood < 1e-10: break log_marginal_likelihood = lml self.f_cached = f # Remember solution for later warm-starts if return_temporaries: return log_marginal_likelihood, (pi, W_sr, L, b, a) else: return log_marginal_likelihood def _constrained_optimization(self, obj_func, initial_theta, bounds): if self.optimizer == "fmin_l_bfgs_b": theta_opt, func_min, convergence_dict = \ fmin_l_bfgs_b(obj_func, initial_theta, bounds=bounds) if convergence_dict["warnflag"] != 0: warnings.warn("fmin_l_bfgs_b terminated abnormally with the " " state: %s" % convergence_dict) elif callable(self.optimizer): theta_opt, func_min = \ self.optimizer(obj_func, initial_theta, bounds=bounds) else: raise ValueError("Unknown optimizer %s." 
% self.optimizer) return theta_opt, func_min class GaussianProcessClassifier(BaseEstimator, ClassifierMixin): """Gaussian process classification (GPC) based on Laplace approximation. The implementation is based on Algorithm 3.1, 3.2, and 5.1 of Gaussian Processes for Machine Learning (GPML) by Rasmussen and Williams. Internally, the Laplace approximation is used for approximating the non-Gaussian posterior by a Gaussian. Currently, the implementation is restricted to using the logistic link function. For multi-class classification, several binary one-versus rest classifiers are fitted. Note that this class thus does not implement a true multi-class Laplace approximation. Parameters ---------- kernel : kernel object The kernel specifying the covariance function of the GP. If None is passed, the kernel "1.0 * RBF(1.0)" is used as default. Note that the kernel's hyperparameters are optimized during fitting. optimizer : string or callable, optional (default: "fmin_l_bfgs_b") Can either be one of the internally supported optimizers for optimizing the kernel's parameters, specified by a string, or an externally defined optimizer passed as a callable. If a callable is passed, it must have the signature:: def optimizer(obj_func, initial_theta, bounds): # * 'obj_func' is the objective function to be maximized, which # takes the hyperparameters theta as parameter and an # optional flag eval_gradient, which determines if the # gradient is returned additionally to the function value # * 'initial_theta': the initial value for theta, which can be # used by local optimizers # * 'bounds': the bounds on the values of theta .... # Returned are the best found hyperparameters theta and # the corresponding value of the target function. return theta_opt, func_min Per default, the 'fmin_l_bfgs_b' algorithm from scipy.optimize is used. If None is passed, the kernel's parameters are kept fixed. 
Available internal optimizers are:: 'fmin_l_bfgs_b' n_restarts_optimizer: int, optional (default: 0) The number of restarts of the optimizer for finding the kernel's parameters which maximize the log-marginal likelihood. The first run of the optimizer is performed from the kernel's initial parameters, the remaining ones (if any) from thetas sampled log-uniform randomly from the space of allowed theta-values. If greater than 0, all bounds must be finite. Note that n_restarts_optimizer=0 implies that one run is performed. max_iter_predict: int, optional (default: 100) The maximum number of iterations in Newton's method for approximating the posterior during predict. Smaller values will reduce computation time at the cost of worse results. warm_start : bool, optional (default: False) If warm-starts are enabled, the solution of the last Newton iteration on the Laplace approximation of the posterior mode is used as initialization for the next call of _posterior_mode(). This can speed up convergence when _posterior_mode is called several times on similar problems as in hyperparameter optimization. copy_X_train : bool, optional (default: True) If True, a persistent copy of the training data is stored in the object. Otherwise, just a reference to the training data is stored, which might cause predictions to change if the data is modified externally. random_state : integer or numpy.RandomState, optional The generator used to initialize the centers. If an integer is given, it fixes the seed. Defaults to the global numpy random number generator. multi_class: string, default: "one_vs_rest" Specifies how multi-class classification problems are handled. Supported are "one_vs_rest" and "one_vs_one". In "one_vs_rest", one binary Gaussian process classifier is fitted for each class, which is trained to separate this class from the rest. In "one_vs_one", one binary Gaussian process classifier is fitted for each pair of classes, which is trained to separate these two classes. 
The predictions of these binary predictors are combined into multi-class predictions. Note that "one_vs_one" does not support predicting probability estimates. n_jobs : int, optional, default: 1 The number of jobs to use for the computation. If -1 all CPUs are used. If 1 is given, no parallel computing code is used at all, which is useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one are used. Attributes ---------- kernel_ : kernel object The kernel used for prediction. In case of binary classification, the structure of the kernel is the same as the one passed as parameter but with optimized hyperparameters. In case of multi-class classification, a CompoundKernel is returned which consists of the different kernels used in the one-versus-rest classifiers. log_marginal_likelihood_value_: float The log-marginal-likelihood of ``self.kernel_.theta`` classes_ : array-like, shape = (n_classes,) Unique class labels. n_classes_ : int The number of classes in the training data """ def __init__(self, kernel=None, optimizer="fmin_l_bfgs_b", n_restarts_optimizer=0, max_iter_predict=100, warm_start=False, copy_X_train=True, random_state=None, multi_class="one_vs_rest", n_jobs=1): self.kernel = kernel self.optimizer = optimizer self.n_restarts_optimizer = n_restarts_optimizer self.max_iter_predict = max_iter_predict self.warm_start = warm_start self.copy_X_train = copy_X_train self.random_state = random_state self.multi_class = multi_class self.n_jobs = n_jobs def fit(self, X, y): """Fit Gaussian process classification model Parameters ---------- X : array-like, shape = (n_samples, n_features) Training data y : array-like, shape = (n_samples,) Target values, must be binary Returns ------- self : returns an instance of self. 
""" X, y = check_X_y(X, y, multi_output=False) self.base_estimator_ = _BinaryGaussianProcessClassifierLaplace( self.kernel, self.optimizer, self.n_restarts_optimizer, self.max_iter_predict, self.warm_start, self.copy_X_train, self.random_state) self.classes_ = np.unique(y) self.n_classes_ = self.classes_.size if self.n_classes_ == 1: raise ValueError("GaussianProcessClassifier requires 2 or more " "distinct classes. Only class %s present." % self.classes_[0]) if self.n_classes_ > 2: if self.multi_class == "one_vs_rest": self.base_estimator_ = \ OneVsRestClassifier(self.base_estimator_, n_jobs=self.n_jobs) elif self.multi_class == "one_vs_one": self.base_estimator_ = \ OneVsOneClassifier(self.base_estimator_, n_jobs=self.n_jobs) else: raise ValueError("Unknown multi-class mode %s" % self.multi_class) self.base_estimator_.fit(X, y) if self.n_classes_ > 2: self.log_marginal_likelihood_value_ = np.mean( [estimator.log_marginal_likelihood() for estimator in self.base_estimator_.estimators_]) else: self.log_marginal_likelihood_value_ = \ self.base_estimator_.log_marginal_likelihood() return self def predict(self, X): """Perform classification on an array of test vectors X. Parameters ---------- X : array-like, shape = (n_samples, n_features) Returns ------- C : array, shape = (n_samples,) Predicted target values for X, values are from ``classes_`` """ check_is_fitted(self, ["classes_", "n_classes_"]) X = check_array(X) return self.base_estimator_.predict(X) def predict_proba(self, X): """Return probability estimates for the test vector X. Parameters ---------- X : array-like, shape = (n_samples, n_features) Returns ------- C : array-like, shape = (n_samples, n_classes) Returns the probability of the samples for each class in the model. The columns correspond to the classes in sorted order, as they appear in the attribute `classes_`. 
""" check_is_fitted(self, ["classes_", "n_classes_"]) if self.n_classes_ > 2 and self.multi_class == "one_vs_one": raise ValueError("one_vs_one multi-class mode does not support " "predicting probability estimates. Use " "one_vs_rest mode instead.") X = check_array(X) return self.base_estimator_.predict_proba(X) @property def kernel_(self): if self.n_classes_ == 2: return self.base_estimator_.kernel_ else: return CompoundKernel( [estimator.kernel_ for estimator in self.base_estimator_.estimators_]) def log_marginal_likelihood(self, theta=None, eval_gradient=False): """Returns log-marginal likelihood of theta for training data. In the case of multi-class classification, the mean log-marginal likelihood of the one-versus-rest classifiers are returned. Parameters ---------- theta : array-like, shape = (n_kernel_params,) or none Kernel hyperparameters for which the log-marginal likelihood is evaluated. In the case of multi-class classification, theta may be the hyperparameters of the compound kernel or of an individual kernel. In the latter case, all individual kernel get assigned the same theta values. If None, the precomputed log_marginal_likelihood of ``self.kernel_.theta`` is returned. eval_gradient : bool, default: False If True, the gradient of the log-marginal likelihood with respect to the kernel hyperparameters at position theta is returned additionally. Note that gradient computation is not supported for non-binary classification. If True, theta must not be None. Returns ------- log_likelihood : float Log-marginal likelihood of theta for training data. log_likelihood_gradient : array, shape = (n_kernel_params,), optional Gradient of the log-marginal likelihood with respect to the kernel hyperparameters at position theta. Only returned when eval_gradient is True. 
""" check_is_fitted(self, ["classes_", "n_classes_"]) if theta is None: if eval_gradient: raise ValueError( "Gradient can only be evaluated for theta!=None") return self.log_marginal_likelihood_value_ theta = np.asarray(theta) if self.n_classes_ == 2: return self.base_estimator_.log_marginal_likelihood( theta, eval_gradient) else: if eval_gradient: raise NotImplementedError( "Gradient of log-marginal-likelihood not implemented for " "multi-class GPC.") estimators = self.base_estimator_.estimators_ n_dims = estimators[0].kernel_.n_dims if theta.shape[0] == n_dims: # use same theta for all sub-kernels return np.mean( [estimator.log_marginal_likelihood(theta) for i, estimator in enumerate(estimators)]) elif theta.shape[0] == n_dims * self.classes_.shape[0]: # theta for compound kernel return np.mean( [estimator.log_marginal_likelihood( theta[n_dims * i:n_dims * (i + 1)]) for i, estimator in enumerate(estimators)]) else: raise ValueError("Shape of theta must be either %d or %d. " "Obtained theta with shape %d." % (n_dims, n_dims * self.classes_.shape[0], theta.shape[0]))
bsd-3-clause
xuewei4d/scikit-learn
sklearn/datasets/tests/test_openml.py
5
51973
"""Test the openml loader. """ import gzip import json import numpy as np import os import re import scipy.sparse import sklearn import pytest from sklearn import config_context from sklearn.datasets import fetch_openml from sklearn.datasets._openml import (_open_openml_url, _arff, _DATA_FILE, _convert_arff_data, _convert_arff_data_dataframe, _get_data_description_by_id, _get_local_path, _retry_with_clean_cache, _feature_to_dtype) from sklearn.utils._testing import (assert_warns_message, assert_raise_message) from sklearn.utils import is_scalar_nan from sklearn.utils._testing import assert_allclose, assert_array_equal from urllib.error import HTTPError from sklearn.datasets.tests.test_common import check_return_X_y from sklearn.externals._arff import ArffContainerType from functools import partial from sklearn.utils._testing import fails_if_pypy currdir = os.path.dirname(os.path.abspath(__file__)) # if True, urlopen will be monkey patched to only use local files test_offline = True def _test_features_list(data_id): # XXX Test is intended to verify/ensure correct decoding behavior # Not usable with sparse data or datasets that have columns marked as # {row_identifier, ignore} def decode_column(data_bunch, col_idx): col_name = data_bunch.feature_names[col_idx] if col_name in data_bunch.categories: # XXX: This would be faster with np.take, although it does not # handle missing values fast (also not with mode='wrap') cat = data_bunch.categories[col_name] result = [None if is_scalar_nan(idx) else cat[int(idx)] for idx in data_bunch.data[:, col_idx]] return np.array(result, dtype='O') else: # non-nominal attribute return data_bunch.data[:, col_idx] data_bunch = fetch_openml(data_id=data_id, cache=False, target_column=None, as_frame=False) # also obtain decoded arff data_description = _get_data_description_by_id(data_id, None) sparse = data_description['format'].lower() == 'sparse_arff' if sparse is True: raise ValueError('This test is not intended for sparse data, to 
keep ' 'code relatively simple') url = _DATA_FILE.format(data_description['file_id']) with _open_openml_url(url, data_home=None) as f: data_arff = _arff.load((line.decode('utf-8') for line in f), return_type=(_arff.COO if sparse else _arff.DENSE_GEN), encode_nominal=False) data_downloaded = np.array(list(data_arff['data']), dtype='O') for i in range(len(data_bunch.feature_names)): # XXX: Test per column, as this makes it easier to avoid problems with # missing values np.testing.assert_array_equal(data_downloaded[:, i], decode_column(data_bunch, i)) def _fetch_dataset_from_openml(data_id, data_name, data_version, target_column, expected_observations, expected_features, expected_missing, expected_data_dtype, expected_target_dtype, expect_sparse, compare_default_target): # fetches a dataset in three various ways from OpenML, using the # fetch_openml function, and does various checks on the validity of the # result. Note that this function can be mocked (by invoking # _monkey_patch_webbased_functions before invoking this function) data_by_name_id = fetch_openml(name=data_name, version=data_version, cache=False, as_frame=False) assert int(data_by_name_id.details['id']) == data_id # Please note that cache=False is crucial, as the monkey patched files are # not consistent with reality fetch_openml(name=data_name, cache=False, as_frame=False) # without specifying the version, there is no guarantee that the data id # will be the same # fetch with dataset id data_by_id = fetch_openml(data_id=data_id, cache=False, target_column=target_column, as_frame=False) assert data_by_id.details['name'] == data_name assert data_by_id.data.shape == (expected_observations, expected_features) if isinstance(target_column, str): # single target, so target is vector assert data_by_id.target.shape == (expected_observations, ) assert data_by_id.target_names == [target_column] elif isinstance(target_column, list): # multi target, so target is array assert data_by_id.target.shape == 
            (expected_observations, len(target_column))
        assert data_by_id.target_names == target_column
    assert data_by_id.data.dtype == expected_data_dtype
    assert data_by_id.target.dtype == expected_target_dtype
    assert len(data_by_id.feature_names) == expected_features
    for feature in data_by_id.feature_names:
        assert isinstance(feature, str)

    # TODO: pass in a list of expected nominal features
    # Every nominal (categorical) column must be encoded as integer codes
    # in range(len(categories)); NaNs are skipped via isfinite.
    for feature, categories in data_by_id.categories.items():
        feature_idx = data_by_id.feature_names.index(feature)
        values = np.unique(data_by_id.data[:, feature_idx])
        values = values[np.isfinite(values)]
        assert set(values) <= set(range(len(categories)))

    if compare_default_target:
        # check whether the data by id and data by id target are equal
        data_by_id_default = fetch_openml(data_id=data_id, cache=False,
                                          as_frame=False)
        np.testing.assert_allclose(data_by_id.data, data_by_id_default.data)
        if data_by_id.target.dtype == np.float64:
            np.testing.assert_allclose(data_by_id.target,
                                       data_by_id_default.target)
        else:
            assert np.array_equal(data_by_id.target, data_by_id_default.target)

    if expect_sparse:
        assert isinstance(data_by_id.data, scipy.sparse.csr_matrix)
    else:
        assert isinstance(data_by_id.data, np.ndarray)
        # np.isnan doesn't work on CSR matrix
        assert (np.count_nonzero(np.isnan(data_by_id.data)) ==
                expected_missing)

    # test return_X_y option
    fetch_func = partial(fetch_openml, data_id=data_id, cache=False,
                         target_column=target_column, as_frame=False)
    check_return_X_y(data_by_id, fetch_func)
    return data_by_id


class _MockHTTPResponse:
    """Minimal stand-in for the response object returned by ``urlopen``.

    Wraps an already-open file object (plain or gzip) so that the
    monkey-patched loaders below can serve local fixture files as if
    they came over HTTP.
    """
    def __init__(self, data, is_gzip):
        # data: open binary file object backing the fake response
        # is_gzip: when True, info() advertises a gzip Content-Encoding
        self.data = data
        self.is_gzip = is_gzip

    def read(self, amt=-1):
        return self.data.read(amt)

    def close(self):
        self.data.close()

    def info(self):
        # Mimic http.client's header lookup just enough for the
        # gzip-detection done by the OpenML fetcher.
        if self.is_gzip:
            return {'Content-Encoding': 'gzip'}
        return {}

    def __iter__(self):
        return iter(self.data)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Do not suppress exceptions raised inside the with-block.
        return False


def _monkey_patch_webbased_functions(context, data_id, gzip_response):
    # monkey patches the urlopen function. Important note: Do NOT use this
    # in combination with a regular cache directory, as the files that are
    # stored as cache should not be mixed up with real openml datasets
    #
    # context is the pytest ``monkeypatch`` fixture; data_id selects the
    # fixture directory under data/openml/; gzip_response controls whether
    # gzip-encoded responses are simulated.
    url_prefix_data_description = "https://openml.org/api/v1/json/data/"
    url_prefix_data_features = "https://openml.org/api/v1/json/data/features/"
    url_prefix_download_data = "https://openml.org/data/v1/"
    url_prefix_data_list = "https://openml.org/api/v1/json/data/list/"

    # Fixture files are stored gzip-compressed on disk.
    path_suffix = '.gz'
    read_fn = gzip.open

    def _file_name(url, suffix):
        # Map a mocked URL onto the corresponding fixture file name
        # (non-word characters replaced by '-').
        return (re.sub(r'\W', '-', url[len("https://openml.org/"):])
                + suffix + path_suffix)

    def _mock_urlopen_data_description(url, has_gzip_header):
        assert url.startswith(url_prefix_data_description)
        path = os.path.join(currdir, 'data', 'openml',
                            str(data_id), _file_name(url, '.json'))
        if has_gzip_header and gzip_response:
            # Serve the stored .gz bytes as-is and let the caller decode.
            fp = open(path, 'rb')
            return _MockHTTPResponse(fp, True)
        else:
            # Decompress on the fly and serve plain bytes.
            fp = read_fn(path, 'rb')
            return _MockHTTPResponse(fp, False)

    def _mock_urlopen_data_features(url, has_gzip_header):
        assert url.startswith(url_prefix_data_features)
        path = os.path.join(currdir, 'data', 'openml',
                            str(data_id), _file_name(url, '.json'))
        if has_gzip_header and gzip_response:
            fp = open(path, 'rb')
            return _MockHTTPResponse(fp, True)
        else:
            fp = read_fn(path, 'rb')
            return _MockHTTPResponse(fp, False)

    def _mock_urlopen_download_data(url, has_gzip_header):
        assert (url.startswith(url_prefix_download_data))
        path = os.path.join(currdir, 'data', 'openml',
                            str(data_id), _file_name(url, '.arff'))
        if has_gzip_header and gzip_response:
            fp = open(path, 'rb')
            return _MockHTTPResponse(fp, True)
        else:
            fp = read_fn(path, 'rb')
            return _MockHTTPResponse(fp, False)

    def _mock_urlopen_data_list(url, has_gzip_header):
        assert url.startswith(url_prefix_data_list)
        json_file_path = os.path.join(currdir, 'data', 'openml',
                                      str(data_id), _file_name(url, '.json'))
        # load the file itself, to simulate a http error
        json_data = json.loads(read_fn(json_file_path, 'rb').
                               read().decode('utf-8'))
        if 'error' in json_data:
            raise HTTPError(url=None, code=412,
                            msg='Simulated mock error',
                            hdrs=None, fp=None)

        if has_gzip_header:
            fp = open(json_file_path, 'rb')
            return _MockHTTPResponse(fp, True)
        else:
            fp = read_fn(json_file_path, 'rb')
            return _MockHTTPResponse(fp, False)

    def _mock_urlopen(request):
        # Dispatch on the URL prefix to the matching fixture loader.
        url = request.get_full_url()
        has_gzip_header = request.get_header('Accept-encoding') == "gzip"
        if url.startswith(url_prefix_data_list):
            return _mock_urlopen_data_list(url, has_gzip_header)
        elif url.startswith(url_prefix_data_features):
            return _mock_urlopen_data_features(url, has_gzip_header)
        elif url.startswith(url_prefix_download_data):
            return _mock_urlopen_download_data(url, has_gzip_header)
        elif url.startswith(url_prefix_data_description):
            return _mock_urlopen_data_description(url, has_gzip_header)
        else:
            raise ValueError('Unknown mocking URL pattern: %s' % url)

    # XXX: Global variable
    # test_offline is presumably a module-level switch defined above this
    # chunk -- when False the real network is used; TODO confirm.
    if test_offline:
        context.setattr(sklearn.datasets._openml, 'urlopen', _mock_urlopen)


# Exhaustive mapping of OpenML feature descriptors to the dtype the
# fetcher should choose; note integer columns with missing values fall
# back to float64 (NaN is not representable in int64).
@pytest.mark.parametrize('feature, expected_dtype', [
    ({'data_type': 'string', 'number_of_missing_values': '0'}, object),
    ({'data_type': 'string', 'number_of_missing_values': '1'}, object),
    ({'data_type': 'numeric', 'number_of_missing_values': '0'}, np.float64),
    ({'data_type': 'numeric', 'number_of_missing_values': '1'}, np.float64),
    ({'data_type': 'real', 'number_of_missing_values': '0'}, np.float64),
    ({'data_type': 'real', 'number_of_missing_values': '1'}, np.float64),
    ({'data_type': 'integer', 'number_of_missing_values': '0'}, np.int64),
    ({'data_type': 'integer', 'number_of_missing_values': '1'}, np.float64),
    ({'data_type': 'nominal', 'number_of_missing_values': '0'}, 'category'),
    ({'data_type': 'nominal', 'number_of_missing_values': '1'}, 'category'),
])
def test_feature_to_dtype(feature, expected_dtype):
    assert _feature_to_dtype(feature) == expected_dtype


# 'datatime' is an intentionally unsupported data_type (do not "fix" the
# spelling): the fetcher must raise ValueError for unknown types.
@pytest.mark.parametrize('feature', [
    {'data_type': 'datatime', 'number_of_missing_values': '0'}
])
def test_feature_to_dtype_error(feature):
    msg = 'Unsupported feature: {}'.format(feature)
    with pytest.raises(ValueError, match=msg):
        _feature_to_dtype(feature)


# Known failure of PyPy for OpenML. See the following issue:
# https://github.com/scikit-learn/scikit-learn/issues/18906
@fails_if_pypy
def test_fetch_openml_iris_pandas(monkeypatch):
    # classification dataset with numeric only columns
    pd = pytest.importorskip('pandas')
    CategoricalDtype = pd.api.types.CategoricalDtype
    data_id = 61
    data_shape = (150, 4)
    target_shape = (150, )
    frame_shape = (150, 5)

    target_dtype = CategoricalDtype(['Iris-setosa', 'Iris-versicolor',
                                     'Iris-virginica'])
    data_dtypes = [np.float64] * 4
    data_names = ['sepallength', 'sepalwidth', 'petallength', 'petalwidth']
    target_name = 'class'

    _monkey_patch_webbased_functions(monkeypatch, data_id, True)

    bunch = fetch_openml(data_id=data_id, as_frame=True, cache=False)
    data = bunch.data
    target = bunch.target
    frame = bunch.frame

    assert isinstance(data, pd.DataFrame)
    assert np.all(data.dtypes == data_dtypes)
    assert data.shape == data_shape
    assert np.all(data.columns == data_names)
    assert np.all(bunch.feature_names == data_names)
    assert bunch.target_names == [target_name]

    assert isinstance(target, pd.Series)
    assert target.dtype == target_dtype
    assert target.shape == target_shape
    assert target.name == target_name
    assert target.index.is_unique

    assert isinstance(frame, pd.DataFrame)
    assert frame.shape == frame_shape
    assert np.all(frame.dtypes == data_dtypes + [target_dtype])
    assert frame.index.is_unique


# Known failure of PyPy for OpenML. See the following issue:
# https://github.com/scikit-learn/scikit-learn/issues/18906
@fails_if_pypy
def test_fetch_openml_iris_pandas_equal_to_no_frame(monkeypatch):
    # as_frame = True returns the same underlying data as as_frame = False
    pytest.importorskip('pandas')
    data_id = 61

    _monkey_patch_webbased_functions(monkeypatch, data_id, True)

    frame_bunch = fetch_openml(data_id=data_id, as_frame=True, cache=False)
    frame_data = frame_bunch.data
    frame_target = frame_bunch.target

    norm_bunch = fetch_openml(data_id=data_id, as_frame=False, cache=False)
    norm_data = norm_bunch.data
    norm_target = norm_bunch.target

    assert_allclose(norm_data, frame_data)
    assert_array_equal(norm_target, frame_target)


# Known failure of PyPy for OpenML. See the following issue:
# https://github.com/scikit-learn/scikit-learn/issues/18906
@fails_if_pypy
def test_fetch_openml_iris_multitarget_pandas(monkeypatch):
    # classification dataset with numeric only columns
    # Here two numeric columns are used as a multi-output target, so the
    # nominal 'class' column moves into the data frame.
    pd = pytest.importorskip('pandas')
    CategoricalDtype = pd.api.types.CategoricalDtype
    data_id = 61
    data_shape = (150, 3)
    target_shape = (150, 2)
    frame_shape = (150, 5)
    target_column = ['petalwidth', 'petallength']

    cat_dtype = CategoricalDtype(['Iris-setosa', 'Iris-versicolor',
                                  'Iris-virginica'])
    data_dtypes = [np.float64, np.float64] + [cat_dtype]
    data_names = ['sepallength', 'sepalwidth', 'class']
    target_dtypes = [np.float64, np.float64]
    target_names = ['petalwidth', 'petallength']

    _monkey_patch_webbased_functions(monkeypatch, data_id, True)

    bunch = fetch_openml(data_id=data_id, as_frame=True, cache=False,
                         target_column=target_column)
    data = bunch.data
    target = bunch.target
    frame = bunch.frame

    assert isinstance(data, pd.DataFrame)
    assert np.all(data.dtypes == data_dtypes)
    assert data.shape == data_shape
    assert np.all(data.columns == data_names)
    assert np.all(bunch.feature_names == data_names)
    assert bunch.target_names == target_names

    assert isinstance(target, pd.DataFrame)
    assert np.all(target.dtypes == target_dtypes)
    assert target.shape == target_shape
    assert np.all(target.columns == target_names)

    assert isinstance(frame, pd.DataFrame)
    assert frame.shape == frame_shape
    assert np.all(frame.dtypes == [np.float64] * 4 + [cat_dtype])


# Known failure of PyPy for OpenML. See the following issue:
# https://github.com/scikit-learn/scikit-learn/issues/18906
@fails_if_pypy
def test_fetch_openml_anneal_pandas(monkeypatch):
    # classification dataset with numeric and categorical columns
    pd = pytest.importorskip('pandas')
    CategoricalDtype = pd.api.types.CategoricalDtype

    data_id = 2
    target_column = 'class'
    data_shape = (11, 38)
    target_shape = (11,)
    frame_shape = (11, 39)
    expected_data_categories = 32
    expected_data_floats = 6

    _monkey_patch_webbased_functions(monkeypatch, data_id, True)

    bunch = fetch_openml(data_id=data_id, as_frame=True,
                         target_column=target_column, cache=False)
    data = bunch.data
    target = bunch.target
    frame = bunch.frame

    assert isinstance(data, pd.DataFrame)
    assert data.shape == data_shape
    n_categories = len([dtype for dtype in data.dtypes
                        if isinstance(dtype, CategoricalDtype)])
    n_floats = len([dtype for dtype in data.dtypes if dtype.kind == 'f'])
    assert expected_data_categories == n_categories
    assert expected_data_floats == n_floats

    assert isinstance(target, pd.Series)
    assert target.shape == target_shape
    assert isinstance(target.dtype, CategoricalDtype)

    assert isinstance(frame, pd.DataFrame)
    assert frame.shape == frame_shape


# Known failure of PyPy for OpenML. See the following issue:
# https://github.com/scikit-learn/scikit-learn/issues/18906
@fails_if_pypy
def test_fetch_openml_cpu_pandas(monkeypatch):
    # regression dataset with numeric and categorical columns
    pd = pytest.importorskip('pandas')
    CategoricalDtype = pd.api.types.CategoricalDtype
    data_id = 561
    data_shape = (209, 7)
    target_shape = (209, )
    frame_shape = (209, 8)

    # Vendor names come from the dataset itself ('sratus' included).
    cat_dtype = CategoricalDtype(['adviser', 'amdahl', 'apollo', 'basf',
                                  'bti', 'burroughs', 'c.r.d', 'cdc',
                                  'cambex', 'dec', 'dg', 'formation',
                                  'four-phase', 'gould', 'hp', 'harris',
                                  'honeywell', 'ibm', 'ipl', 'magnuson',
                                  'microdata', 'nas', 'ncr', 'nixdorf',
                                  'perkin-elmer', 'prime', 'siemens',
                                  'sperry', 'sratus', 'wang'])
    data_dtypes = [cat_dtype] + [np.float64] * 6
    feature_names = ['vendor', 'MYCT', 'MMIN', 'MMAX', 'CACH',
                     'CHMIN', 'CHMAX']
    target_name = 'class'

    _monkey_patch_webbased_functions(monkeypatch, data_id, True)

    bunch = fetch_openml(data_id=data_id, as_frame=True, cache=False)
    data = bunch.data
    target = bunch.target
    frame = bunch.frame

    assert isinstance(data, pd.DataFrame)
    assert data.shape == data_shape
    assert np.all(data.dtypes == data_dtypes)
    assert np.all(data.columns == feature_names)
    assert np.all(bunch.feature_names == feature_names)
    assert bunch.target_names == [target_name]

    assert isinstance(target, pd.Series)
    assert target.shape == target_shape
    assert target.dtype == np.float64
    assert target.name == target_name

    assert isinstance(frame, pd.DataFrame)
    assert frame.shape == frame_shape


def test_fetch_openml_australian_pandas_error_sparse(monkeypatch):
    # Requesting a dataframe for a sparse dataset must raise.
    data_id = 292

    _monkey_patch_webbased_functions(monkeypatch, data_id, True)

    msg = 'Cannot return dataframe with sparse data'
    with pytest.raises(ValueError, match=msg):
        fetch_openml(data_id=data_id, as_frame=True, cache=False)


# Known failure of PyPy for OpenML. See the following issue:
# https://github.com/scikit-learn/scikit-learn/issues/18906
@fails_if_pypy
def test_fetch_openml_as_frame_auto(monkeypatch):
    # as_frame='auto' -> DataFrame for dense data, CSR for sparse data.
    pd = pytest.importorskip('pandas')

    data_id = 61  # iris dataset version 1
    _monkey_patch_webbased_functions(monkeypatch, data_id, True)
    data = fetch_openml(data_id=data_id, as_frame='auto')
    assert isinstance(data.data, pd.DataFrame)

    data_id = 292  # Australian dataset version 1
    _monkey_patch_webbased_functions(monkeypatch, data_id, True)
    data = fetch_openml(data_id=data_id, as_frame='auto')
    assert isinstance(data.data, scipy.sparse.csr_matrix)


# Known failure of PyPy for OpenML. See the following issue:
# https://github.com/scikit-learn/scikit-learn/issues/18906
@fails_if_pypy
def test_convert_arff_data_dataframe_warning_low_memory_pandas(monkeypatch):
    # A tiny working_memory budget must trigger the chunking warning.
    pytest.importorskip('pandas')

    data_id = 1119
    _monkey_patch_webbased_functions(monkeypatch, data_id, True)

    msg = 'Could not adhere to working_memory config.'
    with pytest.warns(UserWarning, match=msg):
        with config_context(working_memory=1e-6):
            fetch_openml(data_id=data_id, as_frame=True, cache=False)


# Known failure of PyPy for OpenML. See the following issue:
# https://github.com/scikit-learn/scikit-learn/issues/18906
@fails_if_pypy
def test_fetch_openml_adultcensus_pandas_return_X_y(monkeypatch):
    # return_X_y=True with as_frame=True yields (DataFrame, Series).
    pd = pytest.importorskip('pandas')
    CategoricalDtype = pd.api.types.CategoricalDtype

    data_id = 1119
    data_shape = (10, 14)
    target_shape = (10, )

    expected_data_categories = 8
    expected_data_floats = 6
    target_column = 'class'

    _monkey_patch_webbased_functions(monkeypatch, data_id, True)
    X, y = fetch_openml(data_id=data_id, as_frame=True, cache=False,
                        return_X_y=True)
    assert isinstance(X, pd.DataFrame)
    assert X.shape == data_shape
    n_categories = len([dtype for dtype in X.dtypes
                        if isinstance(dtype, CategoricalDtype)])
    n_floats = len([dtype for dtype in X.dtypes if dtype.kind == 'f'])
    assert expected_data_categories == n_categories
    assert expected_data_floats == n_floats

    assert isinstance(y, pd.Series)
    assert y.shape == target_shape
    assert y.name == target_column


# Known failure of PyPy for OpenML. See the following issue:
# https://github.com/scikit-learn/scikit-learn/issues/18906
@fails_if_pypy
def test_fetch_openml_adultcensus_pandas(monkeypatch):
    pd = pytest.importorskip('pandas')
    CategoricalDtype = pd.api.types.CategoricalDtype

    # Check because of the numeric row attribute (issue #12329)
    data_id = 1119
    data_shape = (10, 14)
    target_shape = (10, )
    frame_shape = (10, 15)

    expected_data_categories = 8
    expected_data_floats = 6
    target_column = 'class'

    _monkey_patch_webbased_functions(monkeypatch, data_id, True)
    bunch = fetch_openml(data_id=data_id, as_frame=True, cache=False)
    data = bunch.data
    target = bunch.target
    frame = bunch.frame

    assert isinstance(data, pd.DataFrame)
    assert data.shape == data_shape
    n_categories = len([dtype for dtype in data.dtypes
                        if isinstance(dtype, CategoricalDtype)])
    n_floats = len([dtype for dtype in data.dtypes if dtype.kind == 'f'])
    assert expected_data_categories == n_categories
    assert expected_data_floats == n_floats

    assert isinstance(target, pd.Series)
    assert target.shape == target_shape
    assert target.name == target_column

    assert isinstance(frame, pd.DataFrame)
    assert frame.shape == frame_shape


# Known failure of PyPy for OpenML. See the following issue:
# https://github.com/scikit-learn/scikit-learn/issues/18906
@fails_if_pypy
def test_fetch_openml_miceprotein_pandas(monkeypatch):
    # JvR: very important check, as this dataset defined several row ids
    # and ignore attributes. Note that data_features json has 82 attributes,
    # and row id (1), ignore attributes (3) have been removed.
    pd = pytest.importorskip('pandas')
    CategoricalDtype = pd.api.types.CategoricalDtype

    data_id = 40966
    data_shape = (7, 77)
    target_shape = (7, )
    frame_shape = (7, 78)
    target_column = 'class'
    frame_n_categories = 1
    frame_n_floats = 77

    _monkey_patch_webbased_functions(monkeypatch, data_id, True)
    bunch = fetch_openml(data_id=data_id, as_frame=True, cache=False)
    data = bunch.data
    target = bunch.target
    frame = bunch.frame

    assert isinstance(data, pd.DataFrame)
    assert data.shape == data_shape
    assert np.all(data.dtypes == np.float64)

    assert isinstance(target, pd.Series)
    assert isinstance(target.dtype, CategoricalDtype)
    assert target.shape == target_shape
    assert target.name == target_column

    assert isinstance(frame, pd.DataFrame)
    assert frame.shape == frame_shape
    n_categories = len([dtype for dtype in frame.dtypes
                        if isinstance(dtype, CategoricalDtype)])
    n_floats = len([dtype for dtype in frame.dtypes if dtype.kind == 'f'])
    assert frame_n_categories == n_categories
    assert frame_n_floats == n_floats


# Known failure of PyPy for OpenML. See the following issue:
# https://github.com/scikit-learn/scikit-learn/issues/18906
@fails_if_pypy
def test_fetch_openml_emotions_pandas(monkeypatch):
    # classification dataset with multiple targets (natively)
    # NOTE: 'amazed.suprised' / 'angry.aggresive' are the dataset's own
    # column names -- do not correct the spelling.
    pd = pytest.importorskip('pandas')
    CategoricalDtype = pd.api.types.CategoricalDtype

    data_id = 40589
    target_column = ['amazed.suprised', 'happy.pleased', 'relaxing.calm',
                     'quiet.still', 'sad.lonely', 'angry.aggresive']
    data_shape = (13, 72)
    target_shape = (13, 6)
    frame_shape = (13, 78)
    expected_frame_categories = 6
    expected_frame_floats = 72

    _monkey_patch_webbased_functions(monkeypatch, data_id, True)
    bunch = fetch_openml(data_id=data_id, as_frame=True, cache=False,
                         target_column=target_column)
    data = bunch.data
    target = bunch.target
    frame = bunch.frame

    assert isinstance(data, pd.DataFrame)
    assert data.shape == data_shape

    assert isinstance(target, pd.DataFrame)
    assert target.shape == target_shape
    assert np.all(target.columns == target_column)

    assert isinstance(frame, pd.DataFrame)
    assert frame.shape == frame_shape
    n_categories = len([dtype for dtype in frame.dtypes
                        if isinstance(dtype, CategoricalDtype)])
    n_floats = len([dtype for dtype in frame.dtypes if dtype.kind == 'f'])
    assert expected_frame_categories == n_categories
    assert expected_frame_floats == n_floats


# Known failure of PyPy for OpenML. See the following issue:
# https://github.com/scikit-learn/scikit-learn/issues/18906
@fails_if_pypy
def test_fetch_openml_titanic_pandas(monkeypatch):
    # dataset with strings
    pd = pytest.importorskip('pandas')
    CategoricalDtype = pd.api.types.CategoricalDtype

    data_id = 40945
    data_shape = (1309, 13)
    target_shape = (1309, )
    frame_shape = (1309, 14)
    name_to_dtype = {
        'pclass': np.float64,
        'name': object,
        'sex': CategoricalDtype(['female', 'male']),
        'age': np.float64,
        'sibsp': np.float64,
        'parch': np.float64,
        'ticket': object,
        'fare': np.float64,
        'cabin': object,
        'embarked': CategoricalDtype(['C', 'Q', 'S']),
        'boat': object,
        'body': np.float64,
        'home.dest': object,
        'survived': CategoricalDtype(['0', '1'])
    }

    frame_columns = ['pclass', 'survived', 'name', 'sex', 'age', 'sibsp',
                     'parch', 'ticket', 'fare', 'cabin', 'embarked',
                     'boat', 'body', 'home.dest']
    frame_dtypes = [name_to_dtype[col] for col in frame_columns]
    feature_names = ['pclass', 'name', 'sex', 'age', 'sibsp',
                     'parch', 'ticket', 'fare', 'cabin', 'embarked',
                     'boat', 'body', 'home.dest']
    target_name = 'survived'

    _monkey_patch_webbased_functions(monkeypatch, data_id, True)
    bunch = fetch_openml(data_id=data_id, as_frame=True, cache=False)
    data = bunch.data
    target = bunch.target
    frame = bunch.frame

    assert isinstance(data, pd.DataFrame)
    assert data.shape == data_shape
    assert np.all(data.columns == feature_names)
    assert bunch.target_names == [target_name]

    assert isinstance(target, pd.Series)
    assert target.shape == target_shape
    assert target.name == target_name
    assert target.dtype == name_to_dtype[target_name]

    assert isinstance(frame, pd.DataFrame)
    assert frame.shape == frame_shape
    assert np.all(frame.dtypes == frame_dtypes)


@pytest.mark.parametrize('gzip_response', [True, False])
def test_fetch_openml_iris(monkeypatch, gzip_response):
    # classification dataset with numeric only columns
    data_id = 61
    data_name = 'iris'
    data_version = 1
    target_column = 'class'
    expected_observations = 150
    expected_features = 4
    expected_missing = 0

    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
    # Fetching by name must warn when multiple active versions exist.
    assert_warns_message(
        UserWarning,
        "Multiple active versions of the dataset matching the name"
        " iris exist. Versions may be fundamentally different, "
        "returning version 1.",
        _fetch_dataset_from_openml,
        **{'data_id': data_id, 'data_name': data_name,
           'data_version': data_version,
           'target_column': target_column,
           'expected_observations': expected_observations,
           'expected_features': expected_features,
           'expected_missing': expected_missing,
           'expect_sparse': False,
           'expected_data_dtype': np.float64,
           'expected_target_dtype': object,
           'compare_default_target': True}
    )


def test_decode_iris(monkeypatch):
    # Feature decoding round-trip for the iris fixture; delegates to
    # _test_features_list (defined elsewhere in this file).
    data_id = 61
    _monkey_patch_webbased_functions(monkeypatch, data_id, False)
    _test_features_list(data_id)


@pytest.mark.parametrize('gzip_response', [True, False])
def test_fetch_openml_iris_multitarget(monkeypatch, gzip_response):
    # classification dataset with numeric only columns
    data_id = 61
    data_name = 'iris'
    data_version = 1
    target_column = ['sepallength', 'sepalwidth']
    expected_observations = 150
    expected_features = 3
    expected_missing = 0

    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
    _fetch_dataset_from_openml(data_id, data_name, data_version,
                               target_column,
                               expected_observations, expected_features,
                               expected_missing,
                               np.float64, np.float64, expect_sparse=False,
                               compare_default_target=False)


@pytest.mark.parametrize('gzip_response', [True, False])
def test_fetch_openml_anneal(monkeypatch, gzip_response):
    # classification dataset with numeric and categorical columns
    data_id = 2
    data_name = 'anneal'
    data_version = 1
    target_column = 'class'
    # Not all original instances included for space reasons
    expected_observations = 11
    expected_features = 38
    expected_missing = 267
    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
    _fetch_dataset_from_openml(data_id, data_name, data_version,
                               target_column,
                               expected_observations, expected_features,
                               expected_missing,
                               np.float64, object, expect_sparse=False,
                               compare_default_target=True)


def test_decode_anneal(monkeypatch):
    # Feature decoding round-trip for the anneal fixture.
    data_id = 2
    _monkey_patch_webbased_functions(monkeypatch, data_id, False)
    _test_features_list(data_id)


@pytest.mark.parametrize('gzip_response', [True, False])
def test_fetch_openml_anneal_multitarget(monkeypatch, gzip_response):
    # classification dataset with numeric and categorical columns
    data_id = 2
    data_name = 'anneal'
    data_version = 1
    target_column = ['class', 'product-type', 'shape']
    # Not all original instances included for space reasons
    expected_observations = 11
    expected_features = 36
    expected_missing = 267
    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
    _fetch_dataset_from_openml(data_id, data_name, data_version,
                               target_column,
                               expected_observations, expected_features,
                               expected_missing,
                               np.float64, object, expect_sparse=False,
                               compare_default_target=False)


@pytest.mark.parametrize('gzip_response', [True, False])
def test_fetch_openml_cpu(monkeypatch, gzip_response):
    # regression dataset with numeric and categorical columns
    data_id = 561
    data_name = 'cpu'
    data_version = 1
    target_column = 'class'
    expected_observations = 209
    expected_features = 7
    expected_missing = 0
    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
    _fetch_dataset_from_openml(data_id, data_name, data_version,
                               target_column,
                               expected_observations, expected_features,
                               expected_missing,
                               np.float64, np.float64, expect_sparse=False,
                               compare_default_target=True)


def test_decode_cpu(monkeypatch):
    # Feature decoding round-trip for the cpu fixture.
    data_id = 561
    _monkey_patch_webbased_functions(monkeypatch, data_id, False)
    _test_features_list(data_id)


@pytest.mark.parametrize('gzip_response', [True, False])
def test_fetch_openml_australian(monkeypatch, gzip_response):
    # sparse dataset
    # Australian is the only sparse dataset that is reasonably small
    # as it is inactive, we need to catch the warning. Due to mocking
    # framework, it is not deactivated in our tests
    data_id = 292
    data_name = 'Australian'
    data_version = 1
    target_column = 'Y'
    # Not all original instances included for space reasons
    expected_observations = 85
    expected_features = 14
    expected_missing = 0
    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
    assert_warns_message(
        UserWarning,
        "Version 1 of dataset Australian is inactive,",
        _fetch_dataset_from_openml,
        **{'data_id': data_id, 'data_name': data_name,
           'data_version': data_version,
           'target_column': target_column,
           'expected_observations': expected_observations,
           'expected_features': expected_features,
           'expected_missing': expected_missing,
           'expect_sparse': True,
           'expected_data_dtype': np.float64,
           'expected_target_dtype': object,
           'compare_default_target': False}  # numpy specific check
    )


@pytest.mark.parametrize('gzip_response', [True, False])
def test_fetch_openml_adultcensus(monkeypatch, gzip_response):
    # Check because of the numeric row attribute (issue #12329)
    data_id = 1119
    data_name = 'adult-census'
    data_version = 1
    target_column = 'class'
    # Not all original instances included for space reasons
    expected_observations = 10
    expected_features = 14
    expected_missing = 0
    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
    _fetch_dataset_from_openml(data_id, data_name, data_version,
                               target_column,
                               expected_observations, expected_features,
                               expected_missing,
                               np.float64, object, expect_sparse=False,
                               compare_default_target=True)


@pytest.mark.parametrize('gzip_response', [True, False])
def test_fetch_openml_miceprotein(monkeypatch, gzip_response):
    # JvR: very important check, as this dataset defined several row ids
    # and ignore attributes. Note that data_features json has 82 attributes,
    # and row id (1), ignore attributes (3) have been removed (and target is
    # stored in data.target)
    data_id = 40966
    data_name = 'MiceProtein'
    data_version = 4
    target_column = 'class'
    # Not all original instances included for space reasons
    expected_observations = 7
    expected_features = 77
    expected_missing = 7
    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
    _fetch_dataset_from_openml(data_id, data_name, data_version,
                               target_column,
                               expected_observations, expected_features,
                               expected_missing,
                               np.float64, object, expect_sparse=False,
                               compare_default_target=True)


@pytest.mark.parametrize('gzip_response', [True, False])
def test_fetch_openml_emotions(monkeypatch, gzip_response):
    # classification dataset with multiple targets (natively)
    # NOTE: column names are spelled as in the dataset itself.
    data_id = 40589
    data_name = 'emotions'
    data_version = 3
    target_column = ['amazed.suprised', 'happy.pleased', 'relaxing.calm',
                     'quiet.still', 'sad.lonely', 'angry.aggresive']
    expected_observations = 13
    expected_features = 72
    expected_missing = 0
    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
    _fetch_dataset_from_openml(data_id, data_name, data_version,
                               target_column,
                               expected_observations, expected_features,
                               expected_missing,
                               np.float64, object, expect_sparse=False,
                               compare_default_target=True)


def test_decode_emotions(monkeypatch):
    # Feature decoding round-trip for the emotions fixture.
    data_id = 40589
    _monkey_patch_webbased_functions(monkeypatch, data_id, False)
    _test_features_list(data_id)


@pytest.mark.parametrize('gzip_response', [True, False])
def test_open_openml_url_cache(monkeypatch, gzip_response, tmpdir):
    # Second download of the same path must be served from the local cache
    # and return identical bytes.
    data_id = 61

    _monkey_patch_webbased_functions(
        monkeypatch, data_id, gzip_response)
    openml_path = sklearn.datasets._openml._DATA_FILE.format(data_id)
    cache_directory = str(tmpdir.mkdir('scikit_learn_data'))
    # first fill the cache
    response1 = _open_openml_url(openml_path, cache_directory)
    # assert file exists
    location = _get_local_path(openml_path, cache_directory)
    assert os.path.isfile(location)
    # redownload, to utilize cache
    response2 = _open_openml_url(openml_path, cache_directory)
    assert response1.read() == response2.read()


@pytest.mark.parametrize('gzip_response', [True, False])
@pytest.mark.parametrize('write_to_disk', [True, False])
def test_open_openml_url_unlinks_local_path(
        monkeypatch, gzip_response, tmpdir, write_to_disk):
    # A failed download must not leave a (possibly partial) cache file
    # behind, whether or not anything was written to disk.
    data_id = 61
    openml_path = sklearn.datasets._openml._DATA_FILE.format(data_id)
    cache_directory = str(tmpdir.mkdir('scikit_learn_data'))
    location = _get_local_path(openml_path, cache_directory)

    def _mock_urlopen(request):
        if write_to_disk:
            with open(location, "w") as f:
                f.write("")
        raise ValueError("Invalid request")

    monkeypatch.setattr(sklearn.datasets._openml, 'urlopen', _mock_urlopen)

    with pytest.raises(ValueError, match="Invalid request"):
        _open_openml_url(openml_path, cache_directory)

    assert not os.path.exists(location)


def test_retry_with_clean_cache(tmpdir):
    # The decorator must delete a corrupt cache entry, warn, and retry the
    # wrapped function once (which then succeeds).
    data_id = 61
    openml_path = sklearn.datasets._openml._DATA_FILE.format(data_id)
    cache_directory = str(tmpdir.mkdir('scikit_learn_data'))
    location = _get_local_path(openml_path, cache_directory)
    os.makedirs(os.path.dirname(location))

    with open(location, 'w') as f:
        f.write("")

    @_retry_with_clean_cache(openml_path, cache_directory)
    def _load_data():
        # The first call will raise an error since location exists
        if os.path.exists(location):
            raise Exception("File exist!")
        return 1

    warn_msg = "Invalid cache, redownloading file"
    with pytest.warns(RuntimeWarning, match=warn_msg):
        result = _load_data()
    assert result == 1


def test_retry_with_clean_cache_http_error(tmpdir):
    # HTTP errors must propagate unchanged (no cache-cleanup retry).
    data_id = 61
    openml_path = sklearn.datasets._openml._DATA_FILE.format(data_id)
    cache_directory = str(tmpdir.mkdir('scikit_learn_data'))

    @_retry_with_clean_cache(openml_path, cache_directory)
    def _load_data():
        raise HTTPError(url=None, code=412,
                        msg='Simulated mock error',
                        hdrs=None, fp=None)

    error_msg = "Simulated mock error"
    with pytest.raises(HTTPError, match=error_msg):
        _load_data()
@pytest.mark.parametrize('gzip_response', [True, False]) def test_fetch_openml_cache(monkeypatch, gzip_response, tmpdir): def _mock_urlopen_raise(request): raise ValueError('This mechanism intends to test correct cache' 'handling. As such, urlopen should never be ' 'accessed. URL: %s' % request.get_full_url()) data_id = 2 cache_directory = str(tmpdir.mkdir('scikit_learn_data')) _monkey_patch_webbased_functions( monkeypatch, data_id, gzip_response) X_fetched, y_fetched = fetch_openml(data_id=data_id, cache=True, data_home=cache_directory, return_X_y=True, as_frame=False) monkeypatch.setattr(sklearn.datasets._openml, 'urlopen', _mock_urlopen_raise) X_cached, y_cached = fetch_openml(data_id=data_id, cache=True, data_home=cache_directory, return_X_y=True, as_frame=False) np.testing.assert_array_equal(X_fetched, X_cached) np.testing.assert_array_equal(y_fetched, y_cached) @pytest.mark.parametrize('gzip_response', [True, False]) def test_fetch_openml_notarget(monkeypatch, gzip_response): data_id = 61 target_column = None expected_observations = 150 expected_features = 5 _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response) data = fetch_openml(data_id=data_id, target_column=target_column, cache=False, as_frame=False) assert data.data.shape == (expected_observations, expected_features) assert data.target is None @pytest.mark.parametrize('gzip_response', [True, False]) def test_fetch_openml_inactive(monkeypatch, gzip_response): # fetch inactive dataset by id data_id = 40675 _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response) glas2 = assert_warns_message( UserWarning, "Version 1 of dataset glass2 is inactive,", fetch_openml, data_id=data_id, cache=False, as_frame=False) # fetch inactive dataset by name and version assert glas2.data.shape == (163, 9) glas2_by_version = assert_warns_message( UserWarning, "Version 1 of dataset glass2 is inactive,", fetch_openml, data_id=None, name="glass2", version=1, cache=False, as_frame=False) assert 
int(glas2_by_version.details['id']) == data_id @pytest.mark.parametrize('gzip_response', [True, False]) def test_fetch_nonexiting(monkeypatch, gzip_response): # there is no active version of glass2 data_id = 40675 _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response) # Note that we only want to search by name (not data id) assert_raise_message(ValueError, "No active dataset glass2 found", fetch_openml, name='glass2', cache=False) @pytest.mark.parametrize('gzip_response', [True, False]) def test_raises_illegal_multitarget(monkeypatch, gzip_response): data_id = 61 targets = ['sepalwidth', 'class'] _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response) # Note that we only want to search by name (not data id) assert_raise_message(ValueError, "Can only handle homogeneous multi-target datasets,", fetch_openml, data_id=data_id, target_column=targets, cache=False) @pytest.mark.parametrize('gzip_response', [True, False]) def test_warn_ignore_attribute(monkeypatch, gzip_response): data_id = 40966 expected_row_id_msg = "target_column={} has flag is_row_identifier." expected_ignore_msg = "target_column={} has flag is_ignore." 
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response) # single column test assert_warns_message(UserWarning, expected_row_id_msg.format('MouseID'), fetch_openml, data_id=data_id, target_column='MouseID', cache=False, as_frame=False) assert_warns_message(UserWarning, expected_ignore_msg.format('Genotype'), fetch_openml, data_id=data_id, target_column='Genotype', cache=False, as_frame=False) # multi column test assert_warns_message(UserWarning, expected_row_id_msg.format('MouseID'), fetch_openml, data_id=data_id, target_column=['MouseID', 'class'], cache=False, as_frame=False) assert_warns_message(UserWarning, expected_ignore_msg.format('Genotype'), fetch_openml, data_id=data_id, target_column=['Genotype', 'class'], cache=False, as_frame=False) @pytest.mark.parametrize('gzip_response', [True, False]) def test_string_attribute_without_dataframe(monkeypatch, gzip_response): data_id = 40945 _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response) # single column test assert_raise_message(ValueError, ('STRING attributes are not supported for ' 'array representation. Try as_frame=True'), fetch_openml, data_id=data_id, cache=False, as_frame=False) @pytest.mark.parametrize('gzip_response', [True, False]) def test_dataset_with_openml_error(monkeypatch, gzip_response): data_id = 1 _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response) assert_warns_message( UserWarning, "OpenML registered a problem with the dataset. It might be unusable. " "Error:", fetch_openml, data_id=data_id, cache=False, as_frame=False ) @pytest.mark.parametrize('gzip_response', [True, False]) def test_dataset_with_openml_warning(monkeypatch, gzip_response): data_id = 3 _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response) assert_warns_message( UserWarning, "OpenML raised a warning on the dataset. It might be unusable. 
" "Warning:", fetch_openml, data_id=data_id, cache=False, as_frame=False ) @pytest.mark.parametrize('gzip_response', [True, False]) def test_illegal_column(monkeypatch, gzip_response): data_id = 61 _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response) assert_raise_message(KeyError, "Could not find target_column=", fetch_openml, data_id=data_id, target_column='undefined', cache=False) assert_raise_message(KeyError, "Could not find target_column=", fetch_openml, data_id=data_id, target_column=['undefined', 'class'], cache=False) @pytest.mark.parametrize('gzip_response', [True, False]) def test_fetch_openml_raises_missing_values_target(monkeypatch, gzip_response): data_id = 2 _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response) assert_raise_message(ValueError, "Target column ", fetch_openml, data_id=data_id, target_column='family') def test_fetch_openml_raises_illegal_argument(): assert_raise_message(ValueError, "Dataset data_id=", fetch_openml, data_id=-1, name="name") assert_raise_message(ValueError, "Dataset data_id=", fetch_openml, data_id=-1, name=None, version="version") assert_raise_message(ValueError, "Dataset data_id=", fetch_openml, data_id=-1, name="name", version="version") assert_raise_message(ValueError, "Neither name nor data_id are provided. 
" "Please provide name or data_id.", fetch_openml) @pytest.mark.parametrize('gzip_response', [True, False]) def test_fetch_openml_with_ignored_feature(monkeypatch, gzip_response): # Regression test for #14340 # 62 is the ID of the ZOO dataset data_id = 62 _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response) dataset = sklearn.datasets.fetch_openml(data_id=data_id, cache=False, as_frame=False) assert dataset is not None # The dataset has 17 features, including 1 ignored (animal), # so we assert that we don't have the ignored feature in the final Bunch assert dataset['data'].shape == (101, 16) assert 'animal' not in dataset['feature_names'] # Known failure of PyPy for OpenML. See the following issue: # https://github.com/scikit-learn/scikit-learn/issues/18906 @fails_if_pypy @pytest.mark.parametrize('as_frame', [True, False]) def test_fetch_openml_verify_checksum(monkeypatch, as_frame, cache, tmpdir): if as_frame: pytest.importorskip('pandas') data_id = 2 _monkey_patch_webbased_functions(monkeypatch, data_id, True) # create a temporary modified arff file dataset_dir = os.path.join(currdir, 'data', 'openml', str(data_id)) original_data_path = os.path.join(dataset_dir, 'data-v1-download-1666876.arff.gz') corrupt_copy = os.path.join(tmpdir, "test_invalid_checksum.arff") with gzip.GzipFile(original_data_path, "rb") as orig_gzip, \ gzip.GzipFile(corrupt_copy, "wb") as modified_gzip: data = bytearray(orig_gzip.read()) data[len(data)-1] = 37 modified_gzip.write(data) # Requests are already mocked by monkey_patch_webbased_functions. 
# We want to re-use that mock for all requests except file download, # hence creating a thin mock over the original mock mocked_openml_url = sklearn.datasets._openml.urlopen def swap_file_mock(request): url = request.get_full_url() if url.endswith('data/v1/download/1666876'): return _MockHTTPResponse(open(corrupt_copy, "rb"), is_gzip=True) else: return mocked_openml_url(request) monkeypatch.setattr(sklearn.datasets._openml, 'urlopen', swap_file_mock) # validate failed checksum with pytest.raises(ValueError) as exc: sklearn.datasets.fetch_openml(data_id=data_id, cache=False, as_frame=as_frame) # exception message should have file-path assert exc.match("1666876") def test_convert_arff_data_type(): pytest.importorskip('pandas') arff: ArffContainerType = { 'data': (el for el in range(2)), 'description': '', 'relation': '', 'attributes': [] } msg = r"shape must be provided when arr\['data'\] is a Generator" with pytest.raises(ValueError, match=msg): _convert_arff_data(arff, [0], [0], shape=None) arff = { 'data': list(range(2)), 'description': '', 'relation': '', 'attributes': [] } msg = r"arff\['data'\] must be a generator when converting to pd.DataFrame" with pytest.raises(ValueError, match=msg): _convert_arff_data_dataframe(arff, ['a'], {})
bsd-3-clause
actlea/TopicalCrawler
TopicalCrawl/TopicalCrawl/classifier/increment_classifier.py
1
2474
#!/usr/bin/env python # encoding: utf-8 """ @version: ?? @author: phpergao @license: Apache Licence @file: increment_classifier.py @time: 16-4-15 上午10:18 """ from base import * from sklearn.externals import joblib import os import numpy as np import re from collections import Counter from collections import Counter from scipy.sparse import csr_matrix from sklearn.metrics import accuracy_score from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer from sklearn.feature_selection import SelectKBest, chi2 def get_bn_ratios(X_train, y_train, v): print v matrix = X_train.toarray() class_id = set(y_train) ratios = np.ones((len(class_id), v)) counters = {} for i in range(matrix.shape[0]): for j in range(matrix.shape[1]): print matrix[i][j], print '' # for c in class_id: # p_c = np.full(v, 1.0) # for t in range(v): # p_c[t] += counters[t] # # normalize (l1 norm) # p_c /= np.linalg.norm(p_c, ord=1) # ratios[c] = np.log(p_c / (1 - p_c)) # print ratios, v class IncrementalClassifier(object): def __init__(self, name): self.name = name self.model = None self.converter = TextPreProcess(zh_tokenize) def train_and_test(self, train_file, test_file): lines = read_text_src(train_file) lines = [x for x in lines if len(x) > 1] X_train = [line[1] for line in lines] y_train = [line[0] for line in lines] # lines = read_text_src(test_file) # lines = [x for x in lines if len(x) > 1] # X_test = [line[1] for line in lines] # y_test = [line[0] for line in lines] vectorizer = CountVectorizer(tokenizer=zh_tokenize) # ngram_range=(1,2) X_train = vectorizer.fit_transform(X_train) print type(X_train) # X_test = vectorizer.transform(X_test) word = vectorizer.get_feature_names() v = len(word) get_bn_ratios(X_train,y_train,v) N = X_train.shape[1] ch2 = SelectKBest(chi2, k=int(N * 0.2)) X_train = ch2.fit_transform(X_train, y_train) feature_names = [word[i] for i in ch2.get_support(indices=True)] if __name__=='__main__': path = '/mnt/UbutunShare/graduate/DataSet/document1.txt' 
test_file = 'sample-data/test-6-zh.txt' inc = IncrementalClassifier('test') inc.train_and_test(path,test_file)
gpl-3.0
danielhrisca/asammdf
asammdf/signal.py
1
50266
# -*- coding: utf-8 -*- """ asammdf *Signal* class module for time correct signal processing """ import logging from textwrap import fill import numpy as np from numpy.core.defchararray import encode from .blocks import v2_v3_blocks as v3b from .blocks import v4_blocks as v4b from .blocks.conversion_utils import from_dict from .blocks.source_utils import Source from .blocks.utils import extract_cncomment_xml, MdfException from .version import __version__ logger = logging.getLogger("asammdf") class Signal(object): """ The *Signal* represents a channel described by it's samples and timestamps. It can perform arithmetic operations against other *Signal* or numeric types. The operations are computed in respect to the timestamps (time correct). The non-float signals are not interpolated, instead the last value relative to the current timestamp is used. *samples*, *timestamps* and *name* are mandatory arguments. Parameters ---------- samples : numpy.array | list | tuple signal samples timestamps : numpy.array | list | tuple signal timestamps unit : str signal unit name : str signal name conversion : dict | channel conversion block dict that contains extra conversion information about the signal , default *None* comment : str signal comment, default '' raw : bool signal samples are raw values, with no physical conversion applied master_metadata : list master name and sync type display_name : str display name used by mdf version 3 attachment : bytes, name channel attachment and name from MDF version 4 source : Source source information named tuple bit_count : int bit count; useful for integer channels stream_sync : bool the channel is a synchronisation for the attachment stream (mdf v4 only) invalidation_bits : numpy.array | None channel invalidation bits, default *None* encoding : str | None encoding for string signals; default *None* """ def __init__( self, samples=None, timestamps=None, unit="", name="", conversion=None, comment="", raw=True, master_metadata=None, 
display_name="", attachment=(), source=None, bit_count=None, stream_sync=False, invalidation_bits=None, encoding=None, group_index=-1, channel_index=-1, ): if samples is None or timestamps is None or not name: message = ( '"samples", "timestamps" and "name" are mandatory ' "for Signal class __init__: samples={samples}\n" "timestamps={timestamps}\nname={name}" ) raise MdfException(message) else: if not isinstance(samples, np.ndarray): samples = np.array(samples) if samples.dtype.kind == "U": if encoding is None: encodings = ["utf-8", "latin-1"] else: encodings = [encoding, "utf-8", "latin-1"] for encoding in encodings: try: samples = encode(samples, encoding) break except: continue else: samples = encode(samples, encodings[0], errors="ignore") if not isinstance(timestamps, np.ndarray): timestamps = np.array(timestamps, dtype=np.float64) if samples.shape[0] != timestamps.shape[0]: message = "{} samples and timestamps length mismatch ({} vs {})" message = message.format(name, samples.shape[0], timestamps.shape[0]) logger.exception(message) raise MdfException(message) self.samples = samples self.timestamps = timestamps self.unit = unit self.name = name self.comment = comment self._plot_axis = None self.raw = raw self.master_metadata = master_metadata self.display_name = display_name self.attachment = attachment self.encoding = encoding self.group_index = group_index self.channel_index = channel_index if source: if not isinstance(source, Source): source = Source.from_source(source) self.source = source if bit_count is None: self.bit_count = samples.dtype.itemsize * 8 else: self.bit_count = bit_count self.stream_sync = stream_sync if invalidation_bits is not None: if not isinstance(invalidation_bits, np.ndarray): invalidation_bits = np.array(invalidation_bits) if invalidation_bits.shape[0] != samples.shape[0]: message = ( "{} samples and invalidation bits length mismatch ({} vs {})" ) message = message.format( name, samples.shape[0], invalidation_bits.shape[0] ) 
logger.exception(message) raise MdfException(message) self.invalidation_bits = invalidation_bits if conversion: if not isinstance( conversion, (v4b.ChannelConversion, v3b.ChannelConversion) ): conversion = from_dict(conversion) self.conversion = conversion def __repr__(self): return f"""<Signal {self.name}: \tsamples={self.samples} \ttimestamps={self.timestamps} \tinvalidation_bits={self.invalidation_bits} \tunit="{self.unit}" \tconversion={self.conversion} \tsource={self.source} \tcomment="{self.comment}" \tmastermeta="{self.master_metadata}" \traw={self.raw} \tdisplay_name={self.display_name} \tattachment={self.attachment}> """ def plot(self, validate=True, index_only=False): """plot Signal samples. Pyqtgraph is used if it is available; in this case see the GUI plot documentation to see the available commands Parameters ---------- validate (True): bool apply the invalidation bits index_only (False) : bool use index based X axis. This can be useful if the master (usually time based) is corrupted with NaN, inf or if it is not strictly increasing """ try: from .gui.plot import plot plot(self, validate=True, index_only=False) return except: try: import matplotlib.pyplot as plt from matplotlib.widgets import Slider except ImportError: logging.warning("Signal plotting requires pyqtgraph or matplotlib") return if len(self.samples.shape) <= 1 and self.samples.dtype.names is None: fig = plt.figure() fig.canvas.set_window_title(self.name) fig.text( 0.95, 0.05, f"asammdf {__version__}", fontsize=8, color="red", ha="right", va="top", alpha=0.5, ) name = self.name if self.comment: comment = self.comment.replace("$", "") comment = extract_cncomment_xml(comment) comment = fill(comment, 120).replace("\\n", " ") title = f"{name}\n({comment})" plt.title(title) else: plt.title(name) try: if not self.master_metadata: plt.xlabel("Time [s]") plt.ylabel(f"[{self.unit}]") plt.plot(self.timestamps, self.samples, "b") plt.plot(self.timestamps, self.samples, "b.") plt.grid(True) plt.show() 
else: master_name, sync_type = self.master_metadata if sync_type in (0, 1): plt.xlabel(f"{master_name} [s]") elif sync_type == 2: plt.xlabel(f"{master_name} [deg]") elif sync_type == 3: plt.xlabel(f"{master_name} [m]") elif sync_type == 4: plt.xlabel(f"{master_name} [index]") plt.ylabel(f"[{self.unit}]") plt.plot(self.timestamps, self.samples, "b") plt.plot(self.timestamps, self.samples, "b.") plt.grid(True) plt.show() except ValueError: plt.close(fig) else: try: names = self.samples.dtype.names if self.samples.dtype.names is None or len(names) == 1: if names: samples = self.samples[names[0]] else: samples = self.samples shape = samples.shape[1:] fig = plt.figure() fig.canvas.set_window_title(self.name) fig.text( 0.95, 0.05, f"asammdf {__version__}", fontsize=8, color="red", ha="right", va="top", alpha=0.5, ) if self.comment: comment = self.comment.replace("$", "") plt.title(f"{self.name}\n({comment})") else: plt.title(self.name) ax = fig.add_subplot(111, projection="3d") # Grab some test data. X = np.array(range(shape[1])) Y = np.array(range(shape[0])) X, Y = np.meshgrid(X, Y) Z = samples[0] # Plot a basic wireframe. 
self._plot_axis = ax.plot_wireframe(X, Y, Z, rstride=1, cstride=1) # Place Sliders on Graph ax_a = plt.axes([0.25, 0.1, 0.65, 0.03]) # Create Sliders & Determine Range sa = Slider( ax_a, "Time [s]", self.timestamps[0], self.timestamps[-1], valinit=self.timestamps[0], ) def update(val): self._plot_axis.remove() idx = np.searchsorted(self.timestamps, sa.val, side="right") Z = samples[idx - 1] self._plot_axis = ax.plot_wireframe( X, Y, Z, rstride=1, cstride=1 ) fig.canvas.draw_idle() sa.on_changed(update) plt.show() else: fig = plt.figure() fig.canvas.set_window_title(self.name) fig.text( 0.95, 0.05, f"asammdf {__version__}", fontsize=8, color="red", ha="right", va="top", alpha=0.5, ) if self.comment: comment = self.comment.replace("$", "") plt.title(f"{self.name}\n({comment})") else: plt.title(self.name) ax = fig.add_subplot(111, projection="3d") samples = self.samples[names[0]] axis1 = self.samples[names[1]] axis2 = self.samples[names[2]] # Grab some test data. X, Y = np.meshgrid(axis2[0], axis1[0]) Z = samples[0] # Plot a basic wireframe. self._plot_axis = ax.plot_wireframe(X, Y, Z, rstride=1, cstride=1) # Place Sliders on Graph ax_a = plt.axes([0.25, 0.1, 0.65, 0.03]) # Create Sliders & Determine Range sa = Slider( ax_a, "Time [s]", self.timestamps[0], self.timestamps[-1], valinit=self.timestamps[0], ) def update(val): self._plot_axis.remove() idx = np.searchsorted(self.timestamps, sa.val, side="right") Z = samples[idx - 1] X, Y = np.meshgrid(axis2[idx - 1], axis1[idx - 1]) self._plot_axis = ax.plot_wireframe( X, Y, Z, rstride=1, cstride=1 ) fig.canvas.draw_idle() sa.on_changed(update) plt.show() except Exception as err: print(err) def cut( self, start=None, stop=None, include_ends=True, interpolation_mode=None, integer_interpolation_mode=None, float_interpolation_mode=1, ): """ Cuts the signal according to the *start* and *stop* values, by using the insertion indexes in the signal's *time* axis. 
Parameters ---------- start : float start timestamp for cutting stop : float stop timestamp for cutting include_ends : bool include the *start* and *stop* timestamps after cutting the signal. If *start* and *stop* are found in the original timestamps, then the new samples will be computed using interpolation. Default *True* interpolation_mode : int interpolation mode for integer signals; default 0. You should use the new *integer_interpolation_mode* argument since this will be deprecated in a later release * 0 - repeat previous samples * 1 - linear interpolation * 2 - hybrid interpolation: channels with integer data type (raw values) that have a conversion that outputs float values will use linear interpolation, otherwise the previous sample is used .. versionchanged:: 6.2.0 added hybrid mode interpolation integer_interpolation_mode : int interpolation mode for integer signals; default 0 * 0 - repeat previous samples * 1 - linear interpolation * 2 - hybrid interpolation: channels with integer data type (raw values) that have a conversion that outputs float values will use linear interpolation, otherwise the previous sample is used .. versionadded:: 6.2.0 float_interpolation_mode : int interpolation mode for float channels; default 1 * 0 - repeat previous sample * 1 - use linear interpolation .. 
versionadded:: 6.2.0 Returns ------- result : Signal new *Signal* cut from the original Examples -------- >>> new_sig = old_sig.cut(1.0, 10.5) >>> new_sig.timestamps[0], new_sig.timestamps[-1] 0.98, 10.48 """ if integer_interpolation_mode is None: if interpolation_mode is not None: integer_interpolation_mode = interpolation_mode else: integer_interpolation_mode = 0 else: integer_interpolation_mode = 0 if integer_interpolation_mode not in (0, 1, 2): raise MdfException("Integer interpolation mode should be one of (0, 1, 2)") if float_interpolation_mode not in (0, 1): raise MdfException("Float interpolation mode should be one of (0, 1)") ends = (start, stop) if len(self) == 0: result = Signal( np.array([], dtype=self.samples.dtype), np.array([], dtype=self.timestamps.dtype), self.unit, self.name, self.conversion, self.comment, self.raw, self.master_metadata, self.display_name, self.attachment, self.source, self.bit_count, self.stream_sync, encoding=self.encoding, group_index=self.group_index, channel_index=self.channel_index, ) elif start is None and stop is None: # return the channel uncut result = Signal( self.samples.copy(), self.timestamps.copy(), self.unit, self.name, self.conversion, self.comment, self.raw, self.master_metadata, self.display_name, self.attachment, self.source, self.bit_count, self.stream_sync, invalidation_bits=self.invalidation_bits.copy() if self.invalidation_bits is not None else None, encoding=self.encoding, group_index=self.group_index, channel_index=self.channel_index, ) else: if start is None: # cut from begining to stop if stop < self.timestamps[0]: result = Signal( np.array([], dtype=self.samples.dtype), np.array([], dtype=self.timestamps.dtype), self.unit, self.name, self.conversion, self.comment, self.raw, self.master_metadata, self.display_name, self.attachment, self.source, self.bit_count, self.stream_sync, encoding=self.encoding, group_index=self.group_index, channel_index=self.channel_index, ) else: stop = 
np.searchsorted(self.timestamps, stop, side="right") if ( include_ends and ends[-1] not in self.timestamps and ends[-1] < self.timestamps[-1] ): interpolated = self.interp( [ends[1]], integer_interpolation_mode=integer_interpolation_mode, float_interpolation_mode=float_interpolation_mode, ) samples = np.append( self.samples[:stop], interpolated.samples, axis=0 ) timestamps = np.append(self.timestamps[:stop], ends[1]) if self.invalidation_bits is not None: invalidation_bits = np.append( self.invalidation_bits[:stop], interpolated.invalidation_bits, ) else: invalidation_bits = None else: samples = self.samples[:stop].copy() timestamps = self.timestamps[:stop].copy() if self.invalidation_bits is not None: invalidation_bits = self.invalidation_bits[:stop].copy() else: invalidation_bits = None result = Signal( samples, timestamps, self.unit, self.name, self.conversion, self.comment, self.raw, self.master_metadata, self.display_name, self.attachment, self.source, self.bit_count, self.stream_sync, invalidation_bits=invalidation_bits, encoding=self.encoding, group_index=self.group_index, channel_index=self.channel_index, ) elif stop is None: # cut from start to end if start > self.timestamps[-1]: result = Signal( np.array([], dtype=self.samples.dtype), np.array([], dtype=self.timestamps.dtype), self.unit, self.name, self.conversion, self.comment, self.raw, self.master_metadata, self.display_name, self.attachment, self.source, self.bit_count, self.stream_sync, encoding=self.encoding, group_index=self.group_index, channel_index=self.channel_index, ) else: start = np.searchsorted(self.timestamps, start, side="left") if ( include_ends and ends[0] not in self.timestamps and ends[0] > self.timestamps[0] ): interpolated = self.interp( [ends[0]], integer_interpolation_mode=integer_interpolation_mode, float_interpolation_mode=float_interpolation_mode, ) samples = np.append( interpolated.samples, self.samples[start:], axis=0 ) timestamps = np.append(ends[0], self.timestamps[start:]) 
if self.invalidation_bits is not None: invalidation_bits = np.append( interpolated.invalidation_bits, self.invalidation_bits[start:], ) else: invalidation_bits = None else: samples = self.samples[start:].copy() timestamps = self.timestamps[start:].copy() if self.invalidation_bits is not None: invalidation_bits = self.invalidation_bits[start:].copy() else: invalidation_bits = None result = Signal( samples, timestamps, self.unit, self.name, self.conversion, self.comment, self.raw, self.master_metadata, self.display_name, self.attachment, self.source, self.bit_count, self.stream_sync, invalidation_bits=invalidation_bits, encoding=self.encoding, group_index=self.group_index, channel_index=self.channel_index, ) else: # cut between start and stop if start > self.timestamps[-1] or stop < self.timestamps[0]: result = Signal( np.array([], dtype=self.samples.dtype), np.array([], dtype=self.timestamps.dtype), self.unit, self.name, self.conversion, self.comment, self.raw, self.master_metadata, self.display_name, self.attachment, self.source, self.bit_count, self.stream_sync, encoding=self.encoding, group_index=self.group_index, channel_index=self.channel_index, ) else: start = np.searchsorted(self.timestamps, start, side="left") stop = np.searchsorted(self.timestamps, stop, side="right") if start == stop: if include_ends: interpolated = self.interp( np.unique(ends), integer_interpolation_mode=integer_interpolation_mode, float_interpolation_mode=float_interpolation_mode, ) samples = interpolated.samples timestamps = np.array( np.unique(ends), dtype=self.timestamps.dtype ) invalidation_bits = interpolated.invalidation_bits else: samples = np.array([], dtype=self.samples.dtype) timestamps = np.array([], dtype=self.timestamps.dtype) if self.invalidation_bits is not None: invalidation_bits = np.array([], dtype=bool) else: invalidation_bits = None else: samples = self.samples[start:stop].copy() timestamps = self.timestamps[start:stop].copy() if self.invalidation_bits is not None: 
invalidation_bits = self.invalidation_bits[ start:stop ].copy() else: invalidation_bits = None if ( include_ends and ends[-1] not in self.timestamps and ends[-1] < self.timestamps[-1] ): interpolated = self.interp( [ends[1]], integer_interpolation_mode=integer_interpolation_mode, float_interpolation_mode=float_interpolation_mode, ) samples = np.append(samples, interpolated.samples, axis=0) timestamps = np.append(timestamps, ends[1]) if invalidation_bits is not None: invalidation_bits = np.append( invalidation_bits, interpolated.invalidation_bits ) if ( include_ends and ends[0] not in self.timestamps and ends[0] > self.timestamps[0] ): interpolated = self.interp( [ends[0]], integer_interpolation_mode=integer_interpolation_mode, float_interpolation_mode=float_interpolation_mode, ) samples = np.append(interpolated.samples, samples, axis=0) timestamps = np.append(ends[0], timestamps) if invalidation_bits is not None: invalidation_bits = np.append( interpolated.invalidation_bits, invalidation_bits ) result = Signal( samples, timestamps, self.unit, self.name, self.conversion, self.comment, self.raw, self.master_metadata, self.display_name, self.attachment, self.source, self.bit_count, self.stream_sync, invalidation_bits=invalidation_bits, encoding=self.encoding, group_index=self.group_index, channel_index=self.channel_index, ) return result def extend(self, other): """extend signal with samples from another signal Parameters ---------- other : Signal Returns ------- signal : Signal new extended *Signal* """ if len(self.timestamps): last_stamp = self.timestamps[-1] else: last_stamp = 0 if len(other): other_first_sample = other.timestamps[0] if last_stamp >= other_first_sample: timestamps = other.timestamps + last_stamp else: timestamps = other.timestamps if self.invalidation_bits is None and other.invalidation_bits is None: invalidation_bits = None elif self.invalidation_bits is None and other.invalidation_bits is not None: invalidation_bits = np.concatenate( 
(np.zeros(len(self), dtype=bool), other.invalidation_bits) ) elif self.invalidation_bits is not None and other.invalidation_bits is None: invalidation_bits = np.concatenate( (self.invalidation_bits, np.zeros(len(other), dtype=bool)) ) else: invalidation_bits = np.append( self.invalidation_bits, other.invalidation_bits ) result = Signal( np.append(self.samples, other.samples, axis=0), np.append(self.timestamps, timestamps), self.unit, self.name, self.conversion, self.comment, self.raw, self.master_metadata, self.display_name, self.attachment, self.source, self.bit_count, self.stream_sync, invalidation_bits=invalidation_bits, encoding=self.encoding, group_index=self.group_index, channel_index=self.channel_index, ) else: result = self return result def interp( self, new_timestamps, interpolation_mode=None, integer_interpolation_mode=None, float_interpolation_mode=1, ): """returns a new *Signal* interpolated using the *new_timestamps* Parameters ---------- new_timestamps : np.array timestamps used for interpolation interpolation_mode : int interpolation mode for integer signals; default 0. You should use the new *integer_interpolation_mode* argument since this will be deprecated in a later release * 0 - repeat previous samples * 1 - linear interpolation * 2 - hybrid interpolation: channels with integer data type (raw values) that have a conversion that outputs float values will use linear interpolation, otherwise the previous sample is used .. versionchanged:: 6.2.0 added hybrid mode interpolation integer_interpolation_mode : int interpolation mode for integer signals; default 0 * 0 - repeat previous samples * 1 - linear interpolation * 2 - hybrid interpolation: channels with integer data type (raw values) that have a conversion that outputs float values will use linear interpolation, otherwise the previous sample is used .. 
versionadded:: 6.2.0 float_interpolation_mode : int interpolation mode for float channels; default 1 * 0 - repeat previous sample * 1 - use linear interpolation .. versionadded:: 6.2.0 Returns ------- signal : Signal new interpolated *Signal* """ if integer_interpolation_mode is None: if interpolation_mode is not None: integer_interpolation_mode = interpolation_mode else: integer_interpolation_mode = 0 else: integer_interpolation_mode = 0 if integer_interpolation_mode not in (0, 1, 2): raise MdfException("Integer interpolation mode should be one of (0, 1, 2)") if float_interpolation_mode not in (0, 1): raise MdfException("Float interpolation mode should be one of (0, 1)") if not len(self.samples) or not len(new_timestamps): return Signal( self.samples[:0].copy(), self.timestamps[:0].copy(), self.unit, self.name, comment=self.comment, conversion=self.conversion, raw=self.raw, master_metadata=self.master_metadata, display_name=self.display_name, attachment=self.attachment, stream_sync=self.stream_sync, invalidation_bits=None, encoding=self.encoding, group_index=self.group_index, channel_index=self.channel_index, ) else: if len(self.samples.shape) > 1: idx = np.searchsorted(self.timestamps, new_timestamps, side="right") idx -= 1 idx = np.clip(idx, 0, idx[-1]) s = self.samples[idx] if self.invalidation_bits is not None: invalidation_bits = self.invalidation_bits[idx] else: invalidation_bits = None else: kind = self.samples.dtype.kind if kind == "f": if float_interpolation_mode == 0: idx = np.searchsorted( self.timestamps, new_timestamps, side="right" ) idx -= 1 idx = np.clip(idx, 0, idx[-1]) s = self.samples[idx] if self.invalidation_bits is not None: invalidation_bits = self.invalidation_bits[idx] else: invalidation_bits = None else: s = np.interp(new_timestamps, self.timestamps, self.samples) if self.invalidation_bits is not None: idx = np.searchsorted( self.timestamps, new_timestamps, side="right" ) idx -= 1 idx = np.clip(idx, 0, idx[-1]) invalidation_bits = 
self.invalidation_bits[idx] else: invalidation_bits = None elif kind in "ui": if integer_interpolation_mode == 2: if self.raw and self.conversion: kind = self.conversion.convert(self.samples[:1]).dtype.kind if kind == "f": integer_interpolation_mode = 1 if integer_interpolation_mode == 2: integer_interpolation_mode = 0 if integer_interpolation_mode == 1: s = np.interp( new_timestamps, self.timestamps, self.samples ).astype(self.samples.dtype) if self.invalidation_bits is not None: idx = np.searchsorted( self.timestamps, new_timestamps, side="right" ) idx -= 1 idx = np.clip(idx, 0, idx[-1]) invalidation_bits = self.invalidation_bits[idx] else: invalidation_bits = None elif integer_interpolation_mode == 0: idx = np.searchsorted( self.timestamps, new_timestamps, side="right" ) idx -= 1 idx = np.clip(idx, 0, idx[-1]) s = self.samples[idx] if self.invalidation_bits is not None: invalidation_bits = self.invalidation_bits[idx] else: invalidation_bits = None else: idx = np.searchsorted(self.timestamps, new_timestamps, side="right") idx -= 1 idx = np.clip(idx, 0, idx[-1]) s = self.samples[idx] if self.invalidation_bits is not None: invalidation_bits = self.invalidation_bits[idx] else: invalidation_bits = None return Signal( s, new_timestamps, self.unit, self.name, comment=self.comment, conversion=self.conversion, source=self.source, raw=self.raw, master_metadata=self.master_metadata, display_name=self.display_name, attachment=self.attachment, stream_sync=self.stream_sync, invalidation_bits=invalidation_bits, encoding=self.encoding, group_index=self.group_index, channel_index=self.channel_index, ) def __apply_func(self, other, func_name): """delegate operations to the *samples* attribute, but in a time correct manner by considering the *timestamps* """ if isinstance(other, Signal): if len(self) and len(other): start = max(self.timestamps[0], other.timestamps[0]) stop = min(self.timestamps[-1], other.timestamps[-1]) s1 = self.physical().cut(start, stop) s2 = 
other.physical().cut(start, stop) else: s1 = self s2 = other time = np.union1d(s1.timestamps, s2.timestamps) s = s1.interp(time).samples o = s2.interp(time).samples func = getattr(s, func_name) conversion = None s = func(o) elif other is None: s = self.samples conversion = self.conversion time = self.timestamps else: func = getattr(self.samples, func_name) s = func(other) conversion = self.conversion time = self.timestamps return Signal( samples=s, timestamps=time, unit=self.unit, name=self.name, conversion=conversion, raw=self.raw, master_metadata=self.master_metadata, display_name=self.display_name, attachment=self.attachment, stream_sync=self.stream_sync, invalidation_bits=self.invalidation_bits, source=self.source, encoding=self.encoding, group_index=self.group_index, channel_index=self.channel_index, ) def __pos__(self): return self def __neg__(self): return Signal( np.negative(self.samples), self.timestamps, unit=self.unit, name=self.name, conversion=self.conversion, raw=self.raw, master_metadata=self.master_metadata, display_name=self.display_name, attachment=self.attachment, stream_sync=self.stream_sync, invalidation_bits=self.invalidation_bits, source=self.source, encoding=self.encoding, ) def __round__(self, n): return Signal( np.around(self.samples, n), self.timestamps, unit=self.unit, name=self.name, conversion=self.conversion, raw=self.raw, master_metadata=self.master_metadata, display_name=self.display_name, attachment=self.attachment, stream_sync=self.stream_sync, invalidation_bits=self.invalidation_bits, source=self.source, encoding=self.encoding, ) def __sub__(self, other): return self.__apply_func(other, "__sub__") def __isub__(self, other): return self.__sub__(other) def __rsub__(self, other): return -self.__sub__(other) def __add__(self, other): return self.__apply_func(other, "__add__") def __iadd__(self, other): return self.__add__(other) def __radd__(self, other): return self.__add__(other) def __truediv__(self, other): return 
self.__apply_func(other, "__truediv__") def __itruediv__(self, other): return self.__truediv__(other) def __rtruediv__(self, other): return self.__apply_func(other, "__rtruediv__") def __mul__(self, other): return self.__apply_func(other, "__mul__") def __imul__(self, other): return self.__mul__(other) def __rmul__(self, other): return self.__mul__(other) def __floordiv__(self, other): return self.__apply_func(other, "__floordiv__") def __ifloordiv__(self, other): return self.__truediv__(other) def __rfloordiv__(self, other): return 1 / self.__apply_func(other, "__rfloordiv__") def __mod__(self, other): return self.__apply_func(other, "__mod__") def __pow__(self, other): return self.__apply_func(other, "__pow__") def __and__(self, other): return self.__apply_func(other, "__and__") def __or__(self, other): return self.__apply_func(other, "__or__") def __xor__(self, other): return self.__apply_func(other, "__xor__") def __invert__(self): s = ~self.samples time = self.timestamps return Signal( s, time, unit=self.unit, name=self.name, conversion=self.conversion, raw=self.raw, master_metadata=self.master_metadata, display_name=self.display_name, attachment=self.attachment, stream_sync=self.stream_sync, invalidation_bits=self.invalidation_bits, source=self.source, encoding=self.encoding, ) def __lshift__(self, other): return self.__apply_func(other, "__lshift__") def __rshift__(self, other): return self.__apply_func(other, "__rshift__") def __lt__(self, other): return self.__apply_func(other, "__lt__") def __le__(self, other): return self.__apply_func(other, "__le__") def __gt__(self, other): return self.__apply_func(other, "__gt__") def __ge__(self, other): return self.__apply_func(other, "__ge__") def __eq__(self, other): return self.__apply_func(other, "__eq__") def __ne__(self, other): return self.__apply_func(other, "__ne__") def __iter__(self): for item in (self.samples, self.timestamps, self.unit, self.name): yield item def __reversed__(self): return 
enumerate(zip(reversed(self.samples), reversed(self.timestamps))) def __len__(self): return len(self.samples) def __abs__(self): return Signal( np.fabs(self.samples), self.timestamps, unit=self.unit, name=self.name, conversion=self.conversion, raw=self.raw, master_metadata=self.master_metadata, display_name=self.display_name, attachment=self.attachment, stream_sync=self.stream_sync, invalidation_bits=self.invalidation_bits, source=self.source, ) def __getitem__(self, val): return self.samples[val] def __setitem__(self, idx, val): self.samples[idx] = val def astype(self, np_type): """returns new *Signal* with samples of dtype *np_type* Parameters ---------- np_type : np.dtype new numpy dtye Returns ------- signal : Signal new *Signal* with the samples of *np_type* dtype """ return Signal( self.samples.astype(np_type), self.timestamps, unit=self.unit, name=self.name, conversion=self.conversion, raw=self.raw, master_metadata=self.master_metadata, display_name=self.display_name, attachment=self.attachment, stream_sync=self.stream_sync, invalidation_bits=self.invalidation_bits, source=self.source, encoding=self.encoding, ) def physical(self): """ get the physical samples values Returns ------- phys : Signal new *Signal* with physical values """ if not self.raw or self.conversion is None: samples = self.samples.copy() encoding = None else: samples = self.conversion.convert(self.samples) if samples.dtype.kind == "S": encoding = "utf-8" if self.conversion.id == b"##CC" else "latin-1" else: encoding = None return Signal( samples, self.timestamps.copy(), unit=self.unit, name=self.name, conversion=None, raw=False, master_metadata=self.master_metadata, display_name=self.display_name, attachment=self.attachment, stream_sync=self.stream_sync, invalidation_bits=self.invalidation_bits, source=self.source, encoding=encoding, group_index=self.group_index, channel_index=self.channel_index, ) def validate(self, copy=True): """appply invalidation bits if they are available for this 
signal Parameters ---------- copy (True) : bool return a copy of the result .. versionadded:: 5.12.0 """ if self.invalidation_bits is None: signal = self else: idx = np.nonzero(~self.invalidation_bits)[0] signal = Signal( self.samples[idx], self.timestamps[idx], self.unit, self.name, self.conversion, self.comment, self.raw, self.master_metadata, self.display_name, self.attachment, self.source, self.bit_count, self.stream_sync, invalidation_bits=None, encoding=self.encoding, group_index=self.group_index, channel_index=self.channel_index, ) if copy: signal = signal.copy() return signal def copy(self): """ copy all attributes to a new Signal """ return Signal( self.samples.copy(), self.timestamps.copy(), self.unit, self.name, self.conversion, self.comment, self.raw, self.master_metadata, self.display_name, self.attachment, self.source, self.bit_count, self.stream_sync, invalidation_bits=self.invalidation_bits.copy() if self.invalidation_bits is not None else None, encoding=self.encoding, group_index=self.group_index, channel_index=self.channel_index, ) if __name__ == "__main__": pass
lgpl-3.0
nmayorov/scikit-learn
examples/svm/plot_weighted_samples.py
95
1943
""" ===================== SVM: Weighted samples ===================== Plot decision function of a weighted dataset, where the size of points is proportional to its weight. The sample weighting rescales the C parameter, which means that the classifier puts more emphasis on getting these points right. The effect might often be subtle. To emphasize the effect here, we particularly weight outliers, making the deformation of the decision boundary very visible. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn import svm def plot_decision_function(classifier, sample_weight, axis, title): # plot the decision function xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500)) Z = classifier.decision_function(np.c_[xx.ravel(), yy.ravel()]) Z = Z.reshape(xx.shape) # plot the line, the points, and the nearest vectors to the plane axis.contourf(xx, yy, Z, alpha=0.75, cmap=plt.cm.bone) axis.scatter(X[:, 0], X[:, 1], c=y, s=100 * sample_weight, alpha=0.9, cmap=plt.cm.bone) axis.axis('off') axis.set_title(title) # we create 20 points np.random.seed(0) X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)] y = [1] * 10 + [-1] * 10 sample_weight_last_ten = abs(np.random.randn(len(X))) sample_weight_constant = np.ones(len(X)) # and bigger weights to some outliers sample_weight_last_ten[15:] *= 5 sample_weight_last_ten[9] *= 15 # for reference, first fit without class weights # fit the model clf_weights = svm.SVC() clf_weights.fit(X, y, sample_weight=sample_weight_last_ten) clf_no_weights = svm.SVC() clf_no_weights.fit(X, y) fig, axes = plt.subplots(1, 2, figsize=(14, 6)) plot_decision_function(clf_no_weights, sample_weight_constant, axes[0], "Constant weights") plot_decision_function(clf_weights, sample_weight_last_ten, axes[1], "Modified weights") plt.show()
bsd-3-clause
steveryb/pyprocrustes
visualise.py
1
1106
import matplotlib as mpl from mpl_toolkits.mplot3d import Axes3D import numpy as np import matplotlib.pyplot as plt import Procrustes def load_curve_data(filename): with open(filename) as f: line = f.readline() data = line.split(",") version = data[0] num_points = data[1] data = data[2:] points = [tuple((float(data[i+j]) for j in range(3))) for i in range(0,len(data),3)] return points def draw_curves(curves_to_draw): mpl.rcParams['legend.fontsize'] = 10 fig = plt.figure() ax = fig.gca(projection='3d') for curve in curves_to_draw: x,y,z = [[point[i] for point in curve] for i in range(3)] ax.plot(x,y,zs=z, label="curve") ax.legend() plt.show() def visualise(filenames): curves_to_draw = [load_curve_data(filename) for filename in filenames] draw_curves(curves_to_draw) curves = [load_curve_data(f) for f in ["curves/13 1","curves/ref_1.crv"]] curves = Procrustes.superposition(*[np.array(c) for c in curves]) print("distance",Procrustes.min_distance(*curves)) draw_curves(curves)
bsd-2-clause
tuanavu/deep-learning-a-z
DeepLearningA-Z/02-supervised-deep-learning/01-Artificial-Neural-Networks-ANN/Artificial_Neural_Networks/ann.py
5
2306
# Artificial Neural Network # Installing Theano # pip install --upgrade --no-deps git+git://github.com/Theano/Theano.git # Installing Tensorflow # pip install tensorflow # Installing Keras # pip install --upgrade keras # Part 1 - Data Preprocessing # Importing the libraries import numpy as np import matplotlib.pyplot as plt import pandas as pd # Importing the dataset dataset = pd.read_csv('Churn_Modelling.csv') X = dataset.iloc[:, 3:13].values y = dataset.iloc[:, 13].values # Encoding categorical data from sklearn.preprocessing import LabelEncoder, OneHotEncoder labelencoder_X_1 = LabelEncoder() X[:, 1] = labelencoder_X_1.fit_transform(X[:, 1]) labelencoder_X_2 = LabelEncoder() X[:, 2] = labelencoder_X_2.fit_transform(X[:, 2]) onehotencoder = OneHotEncoder(categorical_features = [1]) X = onehotencoder.fit_transform(X).toarray() X = X[:, 1:] # Splitting the dataset into the Training set and Test set from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0) # Feature Scaling from sklearn.preprocessing import StandardScaler sc = StandardScaler() X_train = sc.fit_transform(X_train) X_test = sc.transform(X_test) # Part 2 - Now let's make the ANN! 
# Importing the Keras libraries and packages import keras from keras.models import Sequential from keras.layers import Dense # Initialising the ANN classifier = Sequential() # Adding the input layer and the first hidden layer classifier.add(Dense(units = 6, kernel_initializer = 'uniform', activation = 'relu', input_dim = 11)) # Adding the second hidden layer classifier.add(Dense(units = 6, kernel_initializer = 'uniform', activation = 'relu')) # Adding the output layer classifier.add(Dense(units = 1, kernel_initializer = 'uniform', activation = 'sigmoid')) # Compiling the ANN classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy']) # Fitting the ANN to the Training set classifier.fit(X_train, y_train, batch_size = 10, epochs = 100) # Part 3 - Making predictions and evaluating the model # Predicting the Test set results y_pred = classifier.predict(X_test) y_pred = (y_pred > 0.5) # Making the Confusion Matrix from sklearn.metrics import confusion_matrix cm = confusion_matrix(y_test, y_pred)
mit
iosonofabio/singlet
setup.py
1
2579
#!/usr/bin/env python # vim: fdm=indent ''' author: Fabio Zanini date: 08/08/17 content: Setup script for singlet ''' import sys import os from distutils.log import INFO as logINFO if ((sys.version_info[0] < 3) or (sys.version_info[0] == 3 and sys.version_info[1] < 4)): sys.stderr.write("Error in setup script for singlet:\n") sys.stderr.write("Singlet supports Python 3.4+.") sys.exit(1) # Setuptools but not distutils support build/runtime/optional dependencies try: from setuptools import setup, Extension, find_packages from setuptools.command.build_py import build_py from setuptools import Command kwargs = dict( setup_requires=[ 'PyYAML', 'numpy', 'pandas', 'xarray', 'matplotlib', ], install_requires=[ 'PyYAML', 'numpy', 'pandas', 'xarray', 'matplotlib', ], ) except ImportError: sys.stderr.write("Could not import 'setuptools'," + " falling back to 'distutils'.\n") from distutils.core import setup, Extension, find_packages from distutils.command.build_py import build_py from distutils.cmd import Command kwargs = dict( requires=[ 'PyYAML', 'numpy', 'pandas', 'xarray', 'matplotlib', ] ) # Get version with open('singlet/_version.py') as fversion: version = fversion.readline().rstrip().split(' ')[-1] # Setup function setup(name='singlet', version=version, author='Fabio Zanini', author_email='fabio.zanini@stanford.edu', maintainer='Fabio Zanini', maintainer_email='fabio.zanini@stanford.edu', url='https://github.com/iosonofabio/singlet', description="Single cell RNA Seq analysis", long_description=""" Single cell RNA Seq analysis. 
**Development**: https://github.com/iosonofabio/singlet **Documentation**: http://singlet.readthedocs.io""", license='GPL3', classifiers=[ 'Development Status :: 3 - Alpha', 'Topic :: Scientific/Engineering :: Bio-Informatics', 'Intended Audience :: Developers', 'Intended Audience :: Science/Research', 'License :: OSI Approved :: GNU General Public License (GPL)', 'Operating System :: POSIX', 'Programming Language :: Python' ], packages=['singlet'] + ['singlet.' + s for s in find_packages(where='singlet')], **kwargs )
mit
Akshay0724/scikit-learn
examples/calibration/plot_calibration_multiclass.py
95
6971
""" ================================================== Probability Calibration for 3-class classification ================================================== This example illustrates how sigmoid calibration changes predicted probabilities for a 3-class classification problem. Illustrated is the standard 2-simplex, where the three corners correspond to the three classes. Arrows point from the probability vectors predicted by an uncalibrated classifier to the probability vectors predicted by the same classifier after sigmoid calibration on a hold-out validation set. Colors indicate the true class of an instance (red: class 1, green: class 2, blue: class 3). The base classifier is a random forest classifier with 25 base estimators (trees). If this classifier is trained on all 800 training datapoints, it is overly confident in its predictions and thus incurs a large log-loss. Calibrating an identical classifier, which was trained on 600 datapoints, with method='sigmoid' on the remaining 200 datapoints reduces the confidence of the predictions, i.e., moves the probability vectors from the edges of the simplex towards the center. This calibration results in a lower log-loss. Note that an alternative would have been to increase the number of base estimators which would have resulted in a similar decrease in log-loss. """ print(__doc__) # Author: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de> # License: BSD Style. 
import matplotlib.pyplot as plt import numpy as np from sklearn.datasets import make_blobs from sklearn.ensemble import RandomForestClassifier from sklearn.calibration import CalibratedClassifierCV from sklearn.metrics import log_loss np.random.seed(0) # Generate data X, y = make_blobs(n_samples=1000, n_features=2, random_state=42, cluster_std=5.0) X_train, y_train = X[:600], y[:600] X_valid, y_valid = X[600:800], y[600:800] X_train_valid, y_train_valid = X[:800], y[:800] X_test, y_test = X[800:], y[800:] # Train uncalibrated random forest classifier on whole train and validation # data and evaluate on test data clf = RandomForestClassifier(n_estimators=25) clf.fit(X_train_valid, y_train_valid) clf_probs = clf.predict_proba(X_test) score = log_loss(y_test, clf_probs) # Train random forest classifier, calibrate on validation data and evaluate # on test data clf = RandomForestClassifier(n_estimators=25) clf.fit(X_train, y_train) clf_probs = clf.predict_proba(X_test) sig_clf = CalibratedClassifierCV(clf, method="sigmoid", cv="prefit") sig_clf.fit(X_valid, y_valid) sig_clf_probs = sig_clf.predict_proba(X_test) sig_score = log_loss(y_test, sig_clf_probs) # Plot changes in predicted probabilities via arrows plt.figure(0) colors = ["r", "g", "b"] for i in range(clf_probs.shape[0]): plt.arrow(clf_probs[i, 0], clf_probs[i, 1], sig_clf_probs[i, 0] - clf_probs[i, 0], sig_clf_probs[i, 1] - clf_probs[i, 1], color=colors[y_test[i]], head_width=1e-2) # Plot perfect predictions plt.plot([1.0], [0.0], 'ro', ms=20, label="Class 1") plt.plot([0.0], [1.0], 'go', ms=20, label="Class 2") plt.plot([0.0], [0.0], 'bo', ms=20, label="Class 3") # Plot boundaries of unit simplex plt.plot([0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], 'k', label="Simplex") # Annotate points on the simplex plt.annotate(r'($\frac{1}{3}$, $\frac{1}{3}$, $\frac{1}{3}$)', xy=(1.0/3, 1.0/3), xytext=(1.0/3, .23), xycoords='data', arrowprops=dict(facecolor='black', shrink=0.05), horizontalalignment='center', 
verticalalignment='center') plt.plot([1.0/3], [1.0/3], 'ko', ms=5) plt.annotate(r'($\frac{1}{2}$, $0$, $\frac{1}{2}$)', xy=(.5, .0), xytext=(.5, .1), xycoords='data', arrowprops=dict(facecolor='black', shrink=0.05), horizontalalignment='center', verticalalignment='center') plt.annotate(r'($0$, $\frac{1}{2}$, $\frac{1}{2}$)', xy=(.0, .5), xytext=(.1, .5), xycoords='data', arrowprops=dict(facecolor='black', shrink=0.05), horizontalalignment='center', verticalalignment='center') plt.annotate(r'($\frac{1}{2}$, $\frac{1}{2}$, $0$)', xy=(.5, .5), xytext=(.6, .6), xycoords='data', arrowprops=dict(facecolor='black', shrink=0.05), horizontalalignment='center', verticalalignment='center') plt.annotate(r'($0$, $0$, $1$)', xy=(0, 0), xytext=(.1, .1), xycoords='data', arrowprops=dict(facecolor='black', shrink=0.05), horizontalalignment='center', verticalalignment='center') plt.annotate(r'($1$, $0$, $0$)', xy=(1, 0), xytext=(1, .1), xycoords='data', arrowprops=dict(facecolor='black', shrink=0.05), horizontalalignment='center', verticalalignment='center') plt.annotate(r'($0$, $1$, $0$)', xy=(0, 1), xytext=(.1, 1), xycoords='data', arrowprops=dict(facecolor='black', shrink=0.05), horizontalalignment='center', verticalalignment='center') # Add grid plt.grid("off") for x in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]: plt.plot([0, x], [x, 0], 'k', alpha=0.2) plt.plot([0, 0 + (1-x)/2], [x, x + (1-x)/2], 'k', alpha=0.2) plt.plot([x, x + (1-x)/2], [0, 0 + (1-x)/2], 'k', alpha=0.2) plt.title("Change of predicted probabilities after sigmoid calibration") plt.xlabel("Probability class 1") plt.ylabel("Probability class 2") plt.xlim(-0.05, 1.05) plt.ylim(-0.05, 1.05) plt.legend(loc="best") print("Log-loss of") print(" * uncalibrated classifier trained on 800 datapoints: %.3f " % score) print(" * classifier trained on 600 datapoints and calibrated on " "200 datapoint: %.3f" % sig_score) # Illustrate calibrator plt.figure(1) # generate grid over 2-simplex p1d = np.linspace(0, 1, 
20) p0, p1 = np.meshgrid(p1d, p1d) p2 = 1 - p0 - p1 p = np.c_[p0.ravel(), p1.ravel(), p2.ravel()] p = p[p[:, 2] >= 0] calibrated_classifier = sig_clf.calibrated_classifiers_[0] prediction = np.vstack([calibrator.predict(this_p) for calibrator, this_p in zip(calibrated_classifier.calibrators_, p.T)]).T prediction /= prediction.sum(axis=1)[:, None] # Plot modifications of calibrator for i in range(prediction.shape[0]): plt.arrow(p[i, 0], p[i, 1], prediction[i, 0] - p[i, 0], prediction[i, 1] - p[i, 1], head_width=1e-2, color=colors[np.argmax(p[i])]) # Plot boundaries of unit simplex plt.plot([0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], 'k', label="Simplex") plt.grid("off") for x in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]: plt.plot([0, x], [x, 0], 'k', alpha=0.2) plt.plot([0, 0 + (1-x)/2], [x, x + (1-x)/2], 'k', alpha=0.2) plt.plot([x, x + (1-x)/2], [0, 0 + (1-x)/2], 'k', alpha=0.2) plt.title("Illustration of sigmoid calibrator") plt.xlabel("Probability class 1") plt.ylabel("Probability class 2") plt.xlim(-0.05, 1.05) plt.ylim(-0.05, 1.05) plt.show()
bsd-3-clause
astraw/pairs2groups
pairs2groups/__init__.py
1
13702
"""find homogeneous groups of items based on pairwise information A motivating example -------------------- Suppose you have performed several experiments from four treatments (treatments 1,2,3, and 4). From each treatment, you have collected many independent samples. The question is into what groups of 'statistically not different' the results may be divided. This package, pairs2groups, finds the groups into which the treatments may be divided such that each member of a group is not significantly different than other members in the group. A picture may make this more clear. Samples from each treatment are plotted below in a boxplot. The letters below each box describe the groups each treatment is a member of. For example, all treatments in group 'a' are not statistically significantly different than each other. In this example, treatment 1 is in groups 'a' and 'b'. Therefore, it is statistically significantly different from treatment 4, which is not in either of these groups. Treatment 1 is not statistically significantly different from treatments 2 or 3, which which it shares membership in groups 'b' and 'a', respectively. .. 
plot:: import matplotlib.pyplot as plt import matplotlib.transforms as mtransforms import numpy as np from pairs2groups import label_homogeneous_groups np.random.seed(2) inc = 0.1 e1 = np.random.uniform(0,1, size=(500,)) e2 = np.random.uniform(0,1, size=(500,)) e3 = np.random.uniform(0,1 + inc, size=(500,)) e4 = np.random.uniform(0,1 + 2*inc, size=(500,)) treatments = [e1,e2,e3,e4] group_info = label_homogeneous_groups(treatments) fig = plt.figure() ax = fig.add_subplot(111) pos = np.array(range(len(treatments)))+1 bp = ax.boxplot( treatments, notch=0, sym='k+', patch_artist=True, positions=pos ) text_transform= mtransforms.blended_transform_factory(ax.transData, ax.transAxes) for i,label in enumerate(group_info['group_strings']): ax.text( pos[i], 0.02, label, transform=text_transform,ha='center') ax.set_xlabel('treatment') ax.set_ylabel('response') ax.set_ylim(-0.2, 1.4) plt.setp(bp['whiskers'], color='k', linestyle='-' ) plt.setp(bp['fliers'], markersize=3.0) fig.subplots_adjust(right=0.99,top=0.99) plt.show() Problem definition ------------------ The problem is how to find the (minimal but complete) set of homogeneous groups of a collection of items. A homogeneous group is defined to have no member that is 'different' (defined below) than any other member. Consider the problem of *n* items and pairwise knowledge of whether each item is either 'different' or 'not different' from every other item. This property of 'different' is commutative (A is different than B means B is different than A), but not transtive (A is different than B is different than C does not specify the relation between A and C). How to construct groups such that every member population of a group is not different than the other populations in the group? 
Development information ----------------------- The source code and issue tracker for this library are at http://github.com/astraw/pairs2groups """ import pairs2groups.util as util __version__ = '1.0.0' # also ../setup.py and ../doc-src/conf.py def find_homogeneous_groups( different_pairs, N_items ): """Find all homogeneous groups of not-different pairs. The algorithm used is as follows, where *S* is the set of all *n* items. 1. Set *k* equal *n*, and *T* equal *S*. 2. Set *m* equal *n* choose *k*. Take all (*m* in number) *k* element subsets of *T*. Denote the *i*th subset of *T* as *U_i*. 3. For *i* in (0, ..., m-1): 3a. If no pair within *U_i* is different, then *U_i* is a group. Remember it. 3b. Else, set *k* equal *k-1*, and *T* equal *U_i*. Goto 2. Parameters ---------- different_pairs : list of 2-tuples A list of pairs specifying different between two items N_popupations : int The number of items in the population Example ------- >>> diff = [ (0,2) ] >>> N = 3 >>> find_homogeneous_groups( diff, N ) [(1, 2), (0, 1)] """ # setup initial values S = frozenset( range(N_items) ) k = N_items assert k == len(S) T = S.copy() different_pairs = set([ util.UnorderedPair((p[0],p[1])) for p in different_pairs]) # define the recursive function def _f( T, k, already_descended ): this_good_sets = [] U = util.get_k_element_subsets( k, T ) m = len(U) for i, U_i in enumerate(U): if U_i in already_descended: continue else: already_descended.add(U_i) pairs = frozenset(util.get_all_pairs(U_i)) if len( pairs.intersection( different_pairs )): if k >= 3: child_good_sets = _f( U_i, k-1, already_descended) this_good_sets.extend( child_good_sets ) else: this_good_sets.append( U_i ) return this_good_sets # call the recursive function good_sets = _f(T,k,set()) # remove non-unique by a round-trip through set good_sets = list(set(good_sets)) final_sets = util.remove_overlapping_subsets( good_sets ) return [ tuple(f) for f in final_sets] def test_find_homogeneous_groups(): N = 2 diff = [ 
(0,1) ] expected = [] actual = find_homogeneous_groups( diff, N ) assert util.is_list_of_sets_equal(actual,expected) N = 2 diff = [] expected = [ (0,1) ] actual = find_homogeneous_groups( diff, N ) assert util.is_list_of_sets_equal(actual,expected) N = 3 diff = [] expected = [ (0,1,2) ] actual = find_homogeneous_groups( diff, N ) assert util.is_list_of_sets_equal(actual,expected) N = 3 diff = [ (0,1) ] expected = [ (0,2), (1,2) ] actual = find_homogeneous_groups( diff, N ) assert util.is_list_of_sets_equal(actual,expected) N = 3 diff = [ (1,2) ] expected = [ (0,1), (0,2) ] actual = find_homogeneous_groups( diff, N ) assert util.is_list_of_sets_equal(actual,expected) N = 3 diff = [ (0,2) ] expected = [ (0,1), (1,2) ] actual = find_homogeneous_groups( diff, N ) assert util.is_list_of_sets_equal(actual,expected) N = 5 diff = [ (0,2), (0,3), (2,3), ] find_homogeneous_groups( diff, N ) def test_find_homogeneous_groups_pathology1(): # I was having trouble with this set of data diff_pairs = [(0, 4), (0, 5), (0, 6), (0, 7), (0, 8), (1, 5), (1, 6), (1, 7), (1, 8), (2, 4), (2, 5), (2, 6), (2, 7), (2, 8), (3, 5), (3, 6), (3, 7), (3, 8), (4, 6), (4, 7), (4, 8), (5, 6), (5, 7), (5, 8), (6, 7), (6, 8), (7, 8)] N = 9 groups = find_homogeneous_groups( diff_pairs, N ) for group in groups: for a,b in diff_pairs: if a in group: assert b not in group def test_find_homogeneous_groups_pathology2(): # I was having trouble with this set of data diff_pairs = [(0, 2), (0, 3), (0, 4), (0, 5), (0, 6), (0, 8), (0, 11), (1, 2), (1, 3), (1, 5), (1, 8), (1, 11), (5, 9), (5, 10), (5, 12), (9, 11)] N = 13 groups = find_homogeneous_groups( diff_pairs, N ) for group in groups: for a,b in diff_pairs: if a in group: assert b not in group def label_homogeneous_groups(populations, significance_level=0.05, two_tailed = True, force_letter = True, test = 'mannwhitneyu', ): """perform statistical comparisons and call :func:`find_homogeneous_groups` The statistical test used is specificed by the parameter 
'test'. Parameters ---------- populations : A sequence of of sequences The list of populations to analyze. significance_level : float, optional The significance level required to determine two groups different. two_tailed : bool, optional Whether the comparison is two-tailed. force_letter : bool, optional If true (the default), each population gets assigned a letter test : string, optional The test to use. (Default: 'mannwhitneyu'. Also 'kruskal'.) Returns ------- group_info : dictionary These keys are returned: 'groups' specifies the population indices for each group, 'group_strings' assigns letters to each group and returns a list of strings for each item, 'p_values' is a matrix of p values, 'medians' is the median of each population. Examples -------- This example generates four populations. Three from the same distribution, and the last from a different distribution. Then, :func:`label_homogeneous_groups` is used to find which of these populations belong to statistically non significantly different groups. >>> import numpy as np >>> pop1 = np.random.normal(size=(100,)) >>> pop2 = np.random.normal(size=(100,)) >>> pop3 = np.random.normal(size=(100,)) >>> pop4 = np.random.normal(size=(100,)) + 2 >>> populations = [pop1, pop2, pop3, pop4] >>> group_info = label_homogeneous_groups(populations) >>> group_info # doctest: +SKIP {'p_values': array([[ NaN, 0.578, 0.705 , 0. ], [ 0.578, NaN, 0.855, 0. ], [ 0.705 , 0.855, NaN, 0. ], [ 0. , 0. , 0. 
, NaN]]), 'medians': [0.071, -0.010, -0.0156, 2.054], 'group_strings': ['a', 'a', 'a', ''], 'groups': [(0, 1, 2)]} """ import scipy.stats import numpy as np # create pairwise differences n_comparisons = (len(populations)**2 - len(populations))/2 diff_pairs = [] p_values = np.nan*np.ones( (len(populations), len(populations))) medians = [ np.median( x ) for x in populations ] num_samples = [ len( x ) for x in populations ] for i in range(len(populations)): for j in range(len(populations)): if not i < j: continue pair = (i,j) A = populations[i] B = populations[j] if test=='kruskal': if not two_tailed: raise NotImplementedError('') h,p = scipy.stats.kruskal(A,B) else: if test != 'mannwhitneyu': raise ValueError('unknown test %s'%test) fail = False try: U,p1=scipy.stats.mannwhitneyu(A,B) except ValueError as err: if err.message =='All numbers are identical in amannwhitneyu': fail = True else: raise err if fail: p = np.nan else: if two_tailed: p = p1*2 else: p = p1 p_values[i,j] = p sig = significance_level/n_comparisons # Bonferroni correction if p < sig: different = True else: different = False if different: diff_pairs.append( pair ) groups = find_homogeneous_groups( diff_pairs, len(populations) ) group_strs = [] group_order = -1*np.ones((len(groups),),dtype=np.int) next_group = 0 for i in range(len(populations)): mystr = [] for j in range(len(groups)): if i in groups[j]: order = group_order[j] if order==-1: order = next_group next_group += 1 group_order[j] = order mystr += [ chr( order+ord('a') ) ] if len(mystr): mystr.sort() mystr = ''.join(mystr) else: if force_letter: order = next_group next_group += 1 mystr = chr( order+ord('a') ) else: mystr = '' group_strs.append( mystr ) # make the p_value matrix symmetric for i in range(len(populations)): for j in range(len(populations)): if not i > j: continue p_values[i,j] = p_values[j,i] group_info = dict( groups=groups, group_strings=group_strs, p_values=p_values, medians=medians, num_samples=num_samples, ) return 
group_info


def label_homogeneous_groups_pandas(data, groupby_column_name,
                                    value_column_name='value',
                                    **kwargs):
    """Perform statistical comparisons on a pandas DataFrame object.

    Groups ``data`` by ``groupby_column_name``, extracts the values in
    ``value_column_name`` for each group, and forwards the resulting
    populations to :func:`label_homogeneous_groups` (extra keyword
    arguments are passed through unchanged).  The returned dictionary is
    the one from :func:`label_homogeneous_groups` with an additional
    ``'group_names'`` entry listing the group labels in iteration order.
    """
    grouped = data.groupby(groupby_column_name)
    # Collect the group labels and the raw value arrays in parallel,
    # preserving groupby's iteration order so names line up with values.
    names = [level for level, _ in grouped]
    values = [grp[value_column_name].values for _, grp in grouped]
    group_info = label_homogeneous_groups(values, **kwargs)
    group_info['group_names'] = names
    return group_info
mit
saimn/astropy
astropy/visualization/wcsaxes/transforms.py
8
5762
# Licensed under a 3-clause BSD style license - see LICENSE.rst

# Note: This file includes code derived from pywcsgrid2
#
# This file contains Matplotlib transformation objects (e.g. from pixel to world
# coordinates, but also world-to-world).

import abc

import numpy as np

from matplotlib.path import Path
from matplotlib.transforms import Transform

from astropy import units as u
from astropy.coordinates import (SkyCoord, frame_transform_graph,
                                 UnitSphericalRepresentation,
                                 BaseCoordinateFrame)

__all__ = ['CurvedTransform', 'CoordinateTransform',
           'World2PixelTransform', 'Pixel2WorldTransform']


class CurvedTransform(Transform, metaclass=abc.ABCMeta):
    """
    Abstract base class for non-affine curved transforms.

    Subclasses provide ``transform`` (array in, array out) and
    ``inverted``; path transformation is derived from ``transform``.
    """

    # 2-D in, 2-D out; the transform is curved, so x and y are coupled.
    input_dims = 2
    output_dims = 2
    is_separable = False

    def transform_path(self, path):
        """
        Transform a Matplotlib Path

        Parameters
        ----------
        path : :class:`~matplotlib.path.Path`
            The path to transform

        Returns
        -------
        path : :class:`~matplotlib.path.Path`
            The resulting path (same codes, transformed vertices)
        """
        return Path(self.transform(path.vertices), path.codes)

    # For a non-affine transform, Matplotlib calls the *_non_affine variant;
    # here it is simply the same operation.
    transform_path_non_affine = transform_path

    def transform(self, input):
        # Must be provided by concrete subclasses.
        raise NotImplementedError("")

    def inverted(self):
        # Must be provided by concrete subclasses.
        raise NotImplementedError("")


class CoordinateTransform(CurvedTransform):
    """World-to-world transform between two celestial coordinate frames."""

    has_inverse = True

    def __init__(self, input_system, output_system):
        """
        Parameters
        ----------
        input_system, output_system : str or `~astropy.coordinates.BaseCoordinateFrame`
            Either a frame name resolvable via the frame transform graph,
            or a coordinate frame instance.

        Raises
        ------
        ValueError
            If a frame name cannot be resolved.
        TypeError
            If an argument is neither a string nor a frame instance.
        """
        super().__init__()
        self._input_system_name = input_system
        self._output_system_name = output_system

        # Resolve the input system: frame name -> frame class -> instance,
        # or accept an existing frame instance directly.
        if isinstance(self._input_system_name, str):
            frame_cls = frame_transform_graph.lookup_name(self._input_system_name)
            if frame_cls is None:
                raise ValueError(f"Frame {self._input_system_name} not found")
            else:
                self.input_system = frame_cls()
        elif isinstance(self._input_system_name, BaseCoordinateFrame):
            self.input_system = self._input_system_name
        else:
            raise TypeError("input_system should be a WCS instance, string, or a coordinate frame instance")

        # Same resolution logic for the output system.
        if isinstance(self._output_system_name, str):
            frame_cls = frame_transform_graph.lookup_name(self._output_system_name)
            if frame_cls is None:
                raise ValueError(f"Frame {self._output_system_name} not found")
            else:
                self.output_system = frame_cls()
        elif isinstance(self._output_system_name, BaseCoordinateFrame):
            self.output_system = self._output_system_name
        else:
            raise TypeError("output_system should be a WCS instance, string, or a coordinate frame instance")

        # When both frames compare equal, transform() can short-circuit.
        if self.output_system == self.input_system:
            self.same_frames = True
        else:
            self.same_frames = False

    @property
    def same_frames(self):
        # True when input and output frames are identical (no-op transform).
        return self._same_frames

    @same_frames.setter
    def same_frames(self, same_frames):
        self._same_frames = same_frames

    def transform(self, input_coords):
        """
        Transform one set of coordinates to another.

        ``input_coords`` is an (N, 2) array of lon/lat values interpreted
        as degrees; an (N, 2) array of degrees in the output frame is
        returned.
        """
        if self.same_frames:
            return input_coords

        input_coords = input_coords*u.deg
        x_in, y_in = input_coords[:, 0], input_coords[:, 1]

        c_in = SkyCoord(UnitSphericalRepresentation(x_in, y_in),
                        frame=self.input_system)

        # We often need to transform arrays that contain NaN values, and filtering
        # out the NaN values would have a performance hit, so instead we just pass
        # on all values and just ignore Numpy warnings
        with np.errstate(all='ignore'):
            c_out = c_in.transform_to(self.output_system)

        lon = c_out.spherical.lon.deg
        lat = c_out.spherical.lat.deg

        # Stack lon/lat back into the (N, 2) layout Matplotlib expects.
        return np.concatenate((lon[:, np.newaxis], lat[:, np.newaxis]), axis=1)

    transform_non_affine = transform

    def inverted(self):
        """
        Return the inverse of the transform (systems swapped).
        """
        return CoordinateTransform(self._output_system_name, self._input_system_name)


class World2PixelTransform(CurvedTransform, metaclass=abc.ABCMeta):
    """
    Base transformation from world to pixel coordinates
    """

    has_inverse = True
    # Concrete subclasses set the input frame; None here as a placeholder.
    frame_in = None

    @property
    @abc.abstractmethod
    def input_dims(self):
        """
        The number of input world dimensions
        """

    @abc.abstractmethod
    def transform(self, world):
        """
        Transform world to pixel coordinates. You should pass in a NxM array
        where N is the number of points to transform, and M is the number of
        dimensions. This then returns the (x, y) pixel coordinates as a Nx2
        array.
        """

    @abc.abstractmethod
    def inverted(self):
        """
        Return the inverse of the transform
        """


class Pixel2WorldTransform(CurvedTransform, metaclass=abc.ABCMeta):
    """
    Base transformation from pixel to world coordinates
    """

    has_inverse = True
    # Concrete subclasses set the output frame; None here as a placeholder.
    frame_out = None

    @property
    @abc.abstractmethod
    def output_dims(self):
        """
        The number of output world dimensions
        """

    @abc.abstractmethod
    def transform(self, pixel):
        """
        Transform pixel to world coordinates. You should pass in a Nx2 array
        of (x, y) pixel coordinates to transform to world coordinates. This
        will then return an NxM array where M is the number of dimensions.
        """

    @abc.abstractmethod
    def inverted(self):
        """
        Return the inverse of the transform
        """
bsd-3-clause
Bihaqo/exp-machines
src/objectives/logistic.py
1
2008
""" Utils related to the logistic regression. """ import numpy as np from sklearn.linear_model import LogisticRegression def log1pexp(x): """log(1 + exp(x))""" return np.logaddexp(0, x) def sigmoid(x): return 1.0 / log1pexp(-x) def binary_logistic_loss(linear_o, y): """Returns a vector of logistic losses of each object. Given a vector of linear ouputs a vector of ground truth target values y returns logistic losses with respect to the linear outputs. Linear outputs can be e.g. <w, x_i> + b. """ return log1pexp(-y.flatten() * linear_o.flatten()) / linear_o.size def binary_logistic_loss_grad(linear_o, y): """Derivative of the binary_logistic_loss w.r.t. the linear output""" # Sometimes denom overflows, but it's OK, since if it's very large, it would # be set to INF and the output correctly takes the value of 0. # TODO: Fix overflow warnings. denom = 1 + np.exp(y.flatten() * linear_o.flatten()) return -y / (denom * linear_o.size) def _multinomial_loss(linear_o, y): raise NotImplementedError() def preprocess(X, y, info=None): """Prepare the data for the learning""" if info is None: info = {} info['classes'] = np.unique(y) n_classes = info['classes'].size if n_classes < 2: raise ValueError("This solver needs samples of 2 classes" " in the data, but the data contains only one" " class: %r." % info['classes'][0]) if n_classes > 2: raise NotImplementedError("multiclass is not implemented yet.") idx_min_1 = (y == info['classes'][0]) y = np.ones(y.shape) y[idx_min_1] = -1 return X, y, info def linear_init(X, y, fit_intercept=True): logreg = LogisticRegression(fit_intercept=fit_intercept) logreg.fit(X, y) if fit_intercept: intercept = logreg.intercept_[0] else: intercept = logreg.intercept_ return logreg.coef_[0, :], intercept
mit
andrescv/BehavioralCloning
utils.py
1
2382
'''
Behavioral Cloning Project
Utilities

Helpers for loading an HDF5 dataset, augmenting/splitting it,
normalizing images, and generating training batches.
'''

import os
import h5py
import numpy as np
from scipy.misc import imresize
from sklearn.utils import shuffle

try:
    from sklearn.model_selection import train_test_split
except ImportError:
    # Fallback for older scikit-learn versions (< 0.18).
    from sklearn.cross_validation import train_test_split


def loadDataset(path):
    '''Load every top-level dataset of an HDF5 file into a dict of arrays.'''
    f = h5py.File(path, 'r')
    dataset = {}
    for key in f.keys():
        # [...] reads the whole dataset into memory as a numpy array.
        dataset[key] = f[key][...]
    f.close()
    return dataset


def augmentAndSplitData(images, angles, size=0.2):
    '''Augment by horizontally flipping non-zero-angle samples, then split.

    Each image with a nonzero steering angle is mirrored (np.fliplr) and
    its angle negated; the augmented set is appended to the originals
    before a train/validation split of fraction ``size``.
    Returns ((X_train, y_train), (X_val, y_val)).
    '''
    mask = angles != 0
    images_new = images[mask].copy()
    angles_new = angles[mask] * -1.
    for i in range(len(images_new)):
        images_new[i, ...] = np.fliplr(images_new[i, ...])
    images = np.concatenate((images, images_new))
    angles = np.concatenate((angles, angles_new))
    # Fixed random_state keeps the split reproducible across runs.
    X1, X2, y1, y2 = train_test_split(images, angles, test_size=size, random_state=0)
    return (X1, y1), (X2, y2)


def normalize(img):
    '''Normalize (Inception Style): map uint8 pixel values to [-1, 1).'''
    img = img.astype('float32')
    img -= 128.
    img /= 128.
    return img


def preprocessRawImage(img):
    '''Preprocess a raw simulator image: crop, resize to 80x80, normalize.'''
    img = img[60:140, ...]  # crop, discard parts of the image that may confuse the model
    img = imresize(img, (80, 80))  # resize (80x80)
    return normalize(img)  # normalize


def batchGenerator(dataset, batch_size=32, training=False):
    '''batch generator function

    Yields (X_batch, y_batch) pairs forever; each epoch covers all N
    samples.  When ``training`` is True the sample order is reshuffled at
    the start of every epoch.  Images are normalized on the fly.
    '''
    X, y = dataset
    N = len(X)
    total_batches = int(np.ceil(float(N) / batch_size))
    while True:
        indices = np.arange(N)
        # shuffle indices if training is True
        if training:
            indices = shuffle(indices)
        for i in range(total_batches):
            S = i * batch_size
            # The last batch may be shorter than batch_size.
            E = min((i + 1) * batch_size, N)
            # select indices
            selected_indices = indices[S:E]
            nb = len(selected_indices)  # actual batch
            X_batch = np.zeros((nb, 80, 80, 3), dtype='float32')
            y_batch = np.zeros((nb,), dtype='float32')
            j = 0
            for index in selected_indices:
                img, angle = X[index], y[index]
                X_batch[j], y_batch[j] = normalize(img), angle
                j += 1
            yield X_batch, y_batch
mit
ChanderG/scikit-learn
sklearn/datasets/tests/test_20news.py
280
3045
"""Test the 20news downloader, if the data is available.""" import numpy as np import scipy.sparse as sp from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_true from sklearn.utils.testing import SkipTest from sklearn import datasets def test_20news(): try: data = datasets.fetch_20newsgroups( subset='all', download_if_missing=False, shuffle=False) except IOError: raise SkipTest("Download 20 newsgroups to run this test") # Extract a reduced dataset data2cats = datasets.fetch_20newsgroups( subset='all', categories=data.target_names[-1:-3:-1], shuffle=False) # Check that the ordering of the target_names is the same # as the ordering in the full dataset assert_equal(data2cats.target_names, data.target_names[-2:]) # Assert that we have only 0 and 1 as labels assert_equal(np.unique(data2cats.target).tolist(), [0, 1]) # Check that the number of filenames is consistent with data/target assert_equal(len(data2cats.filenames), len(data2cats.target)) assert_equal(len(data2cats.filenames), len(data2cats.data)) # Check that the first entry of the reduced dataset corresponds to # the first entry of the corresponding category in the full dataset entry1 = data2cats.data[0] category = data2cats.target_names[data2cats.target[0]] label = data.target_names.index(category) entry2 = data.data[np.where(data.target == label)[0][0]] assert_equal(entry1, entry2) def test_20news_length_consistency(): """Checks the length consistencies within the bunch This is a non-regression test for a bug present in 0.16.1. 
""" try: data = datasets.fetch_20newsgroups( subset='all', download_if_missing=False, shuffle=False) except IOError: raise SkipTest("Download 20 newsgroups to run this test") # Extract the full dataset data = datasets.fetch_20newsgroups(subset='all') assert_equal(len(data['data']), len(data.data)) assert_equal(len(data['target']), len(data.target)) assert_equal(len(data['filenames']), len(data.filenames)) def test_20news_vectorized(): # This test is slow. raise SkipTest("Test too slow.") bunch = datasets.fetch_20newsgroups_vectorized(subset="train") assert_true(sp.isspmatrix_csr(bunch.data)) assert_equal(bunch.data.shape, (11314, 107428)) assert_equal(bunch.target.shape[0], 11314) assert_equal(bunch.data.dtype, np.float64) bunch = datasets.fetch_20newsgroups_vectorized(subset="test") assert_true(sp.isspmatrix_csr(bunch.data)) assert_equal(bunch.data.shape, (7532, 107428)) assert_equal(bunch.target.shape[0], 7532) assert_equal(bunch.data.dtype, np.float64) bunch = datasets.fetch_20newsgroups_vectorized(subset="all") assert_true(sp.isspmatrix_csr(bunch.data)) assert_equal(bunch.data.shape, (11314 + 7532, 107428)) assert_equal(bunch.target.shape[0], 11314 + 7532) assert_equal(bunch.data.dtype, np.float64)
bsd-3-clause
abhisg/scikit-learn
examples/ensemble/plot_forest_importances.py
168
1793
""" ========================================= Feature importances with forests of trees ========================================= This examples shows the use of forests of trees to evaluate the importance of features on an artificial classification task. The red bars are the feature importances of the forest, along with their inter-trees variability. As expected, the plot suggests that 3 features are informative, while the remaining are not. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn.datasets import make_classification from sklearn.ensemble import ExtraTreesClassifier # Build a classification task using 3 informative features X, y = make_classification(n_samples=1000, n_features=10, n_informative=3, n_redundant=0, n_repeated=0, n_classes=2, random_state=0, shuffle=False) # Build a forest and compute the feature importances forest = ExtraTreesClassifier(n_estimators=250, random_state=0) forest.fit(X, y) importances = forest.feature_importances_ std = np.std([tree.feature_importances_ for tree in forest.estimators_], axis=0) indices = np.argsort(importances)[::-1] # Print the feature ranking print("Feature ranking:") for f in range(X.shape[1]): print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]])) # Plot the feature importances of the forest plt.figure() plt.title("Feature importances") plt.bar(range(X.shape[1]), importances[indices], color="r", yerr=std[indices], align="center") plt.xticks(range(X.shape[1]), indices) plt.xlim([-1, X.shape[1]]) plt.show()
bsd-3-clause
rpmunoz/DECam
data_reduction/pipeline_python/decam_pipeline.py
1
1303
#! /usr/bin/env python import warnings warnings.filterwarnings("ignore") import sys,os import os.path import time import subprocess import numpy as np import pyfits import multiprocessing, Queue import ctypes import matplotlib.pyplot as plt import scipy.interpolate from astropy.convolution import convolve, convolve_fft, Gaussian2DKernel from astropy.stats import sigma_clipped_stats from photutils import detect_sources, segment_properties, properties_table from photutils.background import Background from scipy.interpolate import Rbf from astroML.plotting import setup_text_plots setup_text_plots(fontsize=8, usetex=True) def main(argv): recipe =argv[0] input_file = '' output_file = '' try: opts, args = getopt.getopt(argv[1:],"hi:o:",["input=","output="]) except getopt.GetoptError: print 'decam_pipeline.py -i <input_file> -o <output_file>' sys.exit(2) for opt, arg in opts: if opt == '-h': print 'decam_pipeline.py -i <input_file> -o <output_file>' sys.exit() elif opt in ("-i", "--input"): input_file = arg elif opt in ("-o", "--output"): output_file = arg print 'Recipe is "', recipe print 'Input file is "', input_file print 'Output file is "', output_file if __name__ == "__main__": main(sys.argv[1:]) n_cpu=2 n_core=6 n_processes=n_cpu*n_core*1
apache-2.0
harisbal/pandas
pandas/tests/extension/json/array.py
2
6299
"""Test extension array for storing nested data in a pandas container. The JSONArray stores lists of dictionaries. The storage mechanism is a list, not an ndarray. Note: We currently store lists of UserDicts (Py3 only). Pandas has a few places internally that specifically check for dicts, and does non-scalar things in that case. We *want* the dictionaries to be treated as scalars, so we hack around pandas by using UserDicts. """ import collections import itertools import numbers import random import string import sys import numpy as np from pandas.core.dtypes.base import ExtensionDtype from pandas import compat from pandas.core.arrays import ExtensionArray class JSONDtype(ExtensionDtype): type = compat.Mapping name = 'json' try: na_value = collections.UserDict() except AttributeError: # source compatibility with Py2. na_value = {} @classmethod def construct_array_type(cls): """Return the array type associated with this dtype Returns ------- type """ return JSONArray @classmethod def construct_from_string(cls, string): if string == cls.name: return cls() else: raise TypeError("Cannot construct a '{}' from " "'{}'".format(cls, string)) class JSONArray(ExtensionArray): dtype = JSONDtype() __array_priority__ = 1000 def __init__(self, values, dtype=None, copy=False): for val in values: if not isinstance(val, self.dtype.type): raise TypeError("All values must be of type " + str(self.dtype.type)) self.data = values # Some aliases for common attribute names to ensure pandas supports # these self._items = self._data = self.data # those aliases are currently not working due to assumptions # in internal code (GH-20735) # self._values = self.values = self.data @classmethod def _from_sequence(cls, scalars, dtype=None, copy=False): return cls(scalars) @classmethod def _from_factorized(cls, values, original): return cls([collections.UserDict(x) for x in values if x != ()]) def __getitem__(self, item): if isinstance(item, numbers.Integral): return self.data[item] elif 
isinstance(item, np.ndarray) and item.dtype == 'bool': return self._from_sequence([x for x, m in zip(self, item) if m]) elif isinstance(item, compat.Iterable): # fancy indexing return type(self)([self.data[i] for i in item]) else: # slice return type(self)(self.data[item]) def __setitem__(self, key, value): if isinstance(key, numbers.Integral): self.data[key] = value else: if not isinstance(value, (type(self), compat.Sequence)): # broadcast value value = itertools.cycle([value]) if isinstance(key, np.ndarray) and key.dtype == 'bool': # masking for i, (k, v) in enumerate(zip(key, value)): if k: assert isinstance(v, self.dtype.type) self.data[i] = v else: for k, v in zip(key, value): assert isinstance(v, self.dtype.type) self.data[k] = v def __len__(self): return len(self.data) def __repr__(self): return 'JSONArary({!r})'.format(self.data) @property def nbytes(self): return sys.getsizeof(self.data) def isna(self): return np.array([x == self.dtype.na_value for x in self.data], dtype=bool) def take(self, indexer, allow_fill=False, fill_value=None): # re-implement here, since NumPy has trouble setting # sized objects like UserDicts into scalar slots of # an ndarary. indexer = np.asarray(indexer) msg = ("Index is out of bounds or cannot do a " "non-empty take from an empty array.") if allow_fill: if fill_value is None: fill_value = self.dtype.na_value # bounds check if (indexer < -1).any(): raise ValueError try: output = [self.data[loc] if loc != -1 else fill_value for loc in indexer] except IndexError: raise IndexError(msg) else: try: output = [self.data[loc] for loc in indexer] except IndexError: raise IndexError(msg) return self._from_sequence(output) def copy(self, deep=False): return type(self)(self.data[:]) def astype(self, dtype, copy=True): # NumPy has issues when all the dicts are the same length. # np.array([UserDict(...), UserDict(...)]) fails, # but np.array([{...}, {...}]) works, so cast. 
return np.array([dict(x) for x in self], dtype=dtype, copy=copy) def unique(self): # Parent method doesn't work since np.array will try to infer # a 2-dim object. return type(self)([ dict(x) for x in list({tuple(d.items()) for d in self.data}) ]) @classmethod def _concat_same_type(cls, to_concat): data = list(itertools.chain.from_iterable([x.data for x in to_concat])) return cls(data) def _values_for_factorize(self): frozen = self._values_for_argsort() return frozen, () def _values_for_argsort(self): # Disable NumPy's shape inference by including an empty tuple... # If all the elemnts of self are the same size P, NumPy will # cast them to an (N, P) array, instead of an (N,) array of tuples. frozen = [()] + [tuple(x.items()) for x in self] return np.array(frozen, dtype=object)[1:] def make_data(): # TODO: Use a regular dict. See _NDFrameIndexer._setitem_with_indexer return [collections.UserDict([ (random.choice(string.ascii_letters), random.randint(0, 100)) for _ in range(random.randint(0, 10))]) for _ in range(100)]
bsd-3-clause